/*
 * method-to-ir.c: Convert CIL to the JIT internal representation
 *
 * Authors:
 *   Paolo Molaro (lupus@ximian.com)
 *   Dietmar Maurer (dietmar@ximian.com)
 *
 * (C) 2002 Ximian, Inc.
 */
22 #ifdef HAVE_SYS_TIME_H
30 #include <mono/utils/memcheck.h>
32 #include <mono/metadata/assembly.h>
33 #include <mono/metadata/loader.h>
34 #include <mono/metadata/tabledefs.h>
35 #include <mono/metadata/class.h>
36 #include <mono/metadata/object.h>
37 #include <mono/metadata/exception.h>
38 #include <mono/metadata/opcodes.h>
39 #include <mono/metadata/mono-endian.h>
40 #include <mono/metadata/tokentype.h>
41 #include <mono/metadata/tabledefs.h>
42 #include <mono/metadata/marshal.h>
43 #include <mono/metadata/debug-helpers.h>
44 #include <mono/metadata/mono-debug.h>
45 #include <mono/metadata/gc-internal.h>
46 #include <mono/metadata/security-manager.h>
47 #include <mono/metadata/threads-types.h>
48 #include <mono/metadata/security-core-clr.h>
49 #include <mono/metadata/monitor.h>
50 #include <mono/metadata/profiler-private.h>
51 #include <mono/metadata/profiler.h>
52 #include <mono/utils/mono-compiler.h>
53 #include <mono/metadata/mono-basic-block.h>
60 #include "jit-icalls.h"
62 #include "debugger-agent.h"
64 #define BRANCH_COST 10
65 #define INLINE_LENGTH_LIMIT 20
66 #define INLINE_FAILURE do {\
67 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE))\
70 #define CHECK_CFG_EXCEPTION do {\
71 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
74 #define METHOD_ACCESS_FAILURE do { \
75 char *method_fname = mono_method_full_name (method, TRUE); \
76 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
77 mono_cfg_set_exception (cfg, MONO_EXCEPTION_METHOD_ACCESS); \
78 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
79 g_free (method_fname); \
80 g_free (cil_method_fname); \
81 goto exception_exit; \
83 #define FIELD_ACCESS_FAILURE do { \
84 char *method_fname = mono_method_full_name (method, TRUE); \
85 char *field_fname = mono_field_full_name (field); \
86 mono_cfg_set_exception (cfg, MONO_EXCEPTION_FIELD_ACCESS); \
87 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
88 g_free (method_fname); \
89 g_free (field_fname); \
90 goto exception_exit; \
92 #define GENERIC_SHARING_FAILURE(opcode) do { \
93 if (cfg->generic_sharing_context) { \
94 if (cfg->verbose_level > 2) \
95 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
96 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
97 goto exception_exit; \
101 /* Determine whenever 'ins' represents a load of the 'this' argument */
102 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
104 static int ldind_to_load_membase (int opcode);
105 static int stind_to_store_membase (int opcode);
107 int mono_op_to_op_imm (int opcode);
108 int mono_op_to_op_imm_noemul (int opcode);
110 MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
111 void mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native);
112 void mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass);
114 /* helper methods signature */
115 extern MonoMethodSignature *helper_sig_class_init_trampoline;
116 extern MonoMethodSignature *helper_sig_domain_get;
117 extern MonoMethodSignature *helper_sig_generic_class_init_trampoline;
118 extern MonoMethodSignature *helper_sig_generic_class_init_trampoline_llvm;
119 extern MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
120 extern MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline;
121 extern MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm;
124 * Instruction metadata
132 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
133 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
139 #if SIZEOF_REGISTER == 8
144 /* keep in sync with the enum in mini.h */
147 #include "mini-ops.h"
152 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
153 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
155 * This should contain the index of the last sreg + 1. This is not the same
156 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
158 const gint8 ins_sreg_counts[] = {
159 #include "mini-ops.h"
164 #define MONO_INIT_VARINFO(vi,id) do { \
165 (vi)->range.first_use.pos.bid = 0xffff; \
171 mono_inst_set_src_registers (MonoInst *ins, int *regs)
173 ins->sreg1 = regs [0];
174 ins->sreg2 = regs [1];
175 ins->sreg3 = regs [2];
179 mono_alloc_ireg (MonoCompile *cfg)
181 return alloc_ireg (cfg);
185 mono_alloc_freg (MonoCompile *cfg)
187 return alloc_freg (cfg);
191 mono_alloc_preg (MonoCompile *cfg)
193 return alloc_preg (cfg);
197 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
199 return alloc_dreg (cfg, stack_type);
203 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
209 switch (type->type) {
212 case MONO_TYPE_BOOLEAN:
224 case MONO_TYPE_FNPTR:
226 case MONO_TYPE_CLASS:
227 case MONO_TYPE_STRING:
228 case MONO_TYPE_OBJECT:
229 case MONO_TYPE_SZARRAY:
230 case MONO_TYPE_ARRAY:
234 #if SIZEOF_REGISTER == 8
243 case MONO_TYPE_VALUETYPE:
244 if (type->data.klass->enumtype) {
245 type = mono_class_enum_basetype (type->data.klass);
248 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
251 case MONO_TYPE_TYPEDBYREF:
253 case MONO_TYPE_GENERICINST:
254 type = &type->data.generic_class->container_class->byval_arg;
258 g_assert (cfg->generic_sharing_context);
261 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
267 mono_print_bb (MonoBasicBlock *bb, const char *msg)
272 printf ("\n%s %d: [IN: ", msg, bb->block_num);
273 for (i = 0; i < bb->in_count; ++i)
274 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
276 for (i = 0; i < bb->out_count; ++i)
277 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
279 for (tree = bb->code; tree; tree = tree->next)
280 mono_print_ins_index (-1, tree);
284 * Can't put this at the beginning, since other files reference stuff from this
289 #define UNVERIFIED do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto unverified; } while (0)
291 #define LOAD_ERROR do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto load_error; } while (0)
293 #define GET_BBLOCK(cfg,tblock,ip) do { \
294 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
296 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
297 NEW_BBLOCK (cfg, (tblock)); \
298 (tblock)->cil_code = (ip); \
299 ADD_BBLOCK (cfg, (tblock)); \
303 #if defined(TARGET_X86) || defined(TARGET_AMD64)
304 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
305 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
306 (dest)->dreg = alloc_preg ((cfg)); \
307 (dest)->sreg1 = (sr1); \
308 (dest)->sreg2 = (sr2); \
309 (dest)->inst_imm = (imm); \
310 (dest)->backend.shift_amount = (shift); \
311 MONO_ADD_INS ((cfg)->cbb, (dest)); \
315 #if SIZEOF_REGISTER == 8
316 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
317 /* FIXME: Need to add many more cases */ \
318 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
320 int dr = alloc_preg (cfg); \
321 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
322 (ins)->sreg2 = widen->dreg; \
326 #define ADD_WIDEN_OP(ins, arg1, arg2)
329 #define ADD_BINOP(op) do { \
330 MONO_INST_NEW (cfg, ins, (op)); \
332 ins->sreg1 = sp [0]->dreg; \
333 ins->sreg2 = sp [1]->dreg; \
334 type_from_op (ins, sp [0], sp [1]); \
336 /* Have to insert a widening op */ \
337 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
338 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
339 MONO_ADD_INS ((cfg)->cbb, (ins)); \
340 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
343 #define ADD_UNOP(op) do { \
344 MONO_INST_NEW (cfg, ins, (op)); \
346 ins->sreg1 = sp [0]->dreg; \
347 type_from_op (ins, sp [0], NULL); \
349 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
350 MONO_ADD_INS ((cfg)->cbb, (ins)); \
351 *sp++ = mono_decompose_opcode (cfg, ins); \
354 #define ADD_BINCOND(next_block) do { \
357 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
358 cmp->sreg1 = sp [0]->dreg; \
359 cmp->sreg2 = sp [1]->dreg; \
360 type_from_op (cmp, sp [0], sp [1]); \
362 type_from_op (ins, sp [0], sp [1]); \
363 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
364 GET_BBLOCK (cfg, tblock, target); \
365 link_bblock (cfg, bblock, tblock); \
366 ins->inst_true_bb = tblock; \
367 if ((next_block)) { \
368 link_bblock (cfg, bblock, (next_block)); \
369 ins->inst_false_bb = (next_block); \
370 start_new_bblock = 1; \
372 GET_BBLOCK (cfg, tblock, ip); \
373 link_bblock (cfg, bblock, tblock); \
374 ins->inst_false_bb = tblock; \
375 start_new_bblock = 2; \
377 if (sp != stack_start) { \
378 handle_stack_args (cfg, stack_start, sp - stack_start); \
379 CHECK_UNVERIFIABLE (cfg); \
381 MONO_ADD_INS (bblock, cmp); \
382 MONO_ADD_INS (bblock, ins); \
386 * link_bblock: Links two basic blocks
388 * links two basic blocks in the control flow graph, the 'from'
389 * argument is the starting block and the 'to' argument is the block
390 * the control flow ends to after 'from'.
393 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
395 MonoBasicBlock **newa;
399 if (from->cil_code) {
401 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
403 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
406 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
408 printf ("edge from entry to exit\n");
413 for (i = 0; i < from->out_count; ++i) {
414 if (to == from->out_bb [i]) {
420 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
421 for (i = 0; i < from->out_count; ++i) {
422 newa [i] = from->out_bb [i];
430 for (i = 0; i < to->in_count; ++i) {
431 if (from == to->in_bb [i]) {
437 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
438 for (i = 0; i < to->in_count; ++i) {
439 newa [i] = to->in_bb [i];
448 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
450 link_bblock (cfg, from, to);
454 * mono_find_block_region:
456 * We mark each basic block with a region ID. We use that to avoid BB
457 * optimizations when blocks are in different regions.
460 * A region token that encodes where this region is, and information
461 * about the clause owner for this block.
463 * The region encodes the try/catch/filter clause that owns this block
464 * as well as the type. -1 is a special value that represents a block
465 * that is in none of try/catch/filter.
468 mono_find_block_region (MonoCompile *cfg, int offset)
470 MonoMethodHeader *header = cfg->header;
471 MonoExceptionClause *clause;
474 for (i = 0; i < header->num_clauses; ++i) {
475 clause = &header->clauses [i];
476 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
477 (offset < (clause->handler_offset)))
478 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
480 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
481 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
482 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
483 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
484 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
486 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
489 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
490 return ((i + 1) << 8) | clause->flags;
497 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
499 MonoMethodHeader *header = cfg->header;
500 MonoExceptionClause *clause;
504 for (i = 0; i < header->num_clauses; ++i) {
505 clause = &header->clauses [i];
506 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
507 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
508 if (clause->flags == type)
509 res = g_list_append (res, clause);
516 mono_create_spvar_for_region (MonoCompile *cfg, int region)
520 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
524 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
525 /* prevent it from being register allocated */
526 var->flags |= MONO_INST_INDIRECT;
528 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
532 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
534 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
538 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
542 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
546 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
547 /* prevent it from being register allocated */
548 var->flags |= MONO_INST_INDIRECT;
550 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
556 * Returns the type used in the eval stack when @type is loaded.
557 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
560 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
564 inst->klass = klass = mono_class_from_mono_type (type);
566 inst->type = STACK_MP;
571 switch (type->type) {
573 inst->type = STACK_INV;
577 case MONO_TYPE_BOOLEAN:
583 inst->type = STACK_I4;
588 case MONO_TYPE_FNPTR:
589 inst->type = STACK_PTR;
591 case MONO_TYPE_CLASS:
592 case MONO_TYPE_STRING:
593 case MONO_TYPE_OBJECT:
594 case MONO_TYPE_SZARRAY:
595 case MONO_TYPE_ARRAY:
596 inst->type = STACK_OBJ;
600 inst->type = STACK_I8;
604 inst->type = STACK_R8;
606 case MONO_TYPE_VALUETYPE:
607 if (type->data.klass->enumtype) {
608 type = mono_class_enum_basetype (type->data.klass);
612 inst->type = STACK_VTYPE;
615 case MONO_TYPE_TYPEDBYREF:
616 inst->klass = mono_defaults.typed_reference_class;
617 inst->type = STACK_VTYPE;
619 case MONO_TYPE_GENERICINST:
620 type = &type->data.generic_class->container_class->byval_arg;
623 case MONO_TYPE_MVAR :
624 /* FIXME: all the arguments must be references for now,
625 * later look inside cfg and see if the arg num is
628 g_assert (cfg->generic_sharing_context);
629 inst->type = STACK_OBJ;
632 g_error ("unknown type 0x%02x in eval stack type", type->type);
637 * The following tables are used to quickly validate the IL code in type_from_op ().
640 bin_num_table [STACK_MAX] [STACK_MAX] = {
641 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
642 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
643 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
644 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
645 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
646 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
647 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
648 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
653 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
656 /* reduce the size of this table */
658 bin_int_table [STACK_MAX] [STACK_MAX] = {
659 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
660 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
661 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
662 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
663 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
664 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
665 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
666 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
670 bin_comp_table [STACK_MAX] [STACK_MAX] = {
671 /* Inv i L p F & O vt */
673 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
674 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
675 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
676 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
677 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
678 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
679 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
682 /* reduce the size of this table */
684 shift_table [STACK_MAX] [STACK_MAX] = {
685 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
686 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
687 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
688 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
689 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
690 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
691 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
692 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
696 * Tables to map from the non-specific opcode to the matching
697 * type-specific opcode.
699 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
701 binops_op_map [STACK_MAX] = {
702 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
705 /* handles from CEE_NEG to CEE_CONV_U8 */
707 unops_op_map [STACK_MAX] = {
708 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
711 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
713 ovfops_op_map [STACK_MAX] = {
714 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
717 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
719 ovf2ops_op_map [STACK_MAX] = {
720 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
723 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
725 ovf3ops_op_map [STACK_MAX] = {
726 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
729 /* handles from CEE_BEQ to CEE_BLT_UN */
731 beqops_op_map [STACK_MAX] = {
732 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
735 /* handles from CEE_CEQ to CEE_CLT_UN */
737 ceqops_op_map [STACK_MAX] = {
738 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
742 * Sets ins->type (the type on the eval stack) according to the
743 * type of the opcode and the arguments to it.
744 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
746 * FIXME: this function sets ins->type unconditionally in some cases, but
747 * it should set it to invalid for some types (a conv.x on an object)
750 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
752 switch (ins->opcode) {
759 /* FIXME: check unverifiable args for STACK_MP */
760 ins->type = bin_num_table [src1->type] [src2->type];
761 ins->opcode += binops_op_map [ins->type];
768 ins->type = bin_int_table [src1->type] [src2->type];
769 ins->opcode += binops_op_map [ins->type];
774 ins->type = shift_table [src1->type] [src2->type];
775 ins->opcode += binops_op_map [ins->type];
780 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
781 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
782 ins->opcode = OP_LCOMPARE;
783 else if (src1->type == STACK_R8)
784 ins->opcode = OP_FCOMPARE;
786 ins->opcode = OP_ICOMPARE;
788 case OP_ICOMPARE_IMM:
789 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
790 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
791 ins->opcode = OP_LCOMPARE_IMM;
803 ins->opcode += beqops_op_map [src1->type];
806 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
807 ins->opcode += ceqops_op_map [src1->type];
813 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
814 ins->opcode += ceqops_op_map [src1->type];
818 ins->type = neg_table [src1->type];
819 ins->opcode += unops_op_map [ins->type];
822 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
823 ins->type = src1->type;
825 ins->type = STACK_INV;
826 ins->opcode += unops_op_map [ins->type];
832 ins->type = STACK_I4;
833 ins->opcode += unops_op_map [src1->type];
836 ins->type = STACK_R8;
837 switch (src1->type) {
840 ins->opcode = OP_ICONV_TO_R_UN;
843 ins->opcode = OP_LCONV_TO_R_UN;
847 case CEE_CONV_OVF_I1:
848 case CEE_CONV_OVF_U1:
849 case CEE_CONV_OVF_I2:
850 case CEE_CONV_OVF_U2:
851 case CEE_CONV_OVF_I4:
852 case CEE_CONV_OVF_U4:
853 ins->type = STACK_I4;
854 ins->opcode += ovf3ops_op_map [src1->type];
856 case CEE_CONV_OVF_I_UN:
857 case CEE_CONV_OVF_U_UN:
858 ins->type = STACK_PTR;
859 ins->opcode += ovf2ops_op_map [src1->type];
861 case CEE_CONV_OVF_I1_UN:
862 case CEE_CONV_OVF_I2_UN:
863 case CEE_CONV_OVF_I4_UN:
864 case CEE_CONV_OVF_U1_UN:
865 case CEE_CONV_OVF_U2_UN:
866 case CEE_CONV_OVF_U4_UN:
867 ins->type = STACK_I4;
868 ins->opcode += ovf2ops_op_map [src1->type];
871 ins->type = STACK_PTR;
872 switch (src1->type) {
874 ins->opcode = OP_ICONV_TO_U;
878 #if SIZEOF_REGISTER == 8
879 ins->opcode = OP_LCONV_TO_U;
881 ins->opcode = OP_MOVE;
885 ins->opcode = OP_LCONV_TO_U;
888 ins->opcode = OP_FCONV_TO_U;
894 ins->type = STACK_I8;
895 ins->opcode += unops_op_map [src1->type];
897 case CEE_CONV_OVF_I8:
898 case CEE_CONV_OVF_U8:
899 ins->type = STACK_I8;
900 ins->opcode += ovf3ops_op_map [src1->type];
902 case CEE_CONV_OVF_U8_UN:
903 case CEE_CONV_OVF_I8_UN:
904 ins->type = STACK_I8;
905 ins->opcode += ovf2ops_op_map [src1->type];
909 ins->type = STACK_R8;
910 ins->opcode += unops_op_map [src1->type];
913 ins->type = STACK_R8;
917 ins->type = STACK_I4;
918 ins->opcode += ovfops_op_map [src1->type];
923 ins->type = STACK_PTR;
924 ins->opcode += ovfops_op_map [src1->type];
932 ins->type = bin_num_table [src1->type] [src2->type];
933 ins->opcode += ovfops_op_map [src1->type];
934 if (ins->type == STACK_R8)
935 ins->type = STACK_INV;
937 case OP_LOAD_MEMBASE:
938 ins->type = STACK_PTR;
940 case OP_LOADI1_MEMBASE:
941 case OP_LOADU1_MEMBASE:
942 case OP_LOADI2_MEMBASE:
943 case OP_LOADU2_MEMBASE:
944 case OP_LOADI4_MEMBASE:
945 case OP_LOADU4_MEMBASE:
946 ins->type = STACK_PTR;
948 case OP_LOADI8_MEMBASE:
949 ins->type = STACK_I8;
951 case OP_LOADR4_MEMBASE:
952 case OP_LOADR8_MEMBASE:
953 ins->type = STACK_R8;
956 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
960 if (ins->type == STACK_MP)
961 ins->klass = mono_defaults.object_class;
966 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
972 param_table [STACK_MAX] [STACK_MAX] = {
977 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
981 switch (args->type) {
991 for (i = 0; i < sig->param_count; ++i) {
992 switch (args [i].type) {
996 if (!sig->params [i]->byref)
1000 if (sig->params [i]->byref)
1002 switch (sig->params [i]->type) {
1003 case MONO_TYPE_CLASS:
1004 case MONO_TYPE_STRING:
1005 case MONO_TYPE_OBJECT:
1006 case MONO_TYPE_SZARRAY:
1007 case MONO_TYPE_ARRAY:
1014 if (sig->params [i]->byref)
1016 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1025 /*if (!param_table [args [i].type] [sig->params [i]->type])
1033 * When we need a pointer to the current domain many times in a method, we
1034 * call mono_domain_get() once and we store the result in a local variable.
1035 * This function returns the variable that represents the MonoDomain*.
1037 inline static MonoInst *
1038 mono_get_domainvar (MonoCompile *cfg)
1040 if (!cfg->domainvar)
1041 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1042 return cfg->domainvar;
1046 * The got_var contains the address of the Global Offset Table when AOT
1050 mono_get_got_var (MonoCompile *cfg)
1052 #ifdef MONO_ARCH_NEED_GOT_VAR
1053 if (!cfg->compile_aot)
1055 if (!cfg->got_var) {
1056 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1058 return cfg->got_var;
1065 mono_get_vtable_var (MonoCompile *cfg)
1067 g_assert (cfg->generic_sharing_context);
1069 if (!cfg->rgctx_var) {
1070 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1071 /* force the var to be stack allocated */
1072 cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
1075 return cfg->rgctx_var;
1079 type_from_stack_type (MonoInst *ins) {
1080 switch (ins->type) {
1081 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1082 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1083 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1084 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1086 return &ins->klass->this_arg;
1087 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1088 case STACK_VTYPE: return &ins->klass->byval_arg;
1090 g_error ("stack type %d to monotype not handled\n", ins->type);
1095 static G_GNUC_UNUSED int
1096 type_to_stack_type (MonoType *t)
1098 t = mono_type_get_underlying_type (t);
1102 case MONO_TYPE_BOOLEAN:
1105 case MONO_TYPE_CHAR:
1112 case MONO_TYPE_FNPTR:
1114 case MONO_TYPE_CLASS:
1115 case MONO_TYPE_STRING:
1116 case MONO_TYPE_OBJECT:
1117 case MONO_TYPE_SZARRAY:
1118 case MONO_TYPE_ARRAY:
1126 case MONO_TYPE_VALUETYPE:
1127 case MONO_TYPE_TYPEDBYREF:
1129 case MONO_TYPE_GENERICINST:
1130 if (mono_type_generic_inst_is_valuetype (t))
1136 g_assert_not_reached ();
1143 array_access_to_klass (int opcode)
1147 return mono_defaults.byte_class;
1149 return mono_defaults.uint16_class;
1152 return mono_defaults.int_class;
1155 return mono_defaults.sbyte_class;
1158 return mono_defaults.int16_class;
1161 return mono_defaults.int32_class;
1163 return mono_defaults.uint32_class;
1166 return mono_defaults.int64_class;
1169 return mono_defaults.single_class;
1172 return mono_defaults.double_class;
1173 case CEE_LDELEM_REF:
1174 case CEE_STELEM_REF:
1175 return mono_defaults.object_class;
1177 g_assert_not_reached ();
1183 * We try to share variables when possible
1186 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1191 /* inlining can result in deeper stacks */
1192 if (slot >= cfg->header->max_stack)
1193 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1195 pos = ins->type - 1 + slot * STACK_MAX;
1197 switch (ins->type) {
1204 if ((vnum = cfg->intvars [pos]))
1205 return cfg->varinfo [vnum];
1206 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1207 cfg->intvars [pos] = res->inst_c0;
1210 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1216 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1219 * Don't use this if a generic_context is set, since that means AOT can't
1220 * look up the method using just the image+token.
1221 * table == 0 means this is a reference made from a wrapper.
1223 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1224 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1225 jump_info_token->image = image;
1226 jump_info_token->token = token;
1227 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1232 * This function is called to handle items that are left on the evaluation stack
1233 * at basic block boundaries. What happens is that we save the values to local variables
1234 * and we reload them later when first entering the target basic block (with the
1235 * handle_loaded_temps () function).
1236 * A single joint point will use the same variables (stored in the array bb->out_stack or
1237 * bb->in_stack, if the basic block is before or after the joint point).
1239 * This function needs to be called _before_ emitting the last instruction of
1240 * the bb (i.e. before emitting a branch).
1241 * If the stack merge fails at a join point, cfg->unverifiable is set.
1244 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1247 MonoBasicBlock *bb = cfg->cbb;
1248 MonoBasicBlock *outb;
1249 MonoInst *inst, **locals;
1254 if (cfg->verbose_level > 3)
1255 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
1256 if (!bb->out_scount) {
1257 bb->out_scount = count;
1258 //printf ("bblock %d has out:", bb->block_num);
1260 for (i = 0; i < bb->out_count; ++i) {
1261 outb = bb->out_bb [i];
1262 /* exception handlers are linked, but they should not be considered for stack args */
1263 if (outb->flags & BB_EXCEPTION_HANDLER)
1265 //printf (" %d", outb->block_num);
1266 if (outb->in_stack) {
1268 bb->out_stack = outb->in_stack;
1274 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1275 for (i = 0; i < count; ++i) {
1277 * try to reuse temps already allocated for this purpouse, if they occupy the same
1278 * stack slot and if they are of the same type.
1279 * This won't cause conflicts since if 'local' is used to
1280 * store one of the values in the in_stack of a bblock, then
1281 * the same variable will be used for the same outgoing stack
1283 * This doesn't work when inlining methods, since the bblocks
1284 * in the inlined methods do not inherit their in_stack from
1285 * the bblock they are inlined to. See bug #58863 for an
1288 if (cfg->inlined_method)
1289 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1291 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
1296 for (i = 0; i < bb->out_count; ++i) {
1297 outb = bb->out_bb [i];
1298 /* exception handlers are linked, but they should not be considered for stack args */
1299 if (outb->flags & BB_EXCEPTION_HANDLER)
1301 if (outb->in_scount) {
1302 if (outb->in_scount != bb->out_scount) {
1303 cfg->unverifiable = TRUE;
1306 continue; /* check they are the same locals */
1308 outb->in_scount = count;
1309 outb->in_stack = bb->out_stack;
1312 locals = bb->out_stack;
1314 for (i = 0; i < count; ++i) {
1315 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1316 inst->cil_code = sp [i]->cil_code;
1317 sp [i] = locals [i];
1318 if (cfg->verbose_level > 3)
1319 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1323 * It is possible that the out bblocks already have in_stack assigned, and
1324 * the in_stacks differ. In this case, we will store to all the different
1331 /* Find a bblock which has a different in_stack */
1333 while (bindex < bb->out_count) {
1334 outb = bb->out_bb [bindex];
1335 /* exception handlers are linked, but they should not be considered for stack args */
1336 if (outb->flags & BB_EXCEPTION_HANDLER) {
1340 if (outb->in_stack != locals) {
1341 for (i = 0; i < count; ++i) {
1342 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1343 inst->cil_code = sp [i]->cil_code;
1344 sp [i] = locals [i];
1345 if (cfg->verbose_level > 3)
1346 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1348 locals = outb->in_stack;
1357 /* Emit code which loads interface_offsets [klass->interface_id]
1358 * The array is stored in memory before vtable.
/*
 * Loads into intf_reg the interface-offsets entry for klass; the table lives
 * at negative offsets from the vtable pointed to by vtable_reg.
 * Under AOT the interface id is not a compile-time constant, so it is
 * materialized via an (adjusted-IID) AOT constant and added to vtable_reg.
 */
1361 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1363 if (cfg->compile_aot) {
1364 int ioffset_reg = alloc_preg (cfg);
1365 int iid_reg = alloc_preg (cfg);
1367 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1368 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1369 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
/* JIT case: interface_id is known now, index directly below the vtable. */
1372 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
/*
 * Emits code that tests bit klass->interface_id of the interface bitmap found
 * at [base_reg + offset], leaving a nonzero value in intf_bit_reg iff the bit
 * is set.  Three strategies: compressed bitmap (icall), AOT (iid loaded as a
 * patchable constant), and JIT (iid folded into immediates).
 */
1377 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1379 int ibitmap_reg = alloc_preg (cfg);
1380 #ifdef COMPRESSED_INTERFACE_BITMAP
/* Compressed bitmap: defer the bit test to the mono_class_interface_match icall. */
1382 MonoInst *res, *ins;
1383 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1384 MONO_ADD_INS (cfg->cbb, ins);
1386 if (cfg->compile_aot)
1387 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
1389 EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
1390 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1391 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
1393 int ibitmap_byte_reg = alloc_preg (cfg);
1395 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
1397 if (cfg->compile_aot) {
/* AOT: compute byte index (iid >> 3) and bit mask (1 << (iid & 7)) at runtime
 * from the patched iid constant, then AND against the loaded bitmap byte. */
1398 int iid_reg = alloc_preg (cfg);
1399 int shifted_iid_reg = alloc_preg (cfg);
1400 int ibitmap_byte_address_reg = alloc_preg (cfg);
1401 int masked_iid_reg = alloc_preg (cfg);
1402 int iid_one_bit_reg = alloc_preg (cfg);
1403 int iid_bit_reg = alloc_preg (cfg);
1404 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1405 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1406 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1407 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1408 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1409 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1410 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1411 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT: byte index and mask are compile-time constants. */
1413 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1414 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1420 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1421 * stored in "klass_reg" implements the interface "klass".
/* Thin wrapper: tests the bitmap stored in MonoClass.interface_bitmap. */
1424 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1426 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
1430 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1431 * stored in "vtable_reg" implements the interface "klass".
/* Thin wrapper: tests the bitmap stored in MonoVTable.interface_bitmap. */
1434 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1436 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
1440 * Emit code which checks whenever the interface id of @klass is smaller than
1441 * than the value given by max_iid_reg.
/*
 * On failure either branches to false_target or (presumably when false_target
 * is NULL -- the guarding "if" is on lines elided from this excerpt) throws
 * InvalidCastException.  The compare is unsigned (LT_UN / PBLT_UN).
 */
1444 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1445 MonoBasicBlock *false_target)
1447 if (cfg->compile_aot) {
1448 int iid_reg = alloc_preg (cfg);
1449 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1450 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1453 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1455 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1457 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1460 /* Same as above, but obtains max_iid from a vtable */
1462 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1463 MonoBasicBlock *false_target)
1465 int max_iid_reg = alloc_preg (cfg);
/* max_interface_id is a 16-bit field, hence the unsigned 2-byte load. */
1467 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1468 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1471 /* Same as above, but obtains max_iid from a klass */
1473 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1474 MonoBasicBlock *false_target)
1476 int max_iid_reg = alloc_preg (cfg);
/* Same check as the vtable variant, reading MonoClass.max_interface_id. */
1478 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1479 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * Emits an "isinst"-style subtype test: checks whether the class in klass_reg
 * derives from klass by probing the supertypes table at inheritance depth
 * klass->idepth - 1.  Branches to true_target on a match; branches to
 * false_target when the idepth guard fails.  klass_ins, when non-NULL,
 * supplies the class to compare against at runtime (shared-generics case);
 * otherwise klass is used as an AOT or immediate constant.
 */
1483 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1485 int idepth_reg = alloc_preg (cfg);
1486 int stypes_reg = alloc_preg (cfg);
1487 int stype = alloc_preg (cfg);
/* Beyond MONO_DEFAULT_SUPERTABLE_SIZE the supertypes table is truncated, so a
 * runtime idepth check is required before indexing it. */
1489 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1490 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1491 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1492 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1494 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1495 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1497 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1498 } else if (cfg->compile_aot) {
1499 int const_reg = alloc_preg (cfg);
1500 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1501 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1503 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1505 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/* Convenience wrapper for the common case without a runtime class instruction. */
1509 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1511 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
/*
 * Emits an interface cast check against the vtable in vtable_reg: first the
 * max-iid range check, then the interface bitmap bit test.  On success
 * branches to true_target; otherwise throws InvalidCastException (the
 * branch/throw selection logic sits on lines elided from this excerpt).
 */
1515 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1517 int intf_reg = alloc_preg (cfg);
1519 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1520 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1521 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1523 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1525 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1529 * Variant of the above that takes a register to the class, not the vtable.
1532 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1534 int intf_bit_reg = alloc_preg (cfg);
/* Same sequence as mini_emit_iface_cast but reads MonoClass fields. */
1536 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1537 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1538 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1540 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1542 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * Emits an exact class-identity check: compares the class in klass_reg against
 * klass_inst->dreg (runtime class, when non-NULL), an AOT class constant, or
 * the raw klass pointer as an immediate; throws InvalidCastException when they
 * differ.
 */
1546 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1549 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1550 } else if (cfg->compile_aot) {
1551 int const_reg = alloc_preg (cfg);
1552 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1553 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1555 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1557 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Exact class check without a runtime class instruction. */
1561 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1563 return mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
/*
 * Compares the class in klass_reg against klass (AOT constant or immediate)
 * and branches to target using the caller-supplied branch opcode, rather than
 * throwing as mini_emit_class_check does.
 */
1567 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1569 if (cfg->compile_aot) {
1570 int const_reg = alloc_preg (cfg);
1571 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1572 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1574 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1576 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
1580 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/*
 * Emits a full castclass check for the object in obj_reg whose class is in
 * klass_reg.  The visible branch handles array casts: it verifies the rank,
 * then checks the element class (with special cases for object, enum and its
 * base class, interfaces, and nested arrays), and for rank-1 SZARRAYs also
 * verifies the object has no bounds (i.e. is a vector).  The trailing branch
 * does the plain supertypes-table subtype check; the guard conditions between
 * the two halves are on lines elided from this excerpt.
 */
1583 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1586 int rank_reg = alloc_preg (cfg);
1587 int eclass_reg = alloc_preg (cfg);
1589 g_assert (!klass_inst);
1590 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
1591 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1592 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1593 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1594 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
1595 if (klass->cast_class == mono_defaults.object_class) {
1596 int parent_reg = alloc_preg (cfg);
1597 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
1598 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1599 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1600 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1601 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1602 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1603 } else if (klass->cast_class == mono_defaults.enum_class) {
1604 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1605 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1606 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1608 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1609 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1612 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1613 /* Check that the object is a vector too */
1614 int bounds_reg = alloc_preg (cfg);
1615 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
1616 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1617 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-array path: walk the supertypes table, mirroring
 * mini_emit_isninst_cast_inst but throwing instead of branching. */
1620 int idepth_reg = alloc_preg (cfg);
1621 int stypes_reg = alloc_preg (cfg);
1622 int stype = alloc_preg (cfg);
1624 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1625 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1626 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1627 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1629 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1630 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1631 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
/* Castclass without a runtime class instruction. */
1636 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1638 return mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/*
 * Emits inline code to set `size` bytes at [destreg + offset] to `val`.
 * Only val == 0 is supported (asserted below).  Small aligned sizes use a
 * single store-immediate of the right width; larger sizes fall back to a
 * sequence of register stores, wide first, in decreasing widths.  The loop
 * headers and size/offset bookkeeping between stores are on lines elided
 * from this excerpt.
 */
1642 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1646 g_assert (val == 0);
1651 if ((size <= 4) && (size <= align)) {
1654 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1657 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1660 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1662 #if SIZEOF_REGISTER == 8
1664 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* General path: materialize the (zero) value in a register once. */
1670 val_reg = alloc_preg (cfg);
1672 if (SIZEOF_REGISTER == 8)
1673 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1675 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
1678 /* This could be optimized further if necessary */
1680 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1687 #if !NO_UNALIGNED_ACCESS
1688 if (SIZEOF_REGISTER == 8) {
1690 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1695 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
1703 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1708 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1713 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/*
 * Emits inline code copying `size` bytes from [srcreg + soffset] to
 * [destreg + doffset] as load/store pairs of decreasing width (8, 4, 2, 1
 * bytes, alignment permitting).  Loop headers and offset/size updates between
 * the pairs are on lines elided from this excerpt.
 */
1720 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1727 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1728 g_assert (size < 10000);
1731 /* This could be optimized further if necessary */
1733 cur_reg = alloc_preg (cfg);
1734 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1735 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1742 #if !NO_UNALIGNED_ACCESS
1743 if (SIZEOF_REGISTER == 8) {
1745 cur_reg = alloc_preg (cfg);
1746 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1747 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
1756 cur_reg = alloc_preg (cfg);
1757 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1758 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1764 cur_reg = alloc_preg (cfg);
1765 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1766 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1772 cur_reg = alloc_preg (cfg);
1773 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1774 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * Maps a method's return type to the matching call opcode family:
 * plain / VOID / L (int64) / F (float) / V (vtype), each with _REG (calli)
 * and VIRT variants.  Enum valuetypes and generic instances are reduced to
 * their underlying type and re-dispatched (the `goto`/loop back-edges are on
 * lines elided from this excerpt).  gsctx resolves shared-generic type vars.
 */
1782 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
1785 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1788 type = mini_get_basic_type_from_generic (gsctx, type);
1789 switch (type->type) {
1790 case MONO_TYPE_VOID:
1791 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
1794 case MONO_TYPE_BOOLEAN:
1797 case MONO_TYPE_CHAR:
1800 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1804 case MONO_TYPE_FNPTR:
1805 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1806 case MONO_TYPE_CLASS:
1807 case MONO_TYPE_STRING:
1808 case MONO_TYPE_OBJECT:
1809 case MONO_TYPE_SZARRAY:
1810 case MONO_TYPE_ARRAY:
1811 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1814 return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
1817 return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
1818 case MONO_TYPE_VALUETYPE:
1819 if (type->data.klass->enumtype) {
1820 type = mono_class_enum_basetype (type->data.klass);
1823 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1824 case MONO_TYPE_TYPEDBYREF:
1825 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1826 case MONO_TYPE_GENERICINST:
1827 type = &type->data.generic_class->container_class->byval_arg;
1830 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
1836 * target_type_is_incompatible:
1837 * @cfg: MonoCompile context
1839 * Check that the item @arg on the evaluation stack can be stored
1840 * in the target type (can be a local, or field, etc).
1841 * The cfg arg can be used to check if we need verification or just
1844 * Returns: non-0 value if arg can't be stored on a target.
/* Compatibility is checked against the evaluation-stack type (STACK_I4,
 * STACK_OBJ, STACK_VTYPE, ...) of @arg; for value types the MonoClass must
 * match exactly. */
1847 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1849 MonoType *simple_type;
1852 if (target->byref) {
1853 /* FIXME: check that the pointed to types match */
1854 if (arg->type == STACK_MP)
1855 return arg->klass != mono_class_from_mono_type (target);
1856 if (arg->type == STACK_PTR)
1861 simple_type = mono_type_get_underlying_type (target);
1862 switch (simple_type->type) {
1863 case MONO_TYPE_VOID:
1867 case MONO_TYPE_BOOLEAN:
1870 case MONO_TYPE_CHAR:
1873 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1877 /* STACK_MP is needed when setting pinned locals */
1878 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1883 case MONO_TYPE_FNPTR:
1884 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1887 case MONO_TYPE_CLASS:
1888 case MONO_TYPE_STRING:
1889 case MONO_TYPE_OBJECT:
1890 case MONO_TYPE_SZARRAY:
1891 case MONO_TYPE_ARRAY:
1892 if (arg->type != STACK_OBJ)
1894 /* FIXME: check type compatibility */
1898 if (arg->type != STACK_I8)
1903 if (arg->type != STACK_R8)
1906 case MONO_TYPE_VALUETYPE:
1907 if (arg->type != STACK_VTYPE)
1909 klass = mono_class_from_mono_type (simple_type);
1910 if (klass != arg->klass)
1913 case MONO_TYPE_TYPEDBYREF:
1914 if (arg->type != STACK_VTYPE)
1916 klass = mono_class_from_mono_type (simple_type);
1917 if (klass != arg->klass)
1920 case MONO_TYPE_GENERICINST:
1921 if (mono_type_generic_inst_is_valuetype (simple_type)) {
1922 if (arg->type != STACK_VTYPE)
1924 klass = mono_class_from_mono_type (simple_type);
1925 if (klass != arg->klass)
1929 if (arg->type != STACK_OBJ)
1931 /* FIXME: check type compatibility */
/* Type variables under generic sharing: treated as references for now. */
1935 case MONO_TYPE_MVAR:
1936 /* FIXME: all the arguments must be references for now,
1937 * later look inside cfg and see if the arg num is
1938 * really a reference
1940 g_assert (cfg->generic_sharing_context);
1941 if (arg->type != STACK_OBJ)
1945 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
1951 * Prepare arguments for passing to a function call.
1952 * Return a non-zero value if the arguments can't be passed to the given
1954 * The type checks are not yet complete and some conversions may need
1955 * casts on 32 or 64 bit architectures.
1957 * FIXME: implement this using target_type_is_incompatible ()
/* Validates the evaluation-stack type of each arg (including `this` for
 * instance signatures) against the declared parameter types. */
1960 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
1962 MonoType *simple_type;
1966 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
1970 for (i = 0; i < sig->param_count; ++i) {
1971 if (sig->params [i]->byref) {
1972 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
1976 simple_type = sig->params [i];
1977 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
/* Enum valuetypes and generic instances are reduced to their underlying
 * type and re-checked (back-edges are on lines elided from this excerpt). */
1979 switch (simple_type->type) {
1980 case MONO_TYPE_VOID:
1985 case MONO_TYPE_BOOLEAN:
1988 case MONO_TYPE_CHAR:
1991 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
1997 case MONO_TYPE_FNPTR:
1998 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2001 case MONO_TYPE_CLASS:
2002 case MONO_TYPE_STRING:
2003 case MONO_TYPE_OBJECT:
2004 case MONO_TYPE_SZARRAY:
2005 case MONO_TYPE_ARRAY:
2006 if (args [i]->type != STACK_OBJ)
2011 if (args [i]->type != STACK_I8)
2016 if (args [i]->type != STACK_R8)
2019 case MONO_TYPE_VALUETYPE:
2020 if (simple_type->data.klass->enumtype) {
2021 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2024 if (args [i]->type != STACK_VTYPE)
2027 case MONO_TYPE_TYPEDBYREF:
2028 if (args [i]->type != STACK_VTYPE)
2031 case MONO_TYPE_GENERICINST:
2032 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2036 g_error ("unknown type 0x%02x in check_call_signature",
/* Maps a *CALLVIRT opcode to the corresponding direct-call opcode; most case
 * arms are on lines elided from this excerpt.  Unknown opcodes assert. */
2044 callvirt_to_call (int opcode)
2049 case OP_VOIDCALLVIRT:
2058 g_assert_not_reached ();
/* Maps a *CALLVIRT opcode to its *CALL_MEMBASE (vtable-slot dispatch)
 * counterpart.  Unknown opcodes assert. */
2065 callvirt_to_call_membase (int opcode)
2069 return OP_CALL_MEMBASE;
2070 case OP_VOIDCALLVIRT:
2071 return OP_VOIDCALL_MEMBASE;
2073 return OP_FCALL_MEMBASE;
2075 return OP_LCALL_MEMBASE;
2077 return OP_VCALL_MEMBASE;
2079 g_assert_not_reached ();
2085 #ifdef MONO_ARCH_HAVE_IMT
/*
 * Passes the IMT "method" argument for an interface call: either the runtime
 * value in imt_arg, or call->method materialized as an AOT or pointer
 * constant.  Under LLVM the register is recorded in call->imt_arg_reg; the
 * native path pins it to MONO_ARCH_IMT_REG when the architecture defines one,
 * otherwise it defers to mono_arch_emit_imt_argument.
 */
2087 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
2091 if (COMPILE_LLVM (cfg)) {
2092 method_reg = alloc_preg (cfg);
2095 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2096 } else if (cfg->compile_aot) {
2097 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
2100 MONO_INST_NEW (cfg, ins, OP_PCONST);
2101 ins->inst_p0 = call->method;
2102 ins->dreg = method_reg;
2103 MONO_ADD_INS (cfg->cbb, ins);
2107 call->imt_arg_reg = method_reg;
2109 #ifdef MONO_ARCH_IMT_REG
2110 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2112 /* Need this to keep the IMT arg alive */
2113 mono_call_inst_add_outarg_reg (cfg, call, method_reg, 0, FALSE);
2118 #ifdef MONO_ARCH_IMT_REG
2119 method_reg = alloc_preg (cfg);
2122 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2123 } else if (cfg->compile_aot) {
2124 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
2127 MONO_INST_NEW (cfg, ins, OP_PCONST);
2128 ins->inst_p0 = call->method;
2129 ins->dreg = method_reg;
2130 MONO_ADD_INS (cfg->cbb, ins);
2133 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2135 mono_arch_emit_imt_argument (cfg, call, imt_arg);
2140 static MonoJumpInfo *
/* Allocates a MonoJumpInfo (patch-info record) from mempool mp; caller must
 * not free it — lifetime is tied to the pool. */
2141 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2143 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2147 ji->data.target = target;
2152 inline static MonoCallInst *
/*
 * Builds a MonoCallInst for the given signature and arguments (not yet added
 * to a bblock).  Handles: tail calls (OP_TAILCALL), struct returns (via
 * cfg->vret_addr or a temp addressed through OP_OUTARG_VTRETADDR), soft-float
 * r8->r4 argument conversion, and LLVM vs. native outgoing-arg lowering.
 * Records stack usage into cfg->param_area.
 */
2153 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2154 MonoInst **args, int calli, int virtual, int tail)
2157 #ifdef MONO_ARCH_SOFT_FLOAT
2162 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2164 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2167 call->signature = sig;
2169 type_to_eval_stack_type ((cfg), sig->ret, &call->inst);
/* Struct return: first branch presumably covers the tail-call case (the guard
 * is on an elided line), reusing the caller's vret address. */
2172 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2173 call->vret_var = cfg->vret_addr;
2174 //g_assert_not_reached ();
2176 } else if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2177 MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
2180 temp->backend.is_pinvoke = sig->pinvoke;
2183 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2184 * address of return value to increase optimization opportunities.
2185 * Before vtype decomposition, the dreg of the call ins itself represents the
2186 * fact the call modifies the return value. After decomposition, the call will
2187 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2188 * will be transformed into an LDADDR.
2190 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2191 loada->dreg = alloc_preg (cfg);
2192 loada->inst_p0 = temp;
2193 /* We reference the call too since call->dreg could change during optimization */
2194 loada->inst_p1 = call;
2195 MONO_ADD_INS (cfg->cbb, loada);
2197 call->inst.dreg = temp->dreg;
2199 call->vret_var = loada;
2200 } else if (!MONO_TYPE_IS_VOID (sig->ret))
2201 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2203 #ifdef MONO_ARCH_SOFT_FLOAT
2204 if (COMPILE_SOFT_FLOAT (cfg)) {
2206 * If the call has a float argument, we would need to do an r8->r4 conversion using
2207 * an icall, but that cannot be done during the call sequence since it would clobber
2208 * the call registers + the stack. So we do it before emitting the call.
2210 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2212 MonoInst *in = call->args [i];
2214 if (i >= sig->hasthis)
2215 t = sig->params [i - sig->hasthis];
2217 t = &mono_defaults.int_class->byval_arg;
2218 t = mono_type_get_underlying_type (t);
2220 if (!t->byref && t->type == MONO_TYPE_R4) {
2221 MonoInst *iargs [1];
2225 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2227 /* The result will be in an int vreg */
2228 call->args [i] = conv;
2235 if (COMPILE_LLVM (cfg))
2236 mono_llvm_emit_call (cfg, call);
2238 mono_arch_emit_call (cfg, call);
2240 mono_arch_emit_call (cfg, call);
2243 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2244 cfg->flags |= MONO_CFG_HAS_CALLS;
2249 inline static MonoInst*
/* Emits an indirect call through the address in `addr` (calli). */
2250 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr)
2252 MonoCallInst *call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE);
2254 call->inst.sreg1 = addr->dreg;
2256 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2258 return (MonoInst*)call;
/* Attaches the runtime-generic-context argument to a call: via the dedicated
 * RGCTX register when the architecture defines one, otherwise (LLVM-style) by
 * recording the vreg in call->rgctx_arg_reg. */
2262 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2264 #ifdef MONO_ARCH_RGCTX_REG
2265 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2266 cfg->uses_rgctx_reg = TRUE;
2267 call->rgctx_reg = TRUE;
2269 call->rgctx_arg_reg = rgctx_reg;
2276 inline static MonoInst*
/* Indirect call that additionally carries an RGCTX argument; the rgctx value
 * is copied to a fresh vreg before the call so it survives arg lowering. */
2277 mono_emit_rgctx_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *rgctx_arg)
2283 rgctx_reg = mono_alloc_preg (cfg);
2284 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2286 call = (MonoCallInst*)mono_emit_calli (cfg, sig, args, addr);
2288 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2289 return (MonoInst*)call;
2293 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, int rgctx_type);
2295 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, int rgctx_type);
/*
 * Emits a (possibly virtual) managed call to `method`.  `this` non-NULL makes
 * the call virtual.  Handles, in order: string-ctor signature fixup, remoting
 * proxies (wrapped via remoting-invoke-with-check, or an rgctx indirect call
 * when the method is generic-shared), delegate Invoke fast path, direct
 * dispatch for non-virtual/sealed methods, IMT dispatch for interface calls,
 * and plain vtable-slot dispatch otherwise.
 */
2298 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2299 MonoInst **args, MonoInst *this, MonoInst *imt_arg)
2301 gboolean might_be_remote;
2302 gboolean virtual = this != NULL;
2303 gboolean enable_for_aot = TRUE;
2307 if (method->string_ctor) {
2308 /* Create the real signature */
2309 /* FIXME: Cache these */
2310 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2311 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2316 might_be_remote = this && sig->hasthis &&
2317 (method->klass->marshalbyref || method->klass == mono_defaults.object_class) &&
2318 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && !MONO_CHECK_THIS (this);
2320 context_used = mono_method_check_context_used (method);
/* Generic-shared remoting target: fetch the invoke-with-check wrapper address
 * from the rgctx and call indirectly. */
2321 if (might_be_remote && context_used) {
2324 g_assert (cfg->generic_sharing_context);
2326 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2328 return mono_emit_calli (cfg, sig, args, addr);
2331 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, FALSE);
2333 if (might_be_remote)
2334 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2336 call->method = method;
2337 call->inst.flags |= MONO_INST_HAS_METHOD;
2338 call->inst.inst_left = this;
2341 int vtable_reg, slot_reg, this_reg;
2343 this_reg = this->dreg;
2345 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
/* Delegate Invoke fast path: call through delegate->invoke_impl directly. */
2346 if ((method->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
2347 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2349 /* Make a call to delegate->invoke_impl */
2350 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2351 call->inst.inst_basereg = this_reg;
2352 call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2353 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2355 return (MonoInst*)call;
2359 if ((!cfg->compile_aot || enable_for_aot) &&
2360 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2361 (MONO_METHOD_IS_FINAL (method) &&
2362 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2363 !(method->klass->marshalbyref && context_used)) {
2365 * the method is not virtual, we just need to ensure this is not null
2366 * and then we can call the method directly.
2368 if (method->klass->marshalbyref || method->klass == mono_defaults.object_class) {
2370 * The check above ensures method is not gshared, this is needed since
2371 * gshared methods can't have wrappers.
2373 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2376 if (!method->string_ctor)
2377 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2379 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2381 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2383 return (MonoInst*)call;
2386 if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2388 * the method is virtual, but we can statically dispatch since either
2389 * it's class or the method itself are sealed.
2390 * But first we need to ensure it's not a null reference.
2392 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2394 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2395 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2397 return (MonoInst*)call;
2400 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
/* Load the vtable with an implicit null check on `this`. */
2402 vtable_reg = alloc_preg (cfg);
2403 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2404 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2406 #ifdef MONO_ARCH_HAVE_IMT
/* IMT: the slot sits at a negative offset from the vtable. */
2408 guint32 imt_slot = mono_method_get_imt_slot (method);
2409 emit_imt_argument (cfg, call, imt_arg);
2410 slot_reg = vtable_reg;
2411 call->inst.inst_offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2414 if (slot_reg == -1) {
2415 slot_reg = alloc_preg (cfg);
2416 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2417 call->inst.inst_offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
2420 slot_reg = vtable_reg;
2421 call->inst.inst_offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
2422 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2423 #ifdef MONO_ARCH_HAVE_IMT
2425 g_assert (mono_method_signature (method)->generic_param_count);
2426 emit_imt_argument (cfg, call, imt_arg);
2431 call->inst.sreg1 = slot_reg;
2432 call->virtual = TRUE;
2435 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2437 return (MonoInst*)call;
/* Method call variant that also passes an RGCTX value (vtable_arg); the value
 * is copied to a fresh vreg before the call so it survives arg lowering. */
2441 mono_emit_rgctx_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2442 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *vtable_arg)
2449 rgctx_reg = mono_alloc_preg (cfg);
2450 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
2452 ins = mono_emit_method_call_full (cfg, method, sig, args, this, imt_arg);
2454 call = (MonoCallInst*)ins;
2456 set_rgctx_arg (cfg, call, rgctx_reg, vtable_arg);
/* Convenience wrapper using the method's own signature and no IMT argument. */
2462 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2464 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), args, this, NULL);
/* Emits a direct call to a native function address `func` with signature sig. */
2468 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2475 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE);
2478 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2480 return (MonoInst*)call;
/* Emits a call to a registered JIT icall, looked up by its native address;
 * the call goes through the icall's wrapper. */
2484 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2486 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2490 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2494 * mono_emit_abs_call:
2496 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2498 inline static MonoInst*
2499 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2500 MonoMethodSignature *sig, MonoInst **args)
2502 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2506 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
/* The patch info doubles as both hash key and value; fptr_is_patch tells the
 * backend the "address" is really a MonoJumpInfo* to resolve later. */
2509 if (cfg->abs_patches == NULL)
2510 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2511 g_hash_table_insert (cfg->abs_patches, ji, ji);
2512 ins = mono_emit_native_call (cfg, ji, sig, args);
2513 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
2518 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2520 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2521 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2525 * Native code might return non register sized integers
2526 * without initializing the upper bits.
2528 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2529 case OP_LOADI1_MEMBASE:
2530 widen_op = OP_ICONV_TO_I1;
2532 case OP_LOADU1_MEMBASE:
2533 widen_op = OP_ICONV_TO_U1;
2535 case OP_LOADI2_MEMBASE:
2536 widen_op = OP_ICONV_TO_I2;
2538 case OP_LOADU2_MEMBASE:
2539 widen_op = OP_ICONV_TO_U2;
2545 if (widen_op != -1) {
2546 int dreg = alloc_preg (cfg);
2549 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
2550 widen->type = ins->type;
2560 get_memcpy_method (void)
2562 static MonoMethod *memcpy_method = NULL;
2563 if (!memcpy_method) {
2564 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2566 g_error ("Old corlib found. Install a new one");
2568 return memcpy_method;
2572 create_write_barrier_bitmap (MonoClass *klass, unsigned *wb_bitmap, int offset)
2574 MonoClassField *field;
2575 gpointer iter = NULL;
2577 while ((field = mono_class_get_fields (klass, &iter))) {
2580 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
2582 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
2583 if (mono_type_is_reference (field->type)) {
2584 g_assert ((foffset % SIZEOF_VOID_P) == 0);
2585 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
2587 /*FIXME support nested value types so this works for: struct X { Y y; int z;} struct Y { object a,b; }*/
2588 MonoClass *field_class = mono_class_from_mono_type (field->type);
2589 if (field_class->has_references)
2590 create_write_barrier_bitmap (field_class, wb_bitmap, offset + foffset);
2596 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value, int value_reg)
2599 int card_table_shift_bits;
2600 gpointer card_table_mask;
2601 guint8 *card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
2602 MonoInst *dummy_use;
2604 #ifdef MONO_ARCH_HAVE_CARD_TABLE_WBARRIER
2605 int nursery_shift_bits;
2606 size_t nursery_size;
2608 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
2610 if (!cfg->compile_aot && card_table && nursery_shift_bits > 0) {
2613 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
2614 wbarrier->sreg1 = ptr->dreg;
2616 wbarrier->sreg2 = value->dreg;
2618 wbarrier->sreg2 = value_reg;
2619 MONO_ADD_INS (cfg->cbb, wbarrier);
2623 int offset_reg = alloc_preg (cfg);
2624 int card_reg = alloc_preg (cfg);
2627 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
2628 if (card_table_mask)
2629 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
2631 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
2632 * IMM's larger than 32bits.
2634 if (cfg->compile_aot) {
2635 MONO_EMIT_NEW_AOTCONST (cfg, card_reg, NULL, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR);
2637 MONO_INST_NEW (cfg, ins, OP_PCONST);
2638 ins->inst_p0 = card_table;
2639 ins->dreg = card_reg;
2640 MONO_ADD_INS (cfg->cbb, ins);
2643 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
2644 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
2646 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
2647 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
2651 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
2653 MONO_INST_NEW (cfg, dummy_use, OP_DUMMY_USE);
2654 dummy_use->sreg1 = value_reg;
2655 MONO_ADD_INS (cfg->cbb, dummy_use);
2661 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
2663 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
2664 unsigned need_wb = 0;
2669 /*types with references can't have alignment smaller than sizeof(void*) */
2670 if (align < SIZEOF_VOID_P)
2673 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
2674 if (size > 32 * SIZEOF_VOID_P)
2677 create_write_barrier_bitmap (klass, &need_wb, 0);
2679 /* We don't unroll more than 5 stores to avoid code bloat. */
2680 if (size > 5 * SIZEOF_VOID_P) {
2681 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
2682 size += (SIZEOF_VOID_P - 1);
2683 size &= ~(SIZEOF_VOID_P - 1);
2685 EMIT_NEW_ICONST (cfg, iargs [2], size);
2686 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
2687 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
2691 destreg = iargs [0]->dreg;
2692 srcreg = iargs [1]->dreg;
2695 dest_ptr_reg = alloc_preg (cfg);
2696 tmp_reg = alloc_preg (cfg);
2699 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
2701 while (size >= SIZEOF_VOID_P) {
2702 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, tmp_reg, srcreg, offset);
2703 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
2706 emit_write_barrier (cfg, iargs [0], NULL, tmp_reg);
2708 offset += SIZEOF_VOID_P;
2709 size -= SIZEOF_VOID_P;
2712 /*tmp += sizeof (void*)*/
2713 if (size >= SIZEOF_VOID_P) {
2714 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
2715 MONO_ADD_INS (cfg->cbb, iargs [0]);
2719 /* Those cannot be references since size < sizeof (void*) */
2721 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
2722 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
2728 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
2729 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
2735 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
2736 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
2745 * Emit code to copy a valuetype of type @klass whose address is stored in
2746 * @src->dreg to memory whose address is stored at @dest->dreg.
2749 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
2751 MonoInst *iargs [4];
2754 MonoMethod *memcpy_method;
2758 * This check breaks with spilled vars... need to handle it during verification anyway.
2759 * g_assert (klass && klass == src->klass && klass == dest->klass);
2763 n = mono_class_native_size (klass, &align);
2765 n = mono_class_value_size (klass, &align);
2767 /* if native is true there should be no references in the struct */
2768 if (cfg->gen_write_barriers && klass->has_references && !native) {
2769 /* Avoid barriers when storing to the stack */
2770 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
2771 (dest->opcode == OP_LDADDR))) {
2772 int context_used = 0;
2777 if (cfg->generic_sharing_context)
2778 context_used = mono_class_check_context_used (klass);
2780 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
2781 if ((cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
2783 } else if (context_used) {
2784 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
2786 if (cfg->compile_aot) {
2787 EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
2789 EMIT_NEW_PCONST (cfg, iargs [2], klass);
2790 mono_class_compute_gc_descriptor (klass);
2794 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
2799 if ((cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
2800 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
2801 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
2805 EMIT_NEW_ICONST (cfg, iargs [2], n);
2807 memcpy_method = get_memcpy_method ();
2808 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
2813 get_memset_method (void)
2815 static MonoMethod *memset_method = NULL;
2816 if (!memset_method) {
2817 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
2819 g_error ("Old corlib found. Install a new one");
2821 return memset_method;
2825 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
2827 MonoInst *iargs [3];
2830 MonoMethod *memset_method;
2832 /* FIXME: Optimize this for the case when dest is an LDADDR */
2834 mono_class_init (klass);
2835 n = mono_class_value_size (klass, &align);
2837 if (n <= sizeof (gpointer) * 5) {
2838 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
2841 memset_method = get_memset_method ();
2843 EMIT_NEW_ICONST (cfg, iargs [1], 0);
2844 EMIT_NEW_ICONST (cfg, iargs [2], n);
2845 mono_emit_method_call (cfg, memset_method, iargs, NULL);
2850 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
2852 MonoInst *this = NULL;
2854 g_assert (cfg->generic_sharing_context);
2856 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
2857 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
2858 !method->klass->valuetype)
2859 EMIT_NEW_ARGLOAD (cfg, this, 0);
2861 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
2862 MonoInst *mrgctx_loc, *mrgctx_var;
2865 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
2867 mrgctx_loc = mono_get_vtable_var (cfg);
2868 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
2871 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
2872 MonoInst *vtable_loc, *vtable_var;
2876 vtable_loc = mono_get_vtable_var (cfg);
2877 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
2879 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
2880 MonoInst *mrgctx_var = vtable_var;
2883 vtable_reg = alloc_preg (cfg);
2884 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
2885 vtable_var->type = STACK_PTR;
2891 int vtable_reg, res_reg;
2893 vtable_reg = alloc_preg (cfg);
2894 res_reg = alloc_preg (cfg);
2895 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
2900 static MonoJumpInfoRgctxEntry *
2901 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, int info_type)
2903 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
2904 res->method = method;
2905 res->in_mrgctx = in_mrgctx;
2906 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
2907 res->data->type = patch_type;
2908 res->data->data.target = patch_data;
2909 res->info_type = info_type;
2914 static inline MonoInst*
2915 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
2917 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
2921 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
2922 MonoClass *klass, int rgctx_type)
2924 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
2925 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2927 return emit_rgctx_fetch (cfg, rgctx, entry);
2931 * emit_get_rgctx_method:
2933 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
2934 * normal constants, else emit a load from the rgctx.
2937 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
2938 MonoMethod *cmethod, int rgctx_type)
2940 if (!context_used) {
2943 switch (rgctx_type) {
2944 case MONO_RGCTX_INFO_METHOD:
2945 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
2947 case MONO_RGCTX_INFO_METHOD_RGCTX:
2948 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
2951 g_assert_not_reached ();
2954 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
2955 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2957 return emit_rgctx_fetch (cfg, rgctx, entry);
2962 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
2963 MonoClassField *field, int rgctx_type)
2965 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
2966 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2968 return emit_rgctx_fetch (cfg, rgctx, entry);
2972 * On return the caller must check @klass for load errors.
2975 emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
2977 MonoInst *vtable_arg;
2979 int context_used = 0;
2981 if (cfg->generic_sharing_context)
2982 context_used = mono_class_check_context_used (klass);
2985 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
2986 klass, MONO_RGCTX_INFO_VTABLE);
2988 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2992 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
2995 if (COMPILE_LLVM (cfg))
2996 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline_llvm, &vtable_arg);
2998 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
2999 #ifdef MONO_ARCH_VTABLE_REG
3000 mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
3001 cfg->uses_vtable_reg = TRUE;
3008 * On return the caller must check @array_class for load errors
3011 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3013 int vtable_reg = alloc_preg (cfg);
3014 int context_used = 0;
3016 if (cfg->generic_sharing_context)
3017 context_used = mono_class_check_context_used (array_class);
3019 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3021 if (cfg->opt & MONO_OPT_SHARED) {
3022 int class_reg = alloc_preg (cfg);
3023 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3024 if (cfg->compile_aot) {
3025 int klass_reg = alloc_preg (cfg);
3026 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
3027 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
3029 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
3031 } else if (context_used) {
3032 MonoInst *vtable_ins;
3034 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3035 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
3037 if (cfg->compile_aot) {
3041 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3043 vt_reg = alloc_preg (cfg);
3044 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3045 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
3048 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3050 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
3054 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
3058 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg)
3060 if (mini_get_debug_options ()->better_cast_details) {
3061 int to_klass_reg = alloc_preg (cfg);
3062 int vtable_reg = alloc_preg (cfg);
3063 int klass_reg = alloc_preg (cfg);
3064 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3067 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
3071 MONO_ADD_INS (cfg->cbb, tls_get);
3072 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3073 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3075 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
3076 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
3077 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
3082 reset_cast_details (MonoCompile *cfg)
3084 /* Reset the variables holding the cast details */
3085 if (mini_get_debug_options ()->better_cast_details) {
3086 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3088 MONO_ADD_INS (cfg->cbb, tls_get);
3089 /* It is enough to reset the from field */
3090 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3095 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3096 * generic code is generated.
3099 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
3101 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3104 MonoInst *rgctx, *addr;
3106 /* FIXME: What if the class is shared? We might not
3107 have to get the address of the method from the
3109 addr = emit_get_rgctx_method (cfg, context_used, method,
3110 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3112 rgctx = emit_get_rgctx (cfg, method, context_used);
3114 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
3116 return mono_emit_method_call (cfg, method, &val, NULL);
3121 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3125 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3126 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3127 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3128 int rank_reg = alloc_dreg (cfg ,STACK_I4);
3130 obj_reg = sp [0]->dreg;
3131 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3132 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3134 /* FIXME: generics */
3135 g_assert (klass->rank == 0);
3138 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3139 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3141 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3142 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
3145 MonoInst *element_class;
3147 /* This assertion is from the unboxcast insn */
3148 g_assert (klass->rank == 0);
3150 element_class = emit_get_rgctx_klass (cfg, context_used,
3151 klass->element_class, MONO_RGCTX_INFO_KLASS);
3153 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3154 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3156 save_cast_details (cfg, klass->element_class, obj_reg);
3157 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
3158 reset_cast_details (cfg);
3161 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_PTR), obj_reg, sizeof (MonoObject));
3162 MONO_ADD_INS (cfg->cbb, add);
3163 add->type = STACK_MP;
3170 * Returns NULL and set the cfg exception on error.
3173 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
3175 MonoInst *iargs [2];
3181 MonoInst *iargs [2];
3184 FIXME: we cannot get managed_alloc here because we can't get
3185 the class's vtable (because it's not a closed class)
3187 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3188 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
3191 if (cfg->opt & MONO_OPT_SHARED)
3192 rgctx_info = MONO_RGCTX_INFO_KLASS;
3194 rgctx_info = MONO_RGCTX_INFO_VTABLE;
3195 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
3197 if (cfg->opt & MONO_OPT_SHARED) {
3198 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3200 alloc_ftn = mono_object_new;
3203 alloc_ftn = mono_object_new_specific;
3206 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3209 if (cfg->opt & MONO_OPT_SHARED) {
3210 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3211 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
3213 alloc_ftn = mono_object_new;
3214 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
3215 /* This happens often in argument checking code, eg. throw new FooException... */
3216 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
3217 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
3218 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
3220 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3221 MonoMethod *managed_alloc = NULL;
3225 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3226 cfg->exception_ptr = klass;
3230 #ifndef MONO_CROSS_COMPILE
3231 managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
3234 if (managed_alloc) {
3235 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3236 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3238 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
3240 guint32 lw = vtable->klass->instance_size;
3241 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
3242 EMIT_NEW_ICONST (cfg, iargs [0], lw);
3243 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
3246 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3250 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3254 * Returns NULL and set the cfg exception on error.
3257 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
3259 MonoInst *alloc, *ins;
3261 if (mono_class_is_nullable (klass)) {
3262 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3265 /* FIXME: What if the class is shared? We might not
3266 have to get the method address from the RGCTX. */
3267 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3268 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3269 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3271 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
3273 return mono_emit_method_call (cfg, method, &val, NULL);
3277 alloc = handle_alloc (cfg, klass, TRUE, context_used);
3281 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
3286 // FIXME: This doesn't work yet (class libs tests fail?)
3287 #define is_complex_isinst(klass) (TRUE || (klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || klass->marshalbyref || (klass->flags & TYPE_ATTRIBUTE_SEALED) || mono_class_has_variant_generic_params (klass) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
3290 * Returns NULL and set the cfg exception on error.
3293 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3295 MonoBasicBlock *is_null_bb;
3296 int obj_reg = src->dreg;
3297 int vtable_reg = alloc_preg (cfg);
3298 MonoInst *klass_inst = NULL;
3303 klass_inst = emit_get_rgctx_klass (cfg, context_used,
3304 klass, MONO_RGCTX_INFO_KLASS);
3306 if (is_complex_isinst (klass)) {
3307 /* Complex case, handle by an icall */
3313 args [1] = klass_inst;
3315 return mono_emit_jit_icall (cfg, mono_object_castclass, args);
3317 /* Simple case, handled by the code below */
3321 NEW_BBLOCK (cfg, is_null_bb);
3323 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3324 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3326 save_cast_details (cfg, klass, obj_reg);
3328 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3329 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3330 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
3332 int klass_reg = alloc_preg (cfg);
3334 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3336 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3337 /* the remoting code is broken, access the class for now */
3338 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3339 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3341 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3342 cfg->exception_ptr = klass;
3345 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3347 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3348 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3350 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3352 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3353 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
3357 MONO_START_BB (cfg, is_null_bb);
3359 reset_cast_details (cfg);
3365 * Returns NULL and set the cfg exception on error.
3368 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3371 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
3372 int obj_reg = src->dreg;
3373 int vtable_reg = alloc_preg (cfg);
3374 int res_reg = alloc_preg (cfg);
3375 MonoInst *klass_inst = NULL;
3378 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3380 if (is_complex_isinst (klass)) {
3383 /* Complex case, handle by an icall */
3389 args [1] = klass_inst;
3391 return mono_emit_jit_icall (cfg, mono_object_isinst, args);
3393 /* Simple case, the code below can handle it */
3397 NEW_BBLOCK (cfg, is_null_bb);
3398 NEW_BBLOCK (cfg, false_bb);
3399 NEW_BBLOCK (cfg, end_bb);
3401 /* Do the assignment at the beginning, so the other assignment can be if converted */
3402 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
3403 ins->type = STACK_OBJ;
3406 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3407 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
3409 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3411 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3412 g_assert (!context_used);
3413 /* the is_null_bb target simply copies the input register to the output */
3414 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
3416 int klass_reg = alloc_preg (cfg);
3419 int rank_reg = alloc_preg (cfg);
3420 int eclass_reg = alloc_preg (cfg);
3422 g_assert (!context_used);
3423 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3424 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
3425 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3426 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3427 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
3428 if (klass->cast_class == mono_defaults.object_class) {
3429 int parent_reg = alloc_preg (cfg);
3430 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
3431 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
3432 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3433 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3434 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
3435 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
3436 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3437 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3438 } else if (klass->cast_class == mono_defaults.enum_class) {
3439 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3440 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3441 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
3442 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3444 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
3445 /* Check that the object is a vector too */
3446 int bounds_reg = alloc_preg (cfg);
3447 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
3448 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
3449 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3452 /* the is_null_bb target simply copies the input register to the output */
3453 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3455 } else if (mono_class_is_nullable (klass)) {
3456 g_assert (!context_used);
3457 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3458 /* the is_null_bb target simply copies the input register to the output */
3459 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
3461 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3462 g_assert (!context_used);
3463 /* the remoting code is broken, access the class for now */
3464 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3465 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3467 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3468 cfg->exception_ptr = klass;
3471 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3473 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3474 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3476 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3477 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
3479 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3480 /* the is_null_bb target simply copies the input register to the output */
3481 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
3486 MONO_START_BB (cfg, false_bb);
3488 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
3489 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3491 MONO_START_BB (cfg, is_null_bb);
3493 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *
 *   Emit IR for the Mono "cisinst" opcode (remoting-aware isinst).
 * Takes an object reference and a class, and leaves an integer result
 * on the stack (see the value table in the comment below).
 * NOTE(review): this is a sampled excerpt; some intermediate lines
 * (e.g. the return type line and closing braces) are elided here.
 */
3499 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3501 /* This opcode takes as input an object reference and a class, and returns:
3502 0) if the object is an instance of the class,
3503 1) if the object is not instance of the class,
3504 2) if the object is a proxy whose type cannot be determined */
3507 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
3508 int obj_reg = src->dreg;
3509 int dreg = alloc_ireg (cfg);
3511 int klass_reg = alloc_preg (cfg);
3513 NEW_BBLOCK (cfg, true_bb);
3514 NEW_BBLOCK (cfg, false_bb);
3515 NEW_BBLOCK (cfg, false2_bb);
3516 NEW_BBLOCK (cfg, end_bb);
3517 NEW_BBLOCK (cfg, no_proxy_bb);
/* A null object is "not an instance" (result 1). */
3519 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3520 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
3522 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3523 NEW_BBLOCK (cfg, interface_fail_bb);
3525 tmp_reg = alloc_preg (cfg);
3526 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Interface case: check the interface bitmap first; on failure, the
 * object may still be a transparent proxy. */
3527 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
3528 MONO_START_BB (cfg, interface_fail_bb);
3529 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3531 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
3533 tmp_reg = alloc_preg (cfg);
/* Proxy with no custom type info -> type cannot be determined (result 2). */
3534 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3535 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3536 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
3538 tmp_reg = alloc_preg (cfg);
3539 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3540 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Non-interface case: if the object is a transparent proxy, test
 * against the remote proxy class instead of the vtable class. */
3542 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3543 tmp_reg = alloc_preg (cfg);
3544 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3545 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3547 tmp_reg = alloc_preg (cfg);
3548 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3549 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3550 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3552 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
3553 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
3555 MONO_START_BB (cfg, no_proxy_bb);
3557 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
/* Result blocks: dreg = 1 (not an instance), 2 (undeterminable proxy),
 * 0 (is an instance), then fall through to end_bb. */
3560 MONO_START_BB (cfg, false_bb);
3562 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3563 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3565 MONO_START_BB (cfg, false2_bb);
3567 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
3568 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3570 MONO_START_BB (cfg, true_bb);
3572 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3574 MONO_START_BB (cfg, end_bb);
/* Materialize the result as an I4 value for the evaluation stack. */
3577 MONO_INST_NEW (cfg, ins, OP_ICONST);
3579 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *
 *   Emit IR for the Mono "ccastclass" opcode (remoting-aware castclass).
 * Takes an object reference and a class; see the value table below.
 * Throws InvalidCastException on a definite cast failure.
 * NOTE(review): sampled excerpt; some intermediate lines are elided.
 */
3585 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3587 /* This opcode takes as input an object reference and a class, and returns:
3588 0) if the object is an instance of the class,
3589 1) if the object is a proxy whose type cannot be determined
3590 an InvalidCastException exception is thrown otherwhise*/
3593 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
3594 int obj_reg = src->dreg;
3595 int dreg = alloc_ireg (cfg);
3596 int tmp_reg = alloc_preg (cfg);
3597 int klass_reg = alloc_preg (cfg);
3599 NEW_BBLOCK (cfg, end_bb);
3600 NEW_BBLOCK (cfg, ok_result_bb);
/* Casting a null reference always succeeds (result 0). */
3602 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3603 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
/* Record cast details so a failure can produce a useful message. */
3605 save_cast_details (cfg, klass, obj_reg);
3607 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3608 NEW_BBLOCK (cfg, interface_fail_bb);
3610 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3611 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
3612 MONO_START_BB (cfg, interface_fail_bb);
3613 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* The interface test failed: only a transparent proxy may still pass. */
3615 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
3617 tmp_reg = alloc_preg (cfg);
3618 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3619 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3620 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/* Proxy with custom type info: result 1 (cannot be determined here). */
3622 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3623 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3626 NEW_BBLOCK (cfg, no_proxy_bb);
3628 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3629 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3630 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3632 tmp_reg = alloc_preg (cfg);
/* Transparent proxy: test the remote proxy class rather than the vtable class. */
3633 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3634 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3636 tmp_reg = alloc_preg (cfg);
3637 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3638 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3639 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3641 NEW_BBLOCK (cfg, fail_1_bb);
3643 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
3645 MONO_START_BB (cfg, fail_1_bb);
3647 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3648 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3650 MONO_START_BB (cfg, no_proxy_bb);
/* Plain object: a normal castclass check that throws on failure. */
3652 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
3655 MONO_START_BB (cfg, ok_result_bb);
3657 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3659 MONO_START_BB (cfg, end_bb);
/* Materialize the result as an I4 value for the evaluation stack. */
3662 MONO_INST_NEW (cfg, ins, OP_ICONST);
3664 ins->type = STACK_I4;
/*
 * handle_delegate_ctor:
 *
 *   Inline the work of mono_delegate_ctor (): allocate the delegate
 * object, fill in its target, method, method_code and invoke_impl
 * fields, and return the new object instruction.
 * NOTE(review): sampled excerpt; some intermediate lines are elided.
 */
3670 * Returns NULL and set the cfg exception on error.
3672 static G_GNUC_UNUSED MonoInst*
3673 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used)
3677 gpointer *trampoline;
3678 MonoInst *obj, *method_ins, *tramp_ins;
3682 obj = handle_alloc (cfg, klass, FALSE, 0);
3686 /* Inline the contents of mono_delegate_ctor */
3688 /* Set target field */
3689 /* Optimize away setting of NULL target */
3690 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0)) {
3691 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
/* The delegate holds a managed reference: emit a write barrier when
 * the GC requires one. */
3692 if (cfg->gen_write_barriers) {
3693 dreg = alloc_preg (cfg);
3694 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target));
3695 emit_write_barrier (cfg, ptr, target, 0);
3699 /* Set method field */
3700 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
3701 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
3702 if (cfg->gen_write_barriers) {
3703 dreg = alloc_preg (cfg);
3704 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method));
3705 emit_write_barrier (cfg, ptr, method_ins, 0);
3708 * To avoid looking up the compiled code belonging to the target method
3709 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
3710 * store it, and we fill it after the method has been compiled.
3712 if (!cfg->compile_aot && !method->dynamic) {
3713 MonoInst *code_slot_ins;
3716 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
3718 domain = mono_domain_get ();
/* Lazily create and populate the per-domain method -> code-slot hash
 * under the domain lock. */
3719 mono_domain_lock (domain);
3720 if (!domain_jit_info (domain)->method_code_hash)
3721 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
3722 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
3724 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
3725 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
3727 mono_domain_unlock (domain);
3729 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
3731 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
3734 /* Set invoke_impl field */
/* Under AOT the trampoline address is patched in at load time. */
3735 if (cfg->compile_aot) {
3736 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, klass);
3738 trampoline = mono_create_delegate_trampoline (klass);
3739 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
3741 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
3743 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit a call to the mono_array_new_va () icall wrapper to allocate a
 * multi-dimensional array of the given rank; the dimension arguments are
 * on the stack in SP.  Disables LLVM since the icall needs a vararg
 * calling convention.
 */
3749 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
3751 MonoJitICallInfo *info;
3753 /* Need to register the icall so it gets an icall wrapper */
3754 info = mono_get_array_new_va_icall (rank);
3756 cfg->flags |= MONO_CFG_HAS_VARARGS;
3758 /* mono_array_new_va () needs a vararg calling convention */
3759 cfg->disable_llvm = TRUE;
3761 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
3762 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * mono_emit_load_got_addr:
 *
 *   Emit an OP_LOAD_GOTADDR at the start of the entry basic block to
 * initialize the GOT variable, plus a dummy use in the exit block so the
 * variable stays live for the whole method.  No-op if there is no GOT
 * variable or it was already allocated.
 */
3766 mono_emit_load_got_addr (MonoCompile *cfg)
3768 MonoInst *getaddr, *dummy_use;
3770 if (!cfg->got_var || cfg->got_var_allocated)
3773 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
3774 getaddr->dreg = cfg->got_var->dreg;
3776 /* Add it to the start of the first bblock */
3777 if (cfg->bb_entry->code) {
3778 getaddr->next = cfg->bb_entry->code;
3779 cfg->bb_entry->code = getaddr;
3782 MONO_ADD_INS (cfg->bb_entry, getaddr);
3784 cfg->got_var_allocated = TRUE;
3787 * Add a dummy use to keep the got_var alive, since real uses might
3788 * only be generated by the back ends.
3789 * Add it to end_bblock, so the variable's lifetime covers the whole
3791 * It would be better to make the usage of the got var explicit in all
3792 * cases when the backend needs it (i.e. calls, throw etc.), so this
3793 * wouldn't be needed.
3795 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
3796 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Cached MONO_INLINELIMIT value (IL size cap for inlining candidates). */
3799 static int inline_limit;
3800 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 *   Decide whether METHOD may be inlined into the method being compiled
 * in CFG.  Checks inlining depth, method attributes, IL size against the
 * (env-configurable) inline limit, class-initialization constraints,
 * declarative security, and soft-float restrictions on R4 values.
 * NOTE(review): sampled excerpt; the individual return statements are
 * among the elided lines.
 */
3803 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
3805 MonoMethodHeaderSummary header;
3807 #ifdef MONO_ARCH_SOFT_FLOAT
3808 MonoMethodSignature *sig = mono_method_signature (method);
3812 if (cfg->generic_sharing_context)
3815 if (cfg->inline_depth > 10)
3818 #ifdef MONO_ARCH_HAVE_LMF_OPS
3819 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3820 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
3821 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
3826 if (!mono_method_get_header_summary (method, &header))
3829 /*runtime, icall and pinvoke are checked by summary call*/
3830 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
3831 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
3832 (method->klass->marshalbyref) ||
3836 /* also consider num_locals? */
3837 /* Do the size check early to avoid creating vtables */
/* Read MONO_INLINELIMIT once and cache it; fall back to the compiled-in
 * INLINE_LENGTH_LIMIT. */
3838 if (!inline_limit_inited) {
3839 if (getenv ("MONO_INLINELIMIT"))
3840 inline_limit = atoi (getenv ("MONO_INLINELIMIT"));
3842 inline_limit = INLINE_LENGTH_LIMIT;
3843 inline_limit_inited = TRUE;
3845 if (header.code_size >= inline_limit)
3849 * if we can initialize the class of the method right away, we do,
3850 * otherwise we don't allow inlining if the class needs initialization,
3851 * since it would mean inserting a call to mono_runtime_class_init()
3852 * inside the inlined code
3854 if (!(cfg->opt & MONO_OPT_SHARED)) {
3855 if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
3856 if (cfg->run_cctors && method->klass->has_cctor) {
3857 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
3858 if (!method->klass->runtime_info)
3859 /* No vtable created yet */
3861 vtable = mono_class_vtable (cfg->domain, method->klass);
3864 /* This makes so that inline cannot trigger */
3865 /* .cctors: too many apps depend on them */
3866 /* running with a specific order... */
3867 if (! vtable->initialized)
3869 mono_runtime_class_init (vtable);
3871 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
3872 if (!method->klass->runtime_info)
3873 /* No vtable created yet */
3875 vtable = mono_class_vtable (cfg->domain, method->klass);
3878 if (!vtable->initialized)
3883 * If we're compiling for shared code
3884 * the cctor will need to be run at aot method load time, for example,
3885 * or at the end of the compilation of the inlining method.
3887 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
3892 * CAS - do not inline methods with declarative security
3893 * Note: this has to be before any possible return TRUE;
3895 if (mono_method_has_declsec (method))
3898 #ifdef MONO_ARCH_SOFT_FLOAT
/* Soft-float targets cannot inline methods returning or taking R4. */
3900 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
3902 for (i = 0; i < sig->param_count; ++i)
3903 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Return whether a static field access from METHOD requires emitting a
 * class-init check for VTABLE's class.  NOTE(review): sampled excerpt;
 * the return statements are among the elided lines.
 */
3911 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoVTable *vtable)
3913 if (vtable->initialized && !cfg->compile_aot)
3916 if (vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
3919 if (!mono_class_needs_cctor_run (vtable->klass, method))
3922 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (vtable->klass == method->klass))
3923 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit IR computing the address of element INDEX in the one-dimensional
 * array ARR of element class KLASS, with an optional bounds check
 * (BCHECK).  On x86/amd64, element sizes that are powers of two use a
 * single LEA.
 */
3930 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
3934 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
3936 mono_class_init (klass);
3937 size = mono_class_array_element_size (klass);
3939 mult_reg = alloc_preg (cfg);
3940 array_reg = arr->dreg;
3941 index_reg = index->dreg;
3943 #if SIZEOF_REGISTER == 8
3944 /* The array reg is 64 bits but the index reg is only 32 */
3945 if (COMPILE_LLVM (cfg)) {
3947 index2_reg = index_reg;
3949 index2_reg = alloc_preg (cfg);
3950 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
3953 if (index->type == STACK_I8) {
3954 index2_reg = alloc_preg (cfg);
3955 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
3957 index2_reg = index_reg;
3962 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
3964 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Power-of-two element sizes: fold multiply+add+offset into one LEA. */
3965 if (size == 1 || size == 2 || size == 4 || size == 8) {
3966 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
3968 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
3969 ins->type = STACK_PTR;
/* Generic path: addr = arr + index * size + offsetof (MonoArray, vector). */
3975 add_reg = alloc_preg (cfg);
3977 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
3978 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
3979 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3980 ins->type = STACK_PTR;
3981 MONO_ADD_INS (cfg->cbb, ins);
3986 #ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit IR computing the address of element [INDEX1, INDEX2] in the
 * two-dimensional array ARR of element class KLASS, performing the
 * per-dimension range checks against the MonoArrayBounds data.  Only
 * compiled when the target has native multiply (depends on OP_LMUL).
 */
3988 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
3990 int bounds_reg = alloc_preg (cfg);
3991 int add_reg = alloc_preg (cfg);
3992 int mult_reg = alloc_preg (cfg);
3993 int mult2_reg = alloc_preg (cfg);
3994 int low1_reg = alloc_preg (cfg);
3995 int low2_reg = alloc_preg (cfg);
3996 int high1_reg = alloc_preg (cfg);
3997 int high2_reg = alloc_preg (cfg);
3998 int realidx1_reg = alloc_preg (cfg);
3999 int realidx2_reg = alloc_preg (cfg);
4000 int sum_reg = alloc_preg (cfg);
4005 mono_class_init (klass);
4006 size = mono_class_array_element_size (klass);
4008 index1 = index_ins1->dreg;
4009 index2 = index_ins2->dreg;
4011 /* range checking */
4012 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
4013 arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
/* Dimension 1: realidx = index - lower_bound, then unsigned compare
 * against length so a negative realidx also fails. */
4015 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
4016 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4017 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
4018 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
4019 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
4020 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
4021 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Dimension 2: same check against the second MonoArrayBounds entry. */
4023 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
4024 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4025 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
4026 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
4027 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
4028 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
4029 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* addr = arr + ((realidx1 * len2 + realidx2) * size) + vector offset. */
4031 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
4032 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
4033 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
4034 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
4035 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
4037 ins->type = STACK_MP;
4039 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 *   Emit the address computation for an array Address/Get/Set helper
 * CMETHOD.  Rank-1 and (when supported) rank-2 arrays use the inline
 * fast paths above; other ranks call a marshalling helper.  IS_SET
 * means the last signature parameter is the value, not an index.
 */
4046 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
4050 MonoMethod *addr_method;
4053 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
4056 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], TRUE);
4058 #ifndef MONO_ARCH_EMULATE_MUL_DIV
4059 /* emit_ldelema_2 depends on OP_LMUL */
4060 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
4061 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
/* General case: call the generated Address () helper for this rank/size. */
4065 element_size = mono_class_array_element_size (cmethod->klass->element_class);
4066 addr_method = mono_marshal_get_array_address (rank, element_size);
4067 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/* Default break policy: honor every breakpoint request. */
4072 static MonoBreakPolicy
4073 always_insert_breakpoint (MonoMethod *method)
4075 return MONO_BREAK_POLICY_ALWAYS;
/* Currently installed break policy callback; never NULL. */
4078 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
4081 * mono_set_break_policy:
4082 * policy_callback: the new callback function
4084 * Allow embedders to decide wherther to actually obey breakpoint instructions
4085 * (both break IL instructions and Debugger.Break () method calls), for example
4086 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
4087 * untrusted or semi-trusted code.
4089 * @policy_callback will be called every time a break point instruction needs to
4090 * be inserted with the method argument being the method that calls Debugger.Break()
4091 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
4092 * if it wants the breakpoint to not be effective in the given method.
4093 * #MONO_BREAK_POLICY_ALWAYS is the default.
4096 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
4098 if (policy_callback)
4099 break_policy_func = policy_callback;
/* NULL resets to the default (always insert). */
4101 break_policy_func = always_insert_breakpoint;
/*
 * should_insert_brekpoint:  (sic — historical typo in the symbol name)
 *
 *   Ask the installed break policy whether a breakpoint in METHOD should
 * actually be emitted.  NOTE(review): sampled excerpt; the per-case
 * return statements are among the elided lines.
 */
4105 should_insert_brekpoint (MonoMethod *method) {
4106 switch (break_policy_func (method)) {
4107 case MONO_BREAK_POLICY_ALWAYS:
4109 case MONO_BREAK_POLICY_NEVER:
4111 case MONO_BREAK_POLICY_ON_DBG:
4112 return mono_debug_using_mono_debugger ();
4114 g_warning ("Incorrect value returned from break policy callback");
4119 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 *
 *   Inline Array.GetGenericValueImpl / SetGenericValueImpl: compute the
 * element address (bounds already checked by the callers) and copy the
 * value between the array element and the value pointer in args [2].
 */
4121 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
4123 MonoInst *addr, *store, *load;
4124 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
4126 /* the bounds check is already done by the callers */
4127 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
/* is_set: *element = *value_ptr; otherwise: *value_ptr = *element. */
4129 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
4130 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
4132 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
4133 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/*
 * mini_emit_inst_for_ctor:
 *
 *   Try to replace a constructor call with an intrinsic instruction;
 * currently only SIMD intrinsics (when MONO_OPT_SIMD is enabled).
 * Returns NULL when no intrinsic applies.
 */
4139 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4141 MonoInst *ins = NULL;
4142 #ifdef MONO_ARCH_SIMD_INTRINSICS
4143 if (cfg->opt & MONO_OPT_SIMD) {
4144 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
/*
 * mini_emit_inst_for_method:
 *
 *   Try to replace a call to CMETHOD with inline IR (an "intrinsic").
 * Recognizes selected methods on String, Object, Array, RuntimeHelpers,
 * Thread, Monitor, Interlocked, Debugger, Environment and Math, and
 * finally defers to the architecture backend.  Returns NULL when the
 * call should be emitted normally.
 * NOTE(review): this is a sampled excerpt; some intermediate lines
 * (returns, #else/#endif, closing braces) are elided.
 */
4154 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4156 MonoInst *ins = NULL;
/* Lazily cached lookup of System.Runtime.CompilerServices.RuntimeHelpers. */
4158 static MonoClass *runtime_helpers_class = NULL;
4159 if (! runtime_helpers_class)
4160 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
4161 "System.Runtime.CompilerServices", "RuntimeHelpers");
/* --- System.String intrinsics --- */
4163 if (cmethod->klass == mono_defaults.string_class) {
4164 if (strcmp (cmethod->name, "get_Chars") == 0) {
4165 int dreg = alloc_ireg (cfg);
4166 int index_reg = alloc_preg (cfg);
4167 int mult_reg = alloc_preg (cfg);
4168 int add_reg = alloc_preg (cfg);
4170 #if SIZEOF_REGISTER == 8
4171 /* The array reg is 64 bits but the index reg is only 32 */
4172 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
4174 index_reg = args [1]->dreg;
4176 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
4178 #if defined(TARGET_X86) || defined(TARGET_AMD64)
4179 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
4180 add_reg = ins->dreg;
4181 /* Avoid a warning */
4183 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
/* Generic path: addr = str + index * 2 + offsetof (MonoString, chars). */
4186 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
4187 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
4188 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
4189 add_reg, G_STRUCT_OFFSET (MonoString, chars));
4191 type_from_op (ins, NULL, NULL);
4193 } else if (strcmp (cmethod->name, "get_Length") == 0) {
4194 int dreg = alloc_ireg (cfg);
4195 /* Decompose later to allow more optimizations */
4196 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
4197 ins->type = STACK_I4;
4198 ins->flags |= MONO_INST_FAULT;
4199 cfg->cbb->has_array_access = TRUE;
4200 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
4203 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
4204 int mult_reg = alloc_preg (cfg);
4205 int add_reg = alloc_preg (cfg);
4207 /* The corlib functions check for oob already. */
4208 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
4209 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
4210 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
4211 return cfg->cbb->last_ins;
/* --- System.Object intrinsics --- */
4214 } else if (cmethod->klass == mono_defaults.object_class) {
4216 if (strcmp (cmethod->name, "GetType") == 0) {
4217 int dreg = alloc_preg (cfg);
4218 int vt_reg = alloc_preg (cfg);
4219 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
4220 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
4221 type_from_op (ins, NULL, NULL);
4224 #if !defined(MONO_ARCH_EMULATE_MUL_DIV)
/* Hash of the (non-moving) object address; invalid with a moving GC. */
4225 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0 && !mono_gc_is_moving ()) {
4226 int dreg = alloc_ireg (cfg);
4227 int t1 = alloc_ireg (cfg);
4229 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
4230 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
4231 ins->type = STACK_I4;
4235 } else if (strcmp (cmethod->name, ".ctor") == 0) {
/* Object..ctor () is empty: emit a NOP. */
4236 MONO_INST_NEW (cfg, ins, OP_NOP);
4237 MONO_ADD_INS (cfg->cbb, ins);
/* --- System.Array intrinsics --- */
4241 } else if (cmethod->klass == mono_defaults.array_class) {
/* Matches both GetGenericValueImpl and SetGenericValueImpl. */
4242 if (strcmp (cmethod->name + 1, "etGenericValueImpl") == 0)
4243 return emit_array_generic_access (cfg, fsig, args, *cmethod->name == 'S');
4245 #ifndef MONO_BIG_ARRAYS
4247 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
4250 if ((strcmp (cmethod->name, "GetLength") == 0 || strcmp (cmethod->name, "GetLowerBound") == 0) && args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
4251 int dreg = alloc_ireg (cfg);
4252 int bounds_reg = alloc_ireg (cfg);
4253 MonoBasicBlock *end_bb, *szarray_bb;
4254 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
4256 NEW_BBLOCK (cfg, end_bb);
4257 NEW_BBLOCK (cfg, szarray_bb);
/* A NULL bounds pointer means an szarray (rank 1, lower bound 0). */
4259 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
4260 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
4261 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
4262 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
4263 /* Non-szarray case */
4265 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4266 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
4268 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4269 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4270 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4271 MONO_START_BB (cfg, szarray_bb);
4274 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4275 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
4277 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4278 MONO_START_BB (cfg, end_bb);
4280 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
4281 ins->type = STACK_I4;
4287 if (cmethod->name [0] != 'g')
4290 if (strcmp (cmethod->name, "get_Rank") == 0) {
4291 int dreg = alloc_ireg (cfg);
4292 int vtable_reg = alloc_preg (cfg);
4293 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
4294 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
4295 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
4296 vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
4297 type_from_op (ins, NULL, NULL);
4300 } else if (strcmp (cmethod->name, "get_Length") == 0) {
4301 int dreg = alloc_ireg (cfg);
4303 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4304 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
4305 type_from_op (ins, NULL, NULL);
/* --- RuntimeHelpers intrinsics --- */
4310 } else if (cmethod->klass == runtime_helpers_class) {
4312 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
4313 EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
/* --- System.Threading.Thread intrinsics --- */
4317 } else if (cmethod->klass == mono_defaults.thread_class) {
4318 if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
4319 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
4320 MONO_ADD_INS (cfg->cbb, ins);
4322 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
4323 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
4324 MONO_ADD_INS (cfg->cbb, ins);
/* --- System.Threading.Monitor fast paths --- */
4327 } else if (cmethod->klass == mono_defaults.monitor_class) {
4328 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
4329 if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) {
4332 if (COMPILE_LLVM (cfg)) {
4334 * Pass the argument normally, the LLVM backend will handle the
4335 * calling convention problems.
4337 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
4339 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
4340 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
/* Non-LLVM: the trampoline expects the object in a fixed register. */
4341 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
4342 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
4345 return (MonoInst*)call;
4346 } else if (strcmp (cmethod->name, "Exit") == 0) {
4349 if (COMPILE_LLVM (cfg)) {
4350 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
4352 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
4353 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
4354 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
4355 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
4358 return (MonoInst*)call;
4360 #elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
4361 MonoMethod *fast_method = NULL;
4363 /* Avoid infinite recursion */
4364 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
4365 (strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
4366 strcmp (cfg->method->name, "FastMonitorExit") == 0))
4369 if ((strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) ||
4370 strcmp (cmethod->name, "Exit") == 0)
4371 fast_method = mono_monitor_get_fast_path (cmethod);
4375 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
/* --- System.Threading.Interlocked intrinsics --- */
4377 } else if (cmethod->klass->image == mono_defaults.corlib &&
4378 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
4379 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
4382 #if SIZEOF_REGISTER == 8
4383 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
4384 /* 64 bit reads are already atomic */
4385 MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
4386 ins->dreg = mono_alloc_preg (cfg);
4387 ins->inst_basereg = args [0]->dreg;
4388 ins->inst_offset = 0;
4389 MONO_ADD_INS (cfg->cbb, ins);
4393 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
/* Increment/Decrement are atomic add of +1/-1 returning the new value. */
4394 if (strcmp (cmethod->name, "Increment") == 0) {
4395 MonoInst *ins_iconst;
4398 if (fsig->params [0]->type == MONO_TYPE_I4)
4399 opcode = OP_ATOMIC_ADD_NEW_I4;
4400 #if SIZEOF_REGISTER == 8
4401 else if (fsig->params [0]->type == MONO_TYPE_I8)
4402 opcode = OP_ATOMIC_ADD_NEW_I8;
4405 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4406 ins_iconst->inst_c0 = 1;
4407 ins_iconst->dreg = mono_alloc_ireg (cfg);
4408 MONO_ADD_INS (cfg->cbb, ins_iconst);
4410 MONO_INST_NEW (cfg, ins, opcode);
4411 ins->dreg = mono_alloc_ireg (cfg);
4412 ins->inst_basereg = args [0]->dreg;
4413 ins->inst_offset = 0;
4414 ins->sreg2 = ins_iconst->dreg;
4415 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4416 MONO_ADD_INS (cfg->cbb, ins);
4418 } else if (strcmp (cmethod->name, "Decrement") == 0) {
4419 MonoInst *ins_iconst;
4422 if (fsig->params [0]->type == MONO_TYPE_I4)
4423 opcode = OP_ATOMIC_ADD_NEW_I4;
4424 #if SIZEOF_REGISTER == 8
4425 else if (fsig->params [0]->type == MONO_TYPE_I8)
4426 opcode = OP_ATOMIC_ADD_NEW_I8;
4429 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4430 ins_iconst->inst_c0 = -1;
4431 ins_iconst->dreg = mono_alloc_ireg (cfg);
4432 MONO_ADD_INS (cfg->cbb, ins_iconst);
4434 MONO_INST_NEW (cfg, ins, opcode);
4435 ins->dreg = mono_alloc_ireg (cfg);
4436 ins->inst_basereg = args [0]->dreg;
4437 ins->inst_offset = 0;
4438 ins->sreg2 = ins_iconst->dreg;
4439 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4440 MONO_ADD_INS (cfg->cbb, ins);
4442 } else if (strcmp (cmethod->name, "Add") == 0) {
4445 if (fsig->params [0]->type == MONO_TYPE_I4)
4446 opcode = OP_ATOMIC_ADD_NEW_I4;
4447 #if SIZEOF_REGISTER == 8
4448 else if (fsig->params [0]->type == MONO_TYPE_I8)
4449 opcode = OP_ATOMIC_ADD_NEW_I8;
4453 MONO_INST_NEW (cfg, ins, opcode);
4454 ins->dreg = mono_alloc_ireg (cfg);
4455 ins->inst_basereg = args [0]->dreg;
4456 ins->inst_offset = 0;
4457 ins->sreg2 = args [1]->dreg;
4458 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4459 MONO_ADD_INS (cfg->cbb, ins);
4462 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
4464 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
4465 if (strcmp (cmethod->name, "Exchange") == 0) {
4467 gboolean is_ref = fsig->params [0]->type == MONO_TYPE_OBJECT;
4469 if (fsig->params [0]->type == MONO_TYPE_I4)
4470 opcode = OP_ATOMIC_EXCHANGE_I4;
4471 #if SIZEOF_REGISTER == 8
4472 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I8) ||
4473 (fsig->params [0]->type == MONO_TYPE_I))
4474 opcode = OP_ATOMIC_EXCHANGE_I8;
4476 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I))
4477 opcode = OP_ATOMIC_EXCHANGE_I4;
4482 MONO_INST_NEW (cfg, ins, opcode);
4483 ins->dreg = mono_alloc_ireg (cfg);
4484 ins->inst_basereg = args [0]->dreg;
4485 ins->inst_offset = 0;
4486 ins->sreg2 = args [1]->dreg;
4487 MONO_ADD_INS (cfg->cbb, ins);
4489 switch (fsig->params [0]->type) {
4491 ins->type = STACK_I4;
4495 ins->type = STACK_I8;
4497 case MONO_TYPE_OBJECT:
4498 ins->type = STACK_OBJ;
4501 g_assert_not_reached ();
/* Exchanging a managed reference into the location needs a barrier. */
4504 if (cfg->gen_write_barriers && is_ref)
4505 emit_write_barrier (cfg, args [0], args [1], -1);
4507 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
4509 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS
4510 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
4512 gboolean is_ref = MONO_TYPE_IS_REFERENCE (fsig->params [1]);
4513 if (fsig->params [1]->type == MONO_TYPE_I4)
4515 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I)
4516 size = sizeof (gpointer);
4517 else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I8)
4520 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
4521 ins->dreg = alloc_ireg (cfg);
4522 ins->sreg1 = args [0]->dreg;
4523 ins->sreg2 = args [1]->dreg;
4524 ins->sreg3 = args [2]->dreg;
4525 ins->type = STACK_I4;
4526 MONO_ADD_INS (cfg->cbb, ins);
4527 } else if (size == 8) {
4528 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
4529 ins->dreg = alloc_ireg (cfg);
4530 ins->sreg1 = args [0]->dreg;
4531 ins->sreg2 = args [1]->dreg;
4532 ins->sreg3 = args [2]->dreg;
4533 ins->type = STACK_I8;
4534 MONO_ADD_INS (cfg->cbb, ins);
4536 /* g_assert_not_reached (); */
4538 if (cfg->gen_write_barriers && is_ref)
4539 emit_write_barrier (cfg, args [0], args [1], -1);
4541 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS */
/* --- Misc corlib intrinsics (Debugger.Break, Environment) --- */
4545 } else if (cmethod->klass->image == mono_defaults.corlib) {
4546 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
4547 && strcmp (cmethod->klass->name, "Debugger") == 0) {
4548 if (should_insert_brekpoint (cfg->method))
4549 MONO_INST_NEW (cfg, ins, OP_BREAK);
4551 MONO_INST_NEW (cfg, ins, OP_NOP);
4552 MONO_ADD_INS (cfg->cbb, ins);
4555 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
4556 && strcmp (cmethod->klass->name, "Environment") == 0) {
/* Compile-time constant depending on the target platform. */
4558 EMIT_NEW_ICONST (cfg, ins, 1);
4560 EMIT_NEW_ICONST (cfg, ins, 0);
4564 } else if (cmethod->klass == mono_defaults.math_class) {
4566 * There is general branches code for Min/Max, but it does not work for
4568 * http://everything2.com/?node_id=1051618
4572 #ifdef MONO_ARCH_SIMD_INTRINSICS
4573 if (cfg->opt & MONO_OPT_SIMD) {
4574 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
/* Fall back to any architecture-specific intrinsics. */
4580 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
4584 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *
 *   Redirect calls to selected runtime-internal methods to cheaper
 * JIT-emitted equivalents.  Currently only String.InternalAllocateStr is
 * handled: when allocation profiling is off and the code is not compiled
 * shared (MONO_OPT_SHARED), the call is replaced by a direct call to the
 * managed GC string allocator with the vtable passed explicitly.
 * Returns the emitted call instruction, or NULL-equivalent fallthrough
 * when no redirection applies (tail of the function elided in this view).
 */
4587 inline static MonoInst*
4588 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
4589 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
4591 if (method->klass == mono_defaults.string_class) {
4592 /* managed string allocation support */
4593 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
4594 MonoInst *iargs [2];
4595 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4596 MonoMethod *managed_alloc = NULL;
4598 g_assert (vtable); /*Should not fail since it System.String*/
/* Cross compilers can't lookup the managed allocator (no runtime). */
4599 #ifndef MONO_CROSS_COMPILE
4600 managed_alloc = mono_gc_get_managed_allocator (vtable, FALSE);
/* args [0] is the requested string length; the allocator also needs the vtable. */
4604 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4605 iargs [1] = args [0];
4606 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
/*
 * mono_save_args:
 *
 *   Create OP_LOCAL variables for the arguments of an inlined call and emit
 * stores of the caller's stack slots (SP) into them, so the inlined body can
 * reference cfg->args [] like a normal method.  The implicit 'this' argument
 * (i == 0 when sig->hasthis) takes its type from the stack slot rather than
 * the signature.
 */
4613 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
4615 MonoInst *store, *temp;
4618 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
4619 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
4622 * FIXME: We should use *args++ = sp [0], but that would mean the arg
4623 * would be different than the MonoInst's used to represent arguments, and
4624 * the ldelema implementation can't deal with that.
4625 * Solution: When ldelema is used on an inline argument, create a var for
4626 * it, emit ldelema on that var, and emit the saving code below in
4627 * inline_method () if needed.
4629 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
4630 cfg->args [i] = temp;
4631 /* This uses cfg->args [i] which is set by the preceeding line */
4632 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
4633 store->cil_code = sp [0]->cil_code;
4638 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
4639 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
4641 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *
 *   Debugging aid: only allow inlining of methods whose full name starts
 * with the prefix in the MONO_INLINE_CALLED_METHOD_NAME_LIMIT environment
 * variable.  Returns TRUE (allow) when no limit is set.  The env value is
 * cached in a static on first use; not thread-safe, debug-only.
 */
4643 check_inline_called_method_name_limit (MonoMethod *called_method)
4646 static char *limit = NULL;
4648 if (limit == NULL) {
4649 char *limit_string = getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
4651 if (limit_string != NULL)
4652 limit = limit_string;
/* Empty string sentinel: "checked the env once, no limit configured". */
4654 limit = (char *) "";
4657 if (limit [0] != '\0') {
4658 char *called_method_name = mono_method_full_name (called_method, TRUE);
/* Prefix comparison: inline only methods whose name starts with the limit. */
4660 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
4661 g_free (called_method_name);
4663 //return (strncmp_result <= 0);
4664 return (strncmp_result == 0);
4671 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *
 *   Mirror of check_inline_called_method_name_limit () for the CALLER side:
 * only inline when the caller's full name starts with the prefix in
 * MONO_INLINE_CALLER_METHOD_NAME_LIMIT.  Same caching/sentinel scheme.
 */
4673 check_inline_caller_method_name_limit (MonoMethod *caller_method)
4676 static char *limit = NULL;
4678 if (limit == NULL) {
4679 char *limit_string = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
4680 if (limit_string != NULL) {
4681 limit = limit_string;
/* Empty string sentinel: env checked once, no limit configured. */
4683 limit = (char *) "";
4687 if (limit [0] != '\0') {
4688 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
4690 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
4691 g_free (caller_method_name);
4693 //return (strncmp_result <= 0);
4694 return (strncmp_result == 0);
/*
 * inline_method:
 *
 *   Try to inline CMETHOD at the current emission point.  Saves the parts of
 * CFG that mono_method_to_ir () overwrites (locals, args, cil offsets, cbb,
 * current method, generic context, ...), recursively translates CMETHOD's IL
 * into fresh bblocks between SBBLOCK and EBBLOCK, then restores CFG.  The
 * inline is committed only when the recursive translation succeeded and its
 * cost is below the threshold (or INLINE_ALLWAYS forces it); otherwise the
 * newly added bblocks are discarded by resetting cfg->cbb.
 * RVAR receives the callee's return value when the signature is non-void.
 * NOTE(review): the save/restore of cfg fields is strictly order-dependent;
 * do not reorder these assignments.
 */
4702 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
4703 guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_allways)
4705 MonoInst *ins, *rvar = NULL;
4706 MonoMethodHeader *cheader;
4707 MonoBasicBlock *ebblock, *sbblock;
4709 MonoMethod *prev_inlined_method;
4710 MonoInst **prev_locals, **prev_args;
4711 MonoType **prev_arg_types;
4712 guint prev_real_offset;
4713 GHashTable *prev_cbb_hash;
4714 MonoBasicBlock **prev_cil_offset_to_bb;
4715 MonoBasicBlock *prev_cbb;
4716 unsigned char* prev_cil_start;
4717 guint32 prev_cil_offset_to_bb_len;
4718 MonoMethod *prev_current_method;
4719 MonoGenericContext *prev_generic_context;
4720 gboolean ret_var_set, prev_ret_var_set;
4722 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
/* Debug-only name filters (see check_inline_*_method_name_limit above). */
4724 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
4725 if ((! inline_allways) && ! check_inline_called_method_name_limit (cmethod))
4728 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
4729 if ((! inline_allways) && ! check_inline_caller_method_name_limit (cfg->method))
4733 if (cfg->verbose_level > 2)
4734 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
/* Count each method as inlineable only once. */
4736 if (!cmethod->inline_info) {
4737 mono_jit_stats.inlineable_methods++;
4738 cmethod->inline_info = 1;
4741 /* allocate local variables */
4742 cheader = mono_method_get_header (cmethod);
4744 if (cheader == NULL || mono_loader_get_last_error ()) {
4746 mono_metadata_free_mh (cheader);
4747 mono_loader_clear_error ();
4751 /*Must verify before creating locals as it can cause the JIT to assert.*/
4752 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
4753 mono_metadata_free_mh (cheader);
4757 /* allocate space to store the return value */
4758 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
4759 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
/* Swap in the callee's locals; the caller's are restored below. */
4763 prev_locals = cfg->locals;
4764 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
4765 for (i = 0; i < cheader->num_locals; ++i)
4766 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
4768 /* allocate start and end blocks */
4769 /* This is needed so if the inline is aborted, we can clean up */
4770 NEW_BBLOCK (cfg, sbblock);
4771 sbblock->real_offset = real_offset;
4773 NEW_BBLOCK (cfg, ebblock);
4774 ebblock->block_num = cfg->num_bblocks++;
4775 ebblock->real_offset = real_offset;
/* Save every cfg field that mono_method_to_ir () clobbers. */
4777 prev_args = cfg->args;
4778 prev_arg_types = cfg->arg_types;
4779 prev_inlined_method = cfg->inlined_method;
4780 cfg->inlined_method = cmethod;
4781 cfg->ret_var_set = FALSE;
4782 cfg->inline_depth ++;
4783 prev_real_offset = cfg->real_offset;
4784 prev_cbb_hash = cfg->cbb_hash;
4785 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
4786 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
4787 prev_cil_start = cfg->cil_start;
4788 prev_cbb = cfg->cbb;
4789 prev_current_method = cfg->current_method;
4790 prev_generic_context = cfg->generic_context;
4791 prev_ret_var_set = cfg->ret_var_set;
/* Recursively translate the callee's IL; negative cost means failure. */
4793 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, *ip == CEE_CALLVIRT);
4795 ret_var_set = cfg->ret_var_set;
/* Restore the caller's state in full. */
4797 cfg->inlined_method = prev_inlined_method;
4798 cfg->real_offset = prev_real_offset;
4799 cfg->cbb_hash = prev_cbb_hash;
4800 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
4801 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
4802 cfg->cil_start = prev_cil_start;
4803 cfg->locals = prev_locals;
4804 cfg->args = prev_args;
4805 cfg->arg_types = prev_arg_types;
4806 cfg->current_method = prev_current_method;
4807 cfg->generic_context = prev_generic_context;
4808 cfg->ret_var_set = prev_ret_var_set;
4809 cfg->inline_depth --;
/* Commit: cost below threshold, or inlining is mandatory. */
4811 if ((costs >= 0 && costs < 60) || inline_allways) {
4812 if (cfg->verbose_level > 2)
4813 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4815 mono_jit_stats.inlined_methods++;
4817 /* always add some code to avoid block split failures */
4818 MONO_INST_NEW (cfg, ins, OP_NOP);
4819 MONO_ADD_INS (prev_cbb, ins);
4821 prev_cbb->next_bb = sbblock;
4822 link_bblock (cfg, prev_cbb, sbblock);
4825 * Get rid of the begin and end bblocks if possible to aid local
4828 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
4830 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
4831 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
4833 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
4834 MonoBasicBlock *prev = ebblock->in_bb [0];
4835 mono_merge_basic_blocks (cfg, prev, ebblock);
4837 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
4838 mono_merge_basic_blocks (cfg, prev_cbb, prev);
4839 cfg->cbb = prev_cbb;
4847 * If the inlined method contains only a throw, then the ret var is not
4848 * set, so set it to a dummy value.
4851 static double r8_0 = 0.0;
4853 switch (rvar->type) {
4855 MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
4858 MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
4863 MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
4866 MONO_INST_NEW (cfg, ins, OP_R8CONST);
4867 ins->type = STACK_R8;
4868 ins->inst_p0 = (void*)&r8_0;
4869 ins->dreg = rvar->dreg;
4870 MONO_ADD_INS (cfg->cbb, ins);
4873 MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (fsig->ret));
4876 g_assert_not_reached ();
/* Push the (possibly dummy) return value for the caller's stack. */
4880 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
4883 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
/* Abort path: discard the work and clear any pending exception state. */
4886 if (cfg->verbose_level > 2)
4887 printf ("INLINE ABORTED %s\n", mono_method_full_name (cmethod, TRUE));
4888 cfg->exception_type = MONO_EXCEPTION_NONE;
4889 mono_loader_clear_error ();
4891 /* This gets rid of the newly added bblocks */
4892 cfg->cbb = prev_cbb;
4894 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
4899 * Some of these comments may well be out-of-date.
4900 * Design decisions: we do a single pass over the IL code (and we do bblock
4901 * splitting/merging in the few cases when it's required: a back jump to an IL
4902 * address that was not already seen as bblock starting point).
4903 * Code is validated as we go (full verification is still better left to metadata/verify.c).
4904 * Complex operations are decomposed in simpler ones right away. We need to let the
4905 * arch-specific code peek and poke inside this process somehow (except when the
4906 * optimizations can take advantage of the full semantic info of coarse opcodes).
4907 * All the opcodes of the form opcode.s are 'normalized' to opcode.
4908 * MonoInst->opcode initially is the IL opcode or some simplification of that
4909 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
4910 * opcode with value bigger than OP_LAST.
4911 * At this point the IR can be handed over to an interpreter, a dumb code generator
4912 * or to the optimizing code generator that will translate it to SSA form.
4914 * Profiling directed optimizations.
4915 * We may compile by default with few or no optimizations and instrument the code
4916 * or the user may indicate what methods to optimize the most either in a config file
4917 * or through repeated runs where the compiler applies offline the optimizations to
4918 * each method and then decides if it was worth it.
4921 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
4922 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
4923 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
4924 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
4925 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
4926 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
4927 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
4928 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; LOAD_ERROR;}
4930 /* offset from br.s -> br like opcodes */
4931 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *
 *   Return whether the IL address IP belongs to basic block BB, i.e. no
 * other bblock starts at that CIL offset (NULL means no block boundary
 * there, so IP is still inside the current block).
 */
4934 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
4936 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
4938 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   First pass over the IL from START to END: decode each opcode and create
 * basic blocks (via GET_BBLOCK) at every branch target and at the
 * instruction following each branch, so the main translation loop finds all
 * block boundaries already registered.  Also marks the bblock containing a
 * CEE_THROW as out-of-line (cold) code.  *POS reports how far decoding got.
 */
4942 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
4944 unsigned char *ip = start;
4945 unsigned char *target;
4948 MonoBasicBlock *bblock;
4949 const MonoOpcode *opcode;
4952 cli_addr = ip - start;
4953 i = mono_opcode_value ((const guint8 **)&ip, end);
4956 opcode = &mono_opcodes [i];
/* Advance ip by the operand size; only branch-like operands create blocks. */
4957 switch (opcode->argument) {
4958 case MonoInlineNone:
4961 case MonoInlineString:
4962 case MonoInlineType:
4963 case MonoInlineField:
4964 case MonoInlineMethod:
4967 case MonoShortInlineR:
4974 case MonoShortInlineVar:
4975 case MonoShortInlineI:
4978 case MonoShortInlineBrTarget:
/* 1-byte signed displacement, relative to the end of the 2-byte instruction. */
4979 target = start + cli_addr + 2 + (signed char)ip [1];
4980 GET_BBLOCK (cfg, bblock, target);
4983 GET_BBLOCK (cfg, bblock, ip);
4985 case MonoInlineBrTarget:
/* 4-byte signed displacement, relative to the end of the 5-byte instruction. */
4986 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
4987 GET_BBLOCK (cfg, bblock, target);
4990 GET_BBLOCK (cfg, bblock, ip);
4992 case MonoInlineSwitch: {
4993 guint32 n = read32 (ip + 1);
/* Fall-through target is after the count word and the n jump offsets. */
4996 cli_addr += 5 + 4 * n;
4997 target = start + cli_addr;
4998 GET_BBLOCK (cfg, bblock, target);
5000 for (j = 0; j < n; ++j) {
5001 target = start + cli_addr + (gint32)read32 (ip);
5002 GET_BBLOCK (cfg, bblock, target);
5012 g_assert_not_reached ();
5015 if (i == CEE_THROW) {
5016 unsigned char *bb_start = ip - 1;
5018 /* Find the start of the bblock containing the throw */
5020 while ((bb_start >= start) && !bblock) {
5021 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
/* Throwing blocks are cold: place them out of line. */
5025 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *
 *   Resolve TOKEN to a MonoMethod in the context of M.  For wrapper methods
 * the token indexes the wrapper's private data instead of metadata.  "Open"
 * constructed methods (unbound type variables) are allowed here; see
 * mini_get_method () for the checked variant.
 */
5034 static inline MonoMethod *
5035 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
5039 if (m->wrapper_type != MONO_WRAPPER_NONE)
5040 return mono_method_get_wrapper_data (m, token);
5042 method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 *
 *   Like mini_get_method_allow_open (), but when not compiling with generic
 * sharing, a method on an open constructed type is rejected (the elided
 * branch presumably clears/replaces the result -- confirm against the full
 * source).
 */
5047 static inline MonoMethod *
5048 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
5050 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
5052 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 *
 *   Resolve TOKEN to a MonoClass in the context of METHOD.  Wrapper methods
 * store the class in their private wrapper data; otherwise the token is
 * looked up in the method's image with the given generic context.  The
 * resolved class is initialized before being returned.
 */
5058 static inline MonoClass*
5059 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
5063 if (method->wrapper_type != MONO_WRAPPER_NONE)
5064 klass = mono_method_get_wrapper_data (method, token);
5066 klass = mono_class_get_full (method->klass->image, token, context);
5068 mono_class_init (klass);
5073 * Returns TRUE if the JIT should abort inlining because "callee"
5074 * is influenced by security attributes.
/*
 * check_linkdemand:
 *
 *   Evaluate CAS LinkDemand security between CALLER and CALLEE.  Only runs
 * when inlining (cfg->method != caller) and the callee carries declarative
 * security.  An ECMA linkdemand failure emits a call that throws a
 * SecurityException at the call site; other failures record a
 * SECURITY_LINKDEMAND exception on the cfg (without hiding an earlier one).
 */
5077 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
5081 if ((cfg->method != caller) && mono_method_has_declsec (callee)) {
5085 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
5086 if (result == MONO_JIT_SECURITY_OK)
5089 if (result == MONO_JIT_LINKDEMAND_ECMA) {
5090 /* Generate code to throw a SecurityException before the actual call/link */
5091 MonoSecurityManager *secman = mono_security_manager_get_methods ();
5094 NEW_ICONST (cfg, args [0], 4);
5095 NEW_METHODCONST (cfg, args [1], caller);
5096 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
5097 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
5098 /* don't hide previous results */
5099 mono_cfg_set_exception (cfg, MONO_EXCEPTION_SECURITY_LINKDEMAND);
5100 cfg->exception_data = result;
/*
 * throw_exception:
 *
 *   Return (lazily resolving and caching in a static) the managed
 * SecurityManager.ThrowException(Exception) method used to raise security
 * exceptions from JITted code.
 */
5108 throw_exception (void)
5110 static MonoMethod *method = NULL;
5113 MonoSecurityManager *secman = mono_security_manager_get_methods ();
5114 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *
 *   Emit IR that calls SecurityManager.ThrowException () with the
 * pre-created exception object EX, so the exception is raised when the
 * compiled code reaches this point.
 */
5121 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
5123 MonoMethod *thrower = throw_exception ();
5126 EMIT_NEW_PCONST (cfg, args [0], ex);
5127 mono_emit_method_call (cfg, thrower, args, NULL);
5131 * Return the original method is a wrapper is specified. We can only access
5132 * the custom attributes from the original method.
/*
 * get_original_method:
 *
 *   Unwrap METHOD to the method whose custom attributes should be consulted
 * for CoreCLR security checks: non-wrappers are returned as-is,
 * native-to-managed wrappers are treated specially (native callers may call
 * anything -- see FIXME), and other wrappers are mapped back to the method
 * they wrap.
 */
5135 get_original_method (MonoMethod *method)
5137 if (method->wrapper_type == MONO_WRAPPER_NONE)
5140 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
5141 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
5144 /* in other cases we need to find the original method */
5145 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *
 *   CoreCLR security check: if CALLER (unwrapped via get_original_method)
 * is not permitted to access FIELD, emit code that throws the returned
 * security exception at this point.
 */
5149 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
5150 MonoBasicBlock *bblock, unsigned char *ip)
5152 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
5153 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
5155 emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *
 *   CoreCLR security check: if CALLER (unwrapped via get_original_method)
 * is not permitted to call CALLEE, emit code that throws the returned
 * security exception at this point.
 */
5159 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
5160 MonoBasicBlock *bblock, unsigned char *ip)
5162 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
5163 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
5165 emit_throw_exception (cfg, ex);
5169 * Check that the IL instructions at ip are the array initialization
5170 * sequence and return the pointer to the data and the size.
/*
 * initialize_array_data:
 *
 *   Pattern-match the IL sequence
 *     dup; ldtoken <field>; call RuntimeHelpers::InitializeArray
 * at IP (right after a newarr) and, when it matches and the element type is
 * blittable on this target, return a pointer to the field's static RVA data
 * (or the RVA itself for AOT) so the array can be initialized with a memcpy
 * instead of the runtime call.  *OUT_SIZE and *OUT_FIELD_TOKEN report the
 * data size and the matched field token.  Returns NULL when the pattern or
 * the element type does not qualify.
 */
5173 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
5176 * newarr[System.Int32]
5178 * ldtoken field valuetype ...
5179 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* ip[5] == 0x4 checks the ldtoken token's table byte (field token). */
5181 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
5182 guint32 token = read32 (ip + 7);
5183 guint32 field_token = read32 (ip + 2);
5184 guint32 field_index = field_token & 0xffffff;
5186 const char *data_ptr;
5188 MonoMethod *cmethod;
5189 MonoClass *dummy_class;
5190 MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
5196 *out_field_token = field_token;
5198 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* Bail out unless the call really is corlib's RuntimeHelpers.InitializeArray. */
5201 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
5203 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
5204 case MONO_TYPE_BOOLEAN:
5208 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
5209 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
5210 case MONO_TYPE_CHAR:
5220 return NULL; /* stupid ARM FP swapped format */
/* Make sure the computed data size fits within the blob field. */
5230 if (size > mono_type_size (field->type, &dummy_align))
5233 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
5234 if (!method->klass->image->dynamic) {
5235 field_index = read32 (ip + 2) & 0xffffff;
5236 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
5237 data_ptr = mono_image_rva_map (method->klass->image, rva);
5238 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
5239 /* for aot code we do the lookup on load */
5240 if (aot && data_ptr)
5241 return GUINT_TO_POINTER (rva);
5243 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
/* Dynamic (SRE) image: read the data directly from the field. */
5245 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *
 *   Record an InvalidProgramException on CFG with a message naming METHOD
 * and disassembling the offending instruction at IP (or noting an empty
 * method body).  The header is queued on headers_to_free since IP points
 * into it.
 */
5253 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
5255 char *method_fname = mono_method_full_name (method, TRUE);
5257 MonoMethodHeader *header = mono_method_get_header (method);
5259 if (header->code_size == 0)
5260 method_code = g_strdup ("method body is empty.");
5262 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
5263 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
5264 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
5265 g_free (method_fname);
5266 g_free (method_code);
/* The header outlives this call because cfg->exception state may reference it. */
5267 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * set_exception_object:
 *
 *   Record a pre-built managed exception object on CFG (to be thrown when
 * compilation finishes).  The pointer is registered as a GC root since it
 * references a managed object from native cfg state.
 */
5271 set_exception_object (MonoCompile *cfg, MonoException *exception)
5273 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OBJECT_SUPPLIED);
5274 MONO_GC_REGISTER_ROOT_SINGLE (cfg->exception_ptr);
5275 cfg->exception_ptr = exception;
/*
 * generic_class_is_reference_type:
 *
 *   Return whether KLASS is a reference type, resolving generic type
 * variables through the shared-generics context first so that a type
 * parameter instantiated with a reference type is classified correctly.
 */
5279 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
5283 if (cfg->generic_sharing_context)
5284 type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, &klass->byval_arg);
5286 type = &klass->byval_arg;
5287 return MONO_TYPE_IS_REFERENCE (type);
/*
 * emit_stloc_ir:
 *
 *   Emit a store of stack slot *SP into local N.  When the store would be a
 * plain reg-reg move and *SP is a constant that is also the last emitted
 * instruction, just retarget the constant's dreg to the local's register
 * instead of emitting a move.
 */
5291 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
5294 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
5295 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
5296 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
5297 /* Optimize reg-reg moves away */
5299 * Can't optimize other opcodes, since sp[0] might point to
5300 * the last ins of a decomposed opcode.
5302 sp [0]->dreg = (cfg)->locals [n]->dreg;
5304 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
5309 * ldloca inhibits many optimizations so try to get rid of it in common
/*
 * emit_optimized_ldloca_ir:
 *
 *   Peephole for 'ldloca <n>; initobj <type>': instead of taking the
 * local's address (which inhibits many optimizations), directly zero the
 * local -- a NULL store for reference types, VZERO for value types.
 * Returns the IP past the consumed sequence on success (per the inline
 * comments; the return statements are elided in this excerpt).
 */
5312 static inline unsigned char *
5313 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
5322 local = read16 (ip + 2);
/* Only fold when the following initobj is in the same basic block. */
5326 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
5327 gboolean skip = FALSE;
5329 /* From the INITOBJ case */
5330 token = read32 (ip + 2);
5331 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
5332 CHECK_TYPELOAD (klass);
5333 if (generic_class_is_reference_type (cfg, klass)) {
5334 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5335 } else if (MONO_TYPE_IS_REFERENCE (&klass->byval_arg)) {
5336 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5337 } else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
5338 MONO_EMIT_NEW_VZERO (cfg, cfg->locals [local]->dreg, klass);
/*
 * is_exception_class:
 *
 *   Walk CLASS's parent chain and return whether it derives from (or is)
 * System.Exception (loop structure elided in this excerpt).
 */
5351 is_exception_class (MonoClass *class)
5354 if (class == mono_defaults.exception_class)
5356 class = class->parent;
5362 * mono_method_to_ir:
5364 * Translate the .net IL into linear IR.
5367 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
5368 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
5369 guint inline_offset, gboolean is_virtual_call)
5372 MonoInst *ins, **sp, **stack_start;
5373 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
5374 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
5375 MonoMethod *cmethod, *method_definition;
5376 MonoInst **arg_array;
5377 MonoMethodHeader *header;
5379 guint32 token, ins_flag;
5381 MonoClass *constrained_call = NULL;
5382 unsigned char *ip, *end, *target, *err_pos;
5383 static double r8_0 = 0.0;
5384 MonoMethodSignature *sig;
5385 MonoGenericContext *generic_context = NULL;
5386 MonoGenericContainer *generic_container = NULL;
5387 MonoType **param_types;
5388 int i, n, start_new_bblock, dreg;
5389 int num_calls = 0, inline_costs = 0;
5390 int breakpoint_id = 0;
5392 MonoBoolean security, pinvoke;
5393 MonoSecurityManager* secman = NULL;
5394 MonoDeclSecurityActions actions;
5395 GSList *class_inits = NULL;
5396 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
5398 gboolean init_locals, seq_points, skip_dead_blocks;
5400 /* serialization and xdomain stuff may need access to private fields and methods */
5401 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
5402 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
5403 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
5404 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
5405 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
5406 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
5408 dont_verify |= mono_security_get_mode () == MONO_SECURITY_MODE_SMCS_HACK;
5410 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
5411 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
5412 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
5413 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
5415 image = method->klass->image;
5416 header = mono_method_get_header (method);
5418 MonoLoaderError *error;
5420 if ((error = mono_loader_get_last_error ())) {
5421 mono_cfg_set_exception (cfg, error->exception_type);
5423 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
5424 cfg->exception_message = g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name);
5426 goto exception_exit;
5428 generic_container = mono_method_get_generic_container (method);
5429 sig = mono_method_signature (method);
5430 num_args = sig->hasthis + sig->param_count;
5431 ip = (unsigned char*)header->code;
5432 cfg->cil_start = ip;
5433 end = ip + header->code_size;
5434 mono_jit_stats.cil_code_size += header->code_size;
5435 init_locals = header->init_locals;
5437 seq_points = cfg->gen_seq_points && cfg->method == method;
5440 * Methods without init_locals set could cause asserts in various passes
5445 method_definition = method;
5446 while (method_definition->is_inflated) {
5447 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
5448 method_definition = imethod->declaring;
5451 /* SkipVerification is not allowed if core-clr is enabled */
5452 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
5454 dont_verify_stloc = TRUE;
5457 if (mono_debug_using_mono_debugger ())
5458 cfg->keep_cil_nops = TRUE;
5460 if (sig->is_inflated)
5461 generic_context = mono_method_get_context (method);
5462 else if (generic_container)
5463 generic_context = &generic_container->context;
5464 cfg->generic_context = generic_context;
5466 if (!cfg->generic_sharing_context)
5467 g_assert (!sig->has_type_parameters);
5469 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
5470 g_assert (method->is_inflated);
5471 g_assert (mono_method_get_context (method)->method_inst);
5473 if (method->is_inflated && mono_method_get_context (method)->method_inst)
5474 g_assert (sig->generic_param_count);
5476 if (cfg->method == method) {
5477 cfg->real_offset = 0;
5479 cfg->real_offset = inline_offset;
5482 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
5483 cfg->cil_offset_to_bb_len = header->code_size;
5485 cfg->current_method = method;
5487 if (cfg->verbose_level > 2)
5488 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
5490 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
5492 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
5493 for (n = 0; n < sig->param_count; ++n)
5494 param_types [n + sig->hasthis] = sig->params [n];
5495 cfg->arg_types = param_types;
5497 dont_inline = g_list_prepend (dont_inline, method);
5498 if (cfg->method == method) {
5500 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
5501 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
5504 NEW_BBLOCK (cfg, start_bblock);
5505 cfg->bb_entry = start_bblock;
5506 start_bblock->cil_code = NULL;
5507 start_bblock->cil_length = 0;
5510 NEW_BBLOCK (cfg, end_bblock);
5511 cfg->bb_exit = end_bblock;
5512 end_bblock->cil_code = NULL;
5513 end_bblock->cil_length = 0;
5514 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
5515 g_assert (cfg->num_bblocks == 2);
5517 arg_array = cfg->args;
5519 if (header->num_clauses) {
5520 cfg->spvars = g_hash_table_new (NULL, NULL);
5521 cfg->exvars = g_hash_table_new (NULL, NULL);
5523 /* handle exception clauses */
5524 for (i = 0; i < header->num_clauses; ++i) {
5525 MonoBasicBlock *try_bb;
5526 MonoExceptionClause *clause = &header->clauses [i];
5527 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
5528 try_bb->real_offset = clause->try_offset;
5529 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
5530 tblock->real_offset = clause->handler_offset;
5531 tblock->flags |= BB_EXCEPTION_HANDLER;
5533 link_bblock (cfg, try_bb, tblock);
5535 if (*(ip + clause->handler_offset) == CEE_POP)
5536 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
5538 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
5539 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
5540 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
5541 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5542 MONO_ADD_INS (tblock, ins);
5544 /* todo: is a fault block unsafe to optimize? */
5545 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
5546 tblock->flags |= BB_EXCEPTION_UNSAFE;
5550 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
5552 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
5554 /* catch and filter blocks get the exception object on the stack */
5555 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
5556 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5557 MonoInst *dummy_use;
5559 /* mostly like handle_stack_args (), but just sets the input args */
5560 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
5561 tblock->in_scount = 1;
5562 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5563 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5566 * Add a dummy use for the exvar so its liveness info will be
5570 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
5572 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5573 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
5574 tblock->flags |= BB_EXCEPTION_HANDLER;
5575 tblock->real_offset = clause->data.filter_offset;
5576 tblock->in_scount = 1;
5577 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5578 /* The filter block shares the exvar with the handler block */
5579 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5580 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5581 MONO_ADD_INS (tblock, ins);
5585 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
5586 clause->data.catch_class &&
5587 cfg->generic_sharing_context &&
5588 mono_class_check_context_used (clause->data.catch_class)) {
5590 * In shared generic code with catch
5591 * clauses containing type variables
5592 * the exception handling code has to
5593 * be able to get to the rgctx.
5594 * Therefore we have to make sure that
5595 * the vtable/mrgctx argument (for
5596 * static or generic methods) or the
5597 * "this" argument (for non-static
5598 * methods) are live.
5600 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5601 mini_method_get_context (method)->method_inst ||
5602 method->klass->valuetype) {
5603 mono_get_vtable_var (cfg);
5605 MonoInst *dummy_use;
5607 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
5612 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
5613 cfg->cbb = start_bblock;
5614 cfg->args = arg_array;
5615 mono_save_args (cfg, sig, inline_args);
5618 /* FIRST CODE BLOCK */
5619 NEW_BBLOCK (cfg, bblock);
5620 bblock->cil_code = ip;
5624 ADD_BBLOCK (cfg, bblock);
5626 if (cfg->method == method) {
5627 breakpoint_id = mono_debugger_method_has_breakpoint (method);
5628 if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
5629 MONO_INST_NEW (cfg, ins, OP_BREAK);
5630 MONO_ADD_INS (bblock, ins);
5634 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5635 secman = mono_security_manager_get_methods ();
5637 security = (secman && mono_method_has_declsec (method));
5638 /* at this point having security doesn't mean we have any code to generate */
5639 if (security && (cfg->method == method)) {
5640 /* Only Demand, NonCasDemand and DemandChoice require code generation.
5641 * And we do not want to enter the next section (with allocation) if we
5642 * have nothing to generate */
5643 security = mono_declsec_get_demands (method, &actions);
5646 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
5647 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
5649 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5650 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5651 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
5653 /* unless the method or its class has the [SuppressUnmanagedCodeSecurity] attribute */
5654 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5658 mono_custom_attrs_free (custom);
5661 custom = mono_custom_attrs_from_class (wrapped->klass);
5662 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5666 mono_custom_attrs_free (custom);
5669 /* not a P/Invoke after all */
5674 if ((init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
5675 /* we use a separate basic block for the initialization code */
5676 NEW_BBLOCK (cfg, init_localsbb);
5677 cfg->bb_init = init_localsbb;
5678 init_localsbb->real_offset = cfg->real_offset;
5679 start_bblock->next_bb = init_localsbb;
5680 init_localsbb->next_bb = bblock;
5681 link_bblock (cfg, start_bblock, init_localsbb);
5682 link_bblock (cfg, init_localsbb, bblock);
5684 cfg->cbb = init_localsbb;
5686 start_bblock->next_bb = bblock;
5687 link_bblock (cfg, start_bblock, bblock);
5690 /* at this point we know, if security is TRUE, that some code needs to be generated */
5691 if (security && (cfg->method == method)) {
5694 mono_jit_stats.cas_demand_generation++;
5696 if (actions.demand.blob) {
5697 /* Add code for SecurityAction.Demand */
5698 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
5699 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
5700 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5701 mono_emit_method_call (cfg, secman->demand, args, NULL);
5703 if (actions.noncasdemand.blob) {
5704 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
5705 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
5706 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
5707 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
5708 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5709 mono_emit_method_call (cfg, secman->demand, args, NULL);
5711 if (actions.demandchoice.blob) {
5712 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
5713 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
5714 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
5715 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
5716 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
5720 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
5722 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
5725 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
5726 /* check if this is native code, e.g. an icall or a p/invoke */
5727 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
5728 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5730 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
5731 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
5733 /* if this is a native call then it can only be JITted from platform code */
5734 if ((icall || pinvk) && method->klass && method->klass->image) {
5735 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
5736 MonoException *ex = icall ? mono_get_exception_security () :
5737 mono_get_exception_method_access ();
5738 emit_throw_exception (cfg, ex);
5745 if (header->code_size == 0)
5748 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
5753 if (cfg->method == method)
5754 mono_debug_init_method (cfg, bblock, breakpoint_id);
5756 for (n = 0; n < header->num_locals; ++n) {
5757 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
5762 /* We force the vtable variable here for all shared methods
5763 for the possibility that they might show up in a stack
5764 trace where their exact instantiation is needed. */
5765 if (cfg->generic_sharing_context && method == cfg->method) {
5766 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5767 mini_method_get_context (method)->method_inst ||
5768 method->klass->valuetype) {
5769 mono_get_vtable_var (cfg);
5771 /* FIXME: Is there a better way to do this?
5772 We need the variable live for the duration
5773 of the whole method. */
5774 cfg->args [0]->flags |= MONO_INST_INDIRECT;
5778 /* add a check for this != NULL to inlined methods */
5779 if (is_virtual_call) {
5782 NEW_ARGLOAD (cfg, arg_ins, 0);
5783 MONO_ADD_INS (cfg->cbb, arg_ins);
5784 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
5787 skip_dead_blocks = !dont_verify;
5788 if (skip_dead_blocks) {
5789 original_bb = bb = mono_basic_block_split (method, &error);
5790 if (!mono_error_ok (&error)) {
5791 mono_error_cleanup (&error);
5797 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
5798 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
5801 start_new_bblock = 0;
5804 if (cfg->method == method)
5805 cfg->real_offset = ip - header->code;
5807 cfg->real_offset = inline_offset;
5812 if (start_new_bblock) {
5813 bblock->cil_length = ip - bblock->cil_code;
5814 if (start_new_bblock == 2) {
5815 g_assert (ip == tblock->cil_code);
5817 GET_BBLOCK (cfg, tblock, ip);
5819 bblock->next_bb = tblock;
5822 start_new_bblock = 0;
5823 for (i = 0; i < bblock->in_scount; ++i) {
5824 if (cfg->verbose_level > 3)
5825 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5826 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5830 g_slist_free (class_inits);
5833 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
5834 link_bblock (cfg, bblock, tblock);
5835 if (sp != stack_start) {
5836 handle_stack_args (cfg, stack_start, sp - stack_start);
5838 CHECK_UNVERIFIABLE (cfg);
5840 bblock->next_bb = tblock;
5843 for (i = 0; i < bblock->in_scount; ++i) {
5844 if (cfg->verbose_level > 3)
5845 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5846 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5849 g_slist_free (class_inits);
5854 if (skip_dead_blocks) {
5855 int ip_offset = ip - header->code;
5857 if (ip_offset == bb->end)
5861 int op_size = mono_opcode_size (ip, end);
5862 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
5864 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
5866 if (ip_offset + op_size == bb->end) {
5867 MONO_INST_NEW (cfg, ins, OP_NOP);
5868 MONO_ADD_INS (bblock, ins);
5869 start_new_bblock = 1;
5877 * Sequence points are points where the debugger can place a breakpoint.
5878 * Currently, we generate these automatically at points where the IL
5881 if (seq_points && sp == stack_start) {
5882 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
5883 MONO_ADD_INS (cfg->cbb, ins);
5886 bblock->real_offset = cfg->real_offset;
5888 if ((cfg->method == method) && cfg->coverage_info) {
5889 guint32 cil_offset = ip - header->code;
5890 cfg->coverage_info->data [cil_offset].cil_code = ip;
5892 /* TODO: Use an increment here */
5893 #if defined(TARGET_X86)
5894 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
5895 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
5897 MONO_ADD_INS (cfg->cbb, ins);
5899 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
5900 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
5904 if (cfg->verbose_level > 3)
5905 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
5909 if (cfg->keep_cil_nops)
5910 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
5912 MONO_INST_NEW (cfg, ins, OP_NOP);
5914 MONO_ADD_INS (bblock, ins);
5917 if (should_insert_brekpoint (cfg->method))
5918 MONO_INST_NEW (cfg, ins, OP_BREAK);
5920 MONO_INST_NEW (cfg, ins, OP_NOP);
5922 MONO_ADD_INS (bblock, ins);
5928 CHECK_STACK_OVF (1);
5929 n = (*ip)-CEE_LDARG_0;
5931 EMIT_NEW_ARGLOAD (cfg, ins, n);
5939 CHECK_STACK_OVF (1);
5940 n = (*ip)-CEE_LDLOC_0;
5942 EMIT_NEW_LOCLOAD (cfg, ins, n);
5951 n = (*ip)-CEE_STLOC_0;
5954 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
5956 emit_stloc_ir (cfg, sp, header, n);
5963 CHECK_STACK_OVF (1);
5966 EMIT_NEW_ARGLOAD (cfg, ins, n);
5972 CHECK_STACK_OVF (1);
5975 NEW_ARGLOADA (cfg, ins, n);
5976 MONO_ADD_INS (cfg->cbb, ins);
5986 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
5988 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
5993 CHECK_STACK_OVF (1);
5996 EMIT_NEW_LOCLOAD (cfg, ins, n);
6000 case CEE_LDLOCA_S: {
6001 unsigned char *tmp_ip;
6003 CHECK_STACK_OVF (1);
6004 CHECK_LOCAL (ip [1]);
6006 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
6012 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
6021 CHECK_LOCAL (ip [1]);
6022 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
6024 emit_stloc_ir (cfg, sp, header, ip [1]);
6029 CHECK_STACK_OVF (1);
6030 EMIT_NEW_PCONST (cfg, ins, NULL);
6031 ins->type = STACK_OBJ;
6036 CHECK_STACK_OVF (1);
6037 EMIT_NEW_ICONST (cfg, ins, -1);
6050 CHECK_STACK_OVF (1);
6051 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
6057 CHECK_STACK_OVF (1);
6059 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
6065 CHECK_STACK_OVF (1);
6066 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
6072 CHECK_STACK_OVF (1);
6073 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6074 ins->type = STACK_I8;
6075 ins->dreg = alloc_dreg (cfg, STACK_I8);
6077 ins->inst_l = (gint64)read64 (ip);
6078 MONO_ADD_INS (bblock, ins);
6084 gboolean use_aotconst = FALSE;
6086 #ifdef TARGET_POWERPC
6087 /* FIXME: Clean this up */
6088 if (cfg->compile_aot)
6089 use_aotconst = TRUE;
6092 /* FIXME: we should really allocate this only late in the compilation process */
6093 f = mono_domain_alloc (cfg->domain, sizeof (float));
6095 CHECK_STACK_OVF (1);
6101 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
6103 dreg = alloc_freg (cfg);
6104 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
6105 ins->type = STACK_R8;
6107 MONO_INST_NEW (cfg, ins, OP_R4CONST);
6108 ins->type = STACK_R8;
6109 ins->dreg = alloc_dreg (cfg, STACK_R8);
6111 MONO_ADD_INS (bblock, ins);
6121 gboolean use_aotconst = FALSE;
6123 #ifdef TARGET_POWERPC
6124 /* FIXME: Clean this up */
6125 if (cfg->compile_aot)
6126 use_aotconst = TRUE;
6129 /* FIXME: we should really allocate this only late in the compilation process */
6130 d = mono_domain_alloc (cfg->domain, sizeof (double));
6132 CHECK_STACK_OVF (1);
6138 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
6140 dreg = alloc_freg (cfg);
6141 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
6142 ins->type = STACK_R8;
6144 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6145 ins->type = STACK_R8;
6146 ins->dreg = alloc_dreg (cfg, STACK_R8);
6148 MONO_ADD_INS (bblock, ins);
6157 MonoInst *temp, *store;
6159 CHECK_STACK_OVF (1);
6163 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
6164 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
6166 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6169 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6182 if (sp [0]->type == STACK_R8)
6183 /* we need to pop the value from the x86 FP stack */
6184 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
6193 if (stack_start != sp)
6195 token = read32 (ip + 1);
6196 /* FIXME: check the signature matches */
6197 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6199 if (!cmethod || mono_loader_get_last_error ())
6202 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
6203 GENERIC_SHARING_FAILURE (CEE_JMP);
6205 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
6206 CHECK_CFG_EXCEPTION;
6208 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6210 MonoMethodSignature *fsig = mono_method_signature (cmethod);
6213 /* Handle tail calls similarly to calls */
6214 n = fsig->param_count + fsig->hasthis;
6216 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
6217 call->method = cmethod;
6218 call->tail_call = TRUE;
6219 call->signature = mono_method_signature (cmethod);
6220 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
6221 call->inst.inst_p0 = cmethod;
6222 for (i = 0; i < n; ++i)
6223 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
6225 mono_arch_emit_call (cfg, call);
6226 MONO_ADD_INS (bblock, (MonoInst*)call);
6229 for (i = 0; i < num_args; ++i)
6230 /* Prevent arguments from being optimized away */
6231 arg_array [i]->flags |= MONO_INST_VOLATILE;
6233 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6234 ins = (MonoInst*)call;
6235 ins->inst_p0 = cmethod;
6236 MONO_ADD_INS (bblock, ins);
6240 start_new_bblock = 1;
6245 case CEE_CALLVIRT: {
6246 MonoInst *addr = NULL;
6247 MonoMethodSignature *fsig = NULL;
6249 int virtual = *ip == CEE_CALLVIRT;
6250 int calli = *ip == CEE_CALLI;
6251 gboolean pass_imt_from_rgctx = FALSE;
6252 MonoInst *imt_arg = NULL;
6253 gboolean pass_vtable = FALSE;
6254 gboolean pass_mrgctx = FALSE;
6255 MonoInst *vtable_arg = NULL;
6256 gboolean check_this = FALSE;
6257 gboolean supported_tail_call = FALSE;
6260 token = read32 (ip + 1);
6267 if (method->wrapper_type != MONO_WRAPPER_NONE)
6268 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
6270 fsig = mono_metadata_parse_signature (image, token);
6272 n = fsig->param_count + fsig->hasthis;
6274 if (method->dynamic && fsig->pinvoke) {
6278 * This is a call through a function pointer using a pinvoke
6279 * signature. Have to create a wrapper and call that instead.
6280 * FIXME: This is very slow, need to create a wrapper at JIT time
6281 * instead based on the signature.
6283 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
6284 EMIT_NEW_PCONST (cfg, args [1], fsig);
6286 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
6289 MonoMethod *cil_method;
6291 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6292 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
6293 cil_method = cmethod;
6294 } else if (constrained_call) {
6295 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
6297 * This is needed since get_method_constrained can't find
6298 * the method in klass representing a type var.
6299 * The type var is guaranteed to be a reference type in this
6302 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6303 cil_method = cmethod;
6304 g_assert (!cmethod->klass->valuetype);
6306 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
6309 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6310 cil_method = cmethod;
6313 if (!cmethod || mono_loader_get_last_error ())
6315 if (!dont_verify && !cfg->skip_visibility) {
6316 MonoMethod *target_method = cil_method;
6317 if (method->is_inflated) {
6318 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
6320 if (!mono_method_can_access_method (method_definition, target_method) &&
6321 !mono_method_can_access_method (method, cil_method))
6322 METHOD_ACCESS_FAILURE;
6325 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
6326 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
6328 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
6329 /* MS.NET seems to silently convert this to a callvirt */
6334 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
6335 * converts to a callvirt.
6337 * tests/bug-515884.il is an example of this behavior
6339 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
6340 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
6341 if (!virtual && cmethod->klass->marshalbyref && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
6345 if (!cmethod->klass->inited)
6346 if (!mono_class_init (cmethod->klass))
6349 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
6350 mini_class_is_system_array (cmethod->klass)) {
6351 array_rank = cmethod->klass->rank;
6352 fsig = mono_method_signature (cmethod);
6354 fsig = mono_method_signature (cmethod);
6359 if (fsig->pinvoke) {
6360 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
6361 check_for_pending_exc, FALSE);
6362 fsig = mono_method_signature (wrapper);
6363 } else if (constrained_call) {
6364 fsig = mono_method_signature (cmethod);
6366 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
6370 mono_save_token_info (cfg, image, token, cil_method);
6372 n = fsig->param_count + fsig->hasthis;
6374 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
6375 if (check_linkdemand (cfg, method, cmethod))
6377 CHECK_CFG_EXCEPTION;
6380 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
6381 g_assert_not_reached ();
6384 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
6387 if (!cfg->generic_sharing_context && cmethod)
6388 g_assert (!mono_method_check_context_used (cmethod));
6392 //g_assert (!virtual || fsig->hasthis);
6396 if (constrained_call) {
6398 * We have the `constrained.' prefix opcode.
6400 if (constrained_call->valuetype && !cmethod->klass->valuetype) {
6402 * The type parameter is instantiated as a valuetype,
6403 * but that type doesn't override the method we're
6404 * calling, so we need to box `this'.
6406 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
6407 ins->klass = constrained_call;
6408 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call));
6409 CHECK_CFG_EXCEPTION;
6410 } else if (!constrained_call->valuetype) {
6411 int dreg = alloc_preg (cfg);
6414 * The type parameter is instantiated as a reference
6415 * type. We have a managed pointer on the stack, so
6416 * we need to dereference it here.
6418 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
6419 ins->type = STACK_OBJ;
6421 } else if (cmethod->klass->valuetype)
6423 constrained_call = NULL;
6426 if (*ip != CEE_CALLI && check_call_signature (cfg, fsig, sp))
6430 * If the callee is a shared method, then its static cctor
6431 * might not get called after the call was patched.
6433 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
6434 emit_generic_class_init (cfg, cmethod->klass);
6435 CHECK_TYPELOAD (cmethod->klass);
6438 if (cmethod && ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
6439 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
6440 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6441 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
6442 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6445 * Pass vtable iff target method might
6446 * be shared, which means that sharing
6447 * is enabled for its class and its
6448 * context is sharable (and it's not a
6451 if (sharing_enabled && context_sharable &&
6452 !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
6456 if (cmethod && mini_method_get_context (cmethod) &&
6457 mini_method_get_context (cmethod)->method_inst) {
6458 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6459 MonoGenericContext *context = mini_method_get_context (cmethod);
6460 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6462 g_assert (!pass_vtable);
6464 if (sharing_enabled && context_sharable)
6468 if (cfg->generic_sharing_context && cmethod) {
6469 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
6471 context_used = mono_method_check_context_used (cmethod);
6473 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
6474 /* Generic method interface
6475 calls are resolved via a
6476 helper function and don't
6478 if (!cmethod_context || !cmethod_context->method_inst)
6479 pass_imt_from_rgctx = TRUE;
6483 * If a shared method calls another
6484 * shared method then the caller must
6485 * have a generic sharing context
6486 * because the magic trampoline
6487 * requires it. FIXME: We shouldn't
6488 * have to force the vtable/mrgctx
6489 * variable here. Instead there
6490 * should be a flag in the cfg to
6491 * request a generic sharing context.
6494 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
6495 mono_get_vtable_var (cfg);
6500 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
6502 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
6504 CHECK_TYPELOAD (cmethod->klass);
6505 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
6510 g_assert (!vtable_arg);
6512 if (!cfg->compile_aot) {
6514 * emit_get_rgctx_method () calls mono_class_vtable () so check
6515 * for type load errors before.
6517 mono_class_setup_vtable (cmethod->klass);
6518 CHECK_TYPELOAD (cmethod->klass);
6521 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
6523 /* !marshalbyref is needed to properly handle generic methods + remoting */
6524 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
6525 MONO_METHOD_IS_FINAL (cmethod)) &&
6526 !cmethod->klass->marshalbyref) {
6533 if (pass_imt_from_rgctx) {
6534 g_assert (!pass_vtable);
6537 imt_arg = emit_get_rgctx_method (cfg, context_used,
6538 cmethod, MONO_RGCTX_INFO_METHOD);
6542 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
6544 /* Calling virtual generic methods */
6545 if (cmethod && virtual &&
6546 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
6547 !(MONO_METHOD_IS_FINAL (cmethod) &&
6548 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
6549 mono_method_signature (cmethod)->generic_param_count) {
6550 MonoInst *this_temp, *this_arg_temp, *store;
6551 MonoInst *iargs [4];
6553 g_assert (mono_method_signature (cmethod)->is_inflated);
6555 /* Prevent inlining of methods that contain indirect calls */
6558 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
6559 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt) {
6560 g_assert (!imt_arg);
6562 g_assert (cmethod->is_inflated);
6563 imt_arg = emit_get_rgctx_method (cfg, context_used,
6564 cmethod, MONO_RGCTX_INFO_METHOD);
6565 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, sp [0], imt_arg);
6569 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
6570 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
6571 MONO_ADD_INS (bblock, store);
6573 /* FIXME: This should be a managed pointer */
6574 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
6576 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
6577 iargs [1] = emit_get_rgctx_method (cfg, context_used,
6578 cmethod, MONO_RGCTX_INFO_METHOD);
6579 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
6580 addr = mono_emit_jit_icall (cfg,
6581 mono_helper_compile_generic_method, iargs);
6583 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
6585 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6588 if (!MONO_TYPE_IS_VOID (fsig->ret))
6589 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6591 CHECK_CFG_EXCEPTION;
6598 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6599 supported_tail_call = cmethod && MONO_ARCH_USE_OP_TAIL_CALL (mono_method_signature (method), mono_method_signature (cmethod));
6601 supported_tail_call = cmethod && mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
6605 /* FIXME: runtime generic context pointer for jumps? */
6606 /* FIXME: handle this for generic sharing eventually */
6607 if ((ins_flag & MONO_INST_TAILCALL) && !cfg->generic_sharing_context && !vtable_arg && cmethod && (*ip == CEE_CALL) && supported_tail_call) {
6610 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6613 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6614 /* Handle tail calls similarly to calls */
6615 call = mono_emit_call_args (cfg, mono_method_signature (cmethod), sp, FALSE, FALSE, TRUE);
6617 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6618 call->tail_call = TRUE;
6619 call->method = cmethod;
6620 call->signature = mono_method_signature (cmethod);
6623 * We implement tail calls by storing the actual arguments into the
6624 * argument variables, then emitting a CEE_JMP.
6626 for (i = 0; i < n; ++i) {
6627 /* Prevent argument from being register allocated */
6628 arg_array [i]->flags |= MONO_INST_VOLATILE;
6629 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6633 ins = (MonoInst*)call;
6634 ins->inst_p0 = cmethod;
6635 ins->inst_p1 = arg_array [0];
6636 MONO_ADD_INS (bblock, ins);
6637 link_bblock (cfg, bblock, end_bblock);
6638 start_new_bblock = 1;
6640 CHECK_CFG_EXCEPTION;
6642 /* skip CEE_RET as well */
6648 /* Conversion to a JIT intrinsic */
6649 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
6651 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6652 type_to_eval_stack_type ((cfg), fsig->ret, ins);
6657 CHECK_CFG_EXCEPTION;
6665 if ((cfg->opt & MONO_OPT_INLINE) && cmethod &&
6666 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
6667 mono_method_check_inlining (cfg, cmethod) &&
6668 !g_list_find (dont_inline, cmethod)) {
6670 gboolean allways = FALSE;
6672 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
6673 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
6674 /* Prevent inlining of methods that call wrappers */
6676 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
6680 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, allways))) {
6682 cfg->real_offset += 5;
6685 if (!MONO_TYPE_IS_VOID (fsig->ret))
6686 /* *sp is already set by inline_method */
6689 inline_costs += costs;
6695 inline_costs += 10 * num_calls++;
6697 /* Tail recursion elimination */
6698 if ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
6699 gboolean has_vtargs = FALSE;
6702 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6705 /* keep it simple */
6706 for (i = fsig->param_count - 1; i >= 0; i--) {
6707 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
6712 for (i = 0; i < n; ++i)
6713 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6714 MONO_INST_NEW (cfg, ins, OP_BR);
6715 MONO_ADD_INS (bblock, ins);
6716 tblock = start_bblock->out_bb [0];
6717 link_bblock (cfg, bblock, tblock);
6718 ins->inst_target_bb = tblock;
6719 start_new_bblock = 1;
6721 /* skip the CEE_RET, too */
6722 if (ip_in_bb (cfg, bblock, ip + 5))
6732 /* Generic sharing */
6733 /* FIXME: only do this for generic methods if
6734 they are not shared! */
6735 if (context_used && !imt_arg && !array_rank &&
6736 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
6737 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
6738 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
6739 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
6742 g_assert (cfg->generic_sharing_context && cmethod);
6746 * We are compiling a call to a
6747 * generic method from shared code,
6748 * which means that we have to look up
6749 * the method in the rgctx and do an
6752 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
6755 /* Indirect calls */
6757 g_assert (!imt_arg);
6759 if (*ip == CEE_CALL)
6760 g_assert (context_used);
6761 else if (*ip == CEE_CALLI)
6762 g_assert (!vtable_arg);
6764 /* FIXME: what the hell is this??? */
6765 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
6766 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
6768 /* Prevent inlining of methods with indirect calls */
6773 int rgctx_reg = mono_alloc_preg (cfg);
6775 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
6776 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6777 call = (MonoCallInst*)ins;
6778 set_rgctx_arg (cfg, call, rgctx_reg, vtable_arg);
6780 if (addr->opcode == OP_AOTCONST && addr->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6782 * Instead of emitting an indirect call, emit a direct call
6783 * with the contents of the aotconst as the patch info.
6785 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_p0, fsig, sp);
6787 } else if (addr->opcode == OP_GOT_ENTRY && addr->inst_right->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6788 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_right->inst_left, fsig, sp);
6791 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6794 if (!MONO_TYPE_IS_VOID (fsig->ret))
6795 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6797 CHECK_CFG_EXCEPTION;
6808 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
6809 if (sp [fsig->param_count]->type == STACK_OBJ) {
6810 MonoInst *iargs [2];
6813 iargs [1] = sp [fsig->param_count];
6815 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
6818 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
6819 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, sp [fsig->param_count]->dreg);
6820 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
6821 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6823 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
6826 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
6827 if (!cmethod->klass->element_class->valuetype && !readonly)
6828 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
6829 CHECK_TYPELOAD (cmethod->klass);
6832 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6835 g_assert_not_reached ();
6838 CHECK_CFG_EXCEPTION;
6845 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
6847 if (!MONO_TYPE_IS_VOID (fsig->ret))
6848 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6850 CHECK_CFG_EXCEPTION;
6860 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL,
6862 } else if (imt_arg) {
6863 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, imt_arg);
6865 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, NULL);
6868 if (!MONO_TYPE_IS_VOID (fsig->ret))
6869 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6871 CHECK_CFG_EXCEPTION;
6878 if (cfg->method != method) {
6879 /* return from inlined method */
6881 * If in_count == 0, that means the ret is unreachable due to
6882 * being preceded by a throw. In that case, inline_method () will
6883 * handle setting the return value
6884 * (test case: test_0_inline_throw ()).
6886 if (return_var && cfg->cbb->in_count) {
6890 //g_assert (returnvar != -1);
6891 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
6892 cfg->ret_var_set = TRUE;
6896 MonoType *ret_type = mono_method_signature (method)->ret;
6900 * Place a seq point here too even though the IL stack is not
6901 * empty, so a step over on
6904 * will work correctly.
6906 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
6907 MONO_ADD_INS (cfg->cbb, ins);
6910 g_assert (!return_var);
6913 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
6916 if (!cfg->vret_addr) {
6919 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
6921 EMIT_NEW_RETLOADA (cfg, ret_addr);
6923 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
6924 ins->klass = mono_class_from_mono_type (ret_type);
6927 #ifdef MONO_ARCH_SOFT_FLOAT
6928 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
6929 MonoInst *iargs [1];
6933 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
6934 mono_arch_emit_setret (cfg, method, conv);
6936 mono_arch_emit_setret (cfg, method, *sp);
6939 mono_arch_emit_setret (cfg, method, *sp);
6944 if (sp != stack_start)
6946 MONO_INST_NEW (cfg, ins, OP_BR);
6948 ins->inst_target_bb = end_bblock;
6949 MONO_ADD_INS (bblock, ins);
6950 link_bblock (cfg, bblock, end_bblock);
6951 start_new_bblock = 1;
6955 MONO_INST_NEW (cfg, ins, OP_BR);
6957 target = ip + 1 + (signed char)(*ip);
6959 GET_BBLOCK (cfg, tblock, target);
6960 link_bblock (cfg, bblock, tblock);
6961 ins->inst_target_bb = tblock;
6962 if (sp != stack_start) {
6963 handle_stack_args (cfg, stack_start, sp - stack_start);
6965 CHECK_UNVERIFIABLE (cfg);
6967 MONO_ADD_INS (bblock, ins);
6968 start_new_bblock = 1;
6969 inline_costs += BRANCH_COST;
6983 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
6985 target = ip + 1 + *(signed char*)ip;
6991 inline_costs += BRANCH_COST;
6995 MONO_INST_NEW (cfg, ins, OP_BR);
6998 target = ip + 4 + (gint32)read32(ip);
7000 GET_BBLOCK (cfg, tblock, target);
7001 link_bblock (cfg, bblock, tblock);
7002 ins->inst_target_bb = tblock;
7003 if (sp != stack_start) {
7004 handle_stack_args (cfg, stack_start, sp - stack_start);
7006 CHECK_UNVERIFIABLE (cfg);
7009 MONO_ADD_INS (bblock, ins);
7011 start_new_bblock = 1;
7012 inline_costs += BRANCH_COST;
7019 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
7020 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
7021 guint32 opsize = is_short ? 1 : 4;
7023 CHECK_OPSIZE (opsize);
7025 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
7028 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
7033 GET_BBLOCK (cfg, tblock, target);
7034 link_bblock (cfg, bblock, tblock);
7035 GET_BBLOCK (cfg, tblock, ip);
7036 link_bblock (cfg, bblock, tblock);
7038 if (sp != stack_start) {
7039 handle_stack_args (cfg, stack_start, sp - stack_start);
7040 CHECK_UNVERIFIABLE (cfg);
7043 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
7044 cmp->sreg1 = sp [0]->dreg;
7045 type_from_op (cmp, sp [0], NULL);
7048 #if SIZEOF_REGISTER == 4
7049 if (cmp->opcode == OP_LCOMPARE_IMM) {
7050 /* Convert it to OP_LCOMPARE */
7051 MONO_INST_NEW (cfg, ins, OP_I8CONST);
7052 ins->type = STACK_I8;
7053 ins->dreg = alloc_dreg (cfg, STACK_I8);
7055 MONO_ADD_INS (bblock, ins);
7056 cmp->opcode = OP_LCOMPARE;
7057 cmp->sreg2 = ins->dreg;
7060 MONO_ADD_INS (bblock, cmp);
7062 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
7063 type_from_op (ins, sp [0], NULL);
7064 MONO_ADD_INS (bblock, ins);
7065 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
7066 GET_BBLOCK (cfg, tblock, target);
7067 ins->inst_true_bb = tblock;
7068 GET_BBLOCK (cfg, tblock, ip);
7069 ins->inst_false_bb = tblock;
7070 start_new_bblock = 2;
7073 inline_costs += BRANCH_COST;
7088 MONO_INST_NEW (cfg, ins, *ip);
7090 target = ip + 4 + (gint32)read32(ip);
7096 inline_costs += BRANCH_COST;
7100 MonoBasicBlock **targets;
7101 MonoBasicBlock *default_bblock;
7102 MonoJumpInfoBBTable *table;
7103 int offset_reg = alloc_preg (cfg);
7104 int target_reg = alloc_preg (cfg);
7105 int table_reg = alloc_preg (cfg);
7106 int sum_reg = alloc_preg (cfg);
7107 gboolean use_op_switch;
7111 n = read32 (ip + 1);
7114 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
7118 CHECK_OPSIZE (n * sizeof (guint32));
7119 target = ip + n * sizeof (guint32);
7121 GET_BBLOCK (cfg, default_bblock, target);
7122 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
7124 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
7125 for (i = 0; i < n; ++i) {
7126 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
7127 targets [i] = tblock;
7128 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
7132 if (sp != stack_start) {
7134 * Link the current bb with the targets as well, so handle_stack_args
7135 * will set their in_stack correctly.
7137 link_bblock (cfg, bblock, default_bblock);
7138 for (i = 0; i < n; ++i)
7139 link_bblock (cfg, bblock, targets [i]);
7141 handle_stack_args (cfg, stack_start, sp - stack_start);
7143 CHECK_UNVERIFIABLE (cfg);
7146 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
7147 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
7150 for (i = 0; i < n; ++i)
7151 link_bblock (cfg, bblock, targets [i]);
7153 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
7154 table->table = targets;
7155 table->table_size = n;
7157 use_op_switch = FALSE;
7159 /* ARM implements SWITCH statements differently */
7160 /* FIXME: Make it use the generic implementation */
7161 if (!cfg->compile_aot)
7162 use_op_switch = TRUE;
7165 if (COMPILE_LLVM (cfg))
7166 use_op_switch = TRUE;
7168 cfg->cbb->has_jump_table = 1;
7170 if (use_op_switch) {
7171 MONO_INST_NEW (cfg, ins, OP_SWITCH);
7172 ins->sreg1 = src1->dreg;
7173 ins->inst_p0 = table;
7174 ins->inst_many_bb = targets;
7175 ins->klass = GUINT_TO_POINTER (n);
7176 MONO_ADD_INS (cfg->cbb, ins);
7178 if (sizeof (gpointer) == 8)
7179 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
7181 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
7183 #if SIZEOF_REGISTER == 8
7184 /* The upper word might not be zero, and we add it to a 64 bit address later */
7185 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
7188 if (cfg->compile_aot) {
7189 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
7191 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
7192 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
7193 ins->inst_p0 = table;
7194 ins->dreg = table_reg;
7195 MONO_ADD_INS (cfg->cbb, ins);
7198 /* FIXME: Use load_memindex */
7199 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
7200 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
7201 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
7203 start_new_bblock = 1;
7204 inline_costs += (BRANCH_COST * 2);
7224 dreg = alloc_freg (cfg);
7227 dreg = alloc_lreg (cfg);
7230 dreg = alloc_preg (cfg);
7233 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
7234 ins->type = ldind_type [*ip - CEE_LDIND_I1];
7235 ins->flags |= ins_flag;
7237 MONO_ADD_INS (bblock, ins);
7252 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
7253 ins->flags |= ins_flag;
7255 MONO_ADD_INS (bblock, ins);
7257 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
7258 emit_write_barrier (cfg, sp [0], sp [1], -1);
7267 MONO_INST_NEW (cfg, ins, (*ip));
7269 ins->sreg1 = sp [0]->dreg;
7270 ins->sreg2 = sp [1]->dreg;
7271 type_from_op (ins, sp [0], sp [1]);
7273 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7275 /* Use the immediate opcodes if possible */
7276 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
7277 int imm_opcode = mono_op_to_op_imm (ins->opcode);
7278 if (imm_opcode != -1) {
7279 ins->opcode = imm_opcode;
7280 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
7283 sp [1]->opcode = OP_NOP;
7287 MONO_ADD_INS ((cfg)->cbb, (ins));
7289 *sp++ = mono_decompose_opcode (cfg, ins);
7306 MONO_INST_NEW (cfg, ins, (*ip));
7308 ins->sreg1 = sp [0]->dreg;
7309 ins->sreg2 = sp [1]->dreg;
7310 type_from_op (ins, sp [0], sp [1]);
7312 ADD_WIDEN_OP (ins, sp [0], sp [1]);
7313 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7315 /* FIXME: Pass opcode to is_inst_imm */
7317 /* Use the immediate opcodes if possible */
7318 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
7321 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
7322 if (imm_opcode != -1) {
7323 ins->opcode = imm_opcode;
7324 if (sp [1]->opcode == OP_I8CONST) {
7325 #if SIZEOF_REGISTER == 8
7326 ins->inst_imm = sp [1]->inst_l;
7328 ins->inst_ls_word = sp [1]->inst_ls_word;
7329 ins->inst_ms_word = sp [1]->inst_ms_word;
7333 ins->inst_imm = (gssize)(sp [1]->inst_c0);
7336 /* Might be followed by an instruction added by ADD_WIDEN_OP */
7337 if (sp [1]->next == NULL)
7338 sp [1]->opcode = OP_NOP;
7341 MONO_ADD_INS ((cfg)->cbb, (ins));
7343 *sp++ = mono_decompose_opcode (cfg, ins);
7356 case CEE_CONV_OVF_I8:
7357 case CEE_CONV_OVF_U8:
7361 /* Special case this earlier so we have long constants in the IR */
7362 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
7363 int data = sp [-1]->inst_c0;
7364 sp [-1]->opcode = OP_I8CONST;
7365 sp [-1]->type = STACK_I8;
7366 #if SIZEOF_REGISTER == 8
7367 if ((*ip) == CEE_CONV_U8)
7368 sp [-1]->inst_c0 = (guint32)data;
7370 sp [-1]->inst_c0 = data;
7372 sp [-1]->inst_ls_word = data;
7373 if ((*ip) == CEE_CONV_U8)
7374 sp [-1]->inst_ms_word = 0;
7376 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
7378 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
7385 case CEE_CONV_OVF_I4:
7386 case CEE_CONV_OVF_I1:
7387 case CEE_CONV_OVF_I2:
7388 case CEE_CONV_OVF_I:
7389 case CEE_CONV_OVF_U:
7392 if (sp [-1]->type == STACK_R8) {
7393 ADD_UNOP (CEE_CONV_OVF_I8);
7400 case CEE_CONV_OVF_U1:
7401 case CEE_CONV_OVF_U2:
7402 case CEE_CONV_OVF_U4:
7405 if (sp [-1]->type == STACK_R8) {
7406 ADD_UNOP (CEE_CONV_OVF_U8);
7413 case CEE_CONV_OVF_I1_UN:
7414 case CEE_CONV_OVF_I2_UN:
7415 case CEE_CONV_OVF_I4_UN:
7416 case CEE_CONV_OVF_I8_UN:
7417 case CEE_CONV_OVF_U1_UN:
7418 case CEE_CONV_OVF_U2_UN:
7419 case CEE_CONV_OVF_U4_UN:
7420 case CEE_CONV_OVF_U8_UN:
7421 case CEE_CONV_OVF_I_UN:
7422 case CEE_CONV_OVF_U_UN:
7429 CHECK_CFG_EXCEPTION;
7433 case CEE_ADD_OVF_UN:
7435 case CEE_MUL_OVF_UN:
7437 case CEE_SUB_OVF_UN:
7445 token = read32 (ip + 1);
7446 klass = mini_get_class (method, token, generic_context);
7447 CHECK_TYPELOAD (klass);
7449 if (generic_class_is_reference_type (cfg, klass)) {
7450 MonoInst *store, *load;
7451 int dreg = alloc_preg (cfg);
7453 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
7454 load->flags |= ins_flag;
7455 MONO_ADD_INS (cfg->cbb, load);
7457 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
7458 store->flags |= ins_flag;
7459 MONO_ADD_INS (cfg->cbb, store);
7461 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
7462 emit_write_barrier (cfg, sp [0], sp [1], -1);
7464 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7476 token = read32 (ip + 1);
7477 klass = mini_get_class (method, token, generic_context);
7478 CHECK_TYPELOAD (klass);
7480 /* Optimize the common ldobj+stloc combination */
7490 loc_index = ip [5] - CEE_STLOC_0;
7497 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
7498 CHECK_LOCAL (loc_index);
7500 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7501 ins->dreg = cfg->locals [loc_index]->dreg;
7507 /* Optimize the ldobj+stobj combination */
7508 /* The reference case ends up being a load+store anyway */
7509 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
7514 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7521 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7530 CHECK_STACK_OVF (1);
7532 n = read32 (ip + 1);
7534 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
7535 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
7536 ins->type = STACK_OBJ;
7539 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
7540 MonoInst *iargs [1];
7542 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
7543 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
7545 if (cfg->opt & MONO_OPT_SHARED) {
7546 MonoInst *iargs [3];
7548 if (cfg->compile_aot) {
7549 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
7551 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7552 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
7553 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
7554 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
7555 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7557 if (bblock->out_of_line) {
7558 MonoInst *iargs [2];
7560 if (image == mono_defaults.corlib) {
7562 * Avoid relocations in AOT and save some space by using a
7563 * version of helper_ldstr specialized to mscorlib.
7565 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
7566 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
7568 /* Avoid creating the string object */
7569 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
7570 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
7571 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
7575 if (cfg->compile_aot) {
7576 NEW_LDSTRCONST (cfg, ins, image, n);
7578 MONO_ADD_INS (bblock, ins);
7581 NEW_PCONST (cfg, ins, NULL);
7582 ins->type = STACK_OBJ;
7583 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7585 MONO_ADD_INS (bblock, ins);
7594 MonoInst *iargs [2];
7595 MonoMethodSignature *fsig;
7598 MonoInst *vtable_arg = NULL;
7601 token = read32 (ip + 1);
7602 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7603 if (!cmethod || mono_loader_get_last_error ())
7605 fsig = mono_method_get_signature (cmethod, image, token);
7609 mono_save_token_info (cfg, image, token, cmethod);
7611 if (!mono_class_init (cmethod->klass))
7614 if (cfg->generic_sharing_context)
7615 context_used = mono_method_check_context_used (cmethod);
7617 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
7618 if (check_linkdemand (cfg, method, cmethod))
7620 CHECK_CFG_EXCEPTION;
7621 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
7622 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
7625 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
7626 emit_generic_class_init (cfg, cmethod->klass);
7627 CHECK_TYPELOAD (cmethod->klass);
7630 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7631 mono_method_is_generic_sharable_impl (cmethod, TRUE)) {
7632 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7633 mono_class_vtable (cfg->domain, cmethod->klass);
7634 CHECK_TYPELOAD (cmethod->klass);
7636 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7637 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7640 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
7641 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7643 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7645 CHECK_TYPELOAD (cmethod->klass);
7646 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7651 n = fsig->param_count;
7655 * Generate smaller code for the common newobj <exception> instruction in
7656 * argument checking code.
7658 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
7659 is_exception_class (cmethod->klass) && n <= 2 &&
7660 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
7661 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
7662 MonoInst *iargs [3];
7664 g_assert (!vtable_arg);
7668 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
7671 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
7675 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
7680 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
7683 g_assert_not_reached ();
7691 /* move the args to allow room for 'this' in the first position */
7697 /* check_call_signature () requires sp[0] to be set */
7698 this_ins.type = STACK_OBJ;
7700 if (check_call_signature (cfg, fsig, sp))
7705 if (mini_class_is_system_array (cmethod->klass)) {
7706 g_assert (!vtable_arg);
7708 *sp = emit_get_rgctx_method (cfg, context_used,
7709 cmethod, MONO_RGCTX_INFO_METHOD);
7711 /* Avoid varargs in the common case */
7712 if (fsig->param_count == 1)
7713 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
7714 else if (fsig->param_count == 2)
7715 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
7716 else if (fsig->param_count == 3)
7717 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
7719 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
7720 } else if (cmethod->string_ctor) {
7721 g_assert (!context_used);
7722 g_assert (!vtable_arg);
7723 /* we simply pass a null pointer */
7724 EMIT_NEW_PCONST (cfg, *sp, NULL);
7725 /* now call the string ctor */
7726 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, sp, NULL, NULL);
7728 MonoInst* callvirt_this_arg = NULL;
7730 if (cmethod->klass->valuetype) {
7731 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
7732 MONO_EMIT_NEW_VZERO (cfg, iargs [0]->dreg, cmethod->klass);
7733 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
7738 * The code generated by mini_emit_virtual_call () expects
7739 * iargs [0] to be a boxed instance, but luckily the vcall
7740 * will be transformed into a normal call there.
7742 } else if (context_used) {
7743 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
7746 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7748 CHECK_TYPELOAD (cmethod->klass);
7751 * TypeInitializationExceptions thrown from the mono_runtime_class_init
7752 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
7753 * As a workaround, we call class cctors before allocating objects.
7755 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
7756 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
7757 if (cfg->verbose_level > 2)
7758 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
7759 class_inits = g_slist_prepend (class_inits, vtable);
7762 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
7765 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
7768 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
7770 /* Now call the actual ctor */
7771 /* Avoid virtual calls to ctors if possible */
7772 if (cmethod->klass->marshalbyref)
7773 callvirt_this_arg = sp [0];
7776 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
7777 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7778 type_to_eval_stack_type ((cfg), fsig->ret, ins);
7783 CHECK_CFG_EXCEPTION;
7788 if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7789 mono_method_check_inlining (cfg, cmethod) &&
7790 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
7791 !g_list_find (dont_inline, cmethod)) {
7794 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
7795 cfg->real_offset += 5;
7798 inline_costs += costs - 5;
7801 mono_emit_method_call_full (cfg, cmethod, fsig, sp, callvirt_this_arg, NULL);
7803 } else if (context_used &&
7804 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
7805 !mono_class_generic_sharing_enabled (cmethod->klass))) {
7806 MonoInst *cmethod_addr;
7808 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7809 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7811 mono_emit_rgctx_calli (cfg, fsig, sp, cmethod_addr, vtable_arg);
7814 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp,
7815 callvirt_this_arg, NULL, vtable_arg);
7819 if (alloc == NULL) {
7821 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
7822 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
7836 token = read32 (ip + 1);
7837 klass = mini_get_class (method, token, generic_context);
7838 CHECK_TYPELOAD (klass);
7839 if (sp [0]->type != STACK_OBJ)
7842 if (cfg->generic_sharing_context)
7843 context_used = mono_class_check_context_used (klass);
7845 if (!context_used && mono_class_has_variant_generic_params (klass)) {
7852 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
7854 ins = mono_emit_jit_icall (cfg, mono_object_castclass, args);
7858 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
7859 MonoMethod *mono_castclass;
7860 MonoInst *iargs [1];
7863 mono_castclass = mono_marshal_get_castclass (klass);
7866 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7867 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7868 g_assert (costs > 0);
7871 cfg->real_offset += 5;
7876 inline_costs += costs;
7879 ins = handle_castclass (cfg, klass, *sp, context_used);
7880 CHECK_CFG_EXCEPTION;
7890 token = read32 (ip + 1);
7891 klass = mini_get_class (method, token, generic_context);
7892 CHECK_TYPELOAD (klass);
7893 if (sp [0]->type != STACK_OBJ)
7896 if (cfg->generic_sharing_context)
7897 context_used = mono_class_check_context_used (klass);
7899 if (!context_used && mono_class_has_variant_generic_params (klass)) {
7906 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
7908 *sp = mono_emit_jit_icall (cfg, mono_object_isinst, args);
7912 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
7913 MonoMethod *mono_isinst;
7914 MonoInst *iargs [1];
7917 mono_isinst = mono_marshal_get_isinst (klass);
7920 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
7921 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7922 g_assert (costs > 0);
7925 cfg->real_offset += 5;
7930 inline_costs += costs;
7933 ins = handle_isinst (cfg, klass, *sp, context_used);
7934 CHECK_CFG_EXCEPTION;
7941 case CEE_UNBOX_ANY: {
7945 token = read32 (ip + 1);
7946 klass = mini_get_class (method, token, generic_context);
7947 CHECK_TYPELOAD (klass);
7949 mono_save_token_info (cfg, image, token, klass);
7951 if (cfg->generic_sharing_context)
7952 context_used = mono_class_check_context_used (klass);
7954 if (generic_class_is_reference_type (cfg, klass)) {
7955 /* CASTCLASS FIXME kill this huge slice of duplicated code*/
7956 if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
7957 MonoMethod *mono_castclass;
7958 MonoInst *iargs [1];
7961 mono_castclass = mono_marshal_get_castclass (klass);
7964 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7965 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7967 g_assert (costs > 0);
7970 cfg->real_offset += 5;
7974 inline_costs += costs;
7976 ins = handle_castclass (cfg, klass, *sp, context_used);
7977 CHECK_CFG_EXCEPTION;
7985 if (mono_class_is_nullable (klass)) {
7986 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
7993 ins = handle_unbox (cfg, klass, sp, context_used);
7999 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
8012 token = read32 (ip + 1);
8013 klass = mini_get_class (method, token, generic_context);
8014 CHECK_TYPELOAD (klass);
8016 mono_save_token_info (cfg, image, token, klass);
8018 if (cfg->generic_sharing_context)
8019 context_used = mono_class_check_context_used (klass);
8021 if (generic_class_is_reference_type (cfg, klass)) {
8027 if (klass == mono_defaults.void_class)
8029 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
8031 /* frequent check in generic code: box (struct), brtrue */
8033 // FIXME: LLVM can't handle the inconsistent bb linking
8034 if (!mono_class_is_nullable (klass) &&
8035 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) &&
8036 (ip [5] == CEE_BRTRUE ||
8037 ip [5] == CEE_BRTRUE_S ||
8038 ip [5] == CEE_BRFALSE ||
8039 ip [5] == CEE_BRFALSE_S)) {
8040 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
8042 MonoBasicBlock *true_bb, *false_bb;
8046 if (cfg->verbose_level > 3) {
8047 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
8048 printf ("<box+brtrue opt>\n");
8056 target = ip + 1 + (signed char)(*ip);
8063 target = ip + 4 + (gint)(read32 (ip));
8067 g_assert_not_reached ();
8071 * We need to link both bblocks, since it is needed for handling stack
8072 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
8073 * Branching to only one of them would lead to inconsistencies, so
8074 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
8076 GET_BBLOCK (cfg, true_bb, target);
8077 GET_BBLOCK (cfg, false_bb, ip);
8079 mono_link_bblock (cfg, cfg->cbb, true_bb);
8080 mono_link_bblock (cfg, cfg->cbb, false_bb);
8082 if (sp != stack_start) {
8083 handle_stack_args (cfg, stack_start, sp - stack_start);
8085 CHECK_UNVERIFIABLE (cfg);
8088 if (COMPILE_LLVM (cfg)) {
8089 dreg = alloc_ireg (cfg);
8090 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
8091 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
8093 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
8095 /* The JIT can't eliminate the iconst+compare */
8096 MONO_INST_NEW (cfg, ins, OP_BR);
8097 ins->inst_target_bb = is_true ? true_bb : false_bb;
8098 MONO_ADD_INS (cfg->cbb, ins);
8101 start_new_bblock = 1;
8105 *sp++ = handle_box (cfg, val, klass, context_used);
8107 CHECK_CFG_EXCEPTION;
8116 token = read32 (ip + 1);
8117 klass = mini_get_class (method, token, generic_context);
8118 CHECK_TYPELOAD (klass);
8120 mono_save_token_info (cfg, image, token, klass);
8122 if (cfg->generic_sharing_context)
8123 context_used = mono_class_check_context_used (klass);
8125 if (mono_class_is_nullable (klass)) {
8128 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
8129 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
8133 ins = handle_unbox (cfg, klass, sp, context_used);
8143 MonoClassField *field;
8147 if (*ip == CEE_STFLD) {
8154 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
8156 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
8159 token = read32 (ip + 1);
8160 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8161 field = mono_method_get_wrapper_data (method, token);
8162 klass = field->parent;
8165 field = mono_field_from_token (image, token, &klass, generic_context);
8169 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
8170 FIELD_ACCESS_FAILURE;
8171 mono_class_init (klass);
8173 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
8174 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
8175 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8176 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8179 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
8180 if (*ip == CEE_STFLD) {
8181 if (target_type_is_incompatible (cfg, field->type, sp [1]))
8183 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
8184 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
8185 MonoInst *iargs [5];
8188 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8189 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
8190 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
8194 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
8195 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
8196 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8197 g_assert (costs > 0);
8199 cfg->real_offset += 5;
8202 inline_costs += costs;
8204 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
8209 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
8211 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
8212 if (sp [0]->opcode != OP_LDADDR)
8213 store->flags |= MONO_INST_FAULT;
8215 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
8216 /* insert call to write barrier */
8220 dreg = alloc_preg (cfg);
8221 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
8222 emit_write_barrier (cfg, ptr, sp [1], -1);
8225 store->flags |= ins_flag;
8232 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
8233 MonoMethod *wrapper = (*ip == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
8234 MonoInst *iargs [4];
8237 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8238 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
8239 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
8240 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
8241 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
8242 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8244 g_assert (costs > 0);
8246 cfg->real_offset += 5;
8250 inline_costs += costs;
8252 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
8256 if (sp [0]->type == STACK_VTYPE) {
8259 /* Have to compute the address of the variable */
8261 var = get_vreg_to_inst (cfg, sp [0]->dreg);
8263 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
8265 g_assert (var->klass == klass);
8267 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
8271 if (*ip == CEE_LDFLDA) {
8272 if (sp [0]->type == STACK_OBJ) {
8273 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
8274 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
8277 dreg = alloc_preg (cfg);
8279 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
8280 ins->klass = mono_class_from_mono_type (field->type);
8281 ins->type = STACK_MP;
8286 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
8288 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
8289 load->flags |= ins_flag;
8290 if (sp [0]->opcode != OP_LDADDR)
8291 load->flags |= MONO_INST_FAULT;
8302 MonoClassField *field;
8303 gpointer addr = NULL;
8304 gboolean is_special_static;
8307 token = read32 (ip + 1);
8309 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8310 field = mono_method_get_wrapper_data (method, token);
8311 klass = field->parent;
8314 field = mono_field_from_token (image, token, &klass, generic_context);
8317 mono_class_init (klass);
8318 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
8319 FIELD_ACCESS_FAILURE;
8321 /* if the class is Critical then transparent code cannot access its fields */
8322 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8323 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8326 * We can only support shared generic static
8327 * field access on architectures where the
8328 * trampoline code has been extended to handle
8329 * the generic class init.
8331 #ifndef MONO_ARCH_VTABLE_REG
8332 GENERIC_SHARING_FAILURE (*ip);
8335 if (cfg->generic_sharing_context)
8336 context_used = mono_class_check_context_used (klass);
8338 g_assert (!(field->type->attrs & FIELD_ATTRIBUTE_LITERAL));
8340 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
8341 * to be called here.
8343 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
8344 mono_class_vtable (cfg->domain, klass);
8345 CHECK_TYPELOAD (klass);
8347 mono_domain_lock (cfg->domain);
8348 if (cfg->domain->special_static_fields)
8349 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
8350 mono_domain_unlock (cfg->domain);
8352 is_special_static = mono_class_field_is_special_static (field);
8354 /* Generate IR to compute the field address */
8355 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && mono_get_thread_intrinsic (cfg) && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
8357 * Fast access to TLS data
8358 * Inline version of get_thread_static_data () in
8362 int idx, static_data_reg, array_reg, dreg;
8363 MonoInst *thread_ins;
8365 // offset &= 0x7fffffff;
8366 // idx = (offset >> 24) - 1;
8367 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
8369 thread_ins = mono_get_thread_intrinsic (cfg);
8370 MONO_ADD_INS (cfg->cbb, thread_ins);
8371 static_data_reg = alloc_ireg (cfg);
8372 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
8374 if (cfg->compile_aot) {
8375 int offset_reg, offset2_reg, idx_reg;
8377 /* For TLS variables, this will return the TLS offset */
8378 EMIT_NEW_SFLDACONST (cfg, ins, field);
8379 offset_reg = ins->dreg;
8380 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
8381 idx_reg = alloc_ireg (cfg);
8382 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
8383 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
8384 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
8385 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
8386 array_reg = alloc_ireg (cfg);
8387 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
8388 offset2_reg = alloc_ireg (cfg);
8389 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
8390 dreg = alloc_ireg (cfg);
8391 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
8393 offset = (gsize)addr & 0x7fffffff;
8394 idx = (offset >> 24) - 1;
8396 array_reg = alloc_ireg (cfg);
8397 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
8398 dreg = alloc_ireg (cfg);
8399 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, (offset & 0xffffff));
8401 } else if ((cfg->opt & MONO_OPT_SHARED) ||
8402 (cfg->compile_aot && is_special_static) ||
8403 (context_used && is_special_static)) {
8404 MonoInst *iargs [2];
8406 g_assert (field->parent);
8407 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8409 iargs [1] = emit_get_rgctx_field (cfg, context_used,
8410 field, MONO_RGCTX_INFO_CLASS_FIELD);
8412 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8414 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8415 } else if (context_used) {
8416 MonoInst *static_data;
8419 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
8420 method->klass->name_space, method->klass->name, method->name,
8421 depth, field->offset);
8424 if (mono_class_needs_cctor_run (klass, method))
8425 emit_generic_class_init (cfg, klass);
8428 * The pointer we're computing here is
8430 * super_info.static_data + field->offset
8432 static_data = emit_get_rgctx_klass (cfg, context_used,
8433 klass, MONO_RGCTX_INFO_STATIC_DATA);
8435 if (field->offset == 0) {
8438 int addr_reg = mono_alloc_preg (cfg);
8439 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
8441 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
8442 MonoInst *iargs [2];
8444 g_assert (field->parent);
8445 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8446 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8447 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8449 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
8451 CHECK_TYPELOAD (klass);
8453 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
8454 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
8455 if (cfg->verbose_level > 2)
8456 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
8457 class_inits = g_slist_prepend (class_inits, vtable);
8459 if (cfg->run_cctors) {
8461 /* This makes so that inline cannot trigger */
8462 /* .cctors: too many apps depend on them */
8463 /* running with a specific order... */
8464 if (! vtable->initialized)
8466 ex = mono_runtime_class_init_full (vtable, FALSE);
8468 set_exception_object (cfg, ex);
8469 goto exception_exit;
8473 addr = (char*)vtable->data + field->offset;
8475 if (cfg->compile_aot)
8476 EMIT_NEW_SFLDACONST (cfg, ins, field);
8478 EMIT_NEW_PCONST (cfg, ins, addr);
8480 MonoInst *iargs [1];
8481 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
8482 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
8486 /* Generate IR to do the actual load/store operation */
8488 if (*ip == CEE_LDSFLDA) {
8489 ins->klass = mono_class_from_mono_type (field->type);
8490 ins->type = STACK_PTR;
8492 } else if (*ip == CEE_STSFLD) {
8497 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, ins->dreg, 0, sp [0]->dreg);
8498 store->flags |= ins_flag;
8500 gboolean is_const = FALSE;
8501 MonoVTable *vtable = NULL;
8503 if (!context_used) {
8504 vtable = mono_class_vtable (cfg->domain, klass);
8505 CHECK_TYPELOAD (klass);
8507 if (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) &&
8508 vtable->initialized && (field->type->attrs & FIELD_ATTRIBUTE_INIT_ONLY)) {
8509 gpointer addr = (char*)vtable->data + field->offset;
8510 int ro_type = field->type->type;
8511 if (ro_type == MONO_TYPE_VALUETYPE && field->type->data.klass->enumtype) {
8512 ro_type = mono_class_enum_basetype (field->type->data.klass)->type;
8514 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
8517 case MONO_TYPE_BOOLEAN:
8519 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
8523 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
8526 case MONO_TYPE_CHAR:
8528 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
8532 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
8537 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
8541 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
8547 case MONO_TYPE_FNPTR:
8548 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
8549 type_to_eval_stack_type ((cfg), field->type, *sp);
8552 case MONO_TYPE_STRING:
8553 case MONO_TYPE_OBJECT:
8554 case MONO_TYPE_CLASS:
8555 case MONO_TYPE_SZARRAY:
8556 case MONO_TYPE_ARRAY:
8557 if (!mono_gc_is_moving ()) {
8558 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
8559 type_to_eval_stack_type ((cfg), field->type, *sp);
8567 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
8572 case MONO_TYPE_VALUETYPE:
8582 CHECK_STACK_OVF (1);
8584 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
8585 load->flags |= ins_flag;
8598 token = read32 (ip + 1);
8599 klass = mini_get_class (method, token, generic_context);
8600 CHECK_TYPELOAD (klass);
8601 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
8602 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
8603 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
8604 generic_class_is_reference_type (cfg, klass)) {
8605 /* insert call to write barrier */
8606 emit_write_barrier (cfg, sp [0], sp [1], -1);
8618 const char *data_ptr;
8620 guint32 field_token;
8626 token = read32 (ip + 1);
8628 klass = mini_get_class (method, token, generic_context);
8629 CHECK_TYPELOAD (klass);
8631 if (cfg->generic_sharing_context)
8632 context_used = mono_class_check_context_used (klass);
8634 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
8635 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_I4);
8636 ins->sreg1 = sp [0]->dreg;
8637 ins->type = STACK_I4;
8638 ins->dreg = alloc_ireg (cfg);
8639 MONO_ADD_INS (cfg->cbb, ins);
8640 *sp = mono_decompose_opcode (cfg, ins);
8645 MonoClass *array_class = mono_array_class_get (klass, 1);
8646 /* FIXME: we cannot get a managed
8647 allocator because we can't get the
8648 open generic class's vtable. We
8649 have the same problem in
8650 handle_alloc(). This
8651 needs to be solved so that we can
8652 have managed allocs of shared
8655 MonoVTable *array_class_vtable = mono_class_vtable (cfg->domain, array_class);
8656 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class_vtable, 1);
8658 MonoMethod *managed_alloc = NULL;
8660 /* FIXME: Decompose later to help abcrem */
8663 args [0] = emit_get_rgctx_klass (cfg, context_used,
8664 array_class, MONO_RGCTX_INFO_VTABLE);
8669 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
8671 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
8673 if (cfg->opt & MONO_OPT_SHARED) {
8674 /* Decompose now to avoid problems with references to the domainvar */
8675 MonoInst *iargs [3];
8677 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8678 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8681 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
8683 /* Decompose later since it is needed by abcrem */
8684 MonoClass *array_type = mono_array_class_get (klass, 1);
8685 mono_class_vtable (cfg->domain, array_type);
8686 CHECK_TYPELOAD (array_type);
8688 MONO_INST_NEW (cfg, ins, OP_NEWARR);
8689 ins->dreg = alloc_preg (cfg);
8690 ins->sreg1 = sp [0]->dreg;
8691 ins->inst_newa_class = klass;
8692 ins->type = STACK_OBJ;
8694 MONO_ADD_INS (cfg->cbb, ins);
8695 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8696 cfg->cbb->has_array_access = TRUE;
8698 /* Needed so mono_emit_load_get_addr () gets called */
8699 mono_get_got_var (cfg);
8709 * we inline/optimize the initialization sequence if possible.
8710 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
8711 * for small sizes open code the memcpy
8712 * ensure the rva field is big enough
8714 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
8715 MonoMethod *memcpy_method = get_memcpy_method ();
8716 MonoInst *iargs [3];
8717 int add_reg = alloc_preg (cfg);
8719 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
8720 if (cfg->compile_aot) {
8721 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
8723 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
8725 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
8726 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
8735 if (sp [0]->type != STACK_OBJ)
8738 dreg = alloc_preg (cfg);
8739 MONO_INST_NEW (cfg, ins, OP_LDLEN);
8740 ins->dreg = alloc_preg (cfg);
8741 ins->sreg1 = sp [0]->dreg;
8742 ins->type = STACK_I4;
8743 /* This flag will be inherited by the decomposition */
8744 ins->flags |= MONO_INST_FAULT;
8745 MONO_ADD_INS (cfg->cbb, ins);
8746 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8747 cfg->cbb->has_array_access = TRUE;
8755 if (sp [0]->type != STACK_OBJ)
8758 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8760 klass = mini_get_class (method, read32 (ip + 1), generic_context);
8761 CHECK_TYPELOAD (klass);
8762 /* we need to make sure that this array is exactly the type it needs
8763 * to be for correctness. the wrappers are lax with their usage
8764 * so we need to ignore them here
8766 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
8767 MonoClass *array_class = mono_array_class_get (klass, 1);
8768 mini_emit_check_array_type (cfg, sp [0], array_class);
8769 CHECK_TYPELOAD (array_class);
8773 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
8788 case CEE_LDELEM_REF: {
8794 if (*ip == CEE_LDELEM) {
8796 token = read32 (ip + 1);
8797 klass = mini_get_class (method, token, generic_context);
8798 CHECK_TYPELOAD (klass);
8799 mono_class_init (klass);
8802 klass = array_access_to_klass (*ip);
8804 if (sp [0]->type != STACK_OBJ)
8807 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8809 if (sp [1]->opcode == OP_ICONST) {
8810 int array_reg = sp [0]->dreg;
8811 int index_reg = sp [1]->dreg;
8812 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8814 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8815 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
8817 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
8818 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
8821 if (*ip == CEE_LDELEM)
8834 case CEE_STELEM_REF:
8841 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8843 if (*ip == CEE_STELEM) {
8845 token = read32 (ip + 1);
8846 klass = mini_get_class (method, token, generic_context);
8847 CHECK_TYPELOAD (klass);
8848 mono_class_init (klass);
8851 klass = array_access_to_klass (*ip);
8853 if (sp [0]->type != STACK_OBJ)
8856 /* storing a NULL doesn't need any of the complex checks in stelemref */
8857 if (generic_class_is_reference_type (cfg, klass) &&
8858 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
8859 MonoMethod* helper = mono_marshal_get_stelemref ();
8860 MonoInst *iargs [3];
8862 if (sp [0]->type != STACK_OBJ)
8864 if (sp [2]->type != STACK_OBJ)
8871 mono_emit_method_call (cfg, helper, iargs, NULL);
8873 if (sp [1]->opcode == OP_ICONST) {
8874 int array_reg = sp [0]->dreg;
8875 int index_reg = sp [1]->dreg;
8876 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8878 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8879 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
8881 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
8882 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
8886 if (*ip == CEE_STELEM)
8893 case CEE_CKFINITE: {
8897 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
8898 ins->sreg1 = sp [0]->dreg;
8899 ins->dreg = alloc_freg (cfg);
8900 ins->type = STACK_R8;
8901 MONO_ADD_INS (bblock, ins);
8903 *sp++ = mono_decompose_opcode (cfg, ins);
8908 case CEE_REFANYVAL: {
8909 MonoInst *src_var, *src;
8911 int klass_reg = alloc_preg (cfg);
8912 int dreg = alloc_preg (cfg);
8915 MONO_INST_NEW (cfg, ins, *ip);
8918 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8919 CHECK_TYPELOAD (klass);
8920 mono_class_init (klass);
8922 if (cfg->generic_sharing_context)
8923 context_used = mono_class_check_context_used (klass);
8926 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
8928 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
8929 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
8930 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
8933 MonoInst *klass_ins;
8935 klass_ins = emit_get_rgctx_klass (cfg, context_used,
8936 klass, MONO_RGCTX_INFO_KLASS);
8939 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
8940 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
8942 mini_emit_class_check (cfg, klass_reg, klass);
8944 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
8945 ins->type = STACK_MP;
8950 case CEE_MKREFANY: {
8951 MonoInst *loc, *addr;
8954 MONO_INST_NEW (cfg, ins, *ip);
8957 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8958 CHECK_TYPELOAD (klass);
8959 mono_class_init (klass);
8961 if (cfg->generic_sharing_context)
8962 context_used = mono_class_check_context_used (klass);
8964 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
8965 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
8968 MonoInst *const_ins;
8969 int type_reg = alloc_preg (cfg);
8971 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
8972 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
8973 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8974 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8975 } else if (cfg->compile_aot) {
8976 int const_reg = alloc_preg (cfg);
8977 int type_reg = alloc_preg (cfg);
8979 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
8980 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
8981 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8982 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8984 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
8985 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
8987 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
8989 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
8990 ins->type = STACK_VTYPE;
8991 ins->klass = mono_defaults.typed_reference_class;
8998 MonoClass *handle_class;
9000 CHECK_STACK_OVF (1);
9003 n = read32 (ip + 1);
9005 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
9006 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
9007 handle = mono_method_get_wrapper_data (method, n);
9008 handle_class = mono_method_get_wrapper_data (method, n + 1);
9009 if (handle_class == mono_defaults.typehandle_class)
9010 handle = &((MonoClass*)handle)->byval_arg;
9013 handle = mono_ldtoken (image, n, &handle_class, generic_context);
9017 mono_class_init (handle_class);
9018 if (cfg->generic_sharing_context) {
9019 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
9020 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
9021 /* This case handles ldtoken
9022 of an open type, like for
9025 } else if (handle_class == mono_defaults.typehandle_class) {
9026 /* If we get a MONO_TYPE_CLASS
9027 then we need to provide the
9029 instantiation of it. */
9030 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
9033 context_used = mono_class_check_context_used (mono_class_from_mono_type (handle));
9034 } else if (handle_class == mono_defaults.fieldhandle_class)
9035 context_used = mono_class_check_context_used (((MonoClassField*)handle)->parent);
9036 else if (handle_class == mono_defaults.methodhandle_class)
9037 context_used = mono_method_check_context_used (handle);
9039 g_assert_not_reached ();
9042 if ((cfg->opt & MONO_OPT_SHARED) &&
9043 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
9044 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
9045 MonoInst *addr, *vtvar, *iargs [3];
9046 int method_context_used;
9048 if (cfg->generic_sharing_context)
9049 method_context_used = mono_method_check_context_used (method);
9051 method_context_used = 0;
9053 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
9055 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
9056 EMIT_NEW_ICONST (cfg, iargs [1], n);
9057 if (method_context_used) {
9058 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
9059 method, MONO_RGCTX_INFO_METHOD);
9060 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
9062 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
9063 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
9065 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9067 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
9069 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9071 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
9072 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
9073 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
9074 (cmethod->klass == mono_defaults.monotype_class->parent) &&
9075 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
9076 MonoClass *tclass = mono_class_from_mono_type (handle);
9078 mono_class_init (tclass);
9080 ins = emit_get_rgctx_klass (cfg, context_used,
9081 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
9082 } else if (cfg->compile_aot) {
9083 if (method->wrapper_type) {
9084 if (mono_class_get (tclass->image, tclass->type_token) == tclass && !generic_context) {
9085 /* Special case for static synchronized wrappers */
9086 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
9088 /* FIXME: n is not a normal token */
9089 cfg->disable_aot = TRUE;
9090 EMIT_NEW_PCONST (cfg, ins, NULL);
9093 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
9096 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
9098 ins->type = STACK_OBJ;
9099 ins->klass = cmethod->klass;
9102 MonoInst *addr, *vtvar;
9104 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
9107 if (handle_class == mono_defaults.typehandle_class) {
9108 ins = emit_get_rgctx_klass (cfg, context_used,
9109 mono_class_from_mono_type (handle),
9110 MONO_RGCTX_INFO_TYPE);
9111 } else if (handle_class == mono_defaults.methodhandle_class) {
9112 ins = emit_get_rgctx_method (cfg, context_used,
9113 handle, MONO_RGCTX_INFO_METHOD);
9114 } else if (handle_class == mono_defaults.fieldhandle_class) {
9115 ins = emit_get_rgctx_field (cfg, context_used,
9116 handle, MONO_RGCTX_INFO_CLASS_FIELD);
9118 g_assert_not_reached ();
9120 } else if (cfg->compile_aot) {
9121 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n);
9123 EMIT_NEW_PCONST (cfg, ins, handle);
9125 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9126 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
9127 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9137 MONO_INST_NEW (cfg, ins, OP_THROW);
9139 ins->sreg1 = sp [0]->dreg;
9141 bblock->out_of_line = TRUE;
9142 MONO_ADD_INS (bblock, ins);
9143 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
9144 MONO_ADD_INS (bblock, ins);
9147 link_bblock (cfg, bblock, end_bblock);
9148 start_new_bblock = 1;
9150 case CEE_ENDFINALLY:
9151 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
9152 MONO_ADD_INS (bblock, ins);
9154 start_new_bblock = 1;
9157 * Control will leave the method so empty the stack, otherwise
9158 * the next basic block will start with a nonempty stack.
9160 while (sp != stack_start) {
9168 if (*ip == CEE_LEAVE) {
9170 target = ip + 5 + (gint32)read32(ip + 1);
9173 target = ip + 2 + (signed char)(ip [1]);
9176 /* empty the stack */
9177 while (sp != stack_start) {
9182 * If this leave statement is in a catch block, check for a
9183 * pending exception, and rethrow it if necessary.
9184 * We avoid doing this in runtime invoke wrappers, since those are called
9185 * by native code which expects the wrapper to catch all exceptions.
9187 for (i = 0; i < header->num_clauses; ++i) {
9188 MonoExceptionClause *clause = &header->clauses [i];
9191 * Use <= in the final comparison to handle clauses with multiple
9192 * leave statements, like in bug #78024.
9193 * The ordering of the exception clauses guarantees that we find the
9196 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
9198 MonoBasicBlock *dont_throw;
9203 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
9206 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
9208 NEW_BBLOCK (cfg, dont_throw);
9211 * Currently, we always rethrow the abort exception, despite the
9212 * fact that this is not correct. See thread6.cs for an example.
9213 * But propagating the abort exception is more important than
9214 * getting the semantics right.
9216 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
9217 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
9218 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
9220 MONO_START_BB (cfg, dont_throw);
9225 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
9227 MonoExceptionClause *clause;
9229 for (tmp = handlers; tmp; tmp = tmp->next) {
9231 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
9233 link_bblock (cfg, bblock, tblock);
9234 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
9235 ins->inst_target_bb = tblock;
9236 ins->inst_eh_block = clause;
9237 MONO_ADD_INS (bblock, ins);
9238 bblock->has_call_handler = 1;
9239 if (COMPILE_LLVM (cfg)) {
9240 MonoBasicBlock *target_bb;
9243 * Link the finally bblock with the target, since it will
9244 * conceptually branch there.
9245 * FIXME: Have to link the bblock containing the endfinally.
9247 GET_BBLOCK (cfg, target_bb, target);
9248 link_bblock (cfg, tblock, target_bb);
9251 g_list_free (handlers);
9254 MONO_INST_NEW (cfg, ins, OP_BR);
9255 MONO_ADD_INS (bblock, ins);
9256 GET_BBLOCK (cfg, tblock, target);
9257 link_bblock (cfg, bblock, tblock);
9258 ins->inst_target_bb = tblock;
9259 start_new_bblock = 1;
9261 if (*ip == CEE_LEAVE)
9270 * Mono specific opcodes
9272 case MONO_CUSTOM_PREFIX: {
9274 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
9278 case CEE_MONO_ICALL: {
9280 MonoJitICallInfo *info;
9282 token = read32 (ip + 2);
9283 func = mono_method_get_wrapper_data (method, token);
9284 info = mono_find_jit_icall_by_addr (func);
9287 CHECK_STACK (info->sig->param_count);
9288 sp -= info->sig->param_count;
9290 ins = mono_emit_jit_icall (cfg, info->func, sp);
9291 if (!MONO_TYPE_IS_VOID (info->sig->ret))
9295 inline_costs += 10 * num_calls++;
9299 case CEE_MONO_LDPTR: {
9302 CHECK_STACK_OVF (1);
9304 token = read32 (ip + 2);
9306 ptr = mono_method_get_wrapper_data (method, token);
9307 if (cfg->compile_aot && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (strstr (method->name, "__icall_wrapper_") == method->name)) {
9308 MonoJitICallInfo *callinfo;
9309 const char *icall_name;
9311 icall_name = method->name + strlen ("__icall_wrapper_");
9312 g_assert (icall_name);
9313 callinfo = mono_find_jit_icall_by_name (icall_name);
9314 g_assert (callinfo);
9316 if (ptr == callinfo->func) {
9317 /* Will be transformed into an AOTCONST later */
9318 EMIT_NEW_PCONST (cfg, ins, ptr);
9324 /* FIXME: Generalize this */
9325 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
9326 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
9331 EMIT_NEW_PCONST (cfg, ins, ptr);
9334 inline_costs += 10 * num_calls++;
9335 /* Can't embed random pointers into AOT code */
9336 cfg->disable_aot = 1;
9339 case CEE_MONO_ICALL_ADDR: {
9340 MonoMethod *cmethod;
9343 CHECK_STACK_OVF (1);
9345 token = read32 (ip + 2);
9347 cmethod = mono_method_get_wrapper_data (method, token);
9349 if (cfg->compile_aot) {
9350 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
9352 ptr = mono_lookup_internal_call (cmethod);
9354 EMIT_NEW_PCONST (cfg, ins, ptr);
9360 case CEE_MONO_VTADDR: {
9361 MonoInst *src_var, *src;
9367 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9368 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
9373 case CEE_MONO_NEWOBJ: {
9374 MonoInst *iargs [2];
9376 CHECK_STACK_OVF (1);
9378 token = read32 (ip + 2);
9379 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9380 mono_class_init (klass);
9381 NEW_DOMAINCONST (cfg, iargs [0]);
9382 MONO_ADD_INS (cfg->cbb, iargs [0]);
9383 NEW_CLASSCONST (cfg, iargs [1], klass);
9384 MONO_ADD_INS (cfg->cbb, iargs [1]);
9385 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
9387 inline_costs += 10 * num_calls++;
9390 case CEE_MONO_OBJADDR:
9393 MONO_INST_NEW (cfg, ins, OP_MOVE);
9394 ins->dreg = alloc_preg (cfg);
9395 ins->sreg1 = sp [0]->dreg;
9396 ins->type = STACK_MP;
9397 MONO_ADD_INS (cfg->cbb, ins);
9401 case CEE_MONO_LDNATIVEOBJ:
9403 * Similar to LDOBJ, but instead load the unmanaged
9404 * representation of the vtype to the stack.
9409 token = read32 (ip + 2);
9410 klass = mono_method_get_wrapper_data (method, token);
9411 g_assert (klass->valuetype);
9412 mono_class_init (klass);
9415 MonoInst *src, *dest, *temp;
9418 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
9419 temp->backend.is_pinvoke = 1;
9420 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
9421 mini_emit_stobj (cfg, dest, src, klass, TRUE);
9423 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
9424 dest->type = STACK_VTYPE;
9425 dest->klass = klass;
9431 case CEE_MONO_RETOBJ: {
9433 * Same as RET, but return the native representation of a vtype
9436 g_assert (cfg->ret);
9437 g_assert (mono_method_signature (method)->pinvoke);
9442 token = read32 (ip + 2);
9443 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9445 if (!cfg->vret_addr) {
9446 g_assert (cfg->ret_var_is_local);
9448 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
9450 EMIT_NEW_RETLOADA (cfg, ins);
9452 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
9454 if (sp != stack_start)
9457 MONO_INST_NEW (cfg, ins, OP_BR);
9458 ins->inst_target_bb = end_bblock;
9459 MONO_ADD_INS (bblock, ins);
9460 link_bblock (cfg, bblock, end_bblock);
9461 start_new_bblock = 1;
9465 case CEE_MONO_CISINST:
9466 case CEE_MONO_CCASTCLASS: {
9471 token = read32 (ip + 2);
9472 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9473 if (ip [1] == CEE_MONO_CISINST)
9474 ins = handle_cisinst (cfg, klass, sp [0]);
9476 ins = handle_ccastclass (cfg, klass, sp [0]);
9482 case CEE_MONO_SAVE_LMF:
9483 case CEE_MONO_RESTORE_LMF:
9484 #ifdef MONO_ARCH_HAVE_LMF_OPS
9485 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
9486 MONO_ADD_INS (bblock, ins);
9487 cfg->need_lmf_area = TRUE;
9491 case CEE_MONO_CLASSCONST:
9492 CHECK_STACK_OVF (1);
9494 token = read32 (ip + 2);
9495 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
9498 inline_costs += 10 * num_calls++;
9500 case CEE_MONO_NOT_TAKEN:
9501 bblock->out_of_line = TRUE;
9505 CHECK_STACK_OVF (1);
9507 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
9508 ins->dreg = alloc_preg (cfg);
9509 ins->inst_offset = (gint32)read32 (ip + 2);
9510 ins->type = STACK_PTR;
9511 MONO_ADD_INS (bblock, ins);
9515 case CEE_MONO_DYN_CALL: {
9518 /* It would be easier to call a trampoline, but that would put an
9519 * extra frame on the stack, confusing exception handling. So
9520 * implement it inline using an opcode for now.
9523 if (!cfg->dyn_call_var) {
9524 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
9525 /* prevent it from being register allocated */
9526 cfg->dyn_call_var->flags |= MONO_INST_INDIRECT;
9529 /* Has to use a call inst since it local regalloc expects it */
9530 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
9531 ins = (MonoInst*)call;
9533 ins->sreg1 = sp [0]->dreg;
9534 ins->sreg2 = sp [1]->dreg;
9535 MONO_ADD_INS (bblock, ins);
9537 #ifdef MONO_ARCH_DYN_CALL_PARAM_AREA
9538 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
9542 inline_costs += 10 * num_calls++;
9547 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
9557 /* somewhat similar to LDTOKEN */
9558 MonoInst *addr, *vtvar;
9559 CHECK_STACK_OVF (1);
9560 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
9562 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9563 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
9565 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9566 ins->type = STACK_VTYPE;
9567 ins->klass = mono_defaults.argumenthandle_class;
9580 * The following transforms:
9581 * CEE_CEQ into OP_CEQ
9582 * CEE_CGT into OP_CGT
9583 * CEE_CGT_UN into OP_CGT_UN
9584 * CEE_CLT into OP_CLT
9585 * CEE_CLT_UN into OP_CLT_UN
9587 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
9589 MONO_INST_NEW (cfg, ins, cmp->opcode);
9591 cmp->sreg1 = sp [0]->dreg;
9592 cmp->sreg2 = sp [1]->dreg;
9593 type_from_op (cmp, sp [0], sp [1]);
9595 if ((sp [0]->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
9596 cmp->opcode = OP_LCOMPARE;
9597 else if (sp [0]->type == STACK_R8)
9598 cmp->opcode = OP_FCOMPARE;
9600 cmp->opcode = OP_ICOMPARE;
9601 MONO_ADD_INS (bblock, cmp);
9602 ins->type = STACK_I4;
9603 ins->dreg = alloc_dreg (cfg, ins->type);
9604 type_from_op (ins, sp [0], sp [1]);
9606 if (cmp->opcode == OP_FCOMPARE) {
9608 * The backends expect the fceq opcodes to do the
9611 cmp->opcode = OP_NOP;
9612 ins->sreg1 = cmp->sreg1;
9613 ins->sreg2 = cmp->sreg2;
9615 MONO_ADD_INS (bblock, ins);
9622 MonoMethod *cil_method;
9623 gboolean needs_static_rgctx_invoke;
9625 CHECK_STACK_OVF (1);
9627 n = read32 (ip + 2);
9628 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9629 if (!cmethod || mono_loader_get_last_error ())
9631 mono_class_init (cmethod->klass);
9633 mono_save_token_info (cfg, image, n, cmethod);
9635 if (cfg->generic_sharing_context)
9636 context_used = mono_method_check_context_used (cmethod);
9638 needs_static_rgctx_invoke = mono_method_needs_static_rgctx_invoke (cmethod, TRUE);
9640 cil_method = cmethod;
9641 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
9642 METHOD_ACCESS_FAILURE;
9644 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9645 if (check_linkdemand (cfg, method, cmethod))
9647 CHECK_CFG_EXCEPTION;
9648 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9649 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9653 * Optimize the common case of ldftn+delegate creation
9655 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
9656 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
9657 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
9658 MonoInst *target_ins;
9660 int invoke_context_used = 0;
9662 invoke = mono_get_delegate_invoke (ctor_method->klass);
9663 if (!invoke || !mono_method_signature (invoke))
9666 if (cfg->generic_sharing_context)
9667 invoke_context_used = mono_method_check_context_used (invoke);
9669 target_ins = sp [-1];
9671 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
9672 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
9673 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
9674 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
9675 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
9679 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
9680 /* FIXME: SGEN support */
9681 if (invoke_context_used == 0) {
9683 if (cfg->verbose_level > 3)
9684 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
9686 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used);
9687 CHECK_CFG_EXCEPTION;
9696 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
9697 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
9701 inline_costs += 10 * num_calls++;
9704 case CEE_LDVIRTFTN: {
9709 n = read32 (ip + 2);
9710 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9711 if (!cmethod || mono_loader_get_last_error ())
9713 mono_class_init (cmethod->klass);
9715 if (cfg->generic_sharing_context)
9716 context_used = mono_method_check_context_used (cmethod);
9718 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9719 if (check_linkdemand (cfg, method, cmethod))
9721 CHECK_CFG_EXCEPTION;
9722 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9723 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9729 args [1] = emit_get_rgctx_method (cfg, context_used,
9730 cmethod, MONO_RGCTX_INFO_METHOD);
9733 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
9735 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
9738 inline_costs += 10 * num_calls++;
9742 CHECK_STACK_OVF (1);
9744 n = read16 (ip + 2);
9746 EMIT_NEW_ARGLOAD (cfg, ins, n);
9751 CHECK_STACK_OVF (1);
9753 n = read16 (ip + 2);
9755 NEW_ARGLOADA (cfg, ins, n);
9756 MONO_ADD_INS (cfg->cbb, ins);
9764 n = read16 (ip + 2);
9766 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
9768 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
9772 CHECK_STACK_OVF (1);
9774 n = read16 (ip + 2);
9776 EMIT_NEW_LOCLOAD (cfg, ins, n);
9781 unsigned char *tmp_ip;
9782 CHECK_STACK_OVF (1);
9784 n = read16 (ip + 2);
9787 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
9793 EMIT_NEW_LOCLOADA (cfg, ins, n);
9802 n = read16 (ip + 2);
9804 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
9806 emit_stloc_ir (cfg, sp, header, n);
9813 if (sp != stack_start)
9815 if (cfg->method != method)
9817 * Inlining this into a loop in a parent could lead to
9818 * stack overflows which is different behavior than the
9819 * non-inlined case, thus disable inlining in this case.
9821 goto inline_failure;
9823 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
9824 ins->dreg = alloc_preg (cfg);
9825 ins->sreg1 = sp [0]->dreg;
9826 ins->type = STACK_PTR;
9827 MONO_ADD_INS (cfg->cbb, ins);
9829 cfg->flags |= MONO_CFG_HAS_ALLOCA;
9831 ins->flags |= MONO_INST_INIT;
9836 case CEE_ENDFILTER: {
9837 MonoExceptionClause *clause, *nearest;
9838 int cc, nearest_num;
9842 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
9844 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
9845 ins->sreg1 = (*sp)->dreg;
9846 MONO_ADD_INS (bblock, ins);
9847 start_new_bblock = 1;
9852 for (cc = 0; cc < header->num_clauses; ++cc) {
9853 clause = &header->clauses [cc];
9854 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
9855 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
9856 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
9862 if ((ip - header->code) != nearest->handler_offset)
9867 case CEE_UNALIGNED_:
9868 ins_flag |= MONO_INST_UNALIGNED;
9869 /* FIXME: record alignment? we can assume 1 for now */
9874 ins_flag |= MONO_INST_VOLATILE;
9878 ins_flag |= MONO_INST_TAILCALL;
9879 cfg->flags |= MONO_CFG_HAS_TAIL;
9880 /* Can't inline tail calls at this time */
9881 inline_costs += 100000;
9888 token = read32 (ip + 2);
9889 klass = mini_get_class (method, token, generic_context);
9890 CHECK_TYPELOAD (klass);
9891 if (generic_class_is_reference_type (cfg, klass))
9892 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
9894 mini_emit_initobj (cfg, *sp, NULL, klass);
9898 case CEE_CONSTRAINED_:
9900 token = read32 (ip + 2);
9901 if (method->wrapper_type != MONO_WRAPPER_NONE)
9902 constrained_call = (MonoClass *)mono_method_get_wrapper_data (method, token);
9904 constrained_call = mono_class_get_full (image, token, generic_context);
9905 CHECK_TYPELOAD (constrained_call);
9910 MonoInst *iargs [3];
9914 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
9915 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
9916 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
9917 /* emit_memset only works when val == 0 */
9918 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
9923 if (ip [1] == CEE_CPBLK) {
9924 MonoMethod *memcpy_method = get_memcpy_method ();
9925 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
9927 MonoMethod *memset_method = get_memset_method ();
9928 mono_emit_method_call (cfg, memset_method, iargs, NULL);
9938 ins_flag |= MONO_INST_NOTYPECHECK;
9940 ins_flag |= MONO_INST_NORANGECHECK;
9941 /* we ignore the no-nullcheck for now since we
9942 * really do it explicitly only when doing callvirt->call
9948 int handler_offset = -1;
9950 for (i = 0; i < header->num_clauses; ++i) {
9951 MonoExceptionClause *clause = &header->clauses [i];
9952 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
9953 handler_offset = clause->handler_offset;
9958 bblock->flags |= BB_EXCEPTION_UNSAFE;
9960 g_assert (handler_offset != -1);
9962 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
9963 MONO_INST_NEW (cfg, ins, OP_RETHROW);
9964 ins->sreg1 = load->dreg;
9965 MONO_ADD_INS (bblock, ins);
9967 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
9968 MONO_ADD_INS (bblock, ins);
9971 link_bblock (cfg, bblock, end_bblock);
9972 start_new_bblock = 1;
9980 CHECK_STACK_OVF (1);
9982 token = read32 (ip + 2);
9983 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !method->klass->image->dynamic) {
9984 MonoType *type = mono_type_create_from_typespec (image, token);
9985 token = mono_type_size (type, &ialign);
9987 MonoClass *klass = mono_class_get_full (image, token, generic_context);
9988 CHECK_TYPELOAD (klass);
9989 mono_class_init (klass);
9990 token = mono_class_value_size (klass, &align);
9992 EMIT_NEW_ICONST (cfg, ins, token);
9997 case CEE_REFANYTYPE: {
9998 MonoInst *src_var, *src;
10004 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
10006 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
10007 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
10008 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
10013 case CEE_READONLY_:
10026 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
10036 g_warning ("opcode 0x%02x not handled", *ip);
10040 if (start_new_bblock != 1)
10043 bblock->cil_length = ip - bblock->cil_code;
10044 bblock->next_bb = end_bblock;
10046 if (cfg->method == method && cfg->domainvar) {
10048 MonoInst *get_domain;
10050 cfg->cbb = init_localsbb;
10052 if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
10053 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
10056 get_domain->dreg = alloc_preg (cfg);
10057 MONO_ADD_INS (cfg->cbb, get_domain);
10059 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
10060 MONO_ADD_INS (cfg->cbb, store);
10063 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
10064 if (cfg->compile_aot)
10065 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
10066 mono_get_got_var (cfg);
10069 if (cfg->method == method && cfg->got_var)
10070 mono_emit_load_got_addr (cfg);
10075 cfg->cbb = init_localsbb;
10077 for (i = 0; i < header->num_locals; ++i) {
10078 MonoType *ptype = header->locals [i];
10079 int t = ptype->type;
10080 dreg = cfg->locals [i]->dreg;
10082 if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
10083 t = mono_class_enum_basetype (ptype->data.klass)->type;
10084 if (ptype->byref) {
10085 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
10086 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
10087 MONO_EMIT_NEW_ICONST (cfg, cfg->locals [i]->dreg, 0);
10088 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
10089 MONO_EMIT_NEW_I8CONST (cfg, cfg->locals [i]->dreg, 0);
10090 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
10091 MONO_INST_NEW (cfg, ins, OP_R8CONST);
10092 ins->type = STACK_R8;
10093 ins->inst_p0 = (void*)&r8_0;
10094 ins->dreg = alloc_dreg (cfg, STACK_R8);
10095 MONO_ADD_INS (init_localsbb, ins);
10096 EMIT_NEW_LOCSTORE (cfg, store, i, ins);
10097 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
10098 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
10099 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
10101 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
10106 if (cfg->init_ref_vars && cfg->method == method) {
10107 /* Emit initialization for ref vars */
10108 // FIXME: Avoid duplication initialization for IL locals.
10109 for (i = 0; i < cfg->num_varinfo; ++i) {
10110 MonoInst *ins = cfg->varinfo [i];
10112 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
10113 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
10117 /* Add a sequence point for method entry/exit events */
10119 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
10120 MONO_ADD_INS (init_localsbb, ins);
10121 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
10122 MONO_ADD_INS (cfg->bb_exit, ins);
10127 if (cfg->method == method) {
10128 MonoBasicBlock *bb;
10129 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10130 bb->region = mono_find_block_region (cfg, bb->real_offset);
10132 mono_create_spvar_for_region (cfg, bb->region);
10133 if (cfg->verbose_level > 2)
10134 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
10138 g_slist_free (class_inits);
10139 dont_inline = g_list_remove (dont_inline, method);
10141 if (inline_costs < 0) {
10144 /* Method is too large */
10145 mname = mono_method_full_name (method, TRUE);
10146 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
10147 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
10149 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
10150 mono_basic_block_free (original_bb);
10154 if ((cfg->verbose_level > 2) && (cfg->method == method))
10155 mono_print_code (cfg, "AFTER METHOD-TO-IR");
10157 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
10158 mono_basic_block_free (original_bb);
10159 return inline_costs;
10162 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
10169 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
10173 set_exception_type_from_invalid_il (cfg, method, ip);
10177 g_slist_free (class_inits);
10178 mono_basic_block_free (original_bb);
10179 dont_inline = g_list_remove (dont_inline, method);
10180 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Map a STORE*_MEMBASE_REG opcode (store a register into [basereg+offset])
 * to its STORE*_MEMBASE_IMM counterpart, used when the value being stored is
 * known to be a constant.  Opcodes without an immediate form abort.
 */
10185 store_membase_reg_to_store_membase_imm (int opcode)
10188 case OP_STORE_MEMBASE_REG:
10189 return OP_STORE_MEMBASE_IMM;
10190 case OP_STOREI1_MEMBASE_REG:
10191 return OP_STOREI1_MEMBASE_IMM;
10192 case OP_STOREI2_MEMBASE_REG:
10193 return OP_STOREI2_MEMBASE_IMM;
10194 case OP_STOREI4_MEMBASE_REG:
10195 return OP_STOREI4_MEMBASE_IMM;
10196 case OP_STOREI8_MEMBASE_REG:
10197 return OP_STOREI8_MEMBASE_IMM;
/* no immediate variant exists for any other store opcode */
10199 g_assert_not_reached ();
10205 #endif /* DISABLE_JIT */
/*
 * mono_op_to_op_imm:
 *
 *   Map OPCODE to the variant taking an immediate second operand
 * (e.g. OP_IADD -> OP_IADD_IMM).  Covers int/long ALU, shift and compare
 * opcodes, the membase stores, and on x86/amd64 a few arch-specific
 * opcodes (push, compare-membase, voidcall).
 * NOTE(review): the case labels matching several of these returns are not
 * visible in this excerpt; the pairing is assumed to follow the usual
 * OP_<X> -> OP_<X>_IMM pattern -- confirm against the full opcode list.
 */
10208 mono_op_to_op_imm (int opcode)
10212 return OP_IADD_IMM;
10214 return OP_ISUB_IMM;
10216 return OP_IDIV_IMM;
10218 return OP_IDIV_UN_IMM;
10220 return OP_IREM_IMM;
10222 return OP_IREM_UN_IMM;
10224 return OP_IMUL_IMM;
10226 return OP_IAND_IMM;
10230 return OP_IXOR_IMM;
10232 return OP_ISHL_IMM;
10234 return OP_ISHR_IMM;
10236 return OP_ISHR_UN_IMM;
10239 return OP_LADD_IMM;
10241 return OP_LSUB_IMM;
10243 return OP_LAND_IMM;
10247 return OP_LXOR_IMM;
10249 return OP_LSHL_IMM;
10251 return OP_LSHR_IMM;
10253 return OP_LSHR_UN_IMM;
10256 return OP_COMPARE_IMM;
10258 return OP_ICOMPARE_IMM;
10260 return OP_LCOMPARE_IMM;
10262 case OP_STORE_MEMBASE_REG:
10263 return OP_STORE_MEMBASE_IMM;
10264 case OP_STOREI1_MEMBASE_REG:
10265 return OP_STOREI1_MEMBASE_IMM;
10266 case OP_STOREI2_MEMBASE_REG:
10267 return OP_STOREI2_MEMBASE_IMM;
10268 case OP_STOREI4_MEMBASE_REG:
10269 return OP_STOREI4_MEMBASE_IMM;
/* arch-specific immediate forms only exist on x86/amd64 */
10271 #if defined(TARGET_X86) || defined (TARGET_AMD64)
10273 return OP_X86_PUSH_IMM;
10274 case OP_X86_COMPARE_MEMBASE_REG:
10275 return OP_X86_COMPARE_MEMBASE_IMM;
10277 #if defined(TARGET_AMD64)
10278 case OP_AMD64_ICOMPARE_MEMBASE_REG:
10279 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
10281 case OP_VOIDCALL_REG:
10282 return OP_VOIDCALL;
/* presumably reached from OP_LOCALLOC -- case label not visible here */
10290 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Map a CIL LDIND_* (load indirect) opcode to the corresponding JIT
 * OP_LOAD*_MEMBASE opcode.  Native-int and object-reference loads both
 * become the pointer-sized OP_LOAD_MEMBASE.  Aborts on unknown opcodes.
 */
10297 ldind_to_load_membase (int opcode)
10301 return OP_LOADI1_MEMBASE;
10303 return OP_LOADU1_MEMBASE;
10305 return OP_LOADI2_MEMBASE;
10307 return OP_LOADU2_MEMBASE;
10309 return OP_LOADI4_MEMBASE;
10311 return OP_LOADU4_MEMBASE;
10313 return OP_LOAD_MEMBASE;
10314 case CEE_LDIND_REF:
10315 return OP_LOAD_MEMBASE;
10317 return OP_LOADI8_MEMBASE;
10319 return OP_LOADR4_MEMBASE;
10321 return OP_LOADR8_MEMBASE;
10323 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Map a CIL STIND_* (store indirect) opcode to the corresponding JIT
 * OP_STORE*_MEMBASE_REG opcode.  Object-reference stores use the
 * pointer-sized OP_STORE_MEMBASE_REG.  Aborts on unknown opcodes.
 */
10330 stind_to_store_membase (int opcode)
10334 return OP_STOREI1_MEMBASE_REG;
10336 return OP_STOREI2_MEMBASE_REG;
10338 return OP_STOREI4_MEMBASE_REG;
10340 case CEE_STIND_REF:
10341 return OP_STORE_MEMBASE_REG;
10343 return OP_STOREI8_MEMBASE_REG;
10345 return OP_STORER4_MEMBASE_REG;
10347 return OP_STORER8_MEMBASE_REG;
10349 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map an OP_LOAD*_MEMBASE (load from [basereg+offset]) to the
 * absolute-address OP_LOAD*_MEM form, for targets where loads can take
 * a full memory address directly.  Currently only x86/amd64; the I8
 * form additionally requires 64-bit registers.
 */
10356 mono_load_membase_to_load_mem (int opcode)
10358 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
10359 #if defined(TARGET_X86) || defined(TARGET_AMD64)
10361 case OP_LOAD_MEMBASE:
10362 return OP_LOAD_MEM;
10363 case OP_LOADU1_MEMBASE:
10364 return OP_LOADU1_MEM;
10365 case OP_LOADU2_MEMBASE:
10366 return OP_LOADU2_MEM;
10367 case OP_LOADI4_MEMBASE:
10368 return OP_LOADI4_MEM;
10369 case OP_LOADU4_MEMBASE:
10370 return OP_LOADU4_MEM;
10371 #if SIZEOF_REGISTER == 8
10372 case OP_LOADI8_MEMBASE:
10373 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   Given an ALU opcode whose result is immediately stored back to memory
 * with STORE_OPCODE, return the fused x86/amd64 read-modify-write opcode
 * that operates directly on [basereg+offset] (e.g. add reg -> memory).
 * Only full-word membase stores qualify (on amd64 also the 8-byte store);
 * otherwise, and on other targets, the elided fallthrough applies.
 */
10382 op_to_op_dest_membase (int store_opcode, int opcode)
10384 #if defined(TARGET_X86)
10385 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
/* 32-bit register-operand forms */
10390 return OP_X86_ADD_MEMBASE_REG;
10392 return OP_X86_SUB_MEMBASE_REG;
10394 return OP_X86_AND_MEMBASE_REG;
10396 return OP_X86_OR_MEMBASE_REG;
10398 return OP_X86_XOR_MEMBASE_REG;
/* 32-bit immediate-operand forms */
10401 return OP_X86_ADD_MEMBASE_IMM;
10404 return OP_X86_SUB_MEMBASE_IMM;
10407 return OP_X86_AND_MEMBASE_IMM;
10410 return OP_X86_OR_MEMBASE_IMM;
10413 return OP_X86_XOR_MEMBASE_IMM;
10419 #if defined(TARGET_AMD64)
10420 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
/* 32-bit ops reuse the X86_* opcodes even on amd64 */
10425 return OP_X86_ADD_MEMBASE_REG;
10427 return OP_X86_SUB_MEMBASE_REG;
10429 return OP_X86_AND_MEMBASE_REG;
10431 return OP_X86_OR_MEMBASE_REG;
10433 return OP_X86_XOR_MEMBASE_REG;
10435 return OP_X86_ADD_MEMBASE_IMM;
10437 return OP_X86_SUB_MEMBASE_IMM;
10439 return OP_X86_AND_MEMBASE_IMM;
10441 return OP_X86_OR_MEMBASE_IMM;
10443 return OP_X86_XOR_MEMBASE_IMM;
/* 64-bit (long) forms */
10445 return OP_AMD64_ADD_MEMBASE_REG;
10447 return OP_AMD64_SUB_MEMBASE_REG;
10449 return OP_AMD64_AND_MEMBASE_REG;
10451 return OP_AMD64_OR_MEMBASE_REG;
10453 return OP_AMD64_XOR_MEMBASE_REG;
10456 return OP_AMD64_ADD_MEMBASE_IMM;
10459 return OP_AMD64_SUB_MEMBASE_IMM;
10462 return OP_AMD64_AND_MEMBASE_IMM;
10465 return OP_AMD64_OR_MEMBASE_IMM;
10468 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Fuse a compare-result opcode whose single-byte result is stored with
 * STORE_OPCODE into an x86/amd64 SETcc that writes the flag byte directly
 * to memory.  Only OP_STOREI1_MEMBASE_REG stores qualify.
 * NOTE(review): the case labels selecting SETEQ vs. SETNE are elided here;
 * presumably the equal/not-equal compare opcodes -- confirm in full source.
 */
10478 op_to_op_store_membase (int store_opcode, int opcode)
10480 #if defined(TARGET_X86) || defined(TARGET_AMD64)
10483 if (store_opcode == OP_STOREI1_MEMBASE_REG)
10484 return OP_X86_SETEQ_MEMBASE;
10486 if (store_opcode == OP_STOREI1_MEMBASE_REG)
10487 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Fold a memory load (LOAD_OPCODE) feeding the FIRST source operand of
 * OPCODE into a single x86/amd64 opcode with a memory operand
 * (push/compare against [basereg+offset]).  Only applies when the load
 * width matches what the fused opcode expects.
 */
10495 op_to_op_src1_membase (int load_opcode, int opcode)
10498 /* FIXME: This has sign extension issues */
/* byte compare-with-immediate against an unsigned byte load */
10500 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
10501 return OP_X86_COMPARE_MEMBASE8_IMM;
/* otherwise only full-word loads can be folded on x86 */
10504 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
10509 return OP_X86_PUSH_MEMBASE;
10510 case OP_COMPARE_IMM:
10511 case OP_ICOMPARE_IMM:
10512 return OP_X86_COMPARE_MEMBASE_IMM;
10515 return OP_X86_COMPARE_MEMBASE_REG;
10519 #ifdef TARGET_AMD64
10520 /* FIXME: This has sign extension issues */
10522 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
10523 return OP_X86_COMPARE_MEMBASE8_IMM;
10528 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10529 return OP_X86_PUSH_MEMBASE;
10531 /* FIXME: This only works for 32 bit immediates
10532 case OP_COMPARE_IMM:
10533 case OP_LCOMPARE_IMM:
10534 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10535 return OP_AMD64_COMPARE_MEMBASE_IMM;
10537 case OP_ICOMPARE_IMM:
10538 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10539 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
/* register compares: pick 64- vs 32-bit form to match the load width */
10543 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10544 return OP_AMD64_COMPARE_MEMBASE_REG;
10547 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10548 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Fold a memory load (LOAD_OPCODE) feeding the SECOND source operand of
 * OPCODE into a single x86/amd64 reg-op-memory opcode
 * (e.g. add [basereg+offset] into a register).  The load width selects
 * between the 32-bit X86_* and 64-bit AMD64_* forms.
 */
10557 op_to_op_src2_membase (int load_opcode, int opcode)
10560 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
10566 return OP_X86_COMPARE_REG_MEMBASE;
10568 return OP_X86_ADD_REG_MEMBASE;
10570 return OP_X86_SUB_REG_MEMBASE;
10572 return OP_X86_AND_REG_MEMBASE;
10574 return OP_X86_OR_REG_MEMBASE;
10576 return OP_X86_XOR_REG_MEMBASE;
10580 #ifdef TARGET_AMD64
/* 32-bit load: 32-bit forms (X86_* reused on amd64) */
10581 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)) {
10584 return OP_AMD64_ICOMPARE_REG_MEMBASE;
10586 return OP_X86_ADD_REG_MEMBASE;
10588 return OP_X86_SUB_REG_MEMBASE;
10590 return OP_X86_AND_REG_MEMBASE;
10592 return OP_X86_OR_REG_MEMBASE;
10594 return OP_X86_XOR_REG_MEMBASE;
/* pointer/64-bit load: full-width AMD64_* forms */
10596 } else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE)) {
10600 return OP_AMD64_COMPARE_REG_MEMBASE;
10602 return OP_AMD64_ADD_REG_MEMBASE;
10604 return OP_AMD64_SUB_REG_MEMBASE;
10606 return OP_AMD64_AND_REG_MEMBASE;
10608 return OP_AMD64_OR_REG_MEMBASE;
10610 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but refuses opcodes that this target
 * emulates in software (long shifts on 32-bit registers, and mul/div when
 * MONO_ARCH_EMULATE_MUL_DIV / MONO_ARCH_EMULATE_DIV are set), since those
 * are lowered to icalls and cannot take an immediate operand.  Falls
 * through to the plain immediate mapping for everything else.
 */
10619 mono_op_to_op_imm_noemul (int opcode)
10622 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
10628 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
10636 return mono_op_to_op_imm (opcode);
10640 #ifndef DISABLE_JIT
10643 * mono_handle_global_vregs:
10645 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
/*
 * mono_handle_global_vregs:
 *
 *   Pass over all bblocks and make every vreg that is referenced from more
 * than one bblock "global" by allocating a MonoInst variable for it
 * (mono_compile_create_var_for_vreg).  Afterwards, variables that turn out
 * to be used in only a single bblock are demoted back to plain local vregs
 * (marked MONO_INST_IS_DEAD), and the varinfo/vars tables are compacted so
 * the later liveness pass is faster.
 */
10649 mono_handle_global_vregs (MonoCompile *cfg)
10651 gint32 *vreg_to_bb;
10652 MonoBasicBlock *bb;
/* NOTE(review): suspect allocation size -- as written this is
 * (sizeof (gint32*) * cfg->next_vreg) + 1 bytes; the intent looks like
 * sizeof (gint32) * (cfg->next_vreg + 1).  The "+ 1" only adds a single
 * byte, not one extra element.  Confirm and fix precedence/element type. */
10655 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
10657 #ifdef MONO_ARCH_SIMD_INTRINSICS
10658 if (cfg->uses_simd_intrinsics)
10659 mono_simd_simplify_indirection (cfg);
10662 /* Find local vregs used in more than one bb */
10663 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10664 MonoInst *ins = bb->code;
10665 int block_num = bb->block_num;
10667 if (cfg->verbose_level > 2)
10668 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
10671 for (; ins; ins = ins->next) {
10672 const char *spec = INS_INFO (ins->opcode);
10673 int regtype = 0, regindex;
10676 if (G_UNLIKELY (cfg->verbose_level > 2))
10677 mono_print_ins (ins);
/* by this point all CIL opcodes must have been lowered to machine IR */
10679 g_assert (ins->opcode >= MONO_CEE_LAST);
/* visit dreg, sreg1, sreg2, sreg3 of the instruction in turn */
10681 for (regindex = 0; regindex < 4; regindex ++) {
10684 if (regindex == 0) {
10685 regtype = spec [MONO_INST_DEST];
10686 if (regtype == ' ')
10689 } else if (regindex == 1) {
10690 regtype = spec [MONO_INST_SRC1];
10691 if (regtype == ' ')
10694 } else if (regindex == 2) {
10695 regtype = spec [MONO_INST_SRC2];
10696 if (regtype == ' ')
10699 } else if (regindex == 3) {
10700 regtype = spec [MONO_INST_SRC3];
10701 if (regtype == ' ')
10706 #if SIZEOF_REGISTER == 4
10707 /* In the LLVM case, the long opcodes are not decomposed */
10708 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
10710 * Since some instructions reference the original long vreg,
10711 * and some reference the two component vregs, it is quite hard
10712 * to determine when it needs to be global. So be conservative.
10714 if (!get_vreg_to_inst (cfg, vreg)) {
10715 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
10717 if (cfg->verbose_level > 2)
10718 printf ("LONG VREG R%d made global.\n", vreg);
10722 * Make the component vregs volatile since the optimizations can
10723 * get confused otherwise.
10725 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
10726 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
10730 g_assert (vreg != -1);
10732 prev_bb = vreg_to_bb [vreg];
10733 if (prev_bb == 0) {
10734 /* 0 is a valid block num */
10735 vreg_to_bb [vreg] = block_num + 1;
10736 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* hardware registers are never turned into variables */
10737 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
10740 if (!get_vreg_to_inst (cfg, vreg)) {
10741 if (G_UNLIKELY (cfg->verbose_level > 2))
10742 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
/* pick the variable type from the register class: int/long/double/vtype */
10746 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
10749 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
10752 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
10755 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
10758 g_assert_not_reached ();
10762 /* Flag as having been used in more than one bb */
10763 vreg_to_bb [vreg] = -1;
10769 /* If a variable is used in only one bblock, convert it into a local vreg */
10770 for (i = 0; i < cfg->num_varinfo; i++) {
10771 MonoInst *var = cfg->varinfo [i];
10772 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
10774 switch (var->type) {
10780 #if SIZEOF_REGISTER == 8
10783 #if !defined(TARGET_X86) && !defined(MONO_ARCH_SOFT_FLOAT)
10784 /* Enabling this screws up the fp stack on x86 */
10787 /* Arguments are implicitly global */
10788 /* Putting R4 vars into registers doesn't work currently */
10789 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg) {
10791 * Make that the variable's liveness interval doesn't contain a call, since
10792 * that would cause the lvreg to be spilled, making the whole optimization
10795 /* This is too slow for JIT compilation */
10797 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
10799 int def_index, call_index, ins_index;
10800 gboolean spilled = FALSE;
10805 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
10806 const char *spec = INS_INFO (ins->opcode);
10808 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
10809 def_index = ins_index;
/* NOTE(review): both disjuncts test SRC1/sreg1 -- the second clause was
 * almost certainly meant to test spec [MONO_INST_SRC2] / ins->sreg2, so
 * uses through sreg2 currently never veto this optimization.  Confirm
 * against upstream and fix. */
10811 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
10812 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
10813 if (call_index > def_index) {
10819 if (MONO_IS_CALL (ins))
10820 call_index = ins_index;
10830 if (G_UNLIKELY (cfg->verbose_level > 2))
10831 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
/* demote: mark dead and unlink so it becomes an ordinary local vreg */
10832 var->flags |= MONO_INST_IS_DEAD;
10833 cfg->vreg_to_inst [var->dreg] = NULL;
10840 * Compress the varinfo and vars tables so the liveness computation is faster and
10841 * takes up less space.
10844 for (i = 0; i < cfg->num_varinfo; ++i) {
10845 MonoInst *var = cfg->varinfo [i];
10846 if (pos < i && cfg->locals_start == i)
10847 cfg->locals_start = pos;
10848 if (!(var->flags & MONO_INST_IS_DEAD)) {
10850 cfg->varinfo [pos] = cfg->varinfo [i];
10851 cfg->varinfo [pos]->inst_c0 = pos;
10852 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
10853 cfg->vars [pos].idx = pos;
10854 #if SIZEOF_REGISTER == 4
10855 if (cfg->varinfo [pos]->type == STACK_I8) {
10856 /* Modify the two component vars too */
10859 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
10860 var1->inst_c0 = pos;
10861 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
10862 var1->inst_c0 = pos;
10869 cfg->num_varinfo = pos;
10870 if (cfg->locals_start > cfg->num_varinfo)
10871 cfg->locals_start = cfg->num_varinfo;
10875 * mono_spill_global_vars:
10877 * Generate spill code for variables which are not allocated to registers,
10878 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
10879 * code is generated which could be optimized by the local optimization passes.
10882 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
/* NOTE(review): this listing is elided — the embedded original line numbers jump,
 * so some declarations (e.g. 'spec2', 'lvregs', 'lvregs_len' init), closing braces
 * and #else/#endif lines are not visible here. Comments below only state what the
 * visible lines establish. */
10884 MonoBasicBlock *bb;
10886 int orig_next_vreg;
/* vreg -> lvreg cache: remembers which local vreg already holds a variable's value */
10887 guint32 *vreg_to_lvreg;
10889 guint32 i, lvregs_len;
10890 gboolean dest_has_lvreg = FALSE;
/* Maps an ins-spec regtype char ('i'/'l'/'f'/'x') to a stack type for alloc_dreg () */
10891 guint32 stacktypes [128];
/* First/last instruction (and containing bblock) touching each vreg — used to emit
 * instruction-precise live range markers below */
10892 MonoInst **live_range_start, **live_range_end;
10893 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
10895 *need_local_opts = FALSE;
10897 memset (spec2, 0, sizeof (spec2));
10899 /* FIXME: Move this function to mini.c */
10900 stacktypes ['i'] = STACK_PTR;
10901 stacktypes ['l'] = STACK_I8;
10902 stacktypes ['f'] = STACK_R8;
10903 #ifdef MONO_ARCH_SIMD_INTRINSICS
10904 stacktypes ['x'] = STACK_VTYPE;
10907 #if SIZEOF_REGISTER == 4
10908 /* Create MonoInsts for longs */
/* On 32-bit targets a 64-bit variable occupies two component vregs (dreg+1 = low
 * word, dreg+2 = high word); give each component its own OP_REGOFFSET slot. */
10909 for (i = 0; i < cfg->num_varinfo; i++) {
10910 MonoInst *ins = cfg->varinfo [i];
10912 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
10913 switch (ins->type) {
10918 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
10921 g_assert (ins->opcode == OP_REGOFFSET);
10923 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
10925 tree->opcode = OP_REGOFFSET;
10926 tree->inst_basereg = ins->inst_basereg;
10927 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
10929 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
10931 tree->opcode = OP_REGOFFSET;
10932 tree->inst_basereg = ins->inst_basereg;
10933 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
10943 /* FIXME: widening and truncation */
10946 * As an optimization, when a variable allocated to the stack is first loaded into
10947 * an lvreg, we will remember the lvreg and use it the next time instead of loading
10948 * the variable again.
10950 orig_next_vreg = cfg->next_vreg;
10951 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
/* Fixed-capacity list of cached vregs; capacity 1024 is asserted at every insert */
10952 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
10956 * These arrays contain the first and last instructions accessing a given
10958 * Since we emit bblocks in the same order we process them here, and we
10959 * don't split live ranges, these will precisely describe the live range of
10960 * the variable, i.e. the instruction range where a valid value can be found
10961 * in the variable's location.
10962 * The live range is computed using the liveness info computed by the liveness pass.
10963 * We can't use vmv->range, since that is an abstract live range, and we need
10964 * one which is instruction precise.
10965 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
10967 /* FIXME: Only do this if debugging info is requested */
10968 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
10969 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
10970 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
10971 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
10973 /* Add spill loads/stores */
10974 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10977 if (cfg->verbose_level > 2)
10978 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
10980 /* Clear vreg_to_lvreg array */
/* The lvreg cache is only valid within one bblock */
10981 for (i = 0; i < lvregs_len; i++)
10982 vreg_to_lvreg [lvregs [i]] = 0;
10986 MONO_BB_FOR_EACH_INS (bb, ins) {
10987 const char *spec = INS_INFO (ins->opcode);
10988 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
10989 gboolean store, no_lvreg;
10990 int sregs [MONO_MAX_SRC_REGS];
10992 if (G_UNLIKELY (cfg->verbose_level > 2))
10993 mono_print_ins (ins);
10995 if (ins->opcode == OP_NOP)
10999 * We handle LDADDR here as well, since it can only be decomposed
11000 * when variable addresses are known.
11002 if (ins->opcode == OP_LDADDR) {
11003 MonoInst *var = ins->inst_p0;
11005 if (var->opcode == OP_VTARG_ADDR) {
11006 /* Happens on SPARC/S390 where vtypes are passed by reference */
11007 MonoInst *vtaddr = var->inst_left;
11008 if (vtaddr->opcode == OP_REGVAR) {
/* Address is already in a register: LDADDR becomes a plain move */
11009 ins->opcode = OP_MOVE;
11010 ins->sreg1 = vtaddr->dreg;
11012 else if (var->inst_left->opcode == OP_REGOFFSET) {
/* Address lives in a stack slot: load it */
11013 ins->opcode = OP_LOAD_MEMBASE;
11014 ins->inst_basereg = vtaddr->inst_basereg;
11015 ins->inst_offset = vtaddr->inst_offset;
11019 g_assert (var->opcode == OP_REGOFFSET);
/* Ordinary stack variable: its address is basereg + offset */
11021 ins->opcode = OP_ADD_IMM;
11022 ins->sreg1 = var->inst_basereg;
11023 ins->inst_imm = var->inst_offset;
/* The decomposed LDADDR creates opportunities for the local opt passes */
11026 *need_local_opts = TRUE;
11027 spec = INS_INFO (ins->opcode);
/* High-level (CIL-range) opcodes must all be lowered by this point */
11030 if (ins->opcode < MONO_CEE_LAST) {
11031 mono_print_ins (ins);
11032 g_assert_not_reached ();
11036 * Store opcodes have destbasereg in the dreg, but in reality, it is an
11040 if (MONO_IS_STORE_MEMBASE (ins)) {
/* Temporarily swap dreg/sreg2 so the destbasereg is treated as a source;
 * swapped back after processing (see the matching swap below) */
11041 tmp_reg = ins->dreg;
11042 ins->dreg = ins->sreg2;
11043 ins->sreg2 = tmp_reg;
/* Build a synthetic spec reflecting the swapped roles */
11046 spec2 [MONO_INST_DEST] = ' ';
11047 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
11048 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
11049 spec2 [MONO_INST_SRC3] = ' ';
11051 } else if (MONO_IS_STORE_MEMINDEX (ins))
11052 g_assert_not_reached ();
11057 if (G_UNLIKELY (cfg->verbose_level > 2)) {
11058 printf ("\t %.3s %d", spec, ins->dreg);
11059 num_sregs = mono_inst_get_src_registers (ins, sregs);
11060 for (srcindex = 0; srcindex < 3; ++srcindex)
11061 printf (" %d", sregs [srcindex]);
/***************/
/*    DREG     */
/***************/
11068 regtype = spec [MONO_INST_DEST];
11069 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
11072 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
/* The dreg is a global variable: either rename it to the allocated hreg
 * or spill the result to the variable's stack slot */
11073 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
11074 MonoInst *store_ins;
11076 MonoInst *def_ins = ins;
11077 int dreg = ins->dreg; /* The original vreg */
11079 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
11081 if (var->opcode == OP_REGVAR) {
/* Variable got a hardware register: just rename */
11082 ins->dreg = var->dreg;
11083 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
11085 * Instead of emitting a load+store, use a _membase opcode.
11087 g_assert (var->opcode == OP_REGOFFSET);
11088 if (ins->opcode == OP_MOVE) {
11092 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
11093 ins->inst_basereg = var->inst_basereg;
11094 ins->inst_offset = var->inst_offset;
11097 spec = INS_INFO (ins->opcode);
11101 g_assert (var->opcode == OP_REGOFFSET);
11103 prev_dreg = ins->dreg;
11105 /* Invalidate any previous lvreg for this vreg */
11106 vreg_to_lvreg [ins->dreg] = 0;
11110 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
/* Soft-float: R8 values are handled as 64-bit integers */
11112 store_opcode = OP_STOREI8_MEMBASE_REG;
/* Compute into a fresh lvreg, then store it to the variable's slot */
11115 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
11117 if (regtype == 'l') {
/* 64-bit value on a 32-bit target: store the two word halves separately */
11118 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
11119 mono_bblock_insert_after_ins (bb, ins, store_ins);
11120 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
11121 mono_bblock_insert_after_ins (bb, ins, store_ins);
11122 def_ins = store_ins;
11125 g_assert (store_opcode != OP_STOREV_MEMBASE);
11127 /* Try to fuse the store into the instruction itself */
11128 /* FIXME: Add more instructions */
11129 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
/* Constant result: turn the whole thing into a store-immediate */
11130 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
11131 ins->inst_imm = ins->inst_c0;
11132 ins->inst_destbasereg = var->inst_basereg;
11133 ins->inst_offset = var->inst_offset;
11134 spec = INS_INFO (ins->opcode);
11135 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
/* Plain move: replace it with a direct store of the source */
11136 ins->opcode = store_opcode;
11137 ins->inst_destbasereg = var->inst_basereg;
11138 ins->inst_offset = var->inst_offset;
/* Keep the store-opcode dreg/sreg2 convention consistent (see swap above) */
11142 tmp_reg = ins->dreg;
11143 ins->dreg = ins->sreg2;
11144 ins->sreg2 = tmp_reg;
11147 spec2 [MONO_INST_DEST] = ' ';
11148 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
11149 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
11150 spec2 [MONO_INST_SRC3] = ' ';
11152 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
11153 // FIXME: The backends expect the base reg to be in inst_basereg
11154 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
11156 ins->inst_basereg = var->inst_basereg;
11157 ins->inst_offset = var->inst_offset;
11158 spec = INS_INFO (ins->opcode);
11160 /* printf ("INS: "); mono_print_ins (ins); */
11161 /* Create a store instruction */
11162 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
11164 /* Insert it after the instruction */
11165 mono_bblock_insert_after_ins (bb, ins, store_ins);
11167 def_ins = store_ins;
11170 * We can't assign ins->dreg to var->dreg here, since the
11171 * sregs could use it. So set a flag, and do it after
11174 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
11175 dest_has_lvreg = TRUE;
/* Record the defining instruction to start the live range */
11180 if (def_ins && !live_range_start [dreg]) {
11181 live_range_start [dreg] = def_ins;
11182 live_range_start_bb [dreg] = bb;
/***************/
/*    SREGS    */
/***************/
11189 num_sregs = mono_inst_get_src_registers (ins, sregs);
11190 for (srcindex = 0; srcindex < 3; ++srcindex) {
11191 regtype = spec [MONO_INST_SRC1 + srcindex];
11192 sreg = sregs [srcindex];
11194 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
11195 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
/* The sreg is a global variable: rename, reuse a cached lvreg, fuse the
 * load into the instruction, or emit an explicit load before it */
11196 MonoInst *var = get_vreg_to_inst (cfg, sreg);
11197 MonoInst *use_ins = ins;
11198 MonoInst *load_ins;
11199 guint32 load_opcode;
11201 if (var->opcode == OP_REGVAR) {
11202 sregs [srcindex] = var->dreg;
11203 //mono_inst_set_src_registers (ins, sregs);
11204 live_range_end [sreg] = use_ins;
11205 live_range_end_bb [sreg] = bb;
11209 g_assert (var->opcode == OP_REGOFFSET);
11211 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
11213 g_assert (load_opcode != OP_LOADV_MEMBASE);
11215 if (vreg_to_lvreg [sreg]) {
11216 g_assert (vreg_to_lvreg [sreg] != -1);
11218 /* The variable is already loaded to an lvreg */
11219 if (G_UNLIKELY (cfg->verbose_level > 2))
11220 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
11221 sregs [srcindex] = vreg_to_lvreg [sreg];
11222 //mono_inst_set_src_registers (ins, sregs);
11226 /* Try to fuse the load into the instruction */
11227 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
11228 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
11229 sregs [0] = var->inst_basereg;
11230 //mono_inst_set_src_registers (ins, sregs);
11231 ins->inst_offset = var->inst_offset;
11232 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
11233 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
11234 sregs [1] = var->inst_basereg;
11235 //mono_inst_set_src_registers (ins, sregs);
11236 ins->inst_offset = var->inst_offset;
11238 if (MONO_IS_REAL_MOVE (ins)) {
/* The explicit load below makes the move redundant */
11239 ins->opcode = OP_NOP;
11242 //printf ("%d ", srcindex); mono_print_ins (ins);
11244 sreg = alloc_dreg (cfg, stacktypes [regtype]);
11246 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
/* Cache the freshly loaded value so later uses skip the reload */
11247 if (var->dreg == prev_dreg) {
11249 * sreg refers to the value loaded by the load
11250 * emitted below, but we need to use ins->dreg
11251 * since it refers to the store emitted earlier.
11255 g_assert (sreg != -1);
11256 vreg_to_lvreg [var->dreg] = sreg;
11257 g_assert (lvregs_len < 1024);
11258 lvregs [lvregs_len ++] = var->dreg;
11262 sregs [srcindex] = sreg;
11263 //mono_inst_set_src_registers (ins, sregs);
11265 if (regtype == 'l') {
/* 64-bit load on a 32-bit target: load high word then low word */
11266 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
11267 mono_bblock_insert_before_ins (bb, ins, load_ins);
11268 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
11269 mono_bblock_insert_before_ins (bb, ins, load_ins);
11270 use_ins = load_ins;
11273 #if SIZEOF_REGISTER == 4
11274 g_assert (load_opcode != OP_LOADI8_MEMBASE);
11276 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
11277 mono_bblock_insert_before_ins (bb, ins, load_ins);
11278 use_ins = load_ins;
/* Only original (pre-pass) vregs have live-range slots allocated */
11282 if (var->dreg < orig_next_vreg) {
11283 live_range_end [var->dreg] = use_ins;
11284 live_range_end_bb [var->dreg] = bb;
11288 mono_inst_set_src_registers (ins, sregs);
11290 if (dest_has_lvreg) {
/* Deferred from the DREG phase: now that the sregs are processed it is
 * safe to cache ins->dreg as the lvreg holding the variable's value */
11291 g_assert (ins->dreg != -1);
11292 vreg_to_lvreg [prev_dreg] = ins->dreg;
11293 g_assert (lvregs_len < 1024);
11294 lvregs [lvregs_len ++] = prev_dreg;
11295 dest_has_lvreg = FALSE;
/* Undo the earlier dreg/sreg2 swap for store opcodes */
11299 tmp_reg = ins->dreg;
11300 ins->dreg = ins->sreg2;
11301 ins->sreg2 = tmp_reg;
11304 if (MONO_IS_CALL (ins)) {
11305 /* Clear vreg_to_lvreg array */
/* Calls clobber volatile registers, so cached lvregs are invalid after one */
11306 for (i = 0; i < lvregs_len; i++)
11307 vreg_to_lvreg [lvregs [i]] = 0;
11309 } else if (ins->opcode == OP_NOP) {
11311 MONO_INST_NULLIFY_SREGS (ins);
11314 if (cfg->verbose_level > 2)
11315 mono_print_ins_index (1, ins);
11318 /* Extend the live range based on the liveness info */
11319 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
11320 for (i = 0; i < cfg->num_varinfo; i ++) {
11321 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
11323 if (vreg_is_volatile (cfg, vi->vreg))
11324 /* The liveness info is incomplete */
11327 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
11328 /* Live from at least the first ins of this bb */
11329 live_range_start [vi->vreg] = bb->code;
11330 live_range_start_bb [vi->vreg] = bb;
11333 if (mono_bitset_test_fast (bb->live_out_set, i)) {
11334 /* Live at least until the last ins of this bb */
11335 live_range_end [vi->vreg] = bb->last_ins;
11336 live_range_end_bb [vi->vreg] = bb;
11342 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
11344 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
11345 * by storing the current native offset into MonoMethodVar->live_range_start/end.
11347 if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
11348 for (i = 0; i < cfg->num_varinfo; ++i) {
11349 int vreg = MONO_VARINFO (cfg, i)->vreg;
11352 if (live_range_start [vreg]) {
11353 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
11355 ins->inst_c1 = vreg;
11356 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
11358 if (live_range_end [vreg]) {
11359 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
11361 ins->inst_c1 = vreg;
/* If the range ends at the bblock's last ins, append so the marker stays last */
11362 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
11363 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
11365 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
/* Live-range arrays were g_new'd above, so release with g_free (the
 * mempool allocations are freed with the compile) */
11371 g_free (live_range_start);
11372 g_free (live_range_end);
11373 g_free (live_range_start_bb);
11374 g_free (live_range_end_bb);
11379 * - use 'iadd' instead of 'int_add'
11380 * - handling ovf opcodes: decompose in method_to_ir.
11381 * - unify iregs/fregs
11382 * -> partly done, the missing parts are:
11383 * - a more complete unification would involve unifying the hregs as well, so
11384 * code wouldn't need if (fp) all over the place. but that would mean the hregs
11385 * would no longer map to the machine hregs, so the code generators would need to
11386 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
11387 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
11388 * fp/non-fp branches speeds it up by about 15%.
11389 * - use sext/zext opcodes instead of shifts
11391 * - get rid of TEMPLOADs if possible and use vregs instead
11392 * - clean up usage of OP_P/OP_ opcodes
11393 * - cleanup usage of DUMMY_USE
11394 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
11396 * - set the stack type and allocate a dreg in the EMIT_NEW macros
11397 * - get rid of all the <foo>2 stuff when the new JIT is ready.
11398 * - make sure handle_stack_args () is called before the branch is emitted
11399 * - when the new IR is done, get rid of all unused stuff
11400 * - COMPARE/BEQ as separate instructions or unify them ?
11401 * - keeping them separate allows specialized compare instructions like
11402 * compare_imm, compare_membase
11403 * - most back ends unify fp compare+branch, fp compare+ceq
11404 * - integrate mono_save_args into inline_method
11405 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRACH_BLOCK2
11406 * - handle long shift opts on 32 bit platforms somehow: they require
11407 * 3 sregs (2 for arg1 and 1 for arg2)
11408 * - make byref a 'normal' type.
11409 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
11410 * variable if needed.
11411 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
11412 * like inline_method.
11413 * - remove inlining restrictions
11414 * - fix LNEG and enable cfold of INEG
11415 * - generalize x86 optimizations like ldelema as a peephole optimization
11416 * - add store_mem_imm for amd64
11417 * - optimize the loading of the interruption flag in the managed->native wrappers
11418 * - avoid special handling of OP_NOP in passes
11419 * - move code inserting instructions into one function/macro.
11420 * - try a coalescing phase after liveness analysis
11421 * - add float -> vreg conversion + local optimizations on !x86
11422 * - figure out how to handle decomposed branches during optimizations, ie.
11423 * compare+branch, op_jump_table+op_br etc.
11424 * - promote RuntimeXHandles to vregs
11425 * - vtype cleanups:
11426 * - add a NEW_VARLOADA_VREG macro
11427 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
11428 * accessing vtype fields.
11429 * - get rid of I8CONST on 64 bit platforms
11430 * - dealing with the increase in code size due to branches created during opcode
11432 * - use extended basic blocks
11433 * - all parts of the JIT
11434 * - handle_global_vregs () && local regalloc
11435 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
11436 * - sources of increase in code size:
11439 * - isinst and castclass
11440 * - lvregs not allocated to global registers even if used multiple times
11441 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
11443 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
11444 * - add all micro optimizations from the old JIT
11445 * - put tree optimizations into the deadce pass
11446 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
11447 * specific function.
11448 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
11449 * fcompare + branchCC.
11450 * - create a helper function for allocating a stack slot, taking into account
11451 * MONO_CFG_HAS_SPILLUP.
11453 * - merge the ia64 switch changes.
11454 * - optimize mono_regstate2_alloc_int/float.
11455 * - fix the pessimistic handling of variables accessed in exception handler blocks.
11456 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
11457 * parts of the tree could be separated by other instructions, killing the tree
11458 * arguments, or stores killing loads etc. Also, should we fold loads into other
11459 * instructions if the result of the load is used multiple times ?
11460 * - make the REM_IMM optimization in mini-x86.c arch-independent.
11461 * - LAST MERGE: 108395.
11462 * - when returning vtypes in registers, generate IR and append it to the end of the
11463 * last bb instead of doing it in the epilog.
11464 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
11472 - When to decompose opcodes:
11473 - earlier: this makes some optimizations hard to implement, since the low level IR
11474 no longer contains the necessary information. But it is easier to do.
11475 - later: harder to implement, enables more optimizations.
11476 - Branches inside bblocks:
11477 - created when decomposing complex opcodes.
11478 - branches to another bblock: harmless, but not tracked by the branch
11479 optimizations, so need to branch to a label at the start of the bblock.
11480 - branches to inside the same bblock: very problematic, trips up the local
11481 reg allocator. Can be fixed by splitting the current bblock, but that is a
11482 complex operation, since some local vregs can become global vregs etc.
11483 - Local/global vregs:
11484 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
11485 local register allocator.
11486 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
11487 structure, created by mono_create_var (). Assigned to hregs or the stack by
11488 the global register allocator.
11489 - When to do optimizations like alu->alu_imm:
11490 - earlier -> saves work later on since the IR will be smaller/simpler
11491 - later -> can work on more instructions
11492 - Handling of valuetypes:
11493 - When a vtype is pushed on the stack, a new temporary is created, an
11494 instruction computing its address (LDADDR) is emitted and pushed on
11495 the stack. Need to optimize cases when the vtype is used immediately as in
11496 argument passing, stloc etc.
11497 - Instead of the to_end stuff in the old JIT, simply call the function handling
11498 the values on the stack before emitting the last instruction of the bb.
11501 #endif /* DISABLE_JIT */