2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
22 #ifdef HAVE_SYS_TIME_H
30 #include <mono/utils/memcheck.h>
32 #include <mono/metadata/assembly.h>
33 #include <mono/metadata/loader.h>
34 #include <mono/metadata/tabledefs.h>
35 #include <mono/metadata/class.h>
36 #include <mono/metadata/object.h>
37 #include <mono/metadata/exception.h>
38 #include <mono/metadata/opcodes.h>
39 #include <mono/metadata/mono-endian.h>
40 #include <mono/metadata/tokentype.h>
41 #include <mono/metadata/tabledefs.h>
42 #include <mono/metadata/marshal.h>
43 #include <mono/metadata/debug-helpers.h>
44 #include <mono/metadata/mono-debug.h>
45 #include <mono/metadata/gc-internal.h>
46 #include <mono/metadata/security-manager.h>
47 #include <mono/metadata/threads-types.h>
48 #include <mono/metadata/security-core-clr.h>
49 #include <mono/metadata/monitor.h>
50 #include <mono/metadata/profiler-private.h>
51 #include <mono/metadata/profiler.h>
52 #include <mono/utils/mono-compiler.h>
53 #include <mono/metadata/mono-basic-block.h>
60 #include "jit-icalls.h"
62 #include "debugger-agent.h"
64 #define BRANCH_COST 100
65 #define INLINE_LENGTH_LIMIT 20
66 #define INLINE_FAILURE do {\
67 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE))\
70 #define CHECK_CFG_EXCEPTION do {\
71 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
74 #define METHOD_ACCESS_FAILURE do { \
75 char *method_fname = mono_method_full_name (method, TRUE); \
76 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
77 cfg->exception_type = MONO_EXCEPTION_METHOD_ACCESS; \
78 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
79 g_free (method_fname); \
80 g_free (cil_method_fname); \
81 goto exception_exit; \
83 #define FIELD_ACCESS_FAILURE do { \
84 char *method_fname = mono_method_full_name (method, TRUE); \
85 char *field_fname = mono_field_full_name (field); \
86 cfg->exception_type = MONO_EXCEPTION_FIELD_ACCESS; \
87 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
88 g_free (method_fname); \
89 g_free (field_fname); \
90 goto exception_exit; \
92 #define GENERIC_SHARING_FAILURE(opcode) do { \
93 if (cfg->generic_sharing_context) { \
94 if (cfg->verbose_level > 2) \
95 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
96 cfg->exception_type = MONO_EXCEPTION_GENERIC_SHARING_FAILED; \
97 goto exception_exit; \
101 /* Determine whether 'ins' represents a load of the 'this' argument */
102 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
/* Forward declarations for helpers defined later in this file. */
104 static int ldind_to_load_membase (int opcode);
105 static int stind_to_store_membase (int opcode);
107 int mono_op_to_op_imm (int opcode);
108 int mono_op_to_op_imm_noemul (int opcode);
110 MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
111 void mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native);
112 void mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass);
114 /* helper methods signature */
/* Trampoline signatures; defined elsewhere in the runtime (extern). */
115 extern MonoMethodSignature *helper_sig_class_init_trampoline;
116 extern MonoMethodSignature *helper_sig_domain_get;
117 extern MonoMethodSignature *helper_sig_generic_class_init_trampoline;
118 extern MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
119 extern MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline;
120 extern MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm;
123 * Instruction metadata
131 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
132 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
138 #if SIZEOF_REGISTER == 8
143 /* keep in sync with the enum in mini.h */
146 #include "mini-ops.h"
151 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
152 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
154 * This should contain the index of the last sreg + 1. This is not the same
155 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
157 const gint8 ins_sreg_counts[] = {
158 #include "mini-ops.h"
163 #define MONO_INIT_VARINFO(vi,id) do { \
164 (vi)->range.first_use.pos.bid = 0xffff; \
/*
 * mono_inst_set_src_registers:
 *   Copy the three source vregs from the REGS array into INS.
 */
170 mono_inst_set_src_registers (MonoInst *ins, int *regs)
172 ins->sreg1 = regs [0];
173 ins->sreg2 = regs [1];
174 ins->sreg3 = regs [2];
/* Allocate a new integer vreg in CFG (thin wrapper over alloc_ireg ()). */
178 mono_alloc_ireg (MonoCompile *cfg)
180 return alloc_ireg (cfg);
/* Allocate a new floating point vreg in CFG. */
184 mono_alloc_freg (MonoCompile *cfg)
186 return alloc_freg (cfg);
/* Allocate a new pointer-sized vreg in CFG. */
190 mono_alloc_preg (MonoCompile *cfg)
192 return alloc_preg (cfg);
/* Allocate a destination vreg whose kind matches STACK_TYPE. */
196 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
198 return alloc_dreg (cfg, stack_type);
/*
 * mono_type_to_regmove:
 *   Map TYPE to the register-to-register move opcode used for values of
 *   that type.
 */
202 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
208 switch (type->type) {
211 case MONO_TYPE_BOOLEAN:
223 case MONO_TYPE_FNPTR:
/* Reference types are pointer-sized moves. */
225 case MONO_TYPE_CLASS:
226 case MONO_TYPE_STRING:
227 case MONO_TYPE_OBJECT:
228 case MONO_TYPE_SZARRAY:
229 case MONO_TYPE_ARRAY:
233 #if SIZEOF_REGISTER == 8
242 case MONO_TYPE_VALUETYPE:
/* Enums are handled as their underlying integral type. */
243 if (type->data.klass->enumtype) {
244 type = mono_class_enum_basetype (type->data.klass);
247 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
250 case MONO_TYPE_TYPEDBYREF:
/* Generic instances: decide based on the generic type definition. */
252 case MONO_TYPE_GENERICINST:
253 type = &type->data.generic_class->container_class->byval_arg;
/* Type variables can only reach this point under generic sharing. */
257 g_assert (cfg->generic_sharing_context);
/* NOTE(review): message says "type_to_regstore" but this function is
 * mono_type_to_regmove — looks like a stale copy/paste; confirm upstream. */
260 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/*
 * mono_print_bb:
 *   Debug helper: print BB's in/out edges and every instruction it
 *   contains, prefixed with MSG.
 */
266 mono_print_bb (MonoBasicBlock *bb, const char *msg)
271 printf ("\n%s %d: [IN: ", msg, bb->block_num);
272 for (i = 0; i < bb->in_count; ++i)
273 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
275 for (i = 0; i < bb->out_count; ++i)
276 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
278 for (tree = bb->code; tree; tree = tree->next)
279 mono_print_ins_index (-1, tree);
283 * Can't put this at the beginning, since other files reference stuff from this
288 #define UNVERIFIED do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto unverified; } while (0)
290 #define GET_BBLOCK(cfg,tblock,ip) do { \
291 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
293 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
294 NEW_BBLOCK (cfg, (tblock)); \
295 (tblock)->cil_code = (ip); \
296 ADD_BBLOCK (cfg, (tblock)); \
300 #if defined(TARGET_X86) || defined(TARGET_AMD64)
301 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
302 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
303 (dest)->dreg = alloc_preg ((cfg)); \
304 (dest)->sreg1 = (sr1); \
305 (dest)->sreg2 = (sr2); \
306 (dest)->inst_imm = (imm); \
307 (dest)->backend.shift_amount = (shift); \
308 MONO_ADD_INS ((cfg)->cbb, (dest)); \
312 #if SIZEOF_REGISTER == 8
313 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
314 /* FIXME: Need to add many more cases */ \
315 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
317 int dr = alloc_preg (cfg); \
318 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
319 (ins)->sreg2 = widen->dreg; \
323 #define ADD_WIDEN_OP(ins, arg1, arg2)
326 #define ADD_BINOP(op) do { \
327 MONO_INST_NEW (cfg, ins, (op)); \
329 ins->sreg1 = sp [0]->dreg; \
330 ins->sreg2 = sp [1]->dreg; \
331 type_from_op (ins, sp [0], sp [1]); \
333 /* Have to insert a widening op */ \
334 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
335 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
336 MONO_ADD_INS ((cfg)->cbb, (ins)); \
337 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
340 #define ADD_UNOP(op) do { \
341 MONO_INST_NEW (cfg, ins, (op)); \
343 ins->sreg1 = sp [0]->dreg; \
344 type_from_op (ins, sp [0], NULL); \
346 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
347 MONO_ADD_INS ((cfg)->cbb, (ins)); \
348 *sp++ = mono_decompose_opcode (cfg, ins); \
351 #define ADD_BINCOND(next_block) do { \
354 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
355 cmp->sreg1 = sp [0]->dreg; \
356 cmp->sreg2 = sp [1]->dreg; \
357 type_from_op (cmp, sp [0], sp [1]); \
359 type_from_op (ins, sp [0], sp [1]); \
360 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
361 GET_BBLOCK (cfg, tblock, target); \
362 link_bblock (cfg, bblock, tblock); \
363 ins->inst_true_bb = tblock; \
364 if ((next_block)) { \
365 link_bblock (cfg, bblock, (next_block)); \
366 ins->inst_false_bb = (next_block); \
367 start_new_bblock = 1; \
369 GET_BBLOCK (cfg, tblock, ip); \
370 link_bblock (cfg, bblock, tblock); \
371 ins->inst_false_bb = tblock; \
372 start_new_bblock = 2; \
374 if (sp != stack_start) { \
375 handle_stack_args (cfg, stack_start, sp - stack_start); \
376 CHECK_UNVERIFIABLE (cfg); \
378 MONO_ADD_INS (bblock, cmp); \
379 MONO_ADD_INS (bblock, ins); \
383 * link_bblock: Links two basic blocks
385 * links two basic blocks in the control flow graph, the 'from'
386 * argument is the starting block and the 'to' argument is the block
387 * the control flow ends to after 'from'.
390 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
392 MonoBasicBlock **newa;
/* Optional tracing of the CFG edge being added. */
396 if (from->cil_code) {
398 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
400 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
403 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
405 printf ("edge from entry to exit\n");
/* Nothing to do if the edge is already recorded in from->out_bb. */
410 for (i = 0; i < from->out_count; ++i) {
411 if (to == from->out_bb [i]) {
/* Grow from's out-edge array by one slot (mempool allocated, never freed). */
417 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
418 for (i = 0; i < from->out_count; ++i) {
419 newa [i] = from->out_bb [i];
/* Mirror the edge on the 'to' side, again skipping duplicates. */
427 for (i = 0; i < to->in_count; ++i) {
428 if (from == to->in_bb [i]) {
434 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
435 for (i = 0; i < to->in_count; ++i) {
436 newa [i] = to->in_bb [i];
/* Public wrapper around the static link_bblock (). */
445 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
447 link_bblock (cfg, from, to);
451 * mono_find_block_region:
453 * We mark each basic block with a region ID. We use that to avoid BB
454 * optimizations when blocks are in different regions.
457 * A region token that encodes where this region is, and information
458 * about the clause owner for this block.
460 * The region encodes the try/catch/filter clause that owns this block
461 * as well as the type. -1 is a special value that represents a block
462 * that is in none of try/catch/filter.
465 mono_find_block_region (MonoCompile *cfg, int offset)
467 MonoMethodHeader *header = cfg->header;
468 MonoExceptionClause *clause;
471 for (i = 0; i < header->num_clauses; ++i) {
472 clause = &header->clauses [i];
/* A filter region spans [filter_offset, handler_offset). */
473 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
474 (offset < (clause->handler_offset)))
475 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
/* Inside a handler body: token = (clause index + 1) << 8 | kind | flags. */
477 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
478 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
479 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
480 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
481 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
483 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
/* Inside the protected (try) range of the clause. */
486 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
487 return ((i + 1) << 8) | clause->flags;
/*
 * mono_find_final_block:
 *   Collect (as a GList) the handler bblocks of every clause of kind TYPE
 *   that control leaves when transferring from IP to TARGET.
 */
494 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
496 MonoMethodHeader *header = cfg->header;
497 MonoExceptionClause *clause;
498 MonoBasicBlock *handler;
502 for (i = 0; i < header->num_clauses; ++i) {
503 clause = &header->clauses [i];
/* The branch leaves this clause iff IP is inside it and TARGET is not. */
504 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
505 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
506 if (clause->flags == type) {
507 handler = cfg->cil_offset_to_bb [clause->handler_offset];
509 res = g_list_append (res, handler);
/*
 * mono_create_spvar_for_region:
 *   Return (creating and caching in cfg->spvars on first use) the
 *   stack-pointer save variable associated with REGION.
 */
517 mono_create_spvar_for_region (MonoCompile *cfg, int region)
521 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
525 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
526 /* prevent it from being register allocated */
527 var->flags |= MONO_INST_INDIRECT;
529 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/* Look up (without creating) the exception variable cached for OFFSET. */
533 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
535 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/*
 * mono_create_exvar_for_offset:
 *   Return (creating and caching in cfg->exvars on first use) the
 *   exception-object variable for the handler starting at OFFSET.
 */
539 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
543 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
547 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
548 /* prevent it from being register allocated */
549 var->flags |= MONO_INST_INDIRECT;
551 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
557 * Returns the type used in the eval stack when @type is loaded.
558 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
561 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
565 inst->klass = klass = mono_class_from_mono_type (type);
/* Byref values are managed pointers on the eval stack. */
567 inst->type = STACK_MP;
572 switch (type->type) {
574 inst->type = STACK_INV;
/* Small integral types widen to I4 on the eval stack. */
578 case MONO_TYPE_BOOLEAN:
584 inst->type = STACK_I4;
589 case MONO_TYPE_FNPTR:
590 inst->type = STACK_PTR;
/* Reference types. */
592 case MONO_TYPE_CLASS:
593 case MONO_TYPE_STRING:
594 case MONO_TYPE_OBJECT:
595 case MONO_TYPE_SZARRAY:
596 case MONO_TYPE_ARRAY:
597 inst->type = STACK_OBJ;
601 inst->type = STACK_I8;
605 inst->type = STACK_R8;
607 case MONO_TYPE_VALUETYPE:
/* Enums behave as their underlying integral type. */
608 if (type->data.klass->enumtype) {
609 type = mono_class_enum_basetype (type->data.klass);
613 inst->type = STACK_VTYPE;
616 case MONO_TYPE_TYPEDBYREF:
617 inst->klass = mono_defaults.typed_reference_class;
618 inst->type = STACK_VTYPE;
/* Generic instances: decide based on the generic type definition. */
620 case MONO_TYPE_GENERICINST:
621 type = &type->data.generic_class->container_class->byval_arg;
624 case MONO_TYPE_MVAR :
625 /* FIXME: all the arguments must be references for now,
626 * later look inside cfg and see if the arg num is
/* Type variables only reach here under generic sharing. */
629 g_assert (cfg->generic_sharing_context);
630 inst->type = STACK_OBJ;
633 g_error ("unknown type 0x%02x in eval stack type", type->type);
638 * The following tables are used to quickly validate the IL code in type_from_op ().
/* Result eval-stack type of binary numeric ops, indexed [src1][src2]. */
641 bin_num_table [STACK_MAX] [STACK_MAX] = {
642 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
643 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
644 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
645 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
646 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
647 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
648 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
649 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* NOTE(review): the row below appears to belong to the negation result
 * table (used as neg_table in type_from_op) — its declaration is not
 * visible in this listing; confirm against the full source. */
654 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
657 /* reduce the size of this table */
/* Result type of integer-only binary ops (and/or/xor/...), [src1][src2]. */
659 bin_int_table [STACK_MAX] [STACK_MAX] = {
660 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
661 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
662 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
663 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
664 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
665 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
666 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
667 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Validity of comparisons between stack types: 0 = invalid; nonzero =
 * comparable; type_from_op additionally masks with '& 1' for ordered
 * compares, so only entries with bit 0 set are valid there. */
671 bin_comp_table [STACK_MAX] [STACK_MAX] = {
672 /* Inv i L p F & O vt */
674 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
675 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
676 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
677 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
678 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
679 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
680 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
683 /* reduce the size of this table */
/* Result type of shift ops, indexed [shifted value][shift amount]. */
685 shift_table [STACK_MAX] [STACK_MAX] = {
686 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
687 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
688 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
689 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
690 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
691 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
692 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
693 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
697 * Tables to map from the non-specific opcode to the matching
698 * type-specific opcode.
/* Each entry is the delta ADDED to the generic opcode to obtain the
 * type-specific opcode for that eval-stack type, e.g.
 * ins->opcode += binops_op_map [ins->type] in type_from_op (). */
700 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
702 binops_op_map [STACK_MAX] = {
703 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
706 /* handles from CEE_NEG to CEE_CONV_U8 */
708 unops_op_map [STACK_MAX] = {
709 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
712 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
714 ovfops_op_map [STACK_MAX] = {
715 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
718 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
720 ovf2ops_op_map [STACK_MAX] = {
721 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
724 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
726 ovf3ops_op_map [STACK_MAX] = {
727 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
730 /* handles from CEE_BEQ to CEE_BLT_UN */
732 beqops_op_map [STACK_MAX] = {
733 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
736 /* handles from CEE_CEQ to CEE_CLT_UN */
738 ceqops_op_map [STACK_MAX] = {
739 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
743 * Sets ins->type (the type on the eval stack) according to the
744 * type of the opcode and the arguments to it.
745 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
747 * FIXME: this function sets ins->type unconditionally in some cases, but
748 * it should set it to invalid for some types (a conv.x on an object)
751 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
753 switch (ins->opcode) {
/* Arithmetic binops: result type from bin_num_table, then specialize
 * the opcode for that type via the delta map. */
760 /* FIXME: check unverifiable args for STACK_MP */
761 ins->type = bin_num_table [src1->type] [src2->type];
762 ins->opcode += binops_op_map [ins->type];
/* Integer-only binops (and/or/xor/...). */
769 ins->type = bin_int_table [src1->type] [src2->type];
770 ins->opcode += binops_op_map [ins->type];
/* Shifts: result type follows the shifted value. */
775 ins->type = shift_table [src1->type] [src2->type];
776 ins->opcode += binops_op_map [ins->type];
/* Compare: choose long/float/int variant from the operand kinds. */
781 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
782 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
783 ins->opcode = OP_LCOMPARE;
784 else if (src1->type == STACK_R8)
785 ins->opcode = OP_FCOMPARE;
787 ins->opcode = OP_ICOMPARE;
/* Immediate compares: both comp-table indices come from src1. */
789 case OP_ICOMPARE_IMM:
790 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
791 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
792 ins->opcode = OP_LCOMPARE_IMM;
/* Conditional branches. */
804 ins->opcode += beqops_op_map [src1->type];
807 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
808 ins->opcode += ceqops_op_map [src1->type];
/* Ordered compares: only comp-table entries with bit 0 set are valid. */
814 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
815 ins->opcode += ceqops_op_map [src1->type];
/* Unary negation. */
819 ins->type = neg_table [src1->type];
820 ins->opcode += unops_op_map [ins->type];
/* Bitwise not: only defined on integral stack types. */
823 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
824 ins->type = src1->type;
826 ins->type = STACK_INV;
827 ins->opcode += unops_op_map [ins->type];
/* Conversions down to 32 bit. */
833 ins->type = STACK_I4;
834 ins->opcode += unops_op_map [src1->type];
/* conv.r.un: unsigned integer -> floating point. */
837 ins->type = STACK_R8;
838 switch (src1->type) {
841 ins->opcode = OP_ICONV_TO_R_UN;
844 ins->opcode = OP_LCONV_TO_R_UN;
/* Overflow-checked conversions to (u)int8/16/32. */
848 case CEE_CONV_OVF_I1:
849 case CEE_CONV_OVF_U1:
850 case CEE_CONV_OVF_I2:
851 case CEE_CONV_OVF_U2:
852 case CEE_CONV_OVF_I4:
853 case CEE_CONV_OVF_U4:
854 ins->type = STACK_I4;
855 ins->opcode += ovf3ops_op_map [src1->type];
/* Overflow-checked unsigned conversions to native int. */
857 case CEE_CONV_OVF_I_UN:
858 case CEE_CONV_OVF_U_UN:
859 ins->type = STACK_PTR;
860 ins->opcode += ovf2ops_op_map [src1->type];
862 case CEE_CONV_OVF_I1_UN:
863 case CEE_CONV_OVF_I2_UN:
864 case CEE_CONV_OVF_I4_UN:
865 case CEE_CONV_OVF_U1_UN:
866 case CEE_CONV_OVF_U2_UN:
867 case CEE_CONV_OVF_U4_UN:
868 ins->type = STACK_I4;
869 ins->opcode += ovf2ops_op_map [src1->type];
/* conv.u: conversion to pointer-sized unsigned. */
872 ins->type = STACK_PTR;
873 switch (src1->type) {
875 ins->opcode = OP_ICONV_TO_U;
879 #if SIZEOF_REGISTER == 8
880 ins->opcode = OP_LCONV_TO_U;
882 ins->opcode = OP_MOVE;
886 ins->opcode = OP_LCONV_TO_U;
889 ins->opcode = OP_FCONV_TO_U;
/* Conversions to 64 bit. */
895 ins->type = STACK_I8;
896 ins->opcode += unops_op_map [src1->type];
898 case CEE_CONV_OVF_I8:
899 case CEE_CONV_OVF_U8:
900 ins->type = STACK_I8;
901 ins->opcode += ovf3ops_op_map [src1->type];
903 case CEE_CONV_OVF_U8_UN:
904 case CEE_CONV_OVF_I8_UN:
905 ins->type = STACK_I8;
906 ins->opcode += ovf2ops_op_map [src1->type];
/* Conversions to floating point. */
910 ins->type = STACK_R8;
911 ins->opcode += unops_op_map [src1->type];
914 ins->type = STACK_R8;
918 ins->type = STACK_I4;
919 ins->opcode += ovfops_op_map [src1->type];
924 ins->type = STACK_PTR;
925 ins->opcode += ovfops_op_map [src1->type];
/* Overflow-checked arithmetic: not defined for floats. */
933 ins->type = bin_num_table [src1->type] [src2->type];
934 ins->opcode += ovfops_op_map [src1->type];
935 if (ins->type == STACK_R8)
936 ins->type = STACK_INV;
/* Loads: result stack type determined by the load width. */
938 case OP_LOAD_MEMBASE:
939 ins->type = STACK_PTR;
941 case OP_LOADI1_MEMBASE:
942 case OP_LOADU1_MEMBASE:
943 case OP_LOADI2_MEMBASE:
944 case OP_LOADU2_MEMBASE:
945 case OP_LOADI4_MEMBASE:
946 case OP_LOADU4_MEMBASE:
947 ins->type = STACK_PTR;
949 case OP_LOADI8_MEMBASE:
950 ins->type = STACK_I8;
952 case OP_LOADR4_MEMBASE:
953 case OP_LOADR8_MEMBASE:
954 ins->type = STACK_R8;
957 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
/* Managed pointers get object_class as a conservative klass. */
961 if (ins->type == STACK_MP)
962 ins->klass = mono_defaults.object_class;
/* NOTE(review): the row below looks like a load-opcode -> stack-type
 * table — its declaration is not visible in this listing; confirm. */
967 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
973 param_table [STACK_MAX] [STACK_MAX] = {
/*
 * check_values_to_signature:
 *   Static compatibility check of the argument instructions in ARGS
 *   against SIG (byref-ness, reference kinds, float-ness). The exact
 *   return convention is not fully visible here — presumably nonzero
 *   means compatible; confirm against the full source.
 */
978 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
982 switch (args->type) {
992 for (i = 0; i < sig->param_count; ++i) {
993 switch (args [i].type) {
997 if (!sig->params [i]->byref)
1001 if (sig->params [i]->byref)
1003 switch (sig->params [i]->type) {
1004 case MONO_TYPE_CLASS:
1005 case MONO_TYPE_STRING:
1006 case MONO_TYPE_OBJECT:
1007 case MONO_TYPE_SZARRAY:
1008 case MONO_TYPE_ARRAY:
1015 if (sig->params [i]->byref)
1017 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1026 /*if (!param_table [args [i].type] [sig->params [i]->type])
1034 * When we need a pointer to the current domain many times in a method, we
1035 * call mono_domain_get() once and we store the result in a local variable.
1036 * This function returns the variable that represents the MonoDomain*.
1038 inline static MonoInst *
1039 mono_get_domainvar (MonoCompile *cfg)
/* Lazily created; the same variable is shared by all users in the method. */
1041 if (!cfg->domainvar)
1042 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1043 return cfg->domainvar;
1047 * The got_var contains the address of the Global Offset Table when AOT
1051 mono_get_got_var (MonoCompile *cfg)
/* Only architectures that need an explicit GOT register use this,
 * and only when compiling AOT. */
1053 #ifdef MONO_ARCH_NEED_GOT_VAR
1054 if (!cfg->compile_aot)
1056 if (!cfg->got_var) {
1057 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1059 return cfg->got_var;
/* Return (lazily creating) the variable holding the runtime generic
 * context / vtable. Only valid under generic sharing. */
1066 mono_get_vtable_var (MonoCompile *cfg)
1068 g_assert (cfg->generic_sharing_context);
1070 if (!cfg->rgctx_var) {
1071 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1072 /* force the var to be stack allocated */
1073 cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
1076 return cfg->rgctx_var;
/* Map an eval-stack type back to a MonoType, using ins->klass for
 * managed pointers and value types. */
1080 type_from_stack_type (MonoInst *ins) {
1081 switch (ins->type) {
1082 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1083 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1084 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1085 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1087 return &ins->klass->this_arg;
1088 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1089 case STACK_VTYPE: return &ins->klass->byval_arg;
1091 g_error ("stack type %d to monotype not handled\n", ins->type);
/* Map a MonoType to its eval-stack type (STACK_I4, STACK_OBJ, ...),
 * after stripping enum wrappers via the underlying type. */
1096 static G_GNUC_UNUSED int
1097 type_to_stack_type (MonoType *t)
1099 t = mono_type_get_underlying_type (t);
1103 case MONO_TYPE_BOOLEAN:
1106 case MONO_TYPE_CHAR:
1113 case MONO_TYPE_FNPTR:
/* Reference types. */
1115 case MONO_TYPE_CLASS:
1116 case MONO_TYPE_STRING:
1117 case MONO_TYPE_OBJECT:
1118 case MONO_TYPE_SZARRAY:
1119 case MONO_TYPE_ARRAY:
1127 case MONO_TYPE_VALUETYPE:
1128 case MONO_TYPE_TYPEDBYREF:
/* Generic instances: value types vs. reference instantiations differ. */
1130 case MONO_TYPE_GENERICINST:
1131 if (mono_type_generic_inst_is_valuetype (t))
1137 g_assert_not_reached ();
/*
 * array_access_to_klass:
 *   Return the element class corresponding to a CEE_LDELEM_x or
 *   CEE_STELEM_x opcode.
 */
1144 array_access_to_klass (int opcode)
1148 return mono_defaults.byte_class;
1150 return mono_defaults.uint16_class;
1153 return mono_defaults.int_class;
1156 return mono_defaults.sbyte_class;
1159 return mono_defaults.int16_class;
1162 return mono_defaults.int32_class;
1164 return mono_defaults.uint32_class;
1167 return mono_defaults.int64_class;
1170 return mono_defaults.single_class;
1173 return mono_defaults.double_class;
1174 case CEE_LDELEM_REF:
1175 case CEE_STELEM_REF:
1176 return mono_defaults.object_class;
1178 g_assert_not_reached ();
1184 * We try to share variables when possible
1187 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1192 /* inlining can result in deeper stacks */
1193 if (slot >= cfg->header->max_stack)
1194 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* Key into cfg->intvars: one cached var per (stack type, slot) pair. */
1196 pos = ins->type - 1 + slot * STACK_MAX;
1198 switch (ins->type) {
/* Reuse an already-created variable for this slot/type if present. */
1205 if ((vnum = cfg->intvars [pos]))
1206 return cfg->varinfo [vnum];
1207 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1208 cfg->intvars [pos] = res->inst_c0;
1211 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/*
 * mono_save_token_info:
 *   When compiling AOT, remember the image/token pair under KEY in
 *   cfg->token_info_hash so the token can be resolved again at load time.
 */
1217 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1220 * Don't use this if a generic_context is set, since that means AOT can't
1221 * look up the method using just the image+token.
1222 * table == 0 means this is a reference made from a wrapper.
1224 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1225 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1226 jump_info_token->image = image;
1227 jump_info_token->token = token;
1228 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1233 * This function is called to handle items that are left on the evaluation stack
1234 * at basic block boundaries. What happens is that we save the values to local variables
1235 * and we reload them later when first entering the target basic block (with the
1236 * handle_loaded_temps () function).
1237 * A single join point will use the same variables (stored in the array bb->out_stack or
1238 * bb->in_stack, if the basic block is before or after the join point).
1240 * This function needs to be called _before_ emitting the last instruction of
1241 * the bb (i.e. before emitting a branch).
1242 * If the stack merge fails at a join point, cfg->unverifiable is set.
1245 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1248 MonoBasicBlock *bb = cfg->cbb;
1249 MonoBasicBlock *outb;
1250 MonoInst *inst, **locals;
1255 if (cfg->verbose_level > 3)
1256 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* First exit from this bblock: decide which temps will hold the values. */
1257 if (!bb->out_scount) {
1258 bb->out_scount = count;
1259 //printf ("bblock %d has out:", bb->block_num);
1261 for (i = 0; i < bb->out_count; ++i) {
1262 outb = bb->out_bb [i];
1263 /* exception handlers are linked, but they should not be considered for stack args */
1264 if (outb->flags & BB_EXCEPTION_HANDLER)
1266 //printf (" %d", outb->block_num);
/* Reuse a successor's existing in_stack if one was already assigned. */
1267 if (outb->in_stack) {
1269 bb->out_stack = outb->in_stack;
1275 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1276 for (i = 0; i < count; ++i) {
1278 * try to reuse temps already allocated for this purpose, if they occupy the same
1279 * stack slot and if they are of the same type.
1280 * This won't cause conflicts since if 'local' is used to
1281 * store one of the values in the in_stack of a bblock, then
1282 * the same variable will be used for the same outgoing stack
1284 * This doesn't work when inlining methods, since the bblocks
1285 * in the inlined methods do not inherit their in_stack from
1286 * the bblock they are inlined to. See bug #58863 for an
1289 if (cfg->inlined_method)
1290 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1292 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate the chosen temps to every successor's in_stack. */
1297 for (i = 0; i < bb->out_count; ++i) {
1298 outb = bb->out_bb [i];
1299 /* exception handlers are linked, but they should not be considered for stack args */
1300 if (outb->flags & BB_EXCEPTION_HANDLER)
1302 if (outb->in_scount) {
/* Stack depth mismatch at a join point: invalid IL. */
1303 if (outb->in_scount != bb->out_scount) {
1304 cfg->unverifiable = TRUE;
1307 continue; /* check they are the same locals */
1309 outb->in_scount = count;
1310 outb->in_stack = bb->out_stack;
1313 locals = bb->out_stack;
/* Spill the eval stack into the shared temps; sp then points at them. */
1315 for (i = 0; i < count; ++i) {
1316 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1317 inst->cil_code = sp [i]->cil_code;
1318 sp [i] = locals [i];
1319 if (cfg->verbose_level > 3)
1320 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1324 * It is possible that the out bblocks already have in_stack assigned, and
1325 * the in_stacks differ. In this case, we will store to all the different
1332 /* Find a bblock which has a different in_stack */
1334 while (bindex < bb->out_count) {
1335 outb = bb->out_bb [bindex];
1336 /* exception handlers are linked, but they should not be considered for stack args */
1337 if (outb->flags & BB_EXCEPTION_HANDLER) {
1341 if (outb->in_stack != locals) {
1342 for (i = 0; i < count; ++i) {
1343 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1344 inst->cil_code = sp [i]->cil_code;
1345 sp [i] = locals [i];
1346 if (cfg->verbose_level > 3)
1347 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1349 locals = outb->in_stack;
/*
 * mini_emit_load_intf_reg_vtable:
 * Emit IR that loads interface_offsets [klass->interface_id] into intf_reg.
 * The interface-offsets table is laid out immediately BEFORE the vtable in
 * memory, so the JIT path is a single load at a negative constant offset
 * from vtable_reg.
 * NOTE(review): this listing is line-sampled; brace/return-type lines are
 * missing between the numbered lines below.
 */
1358 /* Emit code which loads interface_offsets [klass->interface_id]
1359 * The array is stored in memory before vtable.
1362 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1364 if (cfg->compile_aot) {
1365 int ioffset_reg = alloc_preg (cfg);
1366 int iid_reg = alloc_preg (cfg);
/* AOT: the interface id is unknown at compile time — emit an
 * ADJUSTED_IID patch (presumably pre-scaled/negated for the table
 * layout — confirm against the AOT patch resolver), add it to the
 * vtable address and load through the result. */
1368 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1369 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1370 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
/* JIT: the offsets table grows downwards from the vtable pointer,
 * hence the negative, (iid + 1)-scaled offset. */
1373 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
/*
 * mini_emit_load_intf_bit_reg_class:
 * Emit IR leaving a nonzero value in intf_bit_reg iff the MonoClass in
 * klass_reg implements interface @klass.  The test reads one byte of the
 * class's interface_bitmap and isolates bit (iid & 7) of byte (iid >> 3).
 */
1378 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1379 * stored in "klass_reg" implements the interface "klass".
1382 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1384 int ibitmap_reg = alloc_preg (cfg);
1385 int ibitmap_byte_reg = alloc_preg (cfg);
1387 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap));
1389 if (cfg->compile_aot) {
/* AOT: iid is a patch constant, so both the byte index (iid >> 3)
 * and the bit mask (1 << (iid & 7)) must be computed at run time. */
1390 int iid_reg = alloc_preg (cfg);
1391 int shifted_iid_reg = alloc_preg (cfg);
1392 int ibitmap_byte_address_reg = alloc_preg (cfg);
1393 int masked_iid_reg = alloc_preg (cfg);
1394 int iid_one_bit_reg = alloc_preg (cfg);
1395 int iid_bit_reg = alloc_preg (cfg);
1396 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1397 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1398 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1399 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1400 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1401 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1402 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1403 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT: iid is a compile-time constant — fold byte index and mask.
 * NOTE(review): this path uses a signed LOADI1 where the vtable
 * variant below uses LOADU1; harmless for a masked byte test, but
 * worth confirming the asymmetry is intentional. */
1405 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1406 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
/*
 * mini_emit_load_intf_bit_reg_vtable:
 * Same interface-bitmap test as mini_emit_load_intf_bit_reg_class, but the
 * bitmap pointer is read from a MonoVTable (in vtable_reg) instead of a
 * MonoClass.  Leaves a nonzero value in intf_bit_reg iff the vtable's class
 * implements interface @klass.
 */
1411 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1412 * stored in "vtable_reg" implements the interface "klass".
1415 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1417 int ibitmap_reg = alloc_preg (cfg);
1418 int ibitmap_byte_reg = alloc_preg (cfg);
1420 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap));
1422 if (cfg->compile_aot) {
/* AOT: compute byte index (iid >> 3) and bit mask (1 << (iid & 7))
 * at run time from the patched interface id. */
1423 int iid_reg = alloc_preg (cfg);
1424 int shifted_iid_reg = alloc_preg (cfg);
1425 int ibitmap_byte_address_reg = alloc_preg (cfg);
1426 int masked_iid_reg = alloc_preg (cfg);
1427 int iid_one_bit_reg = alloc_preg (cfg);
1428 int iid_bit_reg = alloc_preg (cfg);
1429 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1430 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, shifted_iid_reg, iid_reg, 3);
1431 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1432 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1433 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, masked_iid_reg, iid_reg, 7);
1434 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1435 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1436 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT: constant-fold the byte offset and the bit mask. */
1438 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1439 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
/*
 * mini_emit_max_iid_check:
 * Guard for the interface-bitmap tests above: verifies that @klass's
 * interface id does not exceed the max interface id held in max_iid_reg
 * (if it does, the class cannot implement the interface).  On failure,
 * branches to false_target when one is given; otherwise (presumably an
 * else on a missing line — confirm) raises InvalidCastException.
 */
1444 * Emit code which checks whenever the interface id of @klass is smaller than
1445 * than the value given by max_iid_reg.
1448 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1449 MonoBasicBlock *false_target)
1451 if (cfg->compile_aot) {
/* AOT: the iid is a patch constant, so compare register-register. */
1452 int iid_reg = alloc_preg (cfg);
1453 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1454 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1457 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
/* Unsigned compare: max_iid < iid means "not implemented". */
1459 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1461 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1464 /* Same as above, but obtains max_iid from a vtable */
1466 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1467 MonoBasicBlock *false_target)
1469 int max_iid_reg = alloc_preg (cfg);
/* max_interface_id is a 16-bit field: zero-extending LOADU2 load,
 * then delegate the actual comparison/branch. */
1471 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1472 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1475 /* Same as above, but obtains max_iid from a klass */
1477 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1478 MonoBasicBlock *false_target)
1480 int max_iid_reg = alloc_preg (cfg);
/* Same as the vtable variant, but reads MonoClass.max_interface_id. */
1482 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1483 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * mini_emit_isninst_cast_inst:
 * Emit an "is instance of non-interface class" test using the supertypes
 * table: supertypes [klass->idepth - 1] of the candidate class must equal
 * @klass.  The target class can be given as a runtime MonoInst
 * (klass_ins, for generic sharing), an AOT class constant, or a
 * compile-time pointer immediate.  Branches to true_target on match;
 * false_target is taken when the inheritance depth is too small.
 */
1487 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1489 int idepth_reg = alloc_preg (cfg);
1490 int stypes_reg = alloc_preg (cfg);
1491 int stype = alloc_preg (cfg);
/* Supertypes arrays are only guaranteed to hold at least
 * MONO_DEFAULT_SUPERTABLE_SIZE entries: for deeper targets, check the
 * candidate's idepth first to avoid indexing past its table. */
1493 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1494 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1495 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1496 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1498 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1499 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
/* Three ways to materialize the expected class pointer: runtime inst
 * (shared code), AOT patch constant, or immediate (plain JIT only). */
1501 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1502 } else if (cfg->compile_aot) {
1503 int const_reg = alloc_preg (cfg);
1504 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1505 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1507 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1509 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/* Convenience wrapper: isinst test against a compile-time class only
 * (no runtime MonoInst for the target class). */
1513 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1515 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
/*
 * mini_emit_iface_cast:
 * Interface cast test against the vtable in vtable_reg: first the max-iid
 * range guard, then the interface-bitmap bit test.  Nonzero bit branches
 * to true_target; otherwise (presumably when no targets are supplied —
 * confirm on the missing lines) InvalidCastException is raised.
 */
1519 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1521 int intf_reg = alloc_preg (cfg);
1523 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1524 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1525 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1527 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1529 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1533 * Variant of the above that takes a register to the class, not the vtable.
/* Same max-iid guard + bitmap bit test as mini_emit_iface_cast, reading
 * from a MonoClass in klass_reg instead of a MonoVTable. */
1536 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1538 int intf_bit_reg = alloc_preg (cfg);
1540 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1541 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1542 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1544 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1546 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * mini_emit_class_check_inst:
 * Exact-class equality check: compares the MonoClass in klass_reg against
 * @klass and throws InvalidCastException on mismatch.  The expected class
 * comes from klass_inst's dreg when given (the guarding "if (klass_inst)"
 * is on a missing line — confirm), from an AOT class constant, or from a
 * pointer immediate in the plain-JIT case.
 */
1550 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1553 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1554 } else if (cfg->compile_aot) {
1555 int const_reg = alloc_preg (cfg);
1556 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1557 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1559 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1561 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Convenience wrapper: exact-class check with a compile-time class only. */
1565 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1567 return mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
/*
 * mini_emit_class_check_branch:
 * Compare the MonoClass in klass_reg against @klass and branch to @target
 * with the caller-supplied branch opcode (e.g. OP_PBEQ / OP_PBNE_UN).
 * AOT uses a class-constant register; the JIT compares against a pointer
 * immediate.
 */
1571 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1573 if (cfg->compile_aot) {
1574 int const_reg = alloc_preg (cfg);
1575 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1576 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1578 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1580 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
/* Forward declaration: castclass recurses through this wrapper for the
 * element class of arrays-of-arrays below. */
1584 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/*
 * mini_emit_castclass_inst:
 * Emit a full castclass check of the object in obj_reg (whose MonoClass is
 * already in klass_reg) against compile-time @klass, raising
 * InvalidCastException on failure.  Arrays are checked by rank + element
 * class; other classes through the supertypes table.  klass_inst optionally
 * carries the target class as a runtime value for generic sharing.
 * object_is_null is the block to branch to when a sub-check is satisfied
 * vacuously (e.g. enum/object element-type shortcuts).
 */
1587 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1590 int rank_reg = alloc_preg (cfg);
1591 int eclass_reg = alloc_preg (cfg);
/* Array path (the guarding condition is on a missing line — confirm):
 * shared-instance target classes are not supported for arrays. */
1593 g_assert (!klass_inst);
/* rank must match exactly ... */
1594 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
1595 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1596 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1597 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* ... then the element class must be compatible. */
1598 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
1599 if (klass->cast_class == mono_defaults.object_class) {
/* object[] accepts any reference element type; only enums (whose
 * parent is System.Enum) need the extra exact check. */
1600 int parent_reg = alloc_preg (cfg);
1601 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
1602 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1603 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1604 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
/* Element type System.Enum: accept Enum itself or any enum type. */
1605 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1606 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1607 } else if (klass->cast_class == mono_defaults.enum_class) {
1608 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1609 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1610 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1612 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1613 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1616 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1617 /* Check that the object is a vector too */
/* SZARRAY (vector) targets must have bounds == NULL. */
1618 int bounds_reg = alloc_preg (cfg);
1619 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
1620 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1621 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-array path: supertypes-table check, throwing on depth or
 * class mismatch (cf. mini_emit_isninst_cast_inst, which branches). */
1624 int idepth_reg = alloc_preg (cfg);
1625 int stypes_reg = alloc_preg (cfg);
1626 int stype = alloc_preg (cfg);
1628 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1629 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1630 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1631 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1633 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1634 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1635 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
/* Convenience wrapper: castclass with a compile-time target class only. */
1640 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1642 return mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/*
 * mini_emit_memset:
 * Emit inline IR that fills @size bytes at destreg+offset with @val.
 * Small, sufficiently aligned sizes become immediate stores; larger sizes
 * load @val into a register and store it in progressively smaller chunks
 * (8/4/2/1 bytes; the surrounding loop lines are missing from this
 * listing — confirm).  The g_assert below suggests only a limited set of
 * fill values is supported on some path (presumably val == 0 when
 * alignment is poor — confirm on the missing guard line).
 */
1646 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1650 g_assert (val == 0);
1655 if ((size <= 4) && (size <= align)) {
1658 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1661 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1664 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1666 #if SIZEOF_REGISTER == 8
1668 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* General case: materialize the fill value once, register-width. */
1674 val_reg = alloc_preg (cfg);
1676 if (SIZEOF_REGISTER == 8)
1677 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1679 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
1682 /* This could be optimized further if neccesary */
/* Byte-at-a-time fallback (used when alignment is insufficient). */
1684 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/* On targets tolerating unaligned access, use the widest stores first. */
1691 #if !NO_UNALIGNED_ACCESS
1692 if (SIZEOF_REGISTER == 8) {
1694 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1699 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
1707 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1712 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1717 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1723 #endif /* DISABLE_JIT */
/*
 * mini_emit_memcpy:
 * Emit inline IR that copies @size bytes from srcreg+soffset to
 * destreg+doffset.  Copies in the widest chunks the alignment (and the
 * target's unaligned-access tolerance) allows: 8/4/2/1-byte load+store
 * pairs, each advancing the offsets (loop/offset-advance lines are
 * missing from this listing — confirm).  Regions are assumed
 * non-overlapping.
 */
1726 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1733 /*FIXME arbitrary hack to avoid unbound code expansion.*/
/* Hard cap: inlining an unbounded copy would blow up code size. */
1734 g_assert (size < 10000);
1737 /* This could be optimized further if neccesary */
/* Byte-at-a-time fallback for insufficient alignment. */
1739 cur_reg = alloc_preg (cfg);
1740 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1741 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1748 #if !NO_UNALIGNED_ACCESS
1749 if (SIZEOF_REGISTER == 8) {
/* 64-bit registers: move 8 bytes per load/store pair. */
1751 cur_reg = alloc_preg (cfg);
1752 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1753 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
/* Remaining tail: 4-, then 2-, then 1-byte chunks. */
1762 cur_reg = alloc_preg (cfg);
1763 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1764 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1770 cur_reg = alloc_preg (cfg);
1771 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1772 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1778 cur_reg = alloc_preg (cfg);
1779 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1780 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * ret_type_to_call_opcode:
 * Map a method's return type to the call opcode family to use:
 * CALL (int/ptr/object), VOIDCALL, LCALL (64-bit int), FCALL (float),
 * VCALL (valuetype/typedbyref) — each crossed with the _REG (indirect)
 * and VIRT variants selected by @calli / @virt.  Generic type variables
 * are first resolved through the sharing context.  Enums and generic
 * instances re-dispatch on their underlying/container type (presumably
 * via a goto/loop on missing lines — confirm).
 */
1790 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
/* byref returns are pointers regardless of the pointee type. */
1793 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1796 type = mini_get_basic_type_from_generic (gsctx, type);
1797 switch (type->type) {
1798 case MONO_TYPE_VOID:
1799 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
1802 case MONO_TYPE_BOOLEAN:
1805 case MONO_TYPE_CHAR:
1808 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1812 case MONO_TYPE_FNPTR:
1813 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1814 case MONO_TYPE_CLASS:
1815 case MONO_TYPE_STRING:
1816 case MONO_TYPE_OBJECT:
1817 case MONO_TYPE_SZARRAY:
1818 case MONO_TYPE_ARRAY:
1819 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1822 return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
1825 return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
1826 case MONO_TYPE_VALUETYPE:
/* Enums behave like their underlying integral type. */
1827 if (type->data.klass->enumtype) {
1828 type = mono_class_enum_basetype (type->data.klass);
1831 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1832 case MONO_TYPE_TYPEDBYREF:
1833 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1834 case MONO_TYPE_GENERICINST:
/* Re-dispatch on the generic container's open type. */
1835 type = &type->data.generic_class->container_class->byval_arg;
1838 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
1844 * target_type_is_incompatible:
1845 * @cfg: MonoCompile context
/*
 * Verification helper: checks that the evaluation-stack item @arg can be
 * stored into a location of type @target, by matching @arg's stack type
 * (STACK_I4/I8/R8/PTR/MP/OBJ/VTYPE) against the target's fundamental
 * type.  The return-value lines (0 = compatible, nonzero = not) are
 * missing from this listing — the visible conditions are the mismatch
 * tests.
 */
1847 * Check that the item @arg on the evaluation stack can be stored
1848 * in the target type (can be a local, or field, etc).
1849 * The cfg arg can be used to check if we need verification or just
1852 * Returns: non-0 value if arg can't be stored on a target.
1855 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1857 MonoType *simple_type;
1860 if (target->byref) {
1861 /* FIXME: check that the pointed to types match */
/* Managed pointers must point at the same class as the target. */
1862 if (arg->type == STACK_MP)
1863 return arg->klass != mono_class_from_mono_type (target);
1864 if (arg->type == STACK_PTR)
1869 simple_type = mono_type_get_underlying_type (target);
1870 switch (simple_type->type) {
1871 case MONO_TYPE_VOID:
1875 case MONO_TYPE_BOOLEAN:
1878 case MONO_TYPE_CHAR:
/* Small integer targets accept I4 or native-int stack items. */
1881 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1885 /* STACK_MP is needed when setting pinned locals */
1886 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1891 case MONO_TYPE_FNPTR:
1892 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1895 case MONO_TYPE_CLASS:
1896 case MONO_TYPE_STRING:
1897 case MONO_TYPE_OBJECT:
1898 case MONO_TYPE_SZARRAY:
1899 case MONO_TYPE_ARRAY:
1900 if (arg->type != STACK_OBJ)
1902 /* FIXME: check type compatibility */
1906 if (arg->type != STACK_I8)
1911 if (arg->type != STACK_R8)
1914 case MONO_TYPE_VALUETYPE:
/* Valuetypes must match by exact class, not just stack kind. */
1915 if (arg->type != STACK_VTYPE)
1917 klass = mono_class_from_mono_type (simple_type);
1918 if (klass != arg->klass)
1921 case MONO_TYPE_TYPEDBYREF:
1922 if (arg->type != STACK_VTYPE)
1924 klass = mono_class_from_mono_type (simple_type);
1925 if (klass != arg->klass)
1928 case MONO_TYPE_GENERICINST:
/* Generic instantiations: valuetype insts check exact class,
 * reference insts only the stack kind. */
1929 if (mono_type_generic_inst_is_valuetype (simple_type)) {
1930 if (arg->type != STACK_VTYPE)
1932 klass = mono_class_from_mono_type (simple_type);
1933 if (klass != arg->klass)
1937 if (arg->type != STACK_OBJ)
1939 /* FIXME: check type compatibility */
1943 case MONO_TYPE_MVAR:
1944 /* FIXME: all the arguments must be references for now,
1945 * later look inside cfg and see if the arg num is
1946 * really a reference
/* Open type variables only occur under generic sharing, where
 * they are constrained to reference types here. */
1948 g_assert (cfg->generic_sharing_context);
1949 if (arg->type != STACK_OBJ)
1953 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
/*
 * check_call_signature:
 * Verifies that the stack items in @args are acceptable for the parameter
 * types of @sig (including the implicit "this" in args [0] when hasthis).
 * Like target_type_is_incompatible, the return statements (0 = OK,
 * nonzero = mismatch) sit on lines missing from this listing.
 */
1959 * Prepare arguments for passing to a function call.
1960 * Return a non-zero value if the arguments can't be passed to the given
1962 * The type checks are not yet complete and some conversions may need
1963 * casts on 32 or 64 bit architectures.
1965 * FIXME: implement this using target_type_is_incompatible ()
1968 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
1970 MonoType *simple_type;
/* "this" may be an object reference, managed pointer, or native int. */
1974 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
1978 for (i = 0; i < sig->param_count; ++i) {
1979 if (sig->params [i]->byref) {
1980 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
1984 simple_type = sig->params [i];
1985 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
1987 switch (simple_type->type) {
1988 case MONO_TYPE_VOID:
1993 case MONO_TYPE_BOOLEAN:
1996 case MONO_TYPE_CHAR:
1999 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2005 case MONO_TYPE_FNPTR:
/* Pointer parameters are lenient: I4/PTR/MP/OBJ all pass. */
2006 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2009 case MONO_TYPE_CLASS:
2010 case MONO_TYPE_STRING:
2011 case MONO_TYPE_OBJECT:
2012 case MONO_TYPE_SZARRAY:
2013 case MONO_TYPE_ARRAY:
2014 if (args [i]->type != STACK_OBJ)
2019 if (args [i]->type != STACK_I8)
2024 if (args [i]->type != STACK_R8)
2027 case MONO_TYPE_VALUETYPE:
/* Enums re-check as their underlying integral type. */
2028 if (simple_type->data.klass->enumtype) {
2029 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2032 if (args [i]->type != STACK_VTYPE)
2035 case MONO_TYPE_TYPEDBYREF:
2036 if (args [i]->type != STACK_VTYPE)
2039 case MONO_TYPE_GENERICINST:
2040 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2044 g_error ("unknown type 0x%02x in check_call_signature",
/*
 * callvirt_to_call:
 * Map a *CALLVIRT opcode to its direct-call counterpart (used when a
 * virtual call can be devirtualized).  Only a fragment of the switch is
 * visible in this listing; unknown opcodes hit g_assert_not_reached.
 */
2052 callvirt_to_call (int opcode)
2057 case OP_VOIDCALLVIRT:
2066 g_assert_not_reached ();
/*
 * callvirt_to_call_membase:
 * Map a *CALLVIRT opcode to the corresponding *CALL_MEMBASE opcode, i.e.
 * an indirect call through [basereg + offset] (vtable/IMT slot dispatch).
 */
2073 callvirt_to_call_membase (int opcode)
2077 return OP_CALL_MEMBASE;
2078 case OP_VOIDCALLVIRT:
2079 return OP_VOIDCALL_MEMBASE;
2081 return OP_FCALL_MEMBASE;
2083 return OP_LCALL_MEMBASE;
2085 return OP_VCALL_MEMBASE;
2087 g_assert_not_reached ();
2093 #ifdef MONO_ARCH_HAVE_IMT
/*
 * emit_imt_argument:
 * Pass the "IMT method" identifying the interface method being invoked.
 * On architectures with a dedicated IMT register the method pointer is
 * materialized (from imt_arg, an AOT METHODCONST patch, or a PCONST) and
 * attached to the call as a fixed-register out-argument; otherwise the
 * per-architecture hook handles it.
 */
2095 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
2097 #ifdef MONO_ARCH_IMT_REG
2098 int method_reg = alloc_preg (cfg);
2101 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2102 } else if (cfg->compile_aot) {
2103 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
2106 MONO_INST_NEW (cfg, ins, OP_PCONST);
2107 ins->inst_p0 = call->method;
2108 ins->dreg = method_reg;
2109 MONO_ADD_INS (cfg->cbb, ins);
2112 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2114 mono_arch_emit_imt_argument (cfg, call, imt_arg);
/*
 * mono_patch_info_new:
 * Allocate a MonoJumpInfo patch record from @mp and fill in its target;
 * the ip/type assignments sit on lines missing from this listing.
 */
2119 static MonoJumpInfo *
2120 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2122 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2126 ji->data.target = target;
/*
 * mono_emit_call_args:
 * Build (but do not emit) a MonoCallInst for @sig/@args: picks the call
 * opcode from the return type, wires up valuetype return handling, fixes
 * soft-float R4 arguments, and lets the backend (or LLVM) lay out the
 * out-arguments.  @calli selects an indirect call, @virtual a virtual
 * one, @tail a tail call.
 */
2131 inline static MonoCallInst *
2132 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2133 MonoInst **args, int calli, int virtual, int tail)
2136 #ifdef MONO_ARCH_SOFT_FLOAT
2141 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2143 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2146 call->signature = sig;
2148 type_to_eval_stack_type ((cfg), sig->ret, &call->inst);
/* Valuetype returns: either reuse cfg->vret_addr (presumably the
 * ret-by-hidden-pointer ABI path — confirm on the missing condition
 * line) or allocate a local and expose its address via
 * OP_OUTARG_VTRETADDR. */
2151 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2152 call->vret_var = cfg->vret_addr;
2153 //g_assert_not_reached ();
2155 } else if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2156 MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
2159 temp->backend.is_pinvoke = sig->pinvoke;
2162 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2163 * address of return value to increase optimization opportunities.
2164 * Before vtype decomposition, the dreg of the call ins itself represents the
2165 * fact the call modifies the return value. After decomposition, the call will
2166 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2167 * will be transformed into an LDADDR.
2169 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2170 loada->dreg = alloc_preg (cfg);
2171 loada->inst_p0 = temp;
2172 /* We reference the call too since call->dreg could change during optimization */
2173 loada->inst_p1 = call;
2174 MONO_ADD_INS (cfg->cbb, loada);
2176 call->inst.dreg = temp->dreg;
2178 call->vret_var = loada;
2179 } else if (!MONO_TYPE_IS_VOID (sig->ret))
2180 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2182 #ifdef MONO_ARCH_SOFT_FLOAT
2183 if (COMPILE_SOFT_FLOAT (cfg)) {
2185 * If the call has a float argument, we would need to do an r8->r4 conversion using
2186 * an icall, but that cannot be done during the call sequence since it would clobber
2187 * the call registers + the stack. So we do it before emitting the call.
2189 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2191 MonoInst *in = call->args [i];
/* args [0] is "this" when hasthis; treat it as native int. */
2193 if (i >= sig->hasthis)
2194 t = sig->params [i - sig->hasthis];
2196 t = &mono_defaults.int_class->byval_arg;
2197 t = mono_type_get_underlying_type (t);
2199 if (!t->byref && t->type == MONO_TYPE_R4) {
2200 MonoInst *iargs [1];
2204 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2206 /* The result will be in an int vreg */
2207 call->args [i] = conv;
/* Backend lowering of the out-args: LLVM or native codegen. */
2214 if (COMPILE_LLVM (cfg))
2215 mono_llvm_emit_call (cfg, call);
2217 mono_arch_emit_call (cfg, call);
2219 mono_arch_emit_call (cfg, call);
/* Track the largest outgoing-parameter area needed by any call. */
2222 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2223 cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * mono_emit_calli:
 * Emit an indirect call through the function pointer in @addr.
 * The address goes into sreg1 of the *CALL_REG instruction built by
 * mono_emit_call_args (calli=TRUE).
 */
2228 inline static MonoInst*
2229 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr)
2231 MonoCallInst *call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE);
2233 call->inst.sreg1 = addr->dreg;
2235 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2237 return (MonoInst*)call;
/*
 * mono_emit_rgctx_calli:
 * Indirect call that additionally passes a runtime-generic-context
 * argument in the architecture's dedicated RGCTX register.  Only
 * compiled on architectures defining MONO_ARCH_RGCTX_REG; elsewhere it
 * must never be reached.
 */
2240 inline static MonoInst*
2241 mono_emit_rgctx_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *rgctx_arg)
2243 #ifdef MONO_ARCH_RGCTX_REG
2248 rgctx_reg = mono_alloc_preg (cfg);
2249 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2251 call = (MonoCallInst*)mono_emit_calli (cfg, sig, args, addr);
/* Pin the rgctx value to the fixed register across the call. */
2253 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2254 cfg->uses_rgctx_reg = TRUE;
2255 call->rgctx_reg = TRUE;
2257 return (MonoInst*)call;
2259 g_assert_not_reached ();
2265 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, int rgctx_type);
2267 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, int rgctx_type);
/*
 * mono_emit_method_call_full:
 * Emit a managed call to @method with signature @sig and arguments @args.
 * @this non-NULL marks a virtual call; @imt_arg optionally carries the
 * runtime IMT method for shared interface calls.  Handles, in order:
 * string-ctor signature fixup, transparent-proxy (remoting) wrappers,
 * the delegate-Invoke fast path, devirtualization of non-virtual and
 * final methods, and finally real vtable/IMT-slot dispatch.
 */
2270 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2271 MonoInst **args, MonoInst *this, MonoInst *imt_arg)
2273 gboolean might_be_remote;
2274 gboolean virtual = this != NULL;
2275 gboolean enable_for_aot = TRUE;
2279 if (method->string_ctor) {
2280 /* Create the real signature */
2281 /* FIXME: Cache these */
/* String ctors actually return the string: patch the signature. */
2282 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2283 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
/* A call may hit a transparent proxy when the target class is
 * MarshalByRef (or System.Object) and the call isn't virtual. */
2288 might_be_remote = this && sig->hasthis &&
2289 (method->klass->marshalbyref || method->klass == mono_defaults.object_class) &&
2290 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && !MONO_CHECK_THIS (this);
2292 context_used = mono_method_check_context_used (method);
2293 if (might_be_remote && context_used) {
/* Shared generic code can't use the remoting wrapper directly:
 * fetch the invoke-with-check address from the rgctx and calli. */
2296 g_assert (cfg->generic_sharing_context);
2298 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2300 return mono_emit_calli (cfg, sig, args, addr);
2303 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, FALSE);
2305 if (might_be_remote)
2306 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2308 call->method = method;
2309 call->inst.flags |= MONO_INST_HAS_METHOD;
2310 call->inst.inst_left = this;
2313 int vtable_reg, slot_reg, this_reg;
2315 this_reg = this->dreg;
2317 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
/* Fast path: MulticastDelegate.Invoke calls straight through the
 * per-instance invoke_impl trampoline. */
2318 if ((method->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
2319 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2321 /* Make a call to delegate->invoke_impl */
2322 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2323 call->inst.inst_basereg = this_reg;
2324 call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2325 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2327 return (MonoInst*)call;
/* Devirtualize when the callee is non-virtual (or final, below):
 * only a null "this" check is needed before a direct call. */
2331 if ((!cfg->compile_aot || enable_for_aot) &&
2332 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2333 (MONO_METHOD_IS_FINAL (method) &&
2334 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2335 !(method->klass->marshalbyref && context_used)) {
2337 * the method is not virtual, we just need to ensure this is not null
2338 * and then we can call the method directly.
2340 if (method->klass->marshalbyref || method->klass == mono_defaults.object_class) {
2342 * The check above ensures method is not gshared, this is needed since
2343 * gshared methods can't have wrappers.
2345 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
/* String ctors take a NULL "this", so skip the check for them. */
2348 if (!method->string_ctor)
2349 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2351 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2353 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2355 return (MonoInst*)call;
2358 if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2360 * the method is virtual, but we can statically dispatch since either
2361 * it's class or the method itself are sealed.
2362 * But first we need to ensure it's not a null reference.
2364 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2366 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2367 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2369 return (MonoInst*)call;
/* Real virtual dispatch: indirect call through a vtable/IMT slot. */
2372 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
/* Faulting load: doubles as the null check on "this". */
2374 vtable_reg = alloc_preg (cfg);
2375 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2376 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2378 #ifdef MONO_ARCH_HAVE_IMT
/* Interface call via IMT: slots live at negative offsets before
 * the vtable, indexed by the method's IMT slot. */
2380 guint32 imt_slot = mono_method_get_imt_slot (method);
2381 emit_imt_argument (cfg, call, imt_arg);
2382 slot_reg = vtable_reg;
2383 call->inst.inst_offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
/* Non-IMT fallback: go through the interface-offsets table. */
2386 if (slot_reg == -1) {
2387 slot_reg = alloc_preg (cfg);
2388 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2389 call->inst.inst_offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
/* Plain virtual call: index directly into the vtable. */
2392 slot_reg = vtable_reg;
2393 call->inst.inst_offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
2394 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2395 #ifdef MONO_ARCH_HAVE_IMT
/* imt_arg on a non-interface call only occurs for generic
 * virtual methods (presumably — confirm callers). */
2397 g_assert (mono_method_signature (method)->generic_param_count);
2398 emit_imt_argument (cfg, call, imt_arg);
2403 call->inst.sreg1 = slot_reg;
2404 call->virtual = TRUE;
2407 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2409 return (MonoInst*)call;
/*
 * mono_emit_rgctx_method_call_full:
 * Like mono_emit_method_call_full, but additionally passes @vtable_arg
 * as the runtime generic context in the dedicated RGCTX register
 * (architectures with MONO_ARCH_RGCTX_REG only).
 */
2413 mono_emit_rgctx_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2414 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *vtable_arg)
2416 #ifdef MONO_ARCH_RGCTX_REG
2423 #ifdef MONO_ARCH_RGCTX_REG
/* Copy the rgctx value into a fresh vreg pinned to the fixed reg. */
2424 rgctx_reg = mono_alloc_preg (cfg);
2425 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
2430 ins = mono_emit_method_call_full (cfg, method, sig, args, this, imt_arg);
2432 call = (MonoCallInst*)ins;
2434 #ifdef MONO_ARCH_RGCTX_REG
2435 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2436 cfg->uses_rgctx_reg = TRUE;
2437 call->rgctx_reg = TRUE;
/* Convenience wrapper: call @method with its own metadata signature and
 * no explicit IMT argument. */
2447 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2449 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), args, this, NULL);
/*
 * mono_emit_native_call:
 * Emit a direct call to the native address @func with signature @sig
 * (the fptr assignment sits on a line missing from this listing).
 */
2453 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2460 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE);
2463 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2465 return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 * Call a registered JIT icall identified by its native address @func:
 * look up its MonoJitICallInfo and call through the icall wrapper with
 * the registered signature.
 */
2469 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2471 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2475 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2479 * mono_emit_abs_call:
/*
 * Emit a call whose target is a patch-info record rather than a concrete
 * address: the MonoJumpInfo itself is passed as the "address" and is
 * registered in cfg->abs_patches so the PATCH_INFO_ABS resolver can map
 * it back to the real target at code-emission time.
 */
2481 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2483 inline static MonoInst*
2484 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2485 MonoMethodSignature *sig, MonoInst **args)
2487 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2491 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
/* Lazily create the ji -> ji identity table used by the resolver. */
2494 if (cfg->abs_patches == NULL)
2495 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2496 g_hash_table_insert (cfg->abs_patches, ji, ji);
2497 ins = mono_emit_native_call (cfg, ji, sig, args);
2498 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * After a pinvoke (or LLVM-compiled) call that returns a sub-register-sized
 * integer, emit an explicit widening conversion: native code may return
 * small integers without sign/zero-extending the upper bits, so the JIT
 * cannot rely on the register contents beyond the declared width.
 */
2503 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2505 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2506 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2510 * Native code might return non register sized integers
2511 * without initializing the upper bits.
/* Map the return type's load opcode to the matching widening conv. */
2513 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2514 case OP_LOADI1_MEMBASE:
2515 widen_op = OP_ICONV_TO_I1;
2517 case OP_LOADU1_MEMBASE:
2518 widen_op = OP_ICONV_TO_U1;
2520 case OP_LOADI2_MEMBASE:
2521 widen_op = OP_ICONV_TO_I2;
2523 case OP_LOADU2_MEMBASE:
2524 widen_op = OP_ICONV_TO_U2;
/* -1 means the return type needed no widening (full-width or non-integer). */
2530 if (widen_op != -1) {
2531 int dreg = alloc_preg (cfg);
2534 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
/* Preserve the evaluation-stack type of the original call result. */
2535 widen->type = ins->type;
/*
 * Return the corlib-internal String.memcpy(dest, src, n) helper, cached in
 * a static.  Aborts if the runtime is paired with a corlib too old to
 * provide it.
 */
2545 get_memcpy_method (void)
2547 static MonoMethod *memcpy_method = NULL;
2548 if (!memcpy_method) {
2549 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2551 g_error ("Old corlib found. Install a new one");
2553 return memcpy_method;
2557 * Emit code to copy a valuetype of type @klass whose address is stored in
2558 * @src->dreg to memory whose address is stored at @dest->dreg.
/* When @native is TRUE the native (marshalled) size is used and the struct
 * is assumed to contain no GC references.  With write barriers enabled and
 * a reference-carrying class, the copy is routed through the mono_value_copy
 * icall (which performs the barrier); otherwise a small copy is inlined via
 * mini_emit_memcpy and larger ones call the corlib memcpy helper. */
2561 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
2563 MonoInst *iargs [3];
2566 MonoMethod *memcpy_method;
2570 * This check breaks with spilled vars... need to handle it during verification anyway.
2571 * g_assert (klass && klass == src->klass && klass == dest->klass);
2575 n = mono_class_native_size (klass, &align);
2577 n = mono_class_value_size (klass, &align);
2579 #if HAVE_WRITE_BARRIERS
2580 /* if native is true there should be no references in the struct */
2581 if (klass->has_references && !native) {
2582 /* Avoid barriers when storing to the stack */
2583 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
2584 (dest->opcode == OP_LDADDR))) {
2585 int context_used = 0;
2590 if (cfg->generic_sharing_context)
2591 context_used = mono_class_check_context_used (klass);
/* iargs[2] carries the klass: via rgctx under generic sharing, as a
 * class constant under AOT, or as a raw pointer otherwise. */
2593 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
2595 if (cfg->compile_aot) {
2596 EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
2598 EMIT_NEW_PCONST (cfg, iargs [2], klass);
/* Make sure the GC descriptor is ready before the barrier icall runs. */
2599 mono_class_compute_gc_descriptor (klass);
2603 /* FIXME: this does the memcpy as well (or
2604 should), so we don't need the memcpy
2606 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* Small copies (<= 5 pointer words) are inlined when intrinsics are on. */
2611 if ((cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
2612 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
2613 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
2617 EMIT_NEW_ICONST (cfg, iargs [2], n);
2619 memcpy_method = get_memcpy_method ();
2620 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * Return the corlib-internal String.memset(ptr, val, n) helper, cached in
 * a static.  Aborts if corlib is too old to provide it.
 */
2625 get_memset_method (void)
2627 static MonoMethod *memset_method = NULL;
2628 if (!memset_method) {
2629 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
2631 g_error ("Old corlib found. Install a new one");
2633 return memset_method;
/*
 * Emit code to zero-initialize a valuetype of type @klass at the address in
 * @dest->dreg (the initobj opcode).  Small types (<= 5 pointer words) are
 * zeroed inline via mini_emit_memset; larger ones call the corlib memset
 * helper with value 0.
 */
2637 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
2639 MonoInst *iargs [3];
2642 MonoMethod *memset_method;
2644 /* FIXME: Optimize this for the case when dest is an LDADDR */
2646 mono_class_init (klass);
2647 n = mono_class_value_size (klass, &align);
2649 if (n <= sizeof (gpointer) * 5) {
2650 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
2653 memset_method = get_memset_method ();
2655 EMIT_NEW_ICONST (cfg, iargs [1], 0);
2656 EMIT_NEW_ICONST (cfg, iargs [2], n);
2657 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * Emit IR that loads the runtime generic context for METHOD.  The source
 * depends on how the method is shared:
 *   - method-inst shared (MRGCTX): load the mrgctx from the vtable var;
 *   - static or valuetype method: load the vtable var (and dereference its
 *     class_vtable field when the method also has a method inst);
 *   - otherwise: load it from `this`'s vtable.
 * Only valid under generic sharing (asserted).
 */
2662 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
2664 MonoInst *this = NULL;
2666 g_assert (cfg->generic_sharing_context);
/* `this` is only needed (and only exists) for non-static reference-type
 * methods that don't take a method-level generic context. */
2668 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
2669 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
2670 !method->klass->valuetype)
2671 EMIT_NEW_ARGLOAD (cfg, this, 0);
2673 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
2674 MonoInst *mrgctx_loc, *mrgctx_var;
2677 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
2679 mrgctx_loc = mono_get_vtable_var (cfg);
2680 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
2683 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
2684 MonoInst *vtable_loc, *vtable_var;
2688 vtable_loc = mono_get_vtable_var (cfg);
2689 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
/* The var actually holds an mrgctx here; fetch the vtable out of it. */
2691 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
2692 MonoInst *mrgctx_var = vtable_var;
2695 vtable_reg = alloc_preg (cfg);
2696 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
2697 vtable_var->type = STACK_PTR;
/* Fallback: read the vtable straight from `this`. */
2703 int vtable_reg, res_reg;
2705 vtable_reg = alloc_preg (cfg);
2706 res_reg = alloc_preg (cfg);
2707 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/*
 * Allocate (from MP) a new rgctx-entry patch descriptor: records the
 * requesting method, whether the lookup goes through an MRGCTX, the inner
 * patch (type + data) identifying the target, and the rgctx info slot type.
 */
2712 static MonoJumpInfoRgctxEntry *
2713 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, int info_type)
2715 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
2716 res->method = method;
2717 res->in_mrgctx = in_mrgctx;
/* The nested MonoJumpInfo describes what to look up (class/method/field). */
2718 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
2719 res->data->type = patch_type;
2720 res->data->data.target = patch_data;
2721 res->info_type = info_type;
/*
 * Emit a call to the lazy rgctx-fetch trampoline, resolving ENTRY against
 * the given rgctx value at runtime.
 */
2726 static inline MonoInst*
2727 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
2729 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * Emit IR that loads the RGCTX_TYPE info (vtable, klass, ...) of KLASS from
 * the runtime generic context of the current method.
 */
2733 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
2734 MonoClass *klass, int rgctx_type)
2736 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
2737 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2739 return emit_rgctx_fetch (cfg, rgctx, entry);
2743 * emit_get_rgctx_method:
2745 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
2746 * normal constants, else emit a load from the rgctx.
2749 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
2750 MonoMethod *cmethod, int rgctx_type)
/* No generic sharing: the value is known at compile time, emit a constant. */
2752 if (!context_used) {
2755 switch (rgctx_type) {
2756 case MONO_RGCTX_INFO_METHOD:
2757 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
2759 case MONO_RGCTX_INFO_METHOD_RGCTX:
2760 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
2763 g_assert_not_reached ();
/* Shared code: resolve through the lazy rgctx fetch. */
2766 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
2767 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2769 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * Emit IR that loads the RGCTX_TYPE info of FIELD from the runtime generic
 * context of the current method.
 */
2774 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
2775 MonoClassField *field, int rgctx_type)
2777 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
2778 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2780 return emit_rgctx_fetch (cfg, rgctx, entry);
2784 * On return the caller must check @klass for load errors.
/* Emit a call to the generic class-init trampoline for KLASS.  Under
 * generic sharing the vtable argument comes from the rgctx; otherwise the
 * vtable is looked up directly and passed as a constant.  On architectures
 * with MONO_ARCH_VTABLE_REG the vtable is passed in that fixed register. */
2787 emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
2789 MonoInst *vtable_arg;
2791 int context_used = 0;
2793 if (cfg->generic_sharing_context)
2794 context_used = mono_class_check_context_used (klass);
2797 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
2798 klass, MONO_RGCTX_INFO_VTABLE);
2800 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2804 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
2807 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
2808 #ifdef MONO_ARCH_VTABLE_REG
2809 mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
2810 cfg->uses_vtable_reg = TRUE;
2817 * On return the caller must check @array_class for load errors
/* Emit the STELEM array-covariance check: compare OBJ's runtime type with
 * ARRAY_CLASS and throw ArrayTypeMismatchException on mismatch.  The
 * comparison strategy depends on the compilation mode: class comparison
 * under MONO_OPT_SHARED, rgctx-loaded vtable under generic sharing, and
 * vtable constant / immediate otherwise. */
2820 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
2822 int vtable_reg = alloc_preg (cfg);
2823 int context_used = 0;
2825 if (cfg->generic_sharing_context)
2826 context_used = mono_class_check_context_used (array_class);
2828 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
2830 if (cfg->opt & MONO_OPT_SHARED) {
/* Shared code can't rely on vtable identity; compare the klass instead. */
2831 int class_reg = alloc_preg (cfg);
2832 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2833 if (cfg->compile_aot) {
2834 int klass_reg = alloc_preg (cfg);
2835 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
2836 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
2838 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
2840 } else if (context_used) {
2841 MonoInst *vtable_ins;
2843 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
2844 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
2846 if (cfg->compile_aot) {
2850 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
2852 vt_reg = alloc_preg (cfg);
2853 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
2854 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
2857 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
2859 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
2863 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
/*
 * With --debug=casts, emit IR that records the source class (from the
 * object's vtable) and the target class of a pending cast into the JIT TLS
 * data (class_cast_from / class_cast_to), so a failing cast can produce a
 * detailed error message.  No-op when better_cast_details is off.
 */
2867 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg)
2869 if (mini_get_debug_options ()->better_cast_details) {
2870 int to_klass_reg = alloc_preg (cfg);
2871 int vtable_reg = alloc_preg (cfg);
2872 int klass_reg = alloc_preg (cfg);
2873 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
/* tls_get is NULL on platforms without the TLS intrinsic. */
2876 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
2880 MONO_ADD_INS (cfg->cbb, tls_get);
2881 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2882 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2884 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
2885 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
2886 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
/*
 * Counterpart of save_cast_details (): after a successful cast, clear the
 * recorded cast details in the JIT TLS data.  Zeroing class_cast_from is
 * sufficient to mark the record invalid.
 */
2891 reset_cast_details (MonoCompile *cfg)
2893 /* Reset the variables holding the cast details */
2894 if (mini_get_debug_options ()->better_cast_details) {
2895 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
2897 MONO_ADD_INS (cfg->cbb, tls_get);
2898 /* It is enough to reset the from field */
2899 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
2904 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
2905 * generic code is generated.
2908 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
2910 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
2913 MonoInst *rgctx, *addr;
2915 /* FIXME: What if the class is shared? We might not
2916 have to get the address of the method from the
/* Shared path: fetch the compiled Unbox method's address and rgctx from
 * the runtime generic context, then call indirectly. */
2918 addr = emit_get_rgctx_method (cfg, context_used, method,
2919 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
2921 rgctx = emit_get_rgctx (cfg, method, context_used);
2923 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
/* Non-shared path: direct call to Nullable<T>.Unbox. */
2925 return mono_emit_method_call (cfg, method, &val, NULL);
/*
 * Emit IR for the unbox opcode: check that the boxed object on the stack
 * (sp[0]) has rank 0 and the expected element class (InvalidCastException
 * otherwise), then produce a managed pointer to the value by adding
 * sizeof(MonoObject) to the object reference.
 */
2930 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
2934 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
2935 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
2936 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
2937 int rank_reg = alloc_dreg (cfg ,STACK_I4);
2939 obj_reg = sp [0]->dreg;
/* Faulting load: also acts as the null check on the object. */
2940 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2941 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
2943 /* FIXME: generics */
2944 g_assert (klass->rank == 0);
/* A boxed valuetype must not be an array: rank must be 0. */
2947 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
2948 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
2950 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2951 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
/* Shared-generic path: compare element class loaded via rgctx. */
2954 MonoInst *element_class;
2956 /* This assertion is from the unboxcast insn */
2957 g_assert (klass->rank == 0);
2959 element_class = emit_get_rgctx_klass (cfg, context_used,
2960 klass->element_class, MONO_RGCTX_INFO_KLASS);
2962 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
2963 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
2965 save_cast_details (cfg, klass->element_class, obj_reg);
2966 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
2967 reset_cast_details (cfg);
/* The value lives right after the MonoObject header. */
2970 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_PTR), obj_reg, sizeof (MonoObject));
2971 MONO_ADD_INS (cfg->cbb, add);
2972 add->type = STACK_MP;
2979 * Returns NULL and set the cfg exception on error.
/* Emit IR that allocates an object of KLASS.  @for_box selects the boxing
 * allocator variant.  Paths: MONO_OPT_SHARED -> mono_object_new with an
 * explicit domain; AOT out-of-line corlib allocation -> token-based helper;
 * otherwise a GC managed allocator when available, else the allocation
 * function chosen by mono_class_get_allocation_ftn (). */
2982 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box)
2984 MonoInst *iargs [2];
2987 if (cfg->opt & MONO_OPT_SHARED) {
2988 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
2989 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
2991 alloc_ftn = mono_object_new;
2992 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
2993 /* This happens often in argument checking code, eg. throw new FooException... */
2994 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
2995 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
2996 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
2998 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2999 MonoMethod *managed_alloc = NULL;
/* vtable lookup failed: record a TypeLoadException on the cfg. */
3003 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
3004 cfg->exception_ptr = klass;
3008 #ifndef MONO_CROSS_COMPILE
3009 managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
3012 if (managed_alloc) {
3013 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3014 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3016 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* pass_lw allocators take the size in pointer words as first arg. */
3018 guint32 lw = vtable->klass->instance_size;
3019 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
3020 EMIT_NEW_ICONST (cfg, iargs [0], lw);
3021 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
3024 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3028 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/*
 * Like handle_alloc (), but the vtable comes from DATA_INST (an IR value
 * produced under generic sharing) instead of a compile-time constant, so
 * allocation goes through mono_object_new / mono_object_new_specific.
 */
3032 handle_alloc_from_inst (MonoCompile *cfg, MonoClass *klass, MonoInst *data_inst,
3035 MonoInst *iargs [2];
3036 MonoMethod *managed_alloc = NULL;
3040 FIXME: we cannot get managed_alloc here because we can't get
3041 the class's vtable (because it's not a closed class)
3043 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3044 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
3047 if (cfg->opt & MONO_OPT_SHARED) {
3048 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3049 iargs [1] = data_inst;
3050 alloc_ftn = mono_object_new;
/* managed_alloc is always NULL here (see FIXME above); branch kept for
 * when the vtable can be obtained for open classes. */
3052 if (managed_alloc) {
3053 iargs [0] = data_inst;
3054 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3057 iargs [0] = data_inst;
3058 alloc_ftn = mono_object_new_specific;
3061 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3065 * Returns NULL and set the cfg exception on error.
/* Emit IR that boxes VAL as an instance of KLASS: Nullable<T> goes through
 * its Box method; everything else allocates via handle_alloc () and stores
 * the value just past the MonoObject header. */
3068 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass)
3070 MonoInst *alloc, *ins;
3072 if (mono_class_is_nullable (klass)) {
3073 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3074 return mono_emit_method_call (cfg, method, &val, NULL);
3077 alloc = handle_alloc (cfg, klass, TRUE);
3081 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * Shared-generic variant of handle_box (): the vtable/class data comes from
 * DATA_INST.  Nullable<T> is boxed through an rgctx indirect call to its
 * Box method; other types allocate via handle_alloc_from_inst () and store
 * the value past the object header.
 */
3087 handle_box_from_inst (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used, MonoInst *data_inst)
3089 MonoInst *alloc, *ins;
3091 if (mono_class_is_nullable (klass)) {
3092 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3093 /* FIXME: What if the class is shared? We might not
3094 have to get the method address from the RGCTX. */
3095 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3096 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3097 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3099 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
3101 alloc = handle_alloc_from_inst (cfg, klass, data_inst, TRUE);
3103 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/* Predicate deciding whether an isinst/castclass against KLASS needs the
 * slow icall path (interfaces, arrays, nullables, MBR, sealed, variance,
 * type variables).  Note the leading TRUE || currently forces the complex
 * path for every class — kept deliberately until the fast path is fixed. */
3109 // FIXME: This doesn't work yet (class libs tests fail?)
3110 #define is_complex_isinst(klass) (TRUE || (klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || klass->marshalbyref || (klass->flags & TYPE_ATTRIBUTE_SEALED) || mono_class_has_variant_generic_params (klass) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
3113 * Returns NULL and set the cfg exception on error.
/* Emit IR for the castclass opcode under generic sharing: complex classes
 * (see is_complex_isinst) go through the mono_object_castclass icall; the
 * simple path checks for null, then compares the object's vtable/klass
 * (interface bitmap, sealed-class vtable or klass compare, or the generic
 * castclass helper) and throws InvalidCastException on mismatch. */
3116 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3118 MonoBasicBlock *is_null_bb;
3119 int obj_reg = src->dreg;
3120 int vtable_reg = alloc_preg (cfg);
3121 MonoInst *klass_inst = NULL;
3126 klass_inst = emit_get_rgctx_klass (cfg, context_used,
3127 klass, MONO_RGCTX_INFO_KLASS);
3129 if (is_complex_isinst (klass)) {
3130 /* Complex case, handle by an icall */
3136 args [1] = klass_inst;
3138 return mono_emit_jit_icall (cfg, mono_object_castclass, args);
3140 /* Simple case, handled by the code below */
/* Null references always pass a castclass. */
3144 NEW_BBLOCK (cfg, is_null_bb);
3146 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3147 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3149 save_cast_details (cfg, klass, obj_reg);
3151 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3152 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3153 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
3155 int klass_reg = alloc_preg (cfg);
3157 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed non-array class: a single vtable/klass identity compare suffices. */
3159 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3160 /* the remoting code is broken, access the class for now */
3161 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3162 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3164 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
3165 cfg->exception_ptr = klass;
3168 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3170 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3171 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3173 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3175 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3176 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
3180 MONO_START_BB (cfg, is_null_bb);
3182 reset_cast_details (cfg);
3188 * Returns NULL and set the cfg exception on error.
/* Emit IR for the isinst opcode: result is the object itself on success,
 * NULL on failure.  Complex classes use the mono_object_isinst icall; the
 * inline path dispatches on the kind of KLASS (interface, array with
 * element-class checks, nullable, sealed class, general class) and joins
 * at is_null_bb (success) / false_bb (failure). */
3191 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3194 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
3195 int obj_reg = src->dreg;
3196 int vtable_reg = alloc_preg (cfg);
3197 int res_reg = alloc_preg (cfg);
3198 MonoInst *klass_inst = NULL;
3201 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3203 if (is_complex_isinst (klass)) {
3206 /* Complex case, handle by an icall */
3212 args [1] = klass_inst;
3214 return mono_emit_jit_icall (cfg, mono_object_isinst, args);
3216 /* Simple case, the code below can handle it */
3220 NEW_BBLOCK (cfg, is_null_bb);
3221 NEW_BBLOCK (cfg, false_bb);
3222 NEW_BBLOCK (cfg, end_bb);
3224 /* Do the assignment at the beginning, so the other assignment can be if converted */
3225 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
3226 ins->type = STACK_OBJ;
/* Null input: isinst yields the (null) object unchanged. */
3229 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3230 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
3232 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3234 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3235 g_assert (!context_used);
3236 /* the is_null_bb target simply copies the input register to the output */
3237 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
3239 int klass_reg = alloc_preg (cfg);
/* Array case: first match the rank, then compare element classes,
 * with special handling for enum/object element types. */
3242 int rank_reg = alloc_preg (cfg);
3243 int eclass_reg = alloc_preg (cfg);
3245 g_assert (!context_used);
3246 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3247 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
3248 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3249 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3250 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
3251 if (klass->cast_class == mono_defaults.object_class) {
3252 int parent_reg = alloc_preg (cfg);
3253 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
3254 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
3255 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3256 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3257 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
3258 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
3259 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3260 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3261 } else if (klass->cast_class == mono_defaults.enum_class) {
3262 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3263 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3264 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
3265 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3267 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
3268 /* Check that the object is a vector too */
3269 int bounds_reg = alloc_preg (cfg);
3270 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
3271 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
3272 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3275 /* the is_null_bb target simply copies the input register to the output */
3276 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3278 } else if (mono_class_is_nullable (klass)) {
3279 g_assert (!context_used);
3280 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3281 /* the is_null_bb target simply copies the input register to the output */
3282 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
3284 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3285 g_assert (!context_used);
3286 /* the remoting code is broken, access the class for now */
3287 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3288 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3290 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
3291 cfg->exception_ptr = klass;
3294 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3296 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3297 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3299 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3300 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
3302 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3303 /* the is_null_bb target simply copies the input register to the output */
3304 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
/* Failure path: result register becomes NULL. */
3309 MONO_START_BB (cfg, false_bb);
3311 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
3312 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3314 MONO_START_BB (cfg, is_null_bb);
3316 MONO_START_BB (cfg, end_bb);
/*
 * Emit IR for the (remoting-aware) CISINST internal opcode; returns an
 * int result instead of an object — see the 0/1/2 meanings below.
 * Transparent-proxy objects whose custom_type_info is unset yield 2 so
 * the caller can fall back to a remoting-aware check.
 */
3322 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3324 /* This opcode takes as input an object reference and a class, and returns:
3325 0) if the object is an instance of the class,
3326 1) if the object is not instance of the class,
3327 2) if the object is a proxy whose type cannot be determined */
3330 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
3331 int obj_reg = src->dreg;
3332 int dreg = alloc_ireg (cfg);
3334 int klass_reg = alloc_preg (cfg);
3336 NEW_BBLOCK (cfg, true_bb);
3337 NEW_BBLOCK (cfg, false_bb);
3338 NEW_BBLOCK (cfg, false2_bb);
3339 NEW_BBLOCK (cfg, end_bb);
3340 NEW_BBLOCK (cfg, no_proxy_bb);
/* Null object: "not an instance" (result 1). */
3342 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3343 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
3345 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3346 NEW_BBLOCK (cfg, interface_fail_bb);
3348 tmp_reg = alloc_preg (cfg);
3349 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3350 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
3351 MONO_START_BB (cfg, interface_fail_bb);
3352 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Interface check failed: only a transparent proxy can still answer 2. */
3354 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
3356 tmp_reg = alloc_preg (cfg);
3357 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3358 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3359 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
/* Non-interface class: dispatch on whether the object is a proxy. */
3361 tmp_reg = alloc_preg (cfg);
3362 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3363 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3365 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3366 tmp_reg = alloc_preg (cfg);
3367 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3368 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3370 tmp_reg = alloc_preg (cfg);
3371 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3372 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3373 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3375 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
3376 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
3378 MONO_START_BB (cfg, no_proxy_bb);
3380 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
/* Merge the three outcomes into dreg: 1 = no, 2 = undetermined proxy, 0 = yes. */
3383 MONO_START_BB (cfg, false_bb);
3385 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3386 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3388 MONO_START_BB (cfg, false2_bb);
3390 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
3391 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3393 MONO_START_BB (cfg, true_bb);
3395 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3397 MONO_START_BB (cfg, end_bb);
/* Materialize the result as an I4 stack value. */
3400 MONO_INST_NEW (cfg, ins, OP_ICONST);
3402 ins->type = STACK_I4;
/*
 * Emit IR for the (remoting-aware) CCASTCLASS internal opcode; like
 * handle_cisinst it produces an int, but failure throws
 * InvalidCastException instead of returning a code — see below.
 */
3408 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3410 /* This opcode takes as input an object reference and a class, and returns:
3411 0) if the object is an instance of the class,
3412 1) if the object is a proxy whose type cannot be determined
3413 an InvalidCastException exception is thrown otherwhise*/
3416 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
3417 int obj_reg = src->dreg;
3418 int dreg = alloc_ireg (cfg);
3419 int tmp_reg = alloc_preg (cfg);
3420 int klass_reg = alloc_preg (cfg);
3422 NEW_BBLOCK (cfg, end_bb);
3423 NEW_BBLOCK (cfg, ok_result_bb);
/* Null reference always casts successfully (result 0). */
3425 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3426 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
3428 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3429 NEW_BBLOCK (cfg, interface_fail_bb);
3431 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3432 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
3433 MONO_START_BB (cfg, interface_fail_bb);
3434 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* A failed interface cast is only tolerated for a transparent proxy
 * without custom type info (result 1); anything else throws. */
3436 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
3438 tmp_reg = alloc_preg (cfg);
3439 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3440 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3441 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
3443 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3444 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3447 NEW_BBLOCK (cfg, no_proxy_bb);
/* Non-interface class: dispatch on whether the object is a proxy. */
3449 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3450 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3451 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3453 tmp_reg = alloc_preg (cfg);
3454 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3455 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3457 tmp_reg = alloc_preg (cfg);
3458 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3459 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3460 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3462 NEW_BBLOCK (cfg, fail_1_bb);
3464 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
3466 MONO_START_BB (cfg, fail_1_bb);
3468 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3469 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3471 MONO_START_BB (cfg, no_proxy_bb);
/* Plain object: a failing castclass here throws inside the helper. */
3473 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
3476 MONO_START_BB (cfg, ok_result_bb);
3478 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3480 MONO_START_BB (cfg, end_bb);
/* Materialize the result as an I4 stack value. */
3483 MONO_INST_NEW (cfg, ins, OP_ICONST);
3485 ins->type = STACK_I4;
3491 * Returns NULL and set the cfg exception on error.
/*
 * handle_delegate_ctor:
 *
 *   Emit IR which allocates a delegate of type KLASS and initializes it
 * inline, doing the work of mono_delegate_ctor () at JIT time: the target,
 * method, method_code and invoke_impl fields are stored directly into the
 * new object. CONTEXT_USED selects rgctx-based lookups for generic sharing.
 * Returns NULL and sets the cfg exception on error.
 */
3493 static G_GNUC_UNUSED MonoInst*
3494 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used)
3496 gpointer *trampoline;
3497 MonoInst *obj, *method_ins, *tramp_ins;
/* Allocate the delegate object itself. */
3501 obj = handle_alloc (cfg, klass, FALSE);
3505 /* Inline the contents of mono_delegate_ctor */
3507 /* Set target field */
3508 /* Optimize away setting of NULL target */
3509 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0))
3510 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
3512 /* Set method field */
3513 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
3514 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
3517 * To avoid looking up the compiled code belonging to the target method
3518 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
3519 * store it, and we fill it after the method has been compiled.
3521 if (!cfg->compile_aot && !method->dynamic) {
3522 MonoInst *code_slot_ins;
3525 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* Lazily create the per-domain method -> code-slot hash under the domain lock. */
3527 domain = mono_domain_get ();
3528 mono_domain_lock (domain);
3529 if (!domain_jit_info (domain)->method_code_hash)
3530 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
3531 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
3533 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
3534 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
3536 mono_domain_unlock (domain);
3538 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
3540 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
3543 /* Set invoke_impl field */
/* Under AOT the trampoline address is a patch-site constant; otherwise it is created now. */
3544 if (cfg->compile_aot) {
3545 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, klass);
3547 trampoline = mono_create_delegate_trampoline (klass);
3548 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
3550 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
3552 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit a call to the rank-specific mono_array_new_va icall for a
 * multi-dimensional 'newarr'/newobj with RANK dimension arguments taken
 * from SP. The icall uses a vararg calling convention, which LLVM cannot
 * handle, so the LLVM backend is disabled for this method.
 */
3558 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
3560 MonoJitICallInfo *info;
3562 /* Need to register the icall so it gets an icall wrapper */
3563 info = mono_get_array_new_va_icall (rank);
3565 cfg->flags |= MONO_CFG_HAS_VARARGS;
3567 /* mono_array_new_va () needs a vararg calling convention */
3568 cfg->disable_llvm = TRUE;
3570 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
3571 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * mono_emit_load_got_addr:
 *
 *   Materialize the GOT address into cfg->got_var by inserting an
 * OP_LOAD_GOTADDR at the very start of the entry bblock, and add a dummy
 * use in the exit bblock so the variable stays live for the whole method.
 * No-op when there is no got_var or it was already allocated.
 */
3575 mono_emit_load_got_addr (MonoCompile *cfg)
3577 MonoInst *getaddr, *dummy_use;
3579 if (!cfg->got_var || cfg->got_var_allocated)
3582 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
3583 getaddr->dreg = cfg->got_var->dreg;
3585 /* Add it to the start of the first bblock */
/* Prepend by hand when the entry block already has code, otherwise append normally. */
3586 if (cfg->bb_entry->code) {
3587 getaddr->next = cfg->bb_entry->code;
3588 cfg->bb_entry->code = getaddr;
3591 MONO_ADD_INS (cfg->bb_entry, getaddr);
3593 cfg->got_var_allocated = TRUE;
3596 * Add a dummy use to keep the got_var alive, since real uses might
3597 * only be generated by the back ends.
3598 * Add it to end_bblock, so the variable's lifetime covers the whole
3600 * It would be better to make the usage of the got var explicit in all
3601 * cases when the backend needs it (i.e. calls, throw etc.), so this
3602 * wouldn't be needed.
3604 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
3605 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/*
 * IL-size ceiling for inlining, lazily initialized from the MONO_INLINELIMIT
 * environment variable (falling back to INLINE_LENGTH_LIMIT) on first use in
 * mono_method_check_inlining ().
 */
3608 static int inline_limit;
3609 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 *   Decide whether METHOD is eligible for inlining into the method being
 * compiled in CFG. Rejects generic sharing, deep inline nesting, methods
 * flagged noinlining/synchronized, MarshalByRef classes, oversized bodies,
 * classes whose cctor cannot be run/proven-run up front, and methods with
 * declarative security. Under soft-float, also rejects R4 parameters and
 * return values.
 */
3612 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
3614 MonoMethodHeaderSummary header;
3616 #ifdef MONO_ARCH_SOFT_FLOAT
3617 MonoMethodSignature *sig = mono_method_signature (method);
3621 if (cfg->generic_sharing_context)
3624 if (cfg->inline_depth > 10)
3627 #ifdef MONO_ARCH_HAVE_LMF_OPS
3628 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3629 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
3630 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
3635 if (!mono_method_get_header_summary (method, &header))
3638 /*runtime, icall and pinvoke are checked by summary call*/
3639 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
3640 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
3641 (method->klass->marshalbyref) ||
3645 /* also consider num_locals? */
3646 /* Do the size check early to avoid creating vtables */
/* NOTE(review): getenv () is called twice here; caching the result would avoid the second lookup. */
3647 if (!inline_limit_inited) {
3648 if (getenv ("MONO_INLINELIMIT"))
3649 inline_limit = atoi (getenv ("MONO_INLINELIMIT"))
3651 inline_limit = INLINE_LENGTH_LIMIT;
3652 inline_limit_inited = TRUE;
3654 if (header.code_size >= inline_limit)
3658 * if we can initialize the class of the method right away, we do,
3659 * otherwise we don't allow inlining if the class needs initialization,
3660 * since it would mean inserting a call to mono_runtime_class_init()
3661 * inside the inlined code
3663 if (!(cfg->opt & MONO_OPT_SHARED)) {
3664 if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
3665 if (cfg->run_cctors && method->klass->has_cctor) {
3666 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
3667 if (!method->klass->runtime_info)
3668 /* No vtable created yet */
3670 vtable = mono_class_vtable (cfg->domain, method->klass);
3673 /* This makes so that inline cannot trigger */
3674 /* .cctors: too many apps depend on them */
3675 /* running with a specific order... */
3676 if (! vtable->initialized)
3678 mono_runtime_class_init (vtable);
3680 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
3681 if (!method->klass->runtime_info)
3682 /* No vtable created yet */
3684 vtable = mono_class_vtable (cfg->domain, method->klass);
3687 if (!vtable->initialized)
3692 * If we're compiling for shared code
3693 * the cctor will need to be run at aot method load time, for example,
3694 * or at the end of the compilation of the inlining method.
3696 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
3701 * CAS - do not inline methods with declarative security
3702 * Note: this has to be before any possible return TRUE;
3704 if (mono_method_has_declsec (method))
3707 #ifdef MONO_ARCH_SOFT_FLOAT
/* Soft-float: R4 values need helper calls, so reject R4 return/params. */
3709 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
3711 for (i = 0; i < sig->param_count; ++i)
3712 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Determine whether a static field access from METHOD requires emitting a
 * call to run the class constructor of VTABLE's class. Already-initialized
 * vtables (outside AOT), BeforeFieldInit classes, classes with no pending
 * cctor, and instance methods of the same class are exempt.
 */
3720 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoVTable *vtable)
3722 if (vtable->initialized && !cfg->compile_aot)
3725 if (vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
3728 if (!mono_class_needs_cctor_run (vtable->klass, method))
3731 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (vtable->klass == method->klass))
3732 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit the address computation for a one-dimensional ldelema:
 * bounds-check INDEX against the array length, then compute
 * &arr->vector [index * element_size]. On x86/amd64 a single LEA is used
 * for power-of-two element sizes.
 */
3739 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index)
3743 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
3745 mono_class_init (klass);
3746 size = mono_class_array_element_size (klass);
3748 mult_reg = alloc_preg (cfg);
3749 array_reg = arr->dreg;
3750 index_reg = index->dreg;
3752 #if SIZEOF_REGISTER == 8
3753 /* The array reg is 64 bits but the index reg is only 32 */
3754 if (COMPILE_LLVM (cfg)) {
3756 index2_reg = index_reg;
3758 index2_reg = alloc_preg (cfg);
3759 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
/* 32-bit target: truncate an I8 index down to 32 bits. */
3762 if (index->type == STACK_I8) {
3763 index2_reg = alloc_preg (cfg);
3764 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
3766 index2_reg = index_reg;
3770 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
3772 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Fast path: fold the scale and vector offset into one LEA. */
3773 if (size == 1 || size == 2 || size == 4 || size == 8) {
3774 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
3776 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
3777 ins->type = STACK_PTR;
/* Generic path: explicit multiply + add + vector-offset add. */
3783 add_reg = alloc_preg (cfg);
3785 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
3786 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
3787 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3788 ins->type = STACK_PTR;
3789 MONO_ADD_INS (cfg->cbb, ins);
3794 #ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit the address computation for a two-dimensional ldelema: for each
 * dimension, load lower_bound/length from the bounds array, rebase the
 * index, and range-check it (unsigned compare catches negatives too), then
 * compute &arr->vector [(realidx1 * length2 + realidx2) * element_size].
 * Only compiled when the arch has real multiply ops (depends on OP_LMUL).
 */
3796 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
3798 int bounds_reg = alloc_preg (cfg);
3799 int add_reg = alloc_preg (cfg);
3800 int mult_reg = alloc_preg (cfg);
3801 int mult2_reg = alloc_preg (cfg);
3802 int low1_reg = alloc_preg (cfg);
3803 int low2_reg = alloc_preg (cfg);
3804 int high1_reg = alloc_preg (cfg);
3805 int high2_reg = alloc_preg (cfg);
3806 int realidx1_reg = alloc_preg (cfg);
3807 int realidx2_reg = alloc_preg (cfg);
3808 int sum_reg = alloc_preg (cfg);
3813 mono_class_init (klass);
3814 size = mono_class_array_element_size (klass);
3816 index1 = index_ins1->dreg;
3817 index2 = index_ins2->dreg;
3819 /* range checking */
3820 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
3821 arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
/* First dimension: rebase by lower_bound, then unsigned-compare against length. */
3823 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
3824 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3825 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
3826 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
3827 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
3828 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
3829 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Second dimension: same check at bounds [1] (offset by sizeof (MonoArrayBounds)). */
3831 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
3832 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3833 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
3834 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
3835 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
3836 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
3837 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Flatten (i, j) to a linear element index, scale, and add the vector offset. */
3839 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
3840 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
3841 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
3842 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
3843 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3845 ins->type = STACK_MP;
3847 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 *   Dispatch the address computation for an Address/Get/Set array accessor
 * CMETHOD: rank 1 uses the inline 1-D path, rank 2 the inline 2-D path
 * (when intrinsics are enabled and the arch has real multiply), and higher
 * ranks call a marshalling wrapper. For setters, the trailing value
 * argument is excluded from the rank computation.
 */
3854 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
3858 MonoMethod *addr_method;
3861 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
3864 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1]);
3866 #ifndef MONO_ARCH_EMULATE_MUL_DIV
3867 /* emit_ldelema_2 depends on OP_LMUL */
3868 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
3869 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
/* General case: call the generated Address () wrapper for this rank/size. */
3873 element_size = mono_class_array_element_size (cmethod->klass->element_class);
3874 addr_method = mono_marshal_get_array_address (rank, element_size);
3875 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
3880 static MonoBreakPolicy
3881 always_insert_breakpoint (MonoMethod *method)
3883 return MONO_BREAK_POLICY_ALWAYS;
/* Active break policy callback, replaced via mono_set_break_policy (). */
3886 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
3889 * mono_set_break_policy:
3890 * policy_callback: the new callback function
3892 * Allow embedders to decide whether to actually obey breakpoint instructions
3893 * (both break IL instructions and Debugger.Break () method calls), for example
3894 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
3895 * untrusted or semi-trusted code.
3897 * @policy_callback will be called every time a break point instruction needs to
3898 * be inserted with the method argument being the method that calls Debugger.Break()
3899 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
3900 * if it wants the breakpoint to not be effective in the given method.
3901 * #MONO_BREAK_POLICY_ALWAYS is the default.
3904 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
3906 if (policy_callback)
3907 break_policy_func = policy_callback;
3909 break_policy_func = always_insert_breakpoint;
3913 should_insert_brekpoint (MonoMethod *method) {
3914 switch (break_policy_func (method)) {
3915 case MONO_BREAK_POLICY_ALWAYS:
3917 case MONO_BREAK_POLICY_NEVER:
3919 case MONO_BREAK_POLICY_ON_DBG:
3920 return mono_debug_using_mono_debugger ();
3922 g_warning ("Incorrect value returned from break policy callback");
/*
 * mini_emit_inst_for_method:
 *
 *   Try to replace a call to CMETHOD with inline IR (an intrinsic). Handles
 * selected methods of String, Object, Array, RuntimeHelpers, Thread,
 * Monitor, Interlocked, Debugger, Environment and Math, then falls back to
 * SIMD intrinsics and finally to the arch-specific hook. Returns the
 * emitted instruction, or defers to mono_arch_emit_inst_for_method ().
 */
3928 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
3930 MonoInst *ins = NULL;
/* Cache the RuntimeHelpers class lookup across calls. */
3932 static MonoClass *runtime_helpers_class = NULL;
3933 if (! runtime_helpers_class)
3934 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
3935 "System.Runtime.CompilerServices", "RuntimeHelpers");
/* --- System.String intrinsics --- */
3937 if (cmethod->klass == mono_defaults.string_class) {
3938 if (strcmp (cmethod->name, "get_Chars") == 0) {
3939 int dreg = alloc_ireg (cfg);
3940 int index_reg = alloc_preg (cfg);
3941 int mult_reg = alloc_preg (cfg);
3942 int add_reg = alloc_preg (cfg);
3944 #if SIZEOF_REGISTER == 8
3945 /* The array reg is 64 bits but the index reg is only 32 */
3946 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg)
3948 index_reg = args [1]->dreg;
3950 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
3952 #if defined(TARGET_X86) || defined(TARGET_AMD64)
3953 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
3954 add_reg = ins->dreg;
3955 /* Avoid a warning */
3957 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
3960 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
3961 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
3962 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
3963 add_reg, G_STRUCT_OFFSET (MonoString, chars));
3965 type_from_op (ins, NULL, NULL);
3967 } else if (strcmp (cmethod->name, "get_Length") == 0) {
3968 int dreg = alloc_ireg (cfg);
3969 /* Decompose later to allow more optimizations */
3970 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
3971 ins->type = STACK_I4;
3972 cfg->cbb->has_array_access = TRUE;
3973 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
3976 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
3977 int mult_reg = alloc_preg (cfg);
3978 int add_reg = alloc_preg (cfg);
3980 /* The corlib functions check for oob already. */
3981 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
3982 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
3983 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
3984 return cfg->cbb->last_ins;
/* --- System.Object intrinsics --- */
3987 } else if (cmethod->klass == mono_defaults.object_class) {
3989 if (strcmp (cmethod->name, "GetType") == 0) {
3990 int dreg = alloc_preg (cfg);
3991 int vt_reg = alloc_preg (cfg);
3992 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3993 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
3994 type_from_op (ins, NULL, NULL);
3997 #if !defined(MONO_ARCH_EMULATE_MUL_DIV) && !defined(HAVE_MOVING_COLLECTOR)
/* Non-moving GC only: hash derived from the (stable) object address. */
3998 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0) {
3999 int dreg = alloc_ireg (cfg);
4000 int t1 = alloc_ireg (cfg);
4002 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
4003 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
4004 ins->type = STACK_I4;
4008 } else if (strcmp (cmethod->name, ".ctor") == 0) {
4009 MONO_INST_NEW (cfg, ins, OP_NOP);
4010 MONO_ADD_INS (cfg->cbb, ins);
/* --- System.Array intrinsics --- */
4014 } else if (cmethod->klass == mono_defaults.array_class) {
4015 if (cmethod->name [0] != 'g')
4018 if (strcmp (cmethod->name, "get_Rank") == 0) {
4019 int dreg = alloc_ireg (cfg);
4020 int vtable_reg = alloc_preg (cfg);
4021 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
4022 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
4023 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
4024 vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
4025 type_from_op (ins, NULL, NULL);
4028 } else if (strcmp (cmethod->name, "get_Length") == 0) {
4029 int dreg = alloc_ireg (cfg);
4031 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4032 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
4033 type_from_op (ins, NULL, NULL);
/* --- RuntimeHelpers --- */
4038 } else if (cmethod->klass == runtime_helpers_class) {
4040 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
4041 EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
/* --- System.Threading.Thread --- */
4045 } else if (cmethod->klass == mono_defaults.thread_class) {
4046 if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
4047 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
4048 MONO_ADD_INS (cfg->cbb, ins);
4050 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
4051 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
4052 MONO_ADD_INS (cfg->cbb, ins);
/* --- System.Threading.Monitor: fast-path Enter/Exit via trampolines --- */
4055 } else if (cmethod->klass == mono_defaults.monitor_class) {
4056 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
4057 if (strcmp (cmethod->name, "Enter") == 0) {
4060 if (COMPILE_LLVM (cfg)) {
4062 * Pass the argument normally, the LLVM backend will handle the
4063 * calling convention problems.
4065 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
4067 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
4068 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
4069 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
4070 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
4073 return (MonoInst*)call;
4074 } else if (strcmp (cmethod->name, "Exit") == 0) {
4077 if (COMPILE_LLVM (cfg)) {
4078 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
4080 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
4081 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
4082 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
4083 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
4086 return (MonoInst*)call;
4088 #elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
4089 MonoMethod *fast_method = NULL;
4091 /* Avoid infinite recursion */
4092 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
4093 (strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
4094 strcmp (cfg->method->name, "FastMonitorExit") == 0))
4097 if (strcmp (cmethod->name, "Enter") == 0 ||
4098 strcmp (cmethod->name, "Exit") == 0)
4099 fast_method = mono_monitor_get_fast_path (cmethod);
4103 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
/* --- System.Array.GetGenericValueImpl: inline element load/store --- */
4105 } else if (mini_class_is_system_array (cmethod->klass) &&
4106 strcmp (cmethod->name, "GetGenericValueImpl") == 0) {
4107 MonoInst *addr, *store, *load;
4108 MonoClass *eklass = mono_class_from_mono_type (fsig->params [1]);
4110 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1]);
4111 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
4112 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/* --- System.Threading.Interlocked: atomic ops where the arch supports them --- */
4114 } else if (cmethod->klass->image == mono_defaults.corlib &&
4115 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
4116 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
4119 #if SIZEOF_REGISTER == 8
4120 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
4121 /* 64 bit reads are already atomic */
4122 MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
4123 ins->dreg = mono_alloc_preg (cfg);
4124 ins->inst_basereg = args [0]->dreg;
4125 ins->inst_offset = 0;
4126 MONO_ADD_INS (cfg->cbb, ins);
4130 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
/* Increment/Decrement are atomic add of +1/-1. */
4131 if (strcmp (cmethod->name, "Increment") == 0) {
4132 MonoInst *ins_iconst;
4135 if (fsig->params [0]->type == MONO_TYPE_I4)
4136 opcode = OP_ATOMIC_ADD_NEW_I4;
4137 #if SIZEOF_REGISTER == 8
4138 else if (fsig->params [0]->type == MONO_TYPE_I8)
4139 opcode = OP_ATOMIC_ADD_NEW_I8;
4142 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4143 ins_iconst->inst_c0 = 1;
4144 ins_iconst->dreg = mono_alloc_ireg (cfg);
4145 MONO_ADD_INS (cfg->cbb, ins_iconst);
4147 MONO_INST_NEW (cfg, ins, opcode);
4148 ins->dreg = mono_alloc_ireg (cfg);
4149 ins->inst_basereg = args [0]->dreg;
4150 ins->inst_offset = 0;
4151 ins->sreg2 = ins_iconst->dreg;
4152 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4153 MONO_ADD_INS (cfg->cbb, ins);
4155 } else if (strcmp (cmethod->name, "Decrement") == 0) {
4156 MonoInst *ins_iconst;
4159 if (fsig->params [0]->type == MONO_TYPE_I4)
4160 opcode = OP_ATOMIC_ADD_NEW_I4;
4161 #if SIZEOF_REGISTER == 8
4162 else if (fsig->params [0]->type == MONO_TYPE_I8)
4163 opcode = OP_ATOMIC_ADD_NEW_I8;
4166 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4167 ins_iconst->inst_c0 = -1;
4168 ins_iconst->dreg = mono_alloc_ireg (cfg);
4169 MONO_ADD_INS (cfg->cbb, ins_iconst);
4171 MONO_INST_NEW (cfg, ins, opcode);
4172 ins->dreg = mono_alloc_ireg (cfg);
4173 ins->inst_basereg = args [0]->dreg;
4174 ins->inst_offset = 0;
4175 ins->sreg2 = ins_iconst->dreg;
4176 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4177 MONO_ADD_INS (cfg->cbb, ins);
4179 } else if (strcmp (cmethod->name, "Add") == 0) {
4182 if (fsig->params [0]->type == MONO_TYPE_I4)
4183 opcode = OP_ATOMIC_ADD_NEW_I4;
4184 #if SIZEOF_REGISTER == 8
4185 else if (fsig->params [0]->type == MONO_TYPE_I8)
4186 opcode = OP_ATOMIC_ADD_NEW_I8;
4190 MONO_INST_NEW (cfg, ins, opcode);
4191 ins->dreg = mono_alloc_ireg (cfg);
4192 ins->inst_basereg = args [0]->dreg;
4193 ins->inst_offset = 0;
4194 ins->sreg2 = args [1]->dreg;
4195 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4196 MONO_ADD_INS (cfg->cbb, ins);
4199 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
4201 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
4202 if (strcmp (cmethod->name, "Exchange") == 0) {
4204 gboolean is_ref = fsig->params [0]->type == MONO_TYPE_OBJECT;
4206 if (fsig->params [0]->type == MONO_TYPE_I4)
4207 opcode = OP_ATOMIC_EXCHANGE_I4;
4208 #if SIZEOF_REGISTER == 8
4209 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I8) ||
4210 (fsig->params [0]->type == MONO_TYPE_I))
4211 opcode = OP_ATOMIC_EXCHANGE_I8;
4213 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I))
4214 opcode = OP_ATOMIC_EXCHANGE_I4;
4219 MONO_INST_NEW (cfg, ins, opcode);
4220 ins->dreg = mono_alloc_ireg (cfg);
4221 ins->inst_basereg = args [0]->dreg;
4222 ins->inst_offset = 0;
4223 ins->sreg2 = args [1]->dreg;
4224 MONO_ADD_INS (cfg->cbb, ins);
4226 switch (fsig->params [0]->type) {
4228 ins->type = STACK_I4;
4232 ins->type = STACK_I8;
4234 case MONO_TYPE_OBJECT:
4235 ins->type = STACK_OBJ;
4238 g_assert_not_reached ();
4241 #if HAVE_WRITE_BARRIERS
/* Reference exchange may store a pointer into the heap: emit the GC write barrier. */
4243 MonoInst *dummy_use;
4244 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
4245 mono_emit_method_call (cfg, write_barrier, &args [0], NULL);
4246 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [1]);
4250 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
4252 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS
4253 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
4255 gboolean is_ref = MONO_TYPE_IS_REFERENCE (fsig->params [1]);
4256 if (fsig->params [1]->type == MONO_TYPE_I4)
4258 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I)
4259 size = sizeof (gpointer);
4260 else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I4)
4263 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
4264 ins->dreg = alloc_ireg (cfg);
4265 ins->sreg1 = args [0]->dreg;
4266 ins->sreg2 = args [1]->dreg;
4267 ins->sreg3 = args [2]->dreg;
4268 ins->type = STACK_I4;
4269 MONO_ADD_INS (cfg->cbb, ins);
4270 } else if (size == 8) {
4271 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
4272 ins->dreg = alloc_ireg (cfg);
4273 ins->sreg1 = args [0]->dreg;
4274 ins->sreg2 = args [1]->dreg;
4275 ins->sreg3 = args [2]->dreg;
4276 ins->type = STACK_I8;
4277 MONO_ADD_INS (cfg->cbb, ins);
4279 /* g_assert_not_reached (); */
4281 #if HAVE_WRITE_BARRIERS
4283 MonoInst *dummy_use;
4284 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
4285 mono_emit_method_call (cfg, write_barrier, &args [0], NULL);
4286 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [1]);
4290 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS */
/* --- Other corlib helpers: Debugger.Break, Environment.get_IsRunningOnWindows --- */
4294 } else if (cmethod->klass->image == mono_defaults.corlib) {
4295 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
4296 && strcmp (cmethod->klass->name, "Debugger") == 0) {
4297 if (should_insert_brekpoint (cfg->method))
4298 MONO_INST_NEW (cfg, ins, OP_BREAK);
4300 MONO_INST_NEW (cfg, ins, OP_NOP);
4301 MONO_ADD_INS (cfg->cbb, ins);
4304 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
4305 && strcmp (cmethod->klass->name, "Environment") == 0) {
4307 EMIT_NEW_ICONST (cfg, ins, 1);
4309 EMIT_NEW_ICONST (cfg, ins, 0);
4313 } else if (cmethod->klass == mono_defaults.math_class) {
4315 * There is general branches code for Min/Max, but it does not work for
4317 * http://everything2.com/?node_id=1051618
/* Fallbacks: SIMD intrinsics, then the architecture-specific hook. */
4321 #ifdef MONO_ARCH_SIMD_INTRINSICS
4322 if (cfg->opt & MONO_OPT_SIMD) {
4323 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
4329 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
4333 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *
 *   Redirect selected runtime method calls to alternate implementations.
 * Currently: String.InternalAllocateStr is routed to the GC's managed
 * allocator when allocation profiling is off, so string allocation avoids
 * a runtime transition. Disabled for cross-compilation.
 */
4336 inline static MonoInst*
4337 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
4338 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
4340 if (method->klass == mono_defaults.string_class) {
4341 /* managed string allocation support */
4342 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS)) {
4343 MonoInst *iargs [2];
4344 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4345 MonoMethod *managed_alloc = NULL;
4347 g_assert (vtable); /* Should not fail since it's System.String */
4348 #ifndef MONO_CROSS_COMPILE
4349 managed_alloc = mono_gc_get_managed_allocator (vtable, FALSE);
4353 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4354 iargs [1] = args [0];
4355 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
/*
 * mono_save_args:
 *
 *   Before inlining, create an OP_LOCAL variable for each argument
 * (including an implicit 'this') of SIG and emit a store of the
 * corresponding stack value from SP into it, populating cfg->args for the
 * inlined body.
 */
4362 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
4364 MonoInst *store, *temp;
4367 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
/* Slot 0 is 'this' when present; its type comes from the stack entry, not the signature. */
4368 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
4371 * FIXME: We should use *args++ = sp [0], but that would mean the arg
4372 * would be different than the MonoInst's used to represent arguments, and
4373 * the ldelema implementation can't deal with that.
4374 * Solution: When ldelema is used on an inline argument, create a var for
4375 * it, emit ldelema on that var, and emit the saving code below in
4376 * inline_method () if needed.
4378 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
4379 cfg->args [i] = temp;
4380 /* This uses cfg->args [i] which is set by the preceeding line */
4381 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
4382 store->cil_code = sp [0]->cil_code;
/*
 * Compile-time switches for the name-prefix inlining filters below,
 * driven by the MONO_INLINE_{CALLED,CALLER}_METHOD_NAME_LIMIT environment
 * variables (debugging aids for bisecting inlining problems).
 */
4387 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
4388 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
4390 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *
 *   Debugging filter: only permit inlining of callees whose full name
 * starts with the prefix given in $MONO_INLINE_CALLED_METHOD_NAME_LIMIT.
 * The prefix is cached in a static on first use; when unset/empty the
 * filter passes everything.
 */
4392 check_inline_called_method_name_limit (MonoMethod *called_method)
4395 static char *limit = NULL;
4397 if (limit == NULL) {
4398 char *limit_string = getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
4400 if (limit_string != NULL)
4401 limit = limit_string;
4403 limit = (char *) "";
4406 if (limit [0] != '\0') {
4407 char *called_method_name = mono_method_full_name (called_method, TRUE);
4409 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
4410 g_free (called_method_name);
4412 //return (strncmp_result <= 0);
4413 return (strncmp_result == 0);
4420 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
4422 check_inline_caller_method_name_limit (MonoMethod *caller_method)
4425 static char *limit = NULL;
4427 if (limit == NULL) {
4428 char *limit_string = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
4429 if (limit_string != NULL) {
4430 limit = limit_string;
4432 limit = (char *) "";
4436 if (limit [0] != '\0') {
4437 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
4439 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
4440 g_free (caller_method_name);
4442 //return (strncmp_result <= 0);
4443 return (strncmp_result == 0);
4451 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
4452 guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_allways)
4454 MonoInst *ins, *rvar = NULL;
4455 MonoMethodHeader *cheader;
4456 MonoBasicBlock *ebblock, *sbblock;
4458 MonoMethod *prev_inlined_method;
4459 MonoInst **prev_locals, **prev_args;
4460 MonoType **prev_arg_types;
4461 guint prev_real_offset;
4462 GHashTable *prev_cbb_hash;
4463 MonoBasicBlock **prev_cil_offset_to_bb;
4464 MonoBasicBlock *prev_cbb;
4465 unsigned char* prev_cil_start;
4466 guint32 prev_cil_offset_to_bb_len;
4467 MonoMethod *prev_current_method;
4468 MonoGenericContext *prev_generic_context;
4469 gboolean ret_var_set, prev_ret_var_set;
4471 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
4473 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
4474 if ((! inline_allways) && ! check_inline_called_method_name_limit (cmethod))
4477 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
4478 if ((! inline_allways) && ! check_inline_caller_method_name_limit (cfg->method))
4482 if (cfg->verbose_level > 2)
4483 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4485 if (!cmethod->inline_info) {
4486 mono_jit_stats.inlineable_methods++;
4487 cmethod->inline_info = 1;
4490 /* allocate local variables */
4491 cheader = mono_method_get_header (cmethod);
4493 if (cheader == NULL || mono_loader_get_last_error ()) {
4495 mono_metadata_free_mh (cheader);
4496 mono_loader_clear_error ();
4500 /* allocate space to store the return value */
4501 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
4502 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
4506 prev_locals = cfg->locals;
4507 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
4508 for (i = 0; i < cheader->num_locals; ++i)
4509 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
4511 /* allocate start and end blocks */
4512 /* This is needed so if the inline is aborted, we can clean up */
4513 NEW_BBLOCK (cfg, sbblock);
4514 sbblock->real_offset = real_offset;
4516 NEW_BBLOCK (cfg, ebblock);
4517 ebblock->block_num = cfg->num_bblocks++;
4518 ebblock->real_offset = real_offset;
4520 prev_args = cfg->args;
4521 prev_arg_types = cfg->arg_types;
4522 prev_inlined_method = cfg->inlined_method;
4523 cfg->inlined_method = cmethod;
4524 cfg->ret_var_set = FALSE;
4525 cfg->inline_depth ++;
4526 prev_real_offset = cfg->real_offset;
4527 prev_cbb_hash = cfg->cbb_hash;
4528 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
4529 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
4530 prev_cil_start = cfg->cil_start;
4531 prev_cbb = cfg->cbb;
4532 prev_current_method = cfg->current_method;
4533 prev_generic_context = cfg->generic_context;
4534 prev_ret_var_set = cfg->ret_var_set;
4536 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, *ip == CEE_CALLVIRT);
4538 ret_var_set = cfg->ret_var_set;
4540 cfg->inlined_method = prev_inlined_method;
4541 cfg->real_offset = prev_real_offset;
4542 cfg->cbb_hash = prev_cbb_hash;
4543 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
4544 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
4545 cfg->cil_start = prev_cil_start;
4546 cfg->locals = prev_locals;
4547 cfg->args = prev_args;
4548 cfg->arg_types = prev_arg_types;
4549 cfg->current_method = prev_current_method;
4550 cfg->generic_context = prev_generic_context;
4551 cfg->ret_var_set = prev_ret_var_set;
4552 cfg->inline_depth --;
4554 if ((costs >= 0 && costs < 60) || inline_allways) {
4555 if (cfg->verbose_level > 2)
4556 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4558 mono_jit_stats.inlined_methods++;
4560 /* always add some code to avoid block split failures */
4561 MONO_INST_NEW (cfg, ins, OP_NOP);
4562 MONO_ADD_INS (prev_cbb, ins);
4564 prev_cbb->next_bb = sbblock;
4565 link_bblock (cfg, prev_cbb, sbblock);
4568 * Get rid of the begin and end bblocks if possible to aid local
4571 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
4573 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
4574 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
4576 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
4577 MonoBasicBlock *prev = ebblock->in_bb [0];
4578 mono_merge_basic_blocks (cfg, prev, ebblock);
4580 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
4581 mono_merge_basic_blocks (cfg, prev_cbb, prev);
4582 cfg->cbb = prev_cbb;
4590 * If the inlined method contains only a throw, then the ret var is not
4591 * set, so set it to a dummy value.
4594 static double r8_0 = 0.0;
4596 switch (rvar->type) {
4598 MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
4601 MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
4606 MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
4609 MONO_INST_NEW (cfg, ins, OP_R8CONST);
4610 ins->type = STACK_R8;
4611 ins->inst_p0 = (void*)&r8_0;
4612 ins->dreg = rvar->dreg;
4613 MONO_ADD_INS (cfg->cbb, ins);
4616 MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (fsig->ret));
4619 g_assert_not_reached ();
4623 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
4626 mono_metadata_free_mh (cheader);
4629 if (cfg->verbose_level > 2)
4630 printf ("INLINE ABORTED %s\n", mono_method_full_name (cmethod, TRUE));
4631 cfg->exception_type = MONO_EXCEPTION_NONE;
4632 mono_loader_clear_error ();
4634 /* This gets rid of the newly added bblocks */
4635 cfg->cbb = prev_cbb;
4637 mono_metadata_free_mh (cheader);
4642 * Some of these comments may well be out-of-date.
4643 * Design decisions: we do a single pass over the IL code (and we do bblock
4644 * splitting/merging in the few cases when it's required: a back jump to an IL
4645 * address that was not already seen as bblock starting point).
4646 * Code is validated as we go (full verification is still better left to metadata/verify.c).
4647 * Complex operations are decomposed in simpler ones right away. We need to let the
4648 * arch-specific code peek and poke inside this process somehow (except when the
4649 * optimizations can take advantage of the full semantic info of coarse opcodes).
4650 * All the opcodes of the form opcode.s are 'normalized' to opcode.
4651 * MonoInst->opcode initially is the IL opcode or some simplification of that
4652 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
4653 * opcode with value bigger than OP_LAST.
4654 * At this point the IR can be handed over to an interpreter, a dumb code generator
4655 * or to the optimizing code generator that will translate it to SSA form.
4657 * Profiling directed optimizations.
4658 * We may compile by default with few or no optimizations and instrument the code
4659 * or the user may indicate what methods to optimize the most either in a config file
4660 * or through repeated runs where the compiler applies offline the optimizations to
4661 * each method and then decides if it was worth it.
/*
 * IL verification helpers used throughout mono_method_to_ir ().  Each macro
 * bails out through the function-local UNVERIFIED handler when the IL stream
 * is malformed, so the method is rejected instead of being miscompiled.
 */
/* every value-producing instruction must have been assigned a stack type */
#define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
/* at least 'num' operands must currently be on the evaluation stack */
#define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
/* pushing 'num' more values must not exceed the header-declared max_stack */
#define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
/* argument index in range (unsigned compare also rejects negative values) */
#define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
/* local variable index in range */
#define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
/* the operand bytes of the current opcode must lie inside the method body */
#define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
#define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
/*
 * NOTE(review): unlike the macros above, this one exits via load_error and is
 * a bare if + block (no do/while(0)), so an `else` following a call site
 * would pair with this if -- keep call sites on their own statement line.
 */
#define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; goto load_error;}
/* offset from br.s -> br like opcodes */
#define BIG_BRANCH_OFFSET 13
4677 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
4679 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
4681 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   Scanning pre-pass over the IL stream [start, end): decode each opcode and
 * create a MonoBasicBlock (via GET_BBLOCK) at every branch target and at the
 * instruction following a branch, so the main IR-building pass finds all
 * block boundaries already present in cfg->cil_offset_to_bb.
 *   NOTE(review): several lines of this function (the decode loop header, the
 * ip advances for the plain-operand cases, break statements and the return
 * paths via *pos) are not visible in this chunk -- the comments below only
 * describe what is shown.
 */
get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
	unsigned char *ip = start;
	unsigned char *target;
	MonoBasicBlock *bblock;
	const MonoOpcode *opcode;
	/* offset of the instruction being decoded, relative to the method body start */
	cli_addr = ip - start;
	/* decode the (possibly multi-byte) opcode at ip */
	i = mono_opcode_value ((const guint8 **)&ip, end);
	opcode = &mono_opcodes [i];
	/* dispatch on the operand kind: find branch targets, skip plain operands */
	switch (opcode->argument) {
	case MonoInlineNone:
	case MonoInlineString:
	case MonoInlineType:
	case MonoInlineField:
	case MonoInlineMethod:
	case MonoShortInlineR:
	case MonoShortInlineVar:
	case MonoShortInlineI:
	case MonoShortInlineBrTarget:
		/* 1-byte signed displacement, relative to the end of the 2-byte instruction */
		target = start + cli_addr + 2 + (signed char)ip [1];
		GET_BBLOCK (cfg, bblock, target);
		/* the fall-through successor of a branch also starts a new bblock */
		GET_BBLOCK (cfg, bblock, ip);
	case MonoInlineBrTarget:
		/* 4-byte signed displacement, relative to the end of the 5-byte instruction */
		target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
		GET_BBLOCK (cfg, bblock, target);
		GET_BBLOCK (cfg, bblock, ip);
	case MonoInlineSwitch: {
		/* switch: 4-byte count followed by n 4-byte displacements */
		guint32 n = read32 (ip + 1);
		/* displacements are relative to the first instruction after the whole switch */
		cli_addr += 5 + 4 * n;
		target = start + cli_addr;
		GET_BBLOCK (cfg, bblock, target);
		for (j = 0; j < n; ++j) {
			target = start + cli_addr + (gint32)read32 (ip);
			GET_BBLOCK (cfg, bblock, target);
	/* unknown operand kind: the opcode table would be corrupt */
	g_assert_not_reached ();
	/*
	 * Code ending in a throw is cold: mark the bblock containing the throw as
	 * out-of-line so block layout can move it off the hot path.
	 */
	if (i == CEE_THROW) {
		unsigned char *bb_start = ip - 1;
		/* Find the start of the bblock containing the throw */
		while ((bb_start >= start) && !bblock) {
			bblock = cfg->cil_offset_to_bb [(bb_start) - start];
		bblock->out_of_line = 1;
4777 static inline MonoMethod *
4778 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4782 if (m->wrapper_type != MONO_WRAPPER_NONE)
4783 return mono_method_get_wrapper_data (m, token);
4785 method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 *   Resolve TOKEN like mini_get_method_allow_open (), but additionally check
 * the result: when compiling without a generic sharing context, a method
 * whose declaring class is still an open constructed type must not be used
 * as-is.  NOTE(review): the consequent of the if below and the final return
 * statement are not visible in this chunk -- confirm what the check does to
 * 'method' against the full source.
 */
static inline MonoMethod *
mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
	MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);

	/* open constructed types are only meaningful under generic sharing */
	if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
4801 static inline MonoClass*
4802 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
4806 if (method->wrapper_type != MONO_WRAPPER_NONE)
4807 klass = mono_method_get_wrapper_data (method, token);
4809 klass = mono_class_get_full (method->klass->image, token, context);
4811 mono_class_init (klass);
/*
 * Returns TRUE if the JIT should abort inlining because "callee"
 * is influenced by security attributes.
 *
 * NOTE(review): the declarations of 'result' and 'args', the early returns
 * and the final return value are not visible in this chunk -- the comments
 * below only describe what is shown.
 */
gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
	/* only relevant when inlining (caller != the method being compiled)
	 * and the callee carries declarative security */
	if ((cfg->method != caller) && mono_method_has_declsec (callee)) {
	/* evaluate the callee's LinkDemand against the caller */
	result = mono_declsec_linkdemand (cfg->domain, caller, callee);
	if (result == MONO_JIT_SECURITY_OK)
	if (result == MONO_JIT_LINKDEMAND_ECMA) {
		/* Generate code to throw a SecurityException before the actual call/link */
		MonoSecurityManager *secman = mono_security_manager_get_methods ();
		/* NOTE(review): 4 selects the failure kind for the managed helper --
		 * confirm its meaning against linkdemandsecurityexception's signature */
		NEW_ICONST (cfg, args [0], 4);
		NEW_METHODCONST (cfg, args [1], caller);
		mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
	} else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
		/* don't hide previous results */
		cfg->exception_type = MONO_EXCEPTION_SECURITY_LINKDEMAND;
		cfg->exception_data = result;
/*
 * throw_exception:
 *   Look up (and cache in a function-static) the managed helper
 * SecurityManager.ThrowException, used by emit_throw_exception () below.
 * NOTE(review): the lazy-initialization guard around the lookup and the
 * return statement are not visible in this chunk -- presumably the lookup
 * only runs when 'method' is still NULL; confirm against the full source.
 */
throw_exception (void)
	static MonoMethod *method = NULL;
	MonoSecurityManager *secman = mono_security_manager_get_methods ();
	/* the one-argument ThrowException overload on the runtime's SecurityManager */
	method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
4864 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
4866 MonoMethod *thrower = throw_exception ();
4869 EMIT_NEW_PCONST (cfg, args [0], ex);
4870 mono_emit_method_call (cfg, thrower, args, NULL);
4874 * Return the original method is a wrapper is specified. We can only access
4875 * the custom attributes from the original method.
4878 get_original_method (MonoMethod *method)
4880 if (method->wrapper_type == MONO_WRAPPER_NONE)
4883 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
4884 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
4887 /* in other cases we need to find the original method */
4888 return mono_marshal_method_from_wrapper (method);
4892 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
4893 MonoBasicBlock *bblock, unsigned char *ip)
4895 /* there's no restriction to access Transparent or SafeCritical fields, so we only check calls to Critical methods */
4896 if (mono_security_core_clr_class_level (mono_field_get_parent (field)) != MONO_SECURITY_CORE_CLR_CRITICAL)
4899 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
4900 caller = get_original_method (caller);
4904 /* caller is Critical! only SafeCritical and Critical callers can access the field, so we throw if caller is Transparent */
4905 if (mono_security_core_clr_method_level (caller, TRUE) == MONO_SECURITY_CORE_CLR_TRANSPARENT)
4906 emit_throw_exception (cfg, mono_get_exception_field_access ());
4910 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
4911 MonoBasicBlock *bblock, unsigned char *ip)
4913 /* there's no restriction to call Transparent or SafeCritical code, so we only check calls to Critical methods */
4914 if (mono_security_core_clr_method_level (callee, TRUE) != MONO_SECURITY_CORE_CLR_CRITICAL)
4917 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
4918 caller = get_original_method (caller);
4922 /* caller is Critical! only SafeCritical and Critical callers can call it, so we throw if the caller is Transparent */
4923 if (mono_security_core_clr_method_level (caller, TRUE) == MONO_SECURITY_CORE_CLR_TRANSPARENT)
4924 emit_throw_exception (cfg, mono_get_exception_method_access ());
/*
 * Check that the IL instructions at ip are the array initialization
 * sequence and return the pointer to the data and the size.
 *
 * NOTE(review): the element-size assignments for the switch cases, several
 * case labels, and most return paths are not visible in this chunk -- the
 * comments below only describe what is shown.
 */
initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
	/*
	 * Recognized pattern (ip points just after the newarr):
	 * newarr[System.Int32]
	 * dup
	 * ldtoken field valuetype ...
	 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
	 */
	/* ip [5] is the high byte of the little-endian ldtoken operand: 0x4 == FieldDef token table */
	if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
		guint32 token = read32 (ip + 7);       /* token of the called method */
		guint32 field_token = read32 (ip + 2); /* token of the RVA-backed data field */
		guint32 field_index = field_token & 0xffffff;
		const char *data_ptr;
		MonoMethod *cmethod;
		MonoClass *dummy_class;
		MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
		*out_field_token = field_token;
		/* the callee must really be corlib's RuntimeHelpers.InitializeArray */
		cmethod = mini_get_method (NULL, method, token, NULL, NULL);
		if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
		/* element size depends on the underlying (enum-stripped) element type */
		switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
		case MONO_TYPE_BOOLEAN:
		/* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
#if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
		case MONO_TYPE_CHAR:
			return NULL; /* stupid ARM FP swapped format */
		/* the blob must be at least as large as the field's declared size */
		if (size > mono_type_size (field->type, &dummy_align))
		/*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
		if (!method->klass->image->dynamic) {
			field_index = read32 (ip + 2) & 0xffffff;
			/* map the field's RVA to the raw bytes inside the loaded image */
			mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
			data_ptr = mono_image_rva_map (method->klass->image, rva);
			/*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
			/* for aot code we do the lookup on load */
			if (aot && data_ptr)
				return GUINT_TO_POINTER (rva);
		/*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
		/* dynamic (Reflection.Emit) images keep the blob on the field itself */
		data_ptr = mono_field_get_data (field);
5012 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
5014 char *method_fname = mono_method_full_name (method, TRUE);
5016 MonoMethodHeader *header = mono_method_get_header (method);
5018 if (header->code_size == 0)
5019 method_code = g_strdup ("method body is empty.");
5021 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
5022 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
5023 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
5024 g_free (method_fname);
5025 g_free (method_code);
5026 mono_metadata_free_mh (header);
/*
 * set_exception_object:
 *   Make the compilation fail with a caller-supplied managed exception
 * object, rather than one of the enumerated exception types.
 */
set_exception_object (MonoCompile *cfg, MonoException *exception)
	cfg->exception_type = MONO_EXCEPTION_OBJECT_SUPPLIED;
	/* the slot will hold a managed object: register it as a GC root before storing */
	MONO_GC_REGISTER_ROOT (cfg->exception_ptr);
	cfg->exception_ptr = exception;
5038 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
5042 if (cfg->generic_sharing_context)
5043 type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, &klass->byval_arg);
5045 type = &klass->byval_arg;
5046 return MONO_TYPE_IS_REFERENCE (type);
5050 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
5053 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
5054 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
5055 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
5056 /* Optimize reg-reg moves away */
5058 * Can't optimize other opcodes, since sp[0] might point to
5059 * the last ins of a decomposed opcode.
5061 sp [0]->dreg = (cfg)->locals [n]->dreg;
5063 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
/*
 * ldloca inhibits many optimizations so try to get rid of it in common
 * cases: when the address produced by ldloca is immediately consumed by an
 * initobj in the same bblock, store the zero value into the local directly
 * instead of materializing its address.
 * NOTE(review): the short-form (size != 4) index read, the handling of the
 * 'skip' flag and the returned ip values are not visible in this chunk --
 * confirm against the full source.
 */
static inline unsigned char *
emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
	/* long-form ldloca: 16-bit local index operand */
	local = read16 (ip + 2);
	/* the following instruction must be initobj (0xFE CEE_INITOBJ) inside the current bblock */
	if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
		gboolean skip = FALSE;

		/* From the INITOBJ case */
		token = read32 (ip + 2);
		klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
		CHECK_TYPELOAD (klass);
		if (generic_class_is_reference_type (cfg, klass)) {
			/* initobj on a reference type stores null */
			MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
		} else if (MONO_TYPE_IS_REFERENCE (&klass->byval_arg)) {
			MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
		} else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
			/* value types are zero-filled in place */
			MONO_EMIT_NEW_VZERO (cfg, cfg->locals [local]->dreg, klass);
5110 is_exception_class (MonoClass *class)
5113 if (class == mono_defaults.exception_class)
5115 class = class->parent;
5121 * mono_method_to_ir:
5123 * Translate the .net IL into linear IR.
5126 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
5127 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
5128 guint inline_offset, gboolean is_virtual_call)
5131 MonoInst *ins, **sp, **stack_start;
5132 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
5133 MonoSimpleBasicBlock *bb = NULL;
5134 MonoMethod *cmethod, *method_definition;
5135 MonoInst **arg_array;
5136 MonoMethodHeader *header;
5138 guint32 token, ins_flag;
5140 MonoClass *constrained_call = NULL;
5141 unsigned char *ip, *end, *target, *err_pos;
5142 static double r8_0 = 0.0;
5143 MonoMethodSignature *sig;
5144 MonoGenericContext *generic_context = NULL;
5145 MonoGenericContainer *generic_container = NULL;
5146 MonoType **param_types;
5147 int i, n, start_new_bblock, dreg;
5148 int num_calls = 0, inline_costs = 0;
5149 int breakpoint_id = 0;
5151 MonoBoolean security, pinvoke;
5152 MonoSecurityManager* secman = NULL;
5153 MonoDeclSecurityActions actions;
5154 GSList *class_inits = NULL;
5155 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
5157 gboolean init_locals, seq_points, skip_dead_blocks;
5159 /* serialization and xdomain stuff may need access to private fields and methods */
5160 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
5161 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
5162 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
5163 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
5164 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
5165 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
5167 dont_verify |= mono_security_get_mode () == MONO_SECURITY_MODE_SMCS_HACK;
5169 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
5170 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
5171 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
5172 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
5174 image = method->klass->image;
5175 header = mono_method_get_header (method);
5176 generic_container = mono_method_get_generic_container (method);
5177 sig = mono_method_signature (method);
5178 num_args = sig->hasthis + sig->param_count;
5179 ip = (unsigned char*)header->code;
5180 cfg->cil_start = ip;
5181 end = ip + header->code_size;
5182 mono_jit_stats.cil_code_size += header->code_size;
5183 init_locals = header->init_locals;
5185 seq_points = cfg->gen_seq_points && cfg->method == method;
5188 * Methods without init_locals set could cause asserts in various passes
5193 method_definition = method;
5194 while (method_definition->is_inflated) {
5195 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
5196 method_definition = imethod->declaring;
5199 /* SkipVerification is not allowed if core-clr is enabled */
5200 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
5202 dont_verify_stloc = TRUE;
5205 if (!dont_verify && mini_method_verify (cfg, method_definition))
5206 goto exception_exit;
5208 if (mono_debug_using_mono_debugger ())
5209 cfg->keep_cil_nops = TRUE;
5211 if (sig->is_inflated)
5212 generic_context = mono_method_get_context (method);
5213 else if (generic_container)
5214 generic_context = &generic_container->context;
5215 cfg->generic_context = generic_context;
5217 if (!cfg->generic_sharing_context)
5218 g_assert (!sig->has_type_parameters);
5220 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
5221 g_assert (method->is_inflated);
5222 g_assert (mono_method_get_context (method)->method_inst);
5224 if (method->is_inflated && mono_method_get_context (method)->method_inst)
5225 g_assert (sig->generic_param_count);
5227 if (cfg->method == method) {
5228 cfg->real_offset = 0;
5230 cfg->real_offset = inline_offset;
5233 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
5234 cfg->cil_offset_to_bb_len = header->code_size;
5236 cfg->current_method = method;
5238 if (cfg->verbose_level > 2)
5239 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
5241 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
5243 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
5244 for (n = 0; n < sig->param_count; ++n)
5245 param_types [n + sig->hasthis] = sig->params [n];
5246 cfg->arg_types = param_types;
5248 dont_inline = g_list_prepend (dont_inline, method);
5249 if (cfg->method == method) {
5251 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
5252 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
5255 NEW_BBLOCK (cfg, start_bblock);
5256 cfg->bb_entry = start_bblock;
5257 start_bblock->cil_code = NULL;
5258 start_bblock->cil_length = 0;
5261 NEW_BBLOCK (cfg, end_bblock);
5262 cfg->bb_exit = end_bblock;
5263 end_bblock->cil_code = NULL;
5264 end_bblock->cil_length = 0;
5265 g_assert (cfg->num_bblocks == 2);
5267 arg_array = cfg->args;
5269 if (header->num_clauses) {
5270 cfg->spvars = g_hash_table_new (NULL, NULL);
5271 cfg->exvars = g_hash_table_new (NULL, NULL);
5273 /* handle exception clauses */
5274 for (i = 0; i < header->num_clauses; ++i) {
5275 MonoBasicBlock *try_bb;
5276 MonoExceptionClause *clause = &header->clauses [i];
5277 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
5278 try_bb->real_offset = clause->try_offset;
5279 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
5280 tblock->real_offset = clause->handler_offset;
5281 tblock->flags |= BB_EXCEPTION_HANDLER;
5283 link_bblock (cfg, try_bb, tblock);
5285 if (*(ip + clause->handler_offset) == CEE_POP)
5286 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
5288 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
5289 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
5290 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
5291 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5292 MONO_ADD_INS (tblock, ins);
5294 /* todo: is a fault block unsafe to optimize? */
5295 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
5296 tblock->flags |= BB_EXCEPTION_UNSAFE;
5300 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
5302 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
5304 /* catch and filter blocks get the exception object on the stack */
5305 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
5306 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5307 MonoInst *dummy_use;
5309 /* mostly like handle_stack_args (), but just sets the input args */
5310 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
5311 tblock->in_scount = 1;
5312 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5313 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5316 * Add a dummy use for the exvar so its liveness info will be
5320 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
5322 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5323 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
5324 tblock->flags |= BB_EXCEPTION_HANDLER;
5325 tblock->real_offset = clause->data.filter_offset;
5326 tblock->in_scount = 1;
5327 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5328 /* The filter block shares the exvar with the handler block */
5329 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5330 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5331 MONO_ADD_INS (tblock, ins);
5335 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
5336 clause->data.catch_class &&
5337 cfg->generic_sharing_context &&
5338 mono_class_check_context_used (clause->data.catch_class)) {
5340 * In shared generic code with catch
5341 * clauses containing type variables
5342 * the exception handling code has to
5343 * be able to get to the rgctx.
5344 * Therefore we have to make sure that
5345 * the vtable/mrgctx argument (for
5346 * static or generic methods) or the
5347 * "this" argument (for non-static
5348 * methods) are live.
5350 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5351 mini_method_get_context (method)->method_inst ||
5352 method->klass->valuetype) {
5353 mono_get_vtable_var (cfg);
5355 MonoInst *dummy_use;
5357 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
5362 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
5363 cfg->cbb = start_bblock;
5364 cfg->args = arg_array;
5365 mono_save_args (cfg, sig, inline_args);
5368 /* FIRST CODE BLOCK */
5369 NEW_BBLOCK (cfg, bblock);
5370 bblock->cil_code = ip;
5374 ADD_BBLOCK (cfg, bblock);
5376 if (cfg->method == method) {
5377 breakpoint_id = mono_debugger_method_has_breakpoint (method);
5378 if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
5379 MONO_INST_NEW (cfg, ins, OP_BREAK);
5380 MONO_ADD_INS (bblock, ins);
5384 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5385 secman = mono_security_manager_get_methods ();
5387 security = (secman && mono_method_has_declsec (method));
5388 /* at this point having security doesn't mean we have any code to generate */
5389 if (security && (cfg->method == method)) {
5390 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
5391 * And we do not want to enter the next section (with allocation) if we
5392 * have nothing to generate */
5393 security = mono_declsec_get_demands (method, &actions);
5396 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
5397 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
5399 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5400 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5401 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
5403 /* unless the method or it's class has the [SuppressUnmanagedCodeSecurity] attribute */
5404 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5408 mono_custom_attrs_free (custom);
5411 custom = mono_custom_attrs_from_class (wrapped->klass);
5412 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5416 mono_custom_attrs_free (custom);
5419 /* not a P/Invoke after all */
5424 if ((init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
5425 /* we use a separate basic block for the initialization code */
5426 NEW_BBLOCK (cfg, init_localsbb);
5427 cfg->bb_init = init_localsbb;
5428 init_localsbb->real_offset = cfg->real_offset;
5429 start_bblock->next_bb = init_localsbb;
5430 init_localsbb->next_bb = bblock;
5431 link_bblock (cfg, start_bblock, init_localsbb);
5432 link_bblock (cfg, init_localsbb, bblock);
5434 cfg->cbb = init_localsbb;
5436 start_bblock->next_bb = bblock;
5437 link_bblock (cfg, start_bblock, bblock);
5440 /* at this point we know, if security is TRUE, that some code needs to be generated */
5441 if (security && (cfg->method == method)) {
5444 mono_jit_stats.cas_demand_generation++;
5446 if (actions.demand.blob) {
5447 /* Add code for SecurityAction.Demand */
5448 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
5449 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
5450 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5451 mono_emit_method_call (cfg, secman->demand, args, NULL);
5453 if (actions.noncasdemand.blob) {
5454 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
5455 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
5456 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
5457 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
5458 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5459 mono_emit_method_call (cfg, secman->demand, args, NULL);
5461 if (actions.demandchoice.blob) {
5462 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
5463 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
5464 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
5465 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
5466 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
5470 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
5472 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
5475 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
5476 /* check if this is native code, e.g. an icall or a p/invoke */
5477 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
5478 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5480 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
5481 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
5483 /* if this ia a native call then it can only be JITted from platform code */
5484 if ((icall || pinvk) && method->klass && method->klass->image) {
5485 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
5486 MonoException *ex = icall ? mono_get_exception_security () :
5487 mono_get_exception_method_access ();
5488 emit_throw_exception (cfg, ex);
5495 if (header->code_size == 0)
5498 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
5503 if (cfg->method == method)
5504 mono_debug_init_method (cfg, bblock, breakpoint_id);
5506 for (n = 0; n < header->num_locals; ++n) {
5507 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
5512 /* We force the vtable variable here for all shared methods
5513 for the possibility that they might show up in a stack
5514 trace where their exact instantiation is needed. */
5515 if (cfg->generic_sharing_context && method == cfg->method) {
5516 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5517 mini_method_get_context (method)->method_inst ||
5518 method->klass->valuetype) {
5519 mono_get_vtable_var (cfg);
5521 /* FIXME: Is there a better way to do this?
5522 We need the variable live for the duration
5523 of the whole method. */
5524 cfg->args [0]->flags |= MONO_INST_INDIRECT;
5528 /* add a check for this != NULL to inlined methods */
5529 if (is_virtual_call) {
5532 NEW_ARGLOAD (cfg, arg_ins, 0);
5533 MONO_ADD_INS (cfg->cbb, arg_ins);
5534 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
5537 skip_dead_blocks = !dont_verify;
5538 if (skip_dead_blocks) {
5539 bb = mono_basic_block_split (method, &error);
5540 if (!mono_error_ok (&error)) {
5541 mono_error_cleanup (&error);
5547 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
5548 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
5551 start_new_bblock = 0;
5554 if (cfg->method == method)
5555 cfg->real_offset = ip - header->code;
5557 cfg->real_offset = inline_offset;
5562 if (start_new_bblock) {
5563 bblock->cil_length = ip - bblock->cil_code;
5564 if (start_new_bblock == 2) {
5565 g_assert (ip == tblock->cil_code);
5567 GET_BBLOCK (cfg, tblock, ip);
5569 bblock->next_bb = tblock;
5572 start_new_bblock = 0;
5573 for (i = 0; i < bblock->in_scount; ++i) {
5574 if (cfg->verbose_level > 3)
5575 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5576 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5580 g_slist_free (class_inits);
5583 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
5584 link_bblock (cfg, bblock, tblock);
5585 if (sp != stack_start) {
5586 handle_stack_args (cfg, stack_start, sp - stack_start);
5588 CHECK_UNVERIFIABLE (cfg);
5590 bblock->next_bb = tblock;
5593 for (i = 0; i < bblock->in_scount; ++i) {
5594 if (cfg->verbose_level > 3)
5595 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5596 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5599 g_slist_free (class_inits);
5604 if (skip_dead_blocks) {
5605 int ip_offset = ip - header->code;
5607 if (ip_offset == bb->end)
5611 int op_size = mono_opcode_size (ip, end);
5612 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
5614 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
5616 if (ip_offset + op_size == bb->end) {
5617 MONO_INST_NEW (cfg, ins, OP_NOP);
5618 MONO_ADD_INS (bblock, ins);
5619 start_new_bblock = 1;
5627 * Sequence points are points where the debugger can place a breakpoint.
5628 * Currently, we generate these automatically at points where the IL
5631 if (seq_points && sp == stack_start) {
5632 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
5633 MONO_ADD_INS (cfg->cbb, ins);
5636 bblock->real_offset = cfg->real_offset;
5638 if ((cfg->method == method) && cfg->coverage_info) {
5639 guint32 cil_offset = ip - header->code;
5640 cfg->coverage_info->data [cil_offset].cil_code = ip;
5642 /* TODO: Use an increment here */
5643 #if defined(TARGET_X86)
5644 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
5645 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
5647 MONO_ADD_INS (cfg->cbb, ins);
5649 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
5650 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
5654 if (cfg->verbose_level > 3)
5655 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
5659 if (cfg->keep_cil_nops)
5660 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
5662 MONO_INST_NEW (cfg, ins, OP_NOP);
5664 MONO_ADD_INS (bblock, ins);
5667 if (should_insert_brekpoint (cfg->method))
5668 MONO_INST_NEW (cfg, ins, OP_BREAK);
5670 MONO_INST_NEW (cfg, ins, OP_NOP);
5672 MONO_ADD_INS (bblock, ins);
5678 CHECK_STACK_OVF (1);
5679 n = (*ip)-CEE_LDARG_0;
5681 EMIT_NEW_ARGLOAD (cfg, ins, n);
5689 CHECK_STACK_OVF (1);
5690 n = (*ip)-CEE_LDLOC_0;
5692 EMIT_NEW_LOCLOAD (cfg, ins, n);
5701 n = (*ip)-CEE_STLOC_0;
5704 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
5706 emit_stloc_ir (cfg, sp, header, n);
5713 CHECK_STACK_OVF (1);
5716 EMIT_NEW_ARGLOAD (cfg, ins, n);
5722 CHECK_STACK_OVF (1);
5725 NEW_ARGLOADA (cfg, ins, n);
5726 MONO_ADD_INS (cfg->cbb, ins);
5736 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
5738 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
5743 CHECK_STACK_OVF (1);
5746 EMIT_NEW_LOCLOAD (cfg, ins, n);
5750 case CEE_LDLOCA_S: {
5751 unsigned char *tmp_ip;
5753 CHECK_STACK_OVF (1);
5754 CHECK_LOCAL (ip [1]);
5756 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
5762 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
5771 CHECK_LOCAL (ip [1]);
5772 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
5774 emit_stloc_ir (cfg, sp, header, ip [1]);
5779 CHECK_STACK_OVF (1);
5780 EMIT_NEW_PCONST (cfg, ins, NULL);
5781 ins->type = STACK_OBJ;
5786 CHECK_STACK_OVF (1);
5787 EMIT_NEW_ICONST (cfg, ins, -1);
5800 CHECK_STACK_OVF (1);
5801 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
5807 CHECK_STACK_OVF (1);
5809 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
5815 CHECK_STACK_OVF (1);
5816 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
5822 CHECK_STACK_OVF (1);
5823 MONO_INST_NEW (cfg, ins, OP_I8CONST);
5824 ins->type = STACK_I8;
5825 ins->dreg = alloc_dreg (cfg, STACK_I8);
5827 ins->inst_l = (gint64)read64 (ip);
5828 MONO_ADD_INS (bblock, ins);
5834 gboolean use_aotconst = FALSE;
5836 #ifdef TARGET_POWERPC
5837 /* FIXME: Clean this up */
5838 if (cfg->compile_aot)
5839 use_aotconst = TRUE;
5842 /* FIXME: we should really allocate this only late in the compilation process */
5843 f = mono_domain_alloc (cfg->domain, sizeof (float));
5845 CHECK_STACK_OVF (1);
5851 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
5853 dreg = alloc_freg (cfg);
5854 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
5855 ins->type = STACK_R8;
5857 MONO_INST_NEW (cfg, ins, OP_R4CONST);
5858 ins->type = STACK_R8;
5859 ins->dreg = alloc_dreg (cfg, STACK_R8);
5861 MONO_ADD_INS (bblock, ins);
5871 gboolean use_aotconst = FALSE;
5873 #ifdef TARGET_POWERPC
5874 /* FIXME: Clean this up */
5875 if (cfg->compile_aot)
5876 use_aotconst = TRUE;
5879 /* FIXME: we should really allocate this only late in the compilation process */
5880 d = mono_domain_alloc (cfg->domain, sizeof (double));
5882 CHECK_STACK_OVF (1);
5888 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
5890 dreg = alloc_freg (cfg);
5891 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
5892 ins->type = STACK_R8;
5894 MONO_INST_NEW (cfg, ins, OP_R8CONST);
5895 ins->type = STACK_R8;
5896 ins->dreg = alloc_dreg (cfg, STACK_R8);
5898 MONO_ADD_INS (bblock, ins);
5907 MonoInst *temp, *store;
5909 CHECK_STACK_OVF (1);
5913 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
5914 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
5916 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
5919 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
5932 if (sp [0]->type == STACK_R8)
5933 /* we need to pop the value from the x86 FP stack */
5934 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
5943 if (stack_start != sp)
5945 token = read32 (ip + 1);
5946 /* FIXME: check the signature matches */
5947 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
5952 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
5953 GENERIC_SHARING_FAILURE (CEE_JMP);
5955 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5956 CHECK_CFG_EXCEPTION;
5958 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
5960 MonoMethodSignature *fsig = mono_method_signature (cmethod);
5963 /* Handle tail calls similarly to calls */
5964 n = fsig->param_count + fsig->hasthis;
5966 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
5967 call->method = cmethod;
5968 call->tail_call = TRUE;
5969 call->signature = mono_method_signature (cmethod);
5970 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
5971 call->inst.inst_p0 = cmethod;
5972 for (i = 0; i < n; ++i)
5973 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
5975 mono_arch_emit_call (cfg, call);
5976 MONO_ADD_INS (bblock, (MonoInst*)call);
5979 for (i = 0; i < num_args; ++i)
5980 /* Prevent arguments from being optimized away */
5981 arg_array [i]->flags |= MONO_INST_VOLATILE;
5983 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
5984 ins = (MonoInst*)call;
5985 ins->inst_p0 = cmethod;
5986 MONO_ADD_INS (bblock, ins);
5990 start_new_bblock = 1;
5995 case CEE_CALLVIRT: {
5996 MonoInst *addr = NULL;
5997 MonoMethodSignature *fsig = NULL;
5999 int virtual = *ip == CEE_CALLVIRT;
6000 int calli = *ip == CEE_CALLI;
6001 gboolean pass_imt_from_rgctx = FALSE;
6002 MonoInst *imt_arg = NULL;
6003 gboolean pass_vtable = FALSE;
6004 gboolean pass_mrgctx = FALSE;
6005 MonoInst *vtable_arg = NULL;
6006 gboolean check_this = FALSE;
6007 gboolean supported_tail_call = FALSE;
6010 token = read32 (ip + 1);
6017 if (method->wrapper_type != MONO_WRAPPER_NONE)
6018 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
6020 fsig = mono_metadata_parse_signature (image, token);
6022 n = fsig->param_count + fsig->hasthis;
6024 if (method->dynamic && fsig->pinvoke) {
6028 * This is a call through a function pointer using a pinvoke
6029 * signature. Have to create a wrapper and call that instead.
6030 * FIXME: This is very slow, need to create a wrapper at JIT time
6031 * instead based on the signature.
6033 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
6034 EMIT_NEW_PCONST (cfg, args [1], fsig);
6036 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
6039 MonoMethod *cil_method;
6041 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6042 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
6043 cil_method = cmethod;
6044 } else if (constrained_call) {
6045 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
6047 * This is needed since get_method_constrained can't find
6048 * the method in klass representing a type var.
6049 * The type var is guaranteed to be a reference type in this
6052 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6053 cil_method = cmethod;
6054 g_assert (!cmethod->klass->valuetype);
6056 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
6059 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6060 cil_method = cmethod;
6065 if (!dont_verify && !cfg->skip_visibility) {
6066 MonoMethod *target_method = cil_method;
6067 if (method->is_inflated) {
6068 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
6070 if (!mono_method_can_access_method (method_definition, target_method) &&
6071 !mono_method_can_access_method (method, cil_method))
6072 METHOD_ACCESS_FAILURE;
6075 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
6076 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
6078 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
6079 /* MS.NET seems to silently convert this to a callvirt */
6082 if (!cmethod->klass->inited)
6083 if (!mono_class_init (cmethod->klass))
6086 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
6087 mini_class_is_system_array (cmethod->klass)) {
6088 array_rank = cmethod->klass->rank;
6089 fsig = mono_method_signature (cmethod);
6091 if (mono_method_signature (cmethod)->pinvoke) {
6092 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
6093 check_for_pending_exc, FALSE);
6094 fsig = mono_method_signature (wrapper);
6095 } else if (constrained_call) {
6096 fsig = mono_method_signature (cmethod);
6098 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
6102 mono_save_token_info (cfg, image, token, cil_method);
6104 n = fsig->param_count + fsig->hasthis;
6106 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
6107 if (check_linkdemand (cfg, method, cmethod))
6109 CHECK_CFG_EXCEPTION;
6112 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
6113 g_assert_not_reached ();
6116 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
6119 if (!cfg->generic_sharing_context && cmethod)
6120 g_assert (!mono_method_check_context_used (cmethod));
6124 //g_assert (!virtual || fsig->hasthis);
6128 if (constrained_call) {
6130 * We have the `constrained.' prefix opcode.
6132 if (constrained_call->valuetype && !cmethod->klass->valuetype) {
6134 * The type parameter is instantiated as a valuetype,
6135 * but that type doesn't override the method we're
6136 * calling, so we need to box `this'.
6138 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
6139 ins->klass = constrained_call;
6140 sp [0] = handle_box (cfg, ins, constrained_call);
6141 CHECK_CFG_EXCEPTION;
6142 } else if (!constrained_call->valuetype) {
6143 int dreg = alloc_preg (cfg);
6146 * The type parameter is instantiated as a reference
6147 * type. We have a managed pointer on the stack, so
6148 * we need to dereference it here.
6150 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
6151 ins->type = STACK_OBJ;
6153 } else if (cmethod->klass->valuetype)
6155 constrained_call = NULL;
6158 if (*ip != CEE_CALLI && check_call_signature (cfg, fsig, sp))
6162 * If the callee is a shared method, then its static cctor
6163 * might not get called after the call was patched.
6165 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
6166 emit_generic_class_init (cfg, cmethod->klass);
6167 CHECK_TYPELOAD (cmethod->klass);
6170 if (cmethod && ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
6171 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
6172 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6173 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
6174 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6177 * Pass vtable iff target method might
6178 * be shared, which means that sharing
6179 * is enabled for its class and its
6180 * context is sharable (and it's not a
6183 if (sharing_enabled && context_sharable &&
6184 !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
6188 if (cmethod && mini_method_get_context (cmethod) &&
6189 mini_method_get_context (cmethod)->method_inst) {
6190 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6191 MonoGenericContext *context = mini_method_get_context (cmethod);
6192 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6194 g_assert (!pass_vtable);
6196 if (sharing_enabled && context_sharable)
6200 if (cfg->generic_sharing_context && cmethod) {
6201 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
6203 context_used = mono_method_check_context_used (cmethod);
6205 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
6206 /* Generic method interface
6207 calls are resolved via a
6208 helper function and don't
6210 if (!cmethod_context || !cmethod_context->method_inst)
6211 pass_imt_from_rgctx = TRUE;
6215 * If a shared method calls another
6216 * shared method then the caller must
6217 * have a generic sharing context
6218 * because the magic trampoline
6219 * requires it. FIXME: We shouldn't
6220 * have to force the vtable/mrgctx
6221 * variable here. Instead there
6222 * should be a flag in the cfg to
6223 * request a generic sharing context.
6226 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
6227 mono_get_vtable_var (cfg);
6232 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
6234 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
6236 CHECK_TYPELOAD (cmethod->klass);
6237 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
6242 g_assert (!vtable_arg);
6244 if (!cfg->compile_aot) {
6246 * emit_get_rgctx_method () calls mono_class_vtable () so check
6247 * for type load errors before.
6249 mono_class_setup_vtable (cmethod->klass);
6250 CHECK_TYPELOAD (cmethod->klass);
6253 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
6255 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
6256 MONO_METHOD_IS_FINAL (cmethod)) {
6263 if (pass_imt_from_rgctx) {
6264 g_assert (!pass_vtable);
6267 imt_arg = emit_get_rgctx_method (cfg, context_used,
6268 cmethod, MONO_RGCTX_INFO_METHOD);
6272 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
6274 /* Calling virtual generic methods */
6275 if (cmethod && virtual &&
6276 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
6277 !(MONO_METHOD_IS_FINAL (cmethod) &&
6278 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
6279 mono_method_signature (cmethod)->generic_param_count) {
6280 MonoInst *this_temp, *this_arg_temp, *store;
6281 MonoInst *iargs [4];
6283 g_assert (mono_method_signature (cmethod)->is_inflated);
6285 /* Prevent inlining of methods that contain indirect calls */
6288 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK
6289 /* The llvm vcall trampolines doesn't support generic virtual calls yet */
6290 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt && !mono_use_llvm) {
6291 g_assert (!imt_arg);
6293 g_assert (cmethod->is_inflated);
6294 imt_arg = emit_get_rgctx_method (cfg, context_used,
6295 cmethod, MONO_RGCTX_INFO_METHOD);
6296 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, sp [0], imt_arg);
6300 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
6301 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
6302 MONO_ADD_INS (bblock, store);
6304 /* FIXME: This should be a managed pointer */
6305 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
6307 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
6308 iargs [1] = emit_get_rgctx_method (cfg, context_used,
6309 cmethod, MONO_RGCTX_INFO_METHOD);
6310 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
6311 addr = mono_emit_jit_icall (cfg,
6312 mono_helper_compile_generic_method, iargs);
6314 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
6316 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6319 if (!MONO_TYPE_IS_VOID (fsig->ret))
6320 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6327 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6328 supported_tail_call = cmethod && MONO_ARCH_USE_OP_TAIL_CALL (mono_method_signature (method), mono_method_signature (cmethod));
6330 supported_tail_call = cmethod && mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
6334 /* FIXME: runtime generic context pointer for jumps? */
6335 /* FIXME: handle this for generic sharing eventually */
6336 if ((ins_flag & MONO_INST_TAILCALL) && !cfg->generic_sharing_context && !vtable_arg && cmethod && (*ip == CEE_CALL) && supported_tail_call) {
6339 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6342 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6343 /* Handle tail calls similarly to calls */
6344 call = mono_emit_call_args (cfg, mono_method_signature (cmethod), sp, FALSE, FALSE, TRUE);
6346 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6347 call->tail_call = TRUE;
6348 call->method = cmethod;
6349 call->signature = mono_method_signature (cmethod);
6352 * We implement tail calls by storing the actual arguments into the
6353 * argument variables, then emitting a CEE_JMP.
6355 for (i = 0; i < n; ++i) {
6356 /* Prevent argument from being register allocated */
6357 arg_array [i]->flags |= MONO_INST_VOLATILE;
6358 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6362 ins = (MonoInst*)call;
6363 ins->inst_p0 = cmethod;
6364 ins->inst_p1 = arg_array [0];
6365 MONO_ADD_INS (bblock, ins);
6366 link_bblock (cfg, bblock, end_bblock);
6367 start_new_bblock = 1;
6368 /* skip CEE_RET as well */
6374 /* Conversion to a JIT intrinsic */
6375 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
6376 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6377 type_to_eval_stack_type ((cfg), fsig->ret, ins);
6388 if ((cfg->opt & MONO_OPT_INLINE) && cmethod &&
6389 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
6390 mono_method_check_inlining (cfg, cmethod) &&
6391 !g_list_find (dont_inline, cmethod)) {
6393 gboolean allways = FALSE;
6395 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
6396 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
6397 /* Prevent inlining of methods that call wrappers */
6399 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
6403 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, allways))) {
6405 cfg->real_offset += 5;
6408 if (!MONO_TYPE_IS_VOID (fsig->ret))
6409 /* *sp is already set by inline_method */
6412 inline_costs += costs;
6418 inline_costs += 10 * num_calls++;
6420 /* Tail recursion elimination */
6421 if ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
6422 gboolean has_vtargs = FALSE;
6425 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6428 /* keep it simple */
6429 for (i = fsig->param_count - 1; i >= 0; i--) {
6430 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
6435 for (i = 0; i < n; ++i)
6436 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6437 MONO_INST_NEW (cfg, ins, OP_BR);
6438 MONO_ADD_INS (bblock, ins);
6439 tblock = start_bblock->out_bb [0];
6440 link_bblock (cfg, bblock, tblock);
6441 ins->inst_target_bb = tblock;
6442 start_new_bblock = 1;
6444 /* skip the CEE_RET, too */
6445 if (ip_in_bb (cfg, bblock, ip + 5))
6455 /* Generic sharing */
6456 /* FIXME: only do this for generic methods if
6457 they are not shared! */
6458 if (context_used && !imt_arg && !array_rank &&
6459 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
6460 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
6461 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
6462 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
6465 g_assert (cfg->generic_sharing_context && cmethod);
6469 * We are compiling a call to a
6470 * generic method from shared code,
6471 * which means that we have to look up
6472 * the method in the rgctx and do an
6475 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
6478 /* Indirect calls */
6480 g_assert (!imt_arg);
6482 if (*ip == CEE_CALL)
6483 g_assert (context_used);
6484 else if (*ip == CEE_CALLI)
6485 g_assert (!vtable_arg);
6487 /* FIXME: what the hell is this??? */
6488 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
6489 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
6491 /* Prevent inlining of methods with indirect calls */
6495 #ifdef MONO_ARCH_RGCTX_REG
6497 int rgctx_reg = mono_alloc_preg (cfg);
6499 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
6500 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6501 call = (MonoCallInst*)ins;
6502 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
6503 cfg->uses_rgctx_reg = TRUE;
6504 call->rgctx_reg = TRUE;
6509 if (addr->opcode == OP_AOTCONST && addr->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6511 * Instead of emitting an indirect call, emit a direct call
6512 * with the contents of the aotconst as the patch info.
6514 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_p0, fsig, sp);
6516 } else if (addr->opcode == OP_GOT_ENTRY && addr->inst_right->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6517 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_right->inst_left, fsig, sp);
6520 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6523 if (!MONO_TYPE_IS_VOID (fsig->ret))
6524 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6535 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
6536 if (sp [fsig->param_count]->type == STACK_OBJ) {
6537 MonoInst *iargs [2];
6540 iargs [1] = sp [fsig->param_count];
6542 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
6545 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
6546 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, sp [fsig->param_count]->dreg);
6547 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
6548 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6550 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
6553 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
6554 if (!cmethod->klass->element_class->valuetype && !readonly)
6555 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
6556 CHECK_TYPELOAD (cmethod->klass);
6559 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6562 g_assert_not_reached ();
6570 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
6572 if (!MONO_TYPE_IS_VOID (fsig->ret))
6573 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6583 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL,
6585 } else if (imt_arg) {
6586 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, imt_arg);
6588 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, NULL);
6591 if (!MONO_TYPE_IS_VOID (fsig->ret))
6592 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6599 if (cfg->method != method) {
6600 /* return from inlined method */
6602 * If in_count == 0, that means the ret is unreachable due to
6603 * being preceded by a throw. In that case, inline_method () will
6604 * handle setting the return value
6605 * (test case: test_0_inline_throw ()).
6607 if (return_var && cfg->cbb->in_count) {
6611 //g_assert (returnvar != -1);
6612 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
6613 cfg->ret_var_set = TRUE;
6617 MonoType *ret_type = mono_method_signature (method)->ret;
6621 * Place a seq point here too even though the IL stack is not
6622 * empty, so a step over on
6625 * will work correctly.
6627 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
6628 MONO_ADD_INS (cfg->cbb, ins);
6631 g_assert (!return_var);
6634 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
6637 if (!cfg->vret_addr) {
6640 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
6642 EMIT_NEW_RETLOADA (cfg, ret_addr);
6644 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
6645 ins->klass = mono_class_from_mono_type (ret_type);
6648 #ifdef MONO_ARCH_SOFT_FLOAT
6649 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
6650 MonoInst *iargs [1];
6654 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
6655 mono_arch_emit_setret (cfg, method, conv);
6657 mono_arch_emit_setret (cfg, method, *sp);
6660 mono_arch_emit_setret (cfg, method, *sp);
6665 if (sp != stack_start)
6667 MONO_INST_NEW (cfg, ins, OP_BR);
6669 ins->inst_target_bb = end_bblock;
6670 MONO_ADD_INS (bblock, ins);
6671 link_bblock (cfg, bblock, end_bblock);
6672 start_new_bblock = 1;
6676 MONO_INST_NEW (cfg, ins, OP_BR);
6678 target = ip + 1 + (signed char)(*ip);
6680 GET_BBLOCK (cfg, tblock, target);
6681 link_bblock (cfg, bblock, tblock);
6682 ins->inst_target_bb = tblock;
6683 if (sp != stack_start) {
6684 handle_stack_args (cfg, stack_start, sp - stack_start);
6686 CHECK_UNVERIFIABLE (cfg);
6688 MONO_ADD_INS (bblock, ins);
6689 start_new_bblock = 1;
6690 inline_costs += BRANCH_COST;
6704 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
6706 target = ip + 1 + *(signed char*)ip;
6712 inline_costs += BRANCH_COST;
6716 MONO_INST_NEW (cfg, ins, OP_BR);
6719 target = ip + 4 + (gint32)read32(ip);
6721 GET_BBLOCK (cfg, tblock, target);
6722 link_bblock (cfg, bblock, tblock);
6723 ins->inst_target_bb = tblock;
6724 if (sp != stack_start) {
6725 handle_stack_args (cfg, stack_start, sp - stack_start);
6727 CHECK_UNVERIFIABLE (cfg);
6730 MONO_ADD_INS (bblock, ins);
6732 start_new_bblock = 1;
6733 inline_costs += BRANCH_COST;
6740 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
6741 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
6742 guint32 opsize = is_short ? 1 : 4;
6744 CHECK_OPSIZE (opsize);
6746 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
6749 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
6754 GET_BBLOCK (cfg, tblock, target);
6755 link_bblock (cfg, bblock, tblock);
6756 GET_BBLOCK (cfg, tblock, ip);
6757 link_bblock (cfg, bblock, tblock);
6759 if (sp != stack_start) {
6760 handle_stack_args (cfg, stack_start, sp - stack_start);
6761 CHECK_UNVERIFIABLE (cfg);
6764 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
6765 cmp->sreg1 = sp [0]->dreg;
6766 type_from_op (cmp, sp [0], NULL);
6769 #if SIZEOF_REGISTER == 4
6770 if (cmp->opcode == OP_LCOMPARE_IMM) {
6771 /* Convert it to OP_LCOMPARE */
6772 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6773 ins->type = STACK_I8;
6774 ins->dreg = alloc_dreg (cfg, STACK_I8);
6776 MONO_ADD_INS (bblock, ins);
6777 cmp->opcode = OP_LCOMPARE;
6778 cmp->sreg2 = ins->dreg;
6781 MONO_ADD_INS (bblock, cmp);
6783 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
6784 type_from_op (ins, sp [0], NULL);
6785 MONO_ADD_INS (bblock, ins);
6786 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
6787 GET_BBLOCK (cfg, tblock, target);
6788 ins->inst_true_bb = tblock;
6789 GET_BBLOCK (cfg, tblock, ip);
6790 ins->inst_false_bb = tblock;
6791 start_new_bblock = 2;
6794 inline_costs += BRANCH_COST;
6809 MONO_INST_NEW (cfg, ins, *ip);
6811 target = ip + 4 + (gint32)read32(ip);
6817 inline_costs += BRANCH_COST;
6821 MonoBasicBlock **targets;
6822 MonoBasicBlock *default_bblock;
6823 MonoJumpInfoBBTable *table;
6824 int offset_reg = alloc_preg (cfg);
6825 int target_reg = alloc_preg (cfg);
6826 int table_reg = alloc_preg (cfg);
6827 int sum_reg = alloc_preg (cfg);
6828 gboolean use_op_switch;
6832 n = read32 (ip + 1);
6835 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
6839 CHECK_OPSIZE (n * sizeof (guint32));
6840 target = ip + n * sizeof (guint32);
6842 GET_BBLOCK (cfg, default_bblock, target);
6844 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
6845 for (i = 0; i < n; ++i) {
6846 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
6847 targets [i] = tblock;
6851 if (sp != stack_start) {
6853 * Link the current bb with the targets as well, so handle_stack_args
6854 * will set their in_stack correctly.
6856 link_bblock (cfg, bblock, default_bblock);
6857 for (i = 0; i < n; ++i)
6858 link_bblock (cfg, bblock, targets [i]);
6860 handle_stack_args (cfg, stack_start, sp - stack_start);
6862 CHECK_UNVERIFIABLE (cfg);
6865 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
6866 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
6869 for (i = 0; i < n; ++i)
6870 link_bblock (cfg, bblock, targets [i]);
6872 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
6873 table->table = targets;
6874 table->table_size = n;
6876 use_op_switch = FALSE;
6878 /* ARM implements SWITCH statements differently */
6879 /* FIXME: Make it use the generic implementation */
6880 if (!cfg->compile_aot)
6881 use_op_switch = TRUE;
6884 if (COMPILE_LLVM (cfg))
6885 use_op_switch = TRUE;
6887 cfg->cbb->has_jump_table = 1;
6889 if (use_op_switch) {
6890 MONO_INST_NEW (cfg, ins, OP_SWITCH);
6891 ins->sreg1 = src1->dreg;
6892 ins->inst_p0 = table;
6893 ins->inst_many_bb = targets;
6894 ins->klass = GUINT_TO_POINTER (n);
6895 MONO_ADD_INS (cfg->cbb, ins);
6897 if (sizeof (gpointer) == 8)
6898 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
6900 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
6902 #if SIZEOF_REGISTER == 8
6903 /* The upper word might not be zero, and we add it to a 64 bit address later */
6904 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
6907 if (cfg->compile_aot) {
6908 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
6910 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
6911 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
6912 ins->inst_p0 = table;
6913 ins->dreg = table_reg;
6914 MONO_ADD_INS (cfg->cbb, ins);
6917 /* FIXME: Use load_memindex */
6918 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
6919 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
6920 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
6922 start_new_bblock = 1;
6923 inline_costs += (BRANCH_COST * 2);
6943 dreg = alloc_freg (cfg);
6946 dreg = alloc_lreg (cfg);
6949 dreg = alloc_preg (cfg);
6952 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
6953 ins->type = ldind_type [*ip - CEE_LDIND_I1];
6954 ins->flags |= ins_flag;
6956 MONO_ADD_INS (bblock, ins);
6971 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
6972 ins->flags |= ins_flag;
6974 MONO_ADD_INS (bblock, ins);
6976 #if HAVE_WRITE_BARRIERS
6977 if (*ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0))) {
6978 MonoInst *dummy_use;
6979 /* insert call to write barrier */
6980 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
6981 mono_emit_method_call (cfg, write_barrier, sp, NULL);
6982 EMIT_NEW_DUMMY_USE (cfg, dummy_use, sp [1]);
6993 MONO_INST_NEW (cfg, ins, (*ip));
6995 ins->sreg1 = sp [0]->dreg;
6996 ins->sreg2 = sp [1]->dreg;
6997 type_from_op (ins, sp [0], sp [1]);
6999 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7001 /* Use the immediate opcodes if possible */
7002 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
7003 int imm_opcode = mono_op_to_op_imm (ins->opcode);
7004 if (imm_opcode != -1) {
7005 ins->opcode = imm_opcode;
7006 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
7009 sp [1]->opcode = OP_NOP;
7013 MONO_ADD_INS ((cfg)->cbb, (ins));
7015 *sp++ = mono_decompose_opcode (cfg, ins);
7032 MONO_INST_NEW (cfg, ins, (*ip));
7034 ins->sreg1 = sp [0]->dreg;
7035 ins->sreg2 = sp [1]->dreg;
7036 type_from_op (ins, sp [0], sp [1]);
7038 ADD_WIDEN_OP (ins, sp [0], sp [1]);
7039 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7041 /* FIXME: Pass opcode to is_inst_imm */
7043 /* Use the immediate opcodes if possible */
7044 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
7047 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
7048 if (imm_opcode != -1) {
7049 ins->opcode = imm_opcode;
7050 if (sp [1]->opcode == OP_I8CONST) {
7051 #if SIZEOF_REGISTER == 8
7052 ins->inst_imm = sp [1]->inst_l;
7054 ins->inst_ls_word = sp [1]->inst_ls_word;
7055 ins->inst_ms_word = sp [1]->inst_ms_word;
7059 ins->inst_imm = (gssize)(sp [1]->inst_c0);
7062 /* Might be followed by an instruction added by ADD_WIDEN_OP */
7063 if (sp [1]->next == NULL)
7064 sp [1]->opcode = OP_NOP;
7067 MONO_ADD_INS ((cfg)->cbb, (ins));
7069 *sp++ = mono_decompose_opcode (cfg, ins);
7082 case CEE_CONV_OVF_I8:
7083 case CEE_CONV_OVF_U8:
7087 /* Special case this earlier so we have long constants in the IR */
7088 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
7089 int data = sp [-1]->inst_c0;
7090 sp [-1]->opcode = OP_I8CONST;
7091 sp [-1]->type = STACK_I8;
7092 #if SIZEOF_REGISTER == 8
7093 if ((*ip) == CEE_CONV_U8)
7094 sp [-1]->inst_c0 = (guint32)data;
7096 sp [-1]->inst_c0 = data;
7098 sp [-1]->inst_ls_word = data;
7099 if ((*ip) == CEE_CONV_U8)
7100 sp [-1]->inst_ms_word = 0;
7102 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
7104 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
7111 case CEE_CONV_OVF_I4:
7112 case CEE_CONV_OVF_I1:
7113 case CEE_CONV_OVF_I2:
7114 case CEE_CONV_OVF_I:
7115 case CEE_CONV_OVF_U:
7118 if (sp [-1]->type == STACK_R8) {
7119 ADD_UNOP (CEE_CONV_OVF_I8);
7126 case CEE_CONV_OVF_U1:
7127 case CEE_CONV_OVF_U2:
7128 case CEE_CONV_OVF_U4:
7131 if (sp [-1]->type == STACK_R8) {
7132 ADD_UNOP (CEE_CONV_OVF_U8);
7139 case CEE_CONV_OVF_I1_UN:
7140 case CEE_CONV_OVF_I2_UN:
7141 case CEE_CONV_OVF_I4_UN:
7142 case CEE_CONV_OVF_I8_UN:
7143 case CEE_CONV_OVF_U1_UN:
7144 case CEE_CONV_OVF_U2_UN:
7145 case CEE_CONV_OVF_U4_UN:
7146 case CEE_CONV_OVF_U8_UN:
7147 case CEE_CONV_OVF_I_UN:
7148 case CEE_CONV_OVF_U_UN:
7158 case CEE_ADD_OVF_UN:
7160 case CEE_MUL_OVF_UN:
7162 case CEE_SUB_OVF_UN:
7170 token = read32 (ip + 1);
7171 klass = mini_get_class (method, token, generic_context);
7172 CHECK_TYPELOAD (klass);
7174 if (generic_class_is_reference_type (cfg, klass)) {
7175 MonoInst *store, *load;
7176 int dreg = alloc_preg (cfg);
7178 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
7179 load->flags |= ins_flag;
7180 MONO_ADD_INS (cfg->cbb, load);
7182 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
7183 store->flags |= ins_flag;
7184 MONO_ADD_INS (cfg->cbb, store);
7186 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7198 token = read32 (ip + 1);
7199 klass = mini_get_class (method, token, generic_context);
7200 CHECK_TYPELOAD (klass);
7202 /* Optimize the common ldobj+stloc combination */
7212 loc_index = ip [5] - CEE_STLOC_0;
7219 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
7220 CHECK_LOCAL (loc_index);
7222 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7223 ins->dreg = cfg->locals [loc_index]->dreg;
7229 /* Optimize the ldobj+stobj combination */
7230 /* The reference case ends up being a load+store anyway */
7231 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
7236 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7243 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7252 CHECK_STACK_OVF (1);
7254 n = read32 (ip + 1);
7256 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
7257 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
7258 ins->type = STACK_OBJ;
7261 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
7262 MonoInst *iargs [1];
7264 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
7265 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
7267 if (cfg->opt & MONO_OPT_SHARED) {
7268 MonoInst *iargs [3];
7270 if (cfg->compile_aot) {
7271 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
7273 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7274 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
7275 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
7276 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
7277 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7279 if (bblock->out_of_line) {
7280 MonoInst *iargs [2];
7282 if (image == mono_defaults.corlib) {
7284 * Avoid relocations in AOT and save some space by using a
7285 * version of helper_ldstr specialized to mscorlib.
7287 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
7288 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
7290 /* Avoid creating the string object */
7291 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
7292 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
7293 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
7297 if (cfg->compile_aot) {
7298 NEW_LDSTRCONST (cfg, ins, image, n);
7300 MONO_ADD_INS (bblock, ins);
7303 NEW_PCONST (cfg, ins, NULL);
7304 ins->type = STACK_OBJ;
7305 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7307 MONO_ADD_INS (bblock, ins);
7316 MonoInst *iargs [2];
7317 MonoMethodSignature *fsig;
7320 MonoInst *vtable_arg = NULL;
7323 token = read32 (ip + 1);
7324 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7327 fsig = mono_method_get_signature (cmethod, image, token);
7331 mono_save_token_info (cfg, image, token, cmethod);
7333 if (!mono_class_init (cmethod->klass))
7336 if (cfg->generic_sharing_context)
7337 context_used = mono_method_check_context_used (cmethod);
7339 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
7340 if (check_linkdemand (cfg, method, cmethod))
7342 CHECK_CFG_EXCEPTION;
7343 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
7344 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
7347 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7348 mono_method_is_generic_sharable_impl (cmethod, TRUE)) {
7349 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7350 mono_class_vtable (cfg->domain, cmethod->klass);
7351 CHECK_TYPELOAD (cmethod->klass);
7353 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7354 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7357 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
7358 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7360 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7362 CHECK_TYPELOAD (cmethod->klass);
7363 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7368 n = fsig->param_count;
7372 * Generate smaller code for the common newobj <exception> instruction in
7373 * argument checking code.
7375 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
7376 is_exception_class (cmethod->klass) && n <= 2 &&
7377 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
7378 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
7379 MonoInst *iargs [3];
7381 g_assert (!vtable_arg);
7385 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
7388 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
7392 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
7397 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
7400 g_assert_not_reached ();
7408 /* move the args to allow room for 'this' in the first position */
7414 /* check_call_signature () requires sp[0] to be set */
7415 this_ins.type = STACK_OBJ;
7417 if (check_call_signature (cfg, fsig, sp))
7422 if (mini_class_is_system_array (cmethod->klass)) {
7423 g_assert (!vtable_arg);
7425 *sp = emit_get_rgctx_method (cfg, context_used,
7426 cmethod, MONO_RGCTX_INFO_METHOD);
7428 /* Avoid varargs in the common case */
7429 if (fsig->param_count == 1)
7430 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
7431 else if (fsig->param_count == 2)
7432 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
7433 else if (fsig->param_count == 3)
7434 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
7436 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
7437 } else if (cmethod->string_ctor) {
7438 g_assert (!context_used);
7439 g_assert (!vtable_arg);
7440 /* we simply pass a null pointer */
7441 EMIT_NEW_PCONST (cfg, *sp, NULL);
7442 /* now call the string ctor */
7443 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, sp, NULL, NULL);
7445 MonoInst* callvirt_this_arg = NULL;
7447 if (cmethod->klass->valuetype) {
7448 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
7449 MONO_EMIT_NEW_VZERO (cfg, iargs [0]->dreg, cmethod->klass);
7450 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
7455 * The code generated by mini_emit_virtual_call () expects
7456 * iargs [0] to be a boxed instance, but luckily the vcall
7457 * will be transformed into a normal call there.
7459 } else if (context_used) {
7463 if (cfg->opt & MONO_OPT_SHARED)
7464 rgctx_info = MONO_RGCTX_INFO_KLASS;
7466 rgctx_info = MONO_RGCTX_INFO_VTABLE;
7467 data = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, rgctx_info);
7469 alloc = handle_alloc_from_inst (cfg, cmethod->klass, data, FALSE);
7472 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7474 CHECK_TYPELOAD (cmethod->klass);
7477 * TypeInitializationExceptions thrown from the mono_runtime_class_init
7478 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
7479 * As a workaround, we call class cctors before allocating objects.
7481 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
7482 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
7483 if (cfg->verbose_level > 2)
7484 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
7485 class_inits = g_slist_prepend (class_inits, vtable);
7488 alloc = handle_alloc (cfg, cmethod->klass, FALSE);
7491 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
7494 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
7496 /* Now call the actual ctor */
7497 /* Avoid virtual calls to ctors if possible */
7498 if (cmethod->klass->marshalbyref)
7499 callvirt_this_arg = sp [0];
7501 if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7502 mono_method_check_inlining (cfg, cmethod) &&
7503 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
7504 !g_list_find (dont_inline, cmethod)) {
7507 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
7508 cfg->real_offset += 5;
7511 inline_costs += costs - 5;
7514 mono_emit_method_call_full (cfg, cmethod, fsig, sp, callvirt_this_arg, NULL);
7516 } else if (context_used &&
7517 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
7518 !mono_class_generic_sharing_enabled (cmethod->klass))) {
7519 MonoInst *cmethod_addr;
7521 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7522 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7524 mono_emit_rgctx_calli (cfg, fsig, sp, cmethod_addr, vtable_arg);
7527 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp,
7528 callvirt_this_arg, NULL, vtable_arg);
7532 if (alloc == NULL) {
7534 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
7535 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
7549 token = read32 (ip + 1);
7550 klass = mini_get_class (method, token, generic_context);
7551 CHECK_TYPELOAD (klass);
7552 if (sp [0]->type != STACK_OBJ)
7555 if (cfg->generic_sharing_context)
7556 context_used = mono_class_check_context_used (klass);
7558 if (!context_used && mono_class_has_variant_generic_params (klass)) {
7565 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
7567 ins = mono_emit_jit_icall (cfg, mono_object_castclass, args);
7571 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
7572 MonoMethod *mono_castclass;
7573 MonoInst *iargs [1];
7576 mono_castclass = mono_marshal_get_castclass (klass);
7579 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7580 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7581 g_assert (costs > 0);
7584 cfg->real_offset += 5;
7589 inline_costs += costs;
7592 ins = handle_castclass (cfg, klass, *sp, context_used);
7593 CHECK_CFG_EXCEPTION;
7603 token = read32 (ip + 1);
7604 klass = mini_get_class (method, token, generic_context);
7605 CHECK_TYPELOAD (klass);
7606 if (sp [0]->type != STACK_OBJ)
7609 if (cfg->generic_sharing_context)
7610 context_used = mono_class_check_context_used (klass);
7612 if (!context_used && mono_class_has_variant_generic_params (klass)) {
7619 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
7621 *sp = mono_emit_jit_icall (cfg, mono_object_isinst, args);
7625 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
7626 MonoMethod *mono_isinst;
7627 MonoInst *iargs [1];
7630 mono_isinst = mono_marshal_get_isinst (klass);
7633 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
7634 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7635 g_assert (costs > 0);
7638 cfg->real_offset += 5;
7643 inline_costs += costs;
7646 ins = handle_isinst (cfg, klass, *sp, context_used);
7647 CHECK_CFG_EXCEPTION;
7654 case CEE_UNBOX_ANY: {
7658 token = read32 (ip + 1);
7659 klass = mini_get_class (method, token, generic_context);
7660 CHECK_TYPELOAD (klass);
7662 mono_save_token_info (cfg, image, token, klass);
7664 if (cfg->generic_sharing_context)
7665 context_used = mono_class_check_context_used (klass);
7667 if (generic_class_is_reference_type (cfg, klass)) {
7668 /* CASTCLASS FIXME kill this huge slice of duplicated code*/
7670 MonoInst *iargs [2];
7675 iargs [1] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
7676 ins = mono_emit_jit_icall (cfg, mono_object_castclass, iargs);
7680 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7681 MonoMethod *mono_castclass;
7682 MonoInst *iargs [1];
7685 mono_castclass = mono_marshal_get_castclass (klass);
7688 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7689 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7691 g_assert (costs > 0);
7694 cfg->real_offset += 5;
7698 inline_costs += costs;
7700 ins = handle_castclass (cfg, klass, *sp, 0);
7701 CHECK_CFG_EXCEPTION;
7709 if (mono_class_is_nullable (klass)) {
7710 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
7717 ins = handle_unbox (cfg, klass, sp, context_used);
7723 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7736 token = read32 (ip + 1);
7737 klass = mini_get_class (method, token, generic_context);
7738 CHECK_TYPELOAD (klass);
7740 mono_save_token_info (cfg, image, token, klass);
7742 if (cfg->generic_sharing_context)
7743 context_used = mono_class_check_context_used (klass);
7745 if (generic_class_is_reference_type (cfg, klass)) {
7751 if (klass == mono_defaults.void_class)
7753 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
7755 /* frequent check in generic code: box (struct), brtrue */
7756 if (!mono_class_is_nullable (klass) &&
7757 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) && (ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S)) {
7758 /*printf ("box-brtrue opt at 0x%04x in %s\n", real_offset, method->name);*/
7760 MONO_INST_NEW (cfg, ins, OP_BR);
7761 if (*ip == CEE_BRTRUE_S) {
7764 target = ip + 1 + (signed char)(*ip);
7769 target = ip + 4 + (gint)(read32 (ip));
7772 GET_BBLOCK (cfg, tblock, target);
7773 link_bblock (cfg, bblock, tblock);
7774 ins->inst_target_bb = tblock;
7775 GET_BBLOCK (cfg, tblock, ip);
7777 * This leads to some inconsistency, since the two bblocks are
7778 * not really connected, but it is needed for handling stack
7779 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
7780 * FIXME: This should only be needed if sp != stack_start, but that
7781 * doesn't work for some reason (test failure in mcs/tests on x86).
7783 link_bblock (cfg, bblock, tblock);
7784 if (sp != stack_start) {
7785 handle_stack_args (cfg, stack_start, sp - stack_start);
7787 CHECK_UNVERIFIABLE (cfg);
7789 MONO_ADD_INS (bblock, ins);
7790 start_new_bblock = 1;
7798 if (cfg->opt & MONO_OPT_SHARED)
7799 rgctx_info = MONO_RGCTX_INFO_KLASS;
7801 rgctx_info = MONO_RGCTX_INFO_VTABLE;
7802 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
7803 *sp++ = handle_box_from_inst (cfg, val, klass, context_used, data);
7805 *sp++ = handle_box (cfg, val, klass);
7808 CHECK_CFG_EXCEPTION;
7817 token = read32 (ip + 1);
7818 klass = mini_get_class (method, token, generic_context);
7819 CHECK_TYPELOAD (klass);
7821 mono_save_token_info (cfg, image, token, klass);
7823 if (cfg->generic_sharing_context)
7824 context_used = mono_class_check_context_used (klass);
7826 if (mono_class_is_nullable (klass)) {
7829 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
7830 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
7834 ins = handle_unbox (cfg, klass, sp, context_used);
7844 MonoClassField *field;
7848 if (*ip == CEE_STFLD) {
7855 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
7857 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
7860 token = read32 (ip + 1);
7861 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7862 field = mono_method_get_wrapper_data (method, token);
7863 klass = field->parent;
7866 field = mono_field_from_token (image, token, &klass, generic_context);
7870 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
7871 FIELD_ACCESS_FAILURE;
7872 mono_class_init (klass);
7874 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
7875 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
7876 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
7877 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
7880 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
7881 if (*ip == CEE_STFLD) {
7882 if (target_type_is_incompatible (cfg, field->type, sp [1]))
7884 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
7885 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
7886 MonoInst *iargs [5];
7889 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
7890 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
7891 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
7895 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
7896 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
7897 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7898 g_assert (costs > 0);
7900 cfg->real_offset += 5;
7903 inline_costs += costs;
7905 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
7910 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
7912 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
7914 #if HAVE_WRITE_BARRIERS
7915 if (mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
7916 /* insert call to write barrier */
7917 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
7918 MonoInst *iargs [2], *dummy_use;
7921 dreg = alloc_preg (cfg);
7922 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
7924 mono_emit_method_call (cfg, write_barrier, iargs, NULL);
7926 EMIT_NEW_DUMMY_USE (cfg, dummy_use, sp [1]);
7930 store->flags |= ins_flag;
7937 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
7938 MonoMethod *wrapper = (*ip == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
7939 MonoInst *iargs [4];
7942 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
7943 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
7944 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
7945 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
7946 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
7947 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7949 g_assert (costs > 0);
7951 cfg->real_offset += 5;
7955 inline_costs += costs;
7957 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
7961 if (sp [0]->type == STACK_VTYPE) {
7964 /* Have to compute the address of the variable */
7966 var = get_vreg_to_inst (cfg, sp [0]->dreg);
7968 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
7970 g_assert (var->klass == klass);
7972 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
7976 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
7978 if (*ip == CEE_LDFLDA) {
7979 dreg = alloc_preg (cfg);
7981 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
7982 ins->klass = mono_class_from_mono_type (field->type);
7983 ins->type = STACK_MP;
7988 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
7989 load->flags |= ins_flag;
7990 load->flags |= MONO_INST_FAULT;
8001 MonoClassField *field;
8002 gpointer addr = NULL;
8003 gboolean is_special_static;
8006 token = read32 (ip + 1);
8008 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8009 field = mono_method_get_wrapper_data (method, token);
8010 klass = field->parent;
8013 field = mono_field_from_token (image, token, &klass, generic_context);
8016 mono_class_init (klass);
8017 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
8018 FIELD_ACCESS_FAILURE;
8020 /* if the class is Critical then transparent code cannot access it's fields */
8021 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8022 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8025 * We can only support shared generic static
8026 * field access on architectures where the
8027 * trampoline code has been extended to handle
8028 * the generic class init.
8030 #ifndef MONO_ARCH_VTABLE_REG
8031 GENERIC_SHARING_FAILURE (*ip);
8034 if (cfg->generic_sharing_context)
8035 context_used = mono_class_check_context_used (klass);
8037 g_assert (!(field->type->attrs & FIELD_ATTRIBUTE_LITERAL));
8039 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
8040 * to be called here.
8042 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
8043 mono_class_vtable (cfg->domain, klass);
8044 CHECK_TYPELOAD (klass);
8046 mono_domain_lock (cfg->domain);
8047 if (cfg->domain->special_static_fields)
8048 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
8049 mono_domain_unlock (cfg->domain);
8051 is_special_static = mono_class_field_is_special_static (field);
8053 /* Generate IR to compute the field address */
8054 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && mono_get_thread_intrinsic (cfg) && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
8056 * Fast access to TLS data
8057 * Inline version of get_thread_static_data () in
8061 int idx, static_data_reg, array_reg, dreg;
8062 MonoInst *thread_ins;
8064 // offset &= 0x7fffffff;
8065 // idx = (offset >> 24) - 1;
8066 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
8068 thread_ins = mono_get_thread_intrinsic (cfg);
8069 MONO_ADD_INS (cfg->cbb, thread_ins);
8070 static_data_reg = alloc_ireg (cfg);
8071 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
8073 if (cfg->compile_aot) {
8074 int offset_reg, offset2_reg, idx_reg;
8076 /* For TLS variables, this will return the TLS offset */
8077 EMIT_NEW_SFLDACONST (cfg, ins, field);
8078 offset_reg = ins->dreg;
8079 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
8080 idx_reg = alloc_ireg (cfg);
8081 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
8082 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
8083 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
8084 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
8085 array_reg = alloc_ireg (cfg);
8086 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
8087 offset2_reg = alloc_ireg (cfg);
8088 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
8089 dreg = alloc_ireg (cfg);
8090 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
8092 offset = (gsize)addr & 0x7fffffff;
8093 idx = (offset >> 24) - 1;
8095 array_reg = alloc_ireg (cfg);
8096 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
8097 dreg = alloc_ireg (cfg);
8098 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, (offset & 0xffffff));
8100 } else if ((cfg->opt & MONO_OPT_SHARED) ||
8101 (cfg->compile_aot && is_special_static) ||
8102 (context_used && is_special_static)) {
8103 MonoInst *iargs [2];
8105 g_assert (field->parent);
8106 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8108 iargs [1] = emit_get_rgctx_field (cfg, context_used,
8109 field, MONO_RGCTX_INFO_CLASS_FIELD);
8111 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8113 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8114 } else if (context_used) {
8115 MonoInst *static_data;
8118 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
8119 method->klass->name_space, method->klass->name, method->name,
8120 depth, field->offset);
8123 if (mono_class_needs_cctor_run (klass, method)) {
8127 vtable = emit_get_rgctx_klass (cfg, context_used,
8128 klass, MONO_RGCTX_INFO_VTABLE);
8130 // FIXME: This doesn't work since it tries to pass the argument
8131 // in the normal way, instead of using MONO_ARCH_VTABLE_REG
8133 * The vtable pointer is always passed in a register regardless of
8134 * the calling convention, so assign it manually, and make a call
8135 * using a signature without parameters.
8137 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable);
8138 #ifdef MONO_ARCH_VTABLE_REG
8139 mono_call_inst_add_outarg_reg (cfg, call, vtable->dreg, MONO_ARCH_VTABLE_REG, FALSE);
8140 cfg->uses_vtable_reg = TRUE;
8147 * The pointer we're computing here is
8149 * super_info.static_data + field->offset
8151 static_data = emit_get_rgctx_klass (cfg, context_used,
8152 klass, MONO_RGCTX_INFO_STATIC_DATA);
8154 if (field->offset == 0) {
8157 int addr_reg = mono_alloc_preg (cfg);
8158 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
8160 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
8161 MonoInst *iargs [2];
8163 g_assert (field->parent);
8164 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8165 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8166 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8168 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
8170 CHECK_TYPELOAD (klass);
8172 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
8173 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
8174 if (cfg->verbose_level > 2)
8175 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
8176 class_inits = g_slist_prepend (class_inits, vtable);
8178 if (cfg->run_cctors) {
8180 /* This makes so that inline cannot trigger */
8181 /* .cctors: too many apps depend on them */
8182 /* running with a specific order... */
8183 if (! vtable->initialized)
8185 ex = mono_runtime_class_init_full (vtable, FALSE);
8187 set_exception_object (cfg, ex);
8188 goto exception_exit;
8192 addr = (char*)vtable->data + field->offset;
8194 if (cfg->compile_aot)
8195 EMIT_NEW_SFLDACONST (cfg, ins, field);
8197 EMIT_NEW_PCONST (cfg, ins, addr);
8199 MonoInst *iargs [1];
8200 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
8201 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
8205 /* Generate IR to do the actual load/store operation */
8207 if (*ip == CEE_LDSFLDA) {
8208 ins->klass = mono_class_from_mono_type (field->type);
8209 ins->type = STACK_PTR;
8211 } else if (*ip == CEE_STSFLD) {
8216 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, ins->dreg, 0, sp [0]->dreg);
8217 store->flags |= ins_flag;
8219 gboolean is_const = FALSE;
8220 MonoVTable *vtable = NULL;
8222 if (!context_used) {
8223 vtable = mono_class_vtable (cfg->domain, klass);
8224 CHECK_TYPELOAD (klass);
8226 if (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) &&
8227 vtable->initialized && (field->type->attrs & FIELD_ATTRIBUTE_INIT_ONLY)) {
8228 gpointer addr = (char*)vtable->data + field->offset;
8229 int ro_type = field->type->type;
8230 if (ro_type == MONO_TYPE_VALUETYPE && field->type->data.klass->enumtype) {
8231 ro_type = mono_class_enum_basetype (field->type->data.klass)->type;
8233 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
8236 case MONO_TYPE_BOOLEAN:
8238 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
8242 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
8245 case MONO_TYPE_CHAR:
8247 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
8251 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
8256 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
8260 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
8263 #ifndef HAVE_MOVING_COLLECTOR
8266 case MONO_TYPE_STRING:
8267 case MONO_TYPE_OBJECT:
8268 case MONO_TYPE_CLASS:
8269 case MONO_TYPE_SZARRAY:
8271 case MONO_TYPE_FNPTR:
8272 case MONO_TYPE_ARRAY:
8273 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
8274 type_to_eval_stack_type ((cfg), field->type, *sp);
8280 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
8285 case MONO_TYPE_VALUETYPE:
8295 CHECK_STACK_OVF (1);
8297 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
8298 load->flags |= ins_flag;
8311 token = read32 (ip + 1);
8312 klass = mini_get_class (method, token, generic_context);
8313 CHECK_TYPELOAD (klass);
8314 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
8315 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
8326 const char *data_ptr;
8328 guint32 field_token;
8334 token = read32 (ip + 1);
8336 klass = mini_get_class (method, token, generic_context);
8337 CHECK_TYPELOAD (klass);
8339 if (cfg->generic_sharing_context)
8340 context_used = mono_class_check_context_used (klass);
8342 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
8343 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_I4);
8344 ins->sreg1 = sp [0]->dreg;
8345 ins->type = STACK_I4;
8346 ins->dreg = alloc_ireg (cfg);
8347 MONO_ADD_INS (cfg->cbb, ins);
8348 *sp = mono_decompose_opcode (cfg, ins);
8353 MonoClass *array_class = mono_array_class_get (klass, 1);
8354 /* FIXME: we cannot get a managed
8355 allocator because we can't get the
8356 open generic class's vtable. We
8357 have the same problem in
8358 handle_alloc_from_inst(). This
8359 needs to be solved so that we can
8360 have managed allocs of shared
8363 MonoVTable *array_class_vtable = mono_class_vtable (cfg->domain, array_class);
8364 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class_vtable, 1);
8366 MonoMethod *managed_alloc = NULL;
8368 /* FIXME: Decompose later to help abcrem */
8371 args [0] = emit_get_rgctx_klass (cfg, context_used,
8372 array_class, MONO_RGCTX_INFO_VTABLE);
8377 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
8379 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
8381 if (cfg->opt & MONO_OPT_SHARED) {
8382 /* Decompose now to avoid problems with references to the domainvar */
8383 MonoInst *iargs [3];
8385 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8386 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8389 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
8391 /* Decompose later since it is needed by abcrem */
8392 MonoClass *array_type = mono_array_class_get (klass, 1);
8393 mono_class_vtable (cfg->domain, array_type);
8394 CHECK_TYPELOAD (array_type);
8396 MONO_INST_NEW (cfg, ins, OP_NEWARR);
8397 ins->dreg = alloc_preg (cfg);
8398 ins->sreg1 = sp [0]->dreg;
8399 ins->inst_newa_class = klass;
8400 ins->type = STACK_OBJ;
8402 MONO_ADD_INS (cfg->cbb, ins);
8403 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8404 cfg->cbb->has_array_access = TRUE;
8406 /* Needed so mono_emit_load_get_addr () gets called */
8407 mono_get_got_var (cfg);
8417 * we inline/optimize the initialization sequence if possible.
8418 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
8419 * for small sizes open code the memcpy
8420 * ensure the rva field is big enough
8422 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
8423 MonoMethod *memcpy_method = get_memcpy_method ();
8424 MonoInst *iargs [3];
8425 int add_reg = alloc_preg (cfg);
8427 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
8428 if (cfg->compile_aot) {
8429 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
8431 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
8433 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
8434 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
8443 if (sp [0]->type != STACK_OBJ)
8446 dreg = alloc_preg (cfg);
8447 MONO_INST_NEW (cfg, ins, OP_LDLEN);
8448 ins->dreg = alloc_preg (cfg);
8449 ins->sreg1 = sp [0]->dreg;
8450 ins->type = STACK_I4;
8451 MONO_ADD_INS (cfg->cbb, ins);
8452 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8453 cfg->cbb->has_array_access = TRUE;
8461 if (sp [0]->type != STACK_OBJ)
8464 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8466 klass = mini_get_class (method, read32 (ip + 1), generic_context);
8467 CHECK_TYPELOAD (klass);
8468 /* we need to make sure that this array is exactly the type it needs
8469 * to be for correctness. the wrappers are lax with their usage
8470 * so we need to ignore them here
8472 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
8473 MonoClass *array_class = mono_array_class_get (klass, 1);
8474 mini_emit_check_array_type (cfg, sp [0], array_class);
8475 CHECK_TYPELOAD (array_class);
8479 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8494 case CEE_LDELEM_REF: {
8500 if (*ip == CEE_LDELEM) {
8502 token = read32 (ip + 1);
8503 klass = mini_get_class (method, token, generic_context);
8504 CHECK_TYPELOAD (klass);
8505 mono_class_init (klass);
8508 klass = array_access_to_klass (*ip);
8510 if (sp [0]->type != STACK_OBJ)
8513 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8515 if (sp [1]->opcode == OP_ICONST) {
8516 int array_reg = sp [0]->dreg;
8517 int index_reg = sp [1]->dreg;
8518 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8520 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8521 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
8523 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8524 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
8527 if (*ip == CEE_LDELEM)
8540 case CEE_STELEM_REF:
8547 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8549 if (*ip == CEE_STELEM) {
8551 token = read32 (ip + 1);
8552 klass = mini_get_class (method, token, generic_context);
8553 CHECK_TYPELOAD (klass);
8554 mono_class_init (klass);
8557 klass = array_access_to_klass (*ip);
8559 if (sp [0]->type != STACK_OBJ)
8562 /* storing a NULL doesn't need any of the complex checks in stelemref */
8563 if (generic_class_is_reference_type (cfg, klass) &&
8564 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
8565 MonoMethod* helper = mono_marshal_get_stelemref ();
8566 MonoInst *iargs [3];
8568 if (sp [0]->type != STACK_OBJ)
8570 if (sp [2]->type != STACK_OBJ)
8577 mono_emit_method_call (cfg, helper, iargs, NULL);
8579 if (sp [1]->opcode == OP_ICONST) {
8580 int array_reg = sp [0]->dreg;
8581 int index_reg = sp [1]->dreg;
8582 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8584 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8585 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
8587 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8588 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
8592 if (*ip == CEE_STELEM)
8599 case CEE_CKFINITE: {
8603 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
8604 ins->sreg1 = sp [0]->dreg;
8605 ins->dreg = alloc_freg (cfg);
8606 ins->type = STACK_R8;
8607 MONO_ADD_INS (bblock, ins);
8609 *sp++ = mono_decompose_opcode (cfg, ins);
8614 case CEE_REFANYVAL: {
8615 MonoInst *src_var, *src;
8617 int klass_reg = alloc_preg (cfg);
8618 int dreg = alloc_preg (cfg);
8621 MONO_INST_NEW (cfg, ins, *ip);
8624 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8625 CHECK_TYPELOAD (klass);
8626 mono_class_init (klass);
8628 if (cfg->generic_sharing_context)
8629 context_used = mono_class_check_context_used (klass);
8632 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
8634 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
8635 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
8636 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
8639 MonoInst *klass_ins;
8641 klass_ins = emit_get_rgctx_klass (cfg, context_used,
8642 klass, MONO_RGCTX_INFO_KLASS);
8645 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
8646 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
8648 mini_emit_class_check (cfg, klass_reg, klass);
8650 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
8651 ins->type = STACK_MP;
8656 case CEE_MKREFANY: {
8657 MonoInst *loc, *addr;
8660 MONO_INST_NEW (cfg, ins, *ip);
8663 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8664 CHECK_TYPELOAD (klass);
8665 mono_class_init (klass);
8667 if (cfg->generic_sharing_context)
8668 context_used = mono_class_check_context_used (klass);
8670 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
8671 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
8674 MonoInst *const_ins;
8675 int type_reg = alloc_preg (cfg);
8677 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
8678 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
8679 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8680 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8681 } else if (cfg->compile_aot) {
8682 int const_reg = alloc_preg (cfg);
8683 int type_reg = alloc_preg (cfg);
8685 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
8686 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
8687 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8688 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8690 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
8691 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
8693 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
8695 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
8696 ins->type = STACK_VTYPE;
8697 ins->klass = mono_defaults.typed_reference_class;
8704 MonoClass *handle_class;
8706 CHECK_STACK_OVF (1);
8709 n = read32 (ip + 1);
8711 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
8712 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
8713 handle = mono_method_get_wrapper_data (method, n);
8714 handle_class = mono_method_get_wrapper_data (method, n + 1);
8715 if (handle_class == mono_defaults.typehandle_class)
8716 handle = &((MonoClass*)handle)->byval_arg;
8719 handle = mono_ldtoken (image, n, &handle_class, generic_context);
8723 mono_class_init (handle_class);
8724 if (cfg->generic_sharing_context) {
8725 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
8726 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
8727 /* This case handles ldtoken
8728 of an open type, like for
8731 } else if (handle_class == mono_defaults.typehandle_class) {
8732 /* If we get a MONO_TYPE_CLASS
8733 then we need to provide the
8735 instantiation of it. */
8736 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
8739 context_used = mono_class_check_context_used (mono_class_from_mono_type (handle));
8740 } else if (handle_class == mono_defaults.fieldhandle_class)
8741 context_used = mono_class_check_context_used (((MonoClassField*)handle)->parent);
8742 else if (handle_class == mono_defaults.methodhandle_class)
8743 context_used = mono_method_check_context_used (handle);
8745 g_assert_not_reached ();
8748 if ((cfg->opt & MONO_OPT_SHARED) &&
8749 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
8750 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
8751 MonoInst *addr, *vtvar, *iargs [3];
8752 int method_context_used;
8754 if (cfg->generic_sharing_context)
8755 method_context_used = mono_method_check_context_used (method);
8757 method_context_used = 0;
8759 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8761 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
8762 EMIT_NEW_ICONST (cfg, iargs [1], n);
8763 if (method_context_used) {
8764 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
8765 method, MONO_RGCTX_INFO_METHOD);
8766 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
8768 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
8769 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
8771 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8773 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8775 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8777 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
8778 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
8779 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
8780 (cmethod->klass == mono_defaults.monotype_class->parent) &&
8781 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
8782 MonoClass *tclass = mono_class_from_mono_type (handle);
8784 mono_class_init (tclass);
8786 ins = emit_get_rgctx_klass (cfg, context_used,
8787 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
8788 } else if (cfg->compile_aot) {
8789 if (method->wrapper_type) {
8790 if (mono_class_get (tclass->image, tclass->type_token) == tclass && !generic_context) {
8791 /* Special case for static synchronized wrappers */
8792 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
8794 /* FIXME: n is not a normal token */
8795 cfg->disable_aot = TRUE;
8796 EMIT_NEW_PCONST (cfg, ins, NULL);
8799 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
8802 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
8804 ins->type = STACK_OBJ;
8805 ins->klass = cmethod->klass;
8808 MonoInst *addr, *vtvar;
8810 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8813 if (handle_class == mono_defaults.typehandle_class) {
8814 ins = emit_get_rgctx_klass (cfg, context_used,
8815 mono_class_from_mono_type (handle),
8816 MONO_RGCTX_INFO_TYPE);
8817 } else if (handle_class == mono_defaults.methodhandle_class) {
8818 ins = emit_get_rgctx_method (cfg, context_used,
8819 handle, MONO_RGCTX_INFO_METHOD);
8820 } else if (handle_class == mono_defaults.fieldhandle_class) {
8821 ins = emit_get_rgctx_field (cfg, context_used,
8822 handle, MONO_RGCTX_INFO_CLASS_FIELD);
8824 g_assert_not_reached ();
8826 } else if (cfg->compile_aot) {
8827 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n);
8829 EMIT_NEW_PCONST (cfg, ins, handle);
8831 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8832 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8833 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8843 MONO_INST_NEW (cfg, ins, OP_THROW);
8845 ins->sreg1 = sp [0]->dreg;
8847 bblock->out_of_line = TRUE;
8848 MONO_ADD_INS (bblock, ins);
8849 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
8850 MONO_ADD_INS (bblock, ins);
8853 link_bblock (cfg, bblock, end_bblock);
8854 start_new_bblock = 1;
8856 case CEE_ENDFINALLY:
8857 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
8858 MONO_ADD_INS (bblock, ins);
8860 start_new_bblock = 1;
8863 * Control will leave the method so empty the stack, otherwise
8864 * the next basic block will start with a nonempty stack.
8866 while (sp != stack_start) {
8874 if (*ip == CEE_LEAVE) {
8876 target = ip + 5 + (gint32)read32(ip + 1);
8879 target = ip + 2 + (signed char)(ip [1]);
8882 /* empty the stack */
8883 while (sp != stack_start) {
8888 * If this leave statement is in a catch block, check for a
8889 * pending exception, and rethrow it if necessary.
8890 * We avoid doing this in runtime invoke wrappers, since those are called
8891 			 * by native code which expects the wrapper to catch all exceptions.
8893 for (i = 0; i < header->num_clauses; ++i) {
8894 MonoExceptionClause *clause = &header->clauses [i];
8897 * Use <= in the final comparison to handle clauses with multiple
8898 * leave statements, like in bug #78024.
8899 * The ordering of the exception clauses guarantees that we find the
8902 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
8904 MonoBasicBlock *dont_throw;
8909 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
8912 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
8914 NEW_BBLOCK (cfg, dont_throw);
8917 					 * Currently, we always rethrow the abort exception, despite the
8918 * fact that this is not correct. See thread6.cs for an example.
8919 * But propagating the abort exception is more important than
8920 					 * getting the semantics right.
8922 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
8923 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
8924 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
8926 MONO_START_BB (cfg, dont_throw);
8931 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
8933 for (tmp = handlers; tmp; tmp = tmp->next) {
8935 link_bblock (cfg, bblock, tblock);
8936 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
8937 ins->inst_target_bb = tblock;
8938 MONO_ADD_INS (bblock, ins);
8939 bblock->has_call_handler = 1;
8940 if (COMPILE_LLVM (cfg)) {
8941 MonoBasicBlock *target_bb;
8944 * Link the finally bblock with the target, since it will
8945 * conceptually branch there.
8946 * FIXME: Have to link the bblock containing the endfinally.
8948 GET_BBLOCK (cfg, target_bb, target);
8949 link_bblock (cfg, tblock, target_bb);
8952 g_list_free (handlers);
8955 MONO_INST_NEW (cfg, ins, OP_BR);
8956 MONO_ADD_INS (bblock, ins);
8957 GET_BBLOCK (cfg, tblock, target);
8958 link_bblock (cfg, bblock, tblock);
8959 ins->inst_target_bb = tblock;
8960 start_new_bblock = 1;
8962 if (*ip == CEE_LEAVE)
8971 * Mono specific opcodes
8973 case MONO_CUSTOM_PREFIX: {
8975 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
8979 case CEE_MONO_ICALL: {
8981 MonoJitICallInfo *info;
8983 token = read32 (ip + 2);
8984 func = mono_method_get_wrapper_data (method, token);
8985 info = mono_find_jit_icall_by_addr (func);
8988 CHECK_STACK (info->sig->param_count);
8989 sp -= info->sig->param_count;
8991 ins = mono_emit_jit_icall (cfg, info->func, sp);
8992 if (!MONO_TYPE_IS_VOID (info->sig->ret))
8996 inline_costs += 10 * num_calls++;
9000 case CEE_MONO_LDPTR: {
9003 CHECK_STACK_OVF (1);
9005 token = read32 (ip + 2);
9007 ptr = mono_method_get_wrapper_data (method, token);
9008 if (cfg->compile_aot && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (strstr (method->name, "__icall_wrapper_") == method->name)) {
9009 MonoJitICallInfo *callinfo;
9010 const char *icall_name;
9012 icall_name = method->name + strlen ("__icall_wrapper_");
9013 g_assert (icall_name);
9014 callinfo = mono_find_jit_icall_by_name (icall_name);
9015 g_assert (callinfo);
9017 if (ptr == callinfo->func) {
9018 /* Will be transformed into an AOTCONST later */
9019 EMIT_NEW_PCONST (cfg, ins, ptr);
9025 /* FIXME: Generalize this */
9026 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
9027 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
9032 EMIT_NEW_PCONST (cfg, ins, ptr);
9035 inline_costs += 10 * num_calls++;
9036 /* Can't embed random pointers into AOT code */
9037 cfg->disable_aot = 1;
9040 case CEE_MONO_ICALL_ADDR: {
9041 MonoMethod *cmethod;
9044 CHECK_STACK_OVF (1);
9046 token = read32 (ip + 2);
9048 cmethod = mono_method_get_wrapper_data (method, token);
9050 if (cfg->compile_aot) {
9051 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
9053 ptr = mono_lookup_internal_call (cmethod);
9055 EMIT_NEW_PCONST (cfg, ins, ptr);
9061 case CEE_MONO_VTADDR: {
9062 MonoInst *src_var, *src;
9068 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9069 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
9074 case CEE_MONO_NEWOBJ: {
9075 MonoInst *iargs [2];
9077 CHECK_STACK_OVF (1);
9079 token = read32 (ip + 2);
9080 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9081 mono_class_init (klass);
9082 NEW_DOMAINCONST (cfg, iargs [0]);
9083 MONO_ADD_INS (cfg->cbb, iargs [0]);
9084 NEW_CLASSCONST (cfg, iargs [1], klass);
9085 MONO_ADD_INS (cfg->cbb, iargs [1]);
9086 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
9088 inline_costs += 10 * num_calls++;
9091 case CEE_MONO_OBJADDR:
9094 MONO_INST_NEW (cfg, ins, OP_MOVE);
9095 ins->dreg = alloc_preg (cfg);
9096 ins->sreg1 = sp [0]->dreg;
9097 ins->type = STACK_MP;
9098 MONO_ADD_INS (cfg->cbb, ins);
9102 case CEE_MONO_LDNATIVEOBJ:
9104 * Similar to LDOBJ, but instead load the unmanaged
9105 * representation of the vtype to the stack.
9110 token = read32 (ip + 2);
9111 klass = mono_method_get_wrapper_data (method, token);
9112 g_assert (klass->valuetype);
9113 mono_class_init (klass);
9116 MonoInst *src, *dest, *temp;
9119 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
9120 temp->backend.is_pinvoke = 1;
9121 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
9122 mini_emit_stobj (cfg, dest, src, klass, TRUE);
9124 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
9125 dest->type = STACK_VTYPE;
9126 dest->klass = klass;
9132 case CEE_MONO_RETOBJ: {
9134 * Same as RET, but return the native representation of a vtype
9137 g_assert (cfg->ret);
9138 g_assert (mono_method_signature (method)->pinvoke);
9143 token = read32 (ip + 2);
9144 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9146 if (!cfg->vret_addr) {
9147 g_assert (cfg->ret_var_is_local);
9149 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
9151 EMIT_NEW_RETLOADA (cfg, ins);
9153 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
9155 if (sp != stack_start)
9158 MONO_INST_NEW (cfg, ins, OP_BR);
9159 ins->inst_target_bb = end_bblock;
9160 MONO_ADD_INS (bblock, ins);
9161 link_bblock (cfg, bblock, end_bblock);
9162 start_new_bblock = 1;
9166 case CEE_MONO_CISINST:
9167 case CEE_MONO_CCASTCLASS: {
9172 token = read32 (ip + 2);
9173 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9174 if (ip [1] == CEE_MONO_CISINST)
9175 ins = handle_cisinst (cfg, klass, sp [0]);
9177 ins = handle_ccastclass (cfg, klass, sp [0]);
9183 case CEE_MONO_SAVE_LMF:
9184 case CEE_MONO_RESTORE_LMF:
9185 #ifdef MONO_ARCH_HAVE_LMF_OPS
9186 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
9187 MONO_ADD_INS (bblock, ins);
9188 cfg->need_lmf_area = TRUE;
9192 case CEE_MONO_CLASSCONST:
9193 CHECK_STACK_OVF (1);
9195 token = read32 (ip + 2);
9196 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
9199 inline_costs += 10 * num_calls++;
9201 case CEE_MONO_NOT_TAKEN:
9202 bblock->out_of_line = TRUE;
9206 CHECK_STACK_OVF (1);
9208 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
9209 ins->dreg = alloc_preg (cfg);
9210 ins->inst_offset = (gint32)read32 (ip + 2);
9211 ins->type = STACK_PTR;
9212 MONO_ADD_INS (bblock, ins);
9216 case CEE_MONO_DYN_CALL: {
9219 /* It would be easier to call a trampoline, but that would put an
9220 * extra frame on the stack, confusing exception handling. So
9221 * implement it inline using an opcode for now.
9224 if (!cfg->dyn_call_var) {
9225 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
9226 /* prevent it from being register allocated */
9227 cfg->dyn_call_var->flags |= MONO_INST_INDIRECT;
9230 			/* Has to use a call inst since the local regalloc expects it */
9231 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
9232 ins = (MonoInst*)call;
9234 ins->sreg1 = sp [0]->dreg;
9235 ins->sreg2 = sp [1]->dreg;
9236 MONO_ADD_INS (bblock, ins);
9238 #ifdef MONO_ARCH_DYN_CALL_PARAM_AREA
9239 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
9243 inline_costs += 10 * num_calls++;
9248 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
9258 /* somewhat similar to LDTOKEN */
9259 MonoInst *addr, *vtvar;
9260 CHECK_STACK_OVF (1);
9261 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
9263 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9264 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
9266 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9267 ins->type = STACK_VTYPE;
9268 ins->klass = mono_defaults.argumenthandle_class;
9281 * The following transforms:
9282 * CEE_CEQ into OP_CEQ
9283 * CEE_CGT into OP_CGT
9284 * CEE_CGT_UN into OP_CGT_UN
9285 * CEE_CLT into OP_CLT
9286 * CEE_CLT_UN into OP_CLT_UN
9288 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
9290 MONO_INST_NEW (cfg, ins, cmp->opcode);
9292 cmp->sreg1 = sp [0]->dreg;
9293 cmp->sreg2 = sp [1]->dreg;
9294 type_from_op (cmp, sp [0], sp [1]);
9296 if ((sp [0]->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
9297 cmp->opcode = OP_LCOMPARE;
9298 else if (sp [0]->type == STACK_R8)
9299 cmp->opcode = OP_FCOMPARE;
9301 cmp->opcode = OP_ICOMPARE;
9302 MONO_ADD_INS (bblock, cmp);
9303 ins->type = STACK_I4;
9304 ins->dreg = alloc_dreg (cfg, ins->type);
9305 type_from_op (ins, sp [0], sp [1]);
9307 if (cmp->opcode == OP_FCOMPARE) {
9309 * The backends expect the fceq opcodes to do the
9312 cmp->opcode = OP_NOP;
9313 ins->sreg1 = cmp->sreg1;
9314 ins->sreg2 = cmp->sreg2;
9316 MONO_ADD_INS (bblock, ins);
9323 MonoMethod *cil_method;
9324 gboolean needs_static_rgctx_invoke;
9326 CHECK_STACK_OVF (1);
9328 n = read32 (ip + 2);
9329 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9332 mono_class_init (cmethod->klass);
9334 mono_save_token_info (cfg, image, n, cmethod);
9336 if (cfg->generic_sharing_context)
9337 context_used = mono_method_check_context_used (cmethod);
9339 needs_static_rgctx_invoke = mono_method_needs_static_rgctx_invoke (cmethod, TRUE);
9341 cil_method = cmethod;
9342 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
9343 METHOD_ACCESS_FAILURE;
9345 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9346 if (check_linkdemand (cfg, method, cmethod))
9348 CHECK_CFG_EXCEPTION;
9349 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9350 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9354 * Optimize the common case of ldftn+delegate creation
9356 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
9357 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
9358 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
9360 int invoke_context_used = 0;
9362 invoke = mono_get_delegate_invoke (ctor_method->klass);
9363 if (!invoke || !mono_method_signature (invoke))
9366 if (cfg->generic_sharing_context)
9367 invoke_context_used = mono_method_check_context_used (invoke);
9369 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE) && !defined(HAVE_WRITE_BARRIERS)
9370 /* FIXME: SGEN support */
9371 if (invoke_context_used == 0) {
9372 MonoInst *target_ins;
9375 if (cfg->verbose_level > 3)
9376 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
9377 target_ins = sp [-1];
9379 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used);
9380 CHECK_CFG_EXCEPTION;
9389 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
9390 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
9394 inline_costs += 10 * num_calls++;
9397 case CEE_LDVIRTFTN: {
9402 n = read32 (ip + 2);
9403 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9406 mono_class_init (cmethod->klass);
9408 if (cfg->generic_sharing_context)
9409 context_used = mono_method_check_context_used (cmethod);
9411 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9412 if (check_linkdemand (cfg, method, cmethod))
9414 CHECK_CFG_EXCEPTION;
9415 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9416 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9422 args [1] = emit_get_rgctx_method (cfg, context_used,
9423 cmethod, MONO_RGCTX_INFO_METHOD);
9426 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
9428 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
9431 inline_costs += 10 * num_calls++;
9435 CHECK_STACK_OVF (1);
9437 n = read16 (ip + 2);
9439 EMIT_NEW_ARGLOAD (cfg, ins, n);
9444 CHECK_STACK_OVF (1);
9446 n = read16 (ip + 2);
9448 NEW_ARGLOADA (cfg, ins, n);
9449 MONO_ADD_INS (cfg->cbb, ins);
9457 n = read16 (ip + 2);
9459 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
9461 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
9465 CHECK_STACK_OVF (1);
9467 n = read16 (ip + 2);
9469 EMIT_NEW_LOCLOAD (cfg, ins, n);
9474 unsigned char *tmp_ip;
9475 CHECK_STACK_OVF (1);
9477 n = read16 (ip + 2);
9480 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
9486 EMIT_NEW_LOCLOADA (cfg, ins, n);
9495 n = read16 (ip + 2);
9497 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
9499 emit_stloc_ir (cfg, sp, header, n);
9506 if (sp != stack_start)
9508 if (cfg->method != method)
9510 * Inlining this into a loop in a parent could lead to
9511 * stack overflows which is different behavior than the
9512 * non-inlined case, thus disable inlining in this case.
9514 goto inline_failure;
9516 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
9517 ins->dreg = alloc_preg (cfg);
9518 ins->sreg1 = sp [0]->dreg;
9519 ins->type = STACK_PTR;
9520 MONO_ADD_INS (cfg->cbb, ins);
9522 cfg->flags |= MONO_CFG_HAS_ALLOCA;
9524 ins->flags |= MONO_INST_INIT;
9529 case CEE_ENDFILTER: {
9530 MonoExceptionClause *clause, *nearest;
9531 int cc, nearest_num;
9535 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
9537 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
9538 ins->sreg1 = (*sp)->dreg;
9539 MONO_ADD_INS (bblock, ins);
9540 start_new_bblock = 1;
9545 for (cc = 0; cc < header->num_clauses; ++cc) {
9546 clause = &header->clauses [cc];
9547 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
9548 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
9549 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
9555 if ((ip - header->code) != nearest->handler_offset)
9560 case CEE_UNALIGNED_:
9561 ins_flag |= MONO_INST_UNALIGNED;
9562 /* FIXME: record alignment? we can assume 1 for now */
9567 ins_flag |= MONO_INST_VOLATILE;
9571 ins_flag |= MONO_INST_TAILCALL;
9572 cfg->flags |= MONO_CFG_HAS_TAIL;
9573 /* Can't inline tail calls at this time */
9574 inline_costs += 100000;
9581 token = read32 (ip + 2);
9582 klass = mini_get_class (method, token, generic_context);
9583 CHECK_TYPELOAD (klass);
9584 if (generic_class_is_reference_type (cfg, klass))
9585 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
9587 mini_emit_initobj (cfg, *sp, NULL, klass);
9591 case CEE_CONSTRAINED_:
9593 token = read32 (ip + 2);
9594 if (method->wrapper_type != MONO_WRAPPER_NONE)
9595 constrained_call = (MonoClass *)mono_method_get_wrapper_data (method, token);
9597 constrained_call = mono_class_get_full (image, token, generic_context);
9598 CHECK_TYPELOAD (constrained_call);
9603 MonoInst *iargs [3];
9607 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
9608 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
9609 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
9610 /* emit_memset only works when val == 0 */
9611 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
9616 if (ip [1] == CEE_CPBLK) {
9617 MonoMethod *memcpy_method = get_memcpy_method ();
9618 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
9620 MonoMethod *memset_method = get_memset_method ();
9621 mono_emit_method_call (cfg, memset_method, iargs, NULL);
9631 ins_flag |= MONO_INST_NOTYPECHECK;
9633 ins_flag |= MONO_INST_NORANGECHECK;
9634 /* we ignore the no-nullcheck for now since we
9635 * really do it explicitly only when doing callvirt->call
9641 int handler_offset = -1;
9643 for (i = 0; i < header->num_clauses; ++i) {
9644 MonoExceptionClause *clause = &header->clauses [i];
9645 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
9646 handler_offset = clause->handler_offset;
9651 bblock->flags |= BB_EXCEPTION_UNSAFE;
9653 g_assert (handler_offset != -1);
9655 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
9656 MONO_INST_NEW (cfg, ins, OP_RETHROW);
9657 ins->sreg1 = load->dreg;
9658 MONO_ADD_INS (bblock, ins);
9660 link_bblock (cfg, bblock, end_bblock);
9661 start_new_bblock = 1;
9669 CHECK_STACK_OVF (1);
9671 token = read32 (ip + 2);
9672 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !method->klass->image->dynamic) {
9673 MonoType *type = mono_type_create_from_typespec (image, token);
9674 token = mono_type_size (type, &ialign);
9676 MonoClass *klass = mono_class_get_full (image, token, generic_context);
9677 CHECK_TYPELOAD (klass);
9678 mono_class_init (klass);
9679 token = mono_class_value_size (klass, &align);
9681 EMIT_NEW_ICONST (cfg, ins, token);
9686 case CEE_REFANYTYPE: {
9687 MonoInst *src_var, *src;
9693 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9695 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
9696 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
9697 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
9715 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
9725 g_warning ("opcode 0x%02x not handled", *ip);
9729 if (start_new_bblock != 1)
9732 bblock->cil_length = ip - bblock->cil_code;
9733 bblock->next_bb = end_bblock;
9735 if (cfg->method == method && cfg->domainvar) {
9737 MonoInst *get_domain;
9739 cfg->cbb = init_localsbb;
9741 if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
9742 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
9745 get_domain->dreg = alloc_preg (cfg);
9746 MONO_ADD_INS (cfg->cbb, get_domain);
9748 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
9749 MONO_ADD_INS (cfg->cbb, store);
9752 #ifdef TARGET_POWERPC
9753 if (cfg->compile_aot)
9754 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
9755 mono_get_got_var (cfg);
9758 if (cfg->method == method && cfg->got_var)
9759 mono_emit_load_got_addr (cfg);
9764 cfg->cbb = init_localsbb;
9766 for (i = 0; i < header->num_locals; ++i) {
9767 MonoType *ptype = header->locals [i];
9768 int t = ptype->type;
9769 dreg = cfg->locals [i]->dreg;
9771 if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
9772 t = mono_class_enum_basetype (ptype->data.klass)->type;
9774 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9775 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
9776 MONO_EMIT_NEW_ICONST (cfg, cfg->locals [i]->dreg, 0);
9777 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
9778 MONO_EMIT_NEW_I8CONST (cfg, cfg->locals [i]->dreg, 0);
9779 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
9780 MONO_INST_NEW (cfg, ins, OP_R8CONST);
9781 ins->type = STACK_R8;
9782 ins->inst_p0 = (void*)&r8_0;
9783 ins->dreg = alloc_dreg (cfg, STACK_R8);
9784 MONO_ADD_INS (init_localsbb, ins);
9785 EMIT_NEW_LOCSTORE (cfg, store, i, ins);
9786 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
9787 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
9788 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
9790 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9795 if (cfg->init_ref_vars && cfg->method == method) {
9796 /* Emit initialization for ref vars */
9797 // FIXME: Avoid duplication initialization for IL locals.
9798 for (i = 0; i < cfg->num_varinfo; ++i) {
9799 MonoInst *ins = cfg->varinfo [i];
9801 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
9802 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
9806 /* Add a sequence point for method entry/exit events */
9808 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
9809 MONO_ADD_INS (init_localsbb, ins);
9810 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
9811 MONO_ADD_INS (cfg->bb_exit, ins);
9816 if (cfg->method == method) {
9818 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
9819 bb->region = mono_find_block_region (cfg, bb->real_offset);
9821 mono_create_spvar_for_region (cfg, bb->region);
9822 if (cfg->verbose_level > 2)
9823 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
9827 g_slist_free (class_inits);
9828 dont_inline = g_list_remove (dont_inline, method);
9830 if (inline_costs < 0) {
9833 /* Method is too large */
9834 mname = mono_method_full_name (method, TRUE);
9835 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
9836 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
9838 mono_metadata_free_mh (header);
9842 if ((cfg->verbose_level > 2) && (cfg->method == method))
9843 mono_print_code (cfg, "AFTER METHOD-TO-IR");
9845 mono_metadata_free_mh (header);
9846 return inline_costs;
9849 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
9856 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
9860 set_exception_type_from_invalid_il (cfg, method, ip);
9864 g_slist_free (class_inits);
9865 mono_basic_block_free (bb);
9866 dont_inline = g_list_remove (dont_inline, method);
9867 mono_metadata_free_mh (header);
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Return the OP_STOREx_MEMBASE_IMM counterpart of a register-source
 * store opcode, used when the stored value is known to be a constant
 * that can be encoded as an immediate (see the constant-store fusing in
 * mono_spill_global_vars).  Unhandled opcodes abort via
 * g_assert_not_reached ().
 */
9872 store_membase_reg_to_store_membase_imm (int opcode)
9875 case OP_STORE_MEMBASE_REG:
9876 return OP_STORE_MEMBASE_IMM;
9877 case OP_STOREI1_MEMBASE_REG:
9878 return OP_STOREI1_MEMBASE_IMM;
9879 case OP_STOREI2_MEMBASE_REG:
9880 return OP_STOREI2_MEMBASE_IMM;
9881 case OP_STOREI4_MEMBASE_REG:
9882 return OP_STOREI4_MEMBASE_IMM;
9883 case OP_STOREI8_MEMBASE_REG:
9884 return OP_STOREI8_MEMBASE_IMM;
/* Any other store opcode is a caller bug */
9886 g_assert_not_reached ();
9892 #endif /* DISABLE_JIT */
/*
 * mono_op_to_op_imm:
 *
 *   Map an opcode taking a register operand to the equivalent opcode
 * taking an immediate operand (e.g. OP_COMPARE -> OP_COMPARE_IMM,
 * OP_STOREx_MEMBASE_REG -> OP_STOREx_MEMBASE_IMM).  Some mappings are
 * target specific (x86/amd64 push/compare fusions below).
 * NOTE(review): several case labels are not visible in this excerpt;
 * the return values document which groups are handled.
 */
9895 mono_op_to_op_imm (int opcode)
9905 return OP_IDIV_UN_IMM;
9909 return OP_IREM_UN_IMM;
9923 return OP_ISHR_UN_IMM;
9940 return OP_LSHR_UN_IMM;
9943 return OP_COMPARE_IMM;
9945 return OP_ICOMPARE_IMM;
9947 return OP_LCOMPARE_IMM;
9949 case OP_STORE_MEMBASE_REG:
9950 return OP_STORE_MEMBASE_IMM;
9951 case OP_STOREI1_MEMBASE_REG:
9952 return OP_STOREI1_MEMBASE_IMM;
9953 case OP_STOREI2_MEMBASE_REG:
9954 return OP_STOREI2_MEMBASE_IMM;
9955 case OP_STOREI4_MEMBASE_REG:
9956 return OP_STOREI4_MEMBASE_IMM;
9958 #if defined(TARGET_X86) || defined (TARGET_AMD64)
9960 return OP_X86_PUSH_IMM;
9961 case OP_X86_COMPARE_MEMBASE_REG:
9962 return OP_X86_COMPARE_MEMBASE_IMM;
9964 #if defined(TARGET_AMD64)
9965 case OP_AMD64_ICOMPARE_MEMBASE_REG:
9966 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
9968 case OP_VOIDCALL_REG:
9977 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Map a CIL CEE_LDIND_* opcode to the JIT OP_LOAD*_MEMBASE opcode of
 * the same width/signedness.  LDIND_I and LDIND_REF both map to the
 * pointer-sized OP_LOAD_MEMBASE.  Unknown opcodes abort.
 */
9984 ldind_to_load_membase (int opcode)
9988 return OP_LOADI1_MEMBASE;
9990 return OP_LOADU1_MEMBASE;
9992 return OP_LOADI2_MEMBASE;
9994 return OP_LOADU2_MEMBASE;
9996 return OP_LOADI4_MEMBASE;
9998 return OP_LOADU4_MEMBASE;
10000 return OP_LOAD_MEMBASE;
10001 case CEE_LDIND_REF:
10002 return OP_LOAD_MEMBASE;
10004 return OP_LOADI8_MEMBASE;
10006 return OP_LOADR4_MEMBASE;
10008 return OP_LOADR8_MEMBASE;
10010 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Map a CIL CEE_STIND_* opcode to the JIT OP_STORE*_MEMBASE_REG
 * opcode of the same width.  STIND_I and STIND_REF both map to the
 * pointer-sized OP_STORE_MEMBASE_REG.  Unknown opcodes abort.
 */
10017 stind_to_store_membase (int opcode)
10021 return OP_STOREI1_MEMBASE_REG;
10023 return OP_STOREI2_MEMBASE_REG;
10025 return OP_STOREI4_MEMBASE_REG;
10027 case CEE_STIND_REF:
10028 return OP_STORE_MEMBASE_REG;
10030 return OP_STOREI8_MEMBASE_REG;
10032 return OP_STORER4_MEMBASE_REG;
10034 return OP_STORER8_MEMBASE_REG;
10036 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map a basereg+offset load opcode to the corresponding
 * absolute-address OP_*_MEM load.  Only implemented for x86/amd64 (see
 * the FIXME below); the 8-byte load is only mapped when
 * SIZEOF_REGISTER == 8.  Presumably returns a "no mapping" sentinel on
 * other targets/opcodes (fallback lines elided here) — confirm against
 * the full source.
 */
10043 mono_load_membase_to_load_mem (int opcode)
10045 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
10046 #if defined(TARGET_X86) || defined(TARGET_AMD64)
10048 case OP_LOAD_MEMBASE:
10049 return OP_LOAD_MEM;
10050 case OP_LOADU1_MEMBASE:
10051 return OP_LOADU1_MEM;
10052 case OP_LOADU2_MEMBASE:
10053 return OP_LOADU2_MEM;
10054 case OP_LOADI4_MEMBASE:
10055 return OP_LOADI4_MEM;
10056 case OP_LOADU4_MEMBASE:
10057 return OP_LOADU4_MEM;
10058 #if SIZEOF_REGISTER == 8
10059 case OP_LOADI8_MEMBASE:
10060 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   Return an x86/amd64 opcode which performs OPCODE as a
 * read-modify-write directly on a memory destination (reg-to-mem or
 * imm-to-mem), or -1 when the combination cannot be fused (callers in
 * mono_spill_global_vars test for -1).  STORE_OPCODE is the store that
 * would otherwise spill the destination; the guards below restrict
 * fusing to the operand widths each target supports.
 */
10069 op_to_op_dest_membase (int store_opcode, int opcode)
10071 #if defined(TARGET_X86)
/* x86: only pointer-sized / 32-bit stores can be fused */
10072 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
10077 return OP_X86_ADD_MEMBASE_REG;
10079 return OP_X86_SUB_MEMBASE_REG;
10081 return OP_X86_AND_MEMBASE_REG;
10083 return OP_X86_OR_MEMBASE_REG;
10085 return OP_X86_XOR_MEMBASE_REG;
10088 return OP_X86_ADD_MEMBASE_IMM;
10091 return OP_X86_SUB_MEMBASE_IMM;
10094 return OP_X86_AND_MEMBASE_IMM;
10097 return OP_X86_OR_MEMBASE_IMM;
10100 return OP_X86_XOR_MEMBASE_IMM;
10106 #if defined(TARGET_AMD64)
/* amd64: 32-bit ops use the OP_X86_* forms, 64-bit ops the OP_AMD64_* forms */
10107 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
10112 return OP_X86_ADD_MEMBASE_REG;
10114 return OP_X86_SUB_MEMBASE_REG;
10116 return OP_X86_AND_MEMBASE_REG;
10118 return OP_X86_OR_MEMBASE_REG;
10120 return OP_X86_XOR_MEMBASE_REG;
10122 return OP_X86_ADD_MEMBASE_IMM;
10124 return OP_X86_SUB_MEMBASE_IMM;
10126 return OP_X86_AND_MEMBASE_IMM;
10128 return OP_X86_OR_MEMBASE_IMM;
10130 return OP_X86_XOR_MEMBASE_IMM;
10132 return OP_AMD64_ADD_MEMBASE_REG;
10134 return OP_AMD64_SUB_MEMBASE_REG;
10136 return OP_AMD64_AND_MEMBASE_REG;
10138 return OP_AMD64_OR_MEMBASE_REG;
10140 return OP_AMD64_XOR_MEMBASE_REG;
10143 return OP_AMD64_ADD_MEMBASE_IMM;
10146 return OP_AMD64_SUB_MEMBASE_IMM;
10149 return OP_AMD64_AND_MEMBASE_IMM;
10152 return OP_AMD64_OR_MEMBASE_IMM;
10155 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Fuse a condition-materializing opcode with a following byte store:
 * returns OP_X86_SETEQ_MEMBASE / OP_X86_SETNE_MEMBASE (x86/amd64) when
 * STORE_OPCODE is a one-byte store, or -1 when no fusion applies
 * (callers test for -1).  The matched source opcodes are on case
 * labels elided from this excerpt.
 */
10165 op_to_op_store_membase (int store_opcode, int opcode)
10167 #if defined(TARGET_X86) || defined(TARGET_AMD64)
10170 if (store_opcode == OP_STOREI1_MEMBASE_REG)
10171 return OP_X86_SETEQ_MEMBASE;
10173 if (store_opcode == OP_STOREI1_MEMBASE_REG)
10174 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Fold a load from a stack slot (LOAD_OPCODE) into OPCODE's first
 * source operand, returning an x86/amd64 *_MEMBASE opcode, or -1 when
 * the pair cannot be fused (callers test for -1).  Only loads whose
 * width matches the operation are folded.
 */
10182 op_to_op_src1_membase (int load_opcode, int opcode)
10185 /* FIXME: This has sign extension issues */
10187 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
10188 return OP_X86_COMPARE_MEMBASE8_IMM;
/* x86: only pointer-sized / 32-bit loads can be folded */
10191 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
10196 return OP_X86_PUSH_MEMBASE;
10197 case OP_COMPARE_IMM:
10198 case OP_ICOMPARE_IMM:
10199 return OP_X86_COMPARE_MEMBASE_IMM;
10202 return OP_X86_COMPARE_MEMBASE_REG;
10206 #ifdef TARGET_AMD64
10207 /* FIXME: This has sign extension issues */
10209 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
10210 return OP_X86_COMPARE_MEMBASE8_IMM;
10215 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10216 return OP_X86_PUSH_MEMBASE;
/* The block below is commented out in the original (32-bit-immediate limitation) */
10218 /* FIXME: This only works for 32 bit immediates
10219 case OP_COMPARE_IMM:
10220 case OP_LCOMPARE_IMM:
10221 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10222 return OP_AMD64_COMPARE_MEMBASE_IMM;
10224 case OP_ICOMPARE_IMM:
10225 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10226 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
10230 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10231 return OP_AMD64_COMPARE_MEMBASE_REG;
10234 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10235 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Fold a load from a stack slot (LOAD_OPCODE) into OPCODE's second
 * source operand, returning an x86/amd64 *_REG_MEMBASE opcode, or -1
 * when the pair cannot be fused (callers test for -1).  On amd64,
 * 32-bit operations use the OP_X86_* forms and 64-bit operations the
 * OP_AMD64_* forms, keyed off the load width.
 */
10244 op_to_op_src2_membase (int load_opcode, int opcode)
/* x86: only pointer-sized / 32-bit loads can be folded */
10247 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
10253 return OP_X86_COMPARE_REG_MEMBASE;
10255 return OP_X86_ADD_REG_MEMBASE;
10257 return OP_X86_SUB_REG_MEMBASE;
10259 return OP_X86_AND_REG_MEMBASE;
10261 return OP_X86_OR_REG_MEMBASE;
10263 return OP_X86_XOR_REG_MEMBASE;
10267 #ifdef TARGET_AMD64
10270 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10271 return OP_AMD64_ICOMPARE_REG_MEMBASE;
10275 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10276 return OP_AMD64_COMPARE_REG_MEMBASE;
10279 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10280 return OP_X86_ADD_REG_MEMBASE;
10282 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10283 return OP_X86_SUB_REG_MEMBASE;
10285 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10286 return OP_X86_AND_REG_MEMBASE;
10288 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10289 return OP_X86_OR_REG_MEMBASE;
10291 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10292 return OP_X86_XOR_REG_MEMBASE;
10294 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10295 return OP_AMD64_ADD_REG_MEMBASE;
10297 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10298 return OP_AMD64_SUB_REG_MEMBASE;
10300 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10301 return OP_AMD64_AND_REG_MEMBASE;
10303 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10304 return OP_AMD64_OR_REG_MEMBASE;
10306 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10307 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but the opcodes filtered by the #if
 * groups below (long shifts on 32-bit targets without native support,
 * and mul/div on targets that emulate them in software) are handled
 * specially — the case bodies are elided from this excerpt; presumably
 * they refuse the mapping so the emulation path is taken.  Everything
 * else delegates to mono_op_to_op_imm ().
 */
10315 mono_op_to_op_imm_noemul (int opcode)
10318 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
10323 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
10331 return mono_op_to_op_imm (opcode);
10335 #ifndef DISABLE_JIT
10338  * mono_handle_global_vregs:
10340  * Make vregs used in more than one bblock 'global', i.e. allocate a variable
10344 mono_handle_global_vregs (MonoCompile *cfg)
10346 gint32 *vreg_to_bb;
10347 MonoBasicBlock *bb;
/*
 * vreg_to_bb [vreg] tracks where a vreg was seen:
 *   0              = not yet seen (0 is a valid block num, see below),
 *   block_num + 1  = seen in exactly one bblock,
 *   -1             = seen in more than one bblock.
 * FIX: the array elements are gint32, so scale by sizeof (gint32) — the
 * previous sizeof (gint32*) only worked by over-allocating — and
 * parenthesize the +1 so the extra slot is element-sized.
 */
10350 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32) * (cfg->next_vreg + 1));
10352 #ifdef MONO_ARCH_SIMD_INTRINSICS
10353 if (cfg->uses_simd_intrinsics)
10354 mono_simd_simplify_indirection (cfg);
10357 /* Find local vregs used in more than one bb */
10358 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10359 MonoInst *ins = bb->code;
10360 int block_num = bb->block_num;
10362 if (cfg->verbose_level > 2)
10363 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
10366 for (; ins; ins = ins->next) {
10367 const char *spec = INS_INFO (ins->opcode);
10368 int regtype = 0, regindex;
10371 if (G_UNLIKELY (cfg->verbose_level > 2))
10372 mono_print_ins (ins);
/* All CIL opcodes must have been lowered by now */
10374 g_assert (ins->opcode >= MONO_CEE_LAST);
/* Visit the dreg and up to three sregs of the instruction */
10376 for (regindex = 0; regindex < 4; regindex ++) {
10379 if (regindex == 0) {
10380 regtype = spec [MONO_INST_DEST];
10381 if (regtype == ' ')
10384 } else if (regindex == 1) {
10385 regtype = spec [MONO_INST_SRC1];
10386 if (regtype == ' ')
10389 } else if (regindex == 2) {
10390 regtype = spec [MONO_INST_SRC2];
10391 if (regtype == ' ')
10394 } else if (regindex == 3) {
10395 regtype = spec [MONO_INST_SRC3];
10396 if (regtype == ' ')
10401 #if SIZEOF_REGISTER == 4
10402 /* In the LLVM case, the long opcodes are not decomposed */
10403 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
10405 * Since some instructions reference the original long vreg,
10406 * and some reference the two component vregs, it is quite hard
10407 * to determine when it needs to be global. So be conservative.
10409 if (!get_vreg_to_inst (cfg, vreg)) {
10410 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
10412 if (cfg->verbose_level > 2)
10413 printf ("LONG VREG R%d made global.\n", vreg);
10417 * Make the component vregs volatile since the optimizations can
10418 * get confused otherwise.
10420 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
10421 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
10425 g_assert (vreg != -1);
10427 prev_bb = vreg_to_bb [vreg];
10428 if (prev_bb == 0) {
10429 /* 0 is a valid block num */
10430 vreg_to_bb [vreg] = block_num + 1;
10431 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* vregs below MONO_MAX_IREGS/FREGS denote hard registers — presumably skipped here (body elided) */
10432 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
10435 if (!get_vreg_to_inst (cfg, vreg)) {
10436 if (G_UNLIKELY (cfg->verbose_level > 2))
10437 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
10441 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
10444 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
10447 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
10450 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
10453 g_assert_not_reached ();
10457 /* Flag as having been used in more than one bb */
10458 vreg_to_bb [vreg] = -1;
10464 /* If a variable is used in only one bblock, convert it into a local vreg */
10465 for (i = 0; i < cfg->num_varinfo; i++) {
10466 MonoInst *var = cfg->varinfo [i];
10467 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
10469 switch (var->type) {
10475 #if SIZEOF_REGISTER == 8
10478 #if !defined(TARGET_X86) && !defined(MONO_ARCH_SOFT_FLOAT)
10479 /* Enabling this screws up the fp stack on x86 */
10482 /* Arguments are implicitly global */
10483 /* Putting R4 vars into registers doesn't work currently */
10484 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg) {
10486 * Make that the variable's liveness interval doesn't contain a call, since
10487 * that would cause the lvreg to be spilled, making the whole optimization
10490 /* This is too slow for JIT compilation */
10492 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
10494 int def_index, call_index, ins_index;
10495 gboolean spilled = FALSE;
10500 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
10501 const char *spec = INS_INFO (ins->opcode);
10503 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
10504 def_index = ins_index;
/*
 * FIX: a use of the variable through EITHER source register after a
 * call means the lvreg would be spilled.  The second clause previously
 * re-tested SRC1/sreg1, so uses through sreg2 were never detected.
 */
10506 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
10507 ((spec [MONO_INST_SRC2] != ' ') && (ins->sreg2 == var->dreg))) {
10508 if (call_index > def_index) {
10514 if (MONO_IS_CALL (ins))
10515 call_index = ins_index;
10525 if (G_UNLIKELY (cfg->verbose_level > 2))
10526 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
/* Mark the variable dead so only the plain vreg remains */
10527 var->flags |= MONO_INST_IS_DEAD;
10528 cfg->vreg_to_inst [var->dreg] = NULL;
10535 * Compress the varinfo and vars tables so the liveness computation is faster and
10536 * takes up less space.
10539 for (i = 0; i < cfg->num_varinfo; ++i) {
10540 MonoInst *var = cfg->varinfo [i];
10541 if (pos < i && cfg->locals_start == i)
10542 cfg->locals_start = pos;
10543 if (!(var->flags & MONO_INST_IS_DEAD)) {
/* Compact live entries down to index pos and renumber them */
10545 cfg->varinfo [pos] = cfg->varinfo [i];
10546 cfg->varinfo [pos]->inst_c0 = pos;
10547 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
10548 cfg->vars [pos].idx = pos;
10549 #if SIZEOF_REGISTER == 4
10550 if (cfg->varinfo [pos]->type == STACK_I8) {
10551 /* Modify the two component vars too */
10554 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
10555 var1->inst_c0 = pos;
10556 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
10557 var1->inst_c0 = pos;
10564 cfg->num_varinfo = pos;
10565 if (cfg->locals_start > cfg->num_varinfo)
10566 cfg->locals_start = cfg->num_varinfo;
10570 * mono_spill_global_vars:
10572 * Generate spill code for variables which are not allocated to registers,
10573 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
10574 * code is generated which could be optimized by the local optimization passes.
10577 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
10579 MonoBasicBlock *bb;
10581 int orig_next_vreg;
10582 guint32 *vreg_to_lvreg;
10584 guint32 i, lvregs_len;
10585 gboolean dest_has_lvreg = FALSE;
10586 guint32 stacktypes [128];
10587 MonoInst **live_range_start, **live_range_end;
10588 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
10590 *need_local_opts = FALSE;
10592 memset (spec2, 0, sizeof (spec2));
10594 /* FIXME: Move this function to mini.c */
10595 stacktypes ['i'] = STACK_PTR;
10596 stacktypes ['l'] = STACK_I8;
10597 stacktypes ['f'] = STACK_R8;
10598 #ifdef MONO_ARCH_SIMD_INTRINSICS
10599 stacktypes ['x'] = STACK_VTYPE;
10602 #if SIZEOF_REGISTER == 4
10603 /* Create MonoInsts for longs */
10604 for (i = 0; i < cfg->num_varinfo; i++) {
10605 MonoInst *ins = cfg->varinfo [i];
10607 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
10608 switch (ins->type) {
10613 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
10616 g_assert (ins->opcode == OP_REGOFFSET);
10618 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
10620 tree->opcode = OP_REGOFFSET;
10621 tree->inst_basereg = ins->inst_basereg;
10622 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
10624 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
10626 tree->opcode = OP_REGOFFSET;
10627 tree->inst_basereg = ins->inst_basereg;
10628 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
10638 /* FIXME: widening and truncation */
10641 * As an optimization, when a variable allocated to the stack is first loaded into
10642 * an lvreg, we will remember the lvreg and use it the next time instead of loading
10643 * the variable again.
10645 orig_next_vreg = cfg->next_vreg;
10646 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
10647 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
10651 * These arrays contain the first and last instructions accessing a given
10653 * Since we emit bblocks in the same order we process them here, and we
10654 * don't split live ranges, these will precisely describe the live range of
10655 * the variable, i.e. the instruction range where a valid value can be found
10656 * in the variables location.
10657 * The live range is computed using the liveness info computed by the liveness pass.
10658 * We can't use vmv->range, since that is an abstract live range, and we need
10659 * one which is instruction precise.
10660 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
10662 /* FIXME: Only do this if debugging info is requested */
10663 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
10664 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
10665 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
10666 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
10668 /* Add spill loads/stores */
10669 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10672 if (cfg->verbose_level > 2)
10673 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
10675 /* Clear vreg_to_lvreg array */
10676 for (i = 0; i < lvregs_len; i++)
10677 vreg_to_lvreg [lvregs [i]] = 0;
10681 MONO_BB_FOR_EACH_INS (bb, ins) {
10682 const char *spec = INS_INFO (ins->opcode);
10683 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
10684 gboolean store, no_lvreg;
10685 int sregs [MONO_MAX_SRC_REGS];
10687 if (G_UNLIKELY (cfg->verbose_level > 2))
10688 mono_print_ins (ins);
10690 if (ins->opcode == OP_NOP)
10694 * We handle LDADDR here as well, since it can only be decomposed
10695 * when variable addresses are known.
10697 if (ins->opcode == OP_LDADDR) {
10698 MonoInst *var = ins->inst_p0;
10700 if (var->opcode == OP_VTARG_ADDR) {
10701 /* Happens on SPARC/S390 where vtypes are passed by reference */
10702 MonoInst *vtaddr = var->inst_left;
10703 if (vtaddr->opcode == OP_REGVAR) {
10704 ins->opcode = OP_MOVE;
10705 ins->sreg1 = vtaddr->dreg;
10707 else if (var->inst_left->opcode == OP_REGOFFSET) {
10708 ins->opcode = OP_LOAD_MEMBASE;
10709 ins->inst_basereg = vtaddr->inst_basereg;
10710 ins->inst_offset = vtaddr->inst_offset;
10714 g_assert (var->opcode == OP_REGOFFSET);
10716 ins->opcode = OP_ADD_IMM;
10717 ins->sreg1 = var->inst_basereg;
10718 ins->inst_imm = var->inst_offset;
10721 *need_local_opts = TRUE;
10722 spec = INS_INFO (ins->opcode);
10725 if (ins->opcode < MONO_CEE_LAST) {
10726 mono_print_ins (ins);
10727 g_assert_not_reached ();
10731 * Store opcodes have destbasereg in the dreg, but in reality, it is an
10735 if (MONO_IS_STORE_MEMBASE (ins)) {
10736 tmp_reg = ins->dreg;
10737 ins->dreg = ins->sreg2;
10738 ins->sreg2 = tmp_reg;
10741 spec2 [MONO_INST_DEST] = ' ';
10742 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10743 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10744 spec2 [MONO_INST_SRC3] = ' ';
10746 } else if (MONO_IS_STORE_MEMINDEX (ins))
10747 g_assert_not_reached ();
10752 if (G_UNLIKELY (cfg->verbose_level > 2)) {
10753 printf ("\t %.3s %d", spec, ins->dreg);
10754 num_sregs = mono_inst_get_src_registers (ins, sregs);
10755 for (srcindex = 0; srcindex < 3; ++srcindex)
10756 printf (" %d", sregs [srcindex]);
10763 regtype = spec [MONO_INST_DEST];
10764 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
10767 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
10768 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
10769 MonoInst *store_ins;
10771 MonoInst *def_ins = ins;
10772 int dreg = ins->dreg; /* The original vreg */
10774 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
10776 if (var->opcode == OP_REGVAR) {
10777 ins->dreg = var->dreg;
10778 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
10780 * Instead of emitting a load+store, use a _membase opcode.
10782 g_assert (var->opcode == OP_REGOFFSET);
10783 if (ins->opcode == OP_MOVE) {
10787 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
10788 ins->inst_basereg = var->inst_basereg;
10789 ins->inst_offset = var->inst_offset;
10792 spec = INS_INFO (ins->opcode);
10796 g_assert (var->opcode == OP_REGOFFSET);
10798 prev_dreg = ins->dreg;
10800 /* Invalidate any previous lvreg for this vreg */
10801 vreg_to_lvreg [ins->dreg] = 0;
10805 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
10807 store_opcode = OP_STOREI8_MEMBASE_REG;
10810 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
10812 if (regtype == 'l') {
10813 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
10814 mono_bblock_insert_after_ins (bb, ins, store_ins);
10815 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
10816 mono_bblock_insert_after_ins (bb, ins, store_ins);
10817 def_ins = store_ins;
10820 g_assert (store_opcode != OP_STOREV_MEMBASE);
10822 /* Try to fuse the store into the instruction itself */
10823 /* FIXME: Add more instructions */
10824 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
10825 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
10826 ins->inst_imm = ins->inst_c0;
10827 ins->inst_destbasereg = var->inst_basereg;
10828 ins->inst_offset = var->inst_offset;
10829 spec = INS_INFO (ins->opcode);
10830 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
10831 ins->opcode = store_opcode;
10832 ins->inst_destbasereg = var->inst_basereg;
10833 ins->inst_offset = var->inst_offset;
10837 tmp_reg = ins->dreg;
10838 ins->dreg = ins->sreg2;
10839 ins->sreg2 = tmp_reg;
10842 spec2 [MONO_INST_DEST] = ' ';
10843 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10844 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10845 spec2 [MONO_INST_SRC3] = ' ';
10847 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
10848 // FIXME: The backends expect the base reg to be in inst_basereg
10849 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
10851 ins->inst_basereg = var->inst_basereg;
10852 ins->inst_offset = var->inst_offset;
10853 spec = INS_INFO (ins->opcode);
10855 /* printf ("INS: "); mono_print_ins (ins); */
10856 /* Create a store instruction */
10857 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
10859 /* Insert it after the instruction */
10860 mono_bblock_insert_after_ins (bb, ins, store_ins);
10862 def_ins = store_ins;
10865 * We can't assign ins->dreg to var->dreg here, since the
10866 * sregs could use it. So set a flag, and do it after
10869 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
10870 dest_has_lvreg = TRUE;
10875 if (def_ins && !live_range_start [dreg]) {
10876 live_range_start [dreg] = def_ins;
10877 live_range_start_bb [dreg] = bb;
10884 num_sregs = mono_inst_get_src_registers (ins, sregs);
10885 for (srcindex = 0; srcindex < 3; ++srcindex) {
10886 regtype = spec [MONO_INST_SRC1 + srcindex];
10887 sreg = sregs [srcindex];
10889 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
10890 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
10891 MonoInst *var = get_vreg_to_inst (cfg, sreg);
10892 MonoInst *use_ins = ins;
10893 MonoInst *load_ins;
10894 guint32 load_opcode;
10896 if (var->opcode == OP_REGVAR) {
10897 sregs [srcindex] = var->dreg;
10898 //mono_inst_set_src_registers (ins, sregs);
10899 live_range_end [sreg] = use_ins;
10900 live_range_end_bb [sreg] = bb;
10904 g_assert (var->opcode == OP_REGOFFSET);
10906 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
10908 g_assert (load_opcode != OP_LOADV_MEMBASE);
10910 if (vreg_to_lvreg [sreg]) {
10911 g_assert (vreg_to_lvreg [sreg] != -1);
10913 /* The variable is already loaded to an lvreg */
10914 if (G_UNLIKELY (cfg->verbose_level > 2))
10915 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
10916 sregs [srcindex] = vreg_to_lvreg [sreg];
10917 //mono_inst_set_src_registers (ins, sregs);
10921 /* Try to fuse the load into the instruction */
10922 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
10923 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
10924 sregs [0] = var->inst_basereg;
10925 //mono_inst_set_src_registers (ins, sregs);
10926 ins->inst_offset = var->inst_offset;
10927 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
10928 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
10929 sregs [1] = var->inst_basereg;
10930 //mono_inst_set_src_registers (ins, sregs);
10931 ins->inst_offset = var->inst_offset;
10933 if (MONO_IS_REAL_MOVE (ins)) {
10934 ins->opcode = OP_NOP;
10937 //printf ("%d ", srcindex); mono_print_ins (ins);
10939 sreg = alloc_dreg (cfg, stacktypes [regtype]);
10941 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
10942 if (var->dreg == prev_dreg) {
10944 * sreg refers to the value loaded by the load
10945 * emitted below, but we need to use ins->dreg
10946 * since it refers to the store emitted earlier.
10950 g_assert (sreg != -1);
10951 vreg_to_lvreg [var->dreg] = sreg;
10952 g_assert (lvregs_len < 1024);
10953 lvregs [lvregs_len ++] = var->dreg;
10957 sregs [srcindex] = sreg;
10958 //mono_inst_set_src_registers (ins, sregs);
10960 if (regtype == 'l') {
10961 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
10962 mono_bblock_insert_before_ins (bb, ins, load_ins);
10963 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
10964 mono_bblock_insert_before_ins (bb, ins, load_ins);
10965 use_ins = load_ins;
10968 #if SIZEOF_REGISTER == 4
10969 g_assert (load_opcode != OP_LOADI8_MEMBASE);
10971 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
10972 mono_bblock_insert_before_ins (bb, ins, load_ins);
10973 use_ins = load_ins;
10977 if (var->dreg < orig_next_vreg) {
10978 live_range_end [var->dreg] = use_ins;
10979 live_range_end_bb [var->dreg] = bb;
10983 mono_inst_set_src_registers (ins, sregs);
10985 if (dest_has_lvreg) {
10986 g_assert (ins->dreg != -1);
10987 vreg_to_lvreg [prev_dreg] = ins->dreg;
10988 g_assert (lvregs_len < 1024);
10989 lvregs [lvregs_len ++] = prev_dreg;
10990 dest_has_lvreg = FALSE;
10994 tmp_reg = ins->dreg;
10995 ins->dreg = ins->sreg2;
10996 ins->sreg2 = tmp_reg;
10999 if (MONO_IS_CALL (ins)) {
11000 /* Clear vreg_to_lvreg array */
11001 for (i = 0; i < lvregs_len; i++)
11002 vreg_to_lvreg [lvregs [i]] = 0;
11004 } else if (ins->opcode == OP_NOP) {
11006 MONO_INST_NULLIFY_SREGS (ins);
11009 if (cfg->verbose_level > 2)
11010 mono_print_ins_index (1, ins);
11013 /* Extend the live range based on the liveness info */
11014 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
11015 for (i = 0; i < cfg->num_varinfo; i ++) {
11016 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
11018 if (vreg_is_volatile (cfg, vi->vreg))
11019 /* The liveness info is incomplete */
11022 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
11023 /* Live from at least the first ins of this bb */
11024 live_range_start [vi->vreg] = bb->code;
11025 live_range_start_bb [vi->vreg] = bb;
11028 if (mono_bitset_test_fast (bb->live_out_set, i)) {
11029 /* Live at least until the last ins of this bb */
11030 live_range_end [vi->vreg] = bb->last_ins;
11031 live_range_end_bb [vi->vreg] = bb;
11037 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
11039 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
11040 * by storing the current native offset into MonoMethodVar->live_range_start/end.
11042 if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
11043 for (i = 0; i < cfg->num_varinfo; ++i) {
11044 int vreg = MONO_VARINFO (cfg, i)->vreg;
11047 if (live_range_start [vreg]) {
11048 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
11050 ins->inst_c1 = vreg;
11051 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
11053 if (live_range_end [vreg]) {
11054 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
11056 ins->inst_c1 = vreg;
11057 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
11058 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
11060 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
11066 g_free (live_range_start);
11067 g_free (live_range_end);
11068 g_free (live_range_start_bb);
11069 g_free (live_range_end_bb);
11074 * - use 'iadd' instead of 'int_add'
11075 * - handling ovf opcodes: decompose in method_to_ir.
11076 * - unify iregs/fregs
11077 * -> partly done, the missing parts are:
11078 * - a more complete unification would involve unifying the hregs as well, so
11079 * code wouldn't need if (fp) all over the place. but that would mean the hregs
11080 * would no longer map to the machine hregs, so the code generators would need to
11081 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
11082 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
11083 * fp/non-fp branches speeds it up by about 15%.
11084 * - use sext/zext opcodes instead of shifts
11086 * - get rid of TEMPLOADs if possible and use vregs instead
11087 * - clean up usage of OP_P/OP_ opcodes
11088 * - cleanup usage of DUMMY_USE
11089 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
11091 * - set the stack type and allocate a dreg in the EMIT_NEW macros
11092 * - get rid of all the <foo>2 stuff when the new JIT is ready.
11093 * - make sure handle_stack_args () is called before the branch is emitted
11094 * - when the new IR is done, get rid of all unused stuff
11095 * - COMPARE/BEQ as separate instructions or unify them ?
11096 * - keeping them separate allows specialized compare instructions like
11097 * compare_imm, compare_membase
11098 * - most back ends unify fp compare+branch, fp compare+ceq
11099 * - integrate mono_save_args into inline_method
 11100 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
11101 * - handle long shift opts on 32 bit platforms somehow: they require
11102 * 3 sregs (2 for arg1 and 1 for arg2)
11103 * - make byref a 'normal' type.
11104 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
11105 * variable if needed.
11106 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
11107 * like inline_method.
11108 * - remove inlining restrictions
11109 * - fix LNEG and enable cfold of INEG
11110 * - generalize x86 optimizations like ldelema as a peephole optimization
11111 * - add store_mem_imm for amd64
11112 * - optimize the loading of the interruption flag in the managed->native wrappers
11113 * - avoid special handling of OP_NOP in passes
11114 * - move code inserting instructions into one function/macro.
11115 * - try a coalescing phase after liveness analysis
11116 * - add float -> vreg conversion + local optimizations on !x86
11117 * - figure out how to handle decomposed branches during optimizations, ie.
11118 * compare+branch, op_jump_table+op_br etc.
11119 * - promote RuntimeXHandles to vregs
11120 * - vtype cleanups:
11121 * - add a NEW_VARLOADA_VREG macro
11122 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
11123 * accessing vtype fields.
11124 * - get rid of I8CONST on 64 bit platforms
11125 * - dealing with the increase in code size due to branches created during opcode
11127 * - use extended basic blocks
11128 * - all parts of the JIT
11129 * - handle_global_vregs () && local regalloc
11130 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
11131 * - sources of increase in code size:
11134 * - isinst and castclass
11135 * - lvregs not allocated to global registers even if used multiple times
11136 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
11138 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
11139 * - add all micro optimizations from the old JIT
11140 * - put tree optimizations into the deadce pass
11141 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
11142 * specific function.
11143 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
11144 * fcompare + branchCC.
11145 * - create a helper function for allocating a stack slot, taking into account
11146 * MONO_CFG_HAS_SPILLUP.
11148 * - merge the ia64 switch changes.
11149 * - optimize mono_regstate2_alloc_int/float.
11150 * - fix the pessimistic handling of variables accessed in exception handler blocks.
11151 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
11152 * parts of the tree could be separated by other instructions, killing the tree
11153 * arguments, or stores killing loads etc. Also, should we fold loads into other
11154 * instructions if the result of the load is used multiple times ?
11155 * - make the REM_IMM optimization in mini-x86.c arch-independent.
11156 * - LAST MERGE: 108395.
11157 * - when returning vtypes in registers, generate IR and append it to the end of the
11158 * last bb instead of doing it in the epilog.
11159 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
11167 - When to decompose opcodes:
11168 - earlier: this makes some optimizations hard to implement, since the low level IR
 11169 no longer contains the necessary information. But it is easier to do.
11170 - later: harder to implement, enables more optimizations.
11171 - Branches inside bblocks:
11172 - created when decomposing complex opcodes.
11173 - branches to another bblock: harmless, but not tracked by the branch
11174 optimizations, so need to branch to a label at the start of the bblock.
11175 - branches to inside the same bblock: very problematic, trips up the local
 11176 reg allocator. Can be fixed by splitting the current bblock, but that is a
11177 complex operation, since some local vregs can become global vregs etc.
11178 - Local/global vregs:
11179 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
11180 local register allocator.
11181 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
11182 structure, created by mono_create_var (). Assigned to hregs or the stack by
11183 the global register allocator.
11184 - When to do optimizations like alu->alu_imm:
11185 - earlier -> saves work later on since the IR will be smaller/simpler
11186 - later -> can work on more instructions
11187 - Handling of valuetypes:
11188 - When a vtype is pushed on the stack, a new temporary is created, an
11189 instruction computing its address (LDADDR) is emitted and pushed on
11190 the stack. Need to optimize cases when the vtype is used immediately as in
11191 argument passing, stloc etc.
11192 - Instead of the to_end stuff in the old JIT, simply call the function handling
11193 the values on the stack before emitting the last instruction of the bb.
11196 #endif /* DISABLE_JIT */