2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
22 #ifdef HAVE_SYS_TIME_H
30 #include <mono/utils/memcheck.h>
32 #include <mono/metadata/assembly.h>
33 #include <mono/metadata/loader.h>
34 #include <mono/metadata/tabledefs.h>
35 #include <mono/metadata/class.h>
36 #include <mono/metadata/object.h>
37 #include <mono/metadata/exception.h>
38 #include <mono/metadata/opcodes.h>
39 #include <mono/metadata/mono-endian.h>
40 #include <mono/metadata/tokentype.h>
41 #include <mono/metadata/tabledefs.h>
42 #include <mono/metadata/marshal.h>
43 #include <mono/metadata/debug-helpers.h>
44 #include <mono/metadata/mono-debug.h>
45 #include <mono/metadata/gc-internal.h>
46 #include <mono/metadata/security-manager.h>
47 #include <mono/metadata/threads-types.h>
48 #include <mono/metadata/security-core-clr.h>
49 #include <mono/metadata/monitor.h>
50 #include <mono/metadata/profiler-private.h>
51 #include <mono/metadata/profiler.h>
52 #include <mono/utils/mono-compiler.h>
53 #include <mono/metadata/mono-basic-block.h>
60 #include "jit-icalls.h"
62 #include "debugger-agent.h"
/*
 * Tuning constants and error-exit helper macros for the CIL-to-IR pass.
 * The *_FAILURE macros record an exception type/message on the MonoCompile
 * and jump to a function-local `exception_exit` label, so they may only be
 * expanded inside functions that define that label.
 * NOTE(review): several macro bodies below are truncated in this view —
 * confirm the closing `} while (0)` lines against the full file.
 */
64 #define BRANCH_COST 100
65 #define INLINE_LENGTH_LIMIT 20
/* Abort inlining: only acts when we are compiling an inlined callee
 * (cfg->method != method) that is not a runtime wrapper. */
66 #define INLINE_FAILURE do {\
67 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE))\
/* Bail out of IR generation as soon as an exception has been recorded
 * on the compile. */
70 #define CHECK_CFG_EXCEPTION do {\
71 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
/* Raise a MethodAccessException: the message names both the inaccessible
 * callee (cil_method) and the calling method; both temp strings are freed. */
74 #define METHOD_ACCESS_FAILURE do { \
75 char *method_fname = mono_method_full_name (method, TRUE); \
76 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
77 cfg->exception_type = MONO_EXCEPTION_METHOD_ACCESS; \
78 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
79 g_free (method_fname); \
80 g_free (cil_method_fname); \
81 goto exception_exit; \
/* Raise a FieldAccessException — analogous to METHOD_ACCESS_FAILURE but
 * naming the inaccessible field. */
83 #define FIELD_ACCESS_FAILURE do { \
84 char *method_fname = mono_method_full_name (method, TRUE); \
85 char *field_fname = mono_field_full_name (field); \
86 cfg->exception_type = MONO_EXCEPTION_FIELD_ACCESS; \
87 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
88 g_free (method_fname); \
89 g_free (field_fname); \
90 goto exception_exit; \
/* When generic code sharing is active and the given opcode cannot be
 * shared, mark the compile as GENERIC_SHARING_FAILED (the method will be
 * recompiled unshared); optionally logs the failure when verbose. */
92 #define GENERIC_SHARING_FAILURE(opcode) do { \
93 if (cfg->generic_sharing_context) { \
94 if (cfg->verbose_level > 2) \
95 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
96 cfg->exception_type = MONO_EXCEPTION_GENERIC_SHARING_FAILED; \
97 goto exception_exit; \
101 /* Determine whenever 'ins' represents a load of the 'this' argument */
102 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
/* Forward declarations for the ldind/stind → *_MEMBASE opcode mappers
 * defined later in this file. */
104 static int ldind_to_load_membase (int opcode);
105 static int stind_to_store_membase (int opcode);
/* Immediate-operand opcode conversion helpers (defined elsewhere in mini). */
107 int mono_op_to_op_imm (int opcode);
108 int mono_op_to_op_imm_noemul (int opcode);
/* IR emission entry points shared with other mini translation units. */
110 MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
111 void mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native);
112 void mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass);
114 /* helper methods signature */
/* Signatures for the runtime trampolines, defined in another file. */
115 extern MonoMethodSignature *helper_sig_class_init_trampoline;
116 extern MonoMethodSignature *helper_sig_domain_get;
117 extern MonoMethodSignature *helper_sig_generic_class_init_trampoline;
118 extern MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
119 extern MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline;
120 extern MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm;
123 * Instruction metadata
/* Generators used with mini-ops.h to build per-opcode dreg/sreg
 * descriptor tables; MINI_OP entries have no third source. */
131 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
132 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
/* Opcode metadata tables generated by expanding mini-ops.h with the
 * MINI_OP/MINI_OP3 generators above; layout differs on 64-bit targets. */
138 #if SIZEOF_REGISTER == 8
143 /* keep in sync with the enum in mini.h */
146 #include "mini-ops.h"
/* Redefine the generators to count source registers instead of
 * emitting register-kind characters. */
151 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
152 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
154 * This should contain the index of the last sreg + 1. This is not the same
155 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
157 const gint8 ins_sreg_counts[] = {
158 #include "mini-ops.h"
/* Initialize liveness bookkeeping for a variable: 0xffff marks
 * "no first use recorded yet". */
163 #define MONO_INIT_VARINFO(vi,id) do { \
164 (vi)->range.first_use.pos.bid = 0xffff; \
/* Copy all three source registers of INS from the REGS array
 * (regs must have at least 3 entries). */
170 mono_inst_set_src_registers (MonoInst *ins, int *regs)
172 ins->sreg1 = regs [0];
173 ins->sreg2 = regs [1];
174 ins->sreg3 = regs [2];
/* Public wrappers around the inline virtual-register allocators:
 * integer, float, pointer-sized, and stack-type-driven dreg allocation. */
178 mono_alloc_ireg (MonoCompile *cfg)
180 return alloc_ireg (cfg);
184 mono_alloc_freg (MonoCompile *cfg)
186 return alloc_freg (cfg);
190 mono_alloc_preg (MonoCompile *cfg)
192 return alloc_preg (cfg);
196 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
198 return alloc_dreg (cfg, stack_type);
/*
 * mono_type_to_regmove:
 * Map a MonoType to the move opcode appropriate for values of that type
 * (int/long/float/pointer moves).  Enums are resolved to their underlying
 * basetype and generic instances to their container class before
 * re-dispatching; unknown types are a hard error.
 */
202 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
208 switch (type->type) {
211 case MONO_TYPE_BOOLEAN:
223 case MONO_TYPE_FNPTR:
/* All reference types share a pointer-sized move. */
225 case MONO_TYPE_CLASS:
226 case MONO_TYPE_STRING:
227 case MONO_TYPE_OBJECT:
228 case MONO_TYPE_SZARRAY:
229 case MONO_TYPE_ARRAY:
233 #if SIZEOF_REGISTER == 8
242 case MONO_TYPE_VALUETYPE:
/* Enums move like their underlying primitive type. */
243 if (type->data.klass->enumtype) {
244 type = mono_class_enum_basetype (type->data.klass);
247 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
250 case MONO_TYPE_TYPEDBYREF:
252 case MONO_TYPE_GENERICINST:
253 type = &type->data.generic_class->container_class->byval_arg;
/* Type variables are only legal under generic sharing. */
257 g_assert (cfg->generic_sharing_context);
260 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/*
 * mono_print_bb:
 * Debug dump of one basic block: its number, incoming and outgoing edges
 * (with depth-first numbers), then every instruction in the block.
 */
266 mono_print_bb (MonoBasicBlock *bb, const char *msg)
271 printf ("\n%s %d: [IN: ", msg, bb->block_num);
272 for (i = 0; i < bb->in_count; ++i)
273 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
275 for (i = 0; i < bb->out_count; ++i)
276 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
278 for (tree = bb->code; tree; tree = tree->next)
279 mono_print_ins_index (-1, tree);
283 * Can't put this at the beginning, since other files reference stuff from this
/* Mark the method unverified: optionally trap under the debugger, else
 * jump to the function-local `unverified` label. */
288 #define UNVERIFIED do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto unverified; } while (0)
/* Look up (or lazily create and register) the basic block starting at IL
 * offset `ip`; out-of-range targets are unverifiable. */
290 #define GET_BBLOCK(cfg,tblock,ip) do { \
291 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
293 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
294 NEW_BBLOCK (cfg, (tblock)); \
295 (tblock)->cil_code = (ip); \
296 ADD_BBLOCK (cfg, (tblock)); \
/* x86/amd64 only: emit an OP_X86_LEA computing sr1 + sr2<<shift + imm
 * into a fresh pointer register. */
300 #if defined(TARGET_X86) || defined(TARGET_AMD64)
301 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
302 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
303 (dest)->dreg = alloc_preg ((cfg)); \
304 (dest)->sreg1 = (sr1); \
305 (dest)->sreg2 = (sr2); \
306 (dest)->inst_imm = (imm); \
307 (dest)->backend.shift_amount = (shift); \
308 MONO_ADD_INS ((cfg)->cbb, (dest)); \
/* On 64-bit targets, widen an I4 operand to pointer width (sign-extend)
 * before mixing it with a PTR operand; no-op on 32-bit targets. */
312 #if SIZEOF_REGISTER == 8
313 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
314 /* FIXME: Need to add many more cases */ \
315 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
317 int dr = alloc_preg (cfg); \
318 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
319 (ins)->sreg2 = widen->dreg; \
323 #define ADD_WIDEN_OP(ins, arg1, arg2)
/* Pop two values, emit the type-specialized binary op, push the result
 * (decomposed into machine-level opcodes where needed). */
326 #define ADD_BINOP(op) do { \
327 MONO_INST_NEW (cfg, ins, (op)); \
329 ins->sreg1 = sp [0]->dreg; \
330 ins->sreg2 = sp [1]->dreg; \
331 type_from_op (ins, sp [0], sp [1]); \
333 /* Have to insert a widening op */ \
334 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
335 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
336 MONO_ADD_INS ((cfg)->cbb, (ins)); \
337 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
/* Same as ADD_BINOP but for one-operand (unary) CIL opcodes. */
340 #define ADD_UNOP(op) do { \
341 MONO_INST_NEW (cfg, ins, (op)); \
343 ins->sreg1 = sp [0]->dreg; \
344 type_from_op (ins, sp [0], NULL); \
346 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
347 MONO_ADD_INS ((cfg)->cbb, (ins)); \
348 *sp++ = mono_decompose_opcode (cfg, ins); \
/* Emit a compare + conditional-branch pair, wiring up the true/false
 * target bblocks and flushing any stack items left at the branch. */
351 #define ADD_BINCOND(next_block) do { \
354 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
355 cmp->sreg1 = sp [0]->dreg; \
356 cmp->sreg2 = sp [1]->dreg; \
357 type_from_op (cmp, sp [0], sp [1]); \
359 type_from_op (ins, sp [0], sp [1]); \
360 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
361 GET_BBLOCK (cfg, tblock, target); \
362 link_bblock (cfg, bblock, tblock); \
363 ins->inst_true_bb = tblock; \
364 if ((next_block)) { \
365 link_bblock (cfg, bblock, (next_block)); \
366 ins->inst_false_bb = (next_block); \
367 start_new_bblock = 1; \
369 GET_BBLOCK (cfg, tblock, ip); \
370 link_bblock (cfg, bblock, tblock); \
371 ins->inst_false_bb = tblock; \
372 start_new_bblock = 2; \
374 if (sp != stack_start) { \
375 handle_stack_args (cfg, stack_start, sp - stack_start); \
376 CHECK_UNVERIFIABLE (cfg); \
378 MONO_ADD_INS (bblock, cmp); \
379 MONO_ADD_INS (bblock, ins); \
383 * link_bblock: Links two basic blocks
385 * links two basic blocks in the control flow graph, the 'from'
386 * argument is the starting block and the 'to' argument is the block
387 * the control flow ends to after 'from'.
/* Adds `to` to from->out_bb and `from` to to->in_bb, growing the edge
 * arrays from the compile mempool.  Duplicate edges are detected and
 * skipped, so calling this twice for the same pair is safe. */
390 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
392 MonoBasicBlock **newa;
/* Verbose tracing of the edge being added (entry/exit blocks have
 * no cil_code). */
396 if (from->cil_code) {
398 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
400 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
403 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
405 printf ("edge from entry to exit\n");
/* Skip if the outgoing edge already exists. */
410 for (i = 0; i < from->out_count; ++i) {
411 if (to == from->out_bb [i]) {
417 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
418 for (i = 0; i < from->out_count; ++i) {
419 newa [i] = from->out_bb [i];
/* Mirror the same growth for the incoming-edge list of `to`. */
427 for (i = 0; i < to->in_count; ++i) {
428 if (from == to->in_bb [i]) {
434 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
435 for (i = 0; i < to->in_count; ++i) {
436 newa [i] = to->in_bb [i];
/* Public wrapper around the static link_bblock above. */
445 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
447 link_bblock (cfg, from, to);
451 * mono_find_block_region:
453 * We mark each basic block with a region ID. We use that to avoid BB
454 * optimizations when blocks are in different regions.
457 * A region token that encodes where this region is, and information
458 * about the clause owner for this block.
460 * The region encodes the try/catch/filter clause that owns this block
461 * as well as the type. -1 is a special value that represents a block
462 * that is in none of try/catch/filter.
/* Token layout: (clause_index + 1) << 8, OR'd with a MONO_REGION_* kind
 * and the clause flags.  Filter ranges are checked first because a
 * filter's offset span is separate from its handler span. */
465 mono_find_block_region (MonoCompile *cfg, int offset)
467 MonoMethod *method = cfg->method;
468 MonoMethodHeader *header = mono_method_get_header (method);
469 MonoExceptionClause *clause;
472 for (i = 0; i < header->num_clauses; ++i) {
473 clause = &header->clauses [i];
474 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
475 (offset < (clause->handler_offset)))
476 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
/* Inside a handler body: distinguish finally / fault / catch. */
478 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
479 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
480 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
481 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
482 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
484 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
/* Inside the protected (try) range of the clause. */
487 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
488 return ((i + 1) << 8) | clause->flags;
/*
 * mono_find_final_block:
 * Collect the handler bblocks (e.g. finally clauses, selected by `type`)
 * whose protected range contains `ip` but not `target` — i.e. the
 * handlers that must run when control leaves the clause via this branch.
 * Returns a GList of MonoBasicBlock* (appended in clause order).
 */
495 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
497 MonoMethod *method = cfg->method;
498 MonoMethodHeader *header = mono_method_get_header (method);
499 MonoExceptionClause *clause;
500 MonoBasicBlock *handler;
504 for (i = 0; i < header->num_clauses; ++i) {
505 clause = &header->clauses [i];
/* The branch exits this clause: source inside, target outside. */
506 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
507 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
508 if (clause->flags == type) {
509 handler = cfg->cil_offset_to_bb [clause->handler_offset];
511 res = g_list_append (res, handler);
/*
 * mono_create_spvar_for_region:
 * Get (or lazily create) the stack-pointer spill variable associated with
 * an exception-handling region; the variable is flagged MONO_INST_INDIRECT
 * so the register allocator keeps it on the stack.
 */
519 mono_create_spvar_for_region (MonoCompile *cfg, int region)
523 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
527 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
528 /* prevent it from being register allocated */
529 var->flags |= MONO_INST_INDIRECT;
531 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/* Look up the exception-object variable for a handler's IL offset;
 * returns NULL when none has been created. */
535 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
537 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/* Get-or-create variant of the above: the exception variable is an
 * object-typed local, also forced onto the stack. */
541 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
545 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
549 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
550 /* prevent it from being register allocated */
551 var->flags |= MONO_INST_INDIRECT;
553 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
559 * Returns the type used in the eval stack when @type is loaded.
560 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
/* Sets inst->type (STACK_*) and inst->klass from the MonoType being
 * pushed; enums and generic instances are unwrapped and re-dispatched. */
563 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
567 inst->klass = klass = mono_class_from_mono_type (type);
/* byref values are managed pointers on the eval stack. */
569 inst->type = STACK_MP;
574 switch (type->type) {
576 inst->type = STACK_INV;
580 case MONO_TYPE_BOOLEAN:
586 inst->type = STACK_I4;
591 case MONO_TYPE_FNPTR:
592 inst->type = STACK_PTR;
594 case MONO_TYPE_CLASS:
595 case MONO_TYPE_STRING:
596 case MONO_TYPE_OBJECT:
597 case MONO_TYPE_SZARRAY:
598 case MONO_TYPE_ARRAY:
599 inst->type = STACK_OBJ;
603 inst->type = STACK_I8;
607 inst->type = STACK_R8;
609 case MONO_TYPE_VALUETYPE:
/* Enums load as their underlying primitive. */
610 if (type->data.klass->enumtype) {
611 type = mono_class_enum_basetype (type->data.klass);
615 inst->type = STACK_VTYPE;
618 case MONO_TYPE_TYPEDBYREF:
619 inst->klass = mono_defaults.typed_reference_class;
620 inst->type = STACK_VTYPE;
622 case MONO_TYPE_GENERICINST:
623 type = &type->data.generic_class->container_class->byval_arg;
626 case MONO_TYPE_MVAR :
627 /* FIXME: all the arguments must be references for now,
628 * later look inside cfg and see if the arg num is
/* Type variables are only legal under generic sharing. */
631 g_assert (cfg->generic_sharing_context);
632 inst->type = STACK_OBJ;
635 g_error ("unknown type 0x%02x in eval stack type", type->type);
640 * The following tables are used to quickly validate the IL code in type_from_op ().
/* Result stack type for numeric binops, indexed by [lhs][rhs] stack
 * types; STACK_INV marks an illegal operand combination. */
643 bin_num_table [STACK_MAX] [STACK_MAX] = {
644 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
645 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
646 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
647 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
648 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
649 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
650 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
651 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Result type of a negation, indexed by the operand's stack type. */
656 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
659 /* reduce the size of this table */
/* Result stack type for integer-only binops (and/or/xor/rem...). */
661 bin_int_table [STACK_MAX] [STACK_MAX] = {
662 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
663 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
664 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
665 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
666 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
667 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
668 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
669 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Comparability classes: 0 = invalid, nonzero values encode which
 * comparisons are legal for the operand pair (1 = all, 2/3/4 = subsets
 * such as equality-only for pointer/reference mixes). */
673 bin_comp_table [STACK_MAX] [STACK_MAX] = {
674 /* Inv i L p F & O vt */
676 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
677 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
678 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
679 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
680 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
681 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
682 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
685 /* reduce the size of this table */
/* Result stack type for shift operations [value][shift-amount]. */
687 shift_table [STACK_MAX] [STACK_MAX] = {
688 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
689 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
690 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
691 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
692 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
693 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
694 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
695 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
699 * Tables to map from the non-specific opcode to the matching
700 * type-specific opcode.
702 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
/* Each entry is an opcode delta added to the generic CIL opcode to
 * obtain its I/L/P/F-specialized IR opcode, indexed by stack type. */
704 binops_op_map [STACK_MAX] = {
705 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
708 /* handles from CEE_NEG to CEE_CONV_U8 */
710 unops_op_map [STACK_MAX] = {
711 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
714 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
716 ovfops_op_map [STACK_MAX] = {
717 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
720 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
722 ovf2ops_op_map [STACK_MAX] = {
723 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
726 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
728 ovf3ops_op_map [STACK_MAX] = {
729 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
732 /* handles from CEE_BEQ to CEE_BLT_UN */
734 beqops_op_map [STACK_MAX] = {
735 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
738 /* handles from CEE_CEQ to CEE_CLT_UN */
740 ceqops_op_map [STACK_MAX] = {
741 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
745 * Sets ins->type (the type on the eval stack) according to the
746 * type of the opcode and the arguments to it.
747 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
749 * FIXME: this function sets ins->type unconditionally in some cases, but
750 * it should set it to invalid for some types (a conv.x on an object)
/* Central opcode specializer: validates operand stack types against the
 * tables above and rewrites ins->opcode (via the *_op_map deltas) into
 * the I4/I8/PTR/R8-specific IR opcode. */
753 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
755 switch (ins->opcode) {
762 /* FIXME: check unverifiable args for STACK_MP */
/* Arithmetic binops: result type from bin_num_table, opcode
 * specialized by the result's stack type. */
763 ins->type = bin_num_table [src1->type] [src2->type];
764 ins->opcode += binops_op_map [ins->type];
/* Integer-only binops (and/or/xor/...). */
771 ins->type = bin_int_table [src1->type] [src2->type];
772 ins->opcode += binops_op_map [ins->type];
/* Shifts: amount may be I4 or PTR. */
777 ins->type = shift_table [src1->type] [src2->type];
778 ins->opcode += binops_op_map [ins->type];
/* Compares: pick L/F/I compare by operand width. */
783 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
784 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
785 ins->opcode = OP_LCOMPARE;
786 else if (src1->type == STACK_R8)
787 ins->opcode = OP_FCOMPARE;
789 ins->opcode = OP_ICOMPARE;
791 case OP_ICOMPARE_IMM:
792 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
793 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
794 ins->opcode = OP_LCOMPARE_IMM;
/* Conditional branches specialize on the first operand's type. */
806 ins->opcode += beqops_op_map [src1->type];
809 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
810 ins->opcode += ceqops_op_map [src1->type];
/* Ordered-only compares: bit 0 of the comparability class gates them. */
816 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
817 ins->opcode += ceqops_op_map [src1->type];
/* Unary neg/not. */
821 ins->type = neg_table [src1->type];
822 ins->opcode += unops_op_map [ins->type];
825 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
826 ins->type = src1->type;
828 ins->type = STACK_INV;
829 ins->opcode += unops_op_map [ins->type];
/* Narrowing conversions to small ints produce an I4. */
835 ins->type = STACK_I4;
836 ins->opcode += unops_op_map [src1->type];
/* conv.r.un: unsigned int → float. */
839 ins->type = STACK_R8;
840 switch (src1->type) {
843 ins->opcode = OP_ICONV_TO_R_UN;
846 ins->opcode = OP_LCONV_TO_R_UN;
/* Overflow-checked conversions to 32-bit results. */
850 case CEE_CONV_OVF_I1:
851 case CEE_CONV_OVF_U1:
852 case CEE_CONV_OVF_I2:
853 case CEE_CONV_OVF_U2:
854 case CEE_CONV_OVF_I4:
855 case CEE_CONV_OVF_U4:
856 ins->type = STACK_I4;
857 ins->opcode += ovf3ops_op_map [src1->type];
859 case CEE_CONV_OVF_I_UN:
860 case CEE_CONV_OVF_U_UN:
861 ins->type = STACK_PTR;
862 ins->opcode += ovf2ops_op_map [src1->type];
864 case CEE_CONV_OVF_I1_UN:
865 case CEE_CONV_OVF_I2_UN:
866 case CEE_CONV_OVF_I4_UN:
867 case CEE_CONV_OVF_U1_UN:
868 case CEE_CONV_OVF_U2_UN:
869 case CEE_CONV_OVF_U4_UN:
870 ins->type = STACK_I4;
871 ins->opcode += ovf2ops_op_map [src1->type];
/* conv.u: pointer-sized unsigned result; on 64-bit a plain move
 * suffices for pointer operands. */
874 ins->type = STACK_PTR;
875 switch (src1->type) {
877 ins->opcode = OP_ICONV_TO_U;
881 #if SIZEOF_REGISTER == 8
882 ins->opcode = OP_LCONV_TO_U;
884 ins->opcode = OP_MOVE;
888 ins->opcode = OP_LCONV_TO_U;
891 ins->opcode = OP_FCONV_TO_U;
/* Conversions producing I8. */
897 ins->type = STACK_I8;
898 ins->opcode += unops_op_map [src1->type];
900 case CEE_CONV_OVF_I8:
901 case CEE_CONV_OVF_U8:
902 ins->type = STACK_I8;
903 ins->opcode += ovf3ops_op_map [src1->type];
905 case CEE_CONV_OVF_U8_UN:
906 case CEE_CONV_OVF_I8_UN:
907 ins->type = STACK_I8;
908 ins->opcode += ovf2ops_op_map [src1->type];
/* Conversions producing R8. */
912 ins->type = STACK_R8;
913 ins->opcode += unops_op_map [src1->type];
916 ins->type = STACK_R8;
920 ins->type = STACK_I4;
921 ins->opcode += ovfops_op_map [src1->type];
926 ins->type = STACK_PTR;
927 ins->opcode += ovfops_op_map [src1->type];
/* Overflow-checked arithmetic: R8 results are invalid here. */
935 ins->type = bin_num_table [src1->type] [src2->type];
936 ins->opcode += ovfops_op_map [src1->type];
937 if (ins->type == STACK_R8)
938 ins->type = STACK_INV;
/* Memory loads: the loaded value's stack type is fixed by the opcode. */
940 case OP_LOAD_MEMBASE:
941 ins->type = STACK_PTR;
943 case OP_LOADI1_MEMBASE:
944 case OP_LOADU1_MEMBASE:
945 case OP_LOADI2_MEMBASE:
946 case OP_LOADU2_MEMBASE:
947 case OP_LOADI4_MEMBASE:
948 case OP_LOADU4_MEMBASE:
949 ins->type = STACK_PTR;
951 case OP_LOADI8_MEMBASE:
952 ins->type = STACK_I8;
954 case OP_LOADR4_MEMBASE:
955 case OP_LOADR8_MEMBASE:
956 ins->type = STACK_R8;
959 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
/* Managed pointers get a conservative klass. */
963 if (ins->type == STACK_MP)
964 ins->klass = mono_defaults.object_class;
/* Stack type produced when loading each MonoType kind (parallel to the
 * MONO_TYPE_* enum start). */
969 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
975 param_table [STACK_MAX] [STACK_MAX] = {
/*
 * check_values_to_signature:
 * Validate that the argument instructions match the callee signature
 * (byref-ness, reference vs. value kinds, float widths).
 * NOTE(review): the `this` check at the top is truncated in this view.
 */
980 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
984 switch (args->type) {
994 for (i = 0; i < sig->param_count; ++i) {
995 switch (args [i].type) {
/* A managed-pointer argument requires a byref parameter and vice versa. */
999 if (!sig->params [i]->byref)
1003 if (sig->params [i]->byref)
1005 switch (sig->params [i]->type) {
1006 case MONO_TYPE_CLASS:
1007 case MONO_TYPE_STRING:
1008 case MONO_TYPE_OBJECT:
1009 case MONO_TYPE_SZARRAY:
1010 case MONO_TYPE_ARRAY:
/* R8 on the stack must map to an R4 or R8 parameter. */
1017 if (sig->params [i]->byref)
1019 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1028 /*if (!param_table [args [i].type] [sig->params [i]->type])
1036 * When we need a pointer to the current domain many times in a method, we
1037 * call mono_domain_get() once and we store the result in a local variable.
1038 * This function returns the variable that represents the MonoDomain*.
1040 inline static MonoInst *
/* Lazily creates cfg->domainvar, a pointer-sized local caching the
 * current MonoDomain*. */
1041 mono_get_domainvar (MonoCompile *cfg)
1043 if (!cfg->domainvar)
1044 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1045 return cfg->domainvar;
1049 * The got_var contains the address of the Global Offset Table when AOT
/* Lazily creates cfg->got_var; only meaningful on targets that define
 * MONO_ARCH_NEED_GOT_VAR and only when compiling AOT. */
1053 mono_get_got_var (MonoCompile *cfg)
1055 #ifdef MONO_ARCH_NEED_GOT_VAR
1056 if (!cfg->compile_aot)
1058 if (!cfg->got_var) {
1059 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1061 return cfg->got_var;
/* Lazily creates cfg->rgctx_var, the runtime-generic-context/vtable
 * variable used by shared generic code; forced stack-allocated. */
1068 mono_get_vtable_var (MonoCompile *cfg)
1070 g_assert (cfg->generic_sharing_context);
1072 if (!cfg->rgctx_var) {
1073 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1074 /* force the var to be stack allocated */
1075 cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
1078 return cfg->rgctx_var;
/* Map an instruction's eval-stack type back to a canonical MonoType
 * (objects/vtypes use the instruction's klass). */
1082 type_from_stack_type (MonoInst *ins) {
1083 switch (ins->type) {
1084 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1085 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1086 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1087 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1089 return &ins->klass->this_arg;
1090 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1091 case STACK_VTYPE: return &ins->klass->byval_arg;
1093 g_error ("stack type %d to monotype not handled\n", ins->type);
/* Inverse direction: classify a MonoType by the STACK_* slot it
 * occupies; underlying (enum-stripped) type is used. */
1098 static G_GNUC_UNUSED int
1099 type_to_stack_type (MonoType *t)
1101 t = mono_type_get_underlying_type (t);
1105 case MONO_TYPE_BOOLEAN:
1108 case MONO_TYPE_CHAR:
1115 case MONO_TYPE_FNPTR:
1117 case MONO_TYPE_CLASS:
1118 case MONO_TYPE_STRING:
1119 case MONO_TYPE_OBJECT:
1120 case MONO_TYPE_SZARRAY:
1121 case MONO_TYPE_ARRAY:
1129 case MONO_TYPE_VALUETYPE:
1130 case MONO_TYPE_TYPEDBYREF:
1132 case MONO_TYPE_GENERICINST:
1133 if (mono_type_generic_inst_is_valuetype (t))
1139 g_assert_not_reached ();
/*
 * array_access_to_klass:
 * Map a CIL ldelem/stelem opcode to the element MonoClass it accesses.
 * Unhandled opcodes assert.
 */
1146 array_access_to_klass (int opcode)
1150 return mono_defaults.byte_class;
1152 return mono_defaults.uint16_class;
1155 return mono_defaults.int_class;
1158 return mono_defaults.sbyte_class;
1161 return mono_defaults.int16_class;
1164 return mono_defaults.int32_class;
1166 return mono_defaults.uint32_class;
1169 return mono_defaults.int64_class;
1172 return mono_defaults.single_class;
1175 return mono_defaults.double_class;
1176 case CEE_LDELEM_REF:
1177 case CEE_STELEM_REF:
1178 return mono_defaults.object_class;
1180 g_assert_not_reached ();
1186 * We try to share variables when possible
/* Returns a local variable used to carry a stack slot's value across a
 * basic-block boundary.  Variables are cached per (stack type, slot) in
 * cfg->intvars so the same slot reuses the same temp; slots beyond
 * max_stack (possible when inlining) always get a fresh variable. */
1189 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1194 /* inlining can result in deeper stacks */
1195 if (slot >= mono_method_get_header (cfg->method)->max_stack)
1196 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* Cache key: stack type (1-based) within a STACK_MAX-wide row per slot. */
1198 pos = ins->type - 1 + slot * STACK_MAX;
1200 switch (ins->type) {
1207 if ((vnum = cfg->intvars [pos]))
1208 return cfg->varinfo [vnum];
1209 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1210 cfg->intvars [pos] = res->inst_c0;
1213 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/*
 * mono_save_token_info:
 * Under AOT compilation, remember the (image, token) pair that produced
 * `key` so the AOT compiler can re-resolve it later.  Skipped when a
 * generic context is active (token alone would be ambiguous) or the
 * token has no table (wrapper references).
 */
1219 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1222 * Don't use this if a generic_context is set, since that means AOT can't
1223 * look up the method using just the image+token.
1224 * table == 0 means this is a reference made from a wrapper.
1226 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1227 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1228 jump_info_token->image = image;
1229 jump_info_token->token = token;
1230 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1235 * This function is called to handle items that are left on the evaluation stack
1236 * at basic block boundaries. What happens is that we save the values to local variables
1237 * and we reload them later when first entering the target basic block (with the
1238 * handle_loaded_temps () function).
1239 * A single joint point will use the same variables (stored in the array bb->out_stack or
1240 * bb->in_stack, if the basic block is before or after the joint point).
1242 * This function needs to be called _before_ emitting the last instruction of
1243 * the bb (i.e. before emitting a branch).
1244 * If the stack merge fails at a join point, cfg->unverifiable is set.
/* sp points at one past the last live stack item; count is the number
 * of items to spill into the join-point temporaries. */
1247 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1250 MonoBasicBlock *bb = cfg->cbb;
1251 MonoBasicBlock *outb;
1252 MonoInst *inst, **locals;
1257 if (cfg->verbose_level > 3)
1258 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* First exit from this bb: establish its out_stack, reusing a
 * successor's in_stack when one already exists. */
1259 if (!bb->out_scount) {
1260 bb->out_scount = count;
1261 //printf ("bblock %d has out:", bb->block_num);
1263 for (i = 0; i < bb->out_count; ++i) {
1264 outb = bb->out_bb [i];
1265 /* exception handlers are linked, but they should not be considered for stack args */
1266 if (outb->flags & BB_EXCEPTION_HANDLER)
1268 //printf (" %d", outb->block_num);
1269 if (outb->in_stack) {
1271 bb->out_stack = outb->in_stack;
/* No successor had an in_stack yet: allocate fresh temporaries. */
1277 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1278 for (i = 0; i < count; ++i) {
1280 * try to reuse temps already allocated for this purpouse, if they occupy the same
1281 * stack slot and if they are of the same type.
1282 * This won't cause conflicts since if 'local' is used to
1283 * store one of the values in the in_stack of a bblock, then
1284 * the same variable will be used for the same outgoing stack
1286 * This doesn't work when inlining methods, since the bblocks
1287 * in the inlined methods do not inherit their in_stack from
1288 * the bblock they are inlined to. See bug #58863 for an
1291 if (cfg->inlined_method)
1292 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1294 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate this bb's out_stack to every non-handler successor;
 * a stack-depth mismatch at a join point is unverifiable IL. */
1299 for (i = 0; i < bb->out_count; ++i) {
1300 outb = bb->out_bb [i];
1301 /* exception handlers are linked, but they should not be considered for stack args */
1302 if (outb->flags & BB_EXCEPTION_HANDLER)
1304 if (outb->in_scount) {
1305 if (outb->in_scount != bb->out_scount) {
1306 cfg->unverifiable = TRUE;
1309 continue; /* check they are the same locals */
1311 outb->in_scount = count;
1312 outb->in_stack = bb->out_stack;
/* Emit the actual spill stores and rewrite sp[] to refer to the
 * shared temporaries. */
1315 locals = bb->out_stack;
1317 for (i = 0; i < count; ++i) {
1318 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1319 inst->cil_code = sp [i]->cil_code;
1320 sp [i] = locals [i];
1321 if (cfg->verbose_level > 3)
1322 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1326 * It is possible that the out bblocks already have in_stack assigned, and
1327 * the in_stacks differ. In this case, we will store to all the different
1334 /* Find a bblock which has a different in_stack */
1336 while (bindex < bb->out_count) {
1337 outb = bb->out_bb [bindex];
1338 /* exception handlers are linked, but they should not be considered for stack args */
1339 if (outb->flags & BB_EXCEPTION_HANDLER) {
1343 if (outb->in_stack != locals) {
1344 for (i = 0; i < count; ++i) {
1345 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1346 inst->cil_code = sp [i]->cil_code;
1347 sp [i] = locals [i];
1348 if (cfg->verbose_level > 3)
1349 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1351 locals = outb->in_stack;
1360 /* Emit code which loads interface_offsets [klass->interface_id]
1361 * The array is stored in memory before vtable.
1364 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1366 if (cfg->compile_aot) {
1367 int ioffset_reg = alloc_preg (cfg);
1368 int iid_reg = alloc_preg (cfg);
1370 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1371 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1372 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
1375 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
1380 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1381 * stored in "klass_reg" implements the interface "klass".
1384 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1386 int ibitmap_reg = alloc_preg (cfg);
1387 int ibitmap_byte_reg = alloc_preg (cfg);
1389 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap));
1391 if (cfg->compile_aot) {
1392 int iid_reg = alloc_preg (cfg);
1393 int shifted_iid_reg = alloc_preg (cfg);
1394 int ibitmap_byte_address_reg = alloc_preg (cfg);
1395 int masked_iid_reg = alloc_preg (cfg);
1396 int iid_one_bit_reg = alloc_preg (cfg);
1397 int iid_bit_reg = alloc_preg (cfg);
1398 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1399 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1400 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1401 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1402 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1403 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1404 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1405 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
1407 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1408 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1413 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1414 * stored in "vtable_reg" implements the interface "klass".
1417 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1419 int ibitmap_reg = alloc_preg (cfg);
1420 int ibitmap_byte_reg = alloc_preg (cfg);
1422 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap));
1424 if (cfg->compile_aot) {
1425 int iid_reg = alloc_preg (cfg);
1426 int shifted_iid_reg = alloc_preg (cfg);
1427 int ibitmap_byte_address_reg = alloc_preg (cfg);
1428 int masked_iid_reg = alloc_preg (cfg);
1429 int iid_one_bit_reg = alloc_preg (cfg);
1430 int iid_bit_reg = alloc_preg (cfg);
1431 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1432 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, shifted_iid_reg, iid_reg, 3);
1433 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1434 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1435 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, masked_iid_reg, iid_reg, 7);
1436 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1437 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1438 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
1440 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1441 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1446 * Emit code which checks whenever the interface id of @klass is smaller than
1447 * than the value given by max_iid_reg.
1450 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1451 MonoBasicBlock *false_target)
1453 if (cfg->compile_aot) {
1454 int iid_reg = alloc_preg (cfg);
1455 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1456 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1459 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1461 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1463 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1466 /* Same as above, but obtains max_iid from a vtable */
1468 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1469 MonoBasicBlock *false_target)
1471 int max_iid_reg = alloc_preg (cfg);
1473 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1474 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1477 /* Same as above, but obtains max_iid from a klass */
1479 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1480 MonoBasicBlock *false_target)
1482 int max_iid_reg = alloc_preg (cfg);
1484 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1485 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1489 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1491 int idepth_reg = alloc_preg (cfg);
1492 int stypes_reg = alloc_preg (cfg);
1493 int stype = alloc_preg (cfg);
1495 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1496 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1497 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1498 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1500 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1501 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1503 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1504 } else if (cfg->compile_aot) {
1505 int const_reg = alloc_preg (cfg);
1506 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1507 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1509 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1511 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
1515 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1517 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
1521 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1523 int intf_reg = alloc_preg (cfg);
1525 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1526 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1527 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1529 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1531 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1535 * Variant of the above that takes a register to the class, not the vtable.
1538 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1540 int intf_bit_reg = alloc_preg (cfg);
1542 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1543 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1544 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1546 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1548 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1552 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1555 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1556 } else if (cfg->compile_aot) {
1557 int const_reg = alloc_preg (cfg);
1558 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1559 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1561 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1563 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1567 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1569 return mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
1573 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1575 if (cfg->compile_aot) {
1576 int const_reg = alloc_preg (cfg);
1577 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1578 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1580 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1582 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
1586 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
1589 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1592 int rank_reg = alloc_preg (cfg);
1593 int eclass_reg = alloc_preg (cfg);
1595 g_assert (!klass_inst);
1596 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
1597 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1598 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1599 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1600 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
1601 if (klass->cast_class == mono_defaults.object_class) {
1602 int parent_reg = alloc_preg (cfg);
1603 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
1604 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1605 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1606 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1607 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1608 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1609 } else if (klass->cast_class == mono_defaults.enum_class) {
1610 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1611 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1612 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1614 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1615 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1618 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1619 /* Check that the object is a vector too */
1620 int bounds_reg = alloc_preg (cfg);
1621 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
1622 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1623 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1626 int idepth_reg = alloc_preg (cfg);
1627 int stypes_reg = alloc_preg (cfg);
1628 int stype = alloc_preg (cfg);
1630 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1631 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1632 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1633 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1635 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1636 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1637 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
1642 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1644 return mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
1648 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1652 g_assert (val == 0);
1657 if ((size <= 4) && (size <= align)) {
1660 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1663 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1666 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1668 #if SIZEOF_REGISTER == 8
1670 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
1676 val_reg = alloc_preg (cfg);
1678 if (SIZEOF_REGISTER == 8)
1679 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1681 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
1684 /* This could be optimized further if neccesary */
1686 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1693 #if !NO_UNALIGNED_ACCESS
1694 if (SIZEOF_REGISTER == 8) {
1696 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1701 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
1709 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1714 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1719 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1725 #endif /* DISABLE_JIT */
1728 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1735 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1736 g_assert (size < 10000);
1739 /* This could be optimized further if neccesary */
1741 cur_reg = alloc_preg (cfg);
1742 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1743 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1750 #if !NO_UNALIGNED_ACCESS
1751 if (SIZEOF_REGISTER == 8) {
1753 cur_reg = alloc_preg (cfg);
1754 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1755 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
1764 cur_reg = alloc_preg (cfg);
1765 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1766 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1772 cur_reg = alloc_preg (cfg);
1773 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1774 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1780 cur_reg = alloc_preg (cfg);
1781 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1782 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1792 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
1795 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1798 type = mini_get_basic_type_from_generic (gsctx, type);
1799 switch (type->type) {
1800 case MONO_TYPE_VOID:
1801 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
1804 case MONO_TYPE_BOOLEAN:
1807 case MONO_TYPE_CHAR:
1810 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1814 case MONO_TYPE_FNPTR:
1815 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1816 case MONO_TYPE_CLASS:
1817 case MONO_TYPE_STRING:
1818 case MONO_TYPE_OBJECT:
1819 case MONO_TYPE_SZARRAY:
1820 case MONO_TYPE_ARRAY:
1821 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1824 return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
1827 return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
1828 case MONO_TYPE_VALUETYPE:
1829 if (type->data.klass->enumtype) {
1830 type = mono_class_enum_basetype (type->data.klass);
1833 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1834 case MONO_TYPE_TYPEDBYREF:
1835 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1836 case MONO_TYPE_GENERICINST:
1837 type = &type->data.generic_class->container_class->byval_arg;
1840 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
1846 * target_type_is_incompatible:
1847 * @cfg: MonoCompile context
1849 * Check that the item @arg on the evaluation stack can be stored
1850 * in the target type (can be a local, or field, etc).
1851 * The cfg arg can be used to check if we need verification or just
1854 * Returns: non-0 value if arg can't be stored on a target.
1857 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1859 MonoType *simple_type;
1862 if (target->byref) {
1863 /* FIXME: check that the pointed to types match */
1864 if (arg->type == STACK_MP)
1865 return arg->klass != mono_class_from_mono_type (target);
1866 if (arg->type == STACK_PTR)
1871 simple_type = mono_type_get_underlying_type (target);
1872 switch (simple_type->type) {
1873 case MONO_TYPE_VOID:
1877 case MONO_TYPE_BOOLEAN:
1880 case MONO_TYPE_CHAR:
1883 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1887 /* STACK_MP is needed when setting pinned locals */
1888 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1893 case MONO_TYPE_FNPTR:
1894 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1897 case MONO_TYPE_CLASS:
1898 case MONO_TYPE_STRING:
1899 case MONO_TYPE_OBJECT:
1900 case MONO_TYPE_SZARRAY:
1901 case MONO_TYPE_ARRAY:
1902 if (arg->type != STACK_OBJ)
1904 /* FIXME: check type compatibility */
1908 if (arg->type != STACK_I8)
1913 if (arg->type != STACK_R8)
1916 case MONO_TYPE_VALUETYPE:
1917 if (arg->type != STACK_VTYPE)
1919 klass = mono_class_from_mono_type (simple_type);
1920 if (klass != arg->klass)
1923 case MONO_TYPE_TYPEDBYREF:
1924 if (arg->type != STACK_VTYPE)
1926 klass = mono_class_from_mono_type (simple_type);
1927 if (klass != arg->klass)
1930 case MONO_TYPE_GENERICINST:
1931 if (mono_type_generic_inst_is_valuetype (simple_type)) {
1932 if (arg->type != STACK_VTYPE)
1934 klass = mono_class_from_mono_type (simple_type);
1935 if (klass != arg->klass)
1939 if (arg->type != STACK_OBJ)
1941 /* FIXME: check type compatibility */
1945 case MONO_TYPE_MVAR:
1946 /* FIXME: all the arguments must be references for now,
1947 * later look inside cfg and see if the arg num is
1948 * really a reference
1950 g_assert (cfg->generic_sharing_context);
1951 if (arg->type != STACK_OBJ)
1955 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
1961 * Prepare arguments for passing to a function call.
1962 * Return a non-zero value if the arguments can't be passed to the given
1964 * The type checks are not yet complete and some conversions may need
1965 * casts on 32 or 64 bit architectures.
1967 * FIXME: implement this using target_type_is_incompatible ()
1970 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
1972 MonoType *simple_type;
1976 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
1980 for (i = 0; i < sig->param_count; ++i) {
1981 if (sig->params [i]->byref) {
1982 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
1986 simple_type = sig->params [i];
1987 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
1989 switch (simple_type->type) {
1990 case MONO_TYPE_VOID:
1995 case MONO_TYPE_BOOLEAN:
1998 case MONO_TYPE_CHAR:
2001 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2007 case MONO_TYPE_FNPTR:
2008 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2011 case MONO_TYPE_CLASS:
2012 case MONO_TYPE_STRING:
2013 case MONO_TYPE_OBJECT:
2014 case MONO_TYPE_SZARRAY:
2015 case MONO_TYPE_ARRAY:
2016 if (args [i]->type != STACK_OBJ)
2021 if (args [i]->type != STACK_I8)
2026 if (args [i]->type != STACK_R8)
2029 case MONO_TYPE_VALUETYPE:
2030 if (simple_type->data.klass->enumtype) {
2031 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2034 if (args [i]->type != STACK_VTYPE)
2037 case MONO_TYPE_TYPEDBYREF:
2038 if (args [i]->type != STACK_VTYPE)
2041 case MONO_TYPE_GENERICINST:
2042 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2046 g_error ("unknown type 0x%02x in check_call_signature",
2054 callvirt_to_call (int opcode)
2059 case OP_VOIDCALLVIRT:
2068 g_assert_not_reached ();
2075 callvirt_to_call_membase (int opcode)
2079 return OP_CALL_MEMBASE;
2080 case OP_VOIDCALLVIRT:
2081 return OP_VOIDCALL_MEMBASE;
2083 return OP_FCALL_MEMBASE;
2085 return OP_LCALL_MEMBASE;
2087 return OP_VCALL_MEMBASE;
2089 g_assert_not_reached ();
#ifdef MONO_ARCH_HAVE_IMT
/*
 * Pass the IMT key (the method, or the supplied imt_arg) to an interface call:
 * either in the dedicated MONO_ARCH_IMT_REG, or via the arch-specific hook.
 * NOTE(review): the branch heads and the closing #endif were lost in the
 * mangled listing and are reconstructed; verify against upstream.
 */
static void
emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
{
#ifdef MONO_ARCH_IMT_REG
	int method_reg = alloc_preg (cfg);

	if (imt_arg) {
		MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
	} else if (cfg->compile_aot) {
		MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
	} else {
		MonoInst *ins;

		MONO_INST_NEW (cfg, ins, OP_PCONST);
		ins->inst_p0 = call->method;
		ins->dreg = method_reg;
		MONO_ADD_INS (cfg->cbb, ins);
	}

	mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
#else
	mono_arch_emit_imt_argument (cfg, call, imt_arg);
#endif
}
#endif
2121 static MonoJumpInfo *
2122 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2124 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2128 ji->data.target = target;
2133 inline static MonoCallInst *
2134 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2135 MonoInst **args, int calli, int virtual, int tail)
2138 #ifdef MONO_ARCH_SOFT_FLOAT
2143 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2145 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2148 call->signature = sig;
2150 type_to_eval_stack_type ((cfg), sig->ret, &call->inst);
2153 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2154 call->vret_var = cfg->vret_addr;
2155 //g_assert_not_reached ();
2157 } else if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2158 MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
2161 temp->backend.is_pinvoke = sig->pinvoke;
2164 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2165 * address of return value to increase optimization opportunities.
2166 * Before vtype decomposition, the dreg of the call ins itself represents the
2167 * fact the call modifies the return value. After decomposition, the call will
2168 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2169 * will be transformed into an LDADDR.
2171 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2172 loada->dreg = alloc_preg (cfg);
2173 loada->inst_p0 = temp;
2174 /* We reference the call too since call->dreg could change during optimization */
2175 loada->inst_p1 = call;
2176 MONO_ADD_INS (cfg->cbb, loada);
2178 call->inst.dreg = temp->dreg;
2180 call->vret_var = loada;
2181 } else if (!MONO_TYPE_IS_VOID (sig->ret))
2182 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2184 #ifdef MONO_ARCH_SOFT_FLOAT
2185 if (COMPILE_SOFT_FLOAT (cfg)) {
2187 * If the call has a float argument, we would need to do an r8->r4 conversion using
2188 * an icall, but that cannot be done during the call sequence since it would clobber
2189 * the call registers + the stack. So we do it before emitting the call.
2191 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2193 MonoInst *in = call->args [i];
2195 if (i >= sig->hasthis)
2196 t = sig->params [i - sig->hasthis];
2198 t = &mono_defaults.int_class->byval_arg;
2199 t = mono_type_get_underlying_type (t);
2201 if (!t->byref && t->type == MONO_TYPE_R4) {
2202 MonoInst *iargs [1];
2206 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2208 /* The result will be in an int vreg */
2209 call->args [i] = conv;
2216 if (COMPILE_LLVM (cfg))
2217 mono_llvm_emit_call (cfg, call);
2219 mono_arch_emit_call (cfg, call);
2221 mono_arch_emit_call (cfg, call);
2224 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2225 cfg->flags |= MONO_CFG_HAS_CALLS;
2230 inline static MonoInst*
2231 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr)
2233 MonoCallInst *call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE);
2235 call->inst.sreg1 = addr->dreg;
2237 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2239 return (MonoInst*)call;
2242 inline static MonoInst*
2243 mono_emit_rgctx_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *rgctx_arg)
2245 #ifdef MONO_ARCH_RGCTX_REG
2250 rgctx_reg = mono_alloc_preg (cfg);
2251 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2253 call = (MonoCallInst*)mono_emit_calli (cfg, sig, args, addr);
2255 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2256 cfg->uses_rgctx_reg = TRUE;
2257 call->rgctx_reg = TRUE;
2259 return (MonoInst*)call;
2261 g_assert_not_reached ();
2267 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, int rgctx_type);
2269 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, int rgctx_type);
2272 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2273 MonoInst **args, MonoInst *this, MonoInst *imt_arg)
2275 gboolean might_be_remote;
2276 gboolean virtual = this != NULL;
2277 gboolean enable_for_aot = TRUE;
2281 if (method->string_ctor) {
2282 /* Create the real signature */
2283 /* FIXME: Cache these */
2284 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2285 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2290 might_be_remote = this && sig->hasthis &&
2291 (method->klass->marshalbyref || method->klass == mono_defaults.object_class) &&
2292 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && !MONO_CHECK_THIS (this);
2294 context_used = mono_method_check_context_used (method);
2295 if (might_be_remote && context_used) {
2298 g_assert (cfg->generic_sharing_context);
2300 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2302 return mono_emit_calli (cfg, sig, args, addr);
2305 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, FALSE);
2307 if (might_be_remote)
2308 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2310 call->method = method;
2311 call->inst.flags |= MONO_INST_HAS_METHOD;
2312 call->inst.inst_left = this;
2315 int vtable_reg, slot_reg, this_reg;
2317 this_reg = this->dreg;
2319 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
2320 if ((method->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
2321 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2323 /* Make a call to delegate->invoke_impl */
2324 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2325 call->inst.inst_basereg = this_reg;
2326 call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2327 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2329 return (MonoInst*)call;
2333 if ((!cfg->compile_aot || enable_for_aot) &&
2334 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2335 (MONO_METHOD_IS_FINAL (method) &&
2336 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2337 !(method->klass->marshalbyref && context_used)) {
2339 * the method is not virtual, we just need to ensure this is not null
2340 * and then we can call the method directly.
2342 if (method->klass->marshalbyref || method->klass == mono_defaults.object_class) {
2344 * The check above ensures method is not gshared, this is needed since
2345 * gshared methods can't have wrappers.
2347 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2350 if (!method->string_ctor)
2351 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2353 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2355 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2357 return (MonoInst*)call;
2360 if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2362 * the method is virtual, but we can statically dispatch since either
2363 * it's class or the method itself are sealed.
2364 * But first we need to ensure it's not a null reference.
2366 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2368 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2369 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2371 return (MonoInst*)call;
2374 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2376 vtable_reg = alloc_preg (cfg);
2377 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2378 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2380 #ifdef MONO_ARCH_HAVE_IMT
2382 guint32 imt_slot = mono_method_get_imt_slot (method);
2383 emit_imt_argument (cfg, call, imt_arg);
2384 slot_reg = vtable_reg;
2385 call->inst.inst_offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2388 if (slot_reg == -1) {
2389 slot_reg = alloc_preg (cfg);
2390 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2391 call->inst.inst_offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
2394 slot_reg = vtable_reg;
2395 call->inst.inst_offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
2396 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2397 #ifdef MONO_ARCH_HAVE_IMT
2399 g_assert (mono_method_signature (method)->generic_param_count);
2400 emit_imt_argument (cfg, call, imt_arg);
2405 call->inst.sreg1 = slot_reg;
2406 call->virtual = TRUE;
2409 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2411 return (MonoInst*)call;
2415 mono_emit_rgctx_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2416 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *vtable_arg)
2418 #ifdef MONO_ARCH_RGCTX_REG
2425 #ifdef MONO_ARCH_RGCTX_REG
2426 rgctx_reg = mono_alloc_preg (cfg);
2427 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
2432 ins = mono_emit_method_call_full (cfg, method, sig, args, this, imt_arg);
2434 call = (MonoCallInst*)ins;
2436 #ifdef MONO_ARCH_RGCTX_REG
2437 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2438 cfg->uses_rgctx_reg = TRUE;
2439 call->rgctx_reg = TRUE;
2449 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2451 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), args, this, NULL);
2455 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2462 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE);
2465 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2467 return (MonoInst*)call;
2471 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2473 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2477 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2481 * mono_emit_abs_call:
2483 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2485 inline static MonoInst*
2486 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2487 MonoMethodSignature *sig, MonoInst **args)
/* Allocate the patch info from the compile mempool; it lives as long as cfg. */
2489 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2493 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
2496 if (cfg->abs_patches == NULL)
2497 cfg->abs_patches = g_hash_table_new (NULL, NULL);
/* Register ji so the ABS resolver can map the fake address back to it. */
2498 g_hash_table_insert (cfg->abs_patches, ji, ji);
2499 ins = mono_emit_native_call (cfg, ji, sig, args);
/* Mark the call target as a patch, not a real function pointer. */
2500 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * mono_emit_widen_call_res:
 *
 *   Widen the result of a call when needed: native (pinvoke) code — and code
 * compiled through LLVM — may return sub-register-sized integers without
 * initializing the upper bits, so an explicit sign/zero extension is emitted
 * based on the load opcode the return type maps to.
 */
2505 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2507 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2508 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2512 * Native code might return non register sized integers
2513 * without initializing the upper bits.
2515 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2516 case OP_LOADI1_MEMBASE:
2517 widen_op = OP_ICONV_TO_I1;
2519 case OP_LOADU1_MEMBASE:
2520 widen_op = OP_ICONV_TO_U1;
2522 case OP_LOADI2_MEMBASE:
2523 widen_op = OP_ICONV_TO_I2;
2525 case OP_LOADU2_MEMBASE:
2526 widen_op = OP_ICONV_TO_U2;
/* widen_op == -1 means the return type needs no widening. */
2532 if (widen_op != -1) {
2533 int dreg = alloc_preg (cfg);
2536 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
/* Preserve the evaluation-stack type of the original result. */
2537 widen->type = ins->type;
/* Lazily resolve the corlib-internal String.memcpy(3 args) helper and cache
 * it in a static; aborts with g_error if the corlib is too old to have it. */
2547 get_memcpy_method (void)
2549 static MonoMethod *memcpy_method = NULL;
2550 if (!memcpy_method) {
2551 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2553 g_error ("Old corlib found. Install a new one");
2555 return memcpy_method;
2559 * Emit code to copy a valuetype of type @klass whose address is stored in
2560 * @src->dreg to memory whose address is stored at @dest->dreg.
2563 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
2565 MonoInst *iargs [3];
2568 MonoMethod *memcpy_method;
2572 * This check breaks with spilled vars... need to handle it during verification anyway.
2573 * g_assert (klass && klass == src->klass && klass == dest->klass);
/* Size/alignment differ between native (marshalled) and managed layout. */
2577 n = mono_class_native_size (klass, &align);
2579 n = mono_class_value_size (klass, &align);
2581 #if HAVE_WRITE_BARRIERS
2582 /* if native is true there should be no references in the struct */
2583 if (klass->has_references && !native) {
2584 /* Avoid barriers when storing to the stack */
2585 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
2586 (dest->opcode == OP_LDADDR))) {
2587 int context_used = 0;
2592 if (cfg->generic_sharing_context)
2593 context_used = mono_class_check_context_used (klass);
/* Pass the class to the copy helper: via rgctx under generic sharing,
 * as an AOT-friendly constant, or as a raw pointer otherwise. */
2595 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
2597 if (cfg->compile_aot) {
2598 EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
2600 EMIT_NEW_PCONST (cfg, iargs [2], klass);
2601 mono_class_compute_gc_descriptor (klass);
/* mono_value_copy performs the copy with the required write barriers. */
2605 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* Small structs: inline the copy instead of calling memcpy. */
2610 if ((cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
2611 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
2612 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
2616 EMIT_NEW_ICONST (cfg, iargs [2], n);
2618 memcpy_method = get_memcpy_method ();
2619 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/* Lazily resolve the corlib-internal String.memset(3 args) helper and cache
 * it in a static; aborts with g_error if the corlib is too old to have it. */
2624 get_memset_method (void)
2626 static MonoMethod *memset_method = NULL;
2627 if (!memset_method) {
2628 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
2630 g_error ("Old corlib found. Install a new one");
2632 return memset_method;
/*
 * mini_emit_initobj:
 *
 *   Emit code to zero-initialize the valuetype KLASS at the address held in
 * DEST->dreg: inline memset for small sizes, otherwise a call to the corlib
 * memset helper.
 */
2636 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
2638 MonoInst *iargs [3];
2641 MonoMethod *memset_method;
2643 /* FIXME: Optimize this for the case when dest is an LDADDR */
2645 mono_class_init (klass);
2646 n = mono_class_value_size (klass, &align);
/* Small structs are cleared inline; larger ones go through memset(dest,0,n). */
2648 if (n <= sizeof (gpointer) * 5) {
2649 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
2652 memset_method = get_memset_method ();
2654 EMIT_NEW_ICONST (cfg, iargs [1], 0);
2655 EMIT_NEW_ICONST (cfg, iargs [2], n);
2656 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 *
 *   Emit IR that loads the runtime generic context for METHOD.  The source
 * depends on how the context is shared: the method RGCTX variable when the
 * method's own generic context is used, the vtable variable for static or
 * valuetype methods (dereferencing MonoMethodRuntimeGenericContext when the
 * method is inflated), and otherwise the vtable loaded from `this`.
 * Only valid under generic sharing (asserted below).
 */
2661 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
2663 MonoInst *this = NULL;
2665 g_assert (cfg->generic_sharing_context);
/* Load `this` only when an instance pointer is actually available. */
2667 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
2668 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
2669 !method->klass->valuetype)
2670 EMIT_NEW_ARGLOAD (cfg, this, 0);
2672 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
2673 MonoInst *mrgctx_loc, *mrgctx_var;
2676 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
2678 mrgctx_loc = mono_get_vtable_var (cfg);
2679 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
2682 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
2683 MonoInst *vtable_loc, *vtable_var;
2687 vtable_loc = mono_get_vtable_var (cfg);
2688 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
2690 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
/* The variable actually holds an MRGCTX; fetch its class vtable. */
2691 MonoInst *mrgctx_var = vtable_var;
2694 vtable_reg = alloc_preg (cfg);
2695 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
2696 vtable_var->type = STACK_PTR;
/* Fallback: load the vtable out of the `this` object. */
2702 int vtable_reg, res_reg;
2704 vtable_reg = alloc_preg (cfg);
2705 res_reg = alloc_preg (cfg);
2706 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Allocate (from MP) an rgctx-entry patch descriptor: which method's context,
 * whether it is an MRGCTX, the wrapped patch (type + data), and the slot
 * info type to fetch. */
2711 static MonoJumpInfoRgctxEntry *
2712 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, int info_type)
2714 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
2715 res->method = method;
2716 res->in_mrgctx = in_mrgctx;
/* The inner MonoJumpInfo is pool-allocated too; no separate free needed. */
2717 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
2718 res->data->type = patch_type;
2719 res->data->data.target = patch_data;
2720 res->info_type = info_type;
/* Emit a call to the lazy rgctx-fetch trampoline which resolves ENTRY from
 * the runtime generic context RGCTX. */
2725 static inline MonoInst*
2726 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
2728 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/* Emit IR that loads the RGCTX_TYPE property (e.g. vtable, klass) of KLASS
 * from the runtime generic context of the current method. */
2732 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
2733 MonoClass *klass, int rgctx_type)
2735 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
2736 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2738 return emit_rgctx_fetch (cfg, rgctx, entry);
2742 * emit_get_rgctx_method:
2744 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
2745 * normal constants, else emit a load from the rgctx.
2748 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
2749 MonoMethod *cmethod, int rgctx_type)
/* No generic context involved: the value is a compile-time constant. */
2751 if (!context_used) {
2754 switch (rgctx_type) {
2755 case MONO_RGCTX_INFO_METHOD:
2756 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
2758 case MONO_RGCTX_INFO_METHOD_RGCTX:
2759 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
2762 g_assert_not_reached ();
/* Shared code: fetch the value through the rgctx at run time. */
2765 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
2766 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2768 return emit_rgctx_fetch (cfg, rgctx, entry);
/* Emit IR that loads the RGCTX_TYPE property of FIELD from the runtime
 * generic context of the current method. */
2773 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
2774 MonoClassField *field, int rgctx_type)
2776 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
2777 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2779 return emit_rgctx_fetch (cfg, rgctx, entry);
2783 * On return the caller must check @klass for load errors.
/*
 * emit_generic_class_init:
 *
 *   Emit a call to the generic class-init trampoline for KLASS.  The vtable
 * argument comes from the rgctx under generic sharing, or is a constant
 * otherwise, and is bound to MONO_ARCH_VTABLE_REG where defined.
 */
2786 emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
2788 MonoInst *vtable_arg;
2790 int context_used = 0;
2792 if (cfg->generic_sharing_context)
2793 context_used = mono_class_check_context_used (klass);
2796 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
2797 klass, MONO_RGCTX_INFO_VTABLE);
2799 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2803 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
2806 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
2807 #ifdef MONO_ARCH_VTABLE_REG
/* Pass the vtable in the architecture's dedicated vtable register. */
2808 mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
2809 cfg->uses_vtable_reg = TRUE;
2816 * On return the caller must check @array_class for load errors
/*
 * mini_emit_check_array_type:
 *
 *   Emit a run-time check that OBJ is an instance of ARRAY_CLASS, comparing
 * either the klass (under MONO_OPT_SHARED) or the vtable pointer, and throw
 * ArrayTypeMismatchException on mismatch.
 */
2819 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
2821 int vtable_reg = alloc_preg (cfg);
2822 int context_used = 0;
2824 if (cfg->generic_sharing_context)
2825 context_used = mono_class_check_context_used (array_class);
2827 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Shared code cannot bake in a vtable pointer; compare the klass instead. */
2829 if (cfg->opt & MONO_OPT_SHARED) {
2830 int class_reg = alloc_preg (cfg);
2831 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2832 if (cfg->compile_aot) {
2833 int klass_reg = alloc_preg (cfg);
2834 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
2835 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
2837 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
/* Generic sharing: look the expected vtable up through the rgctx. */
2839 } else if (context_used) {
2840 MonoInst *vtable_ins;
2842 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
2843 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
2845 if (cfg->compile_aot) {
2849 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
2851 vt_reg = alloc_preg (cfg);
2852 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
2853 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
2856 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
2858 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
2862 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
/*
 * save_cast_details:
 *
 *   With --debug=casts, record the source class (read from OBJ_REG's vtable)
 * and the target KLASS into the JIT TLS area so a failing cast can report
 * both types.  No-op unless better_cast_details is enabled.
 */
2866 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg)
2868 if (mini_get_debug_options ()->better_cast_details) {
2869 int to_klass_reg = alloc_preg (cfg);
2870 int vtable_reg = alloc_preg (cfg);
2871 int klass_reg = alloc_preg (cfg);
2872 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
/* Platforms without a TLS intrinsic cannot support this option. */
2875 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
2879 MONO_ADD_INS (cfg->cbb, tls_get);
2880 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2881 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2883 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
2884 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
2885 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
/* Clear the cast-details TLS slots written by save_cast_details () after a
 * cast has succeeded.  No-op unless better_cast_details is enabled. */
2890 reset_cast_details (MonoCompile *cfg)
2892 /* Reset the variables holding the cast details */
2893 if (mini_get_debug_options ()->better_cast_details) {
2894 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
2896 MONO_ADD_INS (cfg->cbb, tls_get);
2897 /* It is enough to reset the from field */
2898 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
2903 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
2904 * generic code is generated.
2907 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
/* Nullable unboxing is implemented by calling Nullable<T>.Unbox. */
2909 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
2912 MonoInst *rgctx, *addr;
2914 /* FIXME: What if the class is shared? We might not
2915 have to get the address of the method from the
/* Shared code: fetch the target code address and rgctx, then call
 * indirectly. */
2917 addr = emit_get_rgctx_method (cfg, context_used, method,
2918 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
2920 rgctx = emit_get_rgctx (cfg, method, context_used);
2922 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
2924 return mono_emit_method_call (cfg, method, &val, NULL);
/*
 * handle_unbox:
 *
 *   Emit IR for the unbox opcode: type-check the boxed object in SP[0]
 * (rank must be 0 and the element class must match, throwing
 * InvalidCastException otherwise) and produce the address of the value,
 * i.e. the object pointer advanced past the MonoObject header.
 */
2929 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
2933 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
2934 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
2935 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
2936 int rank_reg = alloc_dreg (cfg ,STACK_I4);
2938 obj_reg = sp [0]->dreg;
/* The FAULT load also serves as the null check on the object. */
2939 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2940 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
2942 /* FIXME: generics */
2943 g_assert (klass->rank == 0);
/* Arrays cannot be unboxed: rank != 0 => InvalidCastException. */
2946 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
2947 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
2949 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2950 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
/* Shared generic code compares against an rgctx-resolved element class. */
2953 MonoInst *element_class;
2955 /* This assertion is from the unboxcast insn */
2956 g_assert (klass->rank == 0);
2958 element_class = emit_get_rgctx_klass (cfg, context_used,
2959 klass->element_class, MONO_RGCTX_INFO_KLASS);
2961 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
2962 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
2964 save_cast_details (cfg, klass->element_class, obj_reg);
2965 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
2966 reset_cast_details (cfg);
/* Result: pointer to the unboxed value, just past the object header. */
2969 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_PTR), obj_reg, sizeof (MonoObject));
2970 MONO_ADD_INS (cfg->cbb, add);
2971 add->type = STACK_MP;
2978 * Returns NULL and set the cfg exception on error.
/*
 * handle_alloc:
 *
 *   Emit IR that allocates an object of KLASS, choosing among: the generic
 * mono_object_new under MONO_OPT_SHARED, a corlib-specialized helper for
 * AOT out-of-line code, a GC managed allocator when available, or the
 * class's allocation function (optionally passed the instance size in
 * pointer-words).  FOR_BOX selects a box-specific managed allocator.
 */
2981 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box)
2983 MonoInst *iargs [2];
2986 if (cfg->opt & MONO_OPT_SHARED) {
2987 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
2988 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
2990 alloc_ftn = mono_object_new;
2991 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
2992 /* This happens often in argument checking code, eg. throw new FooException... */
2993 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
2994 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
2995 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
2997 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2998 MonoMethod *managed_alloc = NULL;
/* Failure to create the vtable is reported as a TypeLoadException. */
3002 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
3003 cfg->exception_ptr = klass;
3007 #ifndef MONO_CROSS_COMPILE
3008 managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
3011 if (managed_alloc) {
3012 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3013 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3015 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* pass_lw: the allocator wants the instance size in pointer-sized words. */
3017 guint32 lw = vtable->klass->instance_size;
3018 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
3019 EMIT_NEW_ICONST (cfg, iargs [0], lw);
3020 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
3023 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3027 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/*
 * handle_alloc_from_inst:
 *
 *   Like handle_alloc (), but for shared generic code where the vtable is
 * only known at run time: DATA_INST supplies it, so the call goes through
 * mono_object_new (under MONO_OPT_SHARED) or mono_object_new_specific.
 */
3031 handle_alloc_from_inst (MonoCompile *cfg, MonoClass *klass, MonoInst *data_inst,
3034 MonoInst *iargs [2];
3035 MonoMethod *managed_alloc = NULL;
3039 FIXME: we cannot get managed_alloc here because we can't get
3040 the class's vtable (because it's not a closed class)
3042 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3043 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
3046 if (cfg->opt & MONO_OPT_SHARED) {
3047 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3048 iargs [1] = data_inst;
3049 alloc_ftn = mono_object_new;
/* NOTE(review): managed_alloc is initialized to NULL and the code above is
 * quoted as a FIXME, so this branch looks currently unreachable — confirm. */
3051 if (managed_alloc) {
3052 iargs [0] = data_inst;
3053 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3056 iargs [0] = data_inst;
3057 alloc_ftn = mono_object_new_specific;
3060 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3064 * Returns NULL and set the cfg exception on error.
/*
 * handle_box:
 *
 *   Emit IR that boxes VAL into a new object of KLASS: Nullable<T> goes
 * through Nullable<T>.Box; everything else allocates and stores the value
 * just past the MonoObject header.
 */
3067 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass)
3069 MonoInst *alloc, *ins;
3071 if (mono_class_is_nullable (klass)) {
3072 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3073 return mono_emit_method_call (cfg, method, &val, NULL);
3076 alloc = handle_alloc (cfg, klass, TRUE);
3080 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * handle_box_from_inst:
 *
 *   Boxing for shared generic code: Nullable<T> boxes via an indirect call
 * whose target address comes from the rgctx; other types allocate through
 * handle_alloc_from_inst () using DATA_INST and store the value past the
 * object header.
 */
3086 handle_box_from_inst (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used, MonoInst *data_inst)
3088 MonoInst *alloc, *ins;
3090 if (mono_class_is_nullable (klass)) {
3091 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3092 /* FIXME: What if the class is shared? We might not
3093 have to get the method address from the RGCTX. */
3094 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3095 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3096 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3098 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
3100 alloc = handle_alloc_from_inst (cfg, klass, data_inst, TRUE);
3102 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
3108 // FIXME: This doesn't work yet (class libs tests fail?)
/* Classes for which isinst/castclass need the icall slow path (interfaces,
 * arrays, nullables, MBR, sealed, variant generics, type variables).  The
 * leading TRUE forces the slow path for everything until the FIXME above is
 * resolved. */
3109 #define is_complex_isinst(klass) (TRUE || (klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || klass->marshalbyref || (klass->flags & TYPE_ATTRIBUTE_SEALED) || mono_class_has_variant_generic_params (klass) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
3112 * Returns NULL and set the cfg exception on error.
/*
 * handle_castclass:
 *
 *   Emit IR for the castclass opcode on SRC.  Complex classes (see
 * is_complex_isinst) go through the mono_object_castclass icall; the inline
 * fast path handles null, interfaces via an iface cast, sealed classes via
 * a direct klass compare, and the general case via
 * mini_emit_castclass_inst ().  Throws InvalidCastException on failure.
 */
3115 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3117 MonoBasicBlock *is_null_bb;
3118 int obj_reg = src->dreg;
3119 int vtable_reg = alloc_preg (cfg);
3120 MonoInst *klass_inst = NULL;
3125 klass_inst = emit_get_rgctx_klass (cfg, context_used,
3126 klass, MONO_RGCTX_INFO_KLASS);
3128 if (is_complex_isinst (klass)) {
3129 /* Complex case, handle by an icall */
3135 args [1] = klass_inst;
3137 return mono_emit_jit_icall (cfg, mono_object_castclass, args);
3139 /* Simple case, handled by the code below */
/* null always passes castclass: branch straight to the join block. */
3143 NEW_BBLOCK (cfg, is_null_bb);
3145 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3146 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3148 save_cast_details (cfg, klass, obj_reg);
3150 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3151 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3152 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
3154 int klass_reg = alloc_preg (cfg);
3156 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed non-array classes can be checked with a single klass compare. */
3158 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3159 /* the remoting code is broken, access the class for now */
3160 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3161 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3163 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
3164 cfg->exception_ptr = klass;
3167 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3169 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3170 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3172 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3174 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3175 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
3179 MONO_START_BB (cfg, is_null_bb);
3181 reset_cast_details (cfg);
3187 * Returns NULL and set the cfg exception on error.
/*
 * handle_isinst:
 *
 *   Emit IR for the isinst opcode on SRC: the result register holds the
 * object on success (including the null case) and 0 on failure.  Complex
 * classes (see is_complex_isinst) go through the mono_object_isinst icall;
 * the inline path distinguishes interfaces, arrays (with rank/element-class
 * checks and an SZARRAY bounds check), nullables, and sealed classes.
 */
3190 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3193 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
3194 int obj_reg = src->dreg;
3195 int vtable_reg = alloc_preg (cfg);
3196 int res_reg = alloc_preg (cfg);
3197 MonoInst *klass_inst = NULL;
3200 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3202 if (is_complex_isinst (klass)) {
3205 /* Complex case, handle by an icall */
3211 args [1] = klass_inst;
3213 return mono_emit_jit_icall (cfg, mono_object_isinst, args);
3215 /* Simple case, the code below can handle it */
3219 NEW_BBLOCK (cfg, is_null_bb);
3220 NEW_BBLOCK (cfg, false_bb);
3221 NEW_BBLOCK (cfg, end_bb);
3223 /* Do the assignment at the beginning, so the other assignment can be if converted */
3224 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
3225 ins->type = STACK_OBJ;
/* null is "not an instance" for isinst, but the result is null anyway,
 * so it shares the success path that copies the input through. */
3228 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3229 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
3231 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3233 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3234 g_assert (!context_used);
3235 /* the is_null_bb target simply copies the input register to the output */
3236 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
3238 int klass_reg = alloc_preg (cfg);
/* Array case: check rank, then dispatch on the cast class. */
3241 int rank_reg = alloc_preg (cfg);
3242 int eclass_reg = alloc_preg (cfg);
3244 g_assert (!context_used);
3245 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3246 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
3247 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3248 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3249 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
3250 if (klass->cast_class == mono_defaults.object_class) {
3251 int parent_reg = alloc_preg (cfg);
3252 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
3253 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
3254 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3255 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3256 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
3257 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
3258 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3259 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3260 } else if (klass->cast_class == mono_defaults.enum_class) {
3261 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3262 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3263 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
3264 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3266 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
3267 /* Check that the object is a vector too */
3268 int bounds_reg = alloc_preg (cfg);
3269 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
3270 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
3271 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3274 /* the is_null_bb target simply copies the input register to the output */
3275 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3277 } else if (mono_class_is_nullable (klass)) {
3278 g_assert (!context_used);
3279 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3280 /* the is_null_bb target simply copies the input register to the output */
3281 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
/* Sealed classes: a single klass compare decides the result. */
3283 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3284 g_assert (!context_used);
3285 /* the remoting code is broken, access the class for now */
3286 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3287 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3289 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
3290 cfg->exception_ptr = klass;
3293 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3295 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3296 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3298 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3299 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
3301 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3302 /* the is_null_bb target simply copies the input register to the output */
3303 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
/* Failure path: result is 0. */
3308 MONO_START_BB (cfg, false_bb);
3310 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
3311 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3313 MONO_START_BB (cfg, is_null_bb);
3315 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *
 *   Emit IR for the remoting-aware CEE_MONO_CISINST opcode; see the comment
 * below for the 0/1/2 result encoding.  Transparent proxies whose real type
 * is not locally known produce 2 so the caller can fall back to a remote
 * check.
 */
3321 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3323 /* This opcode takes as input an object reference and a class, and returns:
3324 0) if the object is an instance of the class,
3325 1) if the object is not instance of the class,
3326 2) if the object is a proxy whose type cannot be determined */
3329 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
3330 int obj_reg = src->dreg;
3331 int dreg = alloc_ireg (cfg);
3333 int klass_reg = alloc_preg (cfg);
3335 NEW_BBLOCK (cfg, true_bb);
3336 NEW_BBLOCK (cfg, false_bb);
3337 NEW_BBLOCK (cfg, false2_bb);
3338 NEW_BBLOCK (cfg, end_bb);
3339 NEW_BBLOCK (cfg, no_proxy_bb);
/* null is treated as "not an instance" (result 1). */
3341 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3342 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
3344 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3345 NEW_BBLOCK (cfg, interface_fail_bb);
3347 tmp_reg = alloc_preg (cfg);
3348 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3349 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
/* Interface check failed: if the object is a transparent proxy with
 * custom type info, the answer is "cannot determine" (result 2). */
3350 MONO_START_BB (cfg, interface_fail_bb);
3351 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3353 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
3355 tmp_reg = alloc_preg (cfg);
3356 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3357 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3358 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
/* Non-interface case: distinguish proxies from ordinary objects. */
3360 tmp_reg = alloc_preg (cfg);
3361 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3362 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3364 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3365 tmp_reg = alloc_preg (cfg);
3366 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3367 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3369 tmp_reg = alloc_preg (cfg);
3370 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3371 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3372 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3374 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
3375 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
3377 MONO_START_BB (cfg, no_proxy_bb);
3379 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
/* Join blocks: materialize the 1 / 2 / 0 result codes. */
3382 MONO_START_BB (cfg, false_bb);
3384 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3385 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3387 MONO_START_BB (cfg, false2_bb);
3389 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
3390 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3392 MONO_START_BB (cfg, true_bb);
3394 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3396 MONO_START_BB (cfg, end_bb);
/* Placeholder ICONST whose dreg carries the result on the stack. */
3399 MONO_INST_NEW (cfg, ins, OP_ICONST);
3401 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *
 *   Emit IR for the remoting-aware CEE_MONO_CCASTCLASS opcode; see the
 * comment below for the 0/1/throw result encoding.  Like handle_cisinst ()
 * but a definite mismatch throws InvalidCastException instead of returning
 * a failure code.
 */
3407 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3409 /* This opcode takes as input an object reference and a class, and returns:
3410 0) if the object is an instance of the class,
3411 1) if the object is a proxy whose type cannot be determined
3412 an InvalidCastException exception is thrown otherwhise*/
3415 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
3416 int obj_reg = src->dreg;
3417 int dreg = alloc_ireg (cfg);
3418 int tmp_reg = alloc_preg (cfg);
3419 int klass_reg = alloc_preg (cfg);
3421 NEW_BBLOCK (cfg, end_bb);
3422 NEW_BBLOCK (cfg, ok_result_bb);
/* null always passes castclass (result 0). */
3424 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3425 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
3427 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3428 NEW_BBLOCK (cfg, interface_fail_bb);
3430 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3431 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
/* Interface check failed: only a transparent proxy with custom type
 * info may defer the decision (result 1); otherwise throw. */
3432 MONO_START_BB (cfg, interface_fail_bb);
3433 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3435 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
3437 tmp_reg = alloc_preg (cfg);
3438 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3439 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3440 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
3442 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3443 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Non-interface case: distinguish proxies from ordinary objects. */
3446 NEW_BBLOCK (cfg, no_proxy_bb);
3448 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3449 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3450 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3452 tmp_reg = alloc_preg (cfg);
3453 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3454 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3456 tmp_reg = alloc_preg (cfg);
3457 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3458 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3459 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3461 NEW_BBLOCK (cfg, fail_1_bb);
3463 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
3465 MONO_START_BB (cfg, fail_1_bb);
3467 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3468 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3470 MONO_START_BB (cfg, no_proxy_bb);
/* Ordinary object: a plain castclass check, throwing on mismatch. */
3472 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
3475 MONO_START_BB (cfg, ok_result_bb);
3477 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3479 MONO_START_BB (cfg, end_bb);
/* Placeholder ICONST whose dreg carries the result on the stack. */
3482 MONO_INST_NEW (cfg, ins, OP_ICONST);
3484 ins->type = STACK_I4;
3490 * Returns NULL and set the cfg exception on error.
/*
 * handle_delegate_ctor:
 *   Emit IR which allocates a delegate object of KLASS and initializes its
 * target, method, method_code and invoke_impl fields, inlining the work
 * normally done by mono_delegate_ctor ().
 * NOTE(review): this listing appears sampled/truncated; several original
 * lines (braces, else branches, the final return of the allocated object)
 * are not visible here.
 */
3492 static G_GNUC_UNUSED MonoInst*
3493 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used)
3495 gpointer *trampoline;
3496 MonoInst *obj, *method_ins, *tramp_ins;
3500 obj = handle_alloc (cfg, klass, FALSE);
3504 /* Inline the contents of mono_delegate_ctor */
3506 /* Set target field */
3507 /* Optimize away setting of NULL target */
3508 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0))
3509 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
3511 /* Set method field */
3512 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
3513 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
3516 * To avoid looking up the compiled code belonging to the target method
3517 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
3518 * store it, and we fill it after the method has been compiled.
3520 if (!cfg->compile_aot && !method->dynamic) {
3521 MonoInst *code_slot_ins;
/* With generic sharing, the code slot must come from the RGCTX instead of a per-domain hash */
3524 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
3526 domain = mono_domain_get ();
/* The method_code_hash is created lazily under the domain lock */
3527 mono_domain_lock (domain);
3528 if (!domain_jit_info (domain)->method_code_hash)
3529 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
3530 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
3532 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
3533 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
3535 mono_domain_unlock (domain);
3537 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
3539 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
3542 /* Set invoke_impl field */
3543 if (cfg->compile_aot) {
3544 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, klass);
/* Non-AOT path: bake the trampoline address directly into the code */
3546 trampoline = mono_create_delegate_trampoline (klass);
3547 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
3549 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
3551 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
3557 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
3559 MonoJitICallInfo *info;
3561 /* Need to register the icall so it gets an icall wrapper */
3562 info = mono_get_array_new_va_icall (rank);
3564 cfg->flags |= MONO_CFG_HAS_VARARGS;
3566 /* mono_array_new_va () needs a vararg calling convention */
3567 cfg->disable_llvm = TRUE;
3569 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
3570 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
3574 mono_emit_load_got_addr (MonoCompile *cfg)
3576 MonoInst *getaddr, *dummy_use;
3578 if (!cfg->got_var || cfg->got_var_allocated)
3581 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
3582 getaddr->dreg = cfg->got_var->dreg;
3584 /* Add it to the start of the first bblock */
3585 if (cfg->bb_entry->code) {
3586 getaddr->next = cfg->bb_entry->code;
3587 cfg->bb_entry->code = getaddr;
3590 MONO_ADD_INS (cfg->bb_entry, getaddr);
3592 cfg->got_var_allocated = TRUE;
3595 * Add a dummy use to keep the got_var alive, since real uses might
3596 * only be generated by the back ends.
3597 * Add it to end_bblock, so the variable's lifetime covers the whole
3599 * It would be better to make the usage of the got var explicit in all
3600 * cases when the backend needs it (i.e. calls, throw etc.), so this
3601 * wouldn't be needed.
3603 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
3604 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Cached value of the MONO_INLINELIMIT env var (defaults to INLINE_LENGTH_LIMIT) */
3607 static int inline_limit;
3608 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *   Decide whether METHOD is eligible for inlining into the method being
 * compiled in CFG (size limit, noinlining/synchronized flags, cctor state,
 * declarative security, soft-float restrictions).
 * NOTE(review): this listing appears sampled/truncated; the TRUE/FALSE
 * return statements of most guards are not visible here.
 */
3611 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
3613 MonoMethodHeaderSummary header;
3615 #ifdef MONO_ARCH_SOFT_FLOAT
3616 MonoMethodSignature *sig = mono_method_signature (method);
3620 if (cfg->generic_sharing_context)
3623 if (cfg->inline_depth > 10)
3626 #ifdef MONO_ARCH_HAVE_LMF_OPS
3627 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3628 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
/* NOTE(review): `signature` is not declared in any visible scope here — this
 * looks like it should be mono_method_signature (method)->ret; confirm, as
 * this branch only compiles when MONO_ARCH_HAVE_LMF_OPS is defined. */
3629 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
3634 if (!mono_method_get_header_summary (method, &header))
3637 /*runtime, icall and pinvoke are checked by summary call*/
3638 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
3639 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
3640 (method->klass->marshalbyref) ||
3644 /* also consider num_locals? */
3645 /* Do the size check early to avoid creating vtables */
3646 if (!inline_limit_inited) {
3647 if (getenv ("MONO_INLINELIMIT"))
3648 inline_limit = atoi (getenv ("MONO_INLINELIMIT"));
3650 inline_limit = INLINE_LENGTH_LIMIT;
3651 inline_limit_inited = TRUE;
3653 if (header.code_size >= inline_limit)
3657 * if we can initialize the class of the method right away, we do,
3658 * otherwise we don't allow inlining if the class needs initialization,
3659 * since it would mean inserting a call to mono_runtime_class_init()
3660 * inside the inlined code
3662 if (!(cfg->opt & MONO_OPT_SHARED)) {
3663 if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
3664 if (cfg->run_cctors && method->klass->has_cctor) {
3665 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
3666 if (!method->klass->runtime_info)
3667 /* No vtable created yet */
3669 vtable = mono_class_vtable (cfg->domain, method->klass);
3672 /* This makes so that inline cannot trigger */
3673 /* .cctors: too many apps depend on them */
3674 /* running with a specific order... */
3675 if (! vtable->initialized)
3677 mono_runtime_class_init (vtable);
3679 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
3680 if (!method->klass->runtime_info)
3681 /* No vtable created yet */
3683 vtable = mono_class_vtable (cfg->domain, method->klass);
3686 if (!vtable->initialized)
3691 * If we're compiling for shared code
3692 * the cctor will need to be run at aot method load time, for example,
3693 * or at the end of the compilation of the inlining method.
3695 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
3700 * CAS - do not inline methods with declarative security
3701 * Note: this has to be before any possible return TRUE;
3703 if (mono_method_has_declsec (method))
3706 #ifdef MONO_ARCH_SOFT_FLOAT
/* Soft-float targets cannot inline methods taking or returning R4 values */
3708 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
3710 for (i = 0; i < sig->param_count; ++i)
3711 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
3719 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoVTable *vtable)
3721 if (vtable->initialized && !cfg->compile_aot)
3724 if (vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
3727 if (!mono_class_needs_cctor_run (vtable->klass, method))
3730 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (vtable->klass == method->klass))
3731 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *   Emit IR computing the address of element INDEX of the rank-1 array ARR
 * with element class KLASS, including a bounds check. Returns the address
 * instruction (STACK_PTR).
 * NOTE(review): this listing appears sampled/truncated; #else/#endif lines
 * and some braces/returns of the conditional branches are not visible here.
 */
3738 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index)
3742 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
3744 mono_class_init (klass);
3745 size = mono_class_array_element_size (klass);
3747 mult_reg = alloc_preg (cfg);
3748 array_reg = arr->dreg;
3749 index_reg = index->dreg;
3751 #if SIZEOF_REGISTER == 8
3752 /* The array reg is 64 bits but the index reg is only 32 */
3753 if (COMPILE_LLVM (cfg)) {
/* LLVM handles the widening itself, so the 32-bit index can be used as-is */
3755 index2_reg = index_reg;
3757 index2_reg = alloc_preg (cfg);
3758 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
/* 32-bit target: a 64-bit index on the stack must be narrowed first */
3761 if (index->type == STACK_I8) {
3762 index2_reg = alloc_preg (cfg);
3763 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
3765 index2_reg = index_reg;
3769 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
3771 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Fast path: fold scale + offset into a single LEA for power-of-two sizes */
3772 if (size == 1 || size == 2 || size == 4 || size == 8) {
3773 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
3775 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
3776 ins->type = STACK_PTR;
/* Generic path: addr = arr + index * size + offsetof (MonoArray, vector) */
3782 add_reg = alloc_preg (cfg);
3784 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
3785 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
3786 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3787 ins->type = STACK_PTR;
3788 MONO_ADD_INS (cfg->cbb, ins);
3793 #ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *   Emit IR computing the address of element [INDEX_INS1, INDEX_INS2] of the
 * rank-2 array ARR, range-checking both indexes against the per-dimension
 * bounds. Returns the address instruction (STACK_MP). Only compiled on
 * targets with native multiply support, since it emits pointer multiplies.
 */
3795 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
3797 int bounds_reg = alloc_preg (cfg);
3798 int add_reg = alloc_preg (cfg);
3799 int mult_reg = alloc_preg (cfg);
3800 int mult2_reg = alloc_preg (cfg);
3801 int low1_reg = alloc_preg (cfg);
3802 int low2_reg = alloc_preg (cfg);
3803 int high1_reg = alloc_preg (cfg);
3804 int high2_reg = alloc_preg (cfg);
3805 int realidx1_reg = alloc_preg (cfg);
3806 int realidx2_reg = alloc_preg (cfg);
3807 int sum_reg = alloc_preg (cfg);
3812 mono_class_init (klass);
3813 size = mono_class_array_element_size (klass);
3815 index1 = index_ins1->dreg;
3816 index2 = index_ins2->dreg;
3818 /* range checking */
3819 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
3820 arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
/* Dimension 0: realidx1 = index1 - lower_bound; fail unless realidx1 < length */
3822 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
3823 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3824 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
3825 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
3826 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
3827 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
3828 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Dimension 1: same check against the second MonoArrayBounds entry */
3830 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
3831 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3832 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
3833 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
3834 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
3835 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
3836 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* addr = arr + (realidx1 * length2 + realidx2) * size + offsetof (MonoArray, vector) */
3838 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
3839 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
3840 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
3841 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
3842 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3844 ins->type = STACK_MP;
3846 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *   Dispatch helper for the Get/Set/Address methods of multi-dimensional
 * arrays: rank 1 and (on native-mul targets) rank 2 get inline address
 * computations, anything else falls back to a marshalling Address wrapper.
 * NOTE(review): this listing appears sampled/truncated; the rank==1 guard
 * before the first return and the trailing return are not visible here.
 */
3853 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
3857 MonoMethod *addr_method;
/* For a setter the final value parameter is not an index */
3860 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
3863 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1]);
3865 #ifndef MONO_ARCH_EMULATE_MUL_DIV
3866 /* emit_ldelema_2 depends on OP_LMUL */
3867 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
3868 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
/* General case: call the generated Address wrapper for this rank/element size */
3872 element_size = mono_class_array_element_size (cmethod->klass->element_class);
3873 addr_method = mono_marshal_get_array_address (rank, element_size);
3874 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
3879 static MonoBreakPolicy
3880 always_insert_breakpoint (MonoMethod *method)
3882 return MONO_BREAK_POLICY_ALWAYS;
3885 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
3888 * mono_set_break_policy:
3889 * policy_callback: the new callback function
3891 * Allow embedders to decide wherther to actually obey breakpoint instructions
3892 * (both break IL instructions and Debugger.Break () method calls), for example
3893 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
3894 * untrusted or semi-trusted code.
3896 * @policy_callback will be called every time a break point instruction needs to
3897 * be inserted with the method argument being the method that calls Debugger.Break()
3898 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
3899 * if it wants the breakpoint to not be effective in the given method.
3900 * #MONO_BREAK_POLICY_ALWAYS is the default.
3903 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
3905 if (policy_callback)
3906 break_policy_func = policy_callback;
3908 break_policy_func = always_insert_breakpoint;
3912 should_insert_brekpoint (MonoMethod *method) {
3913 switch (break_policy_func (method)) {
3914 case MONO_BREAK_POLICY_ALWAYS:
3916 case MONO_BREAK_POLICY_NEVER:
3918 case MONO_BREAK_POLICY_ON_DBG:
3919 return mono_debug_using_mono_debugger ();
3921 g_warning ("Incorrect value returned from break policy callback");
/*
 * mini_emit_inst_for_method:
 *   Try to replace a call to CMETHOD with inlined intrinsic IR (String,
 * Object, Array, RuntimeHelpers, Thread, Monitor, Interlocked, Debugger,
 * etc.). Returns the emitted instruction, or falls through to the
 * arch-specific hook when no generic intrinsic applies.
 * NOTE(review): this listing appears sampled/truncated; many original lines
 * (braces, #else/#endif, return statements) are not visible here.
 */
3927 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
3929 MonoInst *ins = NULL;
/* Resolved lazily once and cached for the lifetime of the process */
3931 static MonoClass *runtime_helpers_class = NULL;
3932 if (! runtime_helpers_class)
3933 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
3934 "System.Runtime.CompilerServices", "RuntimeHelpers");
3936 if (cmethod->klass == mono_defaults.string_class) {
3937 if (strcmp (cmethod->name, "get_Chars") == 0) {
3938 int dreg = alloc_ireg (cfg);
3939 int index_reg = alloc_preg (cfg);
3940 int mult_reg = alloc_preg (cfg);
3941 int add_reg = alloc_preg (cfg);
3943 #if SIZEOF_REGISTER == 8
3944 /* The array reg is 64 bits but the index reg is only 32 */
3945 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
3947 index_reg = args [1]->dreg;
3949 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
3951 #if defined(TARGET_X86) || defined(TARGET_AMD64)
3952 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
3953 add_reg = ins->dreg;
3954 /* Avoid a warning */
3956 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
/* Generic path: chars are 2 bytes, so scale the index by shifting left 1 */
3959 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
3960 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
3961 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
3962 add_reg, G_STRUCT_OFFSET (MonoString, chars));
3964 type_from_op (ins, NULL, NULL);
3966 } else if (strcmp (cmethod->name, "get_Length") == 0) {
3967 int dreg = alloc_ireg (cfg);
3968 /* Decompose later to allow more optimizations */
3969 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
3970 ins->type = STACK_I4;
3971 cfg->cbb->has_array_access = TRUE;
3972 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
3975 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
3976 int mult_reg = alloc_preg (cfg);
3977 int add_reg = alloc_preg (cfg);
3979 /* The corlib functions check for oob already. */
3980 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
3981 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
3982 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
3985 } else if (cmethod->klass == mono_defaults.object_class) {
3987 if (strcmp (cmethod->name, "GetType") == 0) {
3988 int dreg = alloc_preg (cfg);
3989 int vt_reg = alloc_preg (cfg);
/* obj->vtable->type; the _FAULT variant also null-checks the receiver */
3990 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3991 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
3992 type_from_op (ins, NULL, NULL);
3995 #if !defined(MONO_ARCH_EMULATE_MUL_DIV) && !defined(HAVE_MOVING_COLLECTOR)
3996 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0) {
3997 int dreg = alloc_ireg (cfg);
3998 int t1 = alloc_ireg (cfg);
/* Hash derived from the (non-moving) object address: (addr << 3) * 2654435761 */
4000 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
4001 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
4002 ins->type = STACK_I4;
4006 } else if (strcmp (cmethod->name, ".ctor") == 0) {
4007 MONO_INST_NEW (cfg, ins, OP_NOP);
4008 MONO_ADD_INS (cfg->cbb, ins);
4012 } else if (cmethod->klass == mono_defaults.array_class) {
4013 if (cmethod->name [0] != 'g')
4016 if (strcmp (cmethod->name, "get_Rank") == 0) {
4017 int dreg = alloc_ireg (cfg);
4018 int vtable_reg = alloc_preg (cfg);
4019 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
4020 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
4021 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
4022 vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
4023 type_from_op (ins, NULL, NULL);
4026 } else if (strcmp (cmethod->name, "get_Length") == 0) {
4027 int dreg = alloc_ireg (cfg);
4029 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4030 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
4031 type_from_op (ins, NULL, NULL);
4036 } else if (cmethod->klass == runtime_helpers_class) {
4038 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
4039 EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
4043 } else if (cmethod->klass == mono_defaults.thread_class) {
4044 if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
4045 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
4046 MONO_ADD_INS (cfg->cbb, ins);
4048 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
4049 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
4050 MONO_ADD_INS (cfg->cbb, ins);
4053 } else if (cmethod->klass == mono_defaults.monitor_class) {
4054 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
4055 if (strcmp (cmethod->name, "Enter") == 0) {
4058 if (COMPILE_LLVM (cfg)) {
4060 * Pass the argument normally, the LLVM backend will handle the
4061 * calling convention problems.
4063 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
/* Non-LLVM path: the object is passed in a fixed register to the trampoline */
4065 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
4066 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
4067 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
4068 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
4071 return (MonoInst*)call;
4072 } else if (strcmp (cmethod->name, "Exit") == 0) {
4075 if (COMPILE_LLVM (cfg)) {
4076 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
4078 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
4079 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
4080 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
4081 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
4084 return (MonoInst*)call;
4086 #elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
4087 MonoMethod *fast_method = NULL;
4089 /* Avoid infinite recursion */
4090 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
4091 (strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
4092 strcmp (cfg->method->name, "FastMonitorExit") == 0))
4095 if (strcmp (cmethod->name, "Enter") == 0 ||
4096 strcmp (cmethod->name, "Exit") == 0)
4097 fast_method = mono_monitor_get_fast_path (cmethod);
4101 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
4103 } else if (mini_class_is_system_array (cmethod->klass) &&
4104 strcmp (cmethod->name, "GetGenericValueImpl") == 0) {
4105 MonoInst *addr, *store, *load;
4106 MonoClass *eklass = mono_class_from_mono_type (fsig->params [1]);
/* Load the element in place of the intrinsic call: *(T*)args[2] = arr[args[1]] */
4108 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1]);
4109 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
4110 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
4112 } else if (cmethod->klass->image == mono_defaults.corlib &&
4113 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
4114 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
4117 #if SIZEOF_REGISTER == 8
4118 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
4119 /* 64 bit reads are already atomic */
4120 MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
4121 ins->dreg = mono_alloc_preg (cfg);
4122 ins->inst_basereg = args [0]->dreg;
4123 ins->inst_offset = 0;
4124 MONO_ADD_INS (cfg->cbb, ins);
4128 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
/* Increment/Decrement/Add are all atomic-add with a different addend */
4129 if (strcmp (cmethod->name, "Increment") == 0) {
4130 MonoInst *ins_iconst;
4133 if (fsig->params [0]->type == MONO_TYPE_I4)
4134 opcode = OP_ATOMIC_ADD_NEW_I4;
4135 #if SIZEOF_REGISTER == 8
4136 else if (fsig->params [0]->type == MONO_TYPE_I8)
4137 opcode = OP_ATOMIC_ADD_NEW_I8;
4140 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4141 ins_iconst->inst_c0 = 1;
4142 ins_iconst->dreg = mono_alloc_ireg (cfg);
4143 MONO_ADD_INS (cfg->cbb, ins_iconst);
4145 MONO_INST_NEW (cfg, ins, opcode);
4146 ins->dreg = mono_alloc_ireg (cfg);
4147 ins->inst_basereg = args [0]->dreg;
4148 ins->inst_offset = 0;
4149 ins->sreg2 = ins_iconst->dreg;
4150 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4151 MONO_ADD_INS (cfg->cbb, ins);
4153 } else if (strcmp (cmethod->name, "Decrement") == 0) {
4154 MonoInst *ins_iconst;
4157 if (fsig->params [0]->type == MONO_TYPE_I4)
4158 opcode = OP_ATOMIC_ADD_NEW_I4;
4159 #if SIZEOF_REGISTER == 8
4160 else if (fsig->params [0]->type == MONO_TYPE_I8)
4161 opcode = OP_ATOMIC_ADD_NEW_I8;
4164 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4165 ins_iconst->inst_c0 = -1;
4166 ins_iconst->dreg = mono_alloc_ireg (cfg);
4167 MONO_ADD_INS (cfg->cbb, ins_iconst);
4169 MONO_INST_NEW (cfg, ins, opcode);
4170 ins->dreg = mono_alloc_ireg (cfg);
4171 ins->inst_basereg = args [0]->dreg;
4172 ins->inst_offset = 0;
4173 ins->sreg2 = ins_iconst->dreg;
4174 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4175 MONO_ADD_INS (cfg->cbb, ins);
4177 } else if (strcmp (cmethod->name, "Add") == 0) {
4180 if (fsig->params [0]->type == MONO_TYPE_I4)
4181 opcode = OP_ATOMIC_ADD_NEW_I4;
4182 #if SIZEOF_REGISTER == 8
4183 else if (fsig->params [0]->type == MONO_TYPE_I8)
4184 opcode = OP_ATOMIC_ADD_NEW_I8;
4188 MONO_INST_NEW (cfg, ins, opcode);
4189 ins->dreg = mono_alloc_ireg (cfg);
4190 ins->inst_basereg = args [0]->dreg;
4191 ins->inst_offset = 0;
4192 ins->sreg2 = args [1]->dreg;
4193 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4194 MONO_ADD_INS (cfg->cbb, ins);
4197 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
4199 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
4200 if (strcmp (cmethod->name, "Exchange") == 0) {
4202 gboolean is_ref = fsig->params [0]->type == MONO_TYPE_OBJECT;
4204 if (fsig->params [0]->type == MONO_TYPE_I4)
4205 opcode = OP_ATOMIC_EXCHANGE_I4;
4206 #if SIZEOF_REGISTER == 8
4207 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I8) ||
4208 (fsig->params [0]->type == MONO_TYPE_I))
4209 opcode = OP_ATOMIC_EXCHANGE_I8;
4211 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I))
4212 opcode = OP_ATOMIC_EXCHANGE_I4;
4217 MONO_INST_NEW (cfg, ins, opcode);
4218 ins->dreg = mono_alloc_ireg (cfg);
4219 ins->inst_basereg = args [0]->dreg;
4220 ins->inst_offset = 0;
4221 ins->sreg2 = args [1]->dreg;
4222 MONO_ADD_INS (cfg->cbb, ins);
4224 switch (fsig->params [0]->type) {
4226 ins->type = STACK_I4;
4230 ins->type = STACK_I8;
4232 case MONO_TYPE_OBJECT:
4233 ins->type = STACK_OBJ;
4236 g_assert_not_reached ();
4239 #if HAVE_WRITE_BARRIERS
/* A reference was stored into the location, so the GC write barrier must run */
4241 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
4242 mono_emit_method_call (cfg, write_barrier, &args [0], NULL);
4246 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
4248 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS
4249 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
4251 gboolean is_ref = MONO_TYPE_IS_REFERENCE (fsig->params [1]);
4252 if (fsig->params [1]->type == MONO_TYPE_I4)
4254 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I)
4255 size = sizeof (gpointer);
4256 else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I4)
4259 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
4260 ins->dreg = alloc_ireg (cfg);
4261 ins->sreg1 = args [0]->dreg;
4262 ins->sreg2 = args [1]->dreg;
4263 ins->sreg3 = args [2]->dreg;
4264 ins->type = STACK_I4;
4265 MONO_ADD_INS (cfg->cbb, ins);
4266 } else if (size == 8) {
4267 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
4268 ins->dreg = alloc_ireg (cfg);
4269 ins->sreg1 = args [0]->dreg;
4270 ins->sreg2 = args [1]->dreg;
4271 ins->sreg3 = args [2]->dreg;
4272 ins->type = STACK_I8;
4273 MONO_ADD_INS (cfg->cbb, ins);
4275 /* g_assert_not_reached (); */
4277 #if HAVE_WRITE_BARRIERS
4279 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
4280 mono_emit_method_call (cfg, write_barrier, &args [0], NULL);
4284 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS */
4288 } else if (cmethod->klass->image == mono_defaults.corlib) {
4289 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
4290 && strcmp (cmethod->klass->name, "Debugger") == 0) {
4291 if (should_insert_brekpoint (cfg->method))
4292 MONO_INST_NEW (cfg, ins, OP_BREAK);
4294 MONO_INST_NEW (cfg, ins, OP_NOP);
4295 MONO_ADD_INS (cfg->cbb, ins);
4298 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
4299 && strcmp (cmethod->klass->name, "Environment") == 0) {
/* Constant-folded at compile time depending on the target platform */
4301 EMIT_NEW_ICONST (cfg, ins, 1);
4303 EMIT_NEW_ICONST (cfg, ins, 0);
4307 } else if (cmethod->klass == mono_defaults.math_class) {
4309 * There is general branches code for Min/Max, but it does not work for
4311 * http://everything2.com/?node_id=1051618
4315 #ifdef MONO_ARCH_SIMD_INTRINSICS
4316 if (cfg->opt & MONO_OPT_SIMD) {
4317 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
/* No generic intrinsic matched; give the architecture backend a chance */
4323 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
4327 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *   Redirect selected internal calls to managed implementations; currently
 * only String.InternalAllocateStr is rerouted to the GC's managed allocator
 * (skipped when the string-allocation profiler event is active, so the
 * profiler still sees allocations).
 * NOTE(review): this listing appears sampled/truncated; the fall-through
 * return (NULL) and some closing braces are not visible here.
 */
4330 inline static MonoInst*
4331 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
4332 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
4334 if (method->klass == mono_defaults.string_class) {
4335 /* managed string allocation support */
4336 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_STRING_ALLOC)) {
4337 MonoInst *iargs [2];
4338 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4339 MonoMethod *managed_alloc = NULL;
4341 g_assert (vtable); /*Should not fail since it System.String*/
4342 #ifndef MONO_CROSS_COMPILE
4343 managed_alloc = mono_gc_get_managed_allocator (vtable, FALSE);
/* Call the managed allocator with (vtable, length) instead of the icall */
4347 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4348 iargs [1] = args [0];
4349 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
/*
 * mono_save_args:
 *   When inlining, create an OP_LOCAL variable for each argument of SIG
 * (including the implicit this) and store the corresponding caller stack
 * value SP[i] into it, so the inlined body can address its arguments.
 * NOTE(review): this listing appears sampled/truncated; loop braces and the
 * sp advance are not visible here.
 */
4356 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
4358 MonoInst *store, *temp;
4361 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
/* The this argument's static type comes from the stack, not the signature */
4362 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
4365 * FIXME: We should use *args++ = sp [0], but that would mean the arg
4366 * would be different than the MonoInst's used to represent arguments, and
4367 * the ldelema implementation can't deal with that.
4368 * Solution: When ldelema is used on an inline argument, create a var for
4369 * it, emit ldelema on that var, and emit the saving code below in
4370 * inline_method () if needed.
4372 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
4373 cfg->args [i] = temp;
4374 /* This uses cfg->args [i] which is set by the preceding line */
4375 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
4376 store->cil_code = sp [0]->cil_code;
4381 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
4382 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
4384 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *   Debug aid: only allow inlining of methods whose full name starts with the
 * prefix given in the MONO_INLINE_CALLED_METHOD_NAME_LIMIT env var (cached in
 * a static on first use; an empty/unset var allows everything).
 * NOTE(review): this listing appears sampled/truncated; the declaration of
 * strncmp_result and the else/return-TRUE tail are not visible here.
 */
4386 check_inline_called_method_name_limit (MonoMethod *called_method)
4389 static char *limit = NULL;
4391 if (limit == NULL) {
4392 char *limit_string = getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
4394 if (limit_string != NULL)
4395 limit = limit_string;
4397 limit = (char *) "";
4400 if (limit [0] != '\0') {
4401 char *called_method_name = mono_method_full_name (called_method, TRUE);
4403 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
4404 g_free (called_method_name);
4406 //return (strncmp_result <= 0);
4407 return (strncmp_result == 0);
4414 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *   Debug aid mirroring check_inline_called_method_name_limit, but filtering
 * on the CALLER's full name via MONO_INLINE_CALLER_METHOD_NAME_LIMIT.
 * NOTE(review): this listing appears sampled/truncated; the declaration of
 * strncmp_result and the else/return-TRUE tail are not visible here.
 */
4416 check_inline_caller_method_name_limit (MonoMethod *caller_method)
4419 static char *limit = NULL;
4421 if (limit == NULL) {
4422 char *limit_string = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
4423 if (limit_string != NULL) {
4424 limit = limit_string;
4426 limit = (char *) "";
4430 if (limit [0] != '\0') {
4431 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
4433 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
4434 g_free (caller_method_name);
4436 //return (strncmp_result <= 0);
4437 return (strncmp_result == 0);
4445 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
4446 guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_allways)
4448 MonoInst *ins, *rvar = NULL;
4449 MonoMethodHeader *cheader;
4450 MonoBasicBlock *ebblock, *sbblock;
4452 MonoMethod *prev_inlined_method;
4453 MonoInst **prev_locals, **prev_args;
4454 MonoType **prev_arg_types;
4455 guint prev_real_offset;
4456 GHashTable *prev_cbb_hash;
4457 MonoBasicBlock **prev_cil_offset_to_bb;
4458 MonoBasicBlock *prev_cbb;
4459 unsigned char* prev_cil_start;
4460 guint32 prev_cil_offset_to_bb_len;
4461 MonoMethod *prev_current_method;
4462 MonoGenericContext *prev_generic_context;
4463 gboolean ret_var_set, prev_ret_var_set;
4465 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
4467 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
4468 if ((! inline_allways) && ! check_inline_called_method_name_limit (cmethod))
4471 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
4472 if ((! inline_allways) && ! check_inline_caller_method_name_limit (cfg->method))
4476 if (cfg->verbose_level > 2)
4477 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4479 if (!cmethod->inline_info) {
4480 mono_jit_stats.inlineable_methods++;
4481 cmethod->inline_info = 1;
4483 /* allocate space to store the return value */
4484 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
4485 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
4488 /* allocate local variables */
4489 cheader = mono_method_get_header (cmethod);
4490 prev_locals = cfg->locals;
4491 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
4492 for (i = 0; i < cheader->num_locals; ++i)
4493 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
4495 /* allocate start and end blocks */
4496 /* This is needed so if the inline is aborted, we can clean up */
4497 NEW_BBLOCK (cfg, sbblock);
4498 sbblock->real_offset = real_offset;
4500 NEW_BBLOCK (cfg, ebblock);
4501 ebblock->block_num = cfg->num_bblocks++;
4502 ebblock->real_offset = real_offset;
4504 prev_args = cfg->args;
4505 prev_arg_types = cfg->arg_types;
4506 prev_inlined_method = cfg->inlined_method;
4507 cfg->inlined_method = cmethod;
4508 cfg->ret_var_set = FALSE;
4509 cfg->inline_depth ++;
4510 prev_real_offset = cfg->real_offset;
4511 prev_cbb_hash = cfg->cbb_hash;
4512 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
4513 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
4514 prev_cil_start = cfg->cil_start;
4515 prev_cbb = cfg->cbb;
4516 prev_current_method = cfg->current_method;
4517 prev_generic_context = cfg->generic_context;
4518 prev_ret_var_set = cfg->ret_var_set;
4520 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, *ip == CEE_CALLVIRT);
4522 ret_var_set = cfg->ret_var_set;
4524 cfg->inlined_method = prev_inlined_method;
4525 cfg->real_offset = prev_real_offset;
4526 cfg->cbb_hash = prev_cbb_hash;
4527 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
4528 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
4529 cfg->cil_start = prev_cil_start;
4530 cfg->locals = prev_locals;
4531 cfg->args = prev_args;
4532 cfg->arg_types = prev_arg_types;
4533 cfg->current_method = prev_current_method;
4534 cfg->generic_context = prev_generic_context;
4535 cfg->ret_var_set = prev_ret_var_set;
4536 cfg->inline_depth --;
4538 if ((costs >= 0 && costs < 60) || inline_allways) {
4539 if (cfg->verbose_level > 2)
4540 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4542 mono_jit_stats.inlined_methods++;
4544 /* always add some code to avoid block split failures */
4545 MONO_INST_NEW (cfg, ins, OP_NOP);
4546 MONO_ADD_INS (prev_cbb, ins);
4548 prev_cbb->next_bb = sbblock;
4549 link_bblock (cfg, prev_cbb, sbblock);
4552 * Get rid of the begin and end bblocks if possible to aid local
4555 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
4557 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
4558 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
4560 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
4561 MonoBasicBlock *prev = ebblock->in_bb [0];
4562 mono_merge_basic_blocks (cfg, prev, ebblock);
4564 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
4565 mono_merge_basic_blocks (cfg, prev_cbb, prev);
4566 cfg->cbb = prev_cbb;
4574 * If the inlined method contains only a throw, then the ret var is not
4575 * set, so set it to a dummy value.
4578 static double r8_0 = 0.0;
4580 switch (rvar->type) {
4582 MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
4585 MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
4590 MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
4593 MONO_INST_NEW (cfg, ins, OP_R8CONST);
4594 ins->type = STACK_R8;
4595 ins->inst_p0 = (void*)&r8_0;
4596 ins->dreg = rvar->dreg;
4597 MONO_ADD_INS (cfg->cbb, ins);
4600 MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (fsig->ret));
4603 g_assert_not_reached ();
4607 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
4612 if (cfg->verbose_level > 2)
4613 printf ("INLINE ABORTED %s\n", mono_method_full_name (cmethod, TRUE));
4614 cfg->exception_type = MONO_EXCEPTION_NONE;
4615 mono_loader_clear_error ();
4617 /* This gets rid of the newly added bblocks */
4618 cfg->cbb = prev_cbb;
4624 * Some of these comments may well be out-of-date.
4625 * Design decisions: we do a single pass over the IL code (and we do bblock
4626 * splitting/merging in the few cases when it's required: a back jump to an IL
4627 * address that was not already seen as bblock starting point).
4628 * Code is validated as we go (full verification is still better left to metadata/verify.c).
4629 * Complex operations are decomposed in simpler ones right away. We need to let the
4630 * arch-specific code peek and poke inside this process somehow (except when the
4631 * optimizations can take advantage of the full semantic info of coarse opcodes).
4632 * All the opcodes of the form opcode.s are 'normalized' to opcode.
4633 * MonoInst->opcode initially is the IL opcode or some simplification of that
4634 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
4635 * opcode with value bigger than OP_LAST.
4636 * At this point the IR can be handed over to an interpreter, a dumb code generator
4637 * or to the optimizing code generator that will translate it to SSA form.
4639 * Profiling directed optimizations.
4640 * We may compile by default with few or no optimizations and instrument the code
4641 * or the user may indicate what methods to optimize the most either in a config file
4642 * or through repeated runs where the compiler applies offline the optimizations to
4643 * each method and then decides if it was worth it.
/*
 * IL-stream sanity-check macros used inside mono_method_to_ir ().
 * Each expands to a bare `if` statement (no do/while wrapper), so every
 * use must stand alone as a full statement.  On failure they jump via
 * UNVERIFIED, except CHECK_TYPELOAD which records the failing class in
 * cfg->exception_ptr and jumps to load_error.  They rely on locals of
 * the enclosing function being in scope: sp, stack_start, num_args,
 * header, ip, end, cfg.
 */
/* Fail if the instruction has no evaluation-stack type assigned yet. */
4646 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
/* Require at least `num` operands on the evaluation stack. */
4647 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
/* Require room to push `num` more values without exceeding .maxstack. */
4648 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
/* Validate an argument index; unsigned cast also rejects negatives. */
4649 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
/* Validate a local-variable index; unsigned cast also rejects negatives. */
4650 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
/* Ensure `size` bytes of operand are still available in the IL stream. */
4651 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
/* Bail out if verification has already marked the method unverifiable. */
4652 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
/* NULL or broken class => record it and take the type-load error path. */
4653 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; goto load_error;}
4655 /* offset from br.s -> br like opcodes */
/* Distance in opcode numbering between a short-form branch (br.s family)
 * and its long-form counterpart (br family). */
4656 #define BIG_BRANCH_OFFSET 13
4659 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
4661 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
4663 return b == NULL || b == bb;
4667 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
4669 unsigned char *ip = start;
4670 unsigned char *target;
4673 MonoBasicBlock *bblock;
4674 const MonoOpcode *opcode;
4677 cli_addr = ip - start;
4678 i = mono_opcode_value ((const guint8 **)&ip, end);
4681 opcode = &mono_opcodes [i];
4682 switch (opcode->argument) {
4683 case MonoInlineNone:
4686 case MonoInlineString:
4687 case MonoInlineType:
4688 case MonoInlineField:
4689 case MonoInlineMethod:
4692 case MonoShortInlineR:
4699 case MonoShortInlineVar:
4700 case MonoShortInlineI:
4703 case MonoShortInlineBrTarget:
4704 target = start + cli_addr + 2 + (signed char)ip [1];
4705 GET_BBLOCK (cfg, bblock, target);
4708 GET_BBLOCK (cfg, bblock, ip);
4710 case MonoInlineBrTarget:
4711 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
4712 GET_BBLOCK (cfg, bblock, target);
4715 GET_BBLOCK (cfg, bblock, ip);
4717 case MonoInlineSwitch: {
4718 guint32 n = read32 (ip + 1);
4721 cli_addr += 5 + 4 * n;
4722 target = start + cli_addr;
4723 GET_BBLOCK (cfg, bblock, target);
4725 for (j = 0; j < n; ++j) {
4726 target = start + cli_addr + (gint32)read32 (ip);
4727 GET_BBLOCK (cfg, bblock, target);
4737 g_assert_not_reached ();
4740 if (i == CEE_THROW) {
4741 unsigned char *bb_start = ip - 1;
4743 /* Find the start of the bblock containing the throw */
4745 while ((bb_start >= start) && !bblock) {
4746 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
4750 bblock->out_of_line = 1;
4759 static inline MonoMethod *
4760 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4764 if (m->wrapper_type != MONO_WRAPPER_NONE)
4765 return mono_method_get_wrapper_data (m, token);
4767 method = mono_get_method_full (m->klass->image, token, klass, context);
4772 static inline MonoMethod *
4773 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4775 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
4777 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
4783 static inline MonoClass*
4784 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
4788 if (method->wrapper_type != MONO_WRAPPER_NONE)
4789 klass = mono_method_get_wrapper_data (method, token);
4791 klass = mono_class_get_full (method->klass->image, token, context);
4793 mono_class_init (klass);
4798 * Returns TRUE if the JIT should abort inlining because "callee"
4799 * is influenced by security attributes.
4802 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
4806 if ((cfg->method != caller) && mono_method_has_declsec (callee)) {
4810 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
4811 if (result == MONO_JIT_SECURITY_OK)
4814 if (result == MONO_JIT_LINKDEMAND_ECMA) {
4815 /* Generate code to throw a SecurityException before the actual call/link */
4816 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4819 NEW_ICONST (cfg, args [0], 4);
4820 NEW_METHODCONST (cfg, args [1], caller);
4821 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
4822 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
4823 /* don't hide previous results */
4824 cfg->exception_type = MONO_EXCEPTION_SECURITY_LINKDEMAND;
4825 cfg->exception_data = result;
4833 throw_exception (void)
4835 static MonoMethod *method = NULL;
4838 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4839 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
4846 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
4848 MonoMethod *thrower = throw_exception ();
4851 EMIT_NEW_PCONST (cfg, args [0], ex);
4852 mono_emit_method_call (cfg, thrower, args, NULL);
4856 * Return the original method if a wrapper is specified. We can only access
4857 * the custom attributes from the original method.
4860 get_original_method (MonoMethod *method)
4862 if (method->wrapper_type == MONO_WRAPPER_NONE)
4865 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
4866 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
4869 /* in other cases we need to find the original method */
4870 return mono_marshal_method_from_wrapper (method);
4874 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
4875 MonoBasicBlock *bblock, unsigned char *ip)
4877 /* there's no restriction to access Transparent or SafeCritical fields, so we only check calls to Critical methods */
4878 if (mono_security_core_clr_class_level (mono_field_get_parent (field)) != MONO_SECURITY_CORE_CLR_CRITICAL)
4881 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
4882 caller = get_original_method (caller);
4886 /* caller is Critical! only SafeCritical and Critical callers can access the field, so we throw if caller is Transparent */
4887 if (mono_security_core_clr_method_level (caller, TRUE) == MONO_SECURITY_CORE_CLR_TRANSPARENT)
4888 emit_throw_exception (cfg, mono_get_exception_field_access ());
4892 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
4893 MonoBasicBlock *bblock, unsigned char *ip)
4895 /* there's no restriction to call Transparent or SafeCritical code, so we only check calls to Critical methods */
4896 if (mono_security_core_clr_method_level (callee, TRUE) != MONO_SECURITY_CORE_CLR_CRITICAL)
4899 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
4900 caller = get_original_method (caller);
4904 /* caller is Critical! only SafeCritical and Critical callers can call it, so we throw if the caller is Transparent */
4905 if (mono_security_core_clr_method_level (caller, TRUE) == MONO_SECURITY_CORE_CLR_TRANSPARENT)
4906 emit_throw_exception (cfg, mono_get_exception_method_access ());
4910 * Check that the IL instructions at ip are the array initialization
4911 * sequence and return the pointer to the data and the size.
4914 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
4917 * newarr[System.Int32]
4919 * ldtoken field valuetype ...
4920 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
4922 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
4923 guint32 token = read32 (ip + 7);
4924 guint32 field_token = read32 (ip + 2);
4925 guint32 field_index = field_token & 0xffffff;
4927 const char *data_ptr;
4929 MonoMethod *cmethod;
4930 MonoClass *dummy_class;
4931 MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
4937 *out_field_token = field_token;
4939 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
4942 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
4944 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
4945 case MONO_TYPE_BOOLEAN:
4949 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
4950 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
4951 case MONO_TYPE_CHAR:
4961 return NULL; /* stupid ARM FP swapped format */
4971 if (size > mono_type_size (field->type, &dummy_align))
4974 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
4975 if (!method->klass->image->dynamic) {
4976 field_index = read32 (ip + 2) & 0xffffff;
4977 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
4978 data_ptr = mono_image_rva_map (method->klass->image, rva);
4979 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
4980 /* for aot code we do the lookup on load */
4981 if (aot && data_ptr)
4982 return GUINT_TO_POINTER (rva);
4984 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
4986 data_ptr = mono_field_get_data (field);
4994 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
4996 char *method_fname = mono_method_full_name (method, TRUE);
4999 if (mono_method_get_header (method)->code_size == 0)
5000 method_code = g_strdup ("method body is empty.");
5002 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
5003 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
5004 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
5005 g_free (method_fname);
5006 g_free (method_code);
5010 set_exception_object (MonoCompile *cfg, MonoException *exception)
5012 cfg->exception_type = MONO_EXCEPTION_OBJECT_SUPPLIED;
5013 MONO_GC_REGISTER_ROOT (cfg->exception_ptr);
5014 cfg->exception_ptr = exception;
5018 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
5022 if (cfg->generic_sharing_context)
5023 type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, &klass->byval_arg);
5025 type = &klass->byval_arg;
5026 return MONO_TYPE_IS_REFERENCE (type);
5030 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
5033 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
5034 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
5035 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
5036 /* Optimize reg-reg moves away */
5038 * Can't optimize other opcodes, since sp[0] might point to
5039 * the last ins of a decomposed opcode.
5041 sp [0]->dreg = (cfg)->locals [n]->dreg;
5043 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
5048 * ldloca inhibits many optimizations so try to get rid of it in common
5051 static inline unsigned char *
5052 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
5061 local = read16 (ip + 2);
5065 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
5066 gboolean skip = FALSE;
5068 /* From the INITOBJ case */
5069 token = read32 (ip + 2);
5070 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
5071 CHECK_TYPELOAD (klass);
5072 if (generic_class_is_reference_type (cfg, klass)) {
5073 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5074 } else if (MONO_TYPE_IS_REFERENCE (&klass->byval_arg)) {
5075 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5076 } else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
5077 MONO_EMIT_NEW_VZERO (cfg, cfg->locals [local]->dreg, klass);
5090 is_exception_class (MonoClass *class)
5093 if (class == mono_defaults.exception_class)
5095 class = class->parent;
5101 * mono_method_to_ir:
5103 * Translate the .NET IL into linear IR.
5106 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
5107 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
5108 guint inline_offset, gboolean is_virtual_call)
5111 MonoInst *ins, **sp, **stack_start;
5112 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
5113 MonoSimpleBasicBlock *bb = NULL;
5114 MonoMethod *cmethod, *method_definition;
5115 MonoInst **arg_array;
5116 MonoMethodHeader *header;
5118 guint32 token, ins_flag;
5120 MonoClass *constrained_call = NULL;
5121 unsigned char *ip, *end, *target, *err_pos;
5122 static double r8_0 = 0.0;
5123 MonoMethodSignature *sig;
5124 MonoGenericContext *generic_context = NULL;
5125 MonoGenericContainer *generic_container = NULL;
5126 MonoType **param_types;
5127 int i, n, start_new_bblock, dreg;
5128 int num_calls = 0, inline_costs = 0;
5129 int breakpoint_id = 0;
5131 MonoBoolean security, pinvoke;
5132 MonoSecurityManager* secman = NULL;
5133 MonoDeclSecurityActions actions;
5134 GSList *class_inits = NULL;
5135 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
5137 gboolean init_locals, seq_points, skip_dead_blocks;
5139 /* serialization and xdomain stuff may need access to private fields and methods */
5140 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
5141 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
5142 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
5143 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
5144 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
5145 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
5147 dont_verify |= mono_security_get_mode () == MONO_SECURITY_MODE_SMCS_HACK;
5149 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
5150 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
5151 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
5152 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
5154 image = method->klass->image;
5155 header = mono_method_get_header (method);
5156 generic_container = mono_method_get_generic_container (method);
5157 sig = mono_method_signature (method);
5158 num_args = sig->hasthis + sig->param_count;
5159 ip = (unsigned char*)header->code;
5160 cfg->cil_start = ip;
5161 end = ip + header->code_size;
5162 mono_jit_stats.cil_code_size += header->code_size;
5163 init_locals = header->init_locals;
5165 seq_points = cfg->gen_seq_points && cfg->method == method;
5168 * Methods without init_locals set could cause asserts in various passes
5173 method_definition = method;
5174 while (method_definition->is_inflated) {
5175 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
5176 method_definition = imethod->declaring;
5179 /* SkipVerification is not allowed if core-clr is enabled */
5180 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
5182 dont_verify_stloc = TRUE;
5185 if (!dont_verify && mini_method_verify (cfg, method_definition))
5186 goto exception_exit;
5188 if (mono_debug_using_mono_debugger ())
5189 cfg->keep_cil_nops = TRUE;
5191 if (sig->is_inflated)
5192 generic_context = mono_method_get_context (method);
5193 else if (generic_container)
5194 generic_context = &generic_container->context;
5195 cfg->generic_context = generic_context;
5197 if (!cfg->generic_sharing_context)
5198 g_assert (!sig->has_type_parameters);
5200 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
5201 g_assert (method->is_inflated);
5202 g_assert (mono_method_get_context (method)->method_inst);
5204 if (method->is_inflated && mono_method_get_context (method)->method_inst)
5205 g_assert (sig->generic_param_count);
5207 if (cfg->method == method) {
5208 cfg->real_offset = 0;
5210 cfg->real_offset = inline_offset;
5213 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
5214 cfg->cil_offset_to_bb_len = header->code_size;
5216 cfg->current_method = method;
5218 if (cfg->verbose_level > 2)
5219 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
5221 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
5223 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
5224 for (n = 0; n < sig->param_count; ++n)
5225 param_types [n + sig->hasthis] = sig->params [n];
5226 cfg->arg_types = param_types;
5228 dont_inline = g_list_prepend (dont_inline, method);
5229 if (cfg->method == method) {
5231 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
5232 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
5235 NEW_BBLOCK (cfg, start_bblock);
5236 cfg->bb_entry = start_bblock;
5237 start_bblock->cil_code = NULL;
5238 start_bblock->cil_length = 0;
5241 NEW_BBLOCK (cfg, end_bblock);
5242 cfg->bb_exit = end_bblock;
5243 end_bblock->cil_code = NULL;
5244 end_bblock->cil_length = 0;
5245 g_assert (cfg->num_bblocks == 2);
5247 arg_array = cfg->args;
5249 if (header->num_clauses) {
5250 cfg->spvars = g_hash_table_new (NULL, NULL);
5251 cfg->exvars = g_hash_table_new (NULL, NULL);
5253 /* handle exception clauses */
5254 for (i = 0; i < header->num_clauses; ++i) {
5255 MonoBasicBlock *try_bb;
5256 MonoExceptionClause *clause = &header->clauses [i];
5257 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
5258 try_bb->real_offset = clause->try_offset;
5259 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
5260 tblock->real_offset = clause->handler_offset;
5261 tblock->flags |= BB_EXCEPTION_HANDLER;
5263 link_bblock (cfg, try_bb, tblock);
5265 if (*(ip + clause->handler_offset) == CEE_POP)
5266 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
5268 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
5269 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
5270 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
5271 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5272 MONO_ADD_INS (tblock, ins);
5274 /* todo: is a fault block unsafe to optimize? */
5275 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
5276 tblock->flags |= BB_EXCEPTION_UNSAFE;
5280 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
5282 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
5284 /* catch and filter blocks get the exception object on the stack */
5285 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
5286 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5287 MonoInst *dummy_use;
5289 /* mostly like handle_stack_args (), but just sets the input args */
5290 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
5291 tblock->in_scount = 1;
5292 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5293 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5296 * Add a dummy use for the exvar so its liveness info will be
5300 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
5302 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5303 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
5304 tblock->flags |= BB_EXCEPTION_HANDLER;
5305 tblock->real_offset = clause->data.filter_offset;
5306 tblock->in_scount = 1;
5307 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5308 /* The filter block shares the exvar with the handler block */
5309 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5310 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5311 MONO_ADD_INS (tblock, ins);
5315 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
5316 clause->data.catch_class &&
5317 cfg->generic_sharing_context &&
5318 mono_class_check_context_used (clause->data.catch_class)) {
5320 * In shared generic code with catch
5321 * clauses containing type variables
5322 * the exception handling code has to
5323 * be able to get to the rgctx.
5324 * Therefore we have to make sure that
5325 * the vtable/mrgctx argument (for
5326 * static or generic methods) or the
5327 * "this" argument (for non-static
5328 * methods) are live.
5330 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5331 mini_method_get_context (method)->method_inst ||
5332 method->klass->valuetype) {
5333 mono_get_vtable_var (cfg);
5335 MonoInst *dummy_use;
5337 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
5342 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
5343 cfg->cbb = start_bblock;
5344 cfg->args = arg_array;
5345 mono_save_args (cfg, sig, inline_args);
5348 /* FIRST CODE BLOCK */
5349 NEW_BBLOCK (cfg, bblock);
5350 bblock->cil_code = ip;
5354 ADD_BBLOCK (cfg, bblock);
5356 if (cfg->method == method) {
5357 breakpoint_id = mono_debugger_method_has_breakpoint (method);
5358 if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
5359 MONO_INST_NEW (cfg, ins, OP_BREAK);
5360 MONO_ADD_INS (bblock, ins);
5364 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5365 secman = mono_security_manager_get_methods ();
5367 security = (secman && mono_method_has_declsec (method));
5368 /* at this point having security doesn't mean we have any code to generate */
5369 if (security && (cfg->method == method)) {
5370 /* Only Demand, NonCasDemand and DemandChoice require code generation.
5371 * And we do not want to enter the next section (with allocation) if we
5372 * have nothing to generate */
5373 security = mono_declsec_get_demands (method, &actions);
5376 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
5377 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
5379 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5380 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5381 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
5383 /* unless the method or its class has the [SuppressUnmanagedCodeSecurity] attribute */
5384 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5388 mono_custom_attrs_free (custom);
5391 custom = mono_custom_attrs_from_class (wrapped->klass);
5392 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5396 mono_custom_attrs_free (custom);
5399 /* not a P/Invoke after all */
5404 if ((init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
5405 /* we use a separate basic block for the initialization code */
5406 NEW_BBLOCK (cfg, init_localsbb);
5407 cfg->bb_init = init_localsbb;
5408 init_localsbb->real_offset = cfg->real_offset;
5409 start_bblock->next_bb = init_localsbb;
5410 init_localsbb->next_bb = bblock;
5411 link_bblock (cfg, start_bblock, init_localsbb);
5412 link_bblock (cfg, init_localsbb, bblock);
5414 cfg->cbb = init_localsbb;
5416 start_bblock->next_bb = bblock;
5417 link_bblock (cfg, start_bblock, bblock);
5420 /* at this point we know, if security is TRUE, that some code needs to be generated */
5421 if (security && (cfg->method == method)) {
5424 mono_jit_stats.cas_demand_generation++;
5426 if (actions.demand.blob) {
5427 /* Add code for SecurityAction.Demand */
5428 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
5429 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
5430 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5431 mono_emit_method_call (cfg, secman->demand, args, NULL);
5433 if (actions.noncasdemand.blob) {
5434 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
5435 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
5436 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
5437 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
5438 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5439 mono_emit_method_call (cfg, secman->demand, args, NULL);
5441 if (actions.demandchoice.blob) {
5442 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
5443 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
5444 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
5445 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
5446 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
5450 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
5452 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
5455 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
5456 /* check if this is native code, e.g. an icall or a p/invoke */
5457 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
5458 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5460 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
5461 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
5463 /* if this is a native call then it can only be JITted from platform code */
5464 if ((icall || pinvk) && method->klass && method->klass->image) {
5465 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
5466 MonoException *ex = icall ? mono_get_exception_security () :
5467 mono_get_exception_method_access ();
5468 emit_throw_exception (cfg, ex);
5475 if (header->code_size == 0)
5478 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
5483 if (cfg->method == method)
5484 mono_debug_init_method (cfg, bblock, breakpoint_id);
5486 for (n = 0; n < header->num_locals; ++n) {
5487 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
5492 /* We force the vtable variable here for all shared methods
5493 for the possibility that they might show up in a stack
5494 trace where their exact instantiation is needed. */
5495 if (cfg->generic_sharing_context && method == cfg->method) {
5496 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5497 mini_method_get_context (method)->method_inst ||
5498 method->klass->valuetype) {
5499 mono_get_vtable_var (cfg);
5501 /* FIXME: Is there a better way to do this?
5502 We need the variable live for the duration
5503 of the whole method. */
5504 cfg->args [0]->flags |= MONO_INST_INDIRECT;
5508 /* add a check for this != NULL to inlined methods */
5509 if (is_virtual_call) {
5512 NEW_ARGLOAD (cfg, arg_ins, 0);
5513 MONO_ADD_INS (cfg->cbb, arg_ins);
5514 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
5517 skip_dead_blocks = !dont_verify;
5518 if (skip_dead_blocks) {
5519 bb = mono_basic_block_split (method, &error);
5520 if (!mono_error_ok (&error)) {
5521 mono_error_cleanup (&error);
5527 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
5528 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
5531 start_new_bblock = 0;
5534 if (cfg->method == method)
5535 cfg->real_offset = ip - header->code;
5537 cfg->real_offset = inline_offset;
5542 if (start_new_bblock) {
5543 bblock->cil_length = ip - bblock->cil_code;
5544 if (start_new_bblock == 2) {
5545 g_assert (ip == tblock->cil_code);
5547 GET_BBLOCK (cfg, tblock, ip);
5549 bblock->next_bb = tblock;
5552 start_new_bblock = 0;
5553 for (i = 0; i < bblock->in_scount; ++i) {
5554 if (cfg->verbose_level > 3)
5555 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5556 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5560 g_slist_free (class_inits);
5563 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
5564 link_bblock (cfg, bblock, tblock);
5565 if (sp != stack_start) {
5566 handle_stack_args (cfg, stack_start, sp - stack_start);
5568 CHECK_UNVERIFIABLE (cfg);
5570 bblock->next_bb = tblock;
5573 for (i = 0; i < bblock->in_scount; ++i) {
5574 if (cfg->verbose_level > 3)
5575 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5576 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5579 g_slist_free (class_inits);
5584 if (skip_dead_blocks) {
5585 int ip_offset = ip - header->code;
5587 if (ip_offset == bb->end)
5591 int op_size = mono_opcode_size (ip, end);
5592 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
5594 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
5596 if (ip_offset + op_size == bb->end) {
5597 MONO_INST_NEW (cfg, ins, OP_NOP);
5598 MONO_ADD_INS (bblock, ins);
5599 start_new_bblock = 1;
5607 * Sequence points are points where the debugger can place a breakpoint.
5608 * Currently, we generate these automatically at points where the IL
5611 if (seq_points && sp == stack_start) {
5612 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
5613 MONO_ADD_INS (cfg->cbb, ins);
5616 bblock->real_offset = cfg->real_offset;
5618 if ((cfg->method == method) && cfg->coverage_info) {
5619 guint32 cil_offset = ip - header->code;
5620 cfg->coverage_info->data [cil_offset].cil_code = ip;
5622 /* TODO: Use an increment here */
5623 #if defined(TARGET_X86)
5624 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
5625 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
5627 MONO_ADD_INS (cfg->cbb, ins);
5629 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
5630 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
5634 if (cfg->verbose_level > 3)
5635 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
5639 if (cfg->keep_cil_nops)
5640 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
5642 MONO_INST_NEW (cfg, ins, OP_NOP);
5644 MONO_ADD_INS (bblock, ins);
5647 if (should_insert_brekpoint (cfg->method))
5648 MONO_INST_NEW (cfg, ins, OP_BREAK);
5650 MONO_INST_NEW (cfg, ins, OP_NOP);
5652 MONO_ADD_INS (bblock, ins);
5658 CHECK_STACK_OVF (1);
5659 n = (*ip)-CEE_LDARG_0;
5661 EMIT_NEW_ARGLOAD (cfg, ins, n);
5669 CHECK_STACK_OVF (1);
5670 n = (*ip)-CEE_LDLOC_0;
5672 EMIT_NEW_LOCLOAD (cfg, ins, n);
5681 n = (*ip)-CEE_STLOC_0;
5684 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
5686 emit_stloc_ir (cfg, sp, header, n);
5693 CHECK_STACK_OVF (1);
5696 EMIT_NEW_ARGLOAD (cfg, ins, n);
5702 CHECK_STACK_OVF (1);
5705 NEW_ARGLOADA (cfg, ins, n);
5706 MONO_ADD_INS (cfg->cbb, ins);
5716 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
5718 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
5723 CHECK_STACK_OVF (1);
5726 EMIT_NEW_LOCLOAD (cfg, ins, n);
5730 case CEE_LDLOCA_S: {
5731 unsigned char *tmp_ip;
5733 CHECK_STACK_OVF (1);
5734 CHECK_LOCAL (ip [1]);
5736 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
5742 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
5751 CHECK_LOCAL (ip [1]);
5752 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
5754 emit_stloc_ir (cfg, sp, header, ip [1]);
5759 CHECK_STACK_OVF (1);
5760 EMIT_NEW_PCONST (cfg, ins, NULL);
5761 ins->type = STACK_OBJ;
5766 CHECK_STACK_OVF (1);
5767 EMIT_NEW_ICONST (cfg, ins, -1);
5780 CHECK_STACK_OVF (1);
5781 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
5787 CHECK_STACK_OVF (1);
5789 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
5795 CHECK_STACK_OVF (1);
5796 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
5802 CHECK_STACK_OVF (1);
5803 MONO_INST_NEW (cfg, ins, OP_I8CONST);
5804 ins->type = STACK_I8;
5805 ins->dreg = alloc_dreg (cfg, STACK_I8);
5807 ins->inst_l = (gint64)read64 (ip);
5808 MONO_ADD_INS (bblock, ins);
5814 gboolean use_aotconst = FALSE;
5816 #ifdef TARGET_POWERPC
5817 /* FIXME: Clean this up */
5818 if (cfg->compile_aot)
5819 use_aotconst = TRUE;
5822 /* FIXME: we should really allocate this only late in the compilation process */
5823 f = mono_domain_alloc (cfg->domain, sizeof (float));
5825 CHECK_STACK_OVF (1);
5831 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
5833 dreg = alloc_freg (cfg);
5834 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
5835 ins->type = STACK_R8;
5837 MONO_INST_NEW (cfg, ins, OP_R4CONST);
5838 ins->type = STACK_R8;
5839 ins->dreg = alloc_dreg (cfg, STACK_R8);
5841 MONO_ADD_INS (bblock, ins);
5851 gboolean use_aotconst = FALSE;
5853 #ifdef TARGET_POWERPC
5854 /* FIXME: Clean this up */
5855 if (cfg->compile_aot)
5856 use_aotconst = TRUE;
5859 /* FIXME: we should really allocate this only late in the compilation process */
5860 d = mono_domain_alloc (cfg->domain, sizeof (double));
5862 CHECK_STACK_OVF (1);
5868 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
5870 dreg = alloc_freg (cfg);
5871 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
5872 ins->type = STACK_R8;
5874 MONO_INST_NEW (cfg, ins, OP_R8CONST);
5875 ins->type = STACK_R8;
5876 ins->dreg = alloc_dreg (cfg, STACK_R8);
5878 MONO_ADD_INS (bblock, ins);
5887 MonoInst *temp, *store;
5889 CHECK_STACK_OVF (1);
5893 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
5894 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
5896 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
5899 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
5912 if (sp [0]->type == STACK_R8)
5913 /* we need to pop the value from the x86 FP stack */
5914 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
5923 if (stack_start != sp)
5925 token = read32 (ip + 1);
5926 /* FIXME: check the signature matches */
5927 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
5932 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
5933 GENERIC_SHARING_FAILURE (CEE_JMP);
5935 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5936 CHECK_CFG_EXCEPTION;
5938 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
5940 MonoMethodSignature *fsig = mono_method_signature (cmethod);
5943 /* Handle tail calls similarly to calls */
5944 n = fsig->param_count + fsig->hasthis;
5946 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
5947 call->method = cmethod;
5948 call->tail_call = TRUE;
5949 call->signature = mono_method_signature (cmethod);
5950 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
5951 call->inst.inst_p0 = cmethod;
5952 for (i = 0; i < n; ++i)
5953 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
5955 mono_arch_emit_call (cfg, call);
5956 MONO_ADD_INS (bblock, (MonoInst*)call);
5959 for (i = 0; i < num_args; ++i)
5960 /* Prevent arguments from being optimized away */
5961 arg_array [i]->flags |= MONO_INST_VOLATILE;
5963 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
5964 ins = (MonoInst*)call;
5965 ins->inst_p0 = cmethod;
5966 MONO_ADD_INS (bblock, ins);
5970 start_new_bblock = 1;
5975 case CEE_CALLVIRT: {
5976 MonoInst *addr = NULL;
5977 MonoMethodSignature *fsig = NULL;
5979 int virtual = *ip == CEE_CALLVIRT;
5980 int calli = *ip == CEE_CALLI;
5981 gboolean pass_imt_from_rgctx = FALSE;
5982 MonoInst *imt_arg = NULL;
5983 gboolean pass_vtable = FALSE;
5984 gboolean pass_mrgctx = FALSE;
5985 MonoInst *vtable_arg = NULL;
5986 gboolean check_this = FALSE;
5987 gboolean supported_tail_call = FALSE;
5990 token = read32 (ip + 1);
5997 if (method->wrapper_type != MONO_WRAPPER_NONE)
5998 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
6000 fsig = mono_metadata_parse_signature (image, token);
6002 n = fsig->param_count + fsig->hasthis;
6004 if (method->dynamic && fsig->pinvoke) {
6008 * This is a call through a function pointer using a pinvoke
6009 * signature. Have to create a wrapper and call that instead.
6010 * FIXME: This is very slow, need to create a wrapper at JIT time
6011 * instead based on the signature.
6013 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
6014 EMIT_NEW_PCONST (cfg, args [1], fsig);
6016 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
6019 MonoMethod *cil_method;
6021 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6022 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
6023 cil_method = cmethod;
6024 } else if (constrained_call) {
6025 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
6027 * This is needed since get_method_constrained can't find
6028 * the method in klass representing a type var.
6029 * The type var is guaranteed to be a reference type in this
6032 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6033 cil_method = cmethod;
6034 g_assert (!cmethod->klass->valuetype);
6036 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
6039 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6040 cil_method = cmethod;
6045 if (!dont_verify && !cfg->skip_visibility) {
6046 MonoMethod *target_method = cil_method;
6047 if (method->is_inflated) {
6048 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
6050 if (!mono_method_can_access_method (method_definition, target_method) &&
6051 !mono_method_can_access_method (method, cil_method))
6052 METHOD_ACCESS_FAILURE;
6055 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
6056 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
6058 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
6059 /* MS.NET seems to silently convert this to a callvirt */
6062 if (!cmethod->klass->inited)
6063 if (!mono_class_init (cmethod->klass))
6066 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
6067 mini_class_is_system_array (cmethod->klass)) {
6068 array_rank = cmethod->klass->rank;
6069 fsig = mono_method_signature (cmethod);
6071 if (mono_method_signature (cmethod)->pinvoke) {
6072 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
6073 check_for_pending_exc, FALSE);
6074 fsig = mono_method_signature (wrapper);
6075 } else if (constrained_call) {
6076 fsig = mono_method_signature (cmethod);
6078 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
6082 mono_save_token_info (cfg, image, token, cil_method);
6084 n = fsig->param_count + fsig->hasthis;
6086 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
6087 if (check_linkdemand (cfg, method, cmethod))
6089 CHECK_CFG_EXCEPTION;
6092 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
6093 g_assert_not_reached ();
6096 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
6099 if (!cfg->generic_sharing_context && cmethod)
6100 g_assert (!mono_method_check_context_used (cmethod));
6104 //g_assert (!virtual || fsig->hasthis);
6108 if (constrained_call) {
6110 * We have the `constrained.' prefix opcode.
6112 if (constrained_call->valuetype && !cmethod->klass->valuetype) {
6114 * The type parameter is instantiated as a valuetype,
6115 * but that type doesn't override the method we're
6116 * calling, so we need to box `this'.
6118 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
6119 ins->klass = constrained_call;
6120 sp [0] = handle_box (cfg, ins, constrained_call);
6121 CHECK_CFG_EXCEPTION;
6122 } else if (!constrained_call->valuetype) {
6123 int dreg = alloc_preg (cfg);
6126 * The type parameter is instantiated as a reference
6127 * type. We have a managed pointer on the stack, so
6128 * we need to dereference it here.
6130 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
6131 ins->type = STACK_OBJ;
6133 } else if (cmethod->klass->valuetype)
6135 constrained_call = NULL;
6138 if (*ip != CEE_CALLI && check_call_signature (cfg, fsig, sp))
6142 * If the callee is a shared method, then its static cctor
6143 * might not get called after the call was patched.
6145 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
6146 emit_generic_class_init (cfg, cmethod->klass);
6147 CHECK_TYPELOAD (cmethod->klass);
6150 if (cmethod && ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
6151 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
6152 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6153 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
6154 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6157 * Pass vtable iff target method might
6158 * be shared, which means that sharing
6159 * is enabled for its class and its
6160 * context is sharable (and it's not a
6163 if (sharing_enabled && context_sharable &&
6164 !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
6168 if (cmethod && mini_method_get_context (cmethod) &&
6169 mini_method_get_context (cmethod)->method_inst) {
6170 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6171 MonoGenericContext *context = mini_method_get_context (cmethod);
6172 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6174 g_assert (!pass_vtable);
6176 if (sharing_enabled && context_sharable)
6180 if (cfg->generic_sharing_context && cmethod) {
6181 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
6183 context_used = mono_method_check_context_used (cmethod);
6185 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
6186 /* Generic method interface
6187 calls are resolved via a
6188 helper function and don't
6190 if (!cmethod_context || !cmethod_context->method_inst)
6191 pass_imt_from_rgctx = TRUE;
6195 * If a shared method calls another
6196 * shared method then the caller must
6197 * have a generic sharing context
6198 * because the magic trampoline
6199 * requires it. FIXME: We shouldn't
6200 * have to force the vtable/mrgctx
6201 * variable here. Instead there
6202 * should be a flag in the cfg to
6203 * request a generic sharing context.
6206 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
6207 mono_get_vtable_var (cfg);
6212 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
6214 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
6216 CHECK_TYPELOAD (cmethod->klass);
6217 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
6222 g_assert (!vtable_arg);
6224 if (!cfg->compile_aot) {
6226 * emit_get_rgctx_method () calls mono_class_vtable () so check
6227 * for type load errors before.
6229 mono_class_vtable (cfg->domain, cmethod->klass);
6230 CHECK_TYPELOAD (cmethod->klass);
6233 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
6235 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
6236 MONO_METHOD_IS_FINAL (cmethod)) {
6243 if (pass_imt_from_rgctx) {
6244 g_assert (!pass_vtable);
6247 imt_arg = emit_get_rgctx_method (cfg, context_used,
6248 cmethod, MONO_RGCTX_INFO_METHOD);
6252 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
6254 /* Calling virtual generic methods */
6255 if (cmethod && virtual &&
6256 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
6257 !(MONO_METHOD_IS_FINAL (cmethod) &&
6258 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
6259 mono_method_signature (cmethod)->generic_param_count) {
6260 MonoInst *this_temp, *this_arg_temp, *store;
6261 MonoInst *iargs [4];
6263 g_assert (mono_method_signature (cmethod)->is_inflated);
6265 /* Prevent inlining of methods that contain indirect calls */
6268 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK
6269 /* The llvm vcall trampolines doesn't support generic virtual calls yet */
6270 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt && !mono_use_llvm) {
6271 g_assert (!imt_arg);
6273 g_assert (cmethod->is_inflated);
6274 imt_arg = emit_get_rgctx_method (cfg, context_used,
6275 cmethod, MONO_RGCTX_INFO_METHOD);
6276 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, sp [0], imt_arg);
6280 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
6281 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
6282 MONO_ADD_INS (bblock, store);
6284 /* FIXME: This should be a managed pointer */
6285 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
6287 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
6288 iargs [1] = emit_get_rgctx_method (cfg, context_used,
6289 cmethod, MONO_RGCTX_INFO_METHOD);
6290 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
6291 addr = mono_emit_jit_icall (cfg,
6292 mono_helper_compile_generic_method, iargs);
6294 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
6296 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6299 if (!MONO_TYPE_IS_VOID (fsig->ret))
6300 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6307 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6308 supported_tail_call = cmethod && MONO_ARCH_USE_OP_TAIL_CALL (mono_method_signature (method), mono_method_signature (cmethod));
6310 supported_tail_call = cmethod && mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
6314 /* FIXME: runtime generic context pointer for jumps? */
6315 /* FIXME: handle this for generic sharing eventually */
6316 if ((ins_flag & MONO_INST_TAILCALL) && !cfg->generic_sharing_context && !vtable_arg && cmethod && (*ip == CEE_CALL) && supported_tail_call) {
6319 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6322 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6323 /* Handle tail calls similarly to calls */
6324 call = mono_emit_call_args (cfg, mono_method_signature (cmethod), sp, FALSE, FALSE, TRUE);
6326 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6327 call->tail_call = TRUE;
6328 call->method = cmethod;
6329 call->signature = mono_method_signature (cmethod);
6332 * We implement tail calls by storing the actual arguments into the
6333 * argument variables, then emitting a CEE_JMP.
6335 for (i = 0; i < n; ++i) {
6336 /* Prevent argument from being register allocated */
6337 arg_array [i]->flags |= MONO_INST_VOLATILE;
6338 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6342 ins = (MonoInst*)call;
6343 ins->inst_p0 = cmethod;
6344 ins->inst_p1 = arg_array [0];
6345 MONO_ADD_INS (bblock, ins);
6346 link_bblock (cfg, bblock, end_bblock);
6347 start_new_bblock = 1;
6348 /* skip CEE_RET as well */
6354 /* Conversion to a JIT intrinsic */
6355 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
6356 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6357 type_to_eval_stack_type ((cfg), fsig->ret, ins);
6368 if ((cfg->opt & MONO_OPT_INLINE) && cmethod &&
6369 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
6370 mono_method_check_inlining (cfg, cmethod) &&
6371 !g_list_find (dont_inline, cmethod)) {
6373 gboolean allways = FALSE;
6375 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
6376 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
6377 /* Prevent inlining of methods that call wrappers */
6379 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
6383 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, allways))) {
6385 cfg->real_offset += 5;
6388 if (!MONO_TYPE_IS_VOID (fsig->ret))
6389 /* *sp is already set by inline_method */
6392 inline_costs += costs;
6398 inline_costs += 10 * num_calls++;
6400 /* Tail recursion elimination */
6401 if ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
6402 gboolean has_vtargs = FALSE;
6405 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6408 /* keep it simple */
6409 for (i = fsig->param_count - 1; i >= 0; i--) {
6410 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
6415 for (i = 0; i < n; ++i)
6416 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6417 MONO_INST_NEW (cfg, ins, OP_BR);
6418 MONO_ADD_INS (bblock, ins);
6419 tblock = start_bblock->out_bb [0];
6420 link_bblock (cfg, bblock, tblock);
6421 ins->inst_target_bb = tblock;
6422 start_new_bblock = 1;
6424 /* skip the CEE_RET, too */
6425 if (ip_in_bb (cfg, bblock, ip + 5))
6435 /* Generic sharing */
6436 /* FIXME: only do this for generic methods if
6437 they are not shared! */
6438 if (context_used && !imt_arg && !array_rank &&
6439 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
6440 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
6441 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
6442 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
6445 g_assert (cfg->generic_sharing_context && cmethod);
6449 * We are compiling a call to a
6450 * generic method from shared code,
6451 * which means that we have to look up
6452 * the method in the rgctx and do an
6455 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
6458 /* Indirect calls */
6460 g_assert (!imt_arg);
6462 if (*ip == CEE_CALL)
6463 g_assert (context_used);
6464 else if (*ip == CEE_CALLI)
6465 g_assert (!vtable_arg);
6467 /* FIXME: what the hell is this??? */
6468 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
6469 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
6471 /* Prevent inlining of methods with indirect calls */
6475 #ifdef MONO_ARCH_RGCTX_REG
6477 int rgctx_reg = mono_alloc_preg (cfg);
6479 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
6480 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6481 call = (MonoCallInst*)ins;
6482 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
6483 cfg->uses_rgctx_reg = TRUE;
6484 call->rgctx_reg = TRUE;
6489 if (addr->opcode == OP_AOTCONST && addr->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6491 * Instead of emitting an indirect call, emit a direct call
6492 * with the contents of the aotconst as the patch info.
6494 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_p0, fsig, sp);
6496 } else if (addr->opcode == OP_GOT_ENTRY && addr->inst_right->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6497 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_right->inst_left, fsig, sp);
6500 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6503 if (!MONO_TYPE_IS_VOID (fsig->ret))
6504 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6515 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
6516 if (sp [fsig->param_count]->type == STACK_OBJ) {
6517 MonoInst *iargs [2];
6520 iargs [1] = sp [fsig->param_count];
6522 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
6525 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
6526 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, sp [fsig->param_count]->dreg);
6527 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
6528 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6530 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
6533 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
6534 if (!cmethod->klass->element_class->valuetype && !readonly)
6535 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
6536 CHECK_TYPELOAD (cmethod->klass);
6539 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6542 g_assert_not_reached ();
6550 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
6552 if (!MONO_TYPE_IS_VOID (fsig->ret))
6553 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6563 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL,
6565 } else if (imt_arg) {
6566 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, imt_arg);
6568 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, NULL);
6571 if (!MONO_TYPE_IS_VOID (fsig->ret))
6572 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6579 if (cfg->method != method) {
6580 /* return from inlined method */
6582 * If in_count == 0, that means the ret is unreachable due to
6583 * being preceeded by a throw. In that case, inline_method () will
6584 * handle setting the return value
6585 * (test case: test_0_inline_throw ()).
6587 if (return_var && cfg->cbb->in_count) {
6591 //g_assert (returnvar != -1);
6592 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
6593 cfg->ret_var_set = TRUE;
6597 MonoType *ret_type = mono_method_signature (method)->ret;
6601 * Place a seq point here too even through the IL stack is not
6602 * empty, so a step over on
6605 * will work correctly.
6607 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
6608 MONO_ADD_INS (cfg->cbb, ins);
6611 g_assert (!return_var);
6614 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
6617 if (!cfg->vret_addr) {
6620 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
6622 EMIT_NEW_RETLOADA (cfg, ret_addr);
6624 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
6625 ins->klass = mono_class_from_mono_type (ret_type);
6628 #ifdef MONO_ARCH_SOFT_FLOAT
6629 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
6630 MonoInst *iargs [1];
6634 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
6635 mono_arch_emit_setret (cfg, method, conv);
6637 mono_arch_emit_setret (cfg, method, *sp);
6640 mono_arch_emit_setret (cfg, method, *sp);
6645 if (sp != stack_start)
6647 MONO_INST_NEW (cfg, ins, OP_BR);
6649 ins->inst_target_bb = end_bblock;
6650 MONO_ADD_INS (bblock, ins);
6651 link_bblock (cfg, bblock, end_bblock);
6652 start_new_bblock = 1;
6656 MONO_INST_NEW (cfg, ins, OP_BR);
6658 target = ip + 1 + (signed char)(*ip);
6660 GET_BBLOCK (cfg, tblock, target);
6661 link_bblock (cfg, bblock, tblock);
6662 ins->inst_target_bb = tblock;
6663 if (sp != stack_start) {
6664 handle_stack_args (cfg, stack_start, sp - stack_start);
6666 CHECK_UNVERIFIABLE (cfg);
6668 MONO_ADD_INS (bblock, ins);
6669 start_new_bblock = 1;
6670 inline_costs += BRANCH_COST;
6684 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
6686 target = ip + 1 + *(signed char*)ip;
6692 inline_costs += BRANCH_COST;
6696 MONO_INST_NEW (cfg, ins, OP_BR);
6699 target = ip + 4 + (gint32)read32(ip);
6701 GET_BBLOCK (cfg, tblock, target);
6702 link_bblock (cfg, bblock, tblock);
6703 ins->inst_target_bb = tblock;
6704 if (sp != stack_start) {
6705 handle_stack_args (cfg, stack_start, sp - stack_start);
6707 CHECK_UNVERIFIABLE (cfg);
6710 MONO_ADD_INS (bblock, ins);
6712 start_new_bblock = 1;
6713 inline_costs += BRANCH_COST;
6720 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
6721 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
6722 guint32 opsize = is_short ? 1 : 4;
6724 CHECK_OPSIZE (opsize);
6726 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
6729 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
6734 GET_BBLOCK (cfg, tblock, target);
6735 link_bblock (cfg, bblock, tblock);
6736 GET_BBLOCK (cfg, tblock, ip);
6737 link_bblock (cfg, bblock, tblock);
6739 if (sp != stack_start) {
6740 handle_stack_args (cfg, stack_start, sp - stack_start);
6741 CHECK_UNVERIFIABLE (cfg);
6744 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
6745 cmp->sreg1 = sp [0]->dreg;
6746 type_from_op (cmp, sp [0], NULL);
6749 #if SIZEOF_REGISTER == 4
6750 if (cmp->opcode == OP_LCOMPARE_IMM) {
6751 /* Convert it to OP_LCOMPARE */
6752 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6753 ins->type = STACK_I8;
6754 ins->dreg = alloc_dreg (cfg, STACK_I8);
6756 MONO_ADD_INS (bblock, ins);
6757 cmp->opcode = OP_LCOMPARE;
6758 cmp->sreg2 = ins->dreg;
6761 MONO_ADD_INS (bblock, cmp);
6763 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
6764 type_from_op (ins, sp [0], NULL);
6765 MONO_ADD_INS (bblock, ins);
6766 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
6767 GET_BBLOCK (cfg, tblock, target);
6768 ins->inst_true_bb = tblock;
6769 GET_BBLOCK (cfg, tblock, ip);
6770 ins->inst_false_bb = tblock;
6771 start_new_bblock = 2;
6774 inline_costs += BRANCH_COST;
6789 MONO_INST_NEW (cfg, ins, *ip);
6791 target = ip + 4 + (gint32)read32(ip);
6797 inline_costs += BRANCH_COST;
6801 MonoBasicBlock **targets;
6802 MonoBasicBlock *default_bblock;
6803 MonoJumpInfoBBTable *table;
6804 int offset_reg = alloc_preg (cfg);
6805 int target_reg = alloc_preg (cfg);
6806 int table_reg = alloc_preg (cfg);
6807 int sum_reg = alloc_preg (cfg);
6808 gboolean use_op_switch;
6812 n = read32 (ip + 1);
6815 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
6819 CHECK_OPSIZE (n * sizeof (guint32));
6820 target = ip + n * sizeof (guint32);
6822 GET_BBLOCK (cfg, default_bblock, target);
6824 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
6825 for (i = 0; i < n; ++i) {
6826 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
6827 targets [i] = tblock;
6831 if (sp != stack_start) {
6833 * Link the current bb with the targets as well, so handle_stack_args
6834 * will set their in_stack correctly.
6836 link_bblock (cfg, bblock, default_bblock);
6837 for (i = 0; i < n; ++i)
6838 link_bblock (cfg, bblock, targets [i]);
6840 handle_stack_args (cfg, stack_start, sp - stack_start);
6842 CHECK_UNVERIFIABLE (cfg);
6845 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
6846 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
6849 for (i = 0; i < n; ++i)
6850 link_bblock (cfg, bblock, targets [i]);
6852 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
6853 table->table = targets;
6854 table->table_size = n;
6856 use_op_switch = FALSE;
6858 /* ARM implements SWITCH statements differently */
6859 /* FIXME: Make it use the generic implementation */
6860 if (!cfg->compile_aot)
6861 use_op_switch = TRUE;
6864 if (COMPILE_LLVM (cfg))
6865 use_op_switch = TRUE;
6867 cfg->cbb->has_jump_table = 1;
6869 if (use_op_switch) {
6870 MONO_INST_NEW (cfg, ins, OP_SWITCH);
6871 ins->sreg1 = src1->dreg;
6872 ins->inst_p0 = table;
6873 ins->inst_many_bb = targets;
6874 ins->klass = GUINT_TO_POINTER (n);
6875 MONO_ADD_INS (cfg->cbb, ins);
6877 if (sizeof (gpointer) == 8)
6878 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
6880 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
6882 #if SIZEOF_REGISTER == 8
6883 /* The upper word might not be zero, and we add it to a 64 bit address later */
6884 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
6887 if (cfg->compile_aot) {
6888 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
6890 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
6891 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
6892 ins->inst_p0 = table;
6893 ins->dreg = table_reg;
6894 MONO_ADD_INS (cfg->cbb, ins);
6897 /* FIXME: Use load_memindex */
6898 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
6899 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
6900 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
6902 start_new_bblock = 1;
6903 inline_costs += (BRANCH_COST * 2);
6923 dreg = alloc_freg (cfg);
6926 dreg = alloc_lreg (cfg);
6929 dreg = alloc_preg (cfg);
6932 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
6933 ins->type = ldind_type [*ip - CEE_LDIND_I1];
6934 ins->flags |= ins_flag;
6936 MONO_ADD_INS (bblock, ins);
6951 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
6952 ins->flags |= ins_flag;
6954 MONO_ADD_INS (bblock, ins);
6956 #if HAVE_WRITE_BARRIERS
6957 if (*ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0))) {
6958 /* insert call to write barrier */
6959 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
6960 mono_emit_method_call (cfg, write_barrier, sp, NULL);
6971 MONO_INST_NEW (cfg, ins, (*ip));
6973 ins->sreg1 = sp [0]->dreg;
6974 ins->sreg2 = sp [1]->dreg;
6975 type_from_op (ins, sp [0], sp [1]);
6977 ins->dreg = alloc_dreg ((cfg), (ins)->type);
6979 /* Use the immediate opcodes if possible */
6980 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
6981 int imm_opcode = mono_op_to_op_imm (ins->opcode);
6982 if (imm_opcode != -1) {
6983 ins->opcode = imm_opcode;
6984 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
6987 sp [1]->opcode = OP_NOP;
6991 MONO_ADD_INS ((cfg)->cbb, (ins));
6993 *sp++ = mono_decompose_opcode (cfg, ins);
7010 MONO_INST_NEW (cfg, ins, (*ip));
7012 ins->sreg1 = sp [0]->dreg;
7013 ins->sreg2 = sp [1]->dreg;
7014 type_from_op (ins, sp [0], sp [1]);
7016 ADD_WIDEN_OP (ins, sp [0], sp [1]);
7017 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7019 /* FIXME: Pass opcode to is_inst_imm */
7021 /* Use the immediate opcodes if possible */
7022 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
7025 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
7026 if (imm_opcode != -1) {
7027 ins->opcode = imm_opcode;
7028 if (sp [1]->opcode == OP_I8CONST) {
7029 #if SIZEOF_REGISTER == 8
7030 ins->inst_imm = sp [1]->inst_l;
7032 ins->inst_ls_word = sp [1]->inst_ls_word;
7033 ins->inst_ms_word = sp [1]->inst_ms_word;
7037 ins->inst_imm = (gssize)(sp [1]->inst_c0);
7040 /* Might be followed by an instruction added by ADD_WIDEN_OP */
7041 if (sp [1]->next == NULL)
7042 sp [1]->opcode = OP_NOP;
7045 MONO_ADD_INS ((cfg)->cbb, (ins));
7047 *sp++ = mono_decompose_opcode (cfg, ins);
7060 case CEE_CONV_OVF_I8:
7061 case CEE_CONV_OVF_U8:
7065 /* Special case this earlier so we have long constants in the IR */
7066 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
7067 int data = sp [-1]->inst_c0;
7068 sp [-1]->opcode = OP_I8CONST;
7069 sp [-1]->type = STACK_I8;
7070 #if SIZEOF_REGISTER == 8
7071 if ((*ip) == CEE_CONV_U8)
7072 sp [-1]->inst_c0 = (guint32)data;
7074 sp [-1]->inst_c0 = data;
7076 sp [-1]->inst_ls_word = data;
7077 if ((*ip) == CEE_CONV_U8)
7078 sp [-1]->inst_ms_word = 0;
7080 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
7082 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
7089 case CEE_CONV_OVF_I4:
7090 case CEE_CONV_OVF_I1:
7091 case CEE_CONV_OVF_I2:
7092 case CEE_CONV_OVF_I:
7093 case CEE_CONV_OVF_U:
7096 if (sp [-1]->type == STACK_R8) {
7097 ADD_UNOP (CEE_CONV_OVF_I8);
7104 case CEE_CONV_OVF_U1:
7105 case CEE_CONV_OVF_U2:
7106 case CEE_CONV_OVF_U4:
7109 if (sp [-1]->type == STACK_R8) {
7110 ADD_UNOP (CEE_CONV_OVF_U8);
7117 case CEE_CONV_OVF_I1_UN:
7118 case CEE_CONV_OVF_I2_UN:
7119 case CEE_CONV_OVF_I4_UN:
7120 case CEE_CONV_OVF_I8_UN:
7121 case CEE_CONV_OVF_U1_UN:
7122 case CEE_CONV_OVF_U2_UN:
7123 case CEE_CONV_OVF_U4_UN:
7124 case CEE_CONV_OVF_U8_UN:
7125 case CEE_CONV_OVF_I_UN:
7126 case CEE_CONV_OVF_U_UN:
7136 case CEE_ADD_OVF_UN:
7138 case CEE_MUL_OVF_UN:
7140 case CEE_SUB_OVF_UN:
7148 token = read32 (ip + 1);
7149 klass = mini_get_class (method, token, generic_context);
7150 CHECK_TYPELOAD (klass);
7152 if (generic_class_is_reference_type (cfg, klass)) {
7153 MonoInst *store, *load;
7154 int dreg = alloc_preg (cfg);
7156 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
7157 load->flags |= ins_flag;
7158 MONO_ADD_INS (cfg->cbb, load);
7160 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
7161 store->flags |= ins_flag;
7162 MONO_ADD_INS (cfg->cbb, store);
7164 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7176 token = read32 (ip + 1);
7177 klass = mini_get_class (method, token, generic_context);
7178 CHECK_TYPELOAD (klass);
7180 /* Optimize the common ldobj+stloc combination */
7190 loc_index = ip [5] - CEE_STLOC_0;
7197 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
7198 CHECK_LOCAL (loc_index);
7200 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7201 ins->dreg = cfg->locals [loc_index]->dreg;
7207 /* Optimize the ldobj+stobj combination */
7208 /* The reference case ends up being a load+store anyway */
7209 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
7214 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7221 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7230 CHECK_STACK_OVF (1);
7232 n = read32 (ip + 1);
7234 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
7235 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
7236 ins->type = STACK_OBJ;
7239 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
7240 MonoInst *iargs [1];
7242 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
7243 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
7245 if (cfg->opt & MONO_OPT_SHARED) {
7246 MonoInst *iargs [3];
7248 if (cfg->compile_aot) {
7249 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
7251 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7252 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
7253 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
7254 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
7255 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7257 if (bblock->out_of_line) {
7258 MonoInst *iargs [2];
7260 if (image == mono_defaults.corlib) {
7262 * Avoid relocations in AOT and save some space by using a
7263 * version of helper_ldstr specialized to mscorlib.
7265 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
7266 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
7268 /* Avoid creating the string object */
7269 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
7270 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
7271 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
7275 if (cfg->compile_aot) {
7276 NEW_LDSTRCONST (cfg, ins, image, n);
7278 MONO_ADD_INS (bblock, ins);
7281 NEW_PCONST (cfg, ins, NULL);
7282 ins->type = STACK_OBJ;
7283 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7285 MONO_ADD_INS (bblock, ins);
7294 MonoInst *iargs [2];
7295 MonoMethodSignature *fsig;
7298 MonoInst *vtable_arg = NULL;
7301 token = read32 (ip + 1);
7302 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7305 fsig = mono_method_get_signature (cmethod, image, token);
7309 mono_save_token_info (cfg, image, token, cmethod);
7311 if (!mono_class_init (cmethod->klass))
7314 if (cfg->generic_sharing_context)
7315 context_used = mono_method_check_context_used (cmethod);
7317 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
7318 if (check_linkdemand (cfg, method, cmethod))
7320 CHECK_CFG_EXCEPTION;
7321 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
7322 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
7325 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7326 mono_method_is_generic_sharable_impl (cmethod, TRUE)) {
7327 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7328 mono_class_vtable (cfg->domain, cmethod->klass);
7329 CHECK_TYPELOAD (cmethod->klass);
7331 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7332 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7335 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
7336 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7338 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7340 CHECK_TYPELOAD (cmethod->klass);
7341 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7346 n = fsig->param_count;
7350 * Generate smaller code for the common newobj <exception> instruction in
7351 * argument checking code.
7353 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
7354 is_exception_class (cmethod->klass) && n <= 2 &&
7355 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
7356 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
7357 MonoInst *iargs [3];
7359 g_assert (!vtable_arg);
7363 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
7366 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
7370 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
7375 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
7378 g_assert_not_reached ();
7386 /* move the args to allow room for 'this' in the first position */
7392 /* check_call_signature () requires sp[0] to be set */
7393 this_ins.type = STACK_OBJ;
7395 if (check_call_signature (cfg, fsig, sp))
7400 if (mini_class_is_system_array (cmethod->klass)) {
7401 g_assert (!vtable_arg);
7403 *sp = emit_get_rgctx_method (cfg, context_used,
7404 cmethod, MONO_RGCTX_INFO_METHOD);
7406 /* Avoid varargs in the common case */
7407 if (fsig->param_count == 1)
7408 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
7409 else if (fsig->param_count == 2)
7410 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
7411 else if (fsig->param_count == 3)
7412 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
7414 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
7415 } else if (cmethod->string_ctor) {
7416 g_assert (!context_used);
7417 g_assert (!vtable_arg);
7418 /* we simply pass a null pointer */
7419 EMIT_NEW_PCONST (cfg, *sp, NULL);
7420 /* now call the string ctor */
7421 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, sp, NULL, NULL);
7423 MonoInst* callvirt_this_arg = NULL;
7425 if (cmethod->klass->valuetype) {
7426 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
7427 MONO_EMIT_NEW_VZERO (cfg, iargs [0]->dreg, cmethod->klass);
7428 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
7433 * The code generated by mini_emit_virtual_call () expects
7434 * iargs [0] to be a boxed instance, but luckily the vcall
7435 * will be transformed into a normal call there.
7437 } else if (context_used) {
7441 if (cfg->opt & MONO_OPT_SHARED)
7442 rgctx_info = MONO_RGCTX_INFO_KLASS;
7444 rgctx_info = MONO_RGCTX_INFO_VTABLE;
7445 data = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, rgctx_info);
7447 alloc = handle_alloc_from_inst (cfg, cmethod->klass, data, FALSE);
7450 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7452 CHECK_TYPELOAD (cmethod->klass);
7455 * TypeInitializationExceptions thrown from the mono_runtime_class_init
7456 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
7457 * As a workaround, we call class cctors before allocating objects.
7459 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
7460 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
7461 if (cfg->verbose_level > 2)
7462 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
7463 class_inits = g_slist_prepend (class_inits, vtable);
7466 alloc = handle_alloc (cfg, cmethod->klass, FALSE);
7469 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
7472 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
7474 /* Now call the actual ctor */
7475 /* Avoid virtual calls to ctors if possible */
7476 if (cmethod->klass->marshalbyref)
7477 callvirt_this_arg = sp [0];
7479 if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7480 mono_method_check_inlining (cfg, cmethod) &&
7481 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
7482 !g_list_find (dont_inline, cmethod)) {
7485 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
7486 cfg->real_offset += 5;
7489 inline_costs += costs - 5;
7492 mono_emit_method_call_full (cfg, cmethod, fsig, sp, callvirt_this_arg, NULL);
7494 } else if (context_used &&
7495 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
7496 !mono_class_generic_sharing_enabled (cmethod->klass))) {
7497 MonoInst *cmethod_addr;
7499 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7500 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7502 mono_emit_rgctx_calli (cfg, fsig, sp, cmethod_addr, vtable_arg);
7505 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp,
7506 callvirt_this_arg, NULL, vtable_arg);
7510 if (alloc == NULL) {
7512 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
7513 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
7527 token = read32 (ip + 1);
7528 klass = mini_get_class (method, token, generic_context);
7529 CHECK_TYPELOAD (klass);
7530 if (sp [0]->type != STACK_OBJ)
7533 if (cfg->generic_sharing_context)
7534 context_used = mono_class_check_context_used (klass);
7536 if (!context_used && mono_class_has_variant_generic_params (klass)) {
7543 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
7545 ins = mono_emit_jit_icall (cfg, mono_object_castclass, args);
7549 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
7550 MonoMethod *mono_castclass;
7551 MonoInst *iargs [1];
7554 mono_castclass = mono_marshal_get_castclass (klass);
7557 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7558 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7559 g_assert (costs > 0);
7562 cfg->real_offset += 5;
7567 inline_costs += costs;
7570 ins = handle_castclass (cfg, klass, *sp, context_used);
7571 CHECK_CFG_EXCEPTION;
7581 token = read32 (ip + 1);
7582 klass = mini_get_class (method, token, generic_context);
7583 CHECK_TYPELOAD (klass);
7584 if (sp [0]->type != STACK_OBJ)
7587 if (cfg->generic_sharing_context)
7588 context_used = mono_class_check_context_used (klass);
7590 if (!context_used && mono_class_has_variant_generic_params (klass)) {
7597 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
7599 *sp = mono_emit_jit_icall (cfg, mono_object_isinst, args);
7603 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
7604 MonoMethod *mono_isinst;
7605 MonoInst *iargs [1];
7608 mono_isinst = mono_marshal_get_isinst (klass);
7611 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
7612 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7613 g_assert (costs > 0);
7616 cfg->real_offset += 5;
7621 inline_costs += costs;
7624 ins = handle_isinst (cfg, klass, *sp, context_used);
7625 CHECK_CFG_EXCEPTION;
7632 case CEE_UNBOX_ANY: {
7636 token = read32 (ip + 1);
7637 klass = mini_get_class (method, token, generic_context);
7638 CHECK_TYPELOAD (klass);
7640 mono_save_token_info (cfg, image, token, klass);
7642 if (cfg->generic_sharing_context)
7643 context_used = mono_class_check_context_used (klass);
7645 if (generic_class_is_reference_type (cfg, klass)) {
7646 /* CASTCLASS FIXME kill this huge slice of duplicated code*/
7648 MonoInst *iargs [2];
7653 iargs [1] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
7654 ins = mono_emit_jit_icall (cfg, mono_object_castclass, iargs);
7658 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7659 MonoMethod *mono_castclass;
7660 MonoInst *iargs [1];
7663 mono_castclass = mono_marshal_get_castclass (klass);
7666 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7667 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7669 g_assert (costs > 0);
7672 cfg->real_offset += 5;
7676 inline_costs += costs;
7678 ins = handle_castclass (cfg, klass, *sp, 0);
7679 CHECK_CFG_EXCEPTION;
7687 if (mono_class_is_nullable (klass)) {
7688 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
7695 ins = handle_unbox (cfg, klass, sp, context_used);
7701 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7714 token = read32 (ip + 1);
7715 klass = mini_get_class (method, token, generic_context);
7716 CHECK_TYPELOAD (klass);
7718 mono_save_token_info (cfg, image, token, klass);
7720 if (cfg->generic_sharing_context)
7721 context_used = mono_class_check_context_used (klass);
7723 if (generic_class_is_reference_type (cfg, klass)) {
7729 if (klass == mono_defaults.void_class)
7731 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
7733 /* frequent check in generic code: box (struct), brtrue */
7734 if (!mono_class_is_nullable (klass) &&
7735 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) && (ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S)) {
7736 /*printf ("box-brtrue opt at 0x%04x in %s\n", real_offset, method->name);*/
7738 MONO_INST_NEW (cfg, ins, OP_BR);
7739 if (*ip == CEE_BRTRUE_S) {
7742 target = ip + 1 + (signed char)(*ip);
7747 target = ip + 4 + (gint)(read32 (ip));
7750 GET_BBLOCK (cfg, tblock, target);
7751 link_bblock (cfg, bblock, tblock);
7752 ins->inst_target_bb = tblock;
7753 GET_BBLOCK (cfg, tblock, ip);
7755 * This leads to some inconsistency, since the two bblocks are
7756 * not really connected, but it is needed for handling stack
7757 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
7758 * FIXME: This should only be needed if sp != stack_start, but that
7759 * doesn't work for some reason (test failure in mcs/tests on x86).
7761 link_bblock (cfg, bblock, tblock);
7762 if (sp != stack_start) {
7763 handle_stack_args (cfg, stack_start, sp - stack_start);
7765 CHECK_UNVERIFIABLE (cfg);
7767 MONO_ADD_INS (bblock, ins);
7768 start_new_bblock = 1;
7776 if (cfg->opt & MONO_OPT_SHARED)
7777 rgctx_info = MONO_RGCTX_INFO_KLASS;
7779 rgctx_info = MONO_RGCTX_INFO_VTABLE;
7780 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
7781 *sp++ = handle_box_from_inst (cfg, val, klass, context_used, data);
7783 *sp++ = handle_box (cfg, val, klass);
7786 CHECK_CFG_EXCEPTION;
7795 token = read32 (ip + 1);
7796 klass = mini_get_class (method, token, generic_context);
7797 CHECK_TYPELOAD (klass);
7799 mono_save_token_info (cfg, image, token, klass);
7801 if (cfg->generic_sharing_context)
7802 context_used = mono_class_check_context_used (klass);
7804 if (mono_class_is_nullable (klass)) {
7807 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
7808 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
7812 ins = handle_unbox (cfg, klass, sp, context_used);
7822 MonoClassField *field;
7826 if (*ip == CEE_STFLD) {
7833 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
7835 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
7838 token = read32 (ip + 1);
7839 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7840 field = mono_method_get_wrapper_data (method, token);
7841 klass = field->parent;
7844 field = mono_field_from_token (image, token, &klass, generic_context);
7848 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
7849 FIELD_ACCESS_FAILURE;
7850 mono_class_init (klass);
7852 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exist) have
7853 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
7854 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
7855 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
7858 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
7859 if (*ip == CEE_STFLD) {
7860 if (target_type_is_incompatible (cfg, field->type, sp [1]))
7862 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
7863 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
7864 MonoInst *iargs [5];
7867 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
7868 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
7869 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
7873 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
7874 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
7875 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7876 g_assert (costs > 0);
7878 cfg->real_offset += 5;
7881 inline_costs += costs;
7883 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
7888 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
7890 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
7892 #if HAVE_WRITE_BARRIERS
7893 if (mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
7894 /* insert call to write barrier */
7895 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
7896 MonoInst *iargs [2];
7899 dreg = alloc_preg (cfg);
7900 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
7902 mono_emit_method_call (cfg, write_barrier, iargs, NULL);
7906 store->flags |= ins_flag;
7913 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
7914 MonoMethod *wrapper = (*ip == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
7915 MonoInst *iargs [4];
7918 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
7919 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
7920 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
7921 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
7922 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
7923 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7925 g_assert (costs > 0);
7927 cfg->real_offset += 5;
7931 inline_costs += costs;
7933 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
7937 if (sp [0]->type == STACK_VTYPE) {
7940 /* Have to compute the address of the variable */
7942 var = get_vreg_to_inst (cfg, sp [0]->dreg);
7944 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
7946 g_assert (var->klass == klass);
7948 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
7952 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
7954 if (*ip == CEE_LDFLDA) {
7955 dreg = alloc_preg (cfg);
7957 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
7958 ins->klass = mono_class_from_mono_type (field->type);
7959 ins->type = STACK_MP;
7964 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
7965 load->flags |= ins_flag;
7966 load->flags |= MONO_INST_FAULT;
7977 MonoClassField *field;
7978 gpointer addr = NULL;
7979 gboolean is_special_static;
7982 token = read32 (ip + 1);
7984 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7985 field = mono_method_get_wrapper_data (method, token);
7986 klass = field->parent;
7989 field = mono_field_from_token (image, token, &klass, generic_context);
7992 mono_class_init (klass);
7993 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
7994 FIELD_ACCESS_FAILURE;
7996 /* if the class is Critical then transparent code cannot access its fields */
7997 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
7998 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8001 * We can only support shared generic static
8002 * field access on architectures where the
8003 * trampoline code has been extended to handle
8004 * the generic class init.
8006 #ifndef MONO_ARCH_VTABLE_REG
8007 GENERIC_SHARING_FAILURE (*ip);
8010 if (cfg->generic_sharing_context)
8011 context_used = mono_class_check_context_used (klass);
8013 g_assert (!(field->type->attrs & FIELD_ATTRIBUTE_LITERAL));
8015 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
8016 * to be called here.
8018 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
8019 mono_class_vtable (cfg->domain, klass);
8020 CHECK_TYPELOAD (klass);
8022 mono_domain_lock (cfg->domain);
8023 if (cfg->domain->special_static_fields)
8024 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
8025 mono_domain_unlock (cfg->domain);
8027 is_special_static = mono_class_field_is_special_static (field);
8029 /* Generate IR to compute the field address */
8031 if ((cfg->opt & MONO_OPT_SHARED) ||
8032 (cfg->compile_aot && is_special_static) ||
8033 (context_used && is_special_static)) {
8034 MonoInst *iargs [2];
8036 g_assert (field->parent);
8037 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8039 iargs [1] = emit_get_rgctx_field (cfg, context_used,
8040 field, MONO_RGCTX_INFO_CLASS_FIELD);
8042 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8044 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8045 } else if (context_used) {
8046 MonoInst *static_data;
8049 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
8050 method->klass->name_space, method->klass->name, method->name,
8051 depth, field->offset);
8054 if (mono_class_needs_cctor_run (klass, method)) {
8058 vtable = emit_get_rgctx_klass (cfg, context_used,
8059 klass, MONO_RGCTX_INFO_VTABLE);
8061 // FIXME: This doesn't work since it tries to pass the argument
8062 // in the normal way, instead of using MONO_ARCH_VTABLE_REG
8064 * The vtable pointer is always passed in a register regardless of
8065 * the calling convention, so assign it manually, and make a call
8066 * using a signature without parameters.
8068 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable);
8069 #ifdef MONO_ARCH_VTABLE_REG
8070 mono_call_inst_add_outarg_reg (cfg, call, vtable->dreg, MONO_ARCH_VTABLE_REG, FALSE);
8071 cfg->uses_vtable_reg = TRUE;
8078 * The pointer we're computing here is
8080 * super_info.static_data + field->offset
8082 static_data = emit_get_rgctx_klass (cfg, context_used,
8083 klass, MONO_RGCTX_INFO_STATIC_DATA);
8085 if (field->offset == 0) {
8088 int addr_reg = mono_alloc_preg (cfg);
8089 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
8091 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
8092 MonoInst *iargs [2];
8094 g_assert (field->parent);
8095 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8096 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8097 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8099 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
8101 CHECK_TYPELOAD (klass);
8103 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
8104 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
8105 if (cfg->verbose_level > 2)
8106 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
8107 class_inits = g_slist_prepend (class_inits, vtable);
8109 if (cfg->run_cctors) {
8111 /* This makes so that inline cannot trigger */
8112 /* .cctors: too many apps depend on them */
8113 /* running with a specific order... */
8114 if (! vtable->initialized)
8116 ex = mono_runtime_class_init_full (vtable, FALSE);
8118 set_exception_object (cfg, ex);
8119 goto exception_exit;
8123 addr = (char*)vtable->data + field->offset;
8125 if (cfg->compile_aot)
8126 EMIT_NEW_SFLDACONST (cfg, ins, field);
8128 EMIT_NEW_PCONST (cfg, ins, addr);
8131 * insert call to mono_threads_get_static_data (GPOINTER_TO_UINT (addr))
8132 * This could be later optimized to do just a couple of
8133 * memory dereferences with constant offsets.
8135 if (!cfg->compile_aot && ((gsize)addr & 0x80000000) == 0 && mono_get_thread_intrinsic (cfg)) {
8137 * Fast access to TLS data
8138 * Inline version of get_thread_static_data () in
8142 int idx, static_data_reg, array_reg, dreg;
8143 MonoInst *thread_ins;
8145 offset = (gsize)addr & 0x7fffffff;
8146 idx = (offset >> 24) - 1;
8148 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
8149 thread_ins = mono_get_thread_intrinsic (cfg);
8150 MONO_ADD_INS (cfg->cbb, thread_ins);
8151 static_data_reg = alloc_ireg (cfg);
8152 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
8153 array_reg = alloc_ireg (cfg);
8154 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
8155 dreg = alloc_ireg (cfg);
8156 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, (offset & 0xffffff));
8158 MonoInst *iargs [1];
8159 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
8160 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
8165 /* Generate IR to do the actual load/store operation */
8167 if (*ip == CEE_LDSFLDA) {
8168 ins->klass = mono_class_from_mono_type (field->type);
8169 ins->type = STACK_PTR;
8171 } else if (*ip == CEE_STSFLD) {
8176 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, ins->dreg, 0, sp [0]->dreg);
8177 store->flags |= ins_flag;
8179 gboolean is_const = FALSE;
8180 MonoVTable *vtable = NULL;
8182 if (!context_used) {
8183 vtable = mono_class_vtable (cfg->domain, klass);
8184 CHECK_TYPELOAD (klass);
8186 if (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) &&
8187 vtable->initialized && (field->type->attrs & FIELD_ATTRIBUTE_INIT_ONLY)) {
8188 gpointer addr = (char*)vtable->data + field->offset;
8189 int ro_type = field->type->type;
8190 if (ro_type == MONO_TYPE_VALUETYPE && field->type->data.klass->enumtype) {
8191 ro_type = mono_class_enum_basetype (field->type->data.klass)->type;
8193 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
8196 case MONO_TYPE_BOOLEAN:
8198 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
8202 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
8205 case MONO_TYPE_CHAR:
8207 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
8211 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
8216 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
8220 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
8223 #ifndef HAVE_MOVING_COLLECTOR
8226 case MONO_TYPE_STRING:
8227 case MONO_TYPE_OBJECT:
8228 case MONO_TYPE_CLASS:
8229 case MONO_TYPE_SZARRAY:
8231 case MONO_TYPE_FNPTR:
8232 case MONO_TYPE_ARRAY:
8233 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
8234 type_to_eval_stack_type ((cfg), field->type, *sp);
8240 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
8245 case MONO_TYPE_VALUETYPE:
8255 CHECK_STACK_OVF (1);
8257 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
8258 load->flags |= ins_flag;
8271 token = read32 (ip + 1);
8272 klass = mini_get_class (method, token, generic_context);
8273 CHECK_TYPELOAD (klass);
8274 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
8275 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
8286 const char *data_ptr;
8288 guint32 field_token;
8294 token = read32 (ip + 1);
8296 klass = mini_get_class (method, token, generic_context);
8297 CHECK_TYPELOAD (klass);
8299 if (cfg->generic_sharing_context)
8300 context_used = mono_class_check_context_used (klass);
8302 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
8303 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_I4);
8304 ins->sreg1 = sp [0]->dreg;
8305 ins->type = STACK_I4;
8306 ins->dreg = alloc_ireg (cfg);
8307 MONO_ADD_INS (cfg->cbb, ins);
8308 *sp = mono_decompose_opcode (cfg, ins);
8313 MonoClass *array_class = mono_array_class_get (klass, 1);
8314 /* FIXME: we cannot get a managed
8315 allocator because we can't get the
8316 open generic class's vtable. We
8317 have the same problem in
8318 handle_alloc_from_inst(). This
8319 needs to be solved so that we can
8320 have managed allocs of shared
8323 MonoVTable *array_class_vtable = mono_class_vtable (cfg->domain, array_class);
8324 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class_vtable, 1);
8326 MonoMethod *managed_alloc = NULL;
8328 /* FIXME: Decompose later to help abcrem */
8331 args [0] = emit_get_rgctx_klass (cfg, context_used,
8332 array_class, MONO_RGCTX_INFO_VTABLE);
8337 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
8339 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
8341 if (cfg->opt & MONO_OPT_SHARED) {
8342 /* Decompose now to avoid problems with references to the domainvar */
8343 MonoInst *iargs [3];
8345 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8346 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8349 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
8351 /* Decompose later since it is needed by abcrem */
8352 MonoClass *array_type = mono_array_class_get (klass, 1);
8353 mono_class_vtable (cfg->domain, array_type);
8354 CHECK_TYPELOAD (array_type);
8356 MONO_INST_NEW (cfg, ins, OP_NEWARR);
8357 ins->dreg = alloc_preg (cfg);
8358 ins->sreg1 = sp [0]->dreg;
8359 ins->inst_newa_class = klass;
8360 ins->type = STACK_OBJ;
8362 MONO_ADD_INS (cfg->cbb, ins);
8363 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8364 cfg->cbb->has_array_access = TRUE;
8366 /* Needed so mono_emit_load_get_addr () gets called */
8367 mono_get_got_var (cfg);
8377 * we inline/optimize the initialization sequence if possible.
8378 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
8379 * for small sizes open code the memcpy
8380 * ensure the rva field is big enough
8382 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
8383 MonoMethod *memcpy_method = get_memcpy_method ();
8384 MonoInst *iargs [3];
8385 int add_reg = alloc_preg (cfg);
8387 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
8388 if (cfg->compile_aot) {
8389 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
8391 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
8393 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
8394 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
8403 if (sp [0]->type != STACK_OBJ)
8406 dreg = alloc_preg (cfg);
8407 MONO_INST_NEW (cfg, ins, OP_LDLEN);
8408 ins->dreg = alloc_preg (cfg);
8409 ins->sreg1 = sp [0]->dreg;
8410 ins->type = STACK_I4;
8411 MONO_ADD_INS (cfg->cbb, ins);
8412 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8413 cfg->cbb->has_array_access = TRUE;
8421 if (sp [0]->type != STACK_OBJ)
8424 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8426 klass = mini_get_class (method, read32 (ip + 1), generic_context);
8427 CHECK_TYPELOAD (klass);
8428 /* we need to make sure that this array is exactly the type it needs
8429 * to be for correctness. the wrappers are lax with their usage
8430 * so we need to ignore them here
8432 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
8433 MonoClass *array_class = mono_array_class_get (klass, 1);
8434 mini_emit_check_array_type (cfg, sp [0], array_class);
8435 CHECK_TYPELOAD (array_class);
8439 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8454 case CEE_LDELEM_REF: {
8460 if (*ip == CEE_LDELEM) {
8462 token = read32 (ip + 1);
8463 klass = mini_get_class (method, token, generic_context);
8464 CHECK_TYPELOAD (klass);
8465 mono_class_init (klass);
8468 klass = array_access_to_klass (*ip);
8470 if (sp [0]->type != STACK_OBJ)
8473 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8475 if (sp [1]->opcode == OP_ICONST) {
8476 int array_reg = sp [0]->dreg;
8477 int index_reg = sp [1]->dreg;
8478 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8480 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8481 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
8483 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8484 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
8487 if (*ip == CEE_LDELEM)
8500 case CEE_STELEM_REF:
8507 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8509 if (*ip == CEE_STELEM) {
8511 token = read32 (ip + 1);
8512 klass = mini_get_class (method, token, generic_context);
8513 CHECK_TYPELOAD (klass);
8514 mono_class_init (klass);
8517 klass = array_access_to_klass (*ip);
8519 if (sp [0]->type != STACK_OBJ)
8522 /* storing a NULL doesn't need any of the complex checks in stelemref */
8523 if (generic_class_is_reference_type (cfg, klass) &&
8524 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
8525 MonoMethod* helper = mono_marshal_get_stelemref ();
8526 MonoInst *iargs [3];
8528 if (sp [0]->type != STACK_OBJ)
8530 if (sp [2]->type != STACK_OBJ)
8537 mono_emit_method_call (cfg, helper, iargs, NULL);
8539 if (sp [1]->opcode == OP_ICONST) {
8540 int array_reg = sp [0]->dreg;
8541 int index_reg = sp [1]->dreg;
8542 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8544 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8545 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
8547 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8548 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
8552 if (*ip == CEE_STELEM)
8559 case CEE_CKFINITE: {
8563 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
8564 ins->sreg1 = sp [0]->dreg;
8565 ins->dreg = alloc_freg (cfg);
8566 ins->type = STACK_R8;
8567 MONO_ADD_INS (bblock, ins);
8569 *sp++ = mono_decompose_opcode (cfg, ins);
8574 case CEE_REFANYVAL: {
8575 MonoInst *src_var, *src;
8577 int klass_reg = alloc_preg (cfg);
8578 int dreg = alloc_preg (cfg);
8581 MONO_INST_NEW (cfg, ins, *ip);
8584 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8585 CHECK_TYPELOAD (klass);
8586 mono_class_init (klass);
8588 if (cfg->generic_sharing_context)
8589 context_used = mono_class_check_context_used (klass);
8592 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
8594 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
8595 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
8596 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
8599 MonoInst *klass_ins;
8601 klass_ins = emit_get_rgctx_klass (cfg, context_used,
8602 klass, MONO_RGCTX_INFO_KLASS);
8605 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
8606 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
8608 mini_emit_class_check (cfg, klass_reg, klass);
8610 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
8611 ins->type = STACK_MP;
8616 case CEE_MKREFANY: {
8617 MonoInst *loc, *addr;
8620 MONO_INST_NEW (cfg, ins, *ip);
8623 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8624 CHECK_TYPELOAD (klass);
8625 mono_class_init (klass);
8627 if (cfg->generic_sharing_context)
8628 context_used = mono_class_check_context_used (klass);
8630 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
8631 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
8634 MonoInst *const_ins;
8635 int type_reg = alloc_preg (cfg);
8637 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
8638 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
8639 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8640 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8641 } else if (cfg->compile_aot) {
8642 int const_reg = alloc_preg (cfg);
8643 int type_reg = alloc_preg (cfg);
8645 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
8646 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
8647 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8648 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8650 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
8651 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
8653 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
8655 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
8656 ins->type = STACK_VTYPE;
8657 ins->klass = mono_defaults.typed_reference_class;
8664 MonoClass *handle_class;
8666 CHECK_STACK_OVF (1);
8669 n = read32 (ip + 1);
8671 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
8672 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
8673 handle = mono_method_get_wrapper_data (method, n);
8674 handle_class = mono_method_get_wrapper_data (method, n + 1);
8675 if (handle_class == mono_defaults.typehandle_class)
8676 handle = &((MonoClass*)handle)->byval_arg;
8679 handle = mono_ldtoken (image, n, &handle_class, generic_context);
8683 mono_class_init (handle_class);
8684 if (cfg->generic_sharing_context) {
8685 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
8686 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
8687 /* This case handles ldtoken
8688 of an open type, like for
8691 } else if (handle_class == mono_defaults.typehandle_class) {
8692 /* If we get a MONO_TYPE_CLASS
8693 then we need to provide the
8695 instantiation of it. */
8696 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
8699 context_used = mono_class_check_context_used (mono_class_from_mono_type (handle));
8700 } else if (handle_class == mono_defaults.fieldhandle_class)
8701 context_used = mono_class_check_context_used (((MonoClassField*)handle)->parent);
8702 else if (handle_class == mono_defaults.methodhandle_class)
8703 context_used = mono_method_check_context_used (handle);
8705 g_assert_not_reached ();
8708 if ((cfg->opt & MONO_OPT_SHARED) &&
8709 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
8710 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
8711 MonoInst *addr, *vtvar, *iargs [3];
8712 int method_context_used;
8714 if (cfg->generic_sharing_context)
8715 method_context_used = mono_method_check_context_used (method);
8717 method_context_used = 0;
8719 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8721 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
8722 EMIT_NEW_ICONST (cfg, iargs [1], n);
8723 if (method_context_used) {
8724 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
8725 method, MONO_RGCTX_INFO_METHOD);
8726 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
8728 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
8729 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
8731 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8733 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8735 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8737 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
8738 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
8739 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
8740 (cmethod->klass == mono_defaults.monotype_class->parent) &&
8741 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
8742 MonoClass *tclass = mono_class_from_mono_type (handle);
8744 mono_class_init (tclass);
8746 ins = emit_get_rgctx_klass (cfg, context_used,
8747 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
8748 } else if (cfg->compile_aot) {
8749 if (method->wrapper_type) {
8750 if (mono_class_get (tclass->image, tclass->type_token) == tclass && !generic_context) {
8751 /* Special case for static synchronized wrappers */
8752 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
8754 /* FIXME: n is not a normal token */
8755 cfg->disable_aot = TRUE;
8756 EMIT_NEW_PCONST (cfg, ins, NULL);
8759 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
8762 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
8764 ins->type = STACK_OBJ;
8765 ins->klass = cmethod->klass;
8768 MonoInst *addr, *vtvar;
8770 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8773 if (handle_class == mono_defaults.typehandle_class) {
8774 ins = emit_get_rgctx_klass (cfg, context_used,
8775 mono_class_from_mono_type (handle),
8776 MONO_RGCTX_INFO_TYPE);
8777 } else if (handle_class == mono_defaults.methodhandle_class) {
8778 ins = emit_get_rgctx_method (cfg, context_used,
8779 handle, MONO_RGCTX_INFO_METHOD);
8780 } else if (handle_class == mono_defaults.fieldhandle_class) {
8781 ins = emit_get_rgctx_field (cfg, context_used,
8782 handle, MONO_RGCTX_INFO_CLASS_FIELD);
8784 g_assert_not_reached ();
8786 } else if (cfg->compile_aot) {
8787 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n);
8789 EMIT_NEW_PCONST (cfg, ins, handle);
8791 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8792 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8793 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8803 MONO_INST_NEW (cfg, ins, OP_THROW);
8805 ins->sreg1 = sp [0]->dreg;
8807 bblock->out_of_line = TRUE;
8808 MONO_ADD_INS (bblock, ins);
8809 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
8810 MONO_ADD_INS (bblock, ins);
8813 link_bblock (cfg, bblock, end_bblock);
8814 start_new_bblock = 1;
8816 case CEE_ENDFINALLY:
8817 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
8818 MONO_ADD_INS (bblock, ins);
8820 start_new_bblock = 1;
8823 * Control will leave the method so empty the stack, otherwise
8824 * the next basic block will start with a nonempty stack.
8826 while (sp != stack_start) {
8834 if (*ip == CEE_LEAVE) {
8836 target = ip + 5 + (gint32)read32(ip + 1);
8839 target = ip + 2 + (signed char)(ip [1]);
8842 /* empty the stack */
8843 while (sp != stack_start) {
8848 * If this leave statement is in a catch block, check for a
8849 * pending exception, and rethrow it if necessary.
8850 * We avoid doing this in runtime invoke wrappers, since those are called
8851 * by native code which expects the wrapper to catch all exceptions.
8853 for (i = 0; i < header->num_clauses; ++i) {
8854 MonoExceptionClause *clause = &header->clauses [i];
8857 * Use <= in the final comparison to handle clauses with multiple
8858 * leave statements, like in bug #78024.
8859 * The ordering of the exception clauses guarantees that we find the
8862 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
8864 MonoBasicBlock *dont_throw;
8869 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
8872 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
8874 NEW_BBLOCK (cfg, dont_throw);
8877 * Currently, we always rethrow the abort exception, despite the
8878 * fact that this is not correct. See thread6.cs for an example.
8879 * But propagating the abort exception is more important than
8880 * getting the semantics right.
8882 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
8883 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
8884 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
8886 MONO_START_BB (cfg, dont_throw);
8891 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
8893 for (tmp = handlers; tmp; tmp = tmp->next) {
8895 link_bblock (cfg, bblock, tblock);
8896 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
8897 ins->inst_target_bb = tblock;
8898 MONO_ADD_INS (bblock, ins);
8899 bblock->has_call_handler = 1;
8900 if (COMPILE_LLVM (cfg)) {
8901 MonoBasicBlock *target_bb;
8904 * Link the finally bblock with the target, since it will
8905 * conceptually branch there.
8906 * FIXME: Have to link the bblock containing the endfinally.
8908 GET_BBLOCK (cfg, target_bb, target);
8909 link_bblock (cfg, tblock, target_bb);
8912 g_list_free (handlers);
8915 MONO_INST_NEW (cfg, ins, OP_BR);
8916 MONO_ADD_INS (bblock, ins);
8917 GET_BBLOCK (cfg, tblock, target);
8918 link_bblock (cfg, bblock, tblock);
8919 ins->inst_target_bb = tblock;
8920 start_new_bblock = 1;
8922 if (*ip == CEE_LEAVE)
8931 * Mono specific opcodes
8933 case MONO_CUSTOM_PREFIX: {
8935 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
8939 case CEE_MONO_ICALL: {
8941 MonoJitICallInfo *info;
8943 token = read32 (ip + 2);
8944 func = mono_method_get_wrapper_data (method, token);
8945 info = mono_find_jit_icall_by_addr (func);
8948 CHECK_STACK (info->sig->param_count);
8949 sp -= info->sig->param_count;
8951 ins = mono_emit_jit_icall (cfg, info->func, sp);
8952 if (!MONO_TYPE_IS_VOID (info->sig->ret))
8956 inline_costs += 10 * num_calls++;
8960 case CEE_MONO_LDPTR: {
8963 CHECK_STACK_OVF (1);
8965 token = read32 (ip + 2);
8967 ptr = mono_method_get_wrapper_data (method, token);
8968 if (cfg->compile_aot && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (strstr (method->name, "__icall_wrapper_") == method->name)) {
8969 MonoJitICallInfo *callinfo;
8970 const char *icall_name;
8972 icall_name = method->name + strlen ("__icall_wrapper_");
8973 g_assert (icall_name);
8974 callinfo = mono_find_jit_icall_by_name (icall_name);
8975 g_assert (callinfo);
8977 if (ptr == callinfo->func) {
8978 /* Will be transformed into an AOTCONST later */
8979 EMIT_NEW_PCONST (cfg, ins, ptr);
8985 /* FIXME: Generalize this */
8986 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
8987 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
8992 EMIT_NEW_PCONST (cfg, ins, ptr);
8995 inline_costs += 10 * num_calls++;
8996 /* Can't embed random pointers into AOT code */
8997 cfg->disable_aot = 1;
9000 case CEE_MONO_ICALL_ADDR: {
9001 MonoMethod *cmethod;
9004 CHECK_STACK_OVF (1);
9006 token = read32 (ip + 2);
9008 cmethod = mono_method_get_wrapper_data (method, token);
9010 if (cfg->compile_aot) {
9011 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
9013 ptr = mono_lookup_internal_call (cmethod);
9015 EMIT_NEW_PCONST (cfg, ins, ptr);
9021 case CEE_MONO_VTADDR: {
9022 MonoInst *src_var, *src;
9028 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9029 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
9034 case CEE_MONO_NEWOBJ: {
9035 MonoInst *iargs [2];
9037 CHECK_STACK_OVF (1);
9039 token = read32 (ip + 2);
9040 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9041 mono_class_init (klass);
9042 NEW_DOMAINCONST (cfg, iargs [0]);
9043 MONO_ADD_INS (cfg->cbb, iargs [0]);
9044 NEW_CLASSCONST (cfg, iargs [1], klass);
9045 MONO_ADD_INS (cfg->cbb, iargs [1]);
9046 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
9048 inline_costs += 10 * num_calls++;
9051 case CEE_MONO_OBJADDR:
9054 MONO_INST_NEW (cfg, ins, OP_MOVE);
9055 ins->dreg = alloc_preg (cfg);
9056 ins->sreg1 = sp [0]->dreg;
9057 ins->type = STACK_MP;
9058 MONO_ADD_INS (cfg->cbb, ins);
9062 case CEE_MONO_LDNATIVEOBJ:
9064 * Similar to LDOBJ, but instead load the unmanaged
9065 * representation of the vtype to the stack.
9070 token = read32 (ip + 2);
9071 klass = mono_method_get_wrapper_data (method, token);
9072 g_assert (klass->valuetype);
9073 mono_class_init (klass);
9076 MonoInst *src, *dest, *temp;
9079 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
9080 temp->backend.is_pinvoke = 1;
9081 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
9082 mini_emit_stobj (cfg, dest, src, klass, TRUE);
9084 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
9085 dest->type = STACK_VTYPE;
9086 dest->klass = klass;
9092 case CEE_MONO_RETOBJ: {
9094 * Same as RET, but return the native representation of a vtype
9097 g_assert (cfg->ret);
9098 g_assert (mono_method_signature (method)->pinvoke);
9103 token = read32 (ip + 2);
9104 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9106 if (!cfg->vret_addr) {
9107 g_assert (cfg->ret_var_is_local);
9109 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
9111 EMIT_NEW_RETLOADA (cfg, ins);
9113 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
9115 if (sp != stack_start)
9118 MONO_INST_NEW (cfg, ins, OP_BR);
9119 ins->inst_target_bb = end_bblock;
9120 MONO_ADD_INS (bblock, ins);
9121 link_bblock (cfg, bblock, end_bblock);
9122 start_new_bblock = 1;
9126 case CEE_MONO_CISINST:
9127 case CEE_MONO_CCASTCLASS: {
9132 token = read32 (ip + 2);
9133 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9134 if (ip [1] == CEE_MONO_CISINST)
9135 ins = handle_cisinst (cfg, klass, sp [0]);
9137 ins = handle_ccastclass (cfg, klass, sp [0]);
9143 case CEE_MONO_SAVE_LMF:
9144 case CEE_MONO_RESTORE_LMF:
9145 #ifdef MONO_ARCH_HAVE_LMF_OPS
9146 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
9147 MONO_ADD_INS (bblock, ins);
9148 cfg->need_lmf_area = TRUE;
9152 case CEE_MONO_CLASSCONST:
9153 CHECK_STACK_OVF (1);
9155 token = read32 (ip + 2);
9156 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
9159 inline_costs += 10 * num_calls++;
9161 case CEE_MONO_NOT_TAKEN:
9162 bblock->out_of_line = TRUE;
9166 CHECK_STACK_OVF (1);
9168 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
9169 ins->dreg = alloc_preg (cfg);
9170 ins->inst_offset = (gint32)read32 (ip + 2);
9171 ins->type = STACK_PTR;
9172 MONO_ADD_INS (bblock, ins);
9176 case CEE_MONO_DYN_CALL: {
9179 /* It would be easier to call a trampoline, but that would put an
9180 * extra frame on the stack, confusing exception handling. So
9181 * implement it inline using an opcode for now.
9184 if (!cfg->dyn_call_var) {
9185 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
9186 /* prevent it from being register allocated */
9187 cfg->dyn_call_var->flags |= MONO_INST_INDIRECT;
9190 /* Has to use a call inst since the local regalloc expects it */
9191 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
9192 ins = (MonoInst*)call;
9194 ins->sreg1 = sp [0]->dreg;
9195 ins->sreg2 = sp [1]->dreg;
9196 MONO_ADD_INS (bblock, ins);
9198 #ifdef MONO_ARCH_DYN_CALL_PARAM_AREA
9199 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
9203 inline_costs += 10 * num_calls++;
9208 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
9218 /* somewhat similar to LDTOKEN */
9219 MonoInst *addr, *vtvar;
9220 CHECK_STACK_OVF (1);
9221 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
9223 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9224 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
9226 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9227 ins->type = STACK_VTYPE;
9228 ins->klass = mono_defaults.argumenthandle_class;
9241 * The following transforms:
9242 * CEE_CEQ into OP_CEQ
9243 * CEE_CGT into OP_CGT
9244 * CEE_CGT_UN into OP_CGT_UN
9245 * CEE_CLT into OP_CLT
9246 * CEE_CLT_UN into OP_CLT_UN
9248 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
9250 MONO_INST_NEW (cfg, ins, cmp->opcode);
9252 cmp->sreg1 = sp [0]->dreg;
9253 cmp->sreg2 = sp [1]->dreg;
9254 type_from_op (cmp, sp [0], sp [1]);
9256 if ((sp [0]->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
9257 cmp->opcode = OP_LCOMPARE;
9258 else if (sp [0]->type == STACK_R8)
9259 cmp->opcode = OP_FCOMPARE;
9261 cmp->opcode = OP_ICOMPARE;
9262 MONO_ADD_INS (bblock, cmp);
9263 ins->type = STACK_I4;
9264 ins->dreg = alloc_dreg (cfg, ins->type);
9265 type_from_op (ins, sp [0], sp [1]);
9267 if (cmp->opcode == OP_FCOMPARE) {
9269 * The backends expect the fceq opcodes to do the
9272 cmp->opcode = OP_NOP;
9273 ins->sreg1 = cmp->sreg1;
9274 ins->sreg2 = cmp->sreg2;
9276 MONO_ADD_INS (bblock, ins);
9283 MonoMethod *cil_method;
9284 gboolean needs_static_rgctx_invoke;
9286 CHECK_STACK_OVF (1);
9288 n = read32 (ip + 2);
9289 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9292 mono_class_init (cmethod->klass);
9294 mono_save_token_info (cfg, image, n, cmethod);
9296 if (cfg->generic_sharing_context)
9297 context_used = mono_method_check_context_used (cmethod);
9299 needs_static_rgctx_invoke = mono_method_needs_static_rgctx_invoke (cmethod, TRUE);
9301 cil_method = cmethod;
9302 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
9303 METHOD_ACCESS_FAILURE;
9305 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9306 if (check_linkdemand (cfg, method, cmethod))
9308 CHECK_CFG_EXCEPTION;
9309 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9310 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9314 * Optimize the common case of ldftn+delegate creation
9316 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE) && !defined(HAVE_WRITE_BARRIERS)
9317 /* FIXME: SGEN support */
9318 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
9319 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
9320 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
9321 MonoInst *target_ins;
9323 int invoke_context_used = 0;
9325 invoke = mono_get_delegate_invoke (ctor_method->klass);
9326 if (!invoke || !mono_method_signature (invoke))
9329 if (cfg->generic_sharing_context)
9330 invoke_context_used = mono_method_check_context_used (invoke);
9332 if (invoke_context_used == 0) {
9334 if (cfg->verbose_level > 3)
9335 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
9336 target_ins = sp [-1];
9338 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used);
9339 CHECK_CFG_EXCEPTION;
9348 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
9349 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
9353 inline_costs += 10 * num_calls++;
9356 case CEE_LDVIRTFTN: {
9361 n = read32 (ip + 2);
9362 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9365 mono_class_init (cmethod->klass);
9367 if (cfg->generic_sharing_context)
9368 context_used = mono_method_check_context_used (cmethod);
9370 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9371 if (check_linkdemand (cfg, method, cmethod))
9373 CHECK_CFG_EXCEPTION;
9374 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9375 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9381 args [1] = emit_get_rgctx_method (cfg, context_used,
9382 cmethod, MONO_RGCTX_INFO_METHOD);
9385 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
9387 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
9390 inline_costs += 10 * num_calls++;
9394 CHECK_STACK_OVF (1);
9396 n = read16 (ip + 2);
9398 EMIT_NEW_ARGLOAD (cfg, ins, n);
9403 CHECK_STACK_OVF (1);
9405 n = read16 (ip + 2);
9407 NEW_ARGLOADA (cfg, ins, n);
9408 MONO_ADD_INS (cfg->cbb, ins);
9416 n = read16 (ip + 2);
9418 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
9420 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
9424 CHECK_STACK_OVF (1);
9426 n = read16 (ip + 2);
9428 EMIT_NEW_LOCLOAD (cfg, ins, n);
9433 unsigned char *tmp_ip;
9434 CHECK_STACK_OVF (1);
9436 n = read16 (ip + 2);
9439 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
9445 EMIT_NEW_LOCLOADA (cfg, ins, n);
9454 n = read16 (ip + 2);
9456 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
9458 emit_stloc_ir (cfg, sp, header, n);
9465 if (sp != stack_start)
9467 if (cfg->method != method)
9469 * Inlining this into a loop in a parent could lead to
9470 * stack overflows which is different behavior than the
9471 * non-inlined case, thus disable inlining in this case.
9473 goto inline_failure;
9475 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
9476 ins->dreg = alloc_preg (cfg);
9477 ins->sreg1 = sp [0]->dreg;
9478 ins->type = STACK_PTR;
9479 MONO_ADD_INS (cfg->cbb, ins);
9481 cfg->flags |= MONO_CFG_HAS_ALLOCA;
9483 ins->flags |= MONO_INST_INIT;
9488 case CEE_ENDFILTER: {
9489 MonoExceptionClause *clause, *nearest;
9490 int cc, nearest_num;
9494 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
9496 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
9497 ins->sreg1 = (*sp)->dreg;
9498 MONO_ADD_INS (bblock, ins);
9499 start_new_bblock = 1;
9504 for (cc = 0; cc < header->num_clauses; ++cc) {
9505 clause = &header->clauses [cc];
9506 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
9507 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
9508 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
9514 if ((ip - header->code) != nearest->handler_offset)
9519 case CEE_UNALIGNED_:
9520 ins_flag |= MONO_INST_UNALIGNED;
9521 /* FIXME: record alignment? we can assume 1 for now */
9526 ins_flag |= MONO_INST_VOLATILE;
9530 ins_flag |= MONO_INST_TAILCALL;
9531 cfg->flags |= MONO_CFG_HAS_TAIL;
9532 /* Can't inline tail calls at this time */
9533 inline_costs += 100000;
9540 token = read32 (ip + 2);
9541 klass = mini_get_class (method, token, generic_context);
9542 CHECK_TYPELOAD (klass);
9543 if (generic_class_is_reference_type (cfg, klass))
9544 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
9546 mini_emit_initobj (cfg, *sp, NULL, klass);
9550 case CEE_CONSTRAINED_:
9552 token = read32 (ip + 2);
9553 if (method->wrapper_type != MONO_WRAPPER_NONE)
9554 constrained_call = (MonoClass *)mono_method_get_wrapper_data (method, token);
9556 constrained_call = mono_class_get_full (image, token, generic_context);
9557 CHECK_TYPELOAD (constrained_call);
9562 MonoInst *iargs [3];
9566 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
9567 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
9568 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
9569 /* emit_memset only works when val == 0 */
9570 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
9575 if (ip [1] == CEE_CPBLK) {
9576 MonoMethod *memcpy_method = get_memcpy_method ();
9577 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
9579 MonoMethod *memset_method = get_memset_method ();
9580 mono_emit_method_call (cfg, memset_method, iargs, NULL);
9590 ins_flag |= MONO_INST_NOTYPECHECK;
9592 ins_flag |= MONO_INST_NORANGECHECK;
9593 /* we ignore the no-nullcheck for now since we
9594 * really do it explicitly only when doing callvirt->call
9600 int handler_offset = -1;
9602 for (i = 0; i < header->num_clauses; ++i) {
9603 MonoExceptionClause *clause = &header->clauses [i];
9604 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
9605 handler_offset = clause->handler_offset;
9610 bblock->flags |= BB_EXCEPTION_UNSAFE;
9612 g_assert (handler_offset != -1);
9614 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
9615 MONO_INST_NEW (cfg, ins, OP_RETHROW);
9616 ins->sreg1 = load->dreg;
9617 MONO_ADD_INS (bblock, ins);
9619 link_bblock (cfg, bblock, end_bblock);
9620 start_new_bblock = 1;
9628 CHECK_STACK_OVF (1);
9630 token = read32 (ip + 2);
9631 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !method->klass->image->dynamic) {
9632 MonoType *type = mono_type_create_from_typespec (image, token);
9633 token = mono_type_size (type, &ialign);
9635 MonoClass *klass = mono_class_get_full (image, token, generic_context);
9636 CHECK_TYPELOAD (klass);
9637 mono_class_init (klass);
9638 token = mono_class_value_size (klass, &align);
9640 EMIT_NEW_ICONST (cfg, ins, token);
9645 case CEE_REFANYTYPE: {
9646 MonoInst *src_var, *src;
9652 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9654 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
9655 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
9656 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
9674 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
9684 g_warning ("opcode 0x%02x not handled", *ip);
9688 if (start_new_bblock != 1)
9691 bblock->cil_length = ip - bblock->cil_code;
9692 bblock->next_bb = end_bblock;
9694 if (cfg->method == method && cfg->domainvar) {
9696 MonoInst *get_domain;
9698 cfg->cbb = init_localsbb;
9700 if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
9701 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
9704 get_domain->dreg = alloc_preg (cfg);
9705 MONO_ADD_INS (cfg->cbb, get_domain);
9707 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
9708 MONO_ADD_INS (cfg->cbb, store);
9711 #ifdef TARGET_POWERPC
9712 if (cfg->compile_aot)
9713 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
9714 mono_get_got_var (cfg);
9717 if (cfg->method == method && cfg->got_var)
9718 mono_emit_load_got_addr (cfg);
9723 cfg->cbb = init_localsbb;
9725 for (i = 0; i < header->num_locals; ++i) {
9726 MonoType *ptype = header->locals [i];
9727 int t = ptype->type;
9728 dreg = cfg->locals [i]->dreg;
9730 if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
9731 t = mono_class_enum_basetype (ptype->data.klass)->type;
9733 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9734 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
9735 MONO_EMIT_NEW_ICONST (cfg, cfg->locals [i]->dreg, 0);
9736 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
9737 MONO_EMIT_NEW_I8CONST (cfg, cfg->locals [i]->dreg, 0);
9738 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
9739 MONO_INST_NEW (cfg, ins, OP_R8CONST);
9740 ins->type = STACK_R8;
9741 ins->inst_p0 = (void*)&r8_0;
9742 ins->dreg = alloc_dreg (cfg, STACK_R8);
9743 MONO_ADD_INS (init_localsbb, ins);
9744 EMIT_NEW_LOCSTORE (cfg, store, i, ins);
9745 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
9746 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
9747 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
9749 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9754 if (cfg->init_ref_vars && cfg->method == method) {
9755 /* Emit initialization for ref vars */
9756 // FIXME: Avoid duplication initialization for IL locals.
9757 for (i = 0; i < cfg->num_varinfo; ++i) {
9758 MonoInst *ins = cfg->varinfo [i];
9760 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
9761 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
9765 /* Add a sequence point for method entry/exit events */
9767 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
9768 MONO_ADD_INS (init_localsbb, ins);
9769 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
9770 MONO_ADD_INS (cfg->bb_exit, ins);
9775 if (cfg->method == method) {
9777 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
9778 bb->region = mono_find_block_region (cfg, bb->real_offset);
9780 mono_create_spvar_for_region (cfg, bb->region);
9781 if (cfg->verbose_level > 2)
9782 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
9786 g_slist_free (class_inits);
9787 dont_inline = g_list_remove (dont_inline, method);
9789 if (inline_costs < 0) {
9792 /* Method is too large */
9793 mname = mono_method_full_name (method, TRUE);
9794 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
9795 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
9800 if ((cfg->verbose_level > 2) && (cfg->method == method))
9801 mono_print_code (cfg, "AFTER METHOD-TO-IR");
9803 return inline_costs;
9806 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
9807 g_slist_free (class_inits);
9808 mono_basic_block_free (bb);
9809 dont_inline = g_list_remove (dont_inline, method);
9813 g_slist_free (class_inits);
9814 mono_basic_block_free (bb);
9815 dont_inline = g_list_remove (dont_inline, method);
9819 g_slist_free (class_inits);
9820 mono_basic_block_free (bb);
9821 dont_inline = g_list_remove (dont_inline, method);
9822 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
9826 g_slist_free (class_inits);
9827 mono_basic_block_free (bb);
9828 dont_inline = g_list_remove (dont_inline, method);
9829 set_exception_type_from_invalid_il (cfg, method, ip);
9834 store_membase_reg_to_store_membase_imm (int opcode)
9837 case OP_STORE_MEMBASE_REG:
9838 return OP_STORE_MEMBASE_IMM;
9839 case OP_STOREI1_MEMBASE_REG:
9840 return OP_STOREI1_MEMBASE_IMM;
9841 case OP_STOREI2_MEMBASE_REG:
9842 return OP_STOREI2_MEMBASE_IMM;
9843 case OP_STOREI4_MEMBASE_REG:
9844 return OP_STOREI4_MEMBASE_IMM;
9845 case OP_STOREI8_MEMBASE_REG:
9846 return OP_STOREI8_MEMBASE_IMM;
9848 g_assert_not_reached ();
9854 #endif /* DISABLE_JIT */
/*
 * mono_op_to_op_imm:
 *
 *   Map an opcode whose second operand is a register to the equivalent opcode
 * taking an immediate operand instead (e.g. OP_STORE_MEMBASE_REG ->
 * OP_STORE_MEMBASE_IMM).  Used by the local copy/constant propagation passes.
 * NOTE(review): many case labels in this region are not visible in this
 * extract; only the visible lines are annotated.
 */
9857 mono_op_to_op_imm (int opcode)
9867 return OP_IDIV_UN_IMM;
9871 return OP_IREM_UN_IMM;
9885 return OP_ISHR_UN_IMM;
9902 return OP_LSHR_UN_IMM;
9905 return OP_COMPARE_IMM;
9907 return OP_ICOMPARE_IMM;
9909 return OP_LCOMPARE_IMM;
/* Store opcodes: *_MEMBASE_REG -> *_MEMBASE_IMM */
9911 case OP_STORE_MEMBASE_REG:
9912 return OP_STORE_MEMBASE_IMM;
9913 case OP_STOREI1_MEMBASE_REG:
9914 return OP_STOREI1_MEMBASE_IMM;
9915 case OP_STOREI2_MEMBASE_REG:
9916 return OP_STOREI2_MEMBASE_IMM;
9917 case OP_STOREI4_MEMBASE_REG:
9918 return OP_STOREI4_MEMBASE_IMM;
/* x86/amd64-specific opcodes with immediate forms */
9920 #if defined(TARGET_X86) || defined (TARGET_AMD64)
9922 return OP_X86_PUSH_IMM;
9923 case OP_X86_COMPARE_MEMBASE_REG:
9924 return OP_X86_COMPARE_MEMBASE_IMM;
9926 #if defined(TARGET_AMD64)
9927 case OP_AMD64_ICOMPARE_MEMBASE_REG:
9928 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
9930 case OP_VOIDCALL_REG:
9939 return OP_LOCALLOC_IMM;
9946 ldind_to_load_membase (int opcode)
9950 return OP_LOADI1_MEMBASE;
9952 return OP_LOADU1_MEMBASE;
9954 return OP_LOADI2_MEMBASE;
9956 return OP_LOADU2_MEMBASE;
9958 return OP_LOADI4_MEMBASE;
9960 return OP_LOADU4_MEMBASE;
9962 return OP_LOAD_MEMBASE;
9964 return OP_LOAD_MEMBASE;
9966 return OP_LOADI8_MEMBASE;
9968 return OP_LOADR4_MEMBASE;
9970 return OP_LOADR8_MEMBASE;
9972 g_assert_not_reached ();
9979 stind_to_store_membase (int opcode)
9983 return OP_STOREI1_MEMBASE_REG;
9985 return OP_STOREI2_MEMBASE_REG;
9987 return OP_STOREI4_MEMBASE_REG;
9990 return OP_STORE_MEMBASE_REG;
9992 return OP_STOREI8_MEMBASE_REG;
9994 return OP_STORER4_MEMBASE_REG;
9996 return OP_STORER8_MEMBASE_REG;
9998 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map an OP_LOAD*_MEMBASE opcode (load from base register + offset) to the
 * corresponding absolute-address OP_LOAD*_MEM opcode.  Returns -1 when the
 * target architecture has no such instruction, or the opcode has no MEM form.
 */
static inline int
mono_load_membase_to_load_mem (int opcode)
{
	// FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
#if defined(TARGET_X86) || defined(TARGET_AMD64)
	switch (opcode) {
	case OP_LOAD_MEMBASE:
		return OP_LOAD_MEM;
	case OP_LOADU1_MEMBASE:
		return OP_LOADU1_MEM;
	case OP_LOADU2_MEMBASE:
		return OP_LOADU2_MEM;
	case OP_LOADI4_MEMBASE:
		return OP_LOADI4_MEM;
	case OP_LOADU4_MEMBASE:
		return OP_LOADU4_MEM;
#if SIZEOF_REGISTER == 8
	/* 64 bit loads only exist when registers are 64 bit wide */
	case OP_LOADI8_MEMBASE:
		return OP_LOADI8_MEM;
#endif
	}
#endif

	return -1;
}
/*
 * op_to_op_dest_membase:
 *
 *   Given a store opcode STORE_OPCODE and an ALU opcode OPCODE whose dreg is
 * about to be spilled with that store, return a fused read-modify-write
 * _MEMBASE opcode which operates directly on the stack slot, or -1 if no such
 * fusion is possible.  Only implemented for x86/amd64.
 * NOTE(review): the case labels for this region are not visible in this
 * extract; only the visible lines are annotated.
 */
10031 op_to_op_dest_membase (int store_opcode, int opcode)
10033 #if defined(TARGET_X86)
/* Only pointer-sized / int32 stores can be fused on x86 */
10034 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
10039 return OP_X86_ADD_MEMBASE_REG;
10041 return OP_X86_SUB_MEMBASE_REG;
10043 return OP_X86_AND_MEMBASE_REG;
10045 return OP_X86_OR_MEMBASE_REG;
10047 return OP_X86_XOR_MEMBASE_REG;
10050 return OP_X86_ADD_MEMBASE_IMM;
10053 return OP_X86_SUB_MEMBASE_IMM;
10056 return OP_X86_AND_MEMBASE_IMM;
10059 return OP_X86_OR_MEMBASE_IMM;
10062 return OP_X86_XOR_MEMBASE_IMM;
10068 #if defined(TARGET_AMD64)
/* amd64 additionally allows fusing 64 bit stores */
10069 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
/* 32 bit ALU ops use the X86_ variants ... */
10074 return OP_X86_ADD_MEMBASE_REG;
10076 return OP_X86_SUB_MEMBASE_REG;
10078 return OP_X86_AND_MEMBASE_REG;
10080 return OP_X86_OR_MEMBASE_REG;
10082 return OP_X86_XOR_MEMBASE_REG;
10084 return OP_X86_ADD_MEMBASE_IMM;
10086 return OP_X86_SUB_MEMBASE_IMM;
10088 return OP_X86_AND_MEMBASE_IMM;
10090 return OP_X86_OR_MEMBASE_IMM;
10092 return OP_X86_XOR_MEMBASE_IMM;
/* ... while 64 bit ALU ops use the AMD64_ variants */
10094 return OP_AMD64_ADD_MEMBASE_REG;
10096 return OP_AMD64_SUB_MEMBASE_REG;
10098 return OP_AMD64_AND_MEMBASE_REG;
10100 return OP_AMD64_OR_MEMBASE_REG;
10102 return OP_AMD64_XOR_MEMBASE_REG;
10105 return OP_AMD64_ADD_MEMBASE_IMM;
10108 return OP_AMD64_SUB_MEMBASE_IMM;
10111 return OP_AMD64_AND_MEMBASE_IMM;
10114 return OP_AMD64_OR_MEMBASE_IMM;
10117 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Fuse a compare-result opcode with the byte store (STORE_OPCODE) of its
 * result: returns an x86 SETcc-to-memory opcode, or -1 when no fusion is
 * possible.  Only a STOREI1 (the SETcc result is a single byte) qualifies.
 * NOTE(review): the case labels are not visible in this extract.
 */
10127 op_to_op_store_membase (int store_opcode, int opcode)
10129 #if defined(TARGET_X86) || defined(TARGET_AMD64)
10132 if (store_opcode == OP_STOREI1_MEMBASE_REG)
10133 return OP_X86_SETEQ_MEMBASE;
10135 if (store_opcode == OP_STOREI1_MEMBASE_REG)
10136 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Fuse a load (LOAD_OPCODE) of the first source operand into OPCODE,
 * producing a *_MEMBASE variant which reads the operand directly from memory.
 * Returns -1 when no fused form exists.  Only implemented for x86/amd64.
 * NOTE(review): several case labels are not visible in this extract.
 */
10144 op_to_op_src1_membase (int load_opcode, int opcode)
10147 /* FIXME: This has sign extension issues */
/* Special case: 8 bit unsigned load + 32 bit immediate compare */
10149 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
10150 return OP_X86_COMPARE_MEMBASE8_IMM;
/* Otherwise only pointer-sized / int32 loads can be fused on x86 */
10153 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
10158 return OP_X86_PUSH_MEMBASE;
10159 case OP_COMPARE_IMM:
10160 case OP_ICOMPARE_IMM:
10161 return OP_X86_COMPARE_MEMBASE_IMM;
10164 return OP_X86_COMPARE_MEMBASE_REG;
10168 #ifdef TARGET_AMD64
10169 /* FIXME: This has sign extension issues */
10171 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
10172 return OP_X86_COMPARE_MEMBASE8_IMM;
/* push takes a pointer-sized operand on amd64 */
10177 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10178 return OP_X86_PUSH_MEMBASE;
/* The block below is commented out in the original source: */
10180 /* FIXME: This only works for 32 bit immediates
10181 case OP_COMPARE_IMM:
10182 case OP_LCOMPARE_IMM:
10183 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10184 return OP_AMD64_COMPARE_MEMBASE_IMM;
10186 case OP_ICOMPARE_IMM:
10187 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10188 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
/* Register compares: pick 64 vs 32 bit form from the load width */
10192 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10193 return OP_AMD64_COMPARE_MEMBASE_REG;
10196 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10197 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Fuse a load (LOAD_OPCODE) of the second source operand into OPCODE,
 * producing a *_REG_MEMBASE variant which reads sreg2 directly from memory.
 * Returns -1 when no fused form exists.  Only implemented for x86/amd64.
 * NOTE(review): several case labels are not visible in this extract.
 */
10206 op_to_op_src2_membase (int load_opcode, int opcode)
/* Only pointer-sized / int32 loads can be fused on x86 */
10209 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
10215 return OP_X86_COMPARE_REG_MEMBASE;
10217 return OP_X86_ADD_REG_MEMBASE;
10219 return OP_X86_SUB_REG_MEMBASE;
10221 return OP_X86_AND_REG_MEMBASE;
10223 return OP_X86_OR_REG_MEMBASE;
10225 return OP_X86_XOR_REG_MEMBASE;
10229 #ifdef TARGET_AMD64
/* On amd64 the load width selects between the 32 bit (X86_/ICOMPARE)
 * and 64 bit (AMD64_/COMPARE) fused forms. */
10232 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10233 return OP_AMD64_ICOMPARE_REG_MEMBASE;
10237 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10238 return OP_AMD64_COMPARE_REG_MEMBASE;
10241 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10242 return OP_X86_ADD_REG_MEMBASE;
10244 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10245 return OP_X86_SUB_REG_MEMBASE;
10247 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10248 return OP_X86_AND_REG_MEMBASE;
10250 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10251 return OP_X86_OR_REG_MEMBASE;
10253 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10254 return OP_X86_XOR_REG_MEMBASE;
10256 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10257 return OP_AMD64_ADD_REG_MEMBASE;
10259 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10260 return OP_AMD64_SUB_REG_MEMBASE;
10262 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10263 return OP_AMD64_AND_REG_MEMBASE;
10265 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10266 return OP_AMD64_OR_REG_MEMBASE;
10268 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10269 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but refuse the conversion (the missing case
 * labels fall through and presumably return -1) for opcodes which are
 * emulated in software on this target (long shifts on 32 bit registers,
 * mul/div when MONO_ARCH_EMULATE_* is set), since the emulation helpers
 * take register arguments.  NOTE(review): case labels not visible here.
 */
10277 mono_op_to_op_imm_noemul (int opcode)
10280 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
10285 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
10293 return mono_op_to_op_imm (opcode);
10297 #ifndef DISABLE_JIT
10300 * mono_handle_global_vregs:
10302 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
/*
 * mono_handle_global_vregs:
 *
 *   Pass structure visible here:
 *   1) scan every instruction of every bblock, recording for each vreg the
 *      bblock it was seen in; a vreg seen in two different bblocks is made
 *      "global" by creating an OP_LOCAL variable for it;
 *   2) conversely, variables only ever used in a single bblock are demoted
 *      back to local vregs (MONO_INST_IS_DEAD) so the spill pass can keep
 *      them in registers;
 *   3) compact cfg->varinfo / cfg->vars to drop the dead entries.
 */
10306 mono_handle_global_vregs (MonoCompile *cfg)
10308 gint32 *vreg_to_bb;
10309 MonoBasicBlock *bb;
/* NOTE(review): element type is gint32, but sizeof (gint32*) is used here —
 * over-allocates on 64 bit targets; harmless but looks unintended. */
10312 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
10314 #ifdef MONO_ARCH_SIMD_INTRINSICS
10315 if (cfg->uses_simd_intrinsics)
10316 mono_simd_simplify_indirection (cfg);
10319 /* Find local vregs used in more than one bb */
10320 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10321 MonoInst *ins = bb->code;
10322 int block_num = bb->block_num;
10324 if (cfg->verbose_level > 2)
10325 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
10328 for (; ins; ins = ins->next) {
10329 const char *spec = INS_INFO (ins->opcode);
10330 int regtype = 0, regindex;
10333 if (G_UNLIKELY (cfg->verbose_level > 2))
10334 mono_print_ins (ins);
/* By this point all CIL-level opcodes must have been lowered */
10336 g_assert (ins->opcode >= MONO_CEE_LAST);
/* regindex 0 = dreg, 1..3 = sreg1..sreg3; skip unused slots (' ') */
10338 for (regindex = 0; regindex < 4; regindex ++) {
10341 if (regindex == 0) {
10342 regtype = spec [MONO_INST_DEST];
10343 if (regtype == ' ')
10346 } else if (regindex == 1) {
10347 regtype = spec [MONO_INST_SRC1];
10348 if (regtype == ' ')
10351 } else if (regindex == 2) {
10352 regtype = spec [MONO_INST_SRC2];
10353 if (regtype == ' ')
10356 } else if (regindex == 3) {
10357 regtype = spec [MONO_INST_SRC3];
10358 if (regtype == ' ')
10363 #if SIZEOF_REGISTER == 4
10364 /* In the LLVM case, the long opcodes are not decomposed */
10365 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
10367 * Since some instructions reference the original long vreg,
10368 * and some reference the two component vregs, it is quite hard
10369 * to determine when it needs to be global. So be conservative.
10371 if (!get_vreg_to_inst (cfg, vreg)) {
10372 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
10374 if (cfg->verbose_level > 2)
10375 printf ("LONG VREG R%d made global.\n", vreg);
10379 * Make the component vregs volatile since the optimizations can
10380 * get confused otherwise.
10382 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
10383 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
10387 g_assert (vreg != -1);
/* Track which bblock each vreg is used in; block_num + 1 because 0
 * means "not seen yet", -1 means "seen in multiple bblocks". */
10389 prev_bb = vreg_to_bb [vreg];
10390 if (prev_bb == 0) {
10391 /* 0 is a valid block num */
10392 vreg_to_bb [vreg] = block_num + 1;
10393 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* Hardware registers are global by definition, skip them */
10394 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
10397 if (!get_vreg_to_inst (cfg, vreg)) {
10398 if (G_UNLIKELY (cfg->verbose_level > 2))
10399 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
/* Create an OP_LOCAL of the appropriate type for this regtype */
10403 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
10406 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
10409 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
10412 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
10415 g_assert_not_reached ();
10419 /* Flag as having been used in more than one bb */
10420 vreg_to_bb [vreg] = -1;
10426 /* If a variable is used in only one bblock, convert it into a local vreg */
10427 for (i = 0; i < cfg->num_varinfo; i++) {
10428 MonoInst *var = cfg->varinfo [i];
10429 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
10431 switch (var->type) {
10437 #if SIZEOF_REGISTER == 8
10440 #if !defined(TARGET_X86) && !defined(MONO_ARCH_SOFT_FLOAT)
10441 /* Enabling this screws up the fp stack on x86 */
10444 /* Arguments are implicitly global */
10445 /* Putting R4 vars into registers doesn't work currently */
10446 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg) {
10448 * Make that the variable's liveness interval doesn't contain a call, since
10449 * that would cause the lvreg to be spilled, making the whole optimization
10452 /* This is too slow for JIT compilation */
10454 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
10456 int def_index, call_index, ins_index;
10457 gboolean spilled = FALSE;
10462 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
10463 const char *spec = INS_INFO (ins->opcode);
10465 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
10466 def_index = ins_index;
/* NOTE(review): both halves of this || test SRC1/sreg1 — the second
 * half presumably should test SRC2/sreg2; verify against upstream. */
10468 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
10469 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
10470 if (call_index > def_index) {
10476 if (MONO_IS_CALL (ins))
10477 call_index = ins_index;
10487 if (G_UNLIKELY (cfg->verbose_level > 2))
10488 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
/* Demote: mark dead and unlink so the spill pass ignores it */
10489 var->flags |= MONO_INST_IS_DEAD;
10490 cfg->vreg_to_inst [var->dreg] = NULL;
10497 * Compress the varinfo and vars tables so the liveness computation is faster and
10498 * takes up less space.
10501 for (i = 0; i < cfg->num_varinfo; ++i) {
10502 MonoInst *var = cfg->varinfo [i];
10503 if (pos < i && cfg->locals_start == i)
10504 cfg->locals_start = pos;
10505 if (!(var->flags & MONO_INST_IS_DEAD)) {
10507 cfg->varinfo [pos] = cfg->varinfo [i];
10508 cfg->varinfo [pos]->inst_c0 = pos;
10509 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
10510 cfg->vars [pos].idx = pos;
10511 #if SIZEOF_REGISTER == 4
10512 if (cfg->varinfo [pos]->type == STACK_I8) {
10513 /* Modify the two component vars too */
10516 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
10517 var1->inst_c0 = pos;
10518 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
10519 var1->inst_c0 = pos;
10526 cfg->num_varinfo = pos;
10527 if (cfg->locals_start > cfg->num_varinfo)
10528 cfg->locals_start = cfg->num_varinfo;
10532 * mono_spill_global_vars:
10534 * Generate spill code for variables which are not allocated to registers,
10535 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
10536 * code is generated which could be optimized by the local optimization passes.
10539 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
10541 MonoBasicBlock *bb;
10543 int orig_next_vreg;
10544 guint32 *vreg_to_lvreg;
10546 guint32 i, lvregs_len;
10547 gboolean dest_has_lvreg = FALSE;
10548 guint32 stacktypes [128];
10549 MonoInst **live_range_start, **live_range_end;
10550 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
10552 *need_local_opts = FALSE;
10554 memset (spec2, 0, sizeof (spec2));
10556 /* FIXME: Move this function to mini.c */
10557 stacktypes ['i'] = STACK_PTR;
10558 stacktypes ['l'] = STACK_I8;
10559 stacktypes ['f'] = STACK_R8;
10560 #ifdef MONO_ARCH_SIMD_INTRINSICS
10561 stacktypes ['x'] = STACK_VTYPE;
10564 #if SIZEOF_REGISTER == 4
10565 /* Create MonoInsts for longs */
10566 for (i = 0; i < cfg->num_varinfo; i++) {
10567 MonoInst *ins = cfg->varinfo [i];
10569 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
10570 switch (ins->type) {
10575 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
10578 g_assert (ins->opcode == OP_REGOFFSET);
10580 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
10582 tree->opcode = OP_REGOFFSET;
10583 tree->inst_basereg = ins->inst_basereg;
10584 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
10586 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
10588 tree->opcode = OP_REGOFFSET;
10589 tree->inst_basereg = ins->inst_basereg;
10590 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
10600 /* FIXME: widening and truncation */
10603 * As an optimization, when a variable allocated to the stack is first loaded into
10604 * an lvreg, we will remember the lvreg and use it the next time instead of loading
10605 * the variable again.
10607 orig_next_vreg = cfg->next_vreg;
10608 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
10609 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
10613 * These arrays contain the first and last instructions accessing a given
10615 * Since we emit bblocks in the same order we process them here, and we
10616 * don't split live ranges, these will precisely describe the live range of
10617 * the variable, i.e. the instruction range where a valid value can be found
10618 * in the variables location.
10619 * The live range is computed using the liveness info computed by the liveness pass.
10620 * We can't use vmv->range, since that is an abstract live range, and we need
10621 * one which is instruction precise.
10622 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
10624 /* FIXME: Only do this if debugging info is requested */
10625 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
10626 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
10627 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
10628 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
10630 /* Add spill loads/stores */
10631 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10634 if (cfg->verbose_level > 2)
10635 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
10637 /* Clear vreg_to_lvreg array */
10638 for (i = 0; i < lvregs_len; i++)
10639 vreg_to_lvreg [lvregs [i]] = 0;
10643 MONO_BB_FOR_EACH_INS (bb, ins) {
10644 const char *spec = INS_INFO (ins->opcode);
10645 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
10646 gboolean store, no_lvreg;
10647 int sregs [MONO_MAX_SRC_REGS];
10649 if (G_UNLIKELY (cfg->verbose_level > 2))
10650 mono_print_ins (ins);
10652 if (ins->opcode == OP_NOP)
10656 * We handle LDADDR here as well, since it can only be decomposed
10657 * when variable addresses are known.
10659 if (ins->opcode == OP_LDADDR) {
10660 MonoInst *var = ins->inst_p0;
10662 if (var->opcode == OP_VTARG_ADDR) {
10663 /* Happens on SPARC/S390 where vtypes are passed by reference */
10664 MonoInst *vtaddr = var->inst_left;
10665 if (vtaddr->opcode == OP_REGVAR) {
10666 ins->opcode = OP_MOVE;
10667 ins->sreg1 = vtaddr->dreg;
10669 else if (var->inst_left->opcode == OP_REGOFFSET) {
10670 ins->opcode = OP_LOAD_MEMBASE;
10671 ins->inst_basereg = vtaddr->inst_basereg;
10672 ins->inst_offset = vtaddr->inst_offset;
10676 g_assert (var->opcode == OP_REGOFFSET);
10678 ins->opcode = OP_ADD_IMM;
10679 ins->sreg1 = var->inst_basereg;
10680 ins->inst_imm = var->inst_offset;
10683 *need_local_opts = TRUE;
10684 spec = INS_INFO (ins->opcode);
10687 if (ins->opcode < MONO_CEE_LAST) {
10688 mono_print_ins (ins);
10689 g_assert_not_reached ();
10693 * Store opcodes have destbasereg in the dreg, but in reality, it is an
10697 if (MONO_IS_STORE_MEMBASE (ins)) {
10698 tmp_reg = ins->dreg;
10699 ins->dreg = ins->sreg2;
10700 ins->sreg2 = tmp_reg;
10703 spec2 [MONO_INST_DEST] = ' ';
10704 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10705 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10706 spec2 [MONO_INST_SRC3] = ' ';
10708 } else if (MONO_IS_STORE_MEMINDEX (ins))
10709 g_assert_not_reached ();
10714 if (G_UNLIKELY (cfg->verbose_level > 2)) {
10715 printf ("\t %.3s %d", spec, ins->dreg);
10716 num_sregs = mono_inst_get_src_registers (ins, sregs);
10717 for (srcindex = 0; srcindex < 3; ++srcindex)
10718 printf (" %d", sregs [srcindex]);
10725 regtype = spec [MONO_INST_DEST];
10726 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
10729 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
10730 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
10731 MonoInst *store_ins;
10733 MonoInst *def_ins = ins;
10734 int dreg = ins->dreg; /* The original vreg */
10736 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
10738 if (var->opcode == OP_REGVAR) {
10739 ins->dreg = var->dreg;
10740 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
10742 * Instead of emitting a load+store, use a _membase opcode.
10744 g_assert (var->opcode == OP_REGOFFSET);
10745 if (ins->opcode == OP_MOVE) {
10749 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
10750 ins->inst_basereg = var->inst_basereg;
10751 ins->inst_offset = var->inst_offset;
10754 spec = INS_INFO (ins->opcode);
10758 g_assert (var->opcode == OP_REGOFFSET);
10760 prev_dreg = ins->dreg;
10762 /* Invalidate any previous lvreg for this vreg */
10763 vreg_to_lvreg [ins->dreg] = 0;
10767 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
10769 store_opcode = OP_STOREI8_MEMBASE_REG;
10772 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
10774 if (regtype == 'l') {
10775 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
10776 mono_bblock_insert_after_ins (bb, ins, store_ins);
10777 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
10778 mono_bblock_insert_after_ins (bb, ins, store_ins);
10779 def_ins = store_ins;
10782 g_assert (store_opcode != OP_STOREV_MEMBASE);
10784 /* Try to fuse the store into the instruction itself */
10785 /* FIXME: Add more instructions */
10786 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
10787 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
10788 ins->inst_imm = ins->inst_c0;
10789 ins->inst_destbasereg = var->inst_basereg;
10790 ins->inst_offset = var->inst_offset;
10791 spec = INS_INFO (ins->opcode);
10792 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
10793 ins->opcode = store_opcode;
10794 ins->inst_destbasereg = var->inst_basereg;
10795 ins->inst_offset = var->inst_offset;
10799 tmp_reg = ins->dreg;
10800 ins->dreg = ins->sreg2;
10801 ins->sreg2 = tmp_reg;
10804 spec2 [MONO_INST_DEST] = ' ';
10805 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10806 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10807 spec2 [MONO_INST_SRC3] = ' ';
10809 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
10810 // FIXME: The backends expect the base reg to be in inst_basereg
10811 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
10813 ins->inst_basereg = var->inst_basereg;
10814 ins->inst_offset = var->inst_offset;
10815 spec = INS_INFO (ins->opcode);
10817 /* printf ("INS: "); mono_print_ins (ins); */
10818 /* Create a store instruction */
10819 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
10821 /* Insert it after the instruction */
10822 mono_bblock_insert_after_ins (bb, ins, store_ins);
10824 def_ins = store_ins;
10827 * We can't assign ins->dreg to var->dreg here, since the
10828 * sregs could use it. So set a flag, and do it after
10831 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
10832 dest_has_lvreg = TRUE;
10837 if (def_ins && !live_range_start [dreg]) {
10838 live_range_start [dreg] = def_ins;
10839 live_range_start_bb [dreg] = bb;
10846 num_sregs = mono_inst_get_src_registers (ins, sregs);
10847 for (srcindex = 0; srcindex < 3; ++srcindex) {
10848 regtype = spec [MONO_INST_SRC1 + srcindex];
10849 sreg = sregs [srcindex];
10851 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
10852 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
10853 MonoInst *var = get_vreg_to_inst (cfg, sreg);
10854 MonoInst *use_ins = ins;
10855 MonoInst *load_ins;
10856 guint32 load_opcode;
10858 if (var->opcode == OP_REGVAR) {
10859 sregs [srcindex] = var->dreg;
10860 //mono_inst_set_src_registers (ins, sregs);
10861 live_range_end [sreg] = use_ins;
10862 live_range_end_bb [sreg] = bb;
10866 g_assert (var->opcode == OP_REGOFFSET);
10868 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
10870 g_assert (load_opcode != OP_LOADV_MEMBASE);
10872 if (vreg_to_lvreg [sreg]) {
10873 g_assert (vreg_to_lvreg [sreg] != -1);
10875 /* The variable is already loaded to an lvreg */
10876 if (G_UNLIKELY (cfg->verbose_level > 2))
10877 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
10878 sregs [srcindex] = vreg_to_lvreg [sreg];
10879 //mono_inst_set_src_registers (ins, sregs);
10883 /* Try to fuse the load into the instruction */
10884 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
10885 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
10886 sregs [0] = var->inst_basereg;
10887 //mono_inst_set_src_registers (ins, sregs);
10888 ins->inst_offset = var->inst_offset;
10889 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
10890 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
10891 sregs [1] = var->inst_basereg;
10892 //mono_inst_set_src_registers (ins, sregs);
10893 ins->inst_offset = var->inst_offset;
10895 if (MONO_IS_REAL_MOVE (ins)) {
10896 ins->opcode = OP_NOP;
10899 //printf ("%d ", srcindex); mono_print_ins (ins);
10901 sreg = alloc_dreg (cfg, stacktypes [regtype]);
10903 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
10904 if (var->dreg == prev_dreg) {
10906 * sreg refers to the value loaded by the load
10907 * emitted below, but we need to use ins->dreg
10908 * since it refers to the store emitted earlier.
10912 g_assert (sreg != -1);
10913 vreg_to_lvreg [var->dreg] = sreg;
10914 g_assert (lvregs_len < 1024);
10915 lvregs [lvregs_len ++] = var->dreg;
10919 sregs [srcindex] = sreg;
10920 //mono_inst_set_src_registers (ins, sregs);
10922 if (regtype == 'l') {
10923 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
10924 mono_bblock_insert_before_ins (bb, ins, load_ins);
10925 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
10926 mono_bblock_insert_before_ins (bb, ins, load_ins);
10927 use_ins = load_ins;
10930 #if SIZEOF_REGISTER == 4
10931 g_assert (load_opcode != OP_LOADI8_MEMBASE);
10933 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
10934 mono_bblock_insert_before_ins (bb, ins, load_ins);
10935 use_ins = load_ins;
10939 if (var->dreg < orig_next_vreg) {
10940 live_range_end [var->dreg] = use_ins;
10941 live_range_end_bb [var->dreg] = bb;
10945 mono_inst_set_src_registers (ins, sregs);
10947 if (dest_has_lvreg) {
10948 g_assert (ins->dreg != -1);
10949 vreg_to_lvreg [prev_dreg] = ins->dreg;
10950 g_assert (lvregs_len < 1024);
10951 lvregs [lvregs_len ++] = prev_dreg;
10952 dest_has_lvreg = FALSE;
10956 tmp_reg = ins->dreg;
10957 ins->dreg = ins->sreg2;
10958 ins->sreg2 = tmp_reg;
10961 if (MONO_IS_CALL (ins)) {
10962 /* Clear vreg_to_lvreg array */
10963 for (i = 0; i < lvregs_len; i++)
10964 vreg_to_lvreg [lvregs [i]] = 0;
10966 } else if (ins->opcode == OP_NOP) {
10968 MONO_INST_NULLIFY_SREGS (ins);
10971 if (cfg->verbose_level > 2)
10972 mono_print_ins_index (1, ins);
10975 /* Extend the live range based on the liveness info */
10976 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
10977 for (i = 0; i < cfg->num_varinfo; i ++) {
10978 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
10980 if (vreg_is_volatile (cfg, vi->vreg))
10981 /* The liveness info is incomplete */
10984 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
10985 /* Live from at least the first ins of this bb */
10986 live_range_start [vi->vreg] = bb->code;
10987 live_range_start_bb [vi->vreg] = bb;
10990 if (mono_bitset_test_fast (bb->live_out_set, i)) {
10991 /* Live at least until the last ins of this bb */
10992 live_range_end [vi->vreg] = bb->last_ins;
10993 live_range_end_bb [vi->vreg] = bb;
10999 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
11001 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
11002 * by storing the current native offset into MonoMethodVar->live_range_start/end.
11004 if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
11005 for (i = 0; i < cfg->num_varinfo; ++i) {
11006 int vreg = MONO_VARINFO (cfg, i)->vreg;
11009 if (live_range_start [vreg]) {
11010 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
11012 ins->inst_c1 = vreg;
11013 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
11015 if (live_range_end [vreg]) {
11016 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
11018 ins->inst_c1 = vreg;
11019 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
11020 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
11022 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
11028 g_free (live_range_start);
11029 g_free (live_range_end);
11030 g_free (live_range_start_bb);
11031 g_free (live_range_end_bb);
11036 * - use 'iadd' instead of 'int_add'
11037 * - handling ovf opcodes: decompose in method_to_ir.
11038 * - unify iregs/fregs
11039 * -> partly done, the missing parts are:
11040 * - a more complete unification would involve unifying the hregs as well, so
11041 * code wouldn't need if (fp) all over the place. but that would mean the hregs
11042 * would no longer map to the machine hregs, so the code generators would need to
11043 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
11044 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
11045 * fp/non-fp branches speeds it up by about 15%.
11046 * - use sext/zext opcodes instead of shifts
11048 * - get rid of TEMPLOADs if possible and use vregs instead
11049 * - clean up usage of OP_P/OP_ opcodes
11050 * - cleanup usage of DUMMY_USE
11051 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
11053 * - set the stack type and allocate a dreg in the EMIT_NEW macros
11054 * - get rid of all the <foo>2 stuff when the new JIT is ready.
11055 * - make sure handle_stack_args () is called before the branch is emitted
11056 * - when the new IR is done, get rid of all unused stuff
11057 * - COMPARE/BEQ as separate instructions or unify them ?
11058 * - keeping them separate allows specialized compare instructions like
11059 * compare_imm, compare_membase
11060 * - most back ends unify fp compare+branch, fp compare+ceq
11061 * - integrate mono_save_args into inline_method
11062 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
11063 * - handle long shift opts on 32 bit platforms somehow: they require
11064 * 3 sregs (2 for arg1 and 1 for arg2)
11065 * - make byref a 'normal' type.
11066 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
11067 * variable if needed.
11068 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
11069 * like inline_method.
11070 * - remove inlining restrictions
11071 * - fix LNEG and enable cfold of INEG
11072 * - generalize x86 optimizations like ldelema as a peephole optimization
11073 * - add store_mem_imm for amd64
11074 * - optimize the loading of the interruption flag in the managed->native wrappers
11075 * - avoid special handling of OP_NOP in passes
11076 * - move code inserting instructions into one function/macro.
11077 * - try a coalescing phase after liveness analysis
11078 * - add float -> vreg conversion + local optimizations on !x86
11079 * - figure out how to handle decomposed branches during optimizations, ie.
11080 * compare+branch, op_jump_table+op_br etc.
11081 * - promote RuntimeXHandles to vregs
11082 * - vtype cleanups:
11083 * - add a NEW_VARLOADA_VREG macro
11084 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
11085 * accessing vtype fields.
11086 * - get rid of I8CONST on 64 bit platforms
11087 * - dealing with the increase in code size due to branches created during opcode
11089 * - use extended basic blocks
11090 * - all parts of the JIT
11091 * - handle_global_vregs () && local regalloc
11092 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
11093 * - sources of increase in code size:
11096 * - isinst and castclass
11097 * - lvregs not allocated to global registers even if used multiple times
11098 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
11100 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
11101 * - add all micro optimizations from the old JIT
11102 * - put tree optimizations into the deadce pass
11103 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
11104 * specific function.
11105 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
11106 * fcompare + branchCC.
11107 * - create a helper function for allocating a stack slot, taking into account
11108 * MONO_CFG_HAS_SPILLUP.
11110 * - merge the ia64 switch changes.
11111 * - optimize mono_regstate2_alloc_int/float.
11112 * - fix the pessimistic handling of variables accessed in exception handler blocks.
11113 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
11114 * parts of the tree could be separated by other instructions, killing the tree
11115 * arguments, or stores killing loads etc. Also, should we fold loads into other
11116 * instructions if the result of the load is used multiple times ?
11117 * - make the REM_IMM optimization in mini-x86.c arch-independent.
11118 * - LAST MERGE: 108395.
11119 * - when returning vtypes in registers, generate IR and append it to the end of the
11120 * last bb instead of doing it in the epilog.
11121 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
11129 - When to decompose opcodes:
11130 - earlier: this makes some optimizations hard to implement, since the low level IR
11131 no longer contains the necessary information. But it is easier to do.
11132 - later: harder to implement, enables more optimizations.
11133 - Branches inside bblocks:
11134 - created when decomposing complex opcodes.
11135 - branches to another bblock: harmless, but not tracked by the branch
11136 optimizations, so need to branch to a label at the start of the bblock.
11137 - branches to inside the same bblock: very problematic, trips up the local
11138 reg allocator. Can be fixed by splitting the current bblock, but that is a
11139 complex operation, since some local vregs can become global vregs etc.
11140 - Local/global vregs:
11141 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
11142 local register allocator.
11143 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
11144 structure, created by mono_create_var (). Assigned to hregs or the stack by
11145 the global register allocator.
11146 - When to do optimizations like alu->alu_imm:
11147 - earlier -> saves work later on since the IR will be smaller/simpler
11148 - later -> can work on more instructions
11149 - Handling of valuetypes:
11150 - When a vtype is pushed on the stack, a new temporary is created, an
11151 instruction computing its address (LDADDR) is emitted and pushed on
11152 the stack. Need to optimize cases when the vtype is used immediately as in
11153 argument passing, stloc etc.
11154 - Instead of the to_end stuff in the old JIT, simply call the function handling
11155 the values on the stack before emitting the last instruction of the bb.
11158 #endif /* DISABLE_JIT */