2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
22 #ifdef HAVE_SYS_TIME_H
30 #include <mono/utils/memcheck.h>
32 #include <mono/metadata/assembly.h>
33 #include <mono/metadata/loader.h>
34 #include <mono/metadata/tabledefs.h>
35 #include <mono/metadata/class.h>
36 #include <mono/metadata/object.h>
37 #include <mono/metadata/exception.h>
38 #include <mono/metadata/opcodes.h>
39 #include <mono/metadata/mono-endian.h>
40 #include <mono/metadata/tokentype.h>
41 #include <mono/metadata/tabledefs.h>
42 #include <mono/metadata/marshal.h>
43 #include <mono/metadata/debug-helpers.h>
44 #include <mono/metadata/mono-debug.h>
45 #include <mono/metadata/gc-internal.h>
46 #include <mono/metadata/security-manager.h>
47 #include <mono/metadata/threads-types.h>
48 #include <mono/metadata/security-core-clr.h>
49 #include <mono/metadata/monitor.h>
50 #include <mono/metadata/profiler-private.h>
51 #include <mono/metadata/profiler.h>
52 #include <mono/utils/mono-compiler.h>
53 #include <mono/metadata/mono-basic-block.h>
60 #include "jit-icalls.h"
62 #include "debugger-agent.h"
/*
 * NOTE(review): this excerpt is non-contiguous (original lines are missing
 * between the visible ones), so several macros below lack their closing
 * "} while (0)" in this view.  Comments describe only what is visible.
 *
 * BRANCH_COST / INLINE_LENGTH_LIMIT: inliner heuristics; INLINE_LENGTH_LIMIT
 * presumably caps the IL length of inline candidates -- TODO confirm.
 * The *_FAILURE macros record a compile-time exception on `cfg` and jump to
 * the method-local `exception_exit` label; the temporary name strings are
 * freed after being formatted into cfg->exception_message.
 */
64 #define BRANCH_COST 100
65 #define INLINE_LENGTH_LIMIT 20
66 #define INLINE_FAILURE do {\
67 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE))\
70 #define CHECK_CFG_EXCEPTION do {\
71 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
74 #define METHOD_ACCESS_FAILURE do { \
75 char *method_fname = mono_method_full_name (method, TRUE); \
76 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
77 cfg->exception_type = MONO_EXCEPTION_METHOD_ACCESS; \
78 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
79 g_free (method_fname); \
80 g_free (cil_method_fname); \
81 goto exception_exit; \
83 #define FIELD_ACCESS_FAILURE do { \
84 char *method_fname = mono_method_full_name (method, TRUE); \
85 char *field_fname = mono_field_full_name (field); \
86 cfg->exception_type = MONO_EXCEPTION_FIELD_ACCESS; \
87 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
88 g_free (method_fname); \
89 g_free (field_fname); \
90 goto exception_exit; \
92 #define GENERIC_SHARING_FAILURE(opcode) do { \
93 if (cfg->generic_sharing_context) { \
94 if (cfg->verbose_level > 2) \
95 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
96 cfg->exception_type = MONO_EXCEPTION_GENERIC_SHARING_FAILED; \
97 goto exception_exit; \
101 /* Determine whenever 'ins' represents a load of the 'this' argument */
102 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
/*
 * Forward declarations for helpers defined later in this file, plus extern
 * helper-call signatures (presumably defined/initialized elsewhere in the
 * JIT, e.g. mini.c -- TODO confirm).  The ldind/stind converters map CIL
 * indirect load/store opcodes to the IR *_MEMBASE forms.
 */
104 static int ldind_to_load_membase (int opcode);
105 static int stind_to_store_membase (int opcode);
107 int mono_op_to_op_imm (int opcode);
108 int mono_op_to_op_imm_noemul (int opcode);
110 MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
111 void mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native);
112 void mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass);
114 /* helper methods signature */
115 extern MonoMethodSignature *helper_sig_class_init_trampoline;
116 extern MonoMethodSignature *helper_sig_domain_get;
117 extern MonoMethodSignature *helper_sig_generic_class_init_trampoline;
118 extern MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
119 extern MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline;
120 extern MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm;
/*
 * Per-opcode metadata built with the X-macro technique: "mini-ops.h" is
 * included repeatedly, each time with MINI_OP/MINI_OP3 redefined, to emit
 * one table entry per opcode.  The first expansion appears to produce
 * dest/src register-class characters; the second produces the source
 * register counts (ins_sreg_counts).  MONO_INIT_VARINFO initializes a
 * variable's liveness info (0xffff presumably meaning "not yet used").
 * NOTE(review): intervening lines are missing from this excerpt.
 */
123 * Instruction metadata
131 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
132 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
138 #if SIZEOF_REGISTER == 8
143 /* keep in sync with the enum in mini.h */
146 #include "mini-ops.h"
151 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
152 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
154 * This should contain the index of the last sreg + 1. This is not the same
155 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
157 const gint8 ins_sreg_counts[] = {
158 #include "mini-ops.h"
163 #define MONO_INIT_VARINFO(vi,id) do { \
164 (vi)->range.first_use.pos.bid = 0xffff; \
/*
 * mono_inst_set_src_registers: copy all three source registers of `ins`
 * from the regs[0..2] array (assumes regs has at least 3 entries).
 *
 * mono_alloc_{i,f,p,d}reg: public thin wrappers around the static
 * alloc_*() virtual-register allocators (integer, float, pointer-sized,
 * and stack-type-determined destination register respectively).
 */
170 mono_inst_set_src_registers (MonoInst *ins, int *regs)
172 ins->sreg1 = regs [0];
173 ins->sreg2 = regs [1];
174 ins->sreg3 = regs [2];
178 mono_alloc_ireg (MonoCompile *cfg)
180 return alloc_ireg (cfg);
184 mono_alloc_freg (MonoCompile *cfg)
186 return alloc_freg (cfg);
190 mono_alloc_preg (MonoCompile *cfg)
192 return alloc_preg (cfg);
196 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
198 return alloc_dreg (cfg, stack_type);
/*
 * mono_type_to_regmove: select the register-move opcode appropriate for
 * moving a value of MonoType `type`.  Enums are unwrapped to their base
 * type and generic instances to the container class before dispatching;
 * an unrecognized type->type aborts via g_error.
 * NOTE(review): most case bodies (the actual return statements) are
 * missing from this excerpt -- only the case labels survive.
 */
202 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
208 switch (type->type) {
211 case MONO_TYPE_BOOLEAN:
223 case MONO_TYPE_FNPTR:
225 case MONO_TYPE_CLASS:
226 case MONO_TYPE_STRING:
227 case MONO_TYPE_OBJECT:
228 case MONO_TYPE_SZARRAY:
229 case MONO_TYPE_ARRAY:
233 #if SIZEOF_REGISTER == 8
242 case MONO_TYPE_VALUETYPE:
243 if (type->data.klass->enumtype) {
244 type = mono_class_enum_basetype (type->data.klass);
247 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
250 case MONO_TYPE_TYPEDBYREF:
252 case MONO_TYPE_GENERICINST:
253 type = &type->data.generic_class->container_class->byval_arg;
257 g_assert (cfg->generic_sharing_context);
260 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/*
 * mono_print_bb: debugging dump of one basic block -- prints `msg`, the
 * block number, the in/out edge lists (block number and dfn for each),
 * then every instruction in the block via mono_print_ins_index.
 */
266 mono_print_bb (MonoBasicBlock *bb, const char *msg)
271 printf ("\n%s %d: [IN: ", msg, bb->block_num);
272 for (i = 0; i < bb->in_count; ++i)
273 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
275 for (i = 0; i < bb->out_count; ++i)
276 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
278 for (tree = bb->code; tree; tree = tree->next)
279 mono_print_ins_index (-1, tree);
/*
 * IR-building macros used by the big CIL decode loop later in the file.
 * NOTE(review): several closing lines (e.g. "} while (0)") are missing
 * from this excerpt; comments describe only the visible parts.
 *
 * UNVERIFIED        - bail out on unverifiable IL (optionally breaking
 *                     into the debugger first).
 * GET_BBLOCK        - look up (or lazily create and register) the basic
 *                     block that starts at IL offset `ip`.
 * EMIT_NEW_X86_LEA  - x86/amd64 only: emit an OP_X86_LEA computing
 *                     sr1 + (sr2 << shift) + imm into a fresh preg.
 * ADD_WIDEN_OP      - on 64-bit targets, sign-extend an I4 operand when
 *                     mixed with a PTR operand (FIXME in original notes
 *                     more cases are needed); no-op on 32-bit.
 * ADD_BINOP/ADD_UNOP- pop operand(s) from `sp`, type-check via
 *                     type_from_op, allocate a dreg, append the ins and
 *                     push the (possibly decomposed) result.
 * ADD_BINCOND       - build a compare + conditional branch pair, wiring
 *                     the true/false targets into the CFG and flushing
 *                     the evaluation stack across the block boundary.
 */
283 * Can't put this at the beginning, since other files reference stuff from this
288 #define UNVERIFIED do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto unverified; } while (0)
290 #define GET_BBLOCK(cfg,tblock,ip) do { \
291 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
293 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
294 NEW_BBLOCK (cfg, (tblock)); \
295 (tblock)->cil_code = (ip); \
296 ADD_BBLOCK (cfg, (tblock)); \
300 #if defined(TARGET_X86) || defined(TARGET_AMD64)
301 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
302 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
303 (dest)->dreg = alloc_preg ((cfg)); \
304 (dest)->sreg1 = (sr1); \
305 (dest)->sreg2 = (sr2); \
306 (dest)->inst_imm = (imm); \
307 (dest)->backend.shift_amount = (shift); \
308 MONO_ADD_INS ((cfg)->cbb, (dest)); \
312 #if SIZEOF_REGISTER == 8
313 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
314 /* FIXME: Need to add many more cases */ \
315 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
317 int dr = alloc_preg (cfg); \
318 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
319 (ins)->sreg2 = widen->dreg; \
323 #define ADD_WIDEN_OP(ins, arg1, arg2)
326 #define ADD_BINOP(op) do { \
327 MONO_INST_NEW (cfg, ins, (op)); \
329 ins->sreg1 = sp [0]->dreg; \
330 ins->sreg2 = sp [1]->dreg; \
331 type_from_op (ins, sp [0], sp [1]); \
333 /* Have to insert a widening op */ \
334 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
335 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
336 MONO_ADD_INS ((cfg)->cbb, (ins)); \
337 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
340 #define ADD_UNOP(op) do { \
341 MONO_INST_NEW (cfg, ins, (op)); \
343 ins->sreg1 = sp [0]->dreg; \
344 type_from_op (ins, sp [0], NULL); \
346 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
347 MONO_ADD_INS ((cfg)->cbb, (ins)); \
348 *sp++ = mono_decompose_opcode (cfg, ins); \
351 #define ADD_BINCOND(next_block) do { \
354 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
355 cmp->sreg1 = sp [0]->dreg; \
356 cmp->sreg2 = sp [1]->dreg; \
357 type_from_op (cmp, sp [0], sp [1]); \
359 type_from_op (ins, sp [0], sp [1]); \
360 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
361 GET_BBLOCK (cfg, tblock, target); \
362 link_bblock (cfg, bblock, tblock); \
363 ins->inst_true_bb = tblock; \
364 if ((next_block)) { \
365 link_bblock (cfg, bblock, (next_block)); \
366 ins->inst_false_bb = (next_block); \
367 start_new_bblock = 1; \
369 GET_BBLOCK (cfg, tblock, ip); \
370 link_bblock (cfg, bblock, tblock); \
371 ins->inst_false_bb = tblock; \
372 start_new_bblock = 2; \
374 if (sp != stack_start) { \
375 handle_stack_args (cfg, stack_start, sp - stack_start); \
376 CHECK_UNVERIFIABLE (cfg); \
378 MONO_ADD_INS (bblock, cmp); \
379 MONO_ADD_INS (bblock, ins); \
/*
 * NOTE(review): lines are missing inside this function in the excerpt
 * (notably the de-dup early-outs and the final array swaps).
 * What is visible: optional verbose edge tracing, a scan of from->out_bb
 * to avoid duplicate edges, and mempool-allocated growth of both the
 * out_bb array of `from` and the in_bb array of `to` by one slot.
 * mono_link_bblock is the public wrapper around the static link_bblock.
 */
383 * link_bblock: Links two basic blocks
385 * links two basic blocks in the control flow graph, the 'from'
386 * argument is the starting block and the 'to' argument is the block
387 * the control flow ends to after 'from'.
390 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
392 MonoBasicBlock **newa;
396 if (from->cil_code) {
398 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
400 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
403 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
405 printf ("edge from entry to exit\n");
410 for (i = 0; i < from->out_count; ++i) {
411 if (to == from->out_bb [i]) {
417 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
418 for (i = 0; i < from->out_count; ++i) {
419 newa [i] = from->out_bb [i];
427 for (i = 0; i < to->in_count; ++i) {
428 if (from == to->in_bb [i]) {
434 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
435 for (i = 0; i < to->in_count; ++i) {
436 newa [i] = to->in_bb [i];
445 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
447 link_bblock (cfg, from, to);
/*
 * NOTE(review): the original doc comment survives below.  Visible logic:
 * iterate the method's exception clauses; a filter region is detected by
 * offset within [filter_offset, handler_offset); handler regions map to
 * FINALLY / FAULT / CATCH region tokens; otherwise an offset inside the
 * try range yields just the clause flags.  The token format is
 * ((clause_index + 1) << 8) | region_kind | clause->flags.
 */
451 * mono_find_block_region:
453 * We mark each basic block with a region ID. We use that to avoid BB
454 * optimizations when blocks are in different regions.
457 * A region token that encodes where this region is, and information
458 * about the clause owner for this block.
460 * The region encodes the try/catch/filter clause that owns this block
461 * as well as the type. -1 is a special value that represents a block
462 * that is in none of try/catch/filter.
465 mono_find_block_region (MonoCompile *cfg, int offset)
467 MonoMethod *method = cfg->method;
468 MonoMethodHeader *header = mono_method_get_header (method);
469 MonoExceptionClause *clause;
472 for (i = 0; i < header->num_clauses; ++i) {
473 clause = &header->clauses [i];
474 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
475 (offset < (clause->handler_offset)))
476 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
478 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
479 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
480 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
481 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
482 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
484 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
487 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
488 return ((i + 1) << 8) | clause->flags;
/*
 * mono_find_final_block: collect (into a GList) the handler blocks of
 * clauses of kind `type` that enclose `ip` but NOT `target` -- i.e. the
 * handlers that must run when control leaves their protected region on
 * the way from ip to target (used e.g. for leave/finally chains).
 * NOTE(review): the return statement is outside this excerpt.
 */
495 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
497 MonoMethod *method = cfg->method;
498 MonoMethodHeader *header = mono_method_get_header (method);
499 MonoExceptionClause *clause;
500 MonoBasicBlock *handler;
504 for (i = 0; i < header->num_clauses; ++i) {
505 clause = &header->clauses [i];
506 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
507 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
508 if (clause->flags == type) {
509 handler = cfg->cil_offset_to_bb [clause->handler_offset];
511 res = g_list_append (res, handler);
/*
 * Lazily-created per-region / per-offset special variables, cached in
 * hash tables on the MonoCompile:
 *  - mono_create_spvar_for_region: stack-pointer variable for an EH
 *    region (int-typed local, keyed by region id in cfg->spvars).
 *  - mono_find_exvar_for_offset: lookup-only variant for exception vars.
 *  - mono_create_exvar_for_offset: exception-object variable (object-
 *    typed local, keyed by IL offset in cfg->exvars).
 * MONO_INST_INDIRECT is set so these are never register-allocated.
 */
519 mono_create_spvar_for_region (MonoCompile *cfg, int region)
523 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
527 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
528 /* prevent it from being register allocated */
529 var->flags |= MONO_INST_INDIRECT;
531 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
535 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
537 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
541 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
545 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
549 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
550 /* prevent it from being register allocated */
551 var->flags |= MONO_INST_INDIRECT;
553 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
/*
 * type_to_eval_stack_type: classify `type` into the evaluation-stack
 * type enum (STACK_I4/I8/PTR/R8/OBJ/MP/VTYPE/INV), writing the result
 * into inst->type and the resolved class into inst->klass.  Enums are
 * unwrapped to their base type, generic insts to the container class,
 * and shared VAR/MVAR are treated as object references (see FIXME).
 * NOTE(review): several `break`s / byref handling lines are missing
 * from this excerpt.
 */
559 * Returns the type used in the eval stack when @type is loaded.
560 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
563 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
567 inst->klass = klass = mono_class_from_mono_type (type);
569 inst->type = STACK_MP;
574 switch (type->type) {
576 inst->type = STACK_INV;
580 case MONO_TYPE_BOOLEAN:
586 inst->type = STACK_I4;
591 case MONO_TYPE_FNPTR:
592 inst->type = STACK_PTR;
594 case MONO_TYPE_CLASS:
595 case MONO_TYPE_STRING:
596 case MONO_TYPE_OBJECT:
597 case MONO_TYPE_SZARRAY:
598 case MONO_TYPE_ARRAY:
599 inst->type = STACK_OBJ;
603 inst->type = STACK_I8;
607 inst->type = STACK_R8;
609 case MONO_TYPE_VALUETYPE:
610 if (type->data.klass->enumtype) {
611 type = mono_class_enum_basetype (type->data.klass);
615 inst->type = STACK_VTYPE;
618 case MONO_TYPE_TYPEDBYREF:
619 inst->klass = mono_defaults.typed_reference_class;
620 inst->type = STACK_VTYPE;
622 case MONO_TYPE_GENERICINST:
623 type = &type->data.generic_class->container_class->byval_arg;
626 case MONO_TYPE_MVAR :
627 /* FIXME: all the arguments must be references for now,
628 * later look inside cfg and see if the arg num is
631 g_assert (cfg->generic_sharing_context);
632 inst->type = STACK_OBJ;
635 g_error ("unknown type 0x%02x in eval stack type", type->type);
/*
 * Static lookup tables used by type_from_op () to validate IL operand
 * type combinations and to translate generic CIL opcodes into the
 * type-specific IR opcodes:
 *  - bin_num_table / bin_int_table / shift_table: result stack type for
 *    a pair of operand stack types (STACK_INV marks invalid IL).
 *  - bin_comp_table: small codes classifying which comparisons are
 *    legal between operand types (see row/column legend comment).
 *  - *ops_op_map: per-stack-type opcode deltas, added to the generic
 *    opcode to obtain the I/L/P/F-specific variant (e.g.
 *    CEE_ADD + (OP_IADD - CEE_ADD) == OP_IADD for STACK_I4 operands).
 */
640 * The following tables are used to quickly validate the IL code in type_from_op ().
643 bin_num_table [STACK_MAX] [STACK_MAX] = {
644 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
645 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
646 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
647 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
648 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
649 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
650 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
651 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
656 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
659 /* reduce the size of this table */
661 bin_int_table [STACK_MAX] [STACK_MAX] = {
662 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
663 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
664 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
665 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
666 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
667 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
668 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
669 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
673 bin_comp_table [STACK_MAX] [STACK_MAX] = {
674 /* Inv i L p F & O vt */
676 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
677 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
678 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
679 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
680 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
681 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
682 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
685 /* reduce the size of this table */
687 shift_table [STACK_MAX] [STACK_MAX] = {
688 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
689 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
690 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
691 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
692 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
693 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
694 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
695 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
699 * Tables to map from the non-specific opcode to the matching
700 * type-specific opcode.
702 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
704 binops_op_map [STACK_MAX] = {
705 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
708 /* handles from CEE_NEG to CEE_CONV_U8 */
710 unops_op_map [STACK_MAX] = {
711 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
714 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
716 ovfops_op_map [STACK_MAX] = {
717 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
720 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
722 ovf2ops_op_map [STACK_MAX] = {
723 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
726 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
728 ovf3ops_op_map [STACK_MAX] = {
729 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
732 /* handles from CEE_BEQ to CEE_BLT_UN */
734 beqops_op_map [STACK_MAX] = {
735 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
738 /* handles from CEE_CEQ to CEE_CLT_UN */
740 ceqops_op_map [STACK_MAX] = {
741 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
/*
 * type_from_op: the central IL type-checker.  Given a generic ins and
 * its stack operands, it (a) sets ins->type from the validation tables
 * above (STACK_INV marking unverifiable IL) and (b) specializes
 * ins->opcode by adding the per-type delta from the *_op_map tables, or
 * by direct assignment for compares and conversions.  Unhandled opcodes
 * abort via g_error.  Managed pointers get object_class as ins->klass.
 * NOTE(review): many case labels and `break`s are missing from this
 * excerpt; the visible structure is representative, not complete.
 */
745 * Sets ins->type (the type on the eval stack) according to the
746 * type of the opcode and the arguments to it.
747 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
749 * FIXME: this function sets ins->type unconditionally in some cases, but
750 * it should set it to invalid for some types (a conv.x on an object)
753 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
755 switch (ins->opcode) {
762 /* FIXME: check unverifiable args for STACK_MP */
763 ins->type = bin_num_table [src1->type] [src2->type];
764 ins->opcode += binops_op_map [ins->type];
771 ins->type = bin_int_table [src1->type] [src2->type];
772 ins->opcode += binops_op_map [ins->type];
777 ins->type = shift_table [src1->type] [src2->type];
778 ins->opcode += binops_op_map [ins->type];
783 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
784 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
785 ins->opcode = OP_LCOMPARE;
786 else if (src1->type == STACK_R8)
787 ins->opcode = OP_FCOMPARE;
789 ins->opcode = OP_ICOMPARE;
791 case OP_ICOMPARE_IMM:
792 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
793 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
794 ins->opcode = OP_LCOMPARE_IMM;
806 ins->opcode += beqops_op_map [src1->type];
809 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
810 ins->opcode += ceqops_op_map [src1->type];
816 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
817 ins->opcode += ceqops_op_map [src1->type];
821 ins->type = neg_table [src1->type];
822 ins->opcode += unops_op_map [ins->type];
825 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
826 ins->type = src1->type;
828 ins->type = STACK_INV;
829 ins->opcode += unops_op_map [ins->type];
835 ins->type = STACK_I4;
836 ins->opcode += unops_op_map [src1->type];
839 ins->type = STACK_R8;
840 switch (src1->type) {
843 ins->opcode = OP_ICONV_TO_R_UN;
846 ins->opcode = OP_LCONV_TO_R_UN;
850 case CEE_CONV_OVF_I1:
851 case CEE_CONV_OVF_U1:
852 case CEE_CONV_OVF_I2:
853 case CEE_CONV_OVF_U2:
854 case CEE_CONV_OVF_I4:
855 case CEE_CONV_OVF_U4:
856 ins->type = STACK_I4;
857 ins->opcode += ovf3ops_op_map [src1->type];
859 case CEE_CONV_OVF_I_UN:
860 case CEE_CONV_OVF_U_UN:
861 ins->type = STACK_PTR;
862 ins->opcode += ovf2ops_op_map [src1->type];
864 case CEE_CONV_OVF_I1_UN:
865 case CEE_CONV_OVF_I2_UN:
866 case CEE_CONV_OVF_I4_UN:
867 case CEE_CONV_OVF_U1_UN:
868 case CEE_CONV_OVF_U2_UN:
869 case CEE_CONV_OVF_U4_UN:
870 ins->type = STACK_I4;
871 ins->opcode += ovf2ops_op_map [src1->type];
874 ins->type = STACK_PTR;
875 switch (src1->type) {
877 ins->opcode = OP_ICONV_TO_U;
881 #if SIZEOF_REGISTER == 8
882 ins->opcode = OP_LCONV_TO_U;
884 ins->opcode = OP_MOVE;
888 ins->opcode = OP_LCONV_TO_U;
891 ins->opcode = OP_FCONV_TO_U;
897 ins->type = STACK_I8;
898 ins->opcode += unops_op_map [src1->type];
900 case CEE_CONV_OVF_I8:
901 case CEE_CONV_OVF_U8:
902 ins->type = STACK_I8;
903 ins->opcode += ovf3ops_op_map [src1->type];
905 case CEE_CONV_OVF_U8_UN:
906 case CEE_CONV_OVF_I8_UN:
907 ins->type = STACK_I8;
908 ins->opcode += ovf2ops_op_map [src1->type];
912 ins->type = STACK_R8;
913 ins->opcode += unops_op_map [src1->type];
916 ins->type = STACK_R8;
920 ins->type = STACK_I4;
921 ins->opcode += ovfops_op_map [src1->type];
926 ins->type = STACK_PTR;
927 ins->opcode += ovfops_op_map [src1->type];
935 ins->type = bin_num_table [src1->type] [src2->type];
936 ins->opcode += ovfops_op_map [src1->type];
937 if (ins->type == STACK_R8)
938 ins->type = STACK_INV;
940 case OP_LOAD_MEMBASE:
941 ins->type = STACK_PTR;
943 case OP_LOADI1_MEMBASE:
944 case OP_LOADU1_MEMBASE:
945 case OP_LOADI2_MEMBASE:
946 case OP_LOADU2_MEMBASE:
947 case OP_LOADI4_MEMBASE:
948 case OP_LOADU4_MEMBASE:
949 ins->type = STACK_PTR;
951 case OP_LOADI8_MEMBASE:
952 ins->type = STACK_I8;
954 case OP_LOADR4_MEMBASE:
955 case OP_LOADR8_MEMBASE:
956 ins->type = STACK_R8;
959 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
963 if (ins->type == STACK_MP)
964 ins->klass = mono_defaults.object_class;
/*
 * check_values_to_signature: sanity-check that the eval-stack types of
 * `args` are compatible with the parameter types of `sig` (byref-ness,
 * reference vs value, R4/R8 for floats).  The param_table lookup is
 * commented out in the original.  NOTE(review): the function's return
 * statements and several cases are missing from this excerpt; the
 * leading row of STACK_* constants presumably belongs to another
 * stack-type mapping table whose declaration is not visible.
 */
969 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
975 param_table [STACK_MAX] [STACK_MAX] = {
980 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
984 switch (args->type) {
994 for (i = 0; i < sig->param_count; ++i) {
995 switch (args [i].type) {
999 if (!sig->params [i]->byref)
1003 if (sig->params [i]->byref)
1005 switch (sig->params [i]->type) {
1006 case MONO_TYPE_CLASS:
1007 case MONO_TYPE_STRING:
1008 case MONO_TYPE_OBJECT:
1009 case MONO_TYPE_SZARRAY:
1010 case MONO_TYPE_ARRAY:
1017 if (sig->params [i]->byref)
1019 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1028 /*if (!param_table [args [i].type] [sig->params [i]->type])
/*
 * Lazily-created singleton compile-time variables on the MonoCompile:
 *  - mono_get_domainvar: caches the MonoDomain* (so mono_domain_get()
 *    is called once per method instead of at every use).
 *  - mono_get_got_var: address of the Global Offset Table under AOT on
 *    architectures that define MONO_ARCH_NEED_GOT_VAR; returns the
 *    cached var otherwise (non-AOT early-return body not visible here).
 *  - mono_get_vtable_var: the rgctx variable for generic sharing;
 *    MONO_INST_INDIRECT forces it onto the stack.
 */
1036 * When we need a pointer to the current domain many times in a method, we
1037 * call mono_domain_get() once and we store the result in a local variable.
1038 * This function returns the variable that represents the MonoDomain*.
1040 inline static MonoInst *
1041 mono_get_domainvar (MonoCompile *cfg)
1043 if (!cfg->domainvar)
1044 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1045 return cfg->domainvar;
1049 * The got_var contains the address of the Global Offset Table when AOT
1053 mono_get_got_var (MonoCompile *cfg)
1055 #ifdef MONO_ARCH_NEED_GOT_VAR
1056 if (!cfg->compile_aot)
1058 if (!cfg->got_var) {
1059 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1061 return cfg->got_var;
1068 mono_get_vtable_var (MonoCompile *cfg)
1070 g_assert (cfg->generic_sharing_context);
1072 if (!cfg->rgctx_var) {
1073 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1074 /* force the var to be stack allocated */
1075 cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
1078 return cfg->rgctx_var;
/*
 * type_from_stack_type: inverse mapping from an instruction's eval-stack
 * type back to a canonical MonoType* (int32/int64/intptr/double/object,
 * or the instruction's own klass for MP/VTYPE); aborts on other values.
 *
 * type_to_stack_type: forward mapping from a MonoType (after
 * mono_type_get_underlying_type) to a STACK_* constant; generic insts
 * are classified by valuetype-ness.  NOTE(review): the actual return
 * statements for most cases are missing from this excerpt.
 */
1082 type_from_stack_type (MonoInst *ins) {
1083 switch (ins->type) {
1084 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1085 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1086 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1087 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1089 return &ins->klass->this_arg;
1090 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1091 case STACK_VTYPE: return &ins->klass->byval_arg;
1093 g_error ("stack type %d to monotype not handled\n", ins->type);
1098 static G_GNUC_UNUSED int
1099 type_to_stack_type (MonoType *t)
1101 t = mono_type_get_underlying_type (t);
1105 case MONO_TYPE_BOOLEAN:
1108 case MONO_TYPE_CHAR:
1115 case MONO_TYPE_FNPTR:
1117 case MONO_TYPE_CLASS:
1118 case MONO_TYPE_STRING:
1119 case MONO_TYPE_OBJECT:
1120 case MONO_TYPE_SZARRAY:
1121 case MONO_TYPE_ARRAY:
1129 case MONO_TYPE_VALUETYPE:
1130 case MONO_TYPE_TYPEDBYREF:
1132 case MONO_TYPE_GENERICINST:
1133 if (mono_type_generic_inst_is_valuetype (t))
1139 g_assert_not_reached ();
/*
 * array_access_to_klass: map a CIL ldelem/stelem opcode to the MonoClass
 * of the element it accesses (byte/uint16/... through object).  The case
 * labels preceding most returns are missing from this excerpt -- only
 * the LDELEM_REF/STELEM_REF pair is fully visible; unknown opcodes hit
 * g_assert_not_reached.
 */
1146 array_access_to_klass (int opcode)
1150 return mono_defaults.byte_class;
1152 return mono_defaults.uint16_class;
1155 return mono_defaults.int_class;
1158 return mono_defaults.sbyte_class;
1161 return mono_defaults.int16_class;
1164 return mono_defaults.int32_class;
1166 return mono_defaults.uint32_class;
1169 return mono_defaults.int64_class;
1172 return mono_defaults.single_class;
1175 return mono_defaults.double_class;
1176 case CEE_LDELEM_REF:
1177 case CEE_STELEM_REF:
1178 return mono_defaults.object_class;
1180 g_assert_not_reached ();
/*
 * mono_compile_get_interface_var: return a local variable to hold the
 * value in eval-stack slot `slot` across a bblock boundary, reusing a
 * previously created one of the same stack type/slot via cfg->intvars
 * when possible; stacks deepened by inlining always get a fresh var.
 *
 * mono_save_token_info: under AOT (and only when no generic context is
 * active, since AOT must be able to resolve image+token alone), record
 * the image/token pair for `key` in cfg->token_info_hash.
 */
1186 * We try to share variables when possible
1189 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1194 /* inlining can result in deeper stacks */
1195 if (slot >= mono_method_get_header (cfg->method)->max_stack)
1196 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1198 pos = ins->type - 1 + slot * STACK_MAX;
1200 switch (ins->type) {
1207 if ((vnum = cfg->intvars [pos]))
1208 return cfg->varinfo [vnum];
1209 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1210 cfg->intvars [pos] = res->inst_c0;
1213 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1219 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1222 * Don't use this if a generic_context is set, since that means AOT can't
1223 * look up the method using just the image+token.
1224 * table == 0 means this is a reference made from a wrapper.
1226 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1227 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1228 jump_info_token->image = image;
1229 jump_info_token->token = token;
1230 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
/*
 * NOTE(review): the original doc comment survives below; lines are
 * missing inside the body.  Visible flow: (1) on first exit from `bb`,
 * compute/allocate bb->out_stack, preferring an existing in_stack of a
 * successor, otherwise creating temps (fresh vars when inlining, shared
 * interface vars otherwise); (2) propagate out_stack to each successor's
 * in_stack, flagging cfg->unverifiable on a depth mismatch (exception-
 * handler successors are skipped throughout); (3) emit TEMPSTORE
 * instructions saving sp[] into the temps, repeating for any successor
 * whose pre-existing in_stack differs from the one just stored to.
 */
1235 * This function is called to handle items that are left on the evaluation stack
1236 * at basic block boundaries. What happens is that we save the values to local variables
1237 * and we reload them later when first entering the target basic block (with the
1238 * handle_loaded_temps () function).
1239 * A single joint point will use the same variables (stored in the array bb->out_stack or
1240 * bb->in_stack, if the basic block is before or after the joint point).
1242 * This function needs to be called _before_ emitting the last instruction of
1243 * the bb (i.e. before emitting a branch).
1244 * If the stack merge fails at a join point, cfg->unverifiable is set.
1247 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1250 MonoBasicBlock *bb = cfg->cbb;
1251 MonoBasicBlock *outb;
1252 MonoInst *inst, **locals;
1257 if (cfg->verbose_level > 3)
1258 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
1259 if (!bb->out_scount) {
1260 bb->out_scount = count;
1261 //printf ("bblock %d has out:", bb->block_num);
1263 for (i = 0; i < bb->out_count; ++i) {
1264 outb = bb->out_bb [i];
1265 /* exception handlers are linked, but they should not be considered for stack args */
1266 if (outb->flags & BB_EXCEPTION_HANDLER)
1268 //printf (" %d", outb->block_num);
1269 if (outb->in_stack) {
1271 bb->out_stack = outb->in_stack;
1277 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1278 for (i = 0; i < count; ++i) {
1280 * try to reuse temps already allocated for this purpouse, if they occupy the same
1281 * stack slot and if they are of the same type.
1282 * This won't cause conflicts since if 'local' is used to
1283 * store one of the values in the in_stack of a bblock, then
1284 * the same variable will be used for the same outgoing stack
1286 * This doesn't work when inlining methods, since the bblocks
1287 * in the inlined methods do not inherit their in_stack from
1288 * the bblock they are inlined to. See bug #58863 for an
1291 if (cfg->inlined_method)
1292 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1294 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
1299 for (i = 0; i < bb->out_count; ++i) {
1300 outb = bb->out_bb [i];
1301 /* exception handlers are linked, but they should not be considered for stack args */
1302 if (outb->flags & BB_EXCEPTION_HANDLER)
1304 if (outb->in_scount) {
1305 if (outb->in_scount != bb->out_scount) {
1306 cfg->unverifiable = TRUE;
1309 continue; /* check they are the same locals */
1311 outb->in_scount = count;
1312 outb->in_stack = bb->out_stack;
1315 locals = bb->out_stack;
1317 for (i = 0; i < count; ++i) {
1318 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1319 inst->cil_code = sp [i]->cil_code;
1320 sp [i] = locals [i];
1321 if (cfg->verbose_level > 3)
1322 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1326 * It is possible that the out bblocks already have in_stack assigned, and
1327 * the in_stacks differ. In this case, we will store to all the different
1334 /* Find a bblock which has a different in_stack */
1336 while (bindex < bb->out_count) {
1337 outb = bb->out_bb [bindex];
1338 /* exception handlers are linked, but they should not be considered for stack args */
1339 if (outb->flags & BB_EXCEPTION_HANDLER) {
1343 if (outb->in_stack != locals) {
1344 for (i = 0; i < count; ++i) {
1345 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1346 inst->cil_code = sp [i]->cil_code;
1347 sp [i] = locals [i];
1348 if (cfg->verbose_level > 3)
1349 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1351 locals = outb->in_stack;
1360 /* Emit code which loads interface_offsets [klass->interface_id]
1361 * The array is stored in memory before vtable.
/*
 * mini_emit_load_intf_reg_vtable:
 *
 *   Load into INTF_REG the interface-offsets table entry for KLASS. As the
 * comment above notes, the table is stored in memory immediately *before*
 * the vtable, hence the negative offset in the non-AOT path.
 * NOTE(review): this chunk is a sampled view; return type and some braces
 * are elided from the surrounding file.
 */
1364 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1366 if (cfg->compile_aot) {
1367 int ioffset_reg = alloc_preg (cfg);
1368 int iid_reg = alloc_preg (cfg);
/* AOT: the interface id is not known at compile time, so emit a patchable
 * constant. Presumably ADJUSTED_IID is pre-scaled/negated so a plain add of
 * vtable_reg yields the slot address -- TODO confirm against the AOT compiler. */
1370 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1371 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1372 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
/* Non-AOT: interface_id is a known constant; index directly below the vtable. */
1375 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
1380 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1381 * stored in "klass_reg" implements the interface "klass".
/*
 * mini_emit_load_intf_bit_reg_class:
 *
 *   Set INTF_BIT_REG to a nonzero value iff the MonoClass in KLASS_REG
 * implements KLASS, by testing bit (interface_id) of the class's
 * interface_bitmap. Byte index is iid >> 3, bit index is iid & 7.
 */
1384 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1386 int ibitmap_reg = alloc_preg (cfg);
1387 int ibitmap_byte_reg = alloc_preg (cfg);
1389 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap));
1391 if (cfg->compile_aot) {
/* AOT: iid is a load-time constant, so compute byte address and bit mask in IR. */
1392 int iid_reg = alloc_preg (cfg);
1393 int shifted_iid_reg = alloc_preg (cfg);
1394 int ibitmap_byte_address_reg = alloc_preg (cfg);
1395 int masked_iid_reg = alloc_preg (cfg);
1396 int iid_one_bit_reg = alloc_preg (cfg);
1397 int iid_bit_reg = alloc_preg (cfg);
1398 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
/* NOTE(review): uses OP_SHR_IMM/OP_IAND_IMM here while the vtable twin below
 * uses OP_ISHR_IMM/OP_AND_IMM -- presumably equivalent on the relevant
 * operand widths; confirm against the opcode definitions. */
1399 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1400 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1401 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1402 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1403 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1404 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1405 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* Non-AOT: byte offset and bit mask fold to immediates. */
1407 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1408 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1413 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1414 * stored in "vtable_reg" implements the interface "klass".
/*
 * mini_emit_load_intf_bit_reg_vtable:
 *
 *   Same bit test as mini_emit_load_intf_bit_reg_class, but the bitmap is
 * read from a MonoVTable (in VTABLE_REG) rather than a MonoClass.
 */
1417 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1419 int ibitmap_reg = alloc_preg (cfg);
1420 int ibitmap_byte_reg = alloc_preg (cfg);
1422 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap));
1424 if (cfg->compile_aot) {
/* AOT: compute byte address (iid >> 3) and bit mask (1 << (iid & 7)) in IR. */
1425 int iid_reg = alloc_preg (cfg);
1426 int shifted_iid_reg = alloc_preg (cfg);
1427 int ibitmap_byte_address_reg = alloc_preg (cfg);
1428 int masked_iid_reg = alloc_preg (cfg);
1429 int iid_one_bit_reg = alloc_preg (cfg);
1430 int iid_bit_reg = alloc_preg (cfg);
1431 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1432 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, shifted_iid_reg, iid_reg, 3);
1433 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1434 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1435 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, masked_iid_reg, iid_reg, 7);
1436 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1437 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1438 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* Non-AOT: interface_id is constant, fold offset and mask to immediates. */
1440 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1441 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1446 * Emit code which checks whether the interface id of @klass is smaller than
1447 * the value given by max_iid_reg.
/*
 * mini_emit_max_iid_check:
 *
 *   Emit an unsigned comparison max_iid_reg < iid(klass). On failure either
 * branch to FALSE_TARGET (isinst-style, when non-NULL) or throw
 * InvalidCastException (castclass-style).
 */
1450 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1451 MonoBasicBlock *false_target)
1453 if (cfg->compile_aot) {
/* AOT: the interface id is only known at load time -> patchable constant. */
1454 int iid_reg = alloc_preg (cfg);
1455 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1456 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1459 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
/* false_target != NULL => conditional branch; NULL => throw on failure. */
1461 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1463 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1466 /* Same as above, but obtains max_iid from a vtable */
/* Load max_interface_id (unsigned 16-bit) from the vtable in VTABLE_REG,
 * then perform the max-iid check against KLASS. */
1468 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1469 MonoBasicBlock *false_target)
1471 int max_iid_reg = alloc_preg (cfg);
1473 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1474 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1477 /* Same as above, but obtains max_iid from a klass */
/* Same as the vtable variant, but max_interface_id is read from a MonoClass
 * (in KLASS_REG) instead of a MonoVTable. */
1479 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1480 MonoBasicBlock *false_target)
1482 int max_iid_reg = alloc_preg (cfg);
1484 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1485 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * mini_emit_isninst_cast_inst:
 *
 *   Emit an isinst-style subclass test using the supertypes table: check the
 * class in KLASS_REG has idepth >= klass->idepth (only needed when the depth
 * exceeds the statically allocated supertable), then compare
 * supertypes [idepth - 1] against KLASS. Branches to TRUE_TARGET on match,
 * FALSE_TARGET on a failed depth check; falls through otherwise.
 *   KLASS_INS, when non-NULL, supplies the class to compare at runtime
 * (generic sharing); otherwise klass is used as a constant (AOT patchable
 * or immediate). NOTE(review): some branch lines are elided in this view.
 */
1489 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1491 int idepth_reg = alloc_preg (cfg);
1492 int stypes_reg = alloc_preg (cfg);
1493 int stype = alloc_preg (cfg);
1495 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
/* Deep hierarchy: supertypes may be shorter than idepth, so guard the index. */
1496 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1497 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1498 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1500 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1501 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
/* Runtime-supplied class (generic sharing). */
1503 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1504 } else if (cfg->compile_aot) {
1505 int const_reg = alloc_preg (cfg);
1506 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1507 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
/* JIT: the MonoClass* pointer itself is used as an immediate. */
1509 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1511 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/* Convenience wrapper: isinst test with a compile-time klass (no runtime
 * class instruction). */
1515 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1517 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
/*
 * mini_emit_iface_cast:
 *
 *   Emit an interface implementation test against the vtable in VTABLE_REG:
 * max-iid range check, then bitmap bit test. With targets supplied this acts
 * as isinst (branch); with NULL targets it acts as castclass (throw
 * InvalidCastException when the bit is clear).
 */
1521 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1523 int intf_reg = alloc_preg (cfg);
1525 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1526 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1527 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1529 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1531 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1535 * Variant of the above that takes a register to the class, not the vtable.
/* As mini_emit_iface_cast, but the bitmap comes from a MonoClass in
 * KLASS_REG rather than a vtable. */
1538 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1540 int intf_bit_reg = alloc_preg (cfg);
1542 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1543 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1544 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1546 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1548 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * mini_emit_class_check_inst:
 *
 *   Emit an exact class-equality check of KLASS_REG against KLASS (or
 * against KLASS_INST's runtime value when non-NULL -- the guarding
 * conditional is elided in this view), throwing InvalidCastException on
 * mismatch.
 */
1552 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1555 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1556 } else if (cfg->compile_aot) {
/* AOT: the class pointer must go through a patchable constant. */
1557 int const_reg = alloc_preg (cfg);
1558 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1559 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1561 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1563 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Convenience wrapper: exact class check with a compile-time klass. */
1567 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1569 return mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
/*
 * mini_emit_class_check_branch:
 *
 *   Compare KLASS_REG against KLASS and branch to TARGET using BRANCH_OP
 * (caller chooses equality/inequality semantics via the opcode).
 */
1573 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1575 if (cfg->compile_aot) {
1576 int const_reg = alloc_preg (cfg);
1577 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1578 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1580 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1582 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
1586 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/*
 * mini_emit_castclass_inst:
 *
 *   Emit a castclass check for the object at OBJ_REG whose class is in
 * KLASS_REG. The visible portion handles the array case: verify rank,
 * then recursively check the element class (with special-casing for
 * object/Enum element types and interfaces), and finally verify the object
 * is a vector when a rank-1 SZARRAY is expected. The non-array supertable
 * path follows. Throws InvalidCastException on failure; branches to
 * OBJECT_IS_NULL where a null reference short-circuits the check.
 *   NOTE(review): enclosing conditionals/braces are elided in this sampled
 * view; the branch structure below is partial.
 */
1589 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1592 int rank_reg = alloc_preg (cfg);
1593 int eclass_reg = alloc_preg (cfg);
/* Array path does not support a runtime-supplied class instruction. */
1595 g_assert (!klass_inst);
1596 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
1597 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1598 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1599 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1600 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
1601 if (klass->cast_class == mono_defaults.object_class) {
/* object[] also accepts enum[] whose basetype casts to object -- check the
 * element's parent against Enum's parent, then require Enum itself. */
1602 int parent_reg = alloc_preg (cfg);
1603 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
1604 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1605 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1606 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1607 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1608 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1609 } else if (klass->cast_class == mono_defaults.enum_class) {
1610 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1611 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
/* Interface element type: NULL targets => throwing (castclass) semantics. */
1612 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1614 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1615 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1618 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1619 /* Check that the object is a vector too */
1620 int bounds_reg = alloc_preg (cfg);
1621 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
1622 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1623 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-array path: supertable lookup, mirroring mini_emit_isninst_cast_inst
 * but throwing instead of branching on a failed depth check. */
1626 int idepth_reg = alloc_preg (cfg);
1627 int stypes_reg = alloc_preg (cfg);
1628 int stype = alloc_preg (cfg);
1630 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1631 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1632 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1633 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1635 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1636 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1637 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
/* Convenience wrapper: castclass check with a compile-time klass. */
1642 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1644 return mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/*
 * mini_emit_memset:
 *
 *   Emit IR to set SIZE bytes at DESTREG+OFFSET to VAL (only val == 0 is
 * supported, per the assert). Small aligned sizes use a single store
 * immediate; otherwise a zero-filled register is stored in the widest
 * chunks the alignment (and, without unaligned access, the platform)
 * permits, falling back to byte stores.
 *   NOTE(review): loop/conditional scaffolding between the chunk stores is
 * elided in this sampled view.
 */
1648 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1652 g_assert (val == 0);
/* Fast path: size fits in one naturally-aligned store immediate. */
1657 if ((size <= 4) && (size <= align)) {
1660 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1663 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1666 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1668 #if SIZEOF_REGISTER == 8
1670 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* General path: materialize val (zero) in a register once, then store. */
1676 val_reg = alloc_preg (cfg);
1678 if (SIZEOF_REGISTER == 8)
1679 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1681 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
/* Unaligned destination: byte stores until alignment is reached. */
1684 /* This could be optimized further if neccesary */
1686 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1693 #if !NO_UNALIGNED_ACCESS
1694 if (SIZEOF_REGISTER == 8) {
/* 64-bit: flush a possible 4-byte remainder, then 8-byte chunks. */
1696 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1701 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
/* Remaining tail: 4-, 2-, then 1-byte stores. */
1709 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1714 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1719 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1725 #endif /* DISABLE_JIT */
/*
 * mini_emit_memcpy:
 *
 *   Emit inline IR copying SIZE bytes from SRCREG+SOFFSET to
 * DESTREG+DOFFSET. Copies in the widest chunks permitted by ALIGN (and by
 * NO_UNALIGNED_ACCESS), falling back to 2- and 1-byte tail copies. Each
 * chunk goes through a freshly allocated temp register (load then store).
 *   NOTE(review): loop/conditional scaffolding between the chunk copies is
 * elided in this sampled view.
 */
1728 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1735 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1736 g_assert (size < 10000);
/* Unaligned source/dest: byte copies until alignment is reached. */
1739 /* This could be optimized further if neccesary */
1741 cur_reg = alloc_preg (cfg);
1742 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1743 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1750 #if !NO_UNALIGNED_ACCESS
1751 if (SIZEOF_REGISTER == 8) {
/* 64-bit: copy in 8-byte chunks. */
1753 cur_reg = alloc_preg (cfg);
1754 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1755 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
/* 4-byte chunks. */
1764 cur_reg = alloc_preg (cfg);
1765 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1766 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
/* 2-byte remainder. */
1772 cur_reg = alloc_preg (cfg);
1773 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1774 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
/* 1-byte remainder. */
1780 cur_reg = alloc_preg (cfg);
1781 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1782 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * ret_type_to_call_opcode:
 *
 *   Map a return TYPE to the IR call opcode family: plain/REG (calli)/VIRT
 * variants of CALL (int/ref), LCALL (64-bit), FCALL (float), VCALL (vtype),
 * VOIDCALL. Byref returns and pointer-sized types use the plain CALL family.
 * Enums and generic instances are reduced to their underlying type first.
 */
1792 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
/* Byref returns are managed pointers -> plain (pointer-sized) call. */
1795 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
/* Resolve shared generic parameters to a concrete basic type. */
1798 type = mini_get_basic_type_from_generic (gsctx, type);
1799 switch (type->type) {
1800 case MONO_TYPE_VOID:
1801 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
1804 case MONO_TYPE_BOOLEAN:
1807 case MONO_TYPE_CHAR:
1810 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1814 case MONO_TYPE_FNPTR:
1815 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1816 case MONO_TYPE_CLASS:
1817 case MONO_TYPE_STRING:
1818 case MONO_TYPE_OBJECT:
1819 case MONO_TYPE_SZARRAY:
1820 case MONO_TYPE_ARRAY:
1821 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1824 return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
1827 return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
1828 case MONO_TYPE_VALUETYPE:
1829 if (type->data.klass->enumtype) {
/* Enums: retry with the underlying basetype (goto/continue elided here). */
1830 type = mono_class_enum_basetype (type->data.klass);
1833 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1834 case MONO_TYPE_TYPEDBYREF:
1835 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1836 case MONO_TYPE_GENERICINST:
/* Generic instances: retry with the container class's byval type. */
1837 type = &type->data.generic_class->container_class->byval_arg;
1840 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
1846 * target_type_is_incompatible:
1847 * @cfg: MonoCompile context
1849 * Check that the item @arg on the evaluation stack can be stored
1850 * in the target type (can be a local, or field, etc).
1851 * The cfg arg can be used to check if we need verification or just
1854 * Returns: non-0 value if arg can't be stored on a target.
/*
 * target_type_is_incompatible: (doc comment precedes this block)
 *
 *   Returns nonzero when the evaluation-stack item ARG cannot be stored
 * into TARGET, based on the stack-type lattice (STACK_I4/I8/R8/PTR/MP/
 * OBJ/VTYPE). Several FIXMEs note that full type compatibility is not yet
 * checked -- only the stack-type category (and, for vtypes, class
 * identity).
 */
1857 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1859 MonoType *simple_type;
1862 if (target->byref) {
1863 /* FIXME: check that the pointed to types match */
1864 if (arg->type == STACK_MP)
1865 return arg->klass != mono_class_from_mono_type (target);
1866 if (arg->type == STACK_PTR)
/* Strip enum wrappers / custom modifiers before categorizing. */
1871 simple_type = mono_type_get_underlying_type (target);
1872 switch (simple_type->type) {
1873 case MONO_TYPE_VOID:
1877 case MONO_TYPE_BOOLEAN:
1880 case MONO_TYPE_CHAR:
1883 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1887 /* STACK_MP is needed when setting pinned locals */
1888 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1893 case MONO_TYPE_FNPTR:
1894 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1897 case MONO_TYPE_CLASS:
1898 case MONO_TYPE_STRING:
1899 case MONO_TYPE_OBJECT:
1900 case MONO_TYPE_SZARRAY:
1901 case MONO_TYPE_ARRAY:
1902 if (arg->type != STACK_OBJ)
1904 /* FIXME: check type compatibility */
1908 if (arg->type != STACK_I8)
1913 if (arg->type != STACK_R8)
1916 case MONO_TYPE_VALUETYPE:
/* Value types additionally require exact class identity. */
1917 if (arg->type != STACK_VTYPE)
1919 klass = mono_class_from_mono_type (simple_type);
1920 if (klass != arg->klass)
1923 case MONO_TYPE_TYPEDBYREF:
1924 if (arg->type != STACK_VTYPE)
1926 klass = mono_class_from_mono_type (simple_type);
1927 if (klass != arg->klass)
1930 case MONO_TYPE_GENERICINST:
1931 if (mono_type_generic_inst_is_valuetype (simple_type)) {
1932 if (arg->type != STACK_VTYPE)
1934 klass = mono_class_from_mono_type (simple_type);
1935 if (klass != arg->klass)
1939 if (arg->type != STACK_OBJ)
1941 /* FIXME: check type compatibility */
1945 case MONO_TYPE_MVAR:
1946 /* FIXME: all the arguments must be references for now,
1947 * later look inside cfg and see if the arg num is
1948 * really a reference
/* Open generic params only occur under generic sharing. */
1950 g_assert (cfg->generic_sharing_context);
1951 if (arg->type != STACK_OBJ)
1955 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
1961 * Prepare arguments for passing to a function call.
1962 * Return a non-zero value if the arguments can't be passed to the given
1964 * The type checks are not yet complete and some conversions may need
1965 * casts on 32 or 64 bit architectures.
1967 * FIXME: implement this using target_type_is_incompatible ()
/*
 * check_call_signature: (doc comment precedes this block)
 *
 *   Returns nonzero when ARGS cannot be passed to SIG: checks the implicit
 * this pointer, then each parameter's stack-type category against the
 * signature type. Coarser than target_type_is_incompatible (see the FIXME
 * in the preceding comment).
 */
1970 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
1972 MonoType *simple_type;
/* Implicit this: must be a reference, managed pointer, or native pointer. */
1976 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
1980 for (i = 0; i < sig->param_count; ++i) {
1981 if (sig->params [i]->byref) {
1982 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
1986 simple_type = sig->params [i];
1987 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
1989 switch (simple_type->type) {
1990 case MONO_TYPE_VOID:
1995 case MONO_TYPE_BOOLEAN:
1998 case MONO_TYPE_CHAR:
2001 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2007 case MONO_TYPE_FNPTR:
/* Native-int params also accept managed pointers and references here. */
2008 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2011 case MONO_TYPE_CLASS:
2012 case MONO_TYPE_STRING:
2013 case MONO_TYPE_OBJECT:
2014 case MONO_TYPE_SZARRAY:
2015 case MONO_TYPE_ARRAY:
2016 if (args [i]->type != STACK_OBJ)
2021 if (args [i]->type != STACK_I8)
2026 if (args [i]->type != STACK_R8)
2029 case MONO_TYPE_VALUETYPE:
2030 if (simple_type->data.klass->enumtype) {
/* Enums: recheck against the underlying basetype. */
2031 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2034 if (args [i]->type != STACK_VTYPE)
2037 case MONO_TYPE_TYPEDBYREF:
2038 if (args [i]->type != STACK_VTYPE)
2041 case MONO_TYPE_GENERICINST:
2042 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2046 g_error ("unknown type 0x%02x in check_call_signature",
/* Map an OP_*CALLVIRT opcode to its direct-call equivalent; aborts on any
 * other opcode. (Most case labels/returns are elided in this view.) */
2054 callvirt_to_call (int opcode)
2059 case OP_VOIDCALLVIRT:
2068 g_assert_not_reached ();
/* Map an OP_*CALLVIRT opcode to the corresponding *CALL_MEMBASE opcode
 * (indirect call through [basereg + offset]); aborts on any other opcode. */
2075 callvirt_to_call_membase (int opcode)
2079 return OP_CALL_MEMBASE;
2080 case OP_VOIDCALLVIRT:
2081 return OP_VOIDCALL_MEMBASE;
2083 return OP_FCALL_MEMBASE;
2085 return OP_LCALL_MEMBASE;
2087 return OP_VCALL_MEMBASE;
2089 g_assert_not_reached ();
2095 #ifdef MONO_ARCH_HAVE_IMT
/*
 * emit_imt_argument:
 *
 *   Pass the IMT identification argument for an interface call. When the
 * architecture dedicates a register (MONO_ARCH_IMT_REG), materialize either
 * the caller-supplied IMT_ARG or the target method pointer (AOT constant or
 * direct pointer) and attach it as an out-arg; otherwise defer to the
 * arch-specific hook.
 */
2097 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
2099 #ifdef MONO_ARCH_IMT_REG
2100 int method_reg = alloc_preg (cfg);
2103 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2104 } else if (cfg->compile_aot) {
2105 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
/* JIT: embed the MonoMethod* directly as a pointer constant. */
2108 MONO_INST_NEW (cfg, ins, OP_PCONST);
2109 ins->inst_p0 = call->method;
2110 ins->dreg = method_reg;
2111 MONO_ADD_INS (cfg->cbb, ins);
2114 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2116 mono_arch_emit_imt_argument (cfg, call, imt_arg);
2121 static MonoJumpInfo *
/* Allocate a MonoJumpInfo from MP and fill in ip/type/target (some field
 * assignments are elided in this view). Mempool-owned: no explicit free. */
2122 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2124 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2128 ji->data.target = target;
/*
 * mono_emit_call_args:
 *
 *   Build a MonoCallInst for SIG/ARGS: pick the call opcode (OP_TAILCALL
 * for tail calls, otherwise from the return type), set up value-type
 * return handling (either via cfg->vret_addr or an OP_OUTARG_VTRETADDR
 * temp), perform SOFT_FLOAT r8->r4 argument conversions, and let the
 * backend (LLVM or mono_arch) emit the out-args. Does NOT add the call
 * instruction to a bblock -- callers do that.
 */
2133 inline static MonoCallInst *
2134 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2135 MonoInst **args, int calli, int virtual, int tail)
2138 #ifdef MONO_ARCH_SOFT_FLOAT
2143 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2145 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2148 call->signature = sig;
2150 type_to_eval_stack_type ((cfg), sig->ret, &call->inst);
/* Vtype returns: first branch presumably gated on a ret-by-address ABI
 * condition (elided); it reuses the caller's vret_addr var. */
2153 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2154 call->vret_var = cfg->vret_addr;
2155 //g_assert_not_reached ();
2157 } else if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2158 MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
2161 temp->backend.is_pinvoke = sig->pinvoke;
2164 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2165 * address of return value to increase optimization opportunities.
2166 * Before vtype decomposition, the dreg of the call ins itself represents the
2167 * fact the call modifies the return value. After decomposition, the call will
2168 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2169 * will be transformed into an LDADDR.
2171 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2172 loada->dreg = alloc_preg (cfg);
2173 loada->inst_p0 = temp;
2174 /* We reference the call too since call->dreg could change during optimization */
2175 loada->inst_p1 = call;
2176 MONO_ADD_INS (cfg->cbb, loada);
2178 call->inst.dreg = temp->dreg;
2180 call->vret_var = loada;
2181 } else if (!MONO_TYPE_IS_VOID (sig->ret))
2182 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2184 #ifdef MONO_ARCH_SOFT_FLOAT
2185 if (COMPILE_SOFT_FLOAT (cfg)) {
2187 * If the call has a float argument, we would need to do an r8->r4 conversion using
2188 * an icall, but that cannot be done during the call sequence since it would clobber
2189 * the call registers + the stack. So we do it before emitting the call.
2191 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2193 MonoInst *in = call->args [i];
2195 if (i >= sig->hasthis)
2196 t = sig->params [i - sig->hasthis];
2198 t = &mono_defaults.int_class->byval_arg;
2199 t = mono_type_get_underlying_type (t);
2201 if (!t->byref && t->type == MONO_TYPE_R4) {
2202 MonoInst *iargs [1];
/* Convert the r8 stack value to an r4 bit pattern in an int vreg. */
2206 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2208 /* The result will be in an int vreg */
2209 call->args [i] = conv;
2216 if (COMPILE_LLVM (cfg))
2217 mono_llvm_emit_call (cfg, call);
2219 mono_arch_emit_call (cfg, call);
2221 mono_arch_emit_call (cfg, call);
/* Track the maximum outgoing-arg area and mark the method as having calls. */
2224 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2225 cfg->flags |= MONO_CFG_HAS_CALLS;
/* Emit an indirect call through ADDR: build the call args with calli=TRUE,
 * wire ADDR's vreg as sreg1, and append the call to the current bblock. */
2230 inline static MonoInst*
2231 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr)
2233 MonoCallInst *call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE);
2235 call->inst.sreg1 = addr->dreg;
2237 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2239 return (MonoInst*)call;
/*
 * mono_emit_rgctx_calli:
 *
 *   Indirect call that also passes a runtime-generic-context argument in
 * the architecture's dedicated RGCTX register. Only available when
 * MONO_ARCH_RGCTX_REG is defined; aborts otherwise.
 */
2242 inline static MonoInst*
2243 mono_emit_rgctx_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *rgctx_arg)
2245 #ifdef MONO_ARCH_RGCTX_REG
/* Copy the rgctx value into a fresh vreg, then pin it to the RGCTX reg. */
2250 rgctx_reg = mono_alloc_preg (cfg);
2251 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2253 call = (MonoCallInst*)mono_emit_calli (cfg, sig, args, addr);
2255 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2256 cfg->uses_rgctx_reg = TRUE;
2257 call->rgctx_reg = TRUE;
2259 return (MonoInst*)call;
2261 g_assert_not_reached ();
2267 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, int rgctx_type);
2269 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, int rgctx_type);
/*
 * mono_emit_method_call_full:
 *
 *   Emit a (possibly virtual) managed call to METHOD. Handles, in order:
 * string-ctor signature fixup, transparent-proxy/remoting wrappers (with
 * an rgctx indirect call under generic sharing), delegate Invoke via the
 * invoke_impl trampoline, devirtualization of non-virtual/final methods,
 * and finally a true virtual dispatch through the vtable (interface calls
 * go through the IMT or the interface-offsets table).
 *   NOTE(review): several conditionals/braces are elided in this sampled
 * view; the branch structure below is partial.
 */
2272 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2273 MonoInst **args, MonoInst *this, MonoInst *imt_arg)
2275 gboolean might_be_remote;
2276 gboolean virtual = this != NULL;
2277 gboolean enable_for_aot = TRUE;
2281 if (method->string_ctor) {
2282 /* Create the real signature */
2283 /* FIXME: Cache these */
2284 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2285 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
/* Remoting: an instance call on a MarshalByRef (or object) class that is
 * not dispatched virtually may need the remoting-check wrapper. */
2290 might_be_remote = this && sig->hasthis &&
2291 (method->klass->marshalbyref || method->klass == mono_defaults.object_class) &&
2292 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && !MONO_CHECK_THIS (this);
2294 context_used = mono_method_check_context_used (method);
2295 if (might_be_remote && context_used) {
/* Shared generic code can't use wrappers; fetch the invoke-with-check
 * address from the rgctx and do an indirect call instead. */
2298 g_assert (cfg->generic_sharing_context);
2300 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2302 return mono_emit_calli (cfg, sig, args, addr);
2305 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, FALSE);
2307 if (might_be_remote)
2308 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2310 call->method = method;
2311 call->inst.flags |= MONO_INST_HAS_METHOD;
2312 call->inst.inst_left = this;
/* Virtual dispatch path begins here (guard elided). */
2315 int vtable_reg, slot_reg, this_reg;
2317 this_reg = this->dreg;
2319 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
2320 if ((method->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
2321 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2323 /* Make a call to delegate->invoke_impl */
2324 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2325 call->inst.inst_basereg = this_reg;
2326 call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2327 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2329 return (MonoInst*)call;
/* Devirtualize: non-virtual, or final and not the remoting wrapper. */
2333 if ((!cfg->compile_aot || enable_for_aot) &&
2334 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2335 (MONO_METHOD_IS_FINAL (method) &&
2336 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2337 !(method->klass->marshalbyref && context_used)) {
2339 * the method is not virtual, we just need to ensure this is not null
2340 * and then we can call the method directly.
2342 if (method->klass->marshalbyref || method->klass == mono_defaults.object_class) {
2344 * The check above ensures method is not gshared, this is needed since
2345 * gshared methods can't have wrappers.
2347 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2350 if (!method->string_ctor)
2351 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2353 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2355 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2357 return (MonoInst*)call;
2360 if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2362 * the method is virtual, but we can statically dispatch since either
2363 * it's class or the method itself are sealed.
2364 * But first we need to ensure it's not a null reference.
2366 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2368 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2369 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2371 return (MonoInst*)call;
/* True virtual call: indirect through [slot_reg + inst_offset]. */
2374 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2376 vtable_reg = alloc_preg (cfg);
2377 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2378 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2380 #ifdef MONO_ARCH_HAVE_IMT
/* IMT: slots sit at negative offsets before the vtable. */
2382 guint32 imt_slot = mono_method_get_imt_slot (method);
2383 emit_imt_argument (cfg, call, imt_arg);
2384 slot_reg = vtable_reg;
2385 call->inst.inst_offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
/* No IMT: resolve via the interface-offsets table. */
2388 if (slot_reg == -1) {
2389 slot_reg = alloc_preg (cfg);
2390 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2391 call->inst.inst_offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
/* Class virtual call: index straight into the vtable array. */
2394 slot_reg = vtable_reg;
2395 call->inst.inst_offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
2396 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2397 #ifdef MONO_ARCH_HAVE_IMT
/* imt_arg on a non-interface call only occurs for generic virtuals. */
2399 g_assert (mono_method_signature (method)->generic_param_count);
2400 emit_imt_argument (cfg, call, imt_arg);
2405 call->inst.sreg1 = slot_reg;
2406 call->virtual = TRUE;
2409 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2411 return (MonoInst*)call;
/*
 * mono_emit_rgctx_method_call_full:
 *
 *   Like mono_emit_method_call_full, but also passes VTABLE_ARG in the
 * dedicated RGCTX register (requires MONO_ARCH_RGCTX_REG). The move is
 * emitted before the call so the value is live when out-args are set up.
 */
2415 mono_emit_rgctx_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2416 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *vtable_arg)
2418 #ifdef MONO_ARCH_RGCTX_REG
2425 #ifdef MONO_ARCH_RGCTX_REG
2426 rgctx_reg = mono_alloc_preg (cfg);
2427 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
2432 ins = mono_emit_method_call_full (cfg, method, sig, args, this, imt_arg);
2434 call = (MonoCallInst*)ins;
2436 #ifdef MONO_ARCH_RGCTX_REG
2437 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2438 cfg->uses_rgctx_reg = TRUE;
2439 call->rgctx_reg = TRUE;
/* Convenience wrapper: call METHOD with its own signature and no IMT arg. */
2449 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2451 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), args, this, NULL);
/* Emit a direct call to the native function FUNC with signature SIG
 * (fptr assignment elided in this view); appends to the current bblock. */
2455 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2462 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE);
2465 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2467 return (MonoInst*)call;
/* Emit a call to the JIT icall registered for FUNC: look up its info by
 * address and call through the icall wrapper with the registered signature. */
2471 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2473 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2477 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2481 * mono_emit_abs_call:
2483 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
/*
 * mono_emit_abs_call: (doc comment precedes this block)
 *
 *   Build a MonoJumpInfo for PATCH_TYPE/DATA, register it in
 * cfg->abs_patches, and emit a native call whose "address" is the patch
 * itself; PATCH_INFO_ABS resolution fixes up the real target later.
 */
2485 inline static MonoInst*
2486 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2487 MonoMethodSignature *sig, MonoInst **args)
2489 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2493 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
/* Lazily create the patch table; keyed by the patch info itself. */
2496 if (cfg->abs_patches == NULL)
2497 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2498 g_hash_table_insert (cfg->abs_patches, ji, ji);
2499 ins = mono_emit_native_call (cfg, ji, sig, args);
2500 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * mono_emit_widen_call_res:
 *
 *   After a pinvoke (or LLVM-compiled) call returning a small integer,
 *   emit an explicit sign/zero extension, since native code may leave the
 *   upper bits of the register uninitialized.
 *   NOTE(review): fragment — the 'break' statements, the default case and
 *   the trailing returns are not visible in this view.
 */
2505 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2507 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2508 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2512 * Native code might return non register sized integers
2513 * without initializing the upper bits.
/* Pick the widening conversion matching the return type's load width. */
2515 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2516 case OP_LOADI1_MEMBASE:
2517 widen_op = OP_ICONV_TO_I1;
2519 case OP_LOADU1_MEMBASE:
2520 widen_op = OP_ICONV_TO_U1;
2522 case OP_LOADI2_MEMBASE:
2523 widen_op = OP_ICONV_TO_I2;
2525 case OP_LOADU2_MEMBASE:
2526 widen_op = OP_ICONV_TO_U2;
2532 if (widen_op != -1) {
2533 int dreg = alloc_preg (cfg);
2536 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
2537 widen->type = ins->type;
/*
 * get_memcpy_method:
 *
 *   Lazily look up and cache the managed String.memcpy (3-arg) helper from
 *   corlib; aborts via g_error if the running corlib doesn't provide it.
 *   NOTE(review): the lazy init is unsynchronized — presumably benign since
 *   the lookup is idempotent, but confirm against the JIT's locking rules.
 */
2547 get_memcpy_method (void)
2549 static MonoMethod *memcpy_method = NULL;
2550 if (!memcpy_method) {
2551 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2553 g_error ("Old corlib found. Install a new one");
2555 return memcpy_method;
2559 * Emit code to copy a valuetype of type @klass whose address is stored in
2560 * @src->dreg to memory whose address is stored at @dest->dreg.
2563 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
2565 MonoInst *iargs [3];
2568 MonoMethod *memcpy_method;
2572 * This check breaks with spilled vars... need to handle it during verification anyway.
2573 * g_assert (klass && klass == src->klass && klass == dest->klass);
/* Size/alignment differ between native (marshalled) and managed layout. */
2577 n = mono_class_native_size (klass, &align);
2579 n = mono_class_value_size (klass, &align);
2581 #if HAVE_WRITE_BARRIERS
2582 /* if native is true there should be no references in the struct */
2583 if (klass->has_references && !native) {
2584 /* Avoid barriers when storing to the stack */
2585 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
2586 (dest->opcode == OP_LDADDR))) {
2587 int context_used = 0;
2592 if (cfg->generic_sharing_context)
2593 context_used = mono_class_check_context_used (klass);
/* The mono_value_copy icall needs the class as third argument: load it
 * from the rgctx under generic sharing, else emit it as a constant. */
2595 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
2597 if (cfg->compile_aot) {
2598 EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
2600 EMIT_NEW_PCONST (cfg, iargs [2], klass);
2601 mono_class_compute_gc_descriptor (klass);
2605 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* Small structs: inline the copy; larger ones call the managed memcpy. */
2610 if ((cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
2611 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
2612 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
2616 EMIT_NEW_ICONST (cfg, iargs [2], n);
2618 memcpy_method = get_memcpy_method ();
2619 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *
 *   Lazily look up and cache the managed String.memset (3-arg) helper from
 *   corlib; aborts via g_error on an old corlib.  Mirrors get_memcpy_method.
 */
2624 get_memset_method (void)
2626 static MonoMethod *memset_method = NULL;
2627 if (!memset_method) {
2628 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
2630 g_error ("Old corlib found. Install a new one");
2632 return memset_method;
/*
 * mini_emit_initobj:
 *
 *   Emit code zero-initializing a valuetype of type KLASS at address
 *   DEST->dreg: an inline memset for small sizes, otherwise a call to the
 *   managed memset helper.
 */
2636 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
2638 MonoInst *iargs [3];
2641 MonoMethod *memset_method;
2643 /* FIXME: Optimize this for the case when dest is an LDADDR */
2645 mono_class_init (klass);
2646 n = mono_class_value_size (klass, &align);
/* Up to 5 pointer-sized words: cheaper to emit the stores inline. */
2648 if (n <= sizeof (gpointer) * 5) {
2649 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
2652 memset_method = get_memset_method ();
2654 EMIT_NEW_ICONST (cfg, iargs [1], 0);
2655 EMIT_NEW_ICONST (cfg, iargs [2], n);
2656 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 *
 *   Emit IR loading the runtime generic context for METHOD.  The source
 *   differs by case: the MRGCTX variable for inflated generic methods,
 *   the vtable variable for static/valuetype methods, or the vtable read
 *   from the 'this' argument otherwise.
 *   NOTE(review): fragment — the return statements and some declarations
 *   are missing from this view.
 */
2661 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
2663 MonoInst *this = NULL;
2665 g_assert (cfg->generic_sharing_context);
/* Instance methods of reference types that don't need a per-method
 * context can recover the context through 'this'. */
2667 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
2668 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
2669 !method->klass->valuetype)
2670 EMIT_NEW_ARGLOAD (cfg, this, 0);
2672 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
2673 MonoInst *mrgctx_loc, *mrgctx_var;
2676 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
2678 mrgctx_loc = mono_get_vtable_var (cfg);
2679 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
2682 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
2683 MonoInst *vtable_loc, *vtable_var;
2687 vtable_loc = mono_get_vtable_var (cfg);
2688 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
2690 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
2691 MonoInst *mrgctx_var = vtable_var;
2694 vtable_reg = alloc_preg (cfg);
/* The shared variable actually holds an MRGCTX here; dereference its
 * class_vtable field to get the vtable. */
2695 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
2696 vtable_var->type = STACK_PTR;
2702 int vtable_reg, res_reg;
2704 vtable_reg = alloc_preg (cfg);
2705 res_reg = alloc_preg (cfg);
/* Default case: load the vtable out of the 'this' object. */
2706 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 *
 *   Allocate (from MP, zero-initialized) an rgctx-fetch descriptor:
 *   which method's context to use, whether it lives in an MRGCTX, the
 *   patch (PATCH_TYPE/PATCH_DATA) identifying the data, and the slot
 *   info type to fetch.
 */
2711 static MonoJumpInfoRgctxEntry *
2712 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, int info_type)
2714 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
2715 res->method = method;
2716 res->in_mrgctx = in_mrgctx;
2717 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
2718 res->data->type = patch_type;
2719 res->data->data.target = patch_data;
2720 res->info_type = info_type;
/*
 * emit_rgctx_fetch:
 *
 *   Emit a call to the rgctx lazy-fetch trampoline that resolves ENTRY
 *   against the given RGCTX value at runtime.
 */
2725 static inline MonoInst*
2726 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
2728 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 *
 *   Emit IR that fetches the RGCTX_TYPE property of KLASS from the runtime
 *   generic context of the current method.
 */
2732 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
2733 MonoClass *klass, int rgctx_type)
2735 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
2736 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2738 return emit_rgctx_fetch (cfg, rgctx, entry);
2742 * emit_get_rgctx_method:
2744 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
2745 * normal constants, else emit a load from the rgctx.
2748 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
2749 MonoMethod *cmethod, int rgctx_type)
/* No generic sharing in play: the value is a compile-time constant. */
2751 if (!context_used) {
2754 switch (rgctx_type) {
2755 case MONO_RGCTX_INFO_METHOD:
2756 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
2758 case MONO_RGCTX_INFO_METHOD_RGCTX:
2759 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
2762 g_assert_not_reached ();
/* Shared code: fetch the value from the rgctx at runtime. */
2765 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
2766 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2768 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *
 *   Emit IR that fetches the RGCTX_TYPE property of FIELD from the runtime
 *   generic context of the current method.
 */
2773 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
2774 MonoClassField *field, int rgctx_type)
2776 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
2777 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2779 return emit_rgctx_fetch (cfg, rgctx, entry);
2783 * On return the caller must check @klass for load errors.
/*
 * emit_generic_class_init:
 *
 *   Emit a call to the generic class-init trampoline for KLASS, passing the
 *   vtable (from the rgctx under generic sharing, else as a constant) in
 *   MONO_ARCH_VTABLE_REG when the architecture defines one.
 */
2786 emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
2788 MonoInst *vtable_arg;
2790 int context_used = 0;
2792 if (cfg->generic_sharing_context)
2793 context_used = mono_class_check_context_used (klass);
2796 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
2797 klass, MONO_RGCTX_INFO_VTABLE);
2799 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2803 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
2806 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
2807 #ifdef MONO_ARCH_VTABLE_REG
2808 mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
2809 cfg->uses_vtable_reg = TRUE;
2816 * On return the caller must check @array_class for load errors
/*
 * mini_emit_check_array_type:
 *
 *   Emit a runtime check that OBJ's type is exactly ARRAY_CLASS, throwing
 *   ArrayTypeMismatchException otherwise.  The comparison strategy depends
 *   on MONO_OPT_SHARED (compare classes), generic sharing (compare against
 *   the rgctx-loaded vtable) and AOT (compare against a vtable constant).
 */
2819 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
2821 int vtable_reg = alloc_preg (cfg);
2822 int context_used = 0;
2824 if (cfg->generic_sharing_context)
2825 context_used = mono_class_check_context_used (array_class);
2827 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
2829 if (cfg->opt & MONO_OPT_SHARED) {
/* Shared code can't bake in a vtable: compare MonoClass pointers instead. */
2830 int class_reg = alloc_preg (cfg);
2831 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2832 if (cfg->compile_aot) {
2833 int klass_reg = alloc_preg (cfg);
2834 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
2835 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
2837 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
2839 } else if (context_used) {
2840 MonoInst *vtable_ins;
2842 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
2843 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
2845 if (cfg->compile_aot) {
2849 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
2851 vt_reg = alloc_preg (cfg);
2852 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
2853 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
2856 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
2858 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
2862 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
/*
 * save_cast_details:
 *
 *   When --debug=casts is enabled, store the source class (read from the
 *   object at OBJ_REG) and the target KLASS into the thread's MonoJitTlsData
 *   so a failing cast can report both types.
 *   NOTE(review): the fprintf message below has a stray '.' after the
 *   newline ("...platform.\n." ) — cosmetic bug, left untouched here.
 */
2866 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg)
2868 if (mini_get_debug_options ()->better_cast_details) {
2869 int to_klass_reg = alloc_preg (cfg);
2870 int vtable_reg = alloc_preg (cfg);
2871 int klass_reg = alloc_preg (cfg);
2872 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
2875 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
2879 MONO_ADD_INS (cfg->cbb, tls_get);
2880 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2881 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2883 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
2884 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
2885 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
/*
 * reset_cast_details:
 *
 *   Clear the per-thread cast-details state written by save_cast_details ()
 *   once the cast has succeeded (only the 'from' field needs clearing).
 */
2890 reset_cast_details (MonoCompile *cfg)
2892 /* Reset the variables holding the cast details */
2893 if (mini_get_debug_options ()->better_cast_details) {
2894 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
2896 MONO_ADD_INS (cfg->cbb, tls_get);
2897 /* It is enough to reset the from field */
2898 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
2903 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
2904 * generic code is generated.
2907 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
2909 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
2912 MonoInst *rgctx, *addr;
2914 /* FIXME: What if the class is shared? We might not
2915 have to get the address of the method from the
/* Shared case: fetch Nullable<T>.Unbox's code address from the rgctx
 * and call indirectly, passing the rgctx along. */
2917 addr = emit_get_rgctx_method (cfg, context_used, method,
2918 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
2920 rgctx = emit_get_rgctx (cfg, method, context_used);
2922 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
/* Non-shared case: direct call. */
2924 return mono_emit_method_call (cfg, method, &val, NULL);
/*
 * handle_unbox:
 *
 *   Emit the unbox sequence for SP[0]: verify the object's element class
 *   matches KLASS (InvalidCastException otherwise) and produce the address
 *   of the boxed payload (object + sizeof (MonoObject)).
 *   NOTE(review): fragment — some lines (returns, braces) are missing.
 */
2929 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
2933 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
2934 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
2935 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
2936 int rank_reg = alloc_dreg (cfg ,STACK_I4);
2938 obj_reg = sp [0]->dreg;
/* Faulting load also performs the implicit null check. */
2939 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2940 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
2942 /* FIXME: generics */
2943 g_assert (klass->rank == 0);
/* Arrays can never be unboxed to a non-array klass. */
2946 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
2947 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
2949 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2950 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
2953 MonoInst *element_class;
2955 /* This assertion is from the unboxcast insn */
2956 g_assert (klass->rank == 0);
/* Shared code: compare against the element class loaded from the rgctx. */
2958 element_class = emit_get_rgctx_klass (cfg, context_used,
2959 klass->element_class, MONO_RGCTX_INFO_KLASS);
2961 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
2962 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
2964 save_cast_details (cfg, klass->element_class, obj_reg);
2965 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
2966 reset_cast_details (cfg);
/* Result: pointer just past the MonoObject header, i.e. the value data. */
2969 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_PTR), obj_reg, sizeof (MonoObject));
2970 MONO_ADD_INS (cfg->cbb, add);
2971 add->type = STACK_MP;
2978 * Returns NULL and set the cfg exception on error.
/*
 * handle_alloc:
 *
 *   Emit code allocating an object of KLASS (FOR_BOX marks box allocations).
 *   Strategy: mono_object_new under MONO_OPT_SHARED; a corlib-specialized
 *   helper for out-of-line AOT throw paths; a managed GC allocator when one
 *   is available; otherwise the per-class allocation function.
 */
2981 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box)
2983 MonoInst *iargs [2];
2986 if (cfg->opt & MONO_OPT_SHARED) {
2987 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
2988 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
2990 alloc_ftn = mono_object_new;
2991 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
2992 /* This happens often in argument checking code, eg. throw new FooException... */
2993 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
2994 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
2995 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
2997 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2998 MonoMethod *managed_alloc = NULL;
/* Vtable creation failed: report a TypeLoadException via the cfg. */
3002 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
3003 cfg->exception_ptr = klass;
3007 #ifndef MONO_CROSS_COMPILE
3008 managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
3011 if (managed_alloc) {
3012 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3013 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3015 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* Some allocators take the instance size in pointer-sized words. */
3017 guint32 lw = vtable->klass->instance_size;
3018 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
3019 EMIT_NEW_ICONST (cfg, iargs [0], lw);
3020 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
3023 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3027 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/*
 * handle_alloc_from_inst:
 *
 *   Like handle_alloc (), but for shared generic code where the vtable is
 *   only available at runtime as DATA_INST; a managed allocator cannot be
 *   used (see the FIXME below), so this falls back to the generic icalls.
 */
3031 handle_alloc_from_inst (MonoCompile *cfg, MonoClass *klass, MonoInst *data_inst,
3034 MonoInst *iargs [2];
3035 MonoMethod *managed_alloc = NULL;
3039 FIXME: we cannot get managed_alloc here because we can't get
3040 the class's vtable (because it's not a closed class)
3042 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3043 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
3046 if (cfg->opt & MONO_OPT_SHARED) {
3047 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3048 iargs [1] = data_inst;
3049 alloc_ftn = mono_object_new;
/* managed_alloc is always NULL here (see FIXME) — branch kept for
 * symmetry with handle_alloc. */
3051 if (managed_alloc) {
3052 iargs [0] = data_inst;
3053 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3056 iargs [0] = data_inst;
3057 alloc_ftn = mono_object_new_specific;
3060 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3064 * Returns NULL and set the cfg exception on error.
/*
 * handle_box:
 *
 *   Emit the box sequence for VAL of type KLASS: Nullable<T> delegates to
 *   its managed Box method; otherwise allocate the object and store the
 *   value just past the MonoObject header.
 */
3067 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass)
3069 MonoInst *alloc, *ins;
3071 if (mono_class_is_nullable (klass)) {
3072 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3073 return mono_emit_method_call (cfg, method, &val, NULL);
3076 alloc = handle_alloc (cfg, klass, TRUE);
3080 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * handle_box_from_inst:
 *
 *   Shared-generic-code variant of handle_box (): the vtable comes from
 *   DATA_INST, and for Nullable<T> the Box method's code address is fetched
 *   from the rgctx and called indirectly.
 */
3086 handle_box_from_inst (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used, MonoInst *data_inst)
3088 MonoInst *alloc, *ins;
3090 if (mono_class_is_nullable (klass)) {
3091 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3092 /* FIXME: What if the class is shared? We might not
3093 have to get the method address from the RGCTX. */
3094 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3095 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3096 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3098 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
3100 alloc = handle_alloc_from_inst (cfg, klass, data_inst, TRUE);
3102 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
3109 * Returns NULL and set the cfg exception on error.
/*
 * handle_castclass:
 *
 *   Emit the castclass check for SRC against KLASS, throwing
 *   InvalidCastException on failure.  Complex cases (interfaces, arrays,
 *   nullables, MarshalByRef, sealed, variant generics — currently forced
 *   for ALL types by the 'TRUE ||' below) go through the
 *   mono_object_castclass icall; the inline fast path below is dead until
 *   the referenced FIXME is resolved.
 */
3112 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3114 MonoBasicBlock *is_null_bb;
3115 int obj_reg = src->dreg;
3116 int vtable_reg = alloc_preg (cfg);
3117 MonoInst *klass_inst = NULL;
3122 klass_inst = emit_get_rgctx_klass (cfg, context_used,
3123 klass, MONO_RGCTX_INFO_KLASS);
3125 // FIXME: This doesn't work yet (mcs/tests/gtest-304.cs fails)
3126 if (TRUE || (klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || klass->marshalbyref || (klass->flags & TYPE_ATTRIBUTE_SEALED) || mono_class_has_variant_generic_params (klass)) {
3127 /* Complex case, handle by an icall */
3133 args [1] = klass_inst;
3135 return mono_emit_jit_icall (cfg, mono_object_castclass, args);
3137 /* Simple case, handled by the code below */
/* null references always pass a castclass. */
3141 NEW_BBLOCK (cfg, is_null_bb);
3143 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3144 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3146 save_cast_details (cfg, klass, obj_reg);
3148 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3149 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3150 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
3152 int klass_reg = alloc_preg (cfg);
3154 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed non-array classes allow an exact-match fast path. */
3156 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3157 /* the remoting code is broken, access the class for now */
3158 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3159 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3161 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
3162 cfg->exception_ptr = klass;
3165 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3167 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3168 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3170 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3172 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3173 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
3177 MONO_START_BB (cfg, is_null_bb);
3179 reset_cast_details (cfg);
3185 * Returns NULL and set the cfg exception on error.
/*
 * handle_isinst:
 *
 *   Emit the 'isinst' check: result is SRC if the object is an instance of
 *   KLASS, NULL otherwise.  As in handle_castclass, the 'TRUE ||' below
 *   currently forces every type through the mono_object_isinst icall, so
 *   the inline sequence after it is dead until the FIXME is resolved.
 */
3188 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3191 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
3192 int obj_reg = src->dreg;
3193 int vtable_reg = alloc_preg (cfg);
3194 int res_reg = alloc_preg (cfg);
3195 MonoInst *klass_inst = NULL;
3198 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3200 // FIXME: This doesn't work yet (mcs/tests/gtest-304.cs fails)
3201 if (TRUE || (klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || klass->marshalbyref || (klass->flags & TYPE_ATTRIBUTE_SEALED) || mono_class_has_variant_generic_params (klass)) {
3204 /* Complex case, handle by an icall */
3210 args [1] = klass_inst;
3212 return mono_emit_jit_icall (cfg, mono_object_isinst, args);
3214 /* Simple case, the code below can handle it */
3218 NEW_BBLOCK (cfg, is_null_bb);
3219 NEW_BBLOCK (cfg, false_bb);
3220 NEW_BBLOCK (cfg, end_bb);
3222 /* Do the assignment at the beginning, so the other assignment can be if converted */
3223 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
3224 ins->type = STACK_OBJ;
/* null input: result is null ('is_null_bb' keeps the copied register). */
3227 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3228 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
3230 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3232 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3233 g_assert (!context_used);
3234 /* the is_null_bb target simply copies the input register to the output */
3235 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
3237 int klass_reg = alloc_preg (cfg);
/* Array case: check rank, then the element (cast) class. */
3240 int rank_reg = alloc_preg (cfg);
3241 int eclass_reg = alloc_preg (cfg);
3243 g_assert (!context_used);
3244 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3245 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
3246 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3247 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3248 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
3249 if (klass->cast_class == mono_defaults.object_class) {
3250 int parent_reg = alloc_preg (cfg);
3251 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
3252 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
3253 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3254 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3255 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
3256 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
3257 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3258 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3259 } else if (klass->cast_class == mono_defaults.enum_class) {
3260 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3261 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3262 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
3263 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3265 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
3266 /* Check that the object is a vector too */
3267 int bounds_reg = alloc_preg (cfg);
3268 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
3269 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
3270 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3273 /* the is_null_bb target simply copies the input register to the output */
3274 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3276 } else if (mono_class_is_nullable (klass)) {
3277 g_assert (!context_used);
3278 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3279 /* the is_null_bb target simply copies the input register to the output */
3280 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
3282 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3283 g_assert (!context_used);
3284 /* the remoting code is broken, access the class for now */
3285 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3286 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3288 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
3289 cfg->exception_ptr = klass;
3292 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3294 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3295 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3297 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3298 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
3300 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3301 /* the is_null_bb target simply copies the input register to the output */
3302 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
/* false path: result register becomes NULL. */
3307 MONO_START_BB (cfg, false_bb);
3309 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
3310 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3312 MONO_START_BB (cfg, is_null_bb);
3314 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *
 *   Remoting-aware isinst helper; see the comment below for the 0/1/2
 *   result encoding.  Transparent proxies whose type can't be decided
 *   statically yield 2 so the caller can fall back to a runtime check.
 */
3320 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3322 /* This opcode takes as input an object reference and a class, and returns:
3323 0) if the object is an instance of the class,
3324 1) if the object is not instance of the class,
3325 2) if the object is a proxy whose type cannot be determined */
3328 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
3329 int obj_reg = src->dreg;
3330 int dreg = alloc_ireg (cfg);
3332 int klass_reg = alloc_preg (cfg);
3334 NEW_BBLOCK (cfg, true_bb);
3335 NEW_BBLOCK (cfg, false_bb);
3336 NEW_BBLOCK (cfg, false2_bb);
3337 NEW_BBLOCK (cfg, end_bb);
3338 NEW_BBLOCK (cfg, no_proxy_bb);
/* null is never an instance: result 1. */
3340 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3341 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
3343 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3344 NEW_BBLOCK (cfg, interface_fail_bb);
3346 tmp_reg = alloc_preg (cfg);
3347 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3348 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
3349 MONO_START_BB (cfg, interface_fail_bb);
3350 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Interface check failed: only a proxy with custom type info can still
 * report 'unknown' (2); anything else is a plain failure (1). */
3352 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
3354 tmp_reg = alloc_preg (cfg);
3355 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3356 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3357 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
3359 tmp_reg = alloc_preg (cfg);
3360 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3361 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3363 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* Proxy: test against the remote class it stands for. */
3364 tmp_reg = alloc_preg (cfg);
3365 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3366 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3368 tmp_reg = alloc_preg (cfg);
3369 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3370 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3371 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3373 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
3374 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
3376 MONO_START_BB (cfg, no_proxy_bb);
3378 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
/* Merge: materialize result codes 1 / 2 / 0 into dreg. */
3381 MONO_START_BB (cfg, false_bb);
3383 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3384 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3386 MONO_START_BB (cfg, false2_bb);
3388 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
3389 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3391 MONO_START_BB (cfg, true_bb);
3393 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3395 MONO_START_BB (cfg, end_bb);
3398 MONO_INST_NEW (cfg, ins, OP_ICONST);
3400 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *
 *   Remoting-aware castclass helper; see the comment below for the 0/1
 *   result encoding.  Throws InvalidCastException inline for definite
 *   mismatches; returns 1 for proxies whose type can't be decided here.
 */
3406 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3408 /* This opcode takes as input an object reference and a class, and returns:
3409 0) if the object is an instance of the class,
3410 1) if the object is a proxy whose type cannot be determined
3411 an InvalidCastException exception is thrown otherwhise*/
3414 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
3415 int obj_reg = src->dreg;
3416 int dreg = alloc_ireg (cfg);
3417 int tmp_reg = alloc_preg (cfg);
3418 int klass_reg = alloc_preg (cfg);
3420 NEW_BBLOCK (cfg, end_bb);
3421 NEW_BBLOCK (cfg, ok_result_bb);
/* null always casts successfully: result 0. */
3423 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3424 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
3426 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3427 NEW_BBLOCK (cfg, interface_fail_bb);
3429 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3430 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
3431 MONO_START_BB (cfg, interface_fail_bb);
3432 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Not a transparent proxy => definite mismatch => throw. */
3434 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
3436 tmp_reg = alloc_preg (cfg);
3437 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3438 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3439 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
3441 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3442 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3445 NEW_BBLOCK (cfg, no_proxy_bb);
3447 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3448 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3449 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* Proxy: check against the class it stands for. */
3451 tmp_reg = alloc_preg (cfg);
3452 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3453 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3455 tmp_reg = alloc_preg (cfg);
3456 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3457 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3458 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3460 NEW_BBLOCK (cfg, fail_1_bb);
3462 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
3464 MONO_START_BB (cfg, fail_1_bb);
3466 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3467 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3469 MONO_START_BB (cfg, no_proxy_bb);
/* Ordinary object: full castclass check (throws on mismatch). */
3471 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
3474 MONO_START_BB (cfg, ok_result_bb);
3476 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3478 MONO_START_BB (cfg, end_bb);
3481 MONO_INST_NEW (cfg, ins, OP_ICONST);
3483 ins->type = STACK_I4;
3489 * Returns NULL and sets the cfg exception on error.
/*
 * handle_delegate_ctor:
 *
 *   Emit IR which allocates a delegate of type KLASS and initializes it inline,
 * mirroring what mono_delegate_ctor () would do at runtime: it stores the
 * target object, the MonoMethod, an (optional) per-domain code slot and the
 * invoke trampoline into the new delegate object.
 *
 * NOTE(review): several lines of this function are not visible in this view
 * (declarations, closing braces); comments below describe only what the
 * visible lines demonstrably do.
 */
3491 static G_GNUC_UNUSED MonoInst*
3492 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used)
3494 gpointer *trampoline;
3495 MonoInst *obj, *method_ins, *tramp_ins;
/* Allocate the delegate object itself. */
3499 obj = handle_alloc (cfg, klass, FALSE);
3503 /* Inline the contents of mono_delegate_ctor */
3505 /* Set target field */
3506 /* Optimize away setting of NULL target */
3507 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0))
3508 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
3510 /* Set method field */
3511 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
3512 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
3515 * To avoid looking up the compiled code belonging to the target method
3516 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
3517 * store it, and we fill it after the method has been compiled.
/* The code-slot optimization is skipped for AOT and dynamic methods. */
3519 if (!cfg->compile_aot && !method->dynamic) {
3520 MonoInst *code_slot_ins;
3523 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
3525 domain = mono_domain_get ();
/* method_code_hash maps MonoMethod* -> code slot; created lazily under the domain lock. */
3526 mono_domain_lock (domain);
3527 if (!domain_jit_info (domain)->method_code_hash)
3528 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
3529 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
3531 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
3532 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
3534 mono_domain_unlock (domain);
3536 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
3538 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
3541 /* Set invoke_impl field */
/* Under AOT the trampoline address is patched in later; otherwise it is created now. */
3542 if (cfg->compile_aot) {
3543 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, klass);
3545 trampoline = mono_create_delegate_trampoline (klass);
3546 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
3548 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
3550 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit the call which creates a multi-dimensional array of rank RANK with the
 * dimension arguments in SP, by calling the vararg icall mono_array_new_va ().
 */
3556 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
3558 MonoJitICallInfo *info;
3560 /* Need to register the icall so it gets an icall wrapper */
3561 info = mono_get_array_new_va_icall (rank);
3563 cfg->flags |= MONO_CFG_HAS_VARARGS;
3565 /* mono_array_new_va () needs a vararg calling convention */
/* LLVM cannot compile methods containing this vararg call, so fall back to the JIT. */
3566 cfg->disable_llvm = TRUE;
3568 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
3569 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * mono_emit_load_got_addr:
 *
 *   Materialize the GOT address into cfg->got_var, exactly once per method:
 * an OP_LOAD_GOTADDR is prepended to the entry basic block, and a dummy use
 * is appended to the exit block so the variable stays live for the backends.
 * No-op if there is no got_var or it was already allocated.
 */
3573 mono_emit_load_got_addr (MonoCompile *cfg)
3575 MonoInst *getaddr, *dummy_use;
3577 if (!cfg->got_var || cfg->got_var_allocated)
3580 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
3581 getaddr->dreg = cfg->got_var->dreg;
3583 /* Add it to the start of the first bblock */
/* Prepend manually if the entry block already has code, otherwise just append. */
3584 if (cfg->bb_entry->code) {
3585 getaddr->next = cfg->bb_entry->code;
3586 cfg->bb_entry->code = getaddr;
3589 MONO_ADD_INS (cfg->bb_entry, getaddr);
3591 cfg->got_var_allocated = TRUE;
3594 * Add a dummy use to keep the got_var alive, since real uses might
3595 * only be generated by the back ends.
3596 * Add it to end_bblock, so the variable's lifetime covers the whole
3598 * It would be better to make the usage of the got var explicit in all
3599 * cases when the backend needs it (i.e. calls, throw etc.), so this
3600 * wouldn't be needed.
3602 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
3603 MONO_ADD_INS (cfg->bb_exit, dummy_use);
3606 static int inline_limit;
3607 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 *   Decide whether METHOD is a candidate for inlining into the method being
 * compiled in CFG. Rejects generic sharing, deep inline nesting, runtime /
 * icall / noinline / synchronized / pinvoke methods, MarshalByRef classes,
 * methods with exception clauses, bodies above the size limit, classes whose
 * cctor would have to be triggered from inlined code, methods with
 * declarative security, and (under soft-float) R4 signatures.
 *
 * NOTE(review): this view of the function is missing lines (return
 * statements, some declarations); comments are limited to what is visible.
 */
3610 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
3612 MonoMethodHeader *header;
3614 #ifdef MONO_ARCH_SOFT_FLOAT
3615 MonoMethodSignature *sig = mono_method_signature (method);
/* Inlining is disabled entirely when compiling with a generic sharing context. */
3619 if (cfg->generic_sharing_context)
/* Cap the inline recursion depth. */
3622 if (cfg->inline_depth > 10)
3625 #ifdef MONO_ARCH_HAVE_LMF_OPS
/* NOTE(review): 'signature' below is presumably declared on an elided line
 * (mono_method_signature (method)) — confirm against the full source. */
3626 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3627 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
3628 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
3632 if (method->is_inflated)
3633 /* Avoid inflating the header */
3634 header = mono_method_get_header (((MonoMethodInflated*)method)->declaring);
3636 header = mono_method_get_header (method);
/* Hard disqualifiers: special implementation flags, pinvoke, remoting
 * (MarshalByRef), or any exception clause in the body. */
3638 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_RUNTIME) ||
3639 (method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3640 (method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
3641 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
3642 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL) ||
3643 (method->klass->marshalbyref) ||
3644 !header || header->num_clauses)
3647 /* also consider num_locals? */
3648 /* Do the size check early to avoid creating vtables */
/* Size limit is configurable via the MONO_INLINELIMIT environment variable;
 * the default is INLINE_LENGTH_LIMIT. Initialized lazily and cached. */
3649 if (!inline_limit_inited) {
3650 if (getenv ("MONO_INLINELIMIT"))
3651 inline_limit = atoi (getenv ("MONO_INLINELIMIT"));
3653 inline_limit = INLINE_LENGTH_LIMIT;
3654 inline_limit_inited = TRUE;
3656 if (header->code_size >= inline_limit)
3660 * if we can initialize the class of the method right away, we do,
3661 * otherwise we don't allow inlining if the class needs initialization,
3662 * since it would mean inserting a call to mono_runtime_class_init()
3663 * inside the inlined code
3665 if (!(cfg->opt & MONO_OPT_SHARED)) {
3666 if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
3667 if (cfg->run_cctors && method->klass->has_cctor) {
3668 /* FIXME: it would be easier and lazier to just use mono_class_try_get_vtable */
3669 if (!method->klass->runtime_info)
3670 /* No vtable created yet */
3672 vtable = mono_class_vtable (cfg->domain, method->klass);
3675 /* This makes so that inline cannot trigger */
3676 /* .cctors: too many apps depend on them */
3677 /* running with a specific order... */
3678 if (! vtable->initialized)
3680 mono_runtime_class_init (vtable);
3682 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
3683 if (!method->klass->runtime_info)
3684 /* No vtable created yet */
3686 vtable = mono_class_vtable (cfg->domain, method->klass);
3689 if (!vtable->initialized)
3694 * If we're compiling for shared code
3695 * the cctor will need to be run at aot method load time, for example,
3696 * or at the end of the compilation of the inlining method.
3698 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
3703 * CAS - do not inline methods with declarative security
3704 * Note: this has to be before any possible return TRUE;
3706 if (mono_method_has_declsec (method))
3709 #ifdef MONO_ARCH_SOFT_FLOAT
/* Soft-float targets cannot inline methods returning or taking R4 by value. */
3711 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
3713 for (i = 0; i < sig->param_count; ++i)
3714 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Decide whether a static field access in METHOD requires a class
 * initialization check for VTABLE's class at this point.
 * NOTE(review): the return statements for each branch are on lines elided
 * from this view; only the conditions themselves are visible here.
 */
3722 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoVTable *vtable)
/* Already-initialized classes need no check in JIT mode (AOT must keep it). */
3724 if (vtable->initialized && !cfg->compile_aot)
3727 if (vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
3730 if (!mono_class_needs_cctor_run (vtable->klass, method))
3733 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (vtable->klass == method->klass))
3734 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit IR computing the address of element INDEX of the one-dimensional
 * array ARR with element class KLASS, including a bounds check. On x86/amd64
 * a single LEA is emitted when the element size is a power of two (1/2/4/8);
 * otherwise a multiply + add sequence is used.
 */
3741 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index)
3745 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
3747 mono_class_init (klass);
3748 size = mono_class_array_element_size (klass);
3750 mult_reg = alloc_preg (cfg);
3751 array_reg = arr->dreg;
3752 index_reg = index->dreg;
3754 #if SIZEOF_REGISTER == 8
3755 /* The array reg is 64 bits but the index reg is only 32 */
3756 if (COMPILE_LLVM (cfg)) {
3758 index2_reg = index_reg;
3760 index2_reg = alloc_preg (cfg);
/* Sign-extend the 32 bit index to pointer width for the address computation. */
3761 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
3764 if (index->type == STACK_I8) {
3765 index2_reg = alloc_preg (cfg);
3766 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
3768 index2_reg = index_reg;
/* Throws IndexOutOfRangeException if index2_reg >= max_length. */
3772 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
3774 #if defined(TARGET_X86) || defined(TARGET_AMD64)
3775 if (size == 1 || size == 2 || size == 4 || size == 8) {
/* fast_log2 [size] gives the shift amount for the LEA scale factor. */
3776 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
3778 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
3779 ins->type = STACK_PTR;
/* Generic path: addr = arr + index * size + offsetof (MonoArray, vector). */
3785 add_reg = alloc_preg (cfg);
3787 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
3788 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
3789 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3790 ins->type = STACK_PTR;
3791 MONO_ADD_INS (cfg->cbb, ins);
3796 #ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit IR computing the address of element (INDEX_INS1, INDEX_INS2) of a
 * rank-2 array with element class KLASS. Both indices are range-checked
 * against the array's MonoArrayBounds (lower bound and length per dimension),
 * then the flattened offset is computed as
 * ((idx1 - low1) * len2 + (idx2 - low2)) * element_size.
 * Compiled out on architectures that emulate mul/div (depends on OP_LMUL).
 */
3798 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
3800 int bounds_reg = alloc_preg (cfg);
3801 int add_reg = alloc_preg (cfg);
3802 int mult_reg = alloc_preg (cfg);
3803 int mult2_reg = alloc_preg (cfg);
3804 int low1_reg = alloc_preg (cfg);
3805 int low2_reg = alloc_preg (cfg);
3806 int high1_reg = alloc_preg (cfg);
3807 int high2_reg = alloc_preg (cfg);
3808 int realidx1_reg = alloc_preg (cfg);
3809 int realidx2_reg = alloc_preg (cfg);
3810 int sum_reg = alloc_preg (cfg);
3815 mono_class_init (klass);
3816 size = mono_class_array_element_size (klass);
3818 index1 = index_ins1->dreg;
3819 index2 = index_ins2->dreg;
3821 /* range checking */
3822 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
3823 arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
/* Dimension 1: realidx1 = index1 - lower_bound; unsigned compare against length
 * catches both negative adjusted indices and indices >= length in one check. */
3825 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
3826 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3827 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
3828 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
3829 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
3830 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
3831 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Dimension 2: same check using the second MonoArrayBounds entry. */
3833 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
3834 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3835 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
3836 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
3837 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
3838 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
3839 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* addr = arr + ((realidx1 * len2) + realidx2) * size + offsetof (MonoArray, vector). */
3841 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
3842 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
3843 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
3844 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
3845 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3847 ins->type = STACK_MP;
3849 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 *   Emit the address computation for an Address/Get/Set call on a
 * multi-dimensional array method CMETHOD. Rank 1 and (optionally) rank 2 use
 * the inline fast paths above; higher ranks call the marshalled
 * array-address helper method.
 */
3856 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
3860 MonoMethod *addr_method;
/* For a Set call the last parameter is the value, not an index. */
3863 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
3866 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1]);
3868 #ifndef MONO_ARCH_EMULATE_MUL_DIV
3869 /* emit_ldelema_2 depends on OP_LMUL */
3870 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
3871 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
/* General case: call the generated array-address helper for this rank/size. */
3875 element_size = mono_class_array_element_size (cmethod->klass->element_class);
3876 addr_method = mono_marshal_get_array_address (rank, element_size);
3877 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/* Default break policy: honor every breakpoint request (see mono_set_break_policy). */
3882 static MonoBreakPolicy
3883 always_insert_breakpoint (MonoMethod *method)
3885 return MONO_BREAK_POLICY_ALWAYS;
3888 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
3891 * mono_set_break_policy:
3892 * policy_callback: the new callback function
3894 * Allow embedders to decide whether to actually obey breakpoint instructions
3895 * (both break IL instructions and Debugger.Break () method calls), for example
3896 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
3897 * untrusted or semi-trusted code.
3899 * @policy_callback will be called every time a break point instruction needs to
3900 * be inserted with the method argument being the method that calls Debugger.Break()
3901 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
3902 * if it wants the breakpoint to not be effective in the given method.
3903 * #MONO_BREAK_POLICY_ALWAYS is the default.
/*
 * mono_set_break_policy:
 *
 *   Install POLICY_CALLBACK as the break-policy hook; passing NULL restores
 * the default policy (always_insert_breakpoint). See the comment block above
 * for the callback contract.
 */
3906 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
3908 if (policy_callback)
3909 break_policy_func = policy_callback;
3911 break_policy_func = always_insert_breakpoint;
/*
 * should_insert_brekpoint:   (sic — the typo in the name is kept because
 * callers elsewhere in this file use it, e.g. the Debugger.Break intrinsic)
 *
 *   Consult the installed break policy to decide whether a breakpoint
 * instruction should actually be emitted for METHOD.
 * NOTE(review): the return statements for the case labels are on lines
 * elided from this view.
 */
3915 should_insert_brekpoint (MonoMethod *method) {
3916 switch (break_policy_func (method)) {
3917 case MONO_BREAK_POLICY_ALWAYS:
3919 case MONO_BREAK_POLICY_NEVER:
3921 case MONO_BREAK_POLICY_ON_DBG:
3922 return mono_debug_using_mono_debugger ();
3924 g_warning ("Incorrect value returned from break policy callback");
/*
 * mini_emit_inst_for_method:
 *
 *   Intrinsic expansion: recognize well-known corlib methods (String,
 * Object, Array, RuntimeHelpers, Thread, Monitor, Interlocked, Debugger,
 * Environment, Math, SIMD) and emit equivalent inline IR instead of a call.
 * Returns the result instruction, or falls through to the arch-specific
 * mono_arch_emit_inst_for_method () at the end.
 *
 * NOTE(review): many lines of this function (returns, braces, some
 * declarations) are elided from this view; comments annotate only the
 * visible code.
 */
3930 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
3932 MonoInst *ins = NULL;
/* Cache the RuntimeHelpers class lookup across calls. */
3934 static MonoClass *runtime_helpers_class = NULL;
3935 if (! runtime_helpers_class)
3936 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
3937 "System.Runtime.CompilerServices", "RuntimeHelpers");
/* ---- System.String intrinsics ---- */
3939 if (cmethod->klass == mono_defaults.string_class) {
3940 if (strcmp (cmethod->name, "get_Chars") == 0) {
3941 int dreg = alloc_ireg (cfg);
3942 int index_reg = alloc_preg (cfg);
3943 int mult_reg = alloc_preg (cfg);
3944 int add_reg = alloc_preg (cfg);
3946 #if SIZEOF_REGISTER == 8
3947 /* The array reg is 64 bits but the index reg is only 32 */
3948 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
3950 index_reg = args [1]->dreg;
3952 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
3954 #if defined(TARGET_X86) || defined(TARGET_AMD64)
3955 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
3956 add_reg = ins->dreg;
3957 /* Avoid a warning */
3959 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
3962 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
3963 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
3964 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
3965 add_reg, G_STRUCT_OFFSET (MonoString, chars));
3967 type_from_op (ins, NULL, NULL);
3969 } else if (strcmp (cmethod->name, "get_Length") == 0) {
3970 int dreg = alloc_ireg (cfg);
3971 /* Decompose later to allow more optimizations */
3972 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
3973 ins->type = STACK_I4;
3974 cfg->cbb->has_array_access = TRUE;
3975 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
3978 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
3979 int mult_reg = alloc_preg (cfg);
3980 int add_reg = alloc_preg (cfg);
3982 /* The corlib functions check for oob already. */
3983 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
3984 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
3985 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
/* ---- System.Object intrinsics ---- */
3988 } else if (cmethod->klass == mono_defaults.object_class) {
3990 if (strcmp (cmethod->name, "GetType") == 0) {
3991 int dreg = alloc_preg (cfg);
3992 int vt_reg = alloc_preg (cfg);
3993 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3994 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
3995 type_from_op (ins, NULL, NULL);
3998 #if !defined(MONO_ARCH_EMULATE_MUL_DIV) && !defined(HAVE_MOVING_COLLECTOR)
/* Hash the object address (valid only with a non-moving GC, hence the guard). */
3999 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0) {
4000 int dreg = alloc_ireg (cfg);
4001 int t1 = alloc_ireg (cfg);
4003 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
4004 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
4005 ins->type = STACK_I4;
4009 } else if (strcmp (cmethod->name, ".ctor") == 0) {
4010 MONO_INST_NEW (cfg, ins, OP_NOP);
4011 MONO_ADD_INS (cfg->cbb, ins);
/* ---- System.Array intrinsics ---- */
4015 } else if (cmethod->klass == mono_defaults.array_class) {
4016 if (cmethod->name [0] != 'g')
4019 if (strcmp (cmethod->name, "get_Rank") == 0) {
4020 int dreg = alloc_ireg (cfg);
4021 int vtable_reg = alloc_preg (cfg);
4022 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
4023 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
4024 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
4025 vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
4026 type_from_op (ins, NULL, NULL);
4029 } else if (strcmp (cmethod->name, "get_Length") == 0) {
4030 int dreg = alloc_ireg (cfg);
4032 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4033 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
4034 type_from_op (ins, NULL, NULL);
/* ---- RuntimeHelpers ---- */
4039 } else if (cmethod->klass == runtime_helpers_class) {
4041 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
4042 EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
/* ---- System.Threading.Thread ---- */
4046 } else if (cmethod->klass == mono_defaults.thread_class) {
4047 if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
4048 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
4049 MONO_ADD_INS (cfg->cbb, ins);
4051 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
4052 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
4053 MONO_ADD_INS (cfg->cbb, ins);
/* ---- System.Threading.Monitor fast paths ---- */
4056 } else if (cmethod->klass == mono_defaults.monitor_class) {
4057 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
4058 if (strcmp (cmethod->name, "Enter") == 0) {
4061 if (COMPILE_LLVM (cfg)) {
4063 * Pass the argument normally, the LLVM backend will handle the
4064 * calling convention problems.
4066 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
/* Non-LLVM path: pass the object in the dedicated monitor register. */
4068 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
4069 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
4070 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
4071 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
4074 return (MonoInst*)call;
4075 } else if (strcmp (cmethod->name, "Exit") == 0) {
4078 if (COMPILE_LLVM (cfg)) {
4079 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
4081 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
4082 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
4083 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
4084 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
4087 return (MonoInst*)call;
4089 #elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
4090 MonoMethod *fast_method = NULL;
4092 /* Avoid infinite recursion */
4093 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
4094 (strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
4095 strcmp (cfg->method->name, "FastMonitorExit") == 0))
4098 if (strcmp (cmethod->name, "Enter") == 0 ||
4099 strcmp (cmethod->name, "Exit") == 0)
4100 fast_method = mono_monitor_get_fast_path (cmethod);
4104 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
/* ---- System.Array.GetGenericValueImpl: inline element copy ---- */
4106 } else if (mini_class_is_system_array (cmethod->klass) &&
4107 strcmp (cmethod->name, "GetGenericValueImpl") == 0) {
4108 MonoInst *addr, *store, *load;
4109 MonoClass *eklass = mono_class_from_mono_type (fsig->params [1]);
4111 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1]);
4112 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
4113 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/* ---- System.Threading.Interlocked intrinsics ---- */
4115 } else if (cmethod->klass->image == mono_defaults.corlib &&
4116 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
4117 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
4120 #if SIZEOF_REGISTER == 8
4121 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
4122 /* 64 bit reads are already atomic */
4123 MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
4124 ins->dreg = mono_alloc_preg (cfg);
4125 ins->inst_basereg = args [0]->dreg;
4126 ins->inst_offset = 0;
4127 MONO_ADD_INS (cfg->cbb, ins);
4131 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
/* Increment/Decrement/Add map to the atomic-add opcode with constants 1/-1/arg. */
4132 if (strcmp (cmethod->name, "Increment") == 0) {
4133 MonoInst *ins_iconst;
4136 if (fsig->params [0]->type == MONO_TYPE_I4)
4137 opcode = OP_ATOMIC_ADD_NEW_I4;
4138 #if SIZEOF_REGISTER == 8
4139 else if (fsig->params [0]->type == MONO_TYPE_I8)
4140 opcode = OP_ATOMIC_ADD_NEW_I8;
4143 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4144 ins_iconst->inst_c0 = 1;
4145 ins_iconst->dreg = mono_alloc_ireg (cfg);
4146 MONO_ADD_INS (cfg->cbb, ins_iconst);
4148 MONO_INST_NEW (cfg, ins, opcode);
4149 ins->dreg = mono_alloc_ireg (cfg);
4150 ins->inst_basereg = args [0]->dreg;
4151 ins->inst_offset = 0;
4152 ins->sreg2 = ins_iconst->dreg;
4153 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4154 MONO_ADD_INS (cfg->cbb, ins);
4156 } else if (strcmp (cmethod->name, "Decrement") == 0) {
4157 MonoInst *ins_iconst;
4160 if (fsig->params [0]->type == MONO_TYPE_I4)
4161 opcode = OP_ATOMIC_ADD_NEW_I4;
4162 #if SIZEOF_REGISTER == 8
4163 else if (fsig->params [0]->type == MONO_TYPE_I8)
4164 opcode = OP_ATOMIC_ADD_NEW_I8;
4167 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4168 ins_iconst->inst_c0 = -1;
4169 ins_iconst->dreg = mono_alloc_ireg (cfg);
4170 MONO_ADD_INS (cfg->cbb, ins_iconst);
4172 MONO_INST_NEW (cfg, ins, opcode);
4173 ins->dreg = mono_alloc_ireg (cfg);
4174 ins->inst_basereg = args [0]->dreg;
4175 ins->inst_offset = 0;
4176 ins->sreg2 = ins_iconst->dreg;
4177 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4178 MONO_ADD_INS (cfg->cbb, ins);
4180 } else if (strcmp (cmethod->name, "Add") == 0) {
4183 if (fsig->params [0]->type == MONO_TYPE_I4)
4184 opcode = OP_ATOMIC_ADD_NEW_I4;
4185 #if SIZEOF_REGISTER == 8
4186 else if (fsig->params [0]->type == MONO_TYPE_I8)
4187 opcode = OP_ATOMIC_ADD_NEW_I8;
4191 MONO_INST_NEW (cfg, ins, opcode);
4192 ins->dreg = mono_alloc_ireg (cfg);
4193 ins->inst_basereg = args [0]->dreg;
4194 ins->inst_offset = 0;
4195 ins->sreg2 = args [1]->dreg;
4196 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4197 MONO_ADD_INS (cfg->cbb, ins);
4200 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
4202 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
4203 if (strcmp (cmethod->name, "Exchange") == 0) {
4205 gboolean is_ref = fsig->params [0]->type == MONO_TYPE_OBJECT;
4207 if (fsig->params [0]->type == MONO_TYPE_I4)
4208 opcode = OP_ATOMIC_EXCHANGE_I4;
4209 #if SIZEOF_REGISTER == 8
4210 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I8) ||
4211 (fsig->params [0]->type == MONO_TYPE_I))
4212 opcode = OP_ATOMIC_EXCHANGE_I8;
4214 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I))
4215 opcode = OP_ATOMIC_EXCHANGE_I4;
4220 MONO_INST_NEW (cfg, ins, opcode);
4221 ins->dreg = mono_alloc_ireg (cfg);
4222 ins->inst_basereg = args [0]->dreg;
4223 ins->inst_offset = 0;
4224 ins->sreg2 = args [1]->dreg;
4225 MONO_ADD_INS (cfg->cbb, ins);
4227 switch (fsig->params [0]->type) {
4229 ins->type = STACK_I4;
4233 ins->type = STACK_I8;
4235 case MONO_TYPE_OBJECT:
4236 ins->type = STACK_OBJ;
4239 g_assert_not_reached ();
4242 #if HAVE_WRITE_BARRIERS
/* An exchanged reference needs a GC write barrier on the location. */
4244 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
4245 mono_emit_method_call (cfg, write_barrier, &args [0], NULL);
4249 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
4251 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS
4252 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
4254 gboolean is_ref = MONO_TYPE_IS_REFERENCE (fsig->params [1]);
4255 if (fsig->params [1]->type == MONO_TYPE_I4)
4257 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I)
4258 size = sizeof (gpointer);
4259 else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I4)
4262 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
4263 ins->dreg = alloc_ireg (cfg);
4264 ins->sreg1 = args [0]->dreg;
4265 ins->sreg2 = args [1]->dreg;
4266 ins->sreg3 = args [2]->dreg;
4267 ins->type = STACK_I4;
4268 MONO_ADD_INS (cfg->cbb, ins);
4269 } else if (size == 8) {
4270 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
4271 ins->dreg = alloc_ireg (cfg);
4272 ins->sreg1 = args [0]->dreg;
4273 ins->sreg2 = args [1]->dreg;
4274 ins->sreg3 = args [2]->dreg;
4275 ins->type = STACK_I8;
4276 MONO_ADD_INS (cfg->cbb, ins);
4278 /* g_assert_not_reached (); */
4280 #if HAVE_WRITE_BARRIERS
4282 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
4283 mono_emit_method_call (cfg, write_barrier, &args [0], NULL);
4287 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS */
/* ---- Misc corlib: Debugger.Break, Environment.get_IsRunningOnWindows ---- */
4291 } else if (cmethod->klass->image == mono_defaults.corlib) {
4292 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
4293 && strcmp (cmethod->klass->name, "Debugger") == 0) {
4294 if (should_insert_brekpoint (cfg->method))
4295 MONO_INST_NEW (cfg, ins, OP_BREAK);
4296 /* Policy said no: emit a NOP instead of the break. */
4297 MONO_INST_NEW (cfg, ins, OP_NOP);
4298 MONO_ADD_INS (cfg->cbb, ins);
4301 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
4302 && strcmp (cmethod->klass->name, "Environment") == 0) {
4304 EMIT_NEW_ICONST (cfg, ins, 1);
4306 EMIT_NEW_ICONST (cfg, ins, 0);
4310 } else if (cmethod->klass == mono_defaults.math_class) {
4312 * There is general branch-based code for Min/Max, but it does not work for
4314 * http://everything2.com/?node_id=1051618
4318 #ifdef MONO_ARCH_SIMD_INTRINSICS
4319 if (cfg->opt & MONO_OPT_SIMD) {
4320 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
/* Not a recognized intrinsic: let the architecture backend try. */
4326 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
4330 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *
 *   Redirect a call to METHOD to a more efficient replacement when one is
 * known. Currently handles only String.InternalAllocateStr, which is rewritten
 * into the managed allocator (skipped when the string-allocation profiler
 * event is enabled, so the profiler still sees the allocation).
 */
4333 inline static MonoInst*
4334 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
4335 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
4337 if (method->klass == mono_defaults.string_class) {
4338 /* managed string allocation support */
4339 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_STRING_ALLOC)) {
4340 MonoInst *iargs [2];
4341 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4342 MonoMethod *managed_alloc = NULL;
4344 g_assert (vtable); /* Should not fail since it is System.String */
4345 #ifndef MONO_CROSS_COMPILE
4346 managed_alloc = mono_gc_get_managed_allocator (vtable, FALSE);
/* Call the managed allocator with (vtable, length). */
4350 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4351 iargs [1] = args [0];
4352 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
/*
 * mono_save_args:
 *
 *   During inlining, create an OP_LOCAL variable for each argument (including
 * the implicit 'this') and emit stores of the caller's stack values SP into
 * them, so the inlined body can reference cfg->args [] as usual.
 */
4359 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
4361 MonoInst *store, *temp;
4364 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
/* For 'this' (i == 0 with hasthis) the static type is taken from the stack value. */
4365 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
4368 * FIXME: We should use *args++ = sp [0], but that would mean the arg
4369 * would be different than the MonoInst's used to represent arguments, and
4370 * the ldelema implementation can't deal with that.
4371 * Solution: When ldelema is used on an inline argument, create a var for
4372 * it, emit ldelema on that var, and emit the saving code below in
4373 * inline_method () if needed.
4375 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
4376 cfg->args [i] = temp;
4377 /* This uses cfg->args [i] which is set by the preceding line */
4378 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
4379 store->cil_code = sp [0]->cil_code;
4384 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
4385 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
4387 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *
 *   Debug aid: only allow inlining of CALLED_METHOD if its full name starts
 * with the prefix in the MONO_INLINE_CALLED_METHOD_NAME_LIMIT environment
 * variable. The prefix is read once and cached; an unset variable means no
 * restriction.
 */
4389 check_inline_called_method_name_limit (MonoMethod *called_method)
4392 static char *limit = NULL;
4394 if (limit == NULL) {
4395 char *limit_string = getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
4397 if (limit_string != NULL)
4398 limit = limit_string;
4400 limit = (char *) "";
4403 if (limit [0] != '\0') {
4404 char *called_method_name = mono_method_full_name (called_method, TRUE);
/* Prefix match: compare only the first strlen (limit) characters. */
4406 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
4407 g_free (called_method_name);
4409 //return (strncmp_result <= 0);
4410 return (strncmp_result == 0);
4417 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *
 *   Debug aid mirroring check_inline_called_method_name_limit (): only allow
 * inlining when the CALLER's full name starts with the prefix in the
 * MONO_INLINE_CALLER_METHOD_NAME_LIMIT environment variable.
 */
4419 check_inline_caller_method_name_limit (MonoMethod *caller_method)
4422 static char *limit = NULL;
4424 if (limit == NULL) {
4425 char *limit_string = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
4426 if (limit_string != NULL) {
4427 limit = limit_string;
4429 limit = (char *) "";
4433 if (limit [0] != '\0') {
4434 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
/* Prefix match, same scheme as the called-method variant. */
4436 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
4437 g_free (caller_method_name);
4439 //return (strncmp_result <= 0);
4440 return (strncmp_result == 0);
4448 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
4449 guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_allways)
4451 MonoInst *ins, *rvar = NULL;
4452 MonoMethodHeader *cheader;
4453 MonoBasicBlock *ebblock, *sbblock;
4455 MonoMethod *prev_inlined_method;
4456 MonoInst **prev_locals, **prev_args;
4457 MonoType **prev_arg_types;
4458 guint prev_real_offset;
4459 GHashTable *prev_cbb_hash;
4460 MonoBasicBlock **prev_cil_offset_to_bb;
4461 MonoBasicBlock *prev_cbb;
4462 unsigned char* prev_cil_start;
4463 guint32 prev_cil_offset_to_bb_len;
4464 MonoMethod *prev_current_method;
4465 MonoGenericContext *prev_generic_context;
4466 gboolean ret_var_set, prev_ret_var_set;
4468 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
4470 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
4471 if ((! inline_allways) && ! check_inline_called_method_name_limit (cmethod))
4474 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
4475 if ((! inline_allways) && ! check_inline_caller_method_name_limit (cfg->method))
4479 if (cfg->verbose_level > 2)
4480 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4482 if (!cmethod->inline_info) {
4483 mono_jit_stats.inlineable_methods++;
4484 cmethod->inline_info = 1;
4486 /* allocate space to store the return value */
4487 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
4488 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
4491 /* allocate local variables */
4492 cheader = mono_method_get_header (cmethod);
4493 prev_locals = cfg->locals;
4494 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
4495 for (i = 0; i < cheader->num_locals; ++i)
4496 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
4498 /* allocate start and end blocks */
4499 /* This is needed so if the inline is aborted, we can clean up */
4500 NEW_BBLOCK (cfg, sbblock);
4501 sbblock->real_offset = real_offset;
4503 NEW_BBLOCK (cfg, ebblock);
4504 ebblock->block_num = cfg->num_bblocks++;
4505 ebblock->real_offset = real_offset;
4507 prev_args = cfg->args;
4508 prev_arg_types = cfg->arg_types;
4509 prev_inlined_method = cfg->inlined_method;
4510 cfg->inlined_method = cmethod;
4511 cfg->ret_var_set = FALSE;
4512 cfg->inline_depth ++;
4513 prev_real_offset = cfg->real_offset;
4514 prev_cbb_hash = cfg->cbb_hash;
4515 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
4516 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
4517 prev_cil_start = cfg->cil_start;
4518 prev_cbb = cfg->cbb;
4519 prev_current_method = cfg->current_method;
4520 prev_generic_context = cfg->generic_context;
4521 prev_ret_var_set = cfg->ret_var_set;
4523 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, *ip == CEE_CALLVIRT);
4525 ret_var_set = cfg->ret_var_set;
4527 cfg->inlined_method = prev_inlined_method;
4528 cfg->real_offset = prev_real_offset;
4529 cfg->cbb_hash = prev_cbb_hash;
4530 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
4531 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
4532 cfg->cil_start = prev_cil_start;
4533 cfg->locals = prev_locals;
4534 cfg->args = prev_args;
4535 cfg->arg_types = prev_arg_types;
4536 cfg->current_method = prev_current_method;
4537 cfg->generic_context = prev_generic_context;
4538 cfg->ret_var_set = prev_ret_var_set;
4539 cfg->inline_depth --;
4541 if ((costs >= 0 && costs < 60) || inline_allways) {
4542 if (cfg->verbose_level > 2)
4543 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4545 mono_jit_stats.inlined_methods++;
4547 /* always add some code to avoid block split failures */
4548 MONO_INST_NEW (cfg, ins, OP_NOP);
4549 MONO_ADD_INS (prev_cbb, ins);
4551 prev_cbb->next_bb = sbblock;
4552 link_bblock (cfg, prev_cbb, sbblock);
4555 * Get rid of the begin and end bblocks if possible to aid local
4558 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
4560 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
4561 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
4563 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
4564 MonoBasicBlock *prev = ebblock->in_bb [0];
4565 mono_merge_basic_blocks (cfg, prev, ebblock);
4567 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
4568 mono_merge_basic_blocks (cfg, prev_cbb, prev);
4569 cfg->cbb = prev_cbb;
4577 * If the inlined method contains only a throw, then the ret var is not
4578 * set, so set it to a dummy value.
4581 static double r8_0 = 0.0;
4583 switch (rvar->type) {
4585 MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
4588 MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
4593 MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
4596 MONO_INST_NEW (cfg, ins, OP_R8CONST);
4597 ins->type = STACK_R8;
4598 ins->inst_p0 = (void*)&r8_0;
4599 ins->dreg = rvar->dreg;
4600 MONO_ADD_INS (cfg->cbb, ins);
4603 MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (fsig->ret));
4606 g_assert_not_reached ();
4610 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
4615 if (cfg->verbose_level > 2)
4616 printf ("INLINE ABORTED %s\n", mono_method_full_name (cmethod, TRUE));
4617 cfg->exception_type = MONO_EXCEPTION_NONE;
4618 mono_loader_clear_error ();
4620 /* This gets rid of the newly added bblocks */
4621 cfg->cbb = prev_cbb;
4627 * Some of these comments may well be out-of-date.
4628 * Design decisions: we do a single pass over the IL code (and we do bblock
4629 * splitting/merging in the few cases when it's required: a back jump to an IL
4630 * address that was not already seen as bblock starting point).
4631 * Code is validated as we go (full verification is still better left to metadata/verify.c).
4632 * Complex operations are decomposed in simpler ones right away. We need to let the
4633 * arch-specific code peek and poke inside this process somehow (except when the
4634 * optimizations can take advantage of the full semantic info of coarse opcodes).
4635 * All the opcodes of the form opcode.s are 'normalized' to opcode.
4636 * MonoInst->opcode initially is the IL opcode or some simplification of that
4637 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
4638 * opcode with value bigger than OP_LAST.
4639 * At this point the IR can be handed over to an interpreter, a dumb code generator
4640 * or to the optimizing code generator that will translate it to SSA form.
4642 * Profiling directed optimizations.
4643 * We may compile by default with few or no optimizations and instrument the code
4644 * or the user may indicate what methods to optimize the most either in a config file
4645 * or through repeated runs where the compiler applies offline the optimizations to
4646 * each method and then decides if it was worth it.
/*
 * Verification helper macros used by the translation loop in
 * mono_method_to_ir (). Each one bails out through UNVERIFIED (defined
 * elsewhere in this file) when the IL being decoded violates an
 * invariant: stack depth, argument/local index range, opcode size, etc.
 *
 * NOTE(review): these expand to a bare `if (...) UNVERIFIED` with no
 * do/while(0) wrapper, so they are not safe directly inside an
 * if/else arm — keep usages as stand-alone statements.
 */
4649 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
/* stack underflow: need at least `num` operands on the eval stack */
4650 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
/* stack overflow: pushing `num` values must not exceed the header's declared max_stack */
4651 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
/* the unsigned casts reject negative indexes and out-of-range ones in a single compare */
4652 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
4653 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
/* the instruction's operand bytes must not run past the end of the IL stream */
4654 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
4655 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
4656 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; goto load_error;}
4658 /* offset from br.s -> br like opcodes */
/* i.e. CEE_BR - CEE_BR_S: adding this converts a short-form (1-byte offset)
 * branch opcode to its corresponding long-form (4-byte offset) opcode. */
4659 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *   Return TRUE when the IL address IP still belongs to basic block BB,
 *   i.e. no *other* bblock starts at that CIL offset. Callers use this
 *   to decide whether a multi-opcode peephole (e.g. the ldloca/initobj
 *   optimization below) may look ahead without crossing a block boundary.
 */
4662 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
4664 	MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
/* NULL means no bblock starts at ip, so ip is inside the current block */
4666 	return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *   Single forward pass over the IL in [START, END) that creates (via
 *   GET_BBLOCK) a MonoBasicBlock at every branch/switch target and at the
 *   instruction following each branch, so that mono_method_to_ir () knows
 *   all block boundaries before translating. Also marks the block that
 *   contains a CEE_THROW as out-of-line (cold) code.
 *   On a decoding failure the error ip is presumably reported through
 *   *POS and a non-zero value returned — TODO confirm against the caller
 *   `if (get_basic_blocks (...))` error check; the error paths are not
 *   visible here.
 */
4670 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
4672 	unsigned char *ip = start;
4673 	unsigned char *target;
4676 	MonoBasicBlock *bblock;
4677 	const MonoOpcode *opcode;
/* cli_addr is the IL offset of the current instruction; mono_opcode_value
 * advances ip past the (possibly multi-byte) opcode itself */
4680 		cli_addr = ip - start;
4681 		i = mono_opcode_value ((const guint8 **)&ip, end);
4684 		opcode = &mono_opcodes [i];
/* dispatch on the operand encoding to find how far ip must advance and
 * whether the instruction introduces branch targets */
4685 		switch (opcode->argument) {
4686 		case MonoInlineNone:
4689 		case MonoInlineString:
4690 		case MonoInlineType:
4691 		case MonoInlineField:
4692 		case MonoInlineMethod:
4695 		case MonoShortInlineR:
4702 		case MonoShortInlineVar:
4703 		case MonoShortInlineI:
/* short branch: 1-byte signed displacement relative to the next instruction (offset 2) */
4706 		case MonoShortInlineBrTarget:
4707 			target = start + cli_addr + 2 + (signed char)ip [1];
4708 			GET_BBLOCK (cfg, bblock, target);
/* the fall-through successor after the branch also starts a block */
4711 			GET_BBLOCK (cfg, bblock, ip);
/* long branch: 4-byte signed displacement relative to the next instruction (offset 5) */
4713 		case MonoInlineBrTarget:
4714 			target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
4715 			GET_BBLOCK (cfg, bblock, target);
4718 			GET_BBLOCK (cfg, bblock, ip);
/* switch: opcode (1) + count (4) + n 4-byte targets, all relative to the end of the instruction */
4720 		case MonoInlineSwitch: {
4721 			guint32 n = read32 (ip + 1);
4724 			cli_addr += 5 + 4 * n;
4725 			target = start + cli_addr;
/* fall-through after the whole switch instruction */
4726 			GET_BBLOCK (cfg, bblock, target);
4728 			for (j = 0; j < n; ++j) {
4729 				target = start + cli_addr + (gint32)read32 (ip);
4730 				GET_BBLOCK (cfg, bblock, target);
4740 			g_assert_not_reached ();
4743 		if (i == CEE_THROW) {
4744 			unsigned char *bb_start = ip - 1;
4746 			/* Find the start of the bblock containing the throw */
/* walk backwards until an offset that already has a bblock is found */
4748 			while ((bb_start >= start) && !bblock) {
4749 				bblock = cfg->cil_offset_to_bb [(bb_start) - start];
/* throwing blocks are cold: move them out of line for better code layout */
4753 			bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *   Resolve the method referenced by TOKEN in the context of method M.
 *   For wrapper methods the token indexes the wrapper's private data
 *   rather than the image's metadata tables. "allow open" means methods
 *   on open constructed types (containing unbound type variables) are
 *   returned as-is; mini_get_method () below filters those out when
 *   generic sharing is not in effect.
 */
4762 static inline MonoMethod *
4763 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4767 	if (m->wrapper_type != MONO_WRAPPER_NONE)
4768 		return mono_method_get_wrapper_data (m, token);
4770 	method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 *   Like mini_get_method_allow_open (), but when CFG is not compiling
 *   shared generic code the resolved method must not live on an open
 *   constructed type (such a method cannot be compiled directly — it is
 *   presumably rejected/NULLed in the elided branch; verify in caller).
 *   CFG may be NULL (see initialize_array_data ()), in which case the
 *   open-type filter is skipped.
 */
4775 static inline MonoMethod *
4776 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4778 	MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
4780 	if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 *   Resolve the class referenced by TOKEN in the context of METHOD.
 *   Wrapper methods store the class directly in their wrapper data;
 *   otherwise the token is looked up in the image's metadata with the
 *   given generic CONTEXT. The class is initialized before being
 *   returned (NULL handling for a failed lookup is in the elided lines).
 */
4786 static inline MonoClass*
4787 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
4791 	if (method->wrapper_type != MONO_WRAPPER_NONE)
4792 		klass = mono_method_get_wrapper_data (method, token);
4794 		klass = mono_class_get_full (method->klass->image, token, context);
4796 		mono_class_init (klass);
4801  * Returns TRUE if the JIT should abort inlining because "callee"
4802  * is influenced by security attributes.
/*
 * When CALLER is itself being inlined (cfg->method != caller) and CALLEE
 * carries declarative security, inlining must stop so the linkdemand can
 * be evaluated against the real caller. Otherwise the CAS linkdemand is
 * evaluated here at JIT time: on an ECMA "partial trust" demand, code
 * throwing a SecurityException is emitted before the call site; on any
 * other failure the compile is flagged with
 * MONO_EXCEPTION_SECURITY_LINKDEMAND (without overwriting an earlier
 * recorded exception).
 */
4805 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
4809 	if ((cfg->method != caller) && mono_method_has_declsec (callee)) {
4813 	result = mono_declsec_linkdemand (cfg->domain, caller, callee);
4814 	if (result == MONO_JIT_SECURITY_OK)
4817 	if (result == MONO_JIT_LINKDEMAND_ECMA) {
4818 		/* Generate code to throw a SecurityException before the actual call/link */
4819 		MonoSecurityManager *secman = mono_security_manager_get_methods ();
/* arg 0: constant 4 — presumably a SecurityAction/demand kind id, verify
 * against SecurityManager.LinkDemandSecurityException's managed signature */
4822 		NEW_ICONST (cfg, args [0], 4);
4823 		NEW_METHODCONST (cfg, args [1], caller);
4824 		mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
4825 	} else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
4826 		/* don't hide previous results */
4827 		cfg->exception_type = MONO_EXCEPTION_SECURITY_LINKDEMAND;
4828 		cfg->exception_data = result;
/*
 * throw_exception:
 *   Return the managed SecurityManager.ThrowException(Exception) method,
 *   resolving it once and caching it in a function-local static.
 *   NOTE(review): the lazy initialization is not visibly synchronized;
 *   presumably safe because resolution is idempotent — confirm if this
 *   can run on multiple JIT threads.
 */
4836 throw_exception (void)
4838 	static MonoMethod *method = NULL;
4841 		MonoSecurityManager *secman = mono_security_manager_get_methods ();
4842 		method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *   Emit IR that throws the pre-constructed exception object EX at the
 *   current point in CFG, by calling the managed
 *   SecurityManager.ThrowException helper with EX as a pointer constant.
 */
4849 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
4851 	MonoMethod *thrower = throw_exception ();
4854 	EMIT_NEW_PCONST (cfg, args [0], ex);
4855 	mono_emit_method_call (cfg, thrower, args, NULL);
4859  * Return the original method if a wrapper is specified. We can only access
4860  * the custom attributes from the original method.
/*
 * Used by the CoreCLR security checks below: security levels come from
 * custom attributes, which wrappers do not carry. Native-to-managed
 * wrappers are treated like Critical callers (returned unchanged);
 * other wrappers are mapped back to the method they wrap.
 */
4863 get_original_method (MonoMethod *method)
/* not a wrapper: the method itself carries the attributes */
4865 	if (method->wrapper_type == MONO_WRAPPER_NONE)
4868 	/* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
4869 	if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
4872 	/* in other cases we need to find the original method */
4873 	return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *   CoreCLR security check for field access: when FIELD's declaring class
 *   is [SecurityCritical] and the (unwrapped) CALLER is Transparent, emit
 *   code throwing a FieldAccessException at this point instead of
 *   allowing the access. SafeCritical/Critical callers pass through.
 */
4877 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
4878 										   MonoBasicBlock *bblock, unsigned char *ip)
4880 	/* there's no restriction to access Transparent or SafeCritical fields, so we only check calls to Critical methods */
4881 	if (mono_security_core_clr_class_level (mono_field_get_parent (field)) != MONO_SECURITY_CORE_CLR_CRITICAL)
4884 	/* we can't get the coreclr security level on wrappers since they don't have the attributes */
4885 	caller = get_original_method (caller);
4889 	/* caller is Critical! only SafeCritical and Critical callers can access the field, so we throw if caller is Transparent */
4890 	if (mono_security_core_clr_method_level (caller, TRUE) == MONO_SECURITY_CORE_CLR_TRANSPARENT)
4891 		emit_throw_exception (cfg, mono_get_exception_field_access ());
/*
 * ensure_method_is_allowed_to_call_method:
 *   CoreCLR security check for calls, mirroring the field check above:
 *   when CALLEE is [SecurityCritical] and the (unwrapped) CALLER is
 *   Transparent, emit code throwing a MethodAccessException at this
 *   point instead of allowing the call.
 */
4895 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
4896 										 MonoBasicBlock *bblock, unsigned char *ip)
4898 	/* there's no restriction to call Transparent or SafeCritical code, so we only check calls to Critical methods */
4899 	if (mono_security_core_clr_method_level (callee, TRUE) != MONO_SECURITY_CORE_CLR_CRITICAL)
4902 	/* we can't get the coreclr security level on wrappers since they don't have the attributes */
4903 	caller = get_original_method (caller);
4907 	/* caller is Critical! only SafeCritical and Critical callers can call it, so we throw if the caller is Transparent */
4908 	if (mono_security_core_clr_method_level (caller, TRUE) == MONO_SECURITY_CORE_CLR_TRANSPARENT)
4909 		emit_throw_exception (cfg, mono_get_exception_method_access ());
4913  * Check that the IL instructions at ip are the array initialization
4914  * sequence and return the pointer to the data and the size.
/*
 * Pattern-matches the C# compiler's static-array-initialization idiom so
 * the JIT can replace the RuntimeHelpers.InitializeArray call with a
 * direct copy from the field's RVA data. Returns the data pointer (or,
 * for AOT on a non-dynamic image, the RVA wrapped as a pointer) and
 * fills *OUT_SIZE / *OUT_FIELD_TOKEN; returns NULL when the pattern or
 * the element type does not qualify.
 */
4917 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
4920 	 * newarr[System.Int32]
4922 	 * ldtoken field valuetype ...
4923 	 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* byte layout: dup(1) ldtoken(1)+token(4, high byte 0x4 = FieldDef table) call(1)+token(4) */
4925 	if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
4926 		guint32 token = read32 (ip + 7);
4927 		guint32 field_token = read32 (ip + 2);
/* low 24 bits of the token are the row index into the field table */
4928 		guint32 field_index = field_token & 0xffffff;
4930 		const char *data_ptr;
4932 		MonoMethod *cmethod;
4933 		MonoClass *dummy_class;
4934 		MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
4940 		*out_field_token = field_token;
/* cfg is NULL here: no open-type filtering is wanted for this lookup */
4942 		cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* the callee must be exactly corlib's RuntimeHelpers.InitializeArray */
4945 		if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
/* only element types whose blob layout matches the in-memory layout qualify */
4947 		switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
4948 		case MONO_TYPE_BOOLEAN:
4952 		/* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
4953 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
4954 		case MONO_TYPE_CHAR:
4964 			return NULL; /* stupid ARM FP swapped format */
/* the declared data blob must be large enough for len elements */
4974 		if (size > mono_type_size (field->type, &dummy_align))
4977 		/*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
4978 		if (!method->klass->image->dynamic) {
4979 			field_index = read32 (ip + 2) & 0xffffff;
4980 			mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
4981 			data_ptr = mono_image_rva_map (method->klass->image, rva);
4982 			/*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
4983 			/* for aot code we do the lookup on load */
4984 			if (aot && data_ptr)
4985 				return GUINT_TO_POINTER (rva);
4987 			/*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
/* dynamic (Reflection.Emit) images have no RVA mapping; read the field data directly */
4989 			data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *   Record an InvalidProgramException on CFG with a message naming the
 *   offending METHOD and either "method body is empty." or a disassembly
 *   of the IL instruction at IP. Both temporary strings are freed; the
 *   formatted message is owned by cfg->exception_message.
 */
4997 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
4999 	char *method_fname = mono_method_full_name (method, TRUE);
5002 	if (mono_method_get_header (method)->code_size == 0)
5003 		method_code = g_strdup ("method body is empty.");
5005 		method_code = mono_disasm_code_one (NULL, method, ip, NULL);
5006  	cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
5007  	cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
5008  	g_free (method_fname);
5009  	g_free (method_code);
/*
 * set_exception_object:
 *   Abort the compilation with a pre-built managed EXCEPTION object.
 *   cfg->exception_ptr is registered as a GC root first so the object
 *   stays alive until the exception is rethrown to the caller of the JIT.
 */
5013 set_exception_object (MonoCompile *cfg, MonoException *exception)
5015 	cfg->exception_type = MONO_EXCEPTION_OBJECT_SUPPLIED;
5016 	MONO_GC_REGISTER_ROOT (cfg->exception_ptr);
5017 	cfg->exception_ptr = exception;
/*
 * generic_class_is_reference_type:
 *   Return TRUE if KLASS is a reference type, resolving type variables
 *   through the generic-sharing context first when CFG is compiling
 *   shared code (a naked type variable could otherwise be misclassified).
 */
5021 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
5025 	if (cfg->generic_sharing_context)
5026 		type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, &klass->byval_arg);
5028 		type = &klass->byval_arg;
5029 	return MONO_TYPE_IS_REFERENCE (type);
/*
 * emit_stloc_ir:
 *   Emit IR storing the top-of-stack value *SP into local N. When the
 *   value was produced by the immediately preceding ICONST/I8CONST and
 *   the store would be a plain register move, the move is elided by
 *   retargeting that constant's destination register to the local's.
 */
5033 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
5036 	guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
/* only fires when sp[0] really is the last emitted ins of the current bblock */
5037 	if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
5038 			((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
5039 		/* Optimize reg-reg moves away */
5041 		 * Can't optimize other opcodes, since sp[0] might point to
5042 		 * the last ins of a decomposed opcode.
5044 		sp [0]->dreg = (cfg)->locals [n]->dreg;
5046 		EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
5051  * ldloca inhibits many optimizations so try to get rid of it in common
/*
 * cases: recognizes `ldloca N; initobj T` and replaces the pair with a
 * direct zeroing of local N (pconst NULL for reference types, vzero for
 * value types), so the local's address is never taken. SIZE is the
 * encoded length of the ldloca instruction itself (short vs long form).
 * Returns the ip to resume decoding at — presumably past the consumed
 * initobj when the rewrite succeeded, unchanged otherwise (tail not
 * visible here).
 */
5054 static inline unsigned char *
5055 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
5064 		local = read16 (ip + 2);
/* look ahead only within the current bblock (ip_in_bb) for the initobj prefix */
5068 	if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
5069 		gboolean skip = FALSE;
5071 		/* From the INITOBJ case */
5072 		token = read32 (ip + 2);
5073 		klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
5074 		CHECK_TYPELOAD (klass);
/* reference type (possibly via shared-generic resolution): initobj == store NULL */
5075 		if (generic_class_is_reference_type (cfg, klass)) {
5076 			MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5077 		} else if (MONO_TYPE_IS_REFERENCE (&klass->byval_arg)) {
5078 			MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
/* value type: initobj == zero the whole struct in place */
5079 		} else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
5080 			MONO_EMIT_NEW_VZERO (cfg, cfg->locals [local]->dreg, klass);
/*
 * is_exception_class:
 *   Return whether CLASS is System.Exception or derives from it, by
 *   walking the parent chain up to mono_defaults.exception_class.
 */
5093 is_exception_class (MonoClass *class)
5096 		if (class == mono_defaults.exception_class)
5098 		class = class->parent;
5104 * mono_method_to_ir:
5106 * Translate the .net IL into linear IR.
5109 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
5110 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
5111 guint inline_offset, gboolean is_virtual_call)
5114 MonoInst *ins, **sp, **stack_start;
5115 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
5116 MonoSimpleBasicBlock *bb = NULL;
5117 MonoMethod *cmethod, *method_definition;
5118 MonoInst **arg_array;
5119 MonoMethodHeader *header;
5121 guint32 token, ins_flag;
5123 MonoClass *constrained_call = NULL;
5124 unsigned char *ip, *end, *target, *err_pos;
5125 static double r8_0 = 0.0;
5126 MonoMethodSignature *sig;
5127 MonoGenericContext *generic_context = NULL;
5128 MonoGenericContainer *generic_container = NULL;
5129 MonoType **param_types;
5130 int i, n, start_new_bblock, dreg;
5131 int num_calls = 0, inline_costs = 0;
5132 int breakpoint_id = 0;
5134 MonoBoolean security, pinvoke;
5135 MonoSecurityManager* secman = NULL;
5136 MonoDeclSecurityActions actions;
5137 GSList *class_inits = NULL;
5138 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
5140 gboolean init_locals, seq_points, skip_dead_blocks;
5142 /* serialization and xdomain stuff may need access to private fields and methods */
5143 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
5144 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
5145 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
5146 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
5147 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
5148 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
5150 dont_verify |= mono_security_get_mode () == MONO_SECURITY_MODE_SMCS_HACK;
5152 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
5153 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
5154 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
5155 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
5157 image = method->klass->image;
5158 header = mono_method_get_header (method);
5159 generic_container = mono_method_get_generic_container (method);
5160 sig = mono_method_signature (method);
5161 num_args = sig->hasthis + sig->param_count;
5162 ip = (unsigned char*)header->code;
5163 cfg->cil_start = ip;
5164 end = ip + header->code_size;
5165 mono_jit_stats.cil_code_size += header->code_size;
5166 init_locals = header->init_locals;
5168 seq_points = cfg->gen_seq_points && cfg->method == method;
5171 * Methods without init_locals set could cause asserts in various passes
5176 method_definition = method;
5177 while (method_definition->is_inflated) {
5178 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
5179 method_definition = imethod->declaring;
5182 /* SkipVerification is not allowed if core-clr is enabled */
5183 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
5185 dont_verify_stloc = TRUE;
5188 if (!dont_verify && mini_method_verify (cfg, method_definition))
5189 goto exception_exit;
5191 if (mono_debug_using_mono_debugger ())
5192 cfg->keep_cil_nops = TRUE;
5194 if (sig->is_inflated)
5195 generic_context = mono_method_get_context (method);
5196 else if (generic_container)
5197 generic_context = &generic_container->context;
5198 cfg->generic_context = generic_context;
5200 if (!cfg->generic_sharing_context)
5201 g_assert (!sig->has_type_parameters);
5203 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
5204 g_assert (method->is_inflated);
5205 g_assert (mono_method_get_context (method)->method_inst);
5207 if (method->is_inflated && mono_method_get_context (method)->method_inst)
5208 g_assert (sig->generic_param_count);
5210 if (cfg->method == method) {
5211 cfg->real_offset = 0;
5213 cfg->real_offset = inline_offset;
5216 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
5217 cfg->cil_offset_to_bb_len = header->code_size;
5219 cfg->current_method = method;
5221 if (cfg->verbose_level > 2)
5222 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
5224 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
5226 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
5227 for (n = 0; n < sig->param_count; ++n)
5228 param_types [n + sig->hasthis] = sig->params [n];
5229 cfg->arg_types = param_types;
5231 dont_inline = g_list_prepend (dont_inline, method);
5232 if (cfg->method == method) {
5234 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
5235 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
5238 NEW_BBLOCK (cfg, start_bblock);
5239 cfg->bb_entry = start_bblock;
5240 start_bblock->cil_code = NULL;
5241 start_bblock->cil_length = 0;
5244 NEW_BBLOCK (cfg, end_bblock);
5245 cfg->bb_exit = end_bblock;
5246 end_bblock->cil_code = NULL;
5247 end_bblock->cil_length = 0;
5248 g_assert (cfg->num_bblocks == 2);
5250 arg_array = cfg->args;
5252 if (header->num_clauses) {
5253 cfg->spvars = g_hash_table_new (NULL, NULL);
5254 cfg->exvars = g_hash_table_new (NULL, NULL);
5256 /* handle exception clauses */
5257 for (i = 0; i < header->num_clauses; ++i) {
5258 MonoBasicBlock *try_bb;
5259 MonoExceptionClause *clause = &header->clauses [i];
5260 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
5261 try_bb->real_offset = clause->try_offset;
5262 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
5263 tblock->real_offset = clause->handler_offset;
5264 tblock->flags |= BB_EXCEPTION_HANDLER;
5266 link_bblock (cfg, try_bb, tblock);
5268 if (*(ip + clause->handler_offset) == CEE_POP)
5269 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
5271 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
5272 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
5273 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
5274 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5275 MONO_ADD_INS (tblock, ins);
5277 /* todo: is a fault block unsafe to optimize? */
5278 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
5279 tblock->flags |= BB_EXCEPTION_UNSAFE;
5283 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
5285 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
5287 /* catch and filter blocks get the exception object on the stack */
5288 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
5289 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5290 MonoInst *dummy_use;
5292 /* mostly like handle_stack_args (), but just sets the input args */
5293 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
5294 tblock->in_scount = 1;
5295 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5296 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5299 * Add a dummy use for the exvar so its liveness info will be
5303 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
5305 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5306 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
5307 tblock->flags |= BB_EXCEPTION_HANDLER;
5308 tblock->real_offset = clause->data.filter_offset;
5309 tblock->in_scount = 1;
5310 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5311 /* The filter block shares the exvar with the handler block */
5312 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5313 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5314 MONO_ADD_INS (tblock, ins);
5318 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
5319 clause->data.catch_class &&
5320 cfg->generic_sharing_context &&
5321 mono_class_check_context_used (clause->data.catch_class)) {
5323 * In shared generic code with catch
5324 * clauses containing type variables
5325 * the exception handling code has to
5326 * be able to get to the rgctx.
5327 * Therefore we have to make sure that
5328 * the vtable/mrgctx argument (for
5329 * static or generic methods) or the
5330 * "this" argument (for non-static
5331 * methods) are live.
5333 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5334 mini_method_get_context (method)->method_inst ||
5335 method->klass->valuetype) {
5336 mono_get_vtable_var (cfg);
5338 MonoInst *dummy_use;
5340 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
5345 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
5346 cfg->cbb = start_bblock;
5347 cfg->args = arg_array;
5348 mono_save_args (cfg, sig, inline_args);
5351 /* FIRST CODE BLOCK */
5352 NEW_BBLOCK (cfg, bblock);
5353 bblock->cil_code = ip;
5357 ADD_BBLOCK (cfg, bblock);
5359 if (cfg->method == method) {
5360 breakpoint_id = mono_debugger_method_has_breakpoint (method);
5361 if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
5362 MONO_INST_NEW (cfg, ins, OP_BREAK);
5363 MONO_ADD_INS (bblock, ins);
5367 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5368 secman = mono_security_manager_get_methods ();
5370 security = (secman && mono_method_has_declsec (method));
5371 /* at this point having security doesn't mean we have any code to generate */
5372 if (security && (cfg->method == method)) {
5373 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
5374 * And we do not want to enter the next section (with allocation) if we
5375 * have nothing to generate */
5376 security = mono_declsec_get_demands (method, &actions);
5379 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
5380 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
5382 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5383 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5384 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
5386 /* unless the method or it's class has the [SuppressUnmanagedCodeSecurity] attribute */
5387 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5391 mono_custom_attrs_free (custom);
5394 custom = mono_custom_attrs_from_class (wrapped->klass);
5395 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5399 mono_custom_attrs_free (custom);
5402 /* not a P/Invoke after all */
5407 if ((init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
5408 /* we use a separate basic block for the initialization code */
5409 NEW_BBLOCK (cfg, init_localsbb);
5410 cfg->bb_init = init_localsbb;
5411 init_localsbb->real_offset = cfg->real_offset;
5412 start_bblock->next_bb = init_localsbb;
5413 init_localsbb->next_bb = bblock;
5414 link_bblock (cfg, start_bblock, init_localsbb);
5415 link_bblock (cfg, init_localsbb, bblock);
5417 cfg->cbb = init_localsbb;
5419 start_bblock->next_bb = bblock;
5420 link_bblock (cfg, start_bblock, bblock);
5423 /* at this point we know, if security is TRUE, that some code needs to be generated */
5424 if (security && (cfg->method == method)) {
5427 mono_jit_stats.cas_demand_generation++;
5429 if (actions.demand.blob) {
5430 /* Add code for SecurityAction.Demand */
5431 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
5432 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
5433 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5434 mono_emit_method_call (cfg, secman->demand, args, NULL);
5436 if (actions.noncasdemand.blob) {
5437 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
5438 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
5439 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
5440 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
5441 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5442 mono_emit_method_call (cfg, secman->demand, args, NULL);
5444 if (actions.demandchoice.blob) {
5445 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
5446 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
5447 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
5448 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
5449 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
5453 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
5455 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
5458 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
5459 /* check if this is native code, e.g. an icall or a p/invoke */
5460 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
5461 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5463 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
5464 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
5466 /* if this ia a native call then it can only be JITted from platform code */
5467 if ((icall || pinvk) && method->klass && method->klass->image) {
5468 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
5469 MonoException *ex = icall ? mono_get_exception_security () :
5470 mono_get_exception_method_access ();
5471 emit_throw_exception (cfg, ex);
5478 if (header->code_size == 0)
5481 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
5486 if (cfg->method == method)
5487 mono_debug_init_method (cfg, bblock, breakpoint_id);
5489 for (n = 0; n < header->num_locals; ++n) {
5490 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
5495 /* We force the vtable variable here for all shared methods
5496 for the possibility that they might show up in a stack
5497 trace where their exact instantiation is needed. */
5498 if (cfg->generic_sharing_context && method == cfg->method) {
5499 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5500 mini_method_get_context (method)->method_inst ||
5501 method->klass->valuetype) {
5502 mono_get_vtable_var (cfg);
5504 /* FIXME: Is there a better way to do this?
5505 We need the variable live for the duration
5506 of the whole method. */
5507 cfg->args [0]->flags |= MONO_INST_INDIRECT;
5511 /* add a check for this != NULL to inlined methods */
5512 if (is_virtual_call) {
5515 NEW_ARGLOAD (cfg, arg_ins, 0);
5516 MONO_ADD_INS (cfg->cbb, arg_ins);
5517 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
5520 skip_dead_blocks = !dont_verify;
5521 if (skip_dead_blocks) {
5522 bb = mono_basic_block_split (method, &error);
5523 if (!mono_error_ok (&error)) {
5524 mono_error_cleanup (&error);
5530 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
5531 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
5534 start_new_bblock = 0;
5537 if (cfg->method == method)
5538 cfg->real_offset = ip - header->code;
5540 cfg->real_offset = inline_offset;
5545 if (start_new_bblock) {
5546 bblock->cil_length = ip - bblock->cil_code;
5547 if (start_new_bblock == 2) {
5548 g_assert (ip == tblock->cil_code);
5550 GET_BBLOCK (cfg, tblock, ip);
5552 bblock->next_bb = tblock;
5555 start_new_bblock = 0;
5556 for (i = 0; i < bblock->in_scount; ++i) {
5557 if (cfg->verbose_level > 3)
5558 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5559 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5563 g_slist_free (class_inits);
5566 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
5567 link_bblock (cfg, bblock, tblock);
5568 if (sp != stack_start) {
5569 handle_stack_args (cfg, stack_start, sp - stack_start);
5571 CHECK_UNVERIFIABLE (cfg);
5573 bblock->next_bb = tblock;
5576 for (i = 0; i < bblock->in_scount; ++i) {
5577 if (cfg->verbose_level > 3)
5578 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5579 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5582 g_slist_free (class_inits);
5587 if (skip_dead_blocks) {
5588 int ip_offset = ip - header->code;
5590 if (ip_offset == bb->end)
5594 int op_size = mono_opcode_size (ip, end);
5595 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
5597 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
5599 if (ip_offset + op_size == bb->end) {
5600 MONO_INST_NEW (cfg, ins, OP_NOP);
5601 MONO_ADD_INS (bblock, ins);
5602 start_new_bblock = 1;
5610 * Sequence points are points where the debugger can place a breakpoint.
5611 * Currently, we generate these automatically at points where the IL
5614 if (seq_points && sp == stack_start) {
5615 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
5616 MONO_ADD_INS (cfg->cbb, ins);
5619 bblock->real_offset = cfg->real_offset;
5621 if ((cfg->method == method) && cfg->coverage_info) {
5622 guint32 cil_offset = ip - header->code;
5623 cfg->coverage_info->data [cil_offset].cil_code = ip;
5625 /* TODO: Use an increment here */
5626 #if defined(TARGET_X86)
5627 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
5628 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
5630 MONO_ADD_INS (cfg->cbb, ins);
5632 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
5633 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
5637 if (cfg->verbose_level > 3)
5638 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
5642 if (cfg->keep_cil_nops)
5643 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
5645 MONO_INST_NEW (cfg, ins, OP_NOP);
5647 MONO_ADD_INS (bblock, ins);
5650 if (should_insert_brekpoint (cfg->method))
5651 MONO_INST_NEW (cfg, ins, OP_BREAK);
5653 MONO_INST_NEW (cfg, ins, OP_NOP);
5655 MONO_ADD_INS (bblock, ins);
5661 CHECK_STACK_OVF (1);
5662 n = (*ip)-CEE_LDARG_0;
5664 EMIT_NEW_ARGLOAD (cfg, ins, n);
5672 CHECK_STACK_OVF (1);
5673 n = (*ip)-CEE_LDLOC_0;
5675 EMIT_NEW_LOCLOAD (cfg, ins, n);
5684 n = (*ip)-CEE_STLOC_0;
5687 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
5689 emit_stloc_ir (cfg, sp, header, n);
5696 CHECK_STACK_OVF (1);
5699 EMIT_NEW_ARGLOAD (cfg, ins, n);
5705 CHECK_STACK_OVF (1);
5708 NEW_ARGLOADA (cfg, ins, n);
5709 MONO_ADD_INS (cfg->cbb, ins);
5719 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
5721 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
5726 CHECK_STACK_OVF (1);
5729 EMIT_NEW_LOCLOAD (cfg, ins, n);
5733 case CEE_LDLOCA_S: {
5734 unsigned char *tmp_ip;
5736 CHECK_STACK_OVF (1);
5737 CHECK_LOCAL (ip [1]);
5739 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
5745 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
5754 CHECK_LOCAL (ip [1]);
5755 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
5757 emit_stloc_ir (cfg, sp, header, ip [1]);
5762 CHECK_STACK_OVF (1);
5763 EMIT_NEW_PCONST (cfg, ins, NULL);
5764 ins->type = STACK_OBJ;
5769 CHECK_STACK_OVF (1);
5770 EMIT_NEW_ICONST (cfg, ins, -1);
5783 CHECK_STACK_OVF (1);
5784 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
5790 CHECK_STACK_OVF (1);
5792 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
5798 CHECK_STACK_OVF (1);
5799 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
5805 CHECK_STACK_OVF (1);
5806 MONO_INST_NEW (cfg, ins, OP_I8CONST);
5807 ins->type = STACK_I8;
5808 ins->dreg = alloc_dreg (cfg, STACK_I8);
5810 ins->inst_l = (gint64)read64 (ip);
5811 MONO_ADD_INS (bblock, ins);
5817 gboolean use_aotconst = FALSE;
5819 #ifdef TARGET_POWERPC
5820 /* FIXME: Clean this up */
5821 if (cfg->compile_aot)
5822 use_aotconst = TRUE;
5825 /* FIXME: we should really allocate this only late in the compilation process */
5826 f = mono_domain_alloc (cfg->domain, sizeof (float));
5828 CHECK_STACK_OVF (1);
5834 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
5836 dreg = alloc_freg (cfg);
5837 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
5838 ins->type = STACK_R8;
5840 MONO_INST_NEW (cfg, ins, OP_R4CONST);
5841 ins->type = STACK_R8;
5842 ins->dreg = alloc_dreg (cfg, STACK_R8);
5844 MONO_ADD_INS (bblock, ins);
5854 gboolean use_aotconst = FALSE;
5856 #ifdef TARGET_POWERPC
5857 /* FIXME: Clean this up */
5858 if (cfg->compile_aot)
5859 use_aotconst = TRUE;
5862 /* FIXME: we should really allocate this only late in the compilation process */
5863 d = mono_domain_alloc (cfg->domain, sizeof (double));
5865 CHECK_STACK_OVF (1);
5871 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
5873 dreg = alloc_freg (cfg);
5874 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
5875 ins->type = STACK_R8;
5877 MONO_INST_NEW (cfg, ins, OP_R8CONST);
5878 ins->type = STACK_R8;
5879 ins->dreg = alloc_dreg (cfg, STACK_R8);
5881 MONO_ADD_INS (bblock, ins);
5890 MonoInst *temp, *store;
5892 CHECK_STACK_OVF (1);
5896 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
5897 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
5899 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
5902 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
5915 if (sp [0]->type == STACK_R8)
5916 /* we need to pop the value from the x86 FP stack */
5917 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
5926 if (stack_start != sp)
5928 token = read32 (ip + 1);
5929 /* FIXME: check the signature matches */
5930 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
5935 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
5936 GENERIC_SHARING_FAILURE (CEE_JMP);
5938 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5939 CHECK_CFG_EXCEPTION;
5941 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
5943 MonoMethodSignature *fsig = mono_method_signature (cmethod);
5946 /* Handle tail calls similarly to calls */
5947 n = fsig->param_count + fsig->hasthis;
5949 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
5950 call->method = cmethod;
5951 call->tail_call = TRUE;
5952 call->signature = mono_method_signature (cmethod);
5953 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
5954 call->inst.inst_p0 = cmethod;
5955 for (i = 0; i < n; ++i)
5956 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
5958 mono_arch_emit_call (cfg, call);
5959 MONO_ADD_INS (bblock, (MonoInst*)call);
5962 for (i = 0; i < num_args; ++i)
5963 /* Prevent arguments from being optimized away */
5964 arg_array [i]->flags |= MONO_INST_VOLATILE;
5966 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
5967 ins = (MonoInst*)call;
5968 ins->inst_p0 = cmethod;
5969 MONO_ADD_INS (bblock, ins);
5973 start_new_bblock = 1;
5978 case CEE_CALLVIRT: {
5979 MonoInst *addr = NULL;
5980 MonoMethodSignature *fsig = NULL;
5982 int virtual = *ip == CEE_CALLVIRT;
5983 int calli = *ip == CEE_CALLI;
5984 gboolean pass_imt_from_rgctx = FALSE;
5985 MonoInst *imt_arg = NULL;
5986 gboolean pass_vtable = FALSE;
5987 gboolean pass_mrgctx = FALSE;
5988 MonoInst *vtable_arg = NULL;
5989 gboolean check_this = FALSE;
5990 gboolean supported_tail_call = FALSE;
5993 token = read32 (ip + 1);
6000 if (method->wrapper_type != MONO_WRAPPER_NONE)
6001 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
6003 fsig = mono_metadata_parse_signature (image, token);
6005 n = fsig->param_count + fsig->hasthis;
6007 if (method->dynamic && fsig->pinvoke) {
6011 * This is a call through a function pointer using a pinvoke
6012 * signature. Have to create a wrapper and call that instead.
6013 * FIXME: This is very slow, need to create a wrapper at JIT time
6014 * instead based on the signature.
6016 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
6017 EMIT_NEW_PCONST (cfg, args [1], fsig);
6019 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
6022 MonoMethod *cil_method;
6024 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6025 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
6026 cil_method = cmethod;
6027 } else if (constrained_call) {
6028 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
6030 * This is needed since get_method_constrained can't find
6031 * the method in klass representing a type var.
6032 * The type var is guaranteed to be a reference type in this
6035 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6036 cil_method = cmethod;
6037 g_assert (!cmethod->klass->valuetype);
6039 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
6042 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6043 cil_method = cmethod;
6048 if (!dont_verify && !cfg->skip_visibility) {
6049 MonoMethod *target_method = cil_method;
6050 if (method->is_inflated) {
6051 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
6053 if (!mono_method_can_access_method (method_definition, target_method) &&
6054 !mono_method_can_access_method (method, cil_method))
6055 METHOD_ACCESS_FAILURE;
6058 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
6059 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
6061 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
6062 /* MS.NET seems to silently convert this to a callvirt */
6065 if (!cmethod->klass->inited)
6066 if (!mono_class_init (cmethod->klass))
6069 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
6070 mini_class_is_system_array (cmethod->klass)) {
6071 array_rank = cmethod->klass->rank;
6072 fsig = mono_method_signature (cmethod);
6074 if (mono_method_signature (cmethod)->pinvoke) {
6075 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
6076 check_for_pending_exc, FALSE);
6077 fsig = mono_method_signature (wrapper);
6078 } else if (constrained_call) {
6079 fsig = mono_method_signature (cmethod);
6081 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
6085 mono_save_token_info (cfg, image, token, cil_method);
6087 n = fsig->param_count + fsig->hasthis;
6089 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
6090 if (check_linkdemand (cfg, method, cmethod))
6092 CHECK_CFG_EXCEPTION;
6095 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
6096 g_assert_not_reached ();
6099 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
6102 if (!cfg->generic_sharing_context && cmethod)
6103 g_assert (!mono_method_check_context_used (cmethod));
6107 //g_assert (!virtual || fsig->hasthis);
6111 if (constrained_call) {
6113 * We have the `constrained.' prefix opcode.
6115 if (constrained_call->valuetype && !cmethod->klass->valuetype) {
6117 * The type parameter is instantiated as a valuetype,
6118 * but that type doesn't override the method we're
6119 * calling, so we need to box `this'.
6121 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
6122 ins->klass = constrained_call;
6123 sp [0] = handle_box (cfg, ins, constrained_call);
6124 CHECK_CFG_EXCEPTION;
6125 } else if (!constrained_call->valuetype) {
6126 int dreg = alloc_preg (cfg);
6129 * The type parameter is instantiated as a reference
6130 * type. We have a managed pointer on the stack, so
6131 * we need to dereference it here.
6133 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
6134 ins->type = STACK_OBJ;
6136 } else if (cmethod->klass->valuetype)
6138 constrained_call = NULL;
6141 if (*ip != CEE_CALLI && check_call_signature (cfg, fsig, sp))
6145 * If the callee is a shared method, then its static cctor
6146 * might not get called after the call was patched.
6148 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
6149 emit_generic_class_init (cfg, cmethod->klass);
6150 CHECK_TYPELOAD (cmethod->klass);
6153 if (cmethod && ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
6154 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
6155 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6156 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
6157 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6160 * Pass vtable iff target method might
6161 * be shared, which means that sharing
6162 * is enabled for its class and its
6163 * context is sharable (and it's not a
6166 if (sharing_enabled && context_sharable &&
6167 !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
6171 if (cmethod && mini_method_get_context (cmethod) &&
6172 mini_method_get_context (cmethod)->method_inst) {
6173 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6174 MonoGenericContext *context = mini_method_get_context (cmethod);
6175 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6177 g_assert (!pass_vtable);
6179 if (sharing_enabled && context_sharable)
6183 if (cfg->generic_sharing_context && cmethod) {
6184 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
6186 context_used = mono_method_check_context_used (cmethod);
6188 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
6189 /* Generic method interface
6190 calls are resolved via a
6191 helper function and don't
6193 if (!cmethod_context || !cmethod_context->method_inst)
6194 pass_imt_from_rgctx = TRUE;
6198 * If a shared method calls another
6199 * shared method then the caller must
6200 * have a generic sharing context
6201 * because the magic trampoline
6202 * requires it. FIXME: We shouldn't
6203 * have to force the vtable/mrgctx
6204 * variable here. Instead there
6205 * should be a flag in the cfg to
6206 * request a generic sharing context.
6209 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
6210 mono_get_vtable_var (cfg);
6215 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
6217 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
6219 CHECK_TYPELOAD (cmethod->klass);
6220 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
6225 g_assert (!vtable_arg);
6227 if (!cfg->compile_aot) {
6229 * emit_get_rgctx_method () calls mono_class_vtable () so check
6230 * for type load errors before.
6232 mono_class_vtable (cfg->domain, cmethod->klass);
6233 CHECK_TYPELOAD (cmethod->klass);
6236 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
6238 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
6239 MONO_METHOD_IS_FINAL (cmethod)) {
6246 if (pass_imt_from_rgctx) {
6247 g_assert (!pass_vtable);
6250 imt_arg = emit_get_rgctx_method (cfg, context_used,
6251 cmethod, MONO_RGCTX_INFO_METHOD);
6255 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
6257 /* Calling virtual generic methods */
6258 if (cmethod && virtual &&
6259 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
6260 !(MONO_METHOD_IS_FINAL (cmethod) &&
6261 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
6262 mono_method_signature (cmethod)->generic_param_count) {
6263 MonoInst *this_temp, *this_arg_temp, *store;
6264 MonoInst *iargs [4];
6266 g_assert (mono_method_signature (cmethod)->is_inflated);
6268 /* Prevent inlining of methods that contain indirect calls */
6271 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK
6272 /* The llvm vcall trampolines doesn't support generic virtual calls yet */
6273 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt && !mono_use_llvm) {
6274 g_assert (!imt_arg);
6276 g_assert (cmethod->is_inflated);
6277 imt_arg = emit_get_rgctx_method (cfg, context_used,
6278 cmethod, MONO_RGCTX_INFO_METHOD);
6279 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, sp [0], imt_arg);
6283 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
6284 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
6285 MONO_ADD_INS (bblock, store);
6287 /* FIXME: This should be a managed pointer */
6288 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
6290 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
6291 iargs [1] = emit_get_rgctx_method (cfg, context_used,
6292 cmethod, MONO_RGCTX_INFO_METHOD);
6293 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
6294 addr = mono_emit_jit_icall (cfg,
6295 mono_helper_compile_generic_method, iargs);
6297 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
6299 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6302 if (!MONO_TYPE_IS_VOID (fsig->ret))
6303 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6310 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6311 supported_tail_call = cmethod && MONO_ARCH_USE_OP_TAIL_CALL (mono_method_signature (method), mono_method_signature (cmethod));
6313 supported_tail_call = cmethod && mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
6317 /* FIXME: runtime generic context pointer for jumps? */
6318 /* FIXME: handle this for generic sharing eventually */
6319 if ((ins_flag & MONO_INST_TAILCALL) && !cfg->generic_sharing_context && !vtable_arg && cmethod && (*ip == CEE_CALL) && supported_tail_call) {
6322 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6325 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6326 /* Handle tail calls similarly to calls */
6327 call = mono_emit_call_args (cfg, mono_method_signature (cmethod), sp, FALSE, FALSE, TRUE);
6329 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6330 call->tail_call = TRUE;
6331 call->method = cmethod;
6332 call->signature = mono_method_signature (cmethod);
6335 * We implement tail calls by storing the actual arguments into the
6336 * argument variables, then emitting a CEE_JMP.
6338 for (i = 0; i < n; ++i) {
6339 /* Prevent argument from being register allocated */
6340 arg_array [i]->flags |= MONO_INST_VOLATILE;
6341 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6345 ins = (MonoInst*)call;
6346 ins->inst_p0 = cmethod;
6347 ins->inst_p1 = arg_array [0];
6348 MONO_ADD_INS (bblock, ins);
6349 link_bblock (cfg, bblock, end_bblock);
6350 start_new_bblock = 1;
6351 /* skip CEE_RET as well */
6357 /* Conversion to a JIT intrinsic */
6358 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
6359 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6360 type_to_eval_stack_type ((cfg), fsig->ret, ins);
6371 if ((cfg->opt & MONO_OPT_INLINE) && cmethod &&
6372 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
6373 mono_method_check_inlining (cfg, cmethod) &&
6374 !g_list_find (dont_inline, cmethod)) {
6376 gboolean allways = FALSE;
6378 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
6379 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
6380 /* Prevent inlining of methods that call wrappers */
6382 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
6386 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, allways))) {
6388 cfg->real_offset += 5;
6391 if (!MONO_TYPE_IS_VOID (fsig->ret))
6392 /* *sp is already set by inline_method */
6395 inline_costs += costs;
6401 inline_costs += 10 * num_calls++;
6403 /* Tail recursion elimination */
6404 if ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
6405 gboolean has_vtargs = FALSE;
6408 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6411 /* keep it simple */
6412 for (i = fsig->param_count - 1; i >= 0; i--) {
6413 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
6418 for (i = 0; i < n; ++i)
6419 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6420 MONO_INST_NEW (cfg, ins, OP_BR);
6421 MONO_ADD_INS (bblock, ins);
6422 tblock = start_bblock->out_bb [0];
6423 link_bblock (cfg, bblock, tblock);
6424 ins->inst_target_bb = tblock;
6425 start_new_bblock = 1;
6427 /* skip the CEE_RET, too */
6428 if (ip_in_bb (cfg, bblock, ip + 5))
6438 /* Generic sharing */
6439 /* FIXME: only do this for generic methods if
6440 they are not shared! */
6441 if (context_used && !imt_arg && !array_rank &&
6442 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
6443 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
6444 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
6445 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
6448 g_assert (cfg->generic_sharing_context && cmethod);
6452 * We are compiling a call to a
6453 * generic method from shared code,
6454 * which means that we have to look up
6455 * the method in the rgctx and do an
6458 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
6461 /* Indirect calls */
6463 g_assert (!imt_arg);
6465 if (*ip == CEE_CALL)
6466 g_assert (context_used);
6467 else if (*ip == CEE_CALLI)
6468 g_assert (!vtable_arg);
6470 /* FIXME: what the hell is this??? */
6471 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
6472 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
6474 /* Prevent inlining of methods with indirect calls */
6478 #ifdef MONO_ARCH_RGCTX_REG
6480 int rgctx_reg = mono_alloc_preg (cfg);
6482 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
6483 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6484 call = (MonoCallInst*)ins;
6485 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
6486 cfg->uses_rgctx_reg = TRUE;
6487 call->rgctx_reg = TRUE;
6492 if (addr->opcode == OP_AOTCONST && addr->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6494 * Instead of emitting an indirect call, emit a direct call
6495 * with the contents of the aotconst as the patch info.
6497 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_p0, fsig, sp);
6499 } else if (addr->opcode == OP_GOT_ENTRY && addr->inst_right->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6500 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_right->inst_left, fsig, sp);
6503 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6506 if (!MONO_TYPE_IS_VOID (fsig->ret))
6507 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6518 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
6519 if (sp [fsig->param_count]->type == STACK_OBJ) {
6520 MonoInst *iargs [2];
6523 iargs [1] = sp [fsig->param_count];
6525 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
6528 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
6529 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, sp [fsig->param_count]->dreg);
6530 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
6531 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6533 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
6536 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
6537 if (!cmethod->klass->element_class->valuetype && !readonly)
6538 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
6539 CHECK_TYPELOAD (cmethod->klass);
6542 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6545 g_assert_not_reached ();
6553 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
6555 if (!MONO_TYPE_IS_VOID (fsig->ret))
6556 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6566 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL,
6568 } else if (imt_arg) {
6569 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, imt_arg);
6571 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, NULL);
6574 if (!MONO_TYPE_IS_VOID (fsig->ret))
6575 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6582 if (cfg->method != method) {
6583 /* return from inlined method */
6585 * If in_count == 0, that means the ret is unreachable due to
6586 * being preceeded by a throw. In that case, inline_method () will
6587 * handle setting the return value
6588 * (test case: test_0_inline_throw ()).
6590 if (return_var && cfg->cbb->in_count) {
6594 //g_assert (returnvar != -1);
6595 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
6596 cfg->ret_var_set = TRUE;
6600 MonoType *ret_type = mono_method_signature (method)->ret;
6604 * Place a seq point here too even through the IL stack is not
6605 * empty, so a step over on
6608 * will work correctly.
6610 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
6611 MONO_ADD_INS (cfg->cbb, ins);
6614 g_assert (!return_var);
6617 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
6620 if (!cfg->vret_addr) {
6623 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
6625 EMIT_NEW_RETLOADA (cfg, ret_addr);
6627 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
6628 ins->klass = mono_class_from_mono_type (ret_type);
6631 #ifdef MONO_ARCH_SOFT_FLOAT
6632 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
6633 MonoInst *iargs [1];
6637 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
6638 mono_arch_emit_setret (cfg, method, conv);
6640 mono_arch_emit_setret (cfg, method, *sp);
6643 mono_arch_emit_setret (cfg, method, *sp);
6648 if (sp != stack_start)
6650 MONO_INST_NEW (cfg, ins, OP_BR);
6652 ins->inst_target_bb = end_bblock;
6653 MONO_ADD_INS (bblock, ins);
6654 link_bblock (cfg, bblock, end_bblock);
6655 start_new_bblock = 1;
6659 MONO_INST_NEW (cfg, ins, OP_BR);
6661 target = ip + 1 + (signed char)(*ip);
6663 GET_BBLOCK (cfg, tblock, target);
6664 link_bblock (cfg, bblock, tblock);
6665 ins->inst_target_bb = tblock;
6666 if (sp != stack_start) {
6667 handle_stack_args (cfg, stack_start, sp - stack_start);
6669 CHECK_UNVERIFIABLE (cfg);
6671 MONO_ADD_INS (bblock, ins);
6672 start_new_bblock = 1;
6673 inline_costs += BRANCH_COST;
6687 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
6689 target = ip + 1 + *(signed char*)ip;
6695 inline_costs += BRANCH_COST;
6699 MONO_INST_NEW (cfg, ins, OP_BR);
6702 target = ip + 4 + (gint32)read32(ip);
6704 GET_BBLOCK (cfg, tblock, target);
6705 link_bblock (cfg, bblock, tblock);
6706 ins->inst_target_bb = tblock;
6707 if (sp != stack_start) {
6708 handle_stack_args (cfg, stack_start, sp - stack_start);
6710 CHECK_UNVERIFIABLE (cfg);
6713 MONO_ADD_INS (bblock, ins);
6715 start_new_bblock = 1;
6716 inline_costs += BRANCH_COST;
6723 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
6724 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
6725 guint32 opsize = is_short ? 1 : 4;
6727 CHECK_OPSIZE (opsize);
6729 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
6732 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
6737 GET_BBLOCK (cfg, tblock, target);
6738 link_bblock (cfg, bblock, tblock);
6739 GET_BBLOCK (cfg, tblock, ip);
6740 link_bblock (cfg, bblock, tblock);
6742 if (sp != stack_start) {
6743 handle_stack_args (cfg, stack_start, sp - stack_start);
6744 CHECK_UNVERIFIABLE (cfg);
6747 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
6748 cmp->sreg1 = sp [0]->dreg;
6749 type_from_op (cmp, sp [0], NULL);
6752 #if SIZEOF_REGISTER == 4
6753 if (cmp->opcode == OP_LCOMPARE_IMM) {
6754 /* Convert it to OP_LCOMPARE */
6755 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6756 ins->type = STACK_I8;
6757 ins->dreg = alloc_dreg (cfg, STACK_I8);
6759 MONO_ADD_INS (bblock, ins);
6760 cmp->opcode = OP_LCOMPARE;
6761 cmp->sreg2 = ins->dreg;
6764 MONO_ADD_INS (bblock, cmp);
6766 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
6767 type_from_op (ins, sp [0], NULL);
6768 MONO_ADD_INS (bblock, ins);
6769 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
6770 GET_BBLOCK (cfg, tblock, target);
6771 ins->inst_true_bb = tblock;
6772 GET_BBLOCK (cfg, tblock, ip);
6773 ins->inst_false_bb = tblock;
6774 start_new_bblock = 2;
6777 inline_costs += BRANCH_COST;
6792 MONO_INST_NEW (cfg, ins, *ip);
6794 target = ip + 4 + (gint32)read32(ip);
6800 inline_costs += BRANCH_COST;
6804 MonoBasicBlock **targets;
6805 MonoBasicBlock *default_bblock;
6806 MonoJumpInfoBBTable *table;
6807 int offset_reg = alloc_preg (cfg);
6808 int target_reg = alloc_preg (cfg);
6809 int table_reg = alloc_preg (cfg);
6810 int sum_reg = alloc_preg (cfg);
6811 gboolean use_op_switch;
6815 n = read32 (ip + 1);
6818 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
6822 CHECK_OPSIZE (n * sizeof (guint32));
6823 target = ip + n * sizeof (guint32);
6825 GET_BBLOCK (cfg, default_bblock, target);
6827 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
6828 for (i = 0; i < n; ++i) {
6829 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
6830 targets [i] = tblock;
6834 if (sp != stack_start) {
6836 * Link the current bb with the targets as well, so handle_stack_args
6837 * will set their in_stack correctly.
6839 link_bblock (cfg, bblock, default_bblock);
6840 for (i = 0; i < n; ++i)
6841 link_bblock (cfg, bblock, targets [i]);
6843 handle_stack_args (cfg, stack_start, sp - stack_start);
6845 CHECK_UNVERIFIABLE (cfg);
6848 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
6849 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
6852 for (i = 0; i < n; ++i)
6853 link_bblock (cfg, bblock, targets [i]);
6855 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
6856 table->table = targets;
6857 table->table_size = n;
6859 use_op_switch = FALSE;
6861 /* ARM implements SWITCH statements differently */
6862 /* FIXME: Make it use the generic implementation */
6863 if (!cfg->compile_aot)
6864 use_op_switch = TRUE;
6867 if (COMPILE_LLVM (cfg))
6868 use_op_switch = TRUE;
6870 cfg->cbb->has_jump_table = 1;
6872 if (use_op_switch) {
6873 MONO_INST_NEW (cfg, ins, OP_SWITCH);
6874 ins->sreg1 = src1->dreg;
6875 ins->inst_p0 = table;
6876 ins->inst_many_bb = targets;
6877 ins->klass = GUINT_TO_POINTER (n);
6878 MONO_ADD_INS (cfg->cbb, ins);
6880 if (sizeof (gpointer) == 8)
6881 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
6883 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
6885 #if SIZEOF_REGISTER == 8
6886 /* The upper word might not be zero, and we add it to a 64 bit address later */
6887 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
6890 if (cfg->compile_aot) {
6891 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
6893 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
6894 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
6895 ins->inst_p0 = table;
6896 ins->dreg = table_reg;
6897 MONO_ADD_INS (cfg->cbb, ins);
6900 /* FIXME: Use load_memindex */
6901 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
6902 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
6903 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
6905 start_new_bblock = 1;
6906 inline_costs += (BRANCH_COST * 2);
6926 dreg = alloc_freg (cfg);
6929 dreg = alloc_lreg (cfg);
6932 dreg = alloc_preg (cfg);
6935 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
6936 ins->type = ldind_type [*ip - CEE_LDIND_I1];
6937 ins->flags |= ins_flag;
6939 MONO_ADD_INS (bblock, ins);
6954 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
6955 ins->flags |= ins_flag;
6957 MONO_ADD_INS (bblock, ins);
6959 #if HAVE_WRITE_BARRIERS
6960 if (*ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0))) {
6961 /* insert call to write barrier */
6962 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
6963 mono_emit_method_call (cfg, write_barrier, sp, NULL);
6974 MONO_INST_NEW (cfg, ins, (*ip));
6976 ins->sreg1 = sp [0]->dreg;
6977 ins->sreg2 = sp [1]->dreg;
6978 type_from_op (ins, sp [0], sp [1]);
6980 ins->dreg = alloc_dreg ((cfg), (ins)->type);
6982 /* Use the immediate opcodes if possible */
6983 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
6984 int imm_opcode = mono_op_to_op_imm (ins->opcode);
6985 if (imm_opcode != -1) {
6986 ins->opcode = imm_opcode;
6987 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
6990 sp [1]->opcode = OP_NOP;
6994 MONO_ADD_INS ((cfg)->cbb, (ins));
6996 *sp++ = mono_decompose_opcode (cfg, ins);
7013 MONO_INST_NEW (cfg, ins, (*ip));
7015 ins->sreg1 = sp [0]->dreg;
7016 ins->sreg2 = sp [1]->dreg;
7017 type_from_op (ins, sp [0], sp [1]);
7019 ADD_WIDEN_OP (ins, sp [0], sp [1]);
7020 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7022 /* FIXME: Pass opcode to is_inst_imm */
7024 /* Use the immediate opcodes if possible */
7025 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
7028 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
7029 if (imm_opcode != -1) {
7030 ins->opcode = imm_opcode;
7031 if (sp [1]->opcode == OP_I8CONST) {
7032 #if SIZEOF_REGISTER == 8
7033 ins->inst_imm = sp [1]->inst_l;
7035 ins->inst_ls_word = sp [1]->inst_ls_word;
7036 ins->inst_ms_word = sp [1]->inst_ms_word;
7040 ins->inst_imm = (gssize)(sp [1]->inst_c0);
7043 /* Might be followed by an instruction added by ADD_WIDEN_OP */
7044 if (sp [1]->next == NULL)
7045 sp [1]->opcode = OP_NOP;
7048 MONO_ADD_INS ((cfg)->cbb, (ins));
7050 *sp++ = mono_decompose_opcode (cfg, ins);
7063 case CEE_CONV_OVF_I8:
7064 case CEE_CONV_OVF_U8:
7068 /* Special case this earlier so we have long constants in the IR */
7069 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
7070 int data = sp [-1]->inst_c0;
7071 sp [-1]->opcode = OP_I8CONST;
7072 sp [-1]->type = STACK_I8;
7073 #if SIZEOF_REGISTER == 8
7074 if ((*ip) == CEE_CONV_U8)
7075 sp [-1]->inst_c0 = (guint32)data;
7077 sp [-1]->inst_c0 = data;
7079 sp [-1]->inst_ls_word = data;
7080 if ((*ip) == CEE_CONV_U8)
7081 sp [-1]->inst_ms_word = 0;
7083 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
7085 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
7092 case CEE_CONV_OVF_I4:
7093 case CEE_CONV_OVF_I1:
7094 case CEE_CONV_OVF_I2:
7095 case CEE_CONV_OVF_I:
7096 case CEE_CONV_OVF_U:
7099 if (sp [-1]->type == STACK_R8) {
7100 ADD_UNOP (CEE_CONV_OVF_I8);
7107 case CEE_CONV_OVF_U1:
7108 case CEE_CONV_OVF_U2:
7109 case CEE_CONV_OVF_U4:
7112 if (sp [-1]->type == STACK_R8) {
7113 ADD_UNOP (CEE_CONV_OVF_U8);
7120 case CEE_CONV_OVF_I1_UN:
7121 case CEE_CONV_OVF_I2_UN:
7122 case CEE_CONV_OVF_I4_UN:
7123 case CEE_CONV_OVF_I8_UN:
7124 case CEE_CONV_OVF_U1_UN:
7125 case CEE_CONV_OVF_U2_UN:
7126 case CEE_CONV_OVF_U4_UN:
7127 case CEE_CONV_OVF_U8_UN:
7128 case CEE_CONV_OVF_I_UN:
7129 case CEE_CONV_OVF_U_UN:
7139 case CEE_ADD_OVF_UN:
7141 case CEE_MUL_OVF_UN:
7143 case CEE_SUB_OVF_UN:
7151 token = read32 (ip + 1);
7152 klass = mini_get_class (method, token, generic_context);
7153 CHECK_TYPELOAD (klass);
7155 if (generic_class_is_reference_type (cfg, klass)) {
7156 MonoInst *store, *load;
7157 int dreg = alloc_preg (cfg);
7159 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
7160 load->flags |= ins_flag;
7161 MONO_ADD_INS (cfg->cbb, load);
7163 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
7164 store->flags |= ins_flag;
7165 MONO_ADD_INS (cfg->cbb, store);
7167 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7179 token = read32 (ip + 1);
7180 klass = mini_get_class (method, token, generic_context);
7181 CHECK_TYPELOAD (klass);
7183 /* Optimize the common ldobj+stloc combination */
7193 loc_index = ip [5] - CEE_STLOC_0;
7200 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
7201 CHECK_LOCAL (loc_index);
7203 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7204 ins->dreg = cfg->locals [loc_index]->dreg;
7210 /* Optimize the ldobj+stobj combination */
7211 /* The reference case ends up being a load+store anyway */
7212 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
7217 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7224 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7233 CHECK_STACK_OVF (1);
7235 n = read32 (ip + 1);
7237 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
7238 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
7239 ins->type = STACK_OBJ;
7242 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
7243 MonoInst *iargs [1];
7245 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
7246 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
7248 if (cfg->opt & MONO_OPT_SHARED) {
7249 MonoInst *iargs [3];
7251 if (cfg->compile_aot) {
7252 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
7254 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7255 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
7256 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
7257 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
7258 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7260 if (bblock->out_of_line) {
7261 MonoInst *iargs [2];
7263 if (image == mono_defaults.corlib) {
7265 * Avoid relocations in AOT and save some space by using a
7266 * version of helper_ldstr specialized to mscorlib.
7268 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
7269 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
7271 /* Avoid creating the string object */
7272 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
7273 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
7274 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
7278 if (cfg->compile_aot) {
7279 NEW_LDSTRCONST (cfg, ins, image, n);
7281 MONO_ADD_INS (bblock, ins);
7284 NEW_PCONST (cfg, ins, NULL);
7285 ins->type = STACK_OBJ;
7286 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7288 MONO_ADD_INS (bblock, ins);
7297 MonoInst *iargs [2];
7298 MonoMethodSignature *fsig;
7301 MonoInst *vtable_arg = NULL;
7304 token = read32 (ip + 1);
7305 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7308 fsig = mono_method_get_signature (cmethod, image, token);
7312 mono_save_token_info (cfg, image, token, cmethod);
7314 if (!mono_class_init (cmethod->klass))
7317 if (cfg->generic_sharing_context)
7318 context_used = mono_method_check_context_used (cmethod);
7320 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
7321 if (check_linkdemand (cfg, method, cmethod))
7323 CHECK_CFG_EXCEPTION;
7324 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
7325 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
7328 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7329 mono_method_is_generic_sharable_impl (cmethod, TRUE)) {
7330 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7331 mono_class_vtable (cfg->domain, cmethod->klass);
7332 CHECK_TYPELOAD (cmethod->klass);
7334 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7335 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7338 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
7339 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7341 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7343 CHECK_TYPELOAD (cmethod->klass);
7344 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7349 n = fsig->param_count;
7353 * Generate smaller code for the common newobj <exception> instruction in
7354 * argument checking code.
7356 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
7357 is_exception_class (cmethod->klass) && n <= 2 &&
7358 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
7359 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
7360 MonoInst *iargs [3];
7362 g_assert (!vtable_arg);
7366 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
7369 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
7373 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
7378 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
7381 g_assert_not_reached ();
7389 /* move the args to allow room for 'this' in the first position */
7395 /* check_call_signature () requires sp[0] to be set */
7396 this_ins.type = STACK_OBJ;
7398 if (check_call_signature (cfg, fsig, sp))
7403 if (mini_class_is_system_array (cmethod->klass)) {
7404 g_assert (!vtable_arg);
7406 *sp = emit_get_rgctx_method (cfg, context_used,
7407 cmethod, MONO_RGCTX_INFO_METHOD);
7409 /* Avoid varargs in the common case */
7410 if (fsig->param_count == 1)
7411 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
7412 else if (fsig->param_count == 2)
7413 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
7414 else if (fsig->param_count == 3)
7415 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
7417 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
7418 } else if (cmethod->string_ctor) {
7419 g_assert (!context_used);
7420 g_assert (!vtable_arg);
7421 /* we simply pass a null pointer */
7422 EMIT_NEW_PCONST (cfg, *sp, NULL);
7423 /* now call the string ctor */
7424 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, sp, NULL, NULL);
7426 MonoInst* callvirt_this_arg = NULL;
7428 if (cmethod->klass->valuetype) {
7429 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
7430 MONO_EMIT_NEW_VZERO (cfg, iargs [0]->dreg, cmethod->klass);
7431 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
7436 * The code generated by mini_emit_virtual_call () expects
7437 * iargs [0] to be a boxed instance, but luckily the vcall
7438 * will be transformed into a normal call there.
7440 } else if (context_used) {
7444 if (cfg->opt & MONO_OPT_SHARED)
7445 rgctx_info = MONO_RGCTX_INFO_KLASS;
7447 rgctx_info = MONO_RGCTX_INFO_VTABLE;
7448 data = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, rgctx_info);
7450 alloc = handle_alloc_from_inst (cfg, cmethod->klass, data, FALSE);
7453 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7455 CHECK_TYPELOAD (cmethod->klass);
7458 * TypeInitializationExceptions thrown from the mono_runtime_class_init
7459 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
7460 * As a workaround, we call class cctors before allocating objects.
7462 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
7463 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
7464 if (cfg->verbose_level > 2)
7465 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
7466 class_inits = g_slist_prepend (class_inits, vtable);
7469 alloc = handle_alloc (cfg, cmethod->klass, FALSE);
7472 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
7475 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
7477 /* Now call the actual ctor */
7478 /* Avoid virtual calls to ctors if possible */
7479 if (cmethod->klass->marshalbyref)
7480 callvirt_this_arg = sp [0];
7482 if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7483 mono_method_check_inlining (cfg, cmethod) &&
7484 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
7485 !g_list_find (dont_inline, cmethod)) {
7488 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
7489 cfg->real_offset += 5;
7492 inline_costs += costs - 5;
7495 mono_emit_method_call_full (cfg, cmethod, fsig, sp, callvirt_this_arg, NULL);
7497 } else if (context_used &&
7498 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
7499 !mono_class_generic_sharing_enabled (cmethod->klass))) {
7500 MonoInst *cmethod_addr;
7502 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7503 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7505 mono_emit_rgctx_calli (cfg, fsig, sp, cmethod_addr, vtable_arg);
7508 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp,
7509 callvirt_this_arg, NULL, vtable_arg);
7513 if (alloc == NULL) {
7515 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
7516 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
7530 token = read32 (ip + 1);
7531 klass = mini_get_class (method, token, generic_context);
7532 CHECK_TYPELOAD (klass);
7533 if (sp [0]->type != STACK_OBJ)
7536 if (cfg->generic_sharing_context)
7537 context_used = mono_class_check_context_used (klass);
7539 if (!context_used && mono_class_has_variant_generic_params (klass)) {
7546 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
7548 ins = mono_emit_jit_icall (cfg, mono_object_castclass, args);
7552 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
7553 MonoMethod *mono_castclass;
7554 MonoInst *iargs [1];
7557 mono_castclass = mono_marshal_get_castclass (klass);
7560 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7561 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7562 g_assert (costs > 0);
7565 cfg->real_offset += 5;
7570 inline_costs += costs;
7573 ins = handle_castclass (cfg, klass, *sp, context_used);
7574 CHECK_CFG_EXCEPTION;
7584 token = read32 (ip + 1);
7585 klass = mini_get_class (method, token, generic_context);
7586 CHECK_TYPELOAD (klass);
7587 if (sp [0]->type != STACK_OBJ)
7590 if (cfg->generic_sharing_context)
7591 context_used = mono_class_check_context_used (klass);
7593 if (!context_used && mono_class_has_variant_generic_params (klass)) {
7600 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
7602 *sp = mono_emit_jit_icall (cfg, mono_object_isinst, args);
7606 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
7607 MonoMethod *mono_isinst;
7608 MonoInst *iargs [1];
7611 mono_isinst = mono_marshal_get_isinst (klass);
7614 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
7615 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7616 g_assert (costs > 0);
7619 cfg->real_offset += 5;
7624 inline_costs += costs;
7627 ins = handle_isinst (cfg, klass, *sp, context_used);
7628 CHECK_CFG_EXCEPTION;
7635 case CEE_UNBOX_ANY: {
7639 token = read32 (ip + 1);
7640 klass = mini_get_class (method, token, generic_context);
7641 CHECK_TYPELOAD (klass);
7643 mono_save_token_info (cfg, image, token, klass);
7645 if (cfg->generic_sharing_context)
7646 context_used = mono_class_check_context_used (klass);
7648 if (generic_class_is_reference_type (cfg, klass)) {
7649 /* CASTCLASS FIXME kill this huge slice of duplicated code*/
7651 MonoInst *iargs [2];
7656 iargs [1] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
7657 ins = mono_emit_jit_icall (cfg, mono_object_castclass, iargs);
7661 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7662 MonoMethod *mono_castclass;
7663 MonoInst *iargs [1];
7666 mono_castclass = mono_marshal_get_castclass (klass);
7669 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7670 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7672 g_assert (costs > 0);
7675 cfg->real_offset += 5;
7679 inline_costs += costs;
7681 ins = handle_castclass (cfg, klass, *sp, 0);
7682 CHECK_CFG_EXCEPTION;
7690 if (mono_class_is_nullable (klass)) {
7691 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
7698 ins = handle_unbox (cfg, klass, sp, context_used);
7704 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7717 token = read32 (ip + 1);
7718 klass = mini_get_class (method, token, generic_context);
7719 CHECK_TYPELOAD (klass);
7721 mono_save_token_info (cfg, image, token, klass);
7723 if (cfg->generic_sharing_context)
7724 context_used = mono_class_check_context_used (klass);
7726 if (generic_class_is_reference_type (cfg, klass)) {
7732 if (klass == mono_defaults.void_class)
7734 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
7736 /* frequent check in generic code: box (struct), brtrue */
7737 if (!mono_class_is_nullable (klass) &&
7738 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) && (ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S)) {
7739 /*printf ("box-brtrue opt at 0x%04x in %s\n", real_offset, method->name);*/
7741 MONO_INST_NEW (cfg, ins, OP_BR);
7742 if (*ip == CEE_BRTRUE_S) {
7745 target = ip + 1 + (signed char)(*ip);
7750 target = ip + 4 + (gint)(read32 (ip));
7753 GET_BBLOCK (cfg, tblock, target);
7754 link_bblock (cfg, bblock, tblock);
7755 ins->inst_target_bb = tblock;
7756 GET_BBLOCK (cfg, tblock, ip);
7758 * This leads to some inconsistency, since the two bblocks are
7759 * not really connected, but it is needed for handling stack
7760 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
7761 * FIXME: This should only be needed if sp != stack_start, but that
7762 * doesn't work for some reason (test failure in mcs/tests on x86).
7764 link_bblock (cfg, bblock, tblock);
7765 if (sp != stack_start) {
7766 handle_stack_args (cfg, stack_start, sp - stack_start);
7768 CHECK_UNVERIFIABLE (cfg);
7770 MONO_ADD_INS (bblock, ins);
7771 start_new_bblock = 1;
7779 if (cfg->opt & MONO_OPT_SHARED)
7780 rgctx_info = MONO_RGCTX_INFO_KLASS;
7782 rgctx_info = MONO_RGCTX_INFO_VTABLE;
7783 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
7784 *sp++ = handle_box_from_inst (cfg, val, klass, context_used, data);
7786 *sp++ = handle_box (cfg, val, klass);
7789 CHECK_CFG_EXCEPTION;
7798 token = read32 (ip + 1);
7799 klass = mini_get_class (method, token, generic_context);
7800 CHECK_TYPELOAD (klass);
7802 mono_save_token_info (cfg, image, token, klass);
7804 if (cfg->generic_sharing_context)
7805 context_used = mono_class_check_context_used (klass);
7807 if (mono_class_is_nullable (klass)) {
7810 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
7811 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
7815 ins = handle_unbox (cfg, klass, sp, context_used);
7825 MonoClassField *field;
7829 if (*ip == CEE_STFLD) {
7836 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
7838 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
7841 token = read32 (ip + 1);
7842 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7843 field = mono_method_get_wrapper_data (method, token);
7844 klass = field->parent;
7847 field = mono_field_from_token (image, token, &klass, generic_context);
7851 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
7852 FIELD_ACCESS_FAILURE;
7853 mono_class_init (klass);
7855 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
7856 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
7857 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
7858 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
7861 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
7862 if (*ip == CEE_STFLD) {
7863 if (target_type_is_incompatible (cfg, field->type, sp [1]))
7865 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
7866 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
7867 MonoInst *iargs [5];
7870 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
7871 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
7872 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
7876 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
7877 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
7878 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7879 g_assert (costs > 0);
7881 cfg->real_offset += 5;
7884 inline_costs += costs;
7886 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
7891 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
7893 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
7895 #if HAVE_WRITE_BARRIERS
7896 if (mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
7897 /* insert call to write barrier */
7898 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
7899 MonoInst *iargs [2];
7902 dreg = alloc_preg (cfg);
7903 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
7905 mono_emit_method_call (cfg, write_barrier, iargs, NULL);
7909 store->flags |= ins_flag;
7916 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
7917 MonoMethod *wrapper = (*ip == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
7918 MonoInst *iargs [4];
7921 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
7922 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
7923 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
7924 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
7925 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
7926 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7928 g_assert (costs > 0);
7930 cfg->real_offset += 5;
7934 inline_costs += costs;
7936 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
7940 if (sp [0]->type == STACK_VTYPE) {
7943 /* Have to compute the address of the variable */
7945 var = get_vreg_to_inst (cfg, sp [0]->dreg);
7947 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
7949 g_assert (var->klass == klass);
7951 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
7955 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
7957 if (*ip == CEE_LDFLDA) {
7958 dreg = alloc_preg (cfg);
7960 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
7961 ins->klass = mono_class_from_mono_type (field->type);
7962 ins->type = STACK_MP;
7967 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
7968 load->flags |= ins_flag;
7969 load->flags |= MONO_INST_FAULT;
7980 MonoClassField *field;
7981 gpointer addr = NULL;
7982 gboolean is_special_static;
7985 token = read32 (ip + 1);
7987 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7988 field = mono_method_get_wrapper_data (method, token);
7989 klass = field->parent;
7992 field = mono_field_from_token (image, token, &klass, generic_context);
7995 mono_class_init (klass);
7996 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
7997 FIELD_ACCESS_FAILURE;
7999 /* if the class is Critical then transparent code cannot access it's fields */
8000 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8001 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8004 * We can only support shared generic static
8005 * field access on architectures where the
8006 * trampoline code has been extended to handle
8007 * the generic class init.
8009 #ifndef MONO_ARCH_VTABLE_REG
8010 GENERIC_SHARING_FAILURE (*ip);
8013 if (cfg->generic_sharing_context)
8014 context_used = mono_class_check_context_used (klass);
8016 g_assert (!(field->type->attrs & FIELD_ATTRIBUTE_LITERAL));
8018 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
8019 * to be called here.
8021 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
8022 mono_class_vtable (cfg->domain, klass);
8023 CHECK_TYPELOAD (klass);
8025 mono_domain_lock (cfg->domain);
8026 if (cfg->domain->special_static_fields)
8027 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
8028 mono_domain_unlock (cfg->domain);
8030 is_special_static = mono_class_field_is_special_static (field);
8032 /* Generate IR to compute the field address */
8034 if ((cfg->opt & MONO_OPT_SHARED) ||
8035 (cfg->compile_aot && is_special_static) ||
8036 (context_used && is_special_static)) {
8037 MonoInst *iargs [2];
8039 g_assert (field->parent);
8040 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8042 iargs [1] = emit_get_rgctx_field (cfg, context_used,
8043 field, MONO_RGCTX_INFO_CLASS_FIELD);
8045 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8047 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8048 } else if (context_used) {
8049 MonoInst *static_data;
8052 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
8053 method->klass->name_space, method->klass->name, method->name,
8054 depth, field->offset);
8057 if (mono_class_needs_cctor_run (klass, method)) {
8061 vtable = emit_get_rgctx_klass (cfg, context_used,
8062 klass, MONO_RGCTX_INFO_VTABLE);
8064 // FIXME: This doesn't work since it tries to pass the argument
8065 // in the normal way, instead of using MONO_ARCH_VTABLE_REG
8067 * The vtable pointer is always passed in a register regardless of
8068 * the calling convention, so assign it manually, and make a call
8069 * using a signature without parameters.
8071 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable);
8072 #ifdef MONO_ARCH_VTABLE_REG
8073 mono_call_inst_add_outarg_reg (cfg, call, vtable->dreg, MONO_ARCH_VTABLE_REG, FALSE);
8074 cfg->uses_vtable_reg = TRUE;
8081 * The pointer we're computing here is
8083 * super_info.static_data + field->offset
8085 static_data = emit_get_rgctx_klass (cfg, context_used,
8086 klass, MONO_RGCTX_INFO_STATIC_DATA);
8088 if (field->offset == 0) {
8091 int addr_reg = mono_alloc_preg (cfg);
8092 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
8094 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
8095 MonoInst *iargs [2];
8097 g_assert (field->parent);
8098 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8099 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8100 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8102 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
8104 CHECK_TYPELOAD (klass);
8106 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
8107 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
8108 if (cfg->verbose_level > 2)
8109 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
8110 class_inits = g_slist_prepend (class_inits, vtable);
8112 if (cfg->run_cctors) {
8114 /* This makes so that inline cannot trigger */
8115 /* .cctors: too many apps depend on them */
8116 /* running with a specific order... */
8117 if (! vtable->initialized)
8119 ex = mono_runtime_class_init_full (vtable, FALSE);
8121 set_exception_object (cfg, ex);
8122 goto exception_exit;
8126 addr = (char*)vtable->data + field->offset;
8128 if (cfg->compile_aot)
8129 EMIT_NEW_SFLDACONST (cfg, ins, field);
8131 EMIT_NEW_PCONST (cfg, ins, addr);
8134 * insert call to mono_threads_get_static_data (GPOINTER_TO_UINT (addr))
8135 * This could be later optimized to do just a couple of
8136 * memory dereferences with constant offsets.
8138 MonoInst *iargs [1];
8139 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
8140 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
8144 /* Generate IR to do the actual load/store operation */
8146 if (*ip == CEE_LDSFLDA) {
8147 ins->klass = mono_class_from_mono_type (field->type);
8148 ins->type = STACK_PTR;
8150 } else if (*ip == CEE_STSFLD) {
8155 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, ins->dreg, 0, sp [0]->dreg);
8156 store->flags |= ins_flag;
8158 gboolean is_const = FALSE;
8159 MonoVTable *vtable = NULL;
8161 if (!context_used) {
8162 vtable = mono_class_vtable (cfg->domain, klass);
8163 CHECK_TYPELOAD (klass);
8165 if (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) &&
8166 vtable->initialized && (field->type->attrs & FIELD_ATTRIBUTE_INIT_ONLY)) {
8167 gpointer addr = (char*)vtable->data + field->offset;
8168 int ro_type = field->type->type;
8169 if (ro_type == MONO_TYPE_VALUETYPE && field->type->data.klass->enumtype) {
8170 ro_type = mono_class_enum_basetype (field->type->data.klass)->type;
8172 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
8175 case MONO_TYPE_BOOLEAN:
8177 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
8181 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
8184 case MONO_TYPE_CHAR:
8186 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
8190 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
8195 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
8199 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
8202 #ifndef HAVE_MOVING_COLLECTOR
8205 case MONO_TYPE_STRING:
8206 case MONO_TYPE_OBJECT:
8207 case MONO_TYPE_CLASS:
8208 case MONO_TYPE_SZARRAY:
8210 case MONO_TYPE_FNPTR:
8211 case MONO_TYPE_ARRAY:
8212 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
8213 type_to_eval_stack_type ((cfg), field->type, *sp);
8219 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
8224 case MONO_TYPE_VALUETYPE:
8234 CHECK_STACK_OVF (1);
8236 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
8237 load->flags |= ins_flag;
8250 token = read32 (ip + 1);
8251 klass = mini_get_class (method, token, generic_context);
8252 CHECK_TYPELOAD (klass);
8253 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
8254 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
8265 const char *data_ptr;
8267 guint32 field_token;
8273 token = read32 (ip + 1);
8275 klass = mini_get_class (method, token, generic_context);
8276 CHECK_TYPELOAD (klass);
8278 if (cfg->generic_sharing_context)
8279 context_used = mono_class_check_context_used (klass);
8281 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
8282 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_I4);
8283 ins->sreg1 = sp [0]->dreg;
8284 ins->type = STACK_I4;
8285 ins->dreg = alloc_ireg (cfg);
8286 MONO_ADD_INS (cfg->cbb, ins);
8287 *sp = mono_decompose_opcode (cfg, ins);
8292 MonoClass *array_class = mono_array_class_get (klass, 1);
8293 /* FIXME: we cannot get a managed
8294 allocator because we can't get the
8295 open generic class's vtable. We
8296 have the same problem in
8297 handle_alloc_from_inst(). This
8298 needs to be solved so that we can
8299 have managed allocs of shared
8302 MonoVTable *array_class_vtable = mono_class_vtable (cfg->domain, array_class);
8303 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class_vtable, 1);
8305 MonoMethod *managed_alloc = NULL;
8307 /* FIXME: Decompose later to help abcrem */
8310 args [0] = emit_get_rgctx_klass (cfg, context_used,
8311 array_class, MONO_RGCTX_INFO_VTABLE);
8316 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
8318 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
8320 if (cfg->opt & MONO_OPT_SHARED) {
8321 /* Decompose now to avoid problems with references to the domainvar */
8322 MonoInst *iargs [3];
8324 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8325 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8328 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
8330 /* Decompose later since it is needed by abcrem */
8331 MonoClass *array_type = mono_array_class_get (klass, 1);
8332 mono_class_vtable (cfg->domain, array_type);
8333 CHECK_TYPELOAD (array_type);
8335 MONO_INST_NEW (cfg, ins, OP_NEWARR);
8336 ins->dreg = alloc_preg (cfg);
8337 ins->sreg1 = sp [0]->dreg;
8338 ins->inst_newa_class = klass;
8339 ins->type = STACK_OBJ;
8341 MONO_ADD_INS (cfg->cbb, ins);
8342 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8343 cfg->cbb->has_array_access = TRUE;
8345 /* Needed so mono_emit_load_get_addr () gets called */
8346 mono_get_got_var (cfg);
8356 * we inline/optimize the initialization sequence if possible.
8357 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
8358 * for small sizes open code the memcpy
8359 * ensure the rva field is big enough
8361 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
8362 MonoMethod *memcpy_method = get_memcpy_method ();
8363 MonoInst *iargs [3];
8364 int add_reg = alloc_preg (cfg);
8366 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
8367 if (cfg->compile_aot) {
8368 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
8370 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
8372 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
8373 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
8382 if (sp [0]->type != STACK_OBJ)
8385 dreg = alloc_preg (cfg);
8386 MONO_INST_NEW (cfg, ins, OP_LDLEN);
8387 ins->dreg = alloc_preg (cfg);
8388 ins->sreg1 = sp [0]->dreg;
8389 ins->type = STACK_I4;
8390 MONO_ADD_INS (cfg->cbb, ins);
8391 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8392 cfg->cbb->has_array_access = TRUE;
8400 if (sp [0]->type != STACK_OBJ)
8403 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8405 klass = mini_get_class (method, read32 (ip + 1), generic_context);
8406 CHECK_TYPELOAD (klass);
8407 /* we need to make sure that this array is exactly the type it needs
8408 * to be for correctness. the wrappers are lax with their usage
8409 * so we need to ignore them here
8411 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
8412 MonoClass *array_class = mono_array_class_get (klass, 1);
8413 mini_emit_check_array_type (cfg, sp [0], array_class);
8414 CHECK_TYPELOAD (array_class);
8418 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8433 case CEE_LDELEM_REF: {
8439 if (*ip == CEE_LDELEM) {
8441 token = read32 (ip + 1);
8442 klass = mini_get_class (method, token, generic_context);
8443 CHECK_TYPELOAD (klass);
8444 mono_class_init (klass);
8447 klass = array_access_to_klass (*ip);
8449 if (sp [0]->type != STACK_OBJ)
8452 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8454 if (sp [1]->opcode == OP_ICONST) {
8455 int array_reg = sp [0]->dreg;
8456 int index_reg = sp [1]->dreg;
8457 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8459 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8460 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
8462 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8463 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
8466 if (*ip == CEE_LDELEM)
8479 case CEE_STELEM_REF:
8486 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8488 if (*ip == CEE_STELEM) {
8490 token = read32 (ip + 1);
8491 klass = mini_get_class (method, token, generic_context);
8492 CHECK_TYPELOAD (klass);
8493 mono_class_init (klass);
8496 klass = array_access_to_klass (*ip);
8498 if (sp [0]->type != STACK_OBJ)
8501 /* storing a NULL doesn't need any of the complex checks in stelemref */
8502 if (generic_class_is_reference_type (cfg, klass) &&
8503 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
8504 MonoMethod* helper = mono_marshal_get_stelemref ();
8505 MonoInst *iargs [3];
8507 if (sp [0]->type != STACK_OBJ)
8509 if (sp [2]->type != STACK_OBJ)
8516 mono_emit_method_call (cfg, helper, iargs, NULL);
8518 if (sp [1]->opcode == OP_ICONST) {
8519 int array_reg = sp [0]->dreg;
8520 int index_reg = sp [1]->dreg;
8521 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8523 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8524 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
8526 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8527 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
8531 if (*ip == CEE_STELEM)
8538 case CEE_CKFINITE: {
8542 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
8543 ins->sreg1 = sp [0]->dreg;
8544 ins->dreg = alloc_freg (cfg);
8545 ins->type = STACK_R8;
8546 MONO_ADD_INS (bblock, ins);
8548 *sp++ = mono_decompose_opcode (cfg, ins);
8553 case CEE_REFANYVAL: {
8554 MonoInst *src_var, *src;
8556 int klass_reg = alloc_preg (cfg);
8557 int dreg = alloc_preg (cfg);
8560 MONO_INST_NEW (cfg, ins, *ip);
8563 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8564 CHECK_TYPELOAD (klass);
8565 mono_class_init (klass);
8567 if (cfg->generic_sharing_context)
8568 context_used = mono_class_check_context_used (klass);
8571 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
8573 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
8574 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
8575 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
8578 MonoInst *klass_ins;
8580 klass_ins = emit_get_rgctx_klass (cfg, context_used,
8581 klass, MONO_RGCTX_INFO_KLASS);
8584 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
8585 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
8587 mini_emit_class_check (cfg, klass_reg, klass);
8589 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
8590 ins->type = STACK_MP;
8595 case CEE_MKREFANY: {
8596 MonoInst *loc, *addr;
8599 MONO_INST_NEW (cfg, ins, *ip);
8602 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8603 CHECK_TYPELOAD (klass);
8604 mono_class_init (klass);
8606 if (cfg->generic_sharing_context)
8607 context_used = mono_class_check_context_used (klass);
8609 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
8610 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
8613 MonoInst *const_ins;
8614 int type_reg = alloc_preg (cfg);
8616 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
8617 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
8618 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8619 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8620 } else if (cfg->compile_aot) {
8621 int const_reg = alloc_preg (cfg);
8622 int type_reg = alloc_preg (cfg);
8624 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
8625 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
8626 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8627 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8629 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
8630 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
8632 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
8634 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
8635 ins->type = STACK_VTYPE;
8636 ins->klass = mono_defaults.typed_reference_class;
8643 MonoClass *handle_class;
8645 CHECK_STACK_OVF (1);
8648 n = read32 (ip + 1);
8650 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
8651 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
8652 handle = mono_method_get_wrapper_data (method, n);
8653 handle_class = mono_method_get_wrapper_data (method, n + 1);
8654 if (handle_class == mono_defaults.typehandle_class)
8655 handle = &((MonoClass*)handle)->byval_arg;
8658 handle = mono_ldtoken (image, n, &handle_class, generic_context);
8662 mono_class_init (handle_class);
8663 if (cfg->generic_sharing_context) {
8664 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
8665 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
8666 /* This case handles ldtoken
8667 of an open type, like for
8670 } else if (handle_class == mono_defaults.typehandle_class) {
8671 /* If we get a MONO_TYPE_CLASS
8672 then we need to provide the
8674 instantiation of it. */
8675 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
8678 context_used = mono_class_check_context_used (mono_class_from_mono_type (handle));
8679 } else if (handle_class == mono_defaults.fieldhandle_class)
8680 context_used = mono_class_check_context_used (((MonoClassField*)handle)->parent);
8681 else if (handle_class == mono_defaults.methodhandle_class)
8682 context_used = mono_method_check_context_used (handle);
8684 g_assert_not_reached ();
8687 if ((cfg->opt & MONO_OPT_SHARED) &&
8688 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
8689 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
8690 MonoInst *addr, *vtvar, *iargs [3];
8691 int method_context_used;
8693 if (cfg->generic_sharing_context)
8694 method_context_used = mono_method_check_context_used (method);
8696 method_context_used = 0;
8698 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8700 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
8701 EMIT_NEW_ICONST (cfg, iargs [1], n);
8702 if (method_context_used) {
8703 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
8704 method, MONO_RGCTX_INFO_METHOD);
8705 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
8707 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
8708 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
8710 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8712 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8714 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8716 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
8717 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
8718 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
8719 (cmethod->klass == mono_defaults.monotype_class->parent) &&
8720 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
8721 MonoClass *tclass = mono_class_from_mono_type (handle);
8723 mono_class_init (tclass);
8725 ins = emit_get_rgctx_klass (cfg, context_used,
8726 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
8727 } else if (cfg->compile_aot) {
8728 if (method->wrapper_type) {
8729 if (mono_class_get (tclass->image, tclass->type_token) == tclass && !generic_context) {
8730 /* Special case for static synchronized wrappers */
8731 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
8733 /* FIXME: n is not a normal token */
8734 cfg->disable_aot = TRUE;
8735 EMIT_NEW_PCONST (cfg, ins, NULL);
8738 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
8741 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
8743 ins->type = STACK_OBJ;
8744 ins->klass = cmethod->klass;
8747 MonoInst *addr, *vtvar;
8749 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8752 if (handle_class == mono_defaults.typehandle_class) {
8753 ins = emit_get_rgctx_klass (cfg, context_used,
8754 mono_class_from_mono_type (handle),
8755 MONO_RGCTX_INFO_TYPE);
8756 } else if (handle_class == mono_defaults.methodhandle_class) {
8757 ins = emit_get_rgctx_method (cfg, context_used,
8758 handle, MONO_RGCTX_INFO_METHOD);
8759 } else if (handle_class == mono_defaults.fieldhandle_class) {
8760 ins = emit_get_rgctx_field (cfg, context_used,
8761 handle, MONO_RGCTX_INFO_CLASS_FIELD);
8763 g_assert_not_reached ();
8765 } else if (cfg->compile_aot) {
8766 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n);
8768 EMIT_NEW_PCONST (cfg, ins, handle);
8770 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8771 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8772 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8782 MONO_INST_NEW (cfg, ins, OP_THROW);
8784 ins->sreg1 = sp [0]->dreg;
8786 bblock->out_of_line = TRUE;
8787 MONO_ADD_INS (bblock, ins);
8788 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
8789 MONO_ADD_INS (bblock, ins);
8792 link_bblock (cfg, bblock, end_bblock);
8793 start_new_bblock = 1;
8795 case CEE_ENDFINALLY:
8796 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
8797 MONO_ADD_INS (bblock, ins);
8799 start_new_bblock = 1;
8802 * Control will leave the method so empty the stack, otherwise
8803 * the next basic block will start with a nonempty stack.
8805 while (sp != stack_start) {
8813 if (*ip == CEE_LEAVE) {
8815 target = ip + 5 + (gint32)read32(ip + 1);
8818 target = ip + 2 + (signed char)(ip [1]);
8821 /* empty the stack */
8822 while (sp != stack_start) {
8827 * If this leave statement is in a catch block, check for a
8828 * pending exception, and rethrow it if necessary.
8829 * We avoid doing this in runtime invoke wrappers, since those are called
8830 * by native code which expects the wrapper to catch all exceptions.
8832 for (i = 0; i < header->num_clauses; ++i) {
8833 MonoExceptionClause *clause = &header->clauses [i];
8836 * Use <= in the final comparison to handle clauses with multiple
8837 * leave statements, like in bug #78024.
8838 * The ordering of the exception clauses guarantees that we find the
8841 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
8843 MonoBasicBlock *dont_throw;
8848 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
8851 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
8853 NEW_BBLOCK (cfg, dont_throw);
8856 * Currently, we always rethrow the abort exception, despite the
8857 * fact that this is not correct. See thread6.cs for an example.
8858 * But propagating the abort exception is more important than
8859 * getting the semantics right.
8861 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
8862 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
8863 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
8865 MONO_START_BB (cfg, dont_throw);
8870 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
8872 for (tmp = handlers; tmp; tmp = tmp->next) {
8874 link_bblock (cfg, bblock, tblock);
8875 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
8876 ins->inst_target_bb = tblock;
8877 MONO_ADD_INS (bblock, ins);
8878 bblock->has_call_handler = 1;
8879 if (COMPILE_LLVM (cfg)) {
8880 MonoBasicBlock *target_bb;
8883 * Link the finally bblock with the target, since it will
8884 * conceptually branch there.
8885 * FIXME: Have to link the bblock containing the endfinally.
8887 GET_BBLOCK (cfg, target_bb, target);
8888 link_bblock (cfg, tblock, target_bb);
8891 g_list_free (handlers);
8894 MONO_INST_NEW (cfg, ins, OP_BR);
8895 MONO_ADD_INS (bblock, ins);
8896 GET_BBLOCK (cfg, tblock, target);
8897 link_bblock (cfg, bblock, tblock);
8898 ins->inst_target_bb = tblock;
8899 start_new_bblock = 1;
8901 if (*ip == CEE_LEAVE)
8910 * Mono specific opcodes
8912 case MONO_CUSTOM_PREFIX: {
8914 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
8918 case CEE_MONO_ICALL: {
8920 MonoJitICallInfo *info;
8922 token = read32 (ip + 2);
8923 func = mono_method_get_wrapper_data (method, token);
8924 info = mono_find_jit_icall_by_addr (func);
8927 CHECK_STACK (info->sig->param_count);
8928 sp -= info->sig->param_count;
8930 ins = mono_emit_jit_icall (cfg, info->func, sp);
8931 if (!MONO_TYPE_IS_VOID (info->sig->ret))
8935 inline_costs += 10 * num_calls++;
8939 case CEE_MONO_LDPTR: {
8942 CHECK_STACK_OVF (1);
8944 token = read32 (ip + 2);
8946 ptr = mono_method_get_wrapper_data (method, token);
8947 if (cfg->compile_aot && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (strstr (method->name, "__icall_wrapper_") == method->name)) {
8948 MonoJitICallInfo *callinfo;
8949 const char *icall_name;
8951 icall_name = method->name + strlen ("__icall_wrapper_");
8952 g_assert (icall_name);
8953 callinfo = mono_find_jit_icall_by_name (icall_name);
8954 g_assert (callinfo);
8956 if (ptr == callinfo->func) {
8957 /* Will be transformed into an AOTCONST later */
8958 EMIT_NEW_PCONST (cfg, ins, ptr);
8964 /* FIXME: Generalize this */
8965 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
8966 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
8971 EMIT_NEW_PCONST (cfg, ins, ptr);
8974 inline_costs += 10 * num_calls++;
8975 /* Can't embed random pointers into AOT code */
8976 cfg->disable_aot = 1;
8979 case CEE_MONO_ICALL_ADDR: {
8980 MonoMethod *cmethod;
8983 CHECK_STACK_OVF (1);
8985 token = read32 (ip + 2);
8987 cmethod = mono_method_get_wrapper_data (method, token);
8989 if (cfg->compile_aot) {
8990 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
8992 ptr = mono_lookup_internal_call (cmethod);
8994 EMIT_NEW_PCONST (cfg, ins, ptr);
9000 case CEE_MONO_VTADDR: {
9001 MonoInst *src_var, *src;
9007 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9008 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
9013 case CEE_MONO_NEWOBJ: {
9014 MonoInst *iargs [2];
9016 CHECK_STACK_OVF (1);
9018 token = read32 (ip + 2);
9019 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9020 mono_class_init (klass);
9021 NEW_DOMAINCONST (cfg, iargs [0]);
9022 MONO_ADD_INS (cfg->cbb, iargs [0]);
9023 NEW_CLASSCONST (cfg, iargs [1], klass);
9024 MONO_ADD_INS (cfg->cbb, iargs [1]);
9025 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
9027 inline_costs += 10 * num_calls++;
9030 case CEE_MONO_OBJADDR:
9033 MONO_INST_NEW (cfg, ins, OP_MOVE);
9034 ins->dreg = alloc_preg (cfg);
9035 ins->sreg1 = sp [0]->dreg;
9036 ins->type = STACK_MP;
9037 MONO_ADD_INS (cfg->cbb, ins);
9041 case CEE_MONO_LDNATIVEOBJ:
9043 * Similar to LDOBJ, but instead load the unmanaged
9044 * representation of the vtype to the stack.
9049 token = read32 (ip + 2);
9050 klass = mono_method_get_wrapper_data (method, token);
9051 g_assert (klass->valuetype);
9052 mono_class_init (klass);
9055 MonoInst *src, *dest, *temp;
9058 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
9059 temp->backend.is_pinvoke = 1;
9060 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
9061 mini_emit_stobj (cfg, dest, src, klass, TRUE);
9063 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
9064 dest->type = STACK_VTYPE;
9065 dest->klass = klass;
9071 case CEE_MONO_RETOBJ: {
9073 * Same as RET, but return the native representation of a vtype
9076 g_assert (cfg->ret);
9077 g_assert (mono_method_signature (method)->pinvoke);
9082 token = read32 (ip + 2);
9083 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9085 if (!cfg->vret_addr) {
9086 g_assert (cfg->ret_var_is_local);
9088 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
9090 EMIT_NEW_RETLOADA (cfg, ins);
9092 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
9094 if (sp != stack_start)
9097 MONO_INST_NEW (cfg, ins, OP_BR);
9098 ins->inst_target_bb = end_bblock;
9099 MONO_ADD_INS (bblock, ins);
9100 link_bblock (cfg, bblock, end_bblock);
9101 start_new_bblock = 1;
9105 case CEE_MONO_CISINST:
9106 case CEE_MONO_CCASTCLASS: {
9111 token = read32 (ip + 2);
9112 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9113 if (ip [1] == CEE_MONO_CISINST)
9114 ins = handle_cisinst (cfg, klass, sp [0]);
9116 ins = handle_ccastclass (cfg, klass, sp [0]);
9122 case CEE_MONO_SAVE_LMF:
9123 case CEE_MONO_RESTORE_LMF:
9124 #ifdef MONO_ARCH_HAVE_LMF_OPS
9125 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
9126 MONO_ADD_INS (bblock, ins);
9127 cfg->need_lmf_area = TRUE;
9131 case CEE_MONO_CLASSCONST:
9132 CHECK_STACK_OVF (1);
9134 token = read32 (ip + 2);
9135 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
9138 inline_costs += 10 * num_calls++;
9140 case CEE_MONO_NOT_TAKEN:
9141 bblock->out_of_line = TRUE;
9145 CHECK_STACK_OVF (1);
9147 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
9148 ins->dreg = alloc_preg (cfg);
9149 ins->inst_offset = (gint32)read32 (ip + 2);
9150 ins->type = STACK_PTR;
9151 MONO_ADD_INS (bblock, ins);
9155 case CEE_MONO_DYN_CALL: {
9158 /* It would be easier to call a trampoline, but that would put an
9159 * extra frame on the stack, confusing exception handling. So
9160 * implement it inline using an opcode for now.
9163 if (!cfg->dyn_call_var) {
9164 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
9165 /* prevent it from being register allocated */
9166 cfg->dyn_call_var->flags |= MONO_INST_INDIRECT;
9169 /* Has to use a call inst since the local regalloc expects it */
9170 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
9171 ins = (MonoInst*)call;
9173 ins->sreg1 = sp [0]->dreg;
9174 ins->sreg2 = sp [1]->dreg;
9175 MONO_ADD_INS (bblock, ins);
9177 #ifdef MONO_ARCH_DYN_CALL_PARAM_AREA
9178 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
9182 inline_costs += 10 * num_calls++;
9187 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
9197 /* somewhat similar to LDTOKEN */
9198 MonoInst *addr, *vtvar;
9199 CHECK_STACK_OVF (1);
9200 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
9202 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9203 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
9205 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9206 ins->type = STACK_VTYPE;
9207 ins->klass = mono_defaults.argumenthandle_class;
9220 * The following transforms:
9221 * CEE_CEQ into OP_CEQ
9222 * CEE_CGT into OP_CGT
9223 * CEE_CGT_UN into OP_CGT_UN
9224 * CEE_CLT into OP_CLT
9225 * CEE_CLT_UN into OP_CLT_UN
9227 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
9229 MONO_INST_NEW (cfg, ins, cmp->opcode);
9231 cmp->sreg1 = sp [0]->dreg;
9232 cmp->sreg2 = sp [1]->dreg;
9233 type_from_op (cmp, sp [0], sp [1]);
9235 if ((sp [0]->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
9236 cmp->opcode = OP_LCOMPARE;
9237 else if (sp [0]->type == STACK_R8)
9238 cmp->opcode = OP_FCOMPARE;
9240 cmp->opcode = OP_ICOMPARE;
9241 MONO_ADD_INS (bblock, cmp);
9242 ins->type = STACK_I4;
9243 ins->dreg = alloc_dreg (cfg, ins->type);
9244 type_from_op (ins, sp [0], sp [1]);
9246 if (cmp->opcode == OP_FCOMPARE) {
9248 * The backends expect the fceq opcodes to do the
9251 cmp->opcode = OP_NOP;
9252 ins->sreg1 = cmp->sreg1;
9253 ins->sreg2 = cmp->sreg2;
9255 MONO_ADD_INS (bblock, ins);
9262 MonoMethod *cil_method;
9263 gboolean needs_static_rgctx_invoke;
9265 CHECK_STACK_OVF (1);
9267 n = read32 (ip + 2);
9268 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9271 mono_class_init (cmethod->klass);
9273 mono_save_token_info (cfg, image, n, cmethod);
9275 if (cfg->generic_sharing_context)
9276 context_used = mono_method_check_context_used (cmethod);
9278 needs_static_rgctx_invoke = mono_method_needs_static_rgctx_invoke (cmethod, TRUE);
9280 cil_method = cmethod;
9281 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
9282 METHOD_ACCESS_FAILURE;
9284 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9285 if (check_linkdemand (cfg, method, cmethod))
9287 CHECK_CFG_EXCEPTION;
9288 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9289 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9293 * Optimize the common case of ldftn+delegate creation
9295 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE) && !defined(HAVE_WRITE_BARRIERS)
9296 /* FIXME: SGEN support */
9297 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
9298 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
9299 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
9300 MonoInst *target_ins;
9302 int invoke_context_used = 0;
9304 invoke = mono_get_delegate_invoke (ctor_method->klass);
9305 if (!invoke || !mono_method_signature (invoke))
9308 if (cfg->generic_sharing_context)
9309 invoke_context_used = mono_method_check_context_used (invoke);
9311 if (invoke_context_used == 0) {
9313 if (cfg->verbose_level > 3)
9314 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
9315 target_ins = sp [-1];
9317 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used);
9318 CHECK_CFG_EXCEPTION;
9327 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
9328 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
9332 inline_costs += 10 * num_calls++;
9335 case CEE_LDVIRTFTN: {
9340 n = read32 (ip + 2);
9341 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9344 mono_class_init (cmethod->klass);
9346 if (cfg->generic_sharing_context)
9347 context_used = mono_method_check_context_used (cmethod);
9349 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9350 if (check_linkdemand (cfg, method, cmethod))
9352 CHECK_CFG_EXCEPTION;
9353 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9354 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9360 args [1] = emit_get_rgctx_method (cfg, context_used,
9361 cmethod, MONO_RGCTX_INFO_METHOD);
9364 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
9366 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
9369 inline_costs += 10 * num_calls++;
9373 CHECK_STACK_OVF (1);
9375 n = read16 (ip + 2);
9377 EMIT_NEW_ARGLOAD (cfg, ins, n);
9382 CHECK_STACK_OVF (1);
9384 n = read16 (ip + 2);
9386 NEW_ARGLOADA (cfg, ins, n);
9387 MONO_ADD_INS (cfg->cbb, ins);
9395 n = read16 (ip + 2);
9397 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
9399 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
9403 CHECK_STACK_OVF (1);
9405 n = read16 (ip + 2);
9407 EMIT_NEW_LOCLOAD (cfg, ins, n);
9412 unsigned char *tmp_ip;
9413 CHECK_STACK_OVF (1);
9415 n = read16 (ip + 2);
9418 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
9424 EMIT_NEW_LOCLOADA (cfg, ins, n);
9433 n = read16 (ip + 2);
9435 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
9437 emit_stloc_ir (cfg, sp, header, n);
9444 if (sp != stack_start)
9446 if (cfg->method != method)
9448 * Inlining this into a loop in a parent could lead to
9449 * stack overflows which is different behavior than the
9450 * non-inlined case, thus disable inlining in this case.
9452 goto inline_failure;
9454 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
9455 ins->dreg = alloc_preg (cfg);
9456 ins->sreg1 = sp [0]->dreg;
9457 ins->type = STACK_PTR;
9458 MONO_ADD_INS (cfg->cbb, ins);
9460 cfg->flags |= MONO_CFG_HAS_ALLOCA;
9462 ins->flags |= MONO_INST_INIT;
9467 case CEE_ENDFILTER: {
9468 MonoExceptionClause *clause, *nearest;
9469 int cc, nearest_num;
9473 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
9475 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
9476 ins->sreg1 = (*sp)->dreg;
9477 MONO_ADD_INS (bblock, ins);
9478 start_new_bblock = 1;
9483 for (cc = 0; cc < header->num_clauses; ++cc) {
9484 clause = &header->clauses [cc];
9485 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
9486 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
9487 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
9493 if ((ip - header->code) != nearest->handler_offset)
9498 case CEE_UNALIGNED_:
9499 ins_flag |= MONO_INST_UNALIGNED;
9500 /* FIXME: record alignment? we can assume 1 for now */
9505 ins_flag |= MONO_INST_VOLATILE;
9509 ins_flag |= MONO_INST_TAILCALL;
9510 cfg->flags |= MONO_CFG_HAS_TAIL;
9511 /* Can't inline tail calls at this time */
9512 inline_costs += 100000;
9519 token = read32 (ip + 2);
9520 klass = mini_get_class (method, token, generic_context);
9521 CHECK_TYPELOAD (klass);
9522 if (generic_class_is_reference_type (cfg, klass))
9523 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
9525 mini_emit_initobj (cfg, *sp, NULL, klass);
9529 case CEE_CONSTRAINED_:
9531 token = read32 (ip + 2);
9532 if (method->wrapper_type != MONO_WRAPPER_NONE)
9533 constrained_call = (MonoClass *)mono_method_get_wrapper_data (method, token);
9535 constrained_call = mono_class_get_full (image, token, generic_context);
9536 CHECK_TYPELOAD (constrained_call);
9541 MonoInst *iargs [3];
9545 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
9546 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
9547 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
9548 /* emit_memset only works when val == 0 */
9549 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
9554 if (ip [1] == CEE_CPBLK) {
9555 MonoMethod *memcpy_method = get_memcpy_method ();
9556 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
9558 MonoMethod *memset_method = get_memset_method ();
9559 mono_emit_method_call (cfg, memset_method, iargs, NULL);
9569 ins_flag |= MONO_INST_NOTYPECHECK;
9571 ins_flag |= MONO_INST_NORANGECHECK;
9572 /* we ignore the no-nullcheck for now since we
9573 * really do it explicitly only when doing callvirt->call
9579 int handler_offset = -1;
9581 for (i = 0; i < header->num_clauses; ++i) {
9582 MonoExceptionClause *clause = &header->clauses [i];
9583 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
9584 handler_offset = clause->handler_offset;
9589 bblock->flags |= BB_EXCEPTION_UNSAFE;
9591 g_assert (handler_offset != -1);
9593 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
9594 MONO_INST_NEW (cfg, ins, OP_RETHROW);
9595 ins->sreg1 = load->dreg;
9596 MONO_ADD_INS (bblock, ins);
9598 link_bblock (cfg, bblock, end_bblock);
9599 start_new_bblock = 1;
9607 CHECK_STACK_OVF (1);
9609 token = read32 (ip + 2);
9610 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC) {
9611 MonoType *type = mono_type_create_from_typespec (image, token);
9612 token = mono_type_size (type, &ialign);
9614 MonoClass *klass = mono_class_get_full (image, token, generic_context);
9615 CHECK_TYPELOAD (klass);
9616 mono_class_init (klass);
9617 token = mono_class_value_size (klass, &align);
9619 EMIT_NEW_ICONST (cfg, ins, token);
9624 case CEE_REFANYTYPE: {
9625 MonoInst *src_var, *src;
9631 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9633 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
9634 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
9635 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
9653 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
9663 g_warning ("opcode 0x%02x not handled", *ip);
9667 if (start_new_bblock != 1)
9670 bblock->cil_length = ip - bblock->cil_code;
9671 bblock->next_bb = end_bblock;
9673 if (cfg->method == method && cfg->domainvar) {
9675 MonoInst *get_domain;
9677 cfg->cbb = init_localsbb;
9679 if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
9680 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
9683 get_domain->dreg = alloc_preg (cfg);
9684 MONO_ADD_INS (cfg->cbb, get_domain);
9686 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
9687 MONO_ADD_INS (cfg->cbb, store);
9690 #ifdef TARGET_POWERPC
9691 if (cfg->compile_aot)
9692 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
9693 mono_get_got_var (cfg);
9696 if (cfg->method == method && cfg->got_var)
9697 mono_emit_load_got_addr (cfg);
9702 cfg->cbb = init_localsbb;
9704 for (i = 0; i < header->num_locals; ++i) {
9705 MonoType *ptype = header->locals [i];
9706 int t = ptype->type;
9707 dreg = cfg->locals [i]->dreg;
9709 if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
9710 t = mono_class_enum_basetype (ptype->data.klass)->type;
9712 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9713 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
9714 MONO_EMIT_NEW_ICONST (cfg, cfg->locals [i]->dreg, 0);
9715 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
9716 MONO_EMIT_NEW_I8CONST (cfg, cfg->locals [i]->dreg, 0);
9717 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
9718 MONO_INST_NEW (cfg, ins, OP_R8CONST);
9719 ins->type = STACK_R8;
9720 ins->inst_p0 = (void*)&r8_0;
9721 ins->dreg = alloc_dreg (cfg, STACK_R8);
9722 MONO_ADD_INS (init_localsbb, ins);
9723 EMIT_NEW_LOCSTORE (cfg, store, i, ins);
9724 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
9725 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
9726 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
9728 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9733 if (cfg->init_ref_vars && cfg->method == method) {
9734 /* Emit initialization for ref vars */
9735 // FIXME: Avoid duplication initialization for IL locals.
9736 for (i = 0; i < cfg->num_varinfo; ++i) {
9737 MonoInst *ins = cfg->varinfo [i];
9739 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
9740 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
9744 /* Add a sequence point for method entry/exit events */
9746 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
9747 MONO_ADD_INS (init_localsbb, ins);
9748 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
9749 MONO_ADD_INS (cfg->bb_exit, ins);
9754 if (cfg->method == method) {
9756 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
9757 bb->region = mono_find_block_region (cfg, bb->real_offset);
9759 mono_create_spvar_for_region (cfg, bb->region);
9760 if (cfg->verbose_level > 2)
9761 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
9765 g_slist_free (class_inits);
9766 dont_inline = g_list_remove (dont_inline, method);
9768 if (inline_costs < 0) {
9771 /* Method is too large */
9772 mname = mono_method_full_name (method, TRUE);
9773 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
9774 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
9779 if ((cfg->verbose_level > 2) && (cfg->method == method))
9780 mono_print_code (cfg, "AFTER METHOD-TO-IR");
9782 return inline_costs;
9785 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
9786 g_slist_free (class_inits);
9787 mono_basic_block_free (bb);
9788 dont_inline = g_list_remove (dont_inline, method);
9792 g_slist_free (class_inits);
9793 mono_basic_block_free (bb);
9794 dont_inline = g_list_remove (dont_inline, method);
9798 g_slist_free (class_inits);
9799 mono_basic_block_free (bb);
9800 dont_inline = g_list_remove (dont_inline, method);
9801 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
9805 g_slist_free (class_inits);
9806 mono_basic_block_free (bb);
9807 dont_inline = g_list_remove (dont_inline, method);
9808 set_exception_type_from_invalid_il (cfg, method, ip);
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Map a STORE*_MEMBASE_REG opcode to the STORE*_MEMBASE_IMM opcode of the
 * same operand width, so a register store whose source value is known to be
 * a constant can be rewritten as an immediate store.  Any opcode with no
 * immediate counterpart trips the assertion below.
 * NOTE(review): the switch scaffolding around these cases is elided in this
 * excerpt — confirm against the full source.
 */
9813 store_membase_reg_to_store_membase_imm (int opcode)
/* Width-preserving REG -> IMM mapping, one case per store width. */
9816 case OP_STORE_MEMBASE_REG:
9817 return OP_STORE_MEMBASE_IMM;
9818 case OP_STOREI1_MEMBASE_REG:
9819 return OP_STOREI1_MEMBASE_IMM;
9820 case OP_STOREI2_MEMBASE_REG:
9821 return OP_STOREI2_MEMBASE_IMM;
9822 case OP_STOREI4_MEMBASE_REG:
9823 return OP_STOREI4_MEMBASE_IMM;
9824 case OP_STOREI8_MEMBASE_REG:
9825 return OP_STOREI8_MEMBASE_IMM;
/* Unknown store opcode: no immediate form exists. */
9827 g_assert_not_reached ();
9833 #endif /* DISABLE_JIT */
/*
 * mono_op_to_op_imm:
 *
 *   Map a two-register ALU/compare/store/push opcode to its variant taking an
 * immediate second operand.  Used when the local optimizer discovers that an
 * operand is a constant.  The case labels for most entries are elided in this
 * excerpt; only the return values are visible.
 */
9836 mono_op_to_op_imm (int opcode)
/* Integer division/remainder and shifts (unsigned variants shown). */
9846 return OP_IDIV_UN_IMM;
9850 return OP_IREM_UN_IMM;
9864 return OP_ISHR_UN_IMM;
9881 return OP_LSHR_UN_IMM;
/* Compares: native-word, 32-bit and 64-bit forms. */
9884 return OP_COMPARE_IMM;
9886 return OP_ICOMPARE_IMM;
9888 return OP_LCOMPARE_IMM;
/* Memory stores: delegate each width to its immediate form. */
9890 case OP_STORE_MEMBASE_REG:
9891 return OP_STORE_MEMBASE_IMM;
9892 case OP_STOREI1_MEMBASE_REG:
9893 return OP_STOREI1_MEMBASE_IMM;
9894 case OP_STOREI2_MEMBASE_REG:
9895 return OP_STOREI2_MEMBASE_IMM;
9896 case OP_STOREI4_MEMBASE_REG:
9897 return OP_STOREI4_MEMBASE_IMM;
/* x86/amd64-only opcodes; guarded so other targets never see them. */
9899 #if defined(TARGET_X86) || defined (TARGET_AMD64)
9901 return OP_X86_PUSH_IMM;
9902 case OP_X86_COMPARE_MEMBASE_REG:
9903 return OP_X86_COMPARE_MEMBASE_IMM;
9905 #if defined(TARGET_AMD64)
9906 case OP_AMD64_ICOMPARE_MEMBASE_REG:
9907 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
/* Calls through a register and localloc also have immediate forms. */
9909 case OP_VOIDCALL_REG:
9918 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Map a CIL LDIND_* (load-indirect) opcode to the corresponding typed
 * OP_LOAD*_MEMBASE IR opcode.  The CEE_LDIND_* case labels are elided in
 * this excerpt; each return below pairs with one of them in order
 * (I1, U1, I2, U2, I4, U4, I/REF, I8, R4, R8).  Asserts on anything else.
 */
9925 ldind_to_load_membase (int opcode)
9929 return OP_LOADI1_MEMBASE;
9931 return OP_LOADU1_MEMBASE;
9933 return OP_LOADI2_MEMBASE;
9935 return OP_LOADU2_MEMBASE;
9937 return OP_LOADI4_MEMBASE;
9939 return OP_LOADU4_MEMBASE;
/* Native-word loads: both the pointer-sized int and object-ref cases
 * map to the generic OP_LOAD_MEMBASE. */
9941 return OP_LOAD_MEMBASE;
9943 return OP_LOAD_MEMBASE;
9945 return OP_LOADI8_MEMBASE;
9947 return OP_LOADR4_MEMBASE;
9949 return OP_LOADR8_MEMBASE;
9951 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Map a CIL STIND_* (store-indirect) opcode to the corresponding typed
 * OP_STORE*_MEMBASE_REG IR opcode.  The CEE_STIND_* case labels are elided
 * in this excerpt; the returns below pair with them in order
 * (I1, I2, I4, I/REF, I8, R4, R8).  Asserts on anything else.
 */
9958 stind_to_store_membase (int opcode)
9962 return OP_STOREI1_MEMBASE_REG;
9964 return OP_STOREI2_MEMBASE_REG;
9966 return OP_STOREI4_MEMBASE_REG;
/* Native-word store covers both pointer-sized int and object refs. */
9969 return OP_STORE_MEMBASE_REG;
9971 return OP_STOREI8_MEMBASE_REG;
9973 return OP_STORER4_MEMBASE_REG;
9975 return OP_STORER8_MEMBASE_REG;
9977 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map an OP_LOAD*_MEMBASE opcode (base register + offset addressing) to
 * the absolute-address OP_LOAD*_MEM form, on targets that support it.
 * Presumably returns -1 for unsupported opcodes/targets (the fall-through
 * return is elided in this excerpt) — confirm against the full source.
 */
9984 mono_load_membase_to_load_mem (int opcode)
9986 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
/* Only x86/amd64 have absolute-address load forms. */
9987 #if defined(TARGET_X86) || defined(TARGET_AMD64)
9989 case OP_LOAD_MEMBASE:
9991 case OP_LOADU1_MEMBASE:
9992 return OP_LOADU1_MEM;
9993 case OP_LOADU2_MEMBASE:
9994 return OP_LOADU2_MEM;
9995 case OP_LOADI4_MEMBASE:
9996 return OP_LOADI4_MEM;
9997 case OP_LOADU4_MEMBASE:
9998 return OP_LOADU4_MEM;
/* 64-bit loads only exist as a single instruction on 64-bit targets. */
9999 #if SIZEOF_REGISTER == 8
10000 case OP_LOADI8_MEMBASE:
10001 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   Given an ALU opcode whose result is immediately stored back with
 * STORE_OPCODE, return the x86/amd64 read-modify-write opcode that operates
 * directly on the memory destination, allowing the load+op+store sequence to
 * be fused into a single instruction.  Presumably returns -1 when no fused
 * form applies (fall-through elided in this excerpt) — callers such as
 * mono_spill_global_vars compare the result against -1.
 */
10010 op_to_op_dest_membase (int store_opcode, int opcode)
10012 #if defined(TARGET_X86)
/* x86: only native-word / 32-bit stores can be fused. */
10013 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
/* Register-source forms... */
10018 return OP_X86_ADD_MEMBASE_REG;
10020 return OP_X86_SUB_MEMBASE_REG;
10022 return OP_X86_AND_MEMBASE_REG;
10024 return OP_X86_OR_MEMBASE_REG;
10026 return OP_X86_XOR_MEMBASE_REG;
/* ...and immediate-source forms. */
10029 return OP_X86_ADD_MEMBASE_IMM;
10032 return OP_X86_SUB_MEMBASE_IMM;
10035 return OP_X86_AND_MEMBASE_IMM;
10038 return OP_X86_OR_MEMBASE_IMM;
10041 return OP_X86_XOR_MEMBASE_IMM;
10047 #if defined(TARGET_AMD64)
/* amd64 additionally fuses 64-bit stores. */
10048 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
/* 32-bit ops reuse the X86-prefixed opcodes... */
10053 return OP_X86_ADD_MEMBASE_REG;
10055 return OP_X86_SUB_MEMBASE_REG;
10057 return OP_X86_AND_MEMBASE_REG;
10059 return OP_X86_OR_MEMBASE_REG;
10061 return OP_X86_XOR_MEMBASE_REG;
10063 return OP_X86_ADD_MEMBASE_IMM;
10065 return OP_X86_SUB_MEMBASE_IMM;
10067 return OP_X86_AND_MEMBASE_IMM;
10069 return OP_X86_OR_MEMBASE_IMM;
10071 return OP_X86_XOR_MEMBASE_IMM;
/* ...while 64-bit ops use the AMD64-prefixed ones. */
10073 return OP_AMD64_ADD_MEMBASE_REG;
10075 return OP_AMD64_SUB_MEMBASE_REG;
10077 return OP_AMD64_AND_MEMBASE_REG;
10079 return OP_AMD64_OR_MEMBASE_REG;
10081 return OP_AMD64_XOR_MEMBASE_REG;
10084 return OP_AMD64_ADD_MEMBASE_IMM;
10087 return OP_AMD64_SUB_MEMBASE_IMM;
10090 return OP_AMD64_AND_MEMBASE_IMM;
10093 return OP_AMD64_OR_MEMBASE_IMM;
10096 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Fuse a compare-and-set (SETcc) result with the 1-byte store that
 * consumes it into a single x86/amd64 SETcc-to-memory opcode.  Presumably
 * returns -1 when no fused form applies (fall-through elided in this
 * excerpt).  The CEQ/CNE case labels selecting between the two branches
 * below are also elided.
 */
10106 op_to_op_store_membase (int store_opcode, int opcode)
10108 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* SETcc writes a single byte, so only 1-byte stores can be fused. */
10111 if (store_opcode == OP_STOREI1_MEMBASE_REG)
10112 return OP_X86_SETEQ_MEMBASE;
10114 if (store_opcode == OP_STOREI1_MEMBASE_REG)
10115 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Given an opcode whose first source operand comes from a memory load
 * performed with LOAD_OPCODE, return the x86/amd64 opcode that reads that
 * operand directly from memory, fusing load + use into one instruction.
 * Presumably returns -1 when no fused form applies (fall-through elided in
 * this excerpt).
 */
10123 op_to_op_src1_membase (int load_opcode, int opcode)
10126 /* FIXME: This has sign extension issues */
/* x86: special-case an 8-bit unsigned load feeding a 32-bit compare. */
10128 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
10129 return OP_X86_COMPARE_MEMBASE8_IMM;
/* Otherwise only native-word / 32-bit loads can be fused on x86. */
10132 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
10137 return OP_X86_PUSH_MEMBASE;
10138 case OP_COMPARE_IMM:
10139 case OP_ICOMPARE_IMM:
10140 return OP_X86_COMPARE_MEMBASE_IMM;
10143 return OP_X86_COMPARE_MEMBASE_REG;
10147 #ifdef TARGET_AMD64
10148 /* FIXME: This has sign extension issues */
10150 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
10151 return OP_X86_COMPARE_MEMBASE8_IMM;
/* amd64: PUSH takes a full 64-bit word. */
10156 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10157 return OP_X86_PUSH_MEMBASE;
/* Intentionally disabled: imm fusion is unsafe for 64-bit immediates. */
10159 /* FIXME: This only works for 32 bit immediates
10160 case OP_COMPARE_IMM:
10161 case OP_LCOMPARE_IMM:
10162 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10163 return OP_AMD64_COMPARE_MEMBASE_IMM;
10165 case OP_ICOMPARE_IMM:
10166 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10167 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
/* Register compares: pick the 32- or 64-bit fused form by load width. */
10171 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10172 return OP_AMD64_COMPARE_MEMBASE_REG;
10175 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10176 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Counterpart of op_to_op_src1_membase for the SECOND source operand:
 * given an opcode whose sreg2 value comes from a memory load performed with
 * LOAD_OPCODE, return the x86/amd64 reg-op-memory opcode that reads the
 * operand directly from memory.  Presumably returns -1 when no fused form
 * applies (fall-through elided in this excerpt).
 */
10185 op_to_op_src2_membase (int load_opcode, int opcode)
/* x86: only native-word / 32-bit loads can be fused. */
10188 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
10194 return OP_X86_COMPARE_REG_MEMBASE;
10196 return OP_X86_ADD_REG_MEMBASE;
10198 return OP_X86_SUB_REG_MEMBASE;
10200 return OP_X86_AND_REG_MEMBASE;
10202 return OP_X86_OR_REG_MEMBASE;
10204 return OP_X86_XOR_REG_MEMBASE;
10208 #ifdef TARGET_AMD64
/* amd64: each op picks the 32- or 64-bit fused form by load width. */
10211 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10212 return OP_AMD64_ICOMPARE_REG_MEMBASE;
10216 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10217 return OP_AMD64_COMPARE_REG_MEMBASE;
10220 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10221 return OP_X86_ADD_REG_MEMBASE;
10223 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10224 return OP_X86_SUB_REG_MEMBASE;
10226 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10227 return OP_X86_AND_REG_MEMBASE;
10229 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10230 return OP_X86_OR_REG_MEMBASE;
10232 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10233 return OP_X86_XOR_REG_MEMBASE;
10235 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10236 return OP_AMD64_ADD_REG_MEMBASE;
10238 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10239 return OP_AMD64_SUB_REG_MEMBASE;
10241 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10242 return OP_AMD64_AND_REG_MEMBASE;
10244 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10245 return OP_AMD64_OR_REG_MEMBASE;
10247 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10248 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm, but refuses (the case labels are elided in this
 * excerpt; presumably by returning -1) opcodes whose immediate form would be
 * software-emulated on this target: 64-bit shifts on 32-bit targets without
 * native long shifts, and mul/div when the architecture emulates them.
 */
10256 mono_op_to_op_imm_noemul (int opcode)
10259 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
10264 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
/* Everything else is safe to hand to the generic mapping. */
10272 return mono_op_to_op_imm (opcode);
10276 #ifndef DISABLE_JIT
10279 * mono_handle_global_vregs:
10281 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
/*
 * mono_handle_global_vregs:
 *
 *   Decide which virtual registers must be backed by a real variable: any
 * vreg referenced from more than one basic block becomes "global" (gets a
 * MonoInst variable via mono_compile_create_var_for_vreg), while variables
 * confined to a single bblock are demoted to plain local vregs.  Finally the
 * varinfo/vars tables are compacted to drop the dead entries.
 */
10285 mono_handle_global_vregs (MonoCompile *cfg)
10287 gint32 *vreg_to_bb;
10288 MonoBasicBlock *bb;
/* vreg_to_bb [vreg]: 0 = unseen, N+1 = seen only in bblock N, -1 = seen
 * in multiple bblocks.
 * NOTE(review): the allocation uses sizeof (gint32*) — pointer size, not
 * sizeof (gint32) — and the "+ 1" binds outside the multiplication, adding
 * one byte rather than one element.  Over-allocates on 64-bit, but looks
 * unintended; confirm against upstream. */
10291 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
10293 #ifdef MONO_ARCH_SIMD_INTRINSICS
10294 if (cfg->uses_simd_intrinsics)
10295 mono_simd_simplify_indirection (cfg);
/* Pass 1: walk every instruction of every bblock and record, per vreg,
 * whether it is used in one bblock or several. */
10298 /* Find local vregs used in more than one bb */
10299 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10300 MonoInst *ins = bb->code;
10301 int block_num = bb->block_num;
10303 if (cfg->verbose_level > 2)
10304 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
10307 for (; ins; ins = ins->next) {
10308 const char *spec = INS_INFO (ins->opcode);
10309 int regtype = 0, regindex;
10312 if (G_UNLIKELY (cfg->verbose_level > 2))
10313 mono_print_ins (ins);
/* CIL-level opcodes must have been lowered to machine IR by now. */
10315 g_assert (ins->opcode >= MONO_CEE_LAST);
/* Examine dest, src1, src2 and src3 of the instruction in turn;
 * a ' ' in the spec means the slot is unused. */
10317 for (regindex = 0; regindex < 4; regindex ++) {
10320 if (regindex == 0) {
10321 regtype = spec [MONO_INST_DEST];
10322 if (regtype == ' ')
10325 } else if (regindex == 1) {
10326 regtype = spec [MONO_INST_SRC1];
10327 if (regtype == ' ')
10330 } else if (regindex == 2) {
10331 regtype = spec [MONO_INST_SRC2];
10332 if (regtype == ' ')
10335 } else if (regindex == 3) {
10336 regtype = spec [MONO_INST_SRC3];
10337 if (regtype == ' ')
/* 32-bit targets: a 64-bit vreg is split into two component vregs
 * (vreg+1 / vreg+2), so treat longs conservatively. */
10342 #if SIZEOF_REGISTER == 4
10343 /* In the LLVM case, the long opcodes are not decomposed */
10344 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
10346 * Since some instructions reference the original long vreg,
10347 * and some reference the two component vregs, it is quite hard
10348 * to determine when it needs to be global. So be conservative.
10350 if (!get_vreg_to_inst (cfg, vreg)) {
10351 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
10353 if (cfg->verbose_level > 2)
10354 printf ("LONG VREG R%d made global.\n", vreg);
10358 * Make the component vregs volatile since the optimizations can
10359 * get confused otherwise.
10361 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
10362 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
10366 g_assert (vreg != -1);
10368 prev_bb = vreg_to_bb [vreg];
10369 if (prev_bb == 0) {
10370 /* 0 is a valid block num */
10371 vreg_to_bb [vreg] = block_num + 1;
10372 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* Hardware registers are global by definition; skip them. */
10373 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
/* First time this vreg is seen in a second bblock: create a
 * variable of the matching storage type for it. */
10376 if (!get_vreg_to_inst (cfg, vreg)) {
10377 if (G_UNLIKELY (cfg->verbose_level > 2))
10378 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
10382 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
10385 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
10388 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
10391 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
10394 g_assert_not_reached ();
10398 /* Flag as having been used in more than one bb */
10399 vreg_to_bb [vreg] = -1;
/* Pass 2: demote variables confined to one bblock to plain lvregs,
 * so the local register allocator can handle them. */
10405 /* If a variable is used in only one bblock, convert it into a local vreg */
10406 for (i = 0; i < cfg->num_varinfo; i++) {
10407 MonoInst *var = cfg->varinfo [i];
10408 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
10410 switch (var->type) {
10416 #if SIZEOF_REGISTER == 8
10419 #if !defined(TARGET_X86) && !defined(MONO_ARCH_SOFT_FLOAT)
10420 /* Enabling this screws up the fp stack on x86 */
10423 /* Arguments are implicitly global */
10424 /* Putting R4 vars into registers doesn't work currently */
10425 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg) {
10427 * Make that the variable's liveness interval doesn't contain a call, since
10428 * that would cause the lvreg to be spilled, making the whole optimization
10431 /* This is too slow for JIT compilation */
10433 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
10435 int def_index, call_index, ins_index;
10436 gboolean spilled = FALSE;
/* Scan the bblock: if a call falls between the def and a use,
 * the lvreg would be spilled anyway, so keep the variable. */
10441 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
10442 const char *spec = INS_INFO (ins->opcode);
10444 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
10445 def_index = ins_index;
/* NOTE(review): both clauses below test MONO_INST_SRC1/sreg1; the
 * second presumably should test MONO_INST_SRC2/sreg2, otherwise
 * uses through sreg2 are never seen here — confirm upstream. */
10447 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
10448 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
10449 if (call_index > def_index) {
10455 if (MONO_IS_CALL (ins))
10456 call_index = ins_index;
10466 if (G_UNLIKELY (cfg->verbose_level > 2))
10467 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
/* Kill the variable; its vreg becomes a plain local vreg. */
10468 var->flags |= MONO_INST_IS_DEAD;
10469 cfg->vreg_to_inst [var->dreg] = NULL;
/* Pass 3: compact varinfo/vars, dropping MONO_INST_IS_DEAD entries and
 * renumbering the survivors in place. */
10476 * Compress the varinfo and vars tables so the liveness computation is faster and
10477 * takes up less space.
10480 for (i = 0; i < cfg->num_varinfo; ++i) {
10481 MonoInst *var = cfg->varinfo [i];
10482 if (pos < i && cfg->locals_start == i)
10483 cfg->locals_start = pos;
10484 if (!(var->flags & MONO_INST_IS_DEAD)) {
10486 cfg->varinfo [pos] = cfg->varinfo [i];
10487 cfg->varinfo [pos]->inst_c0 = pos;
10488 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
10489 cfg->vars [pos].idx = pos;
10490 #if SIZEOF_REGISTER == 4
10491 if (cfg->varinfo [pos]->type == STACK_I8) {
10492 /* Modify the two component vars too */
10495 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
10496 var1->inst_c0 = pos;
10497 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
10498 var1->inst_c0 = pos;
10505 cfg->num_varinfo = pos;
10506 if (cfg->locals_start > cfg->num_varinfo)
10507 cfg->locals_start = cfg->num_varinfo;
10511 * mono_spill_global_vars:
10513 * Generate spill code for variables which are not allocated to registers,
10514 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
10515 * code is generated which could be optimized by the local optimization passes.
10518 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
10520 MonoBasicBlock *bb;
10522 int orig_next_vreg;
10523 guint32 *vreg_to_lvreg;
10525 guint32 i, lvregs_len;
10526 gboolean dest_has_lvreg = FALSE;
10527 guint32 stacktypes [128];
10528 MonoInst **live_range_start, **live_range_end;
10529 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
10531 *need_local_opts = FALSE;
10533 memset (spec2, 0, sizeof (spec2));
10535 /* FIXME: Move this function to mini.c */
10536 stacktypes ['i'] = STACK_PTR;
10537 stacktypes ['l'] = STACK_I8;
10538 stacktypes ['f'] = STACK_R8;
10539 #ifdef MONO_ARCH_SIMD_INTRINSICS
10540 stacktypes ['x'] = STACK_VTYPE;
10543 #if SIZEOF_REGISTER == 4
10544 /* Create MonoInsts for longs */
10545 for (i = 0; i < cfg->num_varinfo; i++) {
10546 MonoInst *ins = cfg->varinfo [i];
10548 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
10549 switch (ins->type) {
10554 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
10557 g_assert (ins->opcode == OP_REGOFFSET);
10559 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
10561 tree->opcode = OP_REGOFFSET;
10562 tree->inst_basereg = ins->inst_basereg;
10563 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
10565 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
10567 tree->opcode = OP_REGOFFSET;
10568 tree->inst_basereg = ins->inst_basereg;
10569 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
10579 /* FIXME: widening and truncation */
10582 * As an optimization, when a variable allocated to the stack is first loaded into
10583 * an lvreg, we will remember the lvreg and use it the next time instead of loading
10584 * the variable again.
10586 orig_next_vreg = cfg->next_vreg;
10587 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
10588 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
10592 * These arrays contain the first and last instructions accessing a given
10594 * Since we emit bblocks in the same order we process them here, and we
10595 * don't split live ranges, these will precisely describe the live range of
10596 * the variable, i.e. the instruction range where a valid value can be found
10597 * in the variables location.
10598 * The live range is computed using the liveness info computed by the liveness pass.
10599 * We can't use vmv->range, since that is an abstract live range, and we need
10600 * one which is instruction precise.
10601 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
10603 /* FIXME: Only do this if debugging info is requested */
10604 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
10605 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
10606 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
10607 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
10609 /* Add spill loads/stores */
10610 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10613 if (cfg->verbose_level > 2)
10614 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
10616 /* Clear vreg_to_lvreg array */
10617 for (i = 0; i < lvregs_len; i++)
10618 vreg_to_lvreg [lvregs [i]] = 0;
10622 MONO_BB_FOR_EACH_INS (bb, ins) {
10623 const char *spec = INS_INFO (ins->opcode);
10624 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
10625 gboolean store, no_lvreg;
10626 int sregs [MONO_MAX_SRC_REGS];
10628 if (G_UNLIKELY (cfg->verbose_level > 2))
10629 mono_print_ins (ins);
10631 if (ins->opcode == OP_NOP)
10635 * We handle LDADDR here as well, since it can only be decomposed
10636 * when variable addresses are known.
10638 if (ins->opcode == OP_LDADDR) {
10639 MonoInst *var = ins->inst_p0;
10641 if (var->opcode == OP_VTARG_ADDR) {
10642 /* Happens on SPARC/S390 where vtypes are passed by reference */
10643 MonoInst *vtaddr = var->inst_left;
10644 if (vtaddr->opcode == OP_REGVAR) {
10645 ins->opcode = OP_MOVE;
10646 ins->sreg1 = vtaddr->dreg;
10648 else if (var->inst_left->opcode == OP_REGOFFSET) {
10649 ins->opcode = OP_LOAD_MEMBASE;
10650 ins->inst_basereg = vtaddr->inst_basereg;
10651 ins->inst_offset = vtaddr->inst_offset;
10655 g_assert (var->opcode == OP_REGOFFSET);
10657 ins->opcode = OP_ADD_IMM;
10658 ins->sreg1 = var->inst_basereg;
10659 ins->inst_imm = var->inst_offset;
10662 *need_local_opts = TRUE;
10663 spec = INS_INFO (ins->opcode);
10666 if (ins->opcode < MONO_CEE_LAST) {
10667 mono_print_ins (ins);
10668 g_assert_not_reached ();
10672 * Store opcodes have destbasereg in the dreg, but in reality, it is an
10676 if (MONO_IS_STORE_MEMBASE (ins)) {
10677 tmp_reg = ins->dreg;
10678 ins->dreg = ins->sreg2;
10679 ins->sreg2 = tmp_reg;
10682 spec2 [MONO_INST_DEST] = ' ';
10683 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10684 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10685 spec2 [MONO_INST_SRC3] = ' ';
10687 } else if (MONO_IS_STORE_MEMINDEX (ins))
10688 g_assert_not_reached ();
10693 if (G_UNLIKELY (cfg->verbose_level > 2)) {
10694 printf ("\t %.3s %d", spec, ins->dreg);
10695 num_sregs = mono_inst_get_src_registers (ins, sregs);
10696 for (srcindex = 0; srcindex < 3; ++srcindex)
10697 printf (" %d", sregs [srcindex]);
10704 regtype = spec [MONO_INST_DEST];
10705 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
10708 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
10709 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
10710 MonoInst *store_ins;
10712 MonoInst *def_ins = ins;
10713 int dreg = ins->dreg; /* The original vreg */
10715 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
10717 if (var->opcode == OP_REGVAR) {
10718 ins->dreg = var->dreg;
10719 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
10721 * Instead of emitting a load+store, use a _membase opcode.
10723 g_assert (var->opcode == OP_REGOFFSET);
10724 if (ins->opcode == OP_MOVE) {
10728 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
10729 ins->inst_basereg = var->inst_basereg;
10730 ins->inst_offset = var->inst_offset;
10733 spec = INS_INFO (ins->opcode);
10737 g_assert (var->opcode == OP_REGOFFSET);
10739 prev_dreg = ins->dreg;
10741 /* Invalidate any previous lvreg for this vreg */
10742 vreg_to_lvreg [ins->dreg] = 0;
10746 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
10748 store_opcode = OP_STOREI8_MEMBASE_REG;
10751 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
10753 if (regtype == 'l') {
10754 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
10755 mono_bblock_insert_after_ins (bb, ins, store_ins);
10756 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
10757 mono_bblock_insert_after_ins (bb, ins, store_ins);
10758 def_ins = store_ins;
10761 g_assert (store_opcode != OP_STOREV_MEMBASE);
10763 /* Try to fuse the store into the instruction itself */
10764 /* FIXME: Add more instructions */
10765 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
10766 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
10767 ins->inst_imm = ins->inst_c0;
10768 ins->inst_destbasereg = var->inst_basereg;
10769 ins->inst_offset = var->inst_offset;
10770 spec = INS_INFO (ins->opcode);
10771 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
10772 ins->opcode = store_opcode;
10773 ins->inst_destbasereg = var->inst_basereg;
10774 ins->inst_offset = var->inst_offset;
10778 tmp_reg = ins->dreg;
10779 ins->dreg = ins->sreg2;
10780 ins->sreg2 = tmp_reg;
10783 spec2 [MONO_INST_DEST] = ' ';
10784 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10785 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10786 spec2 [MONO_INST_SRC3] = ' ';
10788 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
10789 // FIXME: The backends expect the base reg to be in inst_basereg
10790 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
10792 ins->inst_basereg = var->inst_basereg;
10793 ins->inst_offset = var->inst_offset;
10794 spec = INS_INFO (ins->opcode);
10796 /* printf ("INS: "); mono_print_ins (ins); */
10797 /* Create a store instruction */
10798 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
10800 /* Insert it after the instruction */
10801 mono_bblock_insert_after_ins (bb, ins, store_ins);
10803 def_ins = store_ins;
10806 * We can't assign ins->dreg to var->dreg here, since the
10807 * sregs could use it. So set a flag, and do it after
10810 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
10811 dest_has_lvreg = TRUE;
10816 if (def_ins && !live_range_start [dreg]) {
10817 live_range_start [dreg] = def_ins;
10818 live_range_start_bb [dreg] = bb;
10825 num_sregs = mono_inst_get_src_registers (ins, sregs);
10826 for (srcindex = 0; srcindex < 3; ++srcindex) {
10827 regtype = spec [MONO_INST_SRC1 + srcindex];
10828 sreg = sregs [srcindex];
10830 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
10831 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
10832 MonoInst *var = get_vreg_to_inst (cfg, sreg);
10833 MonoInst *use_ins = ins;
10834 MonoInst *load_ins;
10835 guint32 load_opcode;
10837 if (var->opcode == OP_REGVAR) {
10838 sregs [srcindex] = var->dreg;
10839 //mono_inst_set_src_registers (ins, sregs);
10840 live_range_end [sreg] = use_ins;
10841 live_range_end_bb [sreg] = bb;
10845 g_assert (var->opcode == OP_REGOFFSET);
10847 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
10849 g_assert (load_opcode != OP_LOADV_MEMBASE);
10851 if (vreg_to_lvreg [sreg]) {
10852 g_assert (vreg_to_lvreg [sreg] != -1);
10854 /* The variable is already loaded to an lvreg */
10855 if (G_UNLIKELY (cfg->verbose_level > 2))
10856 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
10857 sregs [srcindex] = vreg_to_lvreg [sreg];
10858 //mono_inst_set_src_registers (ins, sregs);
10862 /* Try to fuse the load into the instruction */
10863 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
10864 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
10865 sregs [0] = var->inst_basereg;
10866 //mono_inst_set_src_registers (ins, sregs);
10867 ins->inst_offset = var->inst_offset;
10868 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
10869 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
10870 sregs [1] = var->inst_basereg;
10871 //mono_inst_set_src_registers (ins, sregs);
10872 ins->inst_offset = var->inst_offset;
10874 if (MONO_IS_REAL_MOVE (ins)) {
10875 ins->opcode = OP_NOP;
10878 //printf ("%d ", srcindex); mono_print_ins (ins);
10880 sreg = alloc_dreg (cfg, stacktypes [regtype]);
10882 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
10883 if (var->dreg == prev_dreg) {
10885 * sreg refers to the value loaded by the load
10886 * emitted below, but we need to use ins->dreg
10887 * since it refers to the store emitted earlier.
10891 g_assert (sreg != -1);
10892 vreg_to_lvreg [var->dreg] = sreg;
10893 g_assert (lvregs_len < 1024);
10894 lvregs [lvregs_len ++] = var->dreg;
10898 sregs [srcindex] = sreg;
10899 //mono_inst_set_src_registers (ins, sregs);
10901 if (regtype == 'l') {
10902 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
10903 mono_bblock_insert_before_ins (bb, ins, load_ins);
10904 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
10905 mono_bblock_insert_before_ins (bb, ins, load_ins);
10906 use_ins = load_ins;
10909 #if SIZEOF_REGISTER == 4
10910 g_assert (load_opcode != OP_LOADI8_MEMBASE);
10912 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
10913 mono_bblock_insert_before_ins (bb, ins, load_ins);
10914 use_ins = load_ins;
10918 if (var->dreg < orig_next_vreg) {
10919 live_range_end [var->dreg] = use_ins;
10920 live_range_end_bb [var->dreg] = bb;
10924 mono_inst_set_src_registers (ins, sregs);
10926 if (dest_has_lvreg) {
10927 g_assert (ins->dreg != -1);
10928 vreg_to_lvreg [prev_dreg] = ins->dreg;
10929 g_assert (lvregs_len < 1024);
10930 lvregs [lvregs_len ++] = prev_dreg;
10931 dest_has_lvreg = FALSE;
10935 tmp_reg = ins->dreg;
10936 ins->dreg = ins->sreg2;
10937 ins->sreg2 = tmp_reg;
10940 if (MONO_IS_CALL (ins)) {
10941 /* Clear vreg_to_lvreg array */
10942 for (i = 0; i < lvregs_len; i++)
10943 vreg_to_lvreg [lvregs [i]] = 0;
10945 } else if (ins->opcode == OP_NOP) {
10947 MONO_INST_NULLIFY_SREGS (ins);
10950 if (cfg->verbose_level > 2)
10951 mono_print_ins_index (1, ins);
10954 /* Extend the live range based on the liveness info */
10955 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
10956 for (i = 0; i < cfg->num_varinfo; i ++) {
10957 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
10959 if (vreg_is_volatile (cfg, vi->vreg))
10960 /* The liveness info is incomplete */
10963 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
10964 /* Live from at least the first ins of this bb */
10965 live_range_start [vi->vreg] = bb->code;
10966 live_range_start_bb [vi->vreg] = bb;
10969 if (mono_bitset_test_fast (bb->live_out_set, i)) {
10970 /* Live at least until the last ins of this bb */
10971 live_range_end [vi->vreg] = bb->last_ins;
10972 live_range_end_bb [vi->vreg] = bb;
10978 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
10980 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
10981 * by storing the current native offset into MonoMethodVar->live_range_start/end.
10983 if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
10984 for (i = 0; i < cfg->num_varinfo; ++i) {
10985 int vreg = MONO_VARINFO (cfg, i)->vreg;
10988 if (live_range_start [vreg]) {
10989 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
10991 ins->inst_c1 = vreg;
10992 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
10994 if (live_range_end [vreg]) {
10995 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
10997 ins->inst_c1 = vreg;
10998 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
10999 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
11001 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
11007 g_free (live_range_start);
11008 g_free (live_range_end);
11009 g_free (live_range_start_bb);
11010 g_free (live_range_end_bb);
11015 * - use 'iadd' instead of 'int_add'
11016 * - handling ovf opcodes: decompose in method_to_ir.
11017 * - unify iregs/fregs
11018 * -> partly done, the missing parts are:
11019 * - a more complete unification would involve unifying the hregs as well, so
11020 * code wouldn't need if (fp) all over the place. but that would mean the hregs
11021 * would no longer map to the machine hregs, so the code generators would need to
11022 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
11023 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
11024 * fp/non-fp branches speeds it up by about 15%.
11025 * - use sext/zext opcodes instead of shifts
11027 * - get rid of TEMPLOADs if possible and use vregs instead
11028 * - clean up usage of OP_P/OP_ opcodes
11029 * - cleanup usage of DUMMY_USE
11030 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
11032 * - set the stack type and allocate a dreg in the EMIT_NEW macros
11033 * - get rid of all the <foo>2 stuff when the new JIT is ready.
11034 * - make sure handle_stack_args () is called before the branch is emitted
11035 * - when the new IR is done, get rid of all unused stuff
11036 * - COMPARE/BEQ as separate instructions or unify them ?
11037 * - keeping them separate allows specialized compare instructions like
11038 * compare_imm, compare_membase
11039 * - most back ends unify fp compare+branch, fp compare+ceq
11040 * - integrate mono_save_args into inline_method
11041 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
11042 * - handle long shift opts on 32 bit platforms somehow: they require
11043 * 3 sregs (2 for arg1 and 1 for arg2)
11044 * - make byref a 'normal' type.
11045 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
11046 * variable if needed.
11047 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
11048 * like inline_method.
11049 * - remove inlining restrictions
11050 * - fix LNEG and enable cfold of INEG
11051 * - generalize x86 optimizations like ldelema as a peephole optimization
11052 * - add store_mem_imm for amd64
11053 * - optimize the loading of the interruption flag in the managed->native wrappers
11054 * - avoid special handling of OP_NOP in passes
11055 * - move code inserting instructions into one function/macro.
11056 * - try a coalescing phase after liveness analysis
11057 * - add float -> vreg conversion + local optimizations on !x86
11058 * - figure out how to handle decomposed branches during optimizations, ie.
11059 * compare+branch, op_jump_table+op_br etc.
11060 * - promote RuntimeXHandles to vregs
11061 * - vtype cleanups:
11062 * - add a NEW_VARLOADA_VREG macro
11063 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
11064 * accessing vtype fields.
11065 * - get rid of I8CONST on 64 bit platforms
11066 * - dealing with the increase in code size due to branches created during opcode
11068 * - use extended basic blocks
11069 * - all parts of the JIT
11070 * - handle_global_vregs () && local regalloc
11071 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
11072 * - sources of increase in code size:
11075 * - isinst and castclass
11076 * - lvregs not allocated to global registers even if used multiple times
11077 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
11079 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
11080 * - add all micro optimizations from the old JIT
11081 * - put tree optimizations into the deadce pass
11082 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
11083 * specific function.
11084 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
11085 * fcompare + branchCC.
11086 * - create a helper function for allocating a stack slot, taking into account
11087 * MONO_CFG_HAS_SPILLUP.
11089 * - merge the ia64 switch changes.
11090 * - optimize mono_regstate2_alloc_int/float.
11091 * - fix the pessimistic handling of variables accessed in exception handler blocks.
11092 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
11093 * parts of the tree could be separated by other instructions, killing the tree
11094 * arguments, or stores killing loads etc. Also, should we fold loads into other
11095 * instructions if the result of the load is used multiple times ?
11096 * - make the REM_IMM optimization in mini-x86.c arch-independent.
11097 * - LAST MERGE: 108395.
11098 * - when returning vtypes in registers, generate IR and append it to the end of the
11099 * last bb instead of doing it in the epilog.
11100 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
11108 - When to decompose opcodes:
11109 - earlier: this makes some optimizations hard to implement, since the low level IR
11110 no longer contains the necessary information. But it is easier to do.
11111 - later: harder to implement, enables more optimizations.
11112 - Branches inside bblocks:
11113 - created when decomposing complex opcodes.
11114 - branches to another bblock: harmless, but not tracked by the branch
11115 optimizations, so need to branch to a label at the start of the bblock.
11116 - branches to inside the same bblock: very problematic, trips up the local
11117 reg allocator. Can be fixed by splitting the current bblock, but that is a
11118 complex operation, since some local vregs can become global vregs etc.
11119 - Local/global vregs:
11120 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
11121 local register allocator.
11122 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
11123 structure, created by mono_create_var (). Assigned to hregs or the stack by
11124 the global register allocator.
11125 - When to do optimizations like alu->alu_imm:
11126 - earlier -> saves work later on since the IR will be smaller/simpler
11127 - later -> can work on more instructions
11128 - Handling of valuetypes:
11129 - When a vtype is pushed on the stack, a new temporary is created, an
11130 instruction computing its address (LDADDR) is emitted and pushed on
11131 the stack. Need to optimize cases when the vtype is used immediately as in
11132 argument passing, stloc etc.
11133 - Instead of the to_end stuff in the old JIT, simply call the function handling
11134 the values on the stack before emitting the last instruction of the bb.
11137 #endif /* DISABLE_JIT */