2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
22 #ifdef HAVE_SYS_TIME_H
30 #include <mono/utils/memcheck.h>
32 #include <mono/metadata/assembly.h>
33 #include <mono/metadata/attrdefs.h>
34 #include <mono/metadata/loader.h>
35 #include <mono/metadata/tabledefs.h>
36 #include <mono/metadata/class.h>
37 #include <mono/metadata/object.h>
38 #include <mono/metadata/exception.h>
39 #include <mono/metadata/opcodes.h>
40 #include <mono/metadata/mono-endian.h>
41 #include <mono/metadata/tokentype.h>
42 #include <mono/metadata/tabledefs.h>
43 #include <mono/metadata/marshal.h>
44 #include <mono/metadata/debug-helpers.h>
45 #include <mono/metadata/mono-debug.h>
46 #include <mono/metadata/gc-internal.h>
47 #include <mono/metadata/security-manager.h>
48 #include <mono/metadata/threads-types.h>
49 #include <mono/metadata/security-core-clr.h>
50 #include <mono/metadata/monitor.h>
51 #include <mono/metadata/profiler-private.h>
52 #include <mono/metadata/profiler.h>
53 #include <mono/utils/mono-compiler.h>
54 #include <mono/metadata/mono-basic-block.h>
61 #include "jit-icalls.h"
63 #include "debugger-agent.h"
/*
 * Tuning constants and bailout macros for the IL->IR conversion.
 * The *_FAILURE macros record an exception type/message on the MonoCompile
 * (via mono_cfg_set_exception) and then `goto exception_exit`, so they are
 * only usable inside functions that define that label.
 * NOTE(review): this listing appears sampled — several macro bodies below are
 * missing lines (e.g. their closing "} while (0)"); confirm against the full file.
 */
65 #define BRANCH_COST 10
66 #define INLINE_LENGTH_LIMIT 20
/* Flag an inlining failure; only acts when compiling an inlined, non-wrapper method. */
67 #define INLINE_FAILURE do {\
68 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE))\
71 #define CHECK_CFG_EXCEPTION do {\
72 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
75 #define METHOD_ACCESS_FAILURE do { \
76 char *method_fname = mono_method_full_name (method, TRUE); \
77 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
78 mono_cfg_set_exception (cfg, MONO_EXCEPTION_METHOD_ACCESS); \
79 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
80 g_free (method_fname); \
81 g_free (cil_method_fname); \
82 goto exception_exit; \
84 #define FIELD_ACCESS_FAILURE do { \
85 char *method_fname = mono_method_full_name (method, TRUE); \
86 char *field_fname = mono_field_full_name (field); \
87 mono_cfg_set_exception (cfg, MONO_EXCEPTION_FIELD_ACCESS); \
88 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
89 g_free (method_fname); \
90 g_free (field_fname); \
91 goto exception_exit; \
93 #define GENERIC_SHARING_FAILURE(opcode) do { \
94 if (cfg->generic_sharing_context) { \
95 if (cfg->verbose_level > 2) \
96 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
97 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
98 goto exception_exit; \
101 #define OUT_OF_MEMORY_FAILURE do { \
102 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OUT_OF_MEMORY); \
103 goto exception_exit; \
105 /* Determine whether 'ins' represents a load of the 'this' argument */
106 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
/* Opcode remapping helpers (bodies appear later in this file). */
108 static int ldind_to_load_membase (int opcode);
109 static int stind_to_store_membase (int opcode);
111 int mono_op_to_op_imm (int opcode);
112 int mono_op_to_op_imm_noemul (int opcode);
/* IR-emission entry points shared with the rest of the JIT. */
114 MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
115 void mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native);
116 void mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass);
118 /* helper methods signatures */
/* Signatures for trampoline/icall helpers; built once by
 * mono_create_helper_signatures () below and cached for the process lifetime. */
119 static MonoMethodSignature *helper_sig_class_init_trampoline = NULL;
120 static MonoMethodSignature *helper_sig_domain_get = NULL;
121 static MonoMethodSignature *helper_sig_generic_class_init_trampoline = NULL;
122 static MonoMethodSignature *helper_sig_generic_class_init_trampoline_llvm = NULL;
123 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline = NULL;
124 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline = NULL;
125 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm = NULL;
/*
 * Per-opcode metadata tables, generated by expanding mini-ops.h twice with
 * different MINI_OP/MINI_OP3 definitions: first to record dest/src register
 * kinds, then to record source-register counts.
 */
128 * Instruction metadata
136 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
137 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
143 #if SIZEOF_REGISTER == 8
148 /* keep in sync with the enum in mini.h */
/* First expansion: register-kind characters for each opcode. */
151 #include "mini-ops.h"
/* Second expansion: number of source registers per opcode. */
156 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
157 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
159 * This should contain the index of the last sreg + 1. This is not the same
160 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
162 const gint8 ins_sreg_counts[] = {
163 #include "mini-ops.h"
/* Initialize a MonoMethodVar's liveness range to "not yet used". */
168 #define MONO_INIT_VARINFO(vi,id) do { \
169 (vi)->range.first_use.pos.bid = 0xffff; \
175 mono_inst_set_src_registers (MonoInst *ins, int *regs)
/* Copy the three source registers from REGS into INS.
 * Assumes regs points at least at 3 ints — TODO confirm at call sites. */
177 ins->sreg1 = regs [0];
178 ins->sreg2 = regs [1];
179 ins->sreg3 = regs [2];
/* Public wrapper: allocate a fresh integer vreg. */
183 mono_alloc_ireg (MonoCompile *cfg)
185 return alloc_ireg (cfg);
/* Public wrapper: allocate a fresh floating-point vreg. */
189 mono_alloc_freg (MonoCompile *cfg)
191 return alloc_freg (cfg);
/* Public wrapper: allocate a fresh pointer-sized vreg. */
195 mono_alloc_preg (MonoCompile *cfg)
197 return alloc_preg (cfg);
/* Public wrapper: allocate a destination vreg matching STACK_TYPE. */
201 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
203 return alloc_dreg (cfg, stack_type);
207 * mono_alloc_ireg_ref:
209 * Allocate an IREG, and mark it as holding a GC ref.
212 mono_alloc_ireg_ref (MonoCompile *cfg)
214 return alloc_ireg_ref (cfg);
218 * mono_alloc_ireg_mp:
220 * Allocate an IREG, and mark it as holding a managed pointer.
223 mono_alloc_ireg_mp (MonoCompile *cfg)
225 return alloc_ireg_mp (cfg);
229 * mono_alloc_ireg_copy:
231 * Allocate an IREG with the same GC type as VREG.
234 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
/* Propagate VREG's GC classification (ref / managed ptr / plain int). */
236 if (vreg_is_ref (cfg, vreg))
237 return alloc_ireg_ref (cfg);
238 else if (vreg_is_mp (cfg, vreg))
239 return alloc_ireg_mp (cfg);
241 return alloc_ireg (cfg);
/*
 * Map a MonoType to the move opcode used when copying a value of that type
 * between registers.  Enums are resolved to their underlying basetype and
 * generic instances to their container class before dispatch.
 * NOTE(review): the return statements for each case are on lines missing
 * from this listing — the switch structure below is incomplete as shown.
 */
245 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
251 switch (type->type) {
254 case MONO_TYPE_BOOLEAN:
266 case MONO_TYPE_FNPTR:
268 case MONO_TYPE_CLASS:
269 case MONO_TYPE_STRING:
270 case MONO_TYPE_OBJECT:
271 case MONO_TYPE_SZARRAY:
272 case MONO_TYPE_ARRAY:
276 #if SIZEOF_REGISTER == 8
285 case MONO_TYPE_VALUETYPE:
/* Enum valuetypes move like their underlying integral type. */
286 if (type->data.klass->enumtype) {
287 type = mono_class_enum_basetype (type->data.klass);
/* SIMD-recognized value types get a dedicated move — TODO confirm which
 * opcode; the return is not visible here. */
290 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
293 case MONO_TYPE_TYPEDBYREF:
295 case MONO_TYPE_GENERICINST:
296 type = &type->data.generic_class->container_class->byval_arg;
/* VAR/MVAR only appear here under generic sharing. */
300 g_assert (cfg->generic_sharing_context);
303 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/*
 * Debug helper: print MSG, then a basic block's number, its in/out edges
 * (block number and depth-first number), and every instruction in its body.
 */
309 mono_print_bb (MonoBasicBlock *bb, const char *msg)
314 printf ("\n%s %d: [IN: ", msg, bb->block_num);
315 for (i = 0; i < bb->in_count; ++i)
316 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
318 for (i = 0; i < bb->out_count; ++i)
319 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
321 for (tree = bb->code; tree; tree = tree->next)
322 mono_print_ins_index (-1, tree);
/*
 * Build the cached helper_sig_* signatures declared above from icall
 * signature strings ("ret [args...]").  Called once at JIT startup —
 * presumably before any compilation uses them; TODO confirm caller.
 */
326 mono_create_helper_signatures (void)
328 helper_sig_domain_get = mono_create_icall_signature ("ptr");
329 helper_sig_class_init_trampoline = mono_create_icall_signature ("void");
330 helper_sig_generic_class_init_trampoline = mono_create_icall_signature ("void");
331 helper_sig_generic_class_init_trampoline_llvm = mono_create_icall_signature ("void ptr");
332 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
333 helper_sig_monitor_enter_exit_trampoline = mono_create_icall_signature ("void");
334 helper_sig_monitor_enter_exit_trampoline_llvm = mono_create_icall_signature ("void object");
338 * Can't put this at the beginning, since other files reference stuff from this
343 #define UNVERIFIED do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto unverified; } while (0)
345 #define LOAD_ERROR do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto load_error; } while (0)
/* Look up (or lazily create and register) the basic block starting at IL
 * offset IP.  Bails out as unverified if IP is outside the method body. */
347 #define GET_BBLOCK(cfg,tblock,ip) do { \
348 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
350 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
351 NEW_BBLOCK (cfg, (tblock)); \
352 (tblock)->cil_code = (ip); \
353 ADD_BBLOCK (cfg, (tblock)); \
357 #if defined(TARGET_X86) || defined(TARGET_AMD64)
358 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
359 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
360 (dest)->dreg = alloc_ireg_mp ((cfg)); \
361 (dest)->sreg1 = (sr1); \
362 (dest)->sreg2 = (sr2); \
363 (dest)->inst_imm = (imm); \
364 (dest)->backend.shift_amount = (shift); \
365 MONO_ADD_INS ((cfg)->cbb, (dest)); \
369 #if SIZEOF_REGISTER == 8
370 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
371 /* FIXME: Need to add many more cases */ \
372 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
374 int dr = alloc_preg (cfg); \
375 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
376 (ins)->sreg2 = widen->dreg; \
380 #define ADD_WIDEN_OP(ins, arg1, arg2)
/* Pop two stack values, emit the typed binary op, push the result.
 * Relies on locals of the big IL-decoding loop (cfg, ins, sp). */
383 #define ADD_BINOP(op) do { \
384 MONO_INST_NEW (cfg, ins, (op)); \
386 ins->sreg1 = sp [0]->dreg; \
387 ins->sreg2 = sp [1]->dreg; \
388 type_from_op (ins, sp [0], sp [1]); \
390 /* Have to insert a widening op */ \
391 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
392 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
393 MONO_ADD_INS ((cfg)->cbb, (ins)); \
394 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
397 #define ADD_UNOP(op) do { \
398 MONO_INST_NEW (cfg, ins, (op)); \
400 ins->sreg1 = sp [0]->dreg; \
401 type_from_op (ins, sp [0], NULL); \
403 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
404 MONO_ADD_INS ((cfg)->cbb, (ins)); \
405 *sp++ = mono_decompose_opcode (cfg, ins); \
408 #define ADD_BINCOND(next_block) do { \
411 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
412 cmp->sreg1 = sp [0]->dreg; \
413 cmp->sreg2 = sp [1]->dreg; \
414 type_from_op (cmp, sp [0], sp [1]); \
416 type_from_op (ins, sp [0], sp [1]); \
417 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
418 GET_BBLOCK (cfg, tblock, target); \
419 link_bblock (cfg, bblock, tblock); \
420 ins->inst_true_bb = tblock; \
421 if ((next_block)) { \
422 link_bblock (cfg, bblock, (next_block)); \
423 ins->inst_false_bb = (next_block); \
424 start_new_bblock = 1; \
426 GET_BBLOCK (cfg, tblock, ip); \
427 link_bblock (cfg, bblock, tblock); \
428 ins->inst_false_bb = tblock; \
429 start_new_bblock = 2; \
431 if (sp != stack_start) { \
432 handle_stack_args (cfg, stack_start, sp - stack_start); \
433 CHECK_UNVERIFIABLE (cfg); \
435 MONO_ADD_INS (bblock, cmp); \
436 MONO_ADD_INS (bblock, ins); \
440 * link_bblock: Links two basic blocks
442 * links two basic blocks in the control flow graph, the 'from'
443 * argument is the starting block and the 'to' argument is the block
444 * the control flow ends to after 'from'.
447 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
449 MonoBasicBlock **newa;
/* Verbose tracing of the edge being added; entry/exit blocks have no
 * cil_code, hence the four printf variants. */
453 if (from->cil_code) {
455 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
457 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
460 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
462 printf ("edge from entry to exit\n");
/* Skip if the out-edge already exists (idempotent linking). */
467 for (i = 0; i < from->out_count; ++i) {
468 if (to == from->out_bb [i]) {
/* Grow from->out_bb by one; arrays live in the compile mempool, so the
 * old array is simply abandoned rather than freed. */
474 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
475 for (i = 0; i < from->out_count; ++i) {
476 newa [i] = from->out_bb [i];
/* Mirror the same duplicate-check-and-grow for to->in_bb. */
484 for (i = 0; i < to->in_count; ++i) {
485 if (from == to->in_bb [i]) {
491 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
492 for (i = 0; i < to->in_count; ++i) {
493 newa [i] = to->in_bb [i];
/* Public wrapper around the static link_bblock () above. */
502 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
504 link_bblock (cfg, from, to);
508 * mono_find_block_region:
510 * We mark each basic block with a region ID. We use that to avoid BB
511 * optimizations when blocks are in different regions.
514 * A region token that encodes where this region is, and information
515 * about the clause owner for this block.
517 * The region encodes the try/catch/filter clause that owns this block
518 * as well as the type. -1 is a special value that represents a block
519 * that is in none of try/catch/filter.
522 mono_find_block_region (MonoCompile *cfg, int offset)
524 MonoMethodHeader *header = cfg->header;
525 MonoExceptionClause *clause;
/* Scan every EH clause; the token is ((clause_index + 1) << 8) combined
 * with a MONO_REGION_* kind and the clause flags. */
528 for (i = 0; i < header->num_clauses; ++i) {
529 clause = &header->clauses [i];
/* Filter expressions occupy [filter_offset, handler_offset). */
530 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
531 (offset < (clause->handler_offset)))
532 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
534 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
535 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
536 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
537 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
538 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
540 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
/* Inside the protected (try) range but not in any handler. */
543 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
544 return ((i + 1) << 8) | clause->flags;
/*
 * Collect the EH clauses of kind TYPE whose protected range contains IP
 * but not TARGET — i.e. the handlers a branch from IP to TARGET leaves.
 * Returns a GList of MonoExceptionClause* (caller presumably frees the
 * list — TODO confirm against callers).
 */
551 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
553 MonoMethodHeader *header = cfg->header;
554 MonoExceptionClause *clause;
558 for (i = 0; i < header->num_clauses; ++i) {
559 clause = &header->clauses [i];
560 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
561 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
562 if (clause->flags == type)
563 res = g_list_append (res, clause);
/*
 * Return the stack-pointer save variable for the EH region REGION,
 * creating and caching it in cfg->spvars on first request.
 */
570 mono_create_spvar_for_region (MonoCompile *cfg, int region)
574 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
578 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
579 /* prevent it from being register allocated */
580 var->flags |= MONO_INST_INDIRECT;
582 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
586 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
/* Lookup only — returns NULL if no exception variable was created for
 * this IL offset (see mono_create_exvar_for_offset below). */
588 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/*
 * Return the exception-object variable for the handler at IL OFFSET,
 * creating and caching it in cfg->exvars on first request.
 */
592 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
596 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
600 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
601 /* prevent it from being register allocated */
602 var->flags |= MONO_INST_INDIRECT;
604 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
610 * Returns the type used in the eval stack when @type is loaded.
611 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
614 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
/* Sets inst->type to the STACK_* kind and inst->klass for the loaded value.
 * Enums and generic instances loop back into the switch after being
 * resolved to their underlying/container type. */
618 inst->klass = klass = mono_class_from_mono_type (type);
620 inst->type = STACK_MP;
625 switch (type->type) {
627 inst->type = STACK_INV;
631 case MONO_TYPE_BOOLEAN:
637 inst->type = STACK_I4;
642 case MONO_TYPE_FNPTR:
643 inst->type = STACK_PTR;
645 case MONO_TYPE_CLASS:
646 case MONO_TYPE_STRING:
647 case MONO_TYPE_OBJECT:
648 case MONO_TYPE_SZARRAY:
649 case MONO_TYPE_ARRAY:
650 inst->type = STACK_OBJ;
654 inst->type = STACK_I8;
658 inst->type = STACK_R8;
660 case MONO_TYPE_VALUETYPE:
661 if (type->data.klass->enumtype) {
662 type = mono_class_enum_basetype (type->data.klass);
666 inst->type = STACK_VTYPE;
669 case MONO_TYPE_TYPEDBYREF:
670 inst->klass = mono_defaults.typed_reference_class;
671 inst->type = STACK_VTYPE;
673 case MONO_TYPE_GENERICINST:
674 type = &type->data.generic_class->container_class->byval_arg;
677 case MONO_TYPE_MVAR :
678 /* FIXME: all the arguments must be references for now,
679 * later look inside cfg and see if the arg num is
682 g_assert (cfg->generic_sharing_context);
683 inst->type = STACK_OBJ;
686 g_error ("unknown type 0x%02x in eval stack type", type->type);
691 * The following tables are used to quickly validate the IL code in type_from_op ().
/* Result stack-type of a numeric binop, indexed [src1->type][src2->type];
 * STACK_INV marks an invalid operand combination. */
694 bin_num_table [STACK_MAX] [STACK_MAX] = {
695 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
696 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
697 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
698 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
699 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
700 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
701 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
702 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
707 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
710 /* reduce the size of this table */
/* Result type of an integer-only binop (and/or/xor, etc.). */
712 bin_int_table [STACK_MAX] [STACK_MAX] = {
713 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
714 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
715 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
716 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
717 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
718 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
719 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
720 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Comparison validity: 0 = invalid, non-zero = allowed (values > 1 appear
 * to encode special/questionable combinations — TODO confirm consumers,
 * e.g. the "& 1" test in type_from_op for unsigned compares). */
724 bin_comp_table [STACK_MAX] [STACK_MAX] = {
725 /* Inv i L p F & O vt */
727 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
728 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
729 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
730 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
731 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
732 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
733 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
736 /* reduce the size of this table */
/* Result type of shift ops: shift amount must be I4 or PTR. */
738 shift_table [STACK_MAX] [STACK_MAX] = {
739 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
740 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
741 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
742 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
743 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
744 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
745 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
746 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
750 * Tables to map from the non-specific opcode to the matching
751 * type-specific opcode.
753 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
755 binops_op_map [STACK_MAX] = {
756 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
759 /* handles from CEE_NEG to CEE_CONV_U8 */
761 unops_op_map [STACK_MAX] = {
762 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
765 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
767 ovfops_op_map [STACK_MAX] = {
768 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
771 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
773 ovf2ops_op_map [STACK_MAX] = {
774 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
777 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
779 ovf3ops_op_map [STACK_MAX] = {
780 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
783 /* handles from CEE_BEQ to CEE_BLT_UN */
785 beqops_op_map [STACK_MAX] = {
786 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
789 /* handles from CEE_CEQ to CEE_CLT_UN */
791 ceqops_op_map [STACK_MAX] = {
792 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
796 * Sets ins->type (the type on the eval stack) according to the
797 * type of the opcode and the arguments to it.
798 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
800 * FIXME: this function sets ins->type unconditionally in some cases, but
801 * it should set it to invalid for some types (a conv.x on an object)
804 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
/* Each case computes the result STACK_* type from the tables above, then
 * specializes the generic opcode by adding the per-type offset from the
 * corresponding *_op_map table (the maps store OP_xxx - CEE_xxx deltas). */
806 switch (ins->opcode) {
813 /* FIXME: check unverifiable args for STACK_MP */
814 ins->type = bin_num_table [src1->type] [src2->type];
815 ins->opcode += binops_op_map [ins->type];
822 ins->type = bin_int_table [src1->type] [src2->type];
823 ins->opcode += binops_op_map [ins->type];
828 ins->type = shift_table [src1->type] [src2->type];
829 ins->opcode += binops_op_map [ins->type];
/* Compares: pick L/F/I variant from the operand width. */
834 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
835 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
836 ins->opcode = OP_LCOMPARE;
837 else if (src1->type == STACK_R8)
838 ins->opcode = OP_FCOMPARE;
840 ins->opcode = OP_ICOMPARE;
842 case OP_ICOMPARE_IMM:
/* NOTE(review): indexes bin_comp_table with src1->type twice — the other
 * compare cases use [src1][src2]; presumably intentional for the
 * immediate form (only one stack operand), but worth confirming. */
843 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
844 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
845 ins->opcode = OP_LCOMPARE_IMM;
857 ins->opcode += beqops_op_map [src1->type];
860 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
861 ins->opcode += ceqops_op_map [src1->type];
/* Unsigned set-compares only accept table entries with bit 0 set. */
867 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
868 ins->opcode += ceqops_op_map [src1->type];
872 ins->type = neg_table [src1->type];
873 ins->opcode += unops_op_map [ins->type];
876 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
877 ins->type = src1->type;
879 ins->type = STACK_INV;
880 ins->opcode += unops_op_map [ins->type];
886 ins->type = STACK_I4;
887 ins->opcode += unops_op_map [src1->type];
890 ins->type = STACK_R8;
891 switch (src1->type) {
894 ins->opcode = OP_ICONV_TO_R_UN;
897 ins->opcode = OP_LCONV_TO_R_UN;
901 case CEE_CONV_OVF_I1:
902 case CEE_CONV_OVF_U1:
903 case CEE_CONV_OVF_I2:
904 case CEE_CONV_OVF_U2:
905 case CEE_CONV_OVF_I4:
906 case CEE_CONV_OVF_U4:
907 ins->type = STACK_I4;
908 ins->opcode += ovf3ops_op_map [src1->type];
910 case CEE_CONV_OVF_I_UN:
911 case CEE_CONV_OVF_U_UN:
912 ins->type = STACK_PTR;
913 ins->opcode += ovf2ops_op_map [src1->type];
915 case CEE_CONV_OVF_I1_UN:
916 case CEE_CONV_OVF_I2_UN:
917 case CEE_CONV_OVF_I4_UN:
918 case CEE_CONV_OVF_U1_UN:
919 case CEE_CONV_OVF_U2_UN:
920 case CEE_CONV_OVF_U4_UN:
921 ins->type = STACK_I4;
922 ins->opcode += ovf2ops_op_map [src1->type];
925 ins->type = STACK_PTR;
926 switch (src1->type) {
928 ins->opcode = OP_ICONV_TO_U;
932 #if SIZEOF_VOID_P == 8
933 ins->opcode = OP_LCONV_TO_U;
935 ins->opcode = OP_MOVE;
939 ins->opcode = OP_LCONV_TO_U;
942 ins->opcode = OP_FCONV_TO_U;
948 ins->type = STACK_I8;
949 ins->opcode += unops_op_map [src1->type];
951 case CEE_CONV_OVF_I8:
952 case CEE_CONV_OVF_U8:
953 ins->type = STACK_I8;
954 ins->opcode += ovf3ops_op_map [src1->type];
956 case CEE_CONV_OVF_U8_UN:
957 case CEE_CONV_OVF_I8_UN:
958 ins->type = STACK_I8;
959 ins->opcode += ovf2ops_op_map [src1->type];
963 ins->type = STACK_R8;
964 ins->opcode += unops_op_map [src1->type];
967 ins->type = STACK_R8;
971 ins->type = STACK_I4;
972 ins->opcode += ovfops_op_map [src1->type];
977 ins->type = STACK_PTR;
978 ins->opcode += ovfops_op_map [src1->type];
/* Overflow-checked arithmetic: no R8 result allowed. */
986 ins->type = bin_num_table [src1->type] [src2->type];
987 ins->opcode += ovfops_op_map [src1->type];
988 if (ins->type == STACK_R8)
989 ins->type = STACK_INV;
991 case OP_LOAD_MEMBASE:
992 ins->type = STACK_PTR;
994 case OP_LOADI1_MEMBASE:
995 case OP_LOADU1_MEMBASE:
996 case OP_LOADI2_MEMBASE:
997 case OP_LOADU2_MEMBASE:
998 case OP_LOADI4_MEMBASE:
999 case OP_LOADU4_MEMBASE:
1000 ins->type = STACK_PTR;
1002 case OP_LOADI8_MEMBASE:
1003 ins->type = STACK_I8;
1005 case OP_LOADR4_MEMBASE:
1006 case OP_LOADR8_MEMBASE:
1007 ins->type = STACK_R8;
1010 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
/* Managed pointers carry object_class as a conservative klass. */
1014 if (ins->type == STACK_MP)
1015 ins->klass = mono_defaults.object_class;
/* Row of neg_table (declaration line not visible in this listing):
 * result stack-type of CEE_NEG per operand type — TODO confirm. */
1020 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
/* Argument-compatibility table used (only in commented-out form) by
 * check_values_to_signature () below. */
1026 param_table [STACK_MAX] [STACK_MAX] = {
/*
 * Validate that the stack values in ARGS are compatible with the parameter
 * types of SIG (byref-ness, reference kinds, R4/R8 for float args).
 * NOTE(review): 'this' shadows nothing in C but is a C++ keyword; kept as-is.
 */
1031 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
1035 switch (args->type) {
1045 for (i = 0; i < sig->param_count; ++i) {
1046 switch (args [i].type) {
1050 if (!sig->params [i]->byref)
1054 if (sig->params [i]->byref)
1056 switch (sig->params [i]->type) {
1057 case MONO_TYPE_CLASS:
1058 case MONO_TYPE_STRING:
1059 case MONO_TYPE_OBJECT:
1060 case MONO_TYPE_SZARRAY:
1061 case MONO_TYPE_ARRAY:
/* Float stack slots must map to R4/R8 parameters. */
1068 if (sig->params [i]->byref)
1070 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1079 /*if (!param_table [args [i].type] [sig->params [i]->type])
1087 * When we need a pointer to the current domain many times in a method, we
1088 * call mono_domain_get() once and we store the result in a local variable.
1089 * This function returns the variable that represents the MonoDomain*.
1091 inline static MonoInst *
1092 mono_get_domainvar (MonoCompile *cfg)
1094 if (!cfg->domainvar)
1095 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1096 return cfg->domainvar;
1100 * The got_var contains the address of the Global Offset Table when AOT
/* Lazily create cfg->got_var (only on MONO_ARCH_NEED_GOT_VAR targets and
 * only when compiling AOT).  The non-AOT early-return body and the #else
 * branch are on lines not visible in this listing. */
1104 mono_get_got_var (MonoCompile *cfg)
1106 #ifdef MONO_ARCH_NEED_GOT_VAR
1107 if (!cfg->compile_aot)
1109 if (!cfg->got_var) {
1110 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1112 return cfg->got_var;
/*
 * Lazily create cfg->rgctx_var, the variable holding the runtime generic
 * context (vtable).  Only valid under generic sharing, hence the assert.
 */
1119 mono_get_vtable_var (MonoCompile *cfg)
1121 g_assert (cfg->generic_sharing_context);
1123 if (!cfg->rgctx_var) {
1124 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1125 /* force the var to be stack allocated */
1126 cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
1129 return cfg->rgctx_var;
/* Map an instruction's eval-stack type back to a canonical MonoType*. */
1133 type_from_stack_type (MonoInst *ins) {
1134 switch (ins->type) {
1135 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1136 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1137 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1138 case STACK_R8: return &mono_defaults.double_class->byval_arg;
/* Managed pointer: use the klass's this_arg (a byref type). */
1140 return &ins->klass->this_arg;
1141 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1142 case STACK_VTYPE: return &ins->klass->byval_arg;
1144 g_error ("stack type %d to monotype not handled\n", ins->type);
/*
 * Map a MonoType to its eval-stack STACK_* kind.  Underlying types are
 * resolved first, so enums classify as their base integral type.
 * NOTE(review): the return statements for most cases fall on lines missing
 * from this listing.
 */
1149 static G_GNUC_UNUSED int
1150 type_to_stack_type (MonoType *t)
1152 t = mono_type_get_underlying_type (t);
1156 case MONO_TYPE_BOOLEAN:
1159 case MONO_TYPE_CHAR:
1166 case MONO_TYPE_FNPTR:
1168 case MONO_TYPE_CLASS:
1169 case MONO_TYPE_STRING:
1170 case MONO_TYPE_OBJECT:
1171 case MONO_TYPE_SZARRAY:
1172 case MONO_TYPE_ARRAY:
1180 case MONO_TYPE_VALUETYPE:
1181 case MONO_TYPE_TYPEDBYREF:
1183 case MONO_TYPE_GENERICINST:
/* Generic value-type instances are STACK_VTYPE; reference instances
 * presumably fall through to STACK_OBJ — return lines not visible. */
1184 if (mono_type_generic_inst_is_valuetype (t))
1190 g_assert_not_reached ();
/*
 * Map a CEE_LDELEM_*/CEE_STELEM_* opcode to the element MonoClass it
 * accesses.  Several case labels sit on lines missing from this listing;
 * the visible returns pair with the usual i1/u1/i2/u2/i4/u4/i8/r4/r8/ref
 * element opcodes — TODO confirm the exact label/return pairing.
 */
1197 array_access_to_klass (int opcode)
1201 return mono_defaults.byte_class;
1203 return mono_defaults.uint16_class;
1206 return mono_defaults.int_class;
1209 return mono_defaults.sbyte_class;
1212 return mono_defaults.int16_class;
1215 return mono_defaults.int32_class;
1217 return mono_defaults.uint32_class;
1220 return mono_defaults.int64_class;
1223 return mono_defaults.single_class;
1226 return mono_defaults.double_class;
1227 case CEE_LDELEM_REF:
1228 case CEE_STELEM_REF:
1229 return mono_defaults.object_class;
1231 g_assert_not_reached ();
1237 * We try to share variables when possible
/* Return a local variable for eval-stack SLOT holding a value of INS's
 * stack type, reusing a previously created one (cached in cfg->intvars,
 * keyed by slot and stack type) when possible. */
1240 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1245 /* inlining can result in deeper stacks */
1246 if (slot >= cfg->header->max_stack)
1247 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1249 pos = ins->type - 1 + slot * STACK_MAX;
1251 switch (ins->type) {
/* Shareable scalar types: look up / populate the intvars cache. */
1258 if ((vnum = cfg->intvars [pos]))
1259 return cfg->varinfo [vnum];
1260 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1261 cfg->intvars [pos] = res->inst_c0;
/* Non-shareable types: always create a fresh variable. */
1264 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/*
 * Record an image/token pair under KEY in cfg->token_info_hash so the AOT
 * compiler can later map KEY back to the metadata token that produced it.
 */
1270 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1273 * Don't use this if a generic_context is set, since that means AOT can't
1274 * look up the method using just the image+token.
1275 * table == 0 means this is a reference made from a wrapper.
1277 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1278 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1279 jump_info_token->image = image;
1280 jump_info_token->token = token;
1281 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1286 * This function is called to handle items that are left on the evaluation stack
1287 * at basic block boundaries. What happens is that we save the values to local variables
1288 * and we reload them later when first entering the target basic block (with the
1289 * handle_loaded_temps () function).
1290 * A single joint point will use the same variables (stored in the array bb->out_stack or
1291 * bb->in_stack, if the basic block is before or after the joint point).
1293 * This function needs to be called _before_ emitting the last instruction of
1294 * the bb (i.e. before emitting a branch).
1295 * If the stack merge fails at a join point, cfg->unverifiable is set.
1298 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
/* SP points at the COUNT live eval-stack entries of the current bblock. */
1301 MonoBasicBlock *bb = cfg->cbb;
1302 MonoBasicBlock *outb;
1303 MonoInst *inst, **locals;
1308 if (cfg->verbose_level > 3)
1309 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* Phase 1: pick the out_stack variables — reuse a successor's in_stack
 * if one already exists, otherwise allocate (shared) temporaries. */
1310 if (!bb->out_scount) {
1311 bb->out_scount = count;
1312 //printf ("bblock %d has out:", bb->block_num);
1314 for (i = 0; i < bb->out_count; ++i) {
1315 outb = bb->out_bb [i];
1316 /* exception handlers are linked, but they should not be considered for stack args */
1317 if (outb->flags & BB_EXCEPTION_HANDLER)
1319 //printf (" %d", outb->block_num);
1320 if (outb->in_stack) {
1322 bb->out_stack = outb->in_stack;
1328 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1329 for (i = 0; i < count; ++i) {
1331 * try to reuse temps already allocated for this purpouse, if they occupy the same
1332 * stack slot and if they are of the same type.
1333 * This won't cause conflicts since if 'local' is used to
1334 * store one of the values in the in_stack of a bblock, then
1335 * the same variable will be used for the same outgoing stack
1337 * This doesn't work when inlining methods, since the bblocks
1338 * in the inlined methods do not inherit their in_stack from
1339 * the bblock they are inlined to. See bug #58863 for an
1342 if (cfg->inlined_method)
1343 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1345 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Phase 2: propagate out_stack to each successor's in_stack; a depth
 * mismatch at a join point makes the method unverifiable. */
1350 for (i = 0; i < bb->out_count; ++i) {
1351 outb = bb->out_bb [i];
1352 /* exception handlers are linked, but they should not be considered for stack args */
1353 if (outb->flags & BB_EXCEPTION_HANDLER)
1355 if (outb->in_scount) {
1356 if (outb->in_scount != bb->out_scount) {
1357 cfg->unverifiable = TRUE;
1360 continue; /* check they are the same locals */
1362 outb->in_scount = count;
1363 outb->in_stack = bb->out_stack;
1366 locals = bb->out_stack;
/* Phase 3: store each stack value into its temp and replace the stack
 * entry with the temp so the successor reloads it. */
1368 for (i = 0; i < count; ++i) {
1369 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1370 inst->cil_code = sp [i]->cil_code;
1371 sp [i] = locals [i];
1372 if (cfg->verbose_level > 3)
1373 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1377 * It is possible that the out bblocks already have in_stack assigned, and
1378 * the in_stacks differ. In this case, we will store to all the different
1385 /* Find a bblock which has a different in_stack */
/* Phase 4: successors whose in_stack differs get their own stores too. */
1387 while (bindex < bb->out_count) {
1388 outb = bb->out_bb [bindex];
1389 /* exception handlers are linked, but they should not be considered for stack args */
1390 if (outb->flags & BB_EXCEPTION_HANDLER) {
1394 if (outb->in_stack != locals) {
1395 for (i = 0; i < count; ++i) {
1396 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1397 inst->cil_code = sp [i]->cil_code;
1398 sp [i] = locals [i];
1399 if (cfg->verbose_level > 3)
1400 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1402 locals = outb->in_stack;
1411 /* Emit code which loads interface_offsets [klass->interface_id]
1412 * The array is stored in memory before vtable.
/*
 * Load interface_offsets [klass->interface_id] into INTF_REG, given the
 * vtable in VTABLE_REG. The offsets array is stored in memory right before
 * the vtable, so it is addressed with a negative offset.
 */
mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
	if (cfg->compile_aot) {
		/* AOT: interface_id is not known until load time, so fetch the
		 * (pre-adjusted) IID as a patchable constant and index with it. */
		int ioffset_reg = alloc_preg (cfg);
		int iid_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
		MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
		/* JIT: the IID is a compile-time constant, load straight from the
		 * table stored before the vtable. */
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
/*
 * mini_emit_interface_bitmap_check:
 *
 * Emit code which sets INTF_BIT_REG to a nonzero value if bit
 * klass->interface_id is set in the interface bitmap located at
 * BASE_REG + OFFSET (the interface_bitmap field of a MonoClass/MonoVTable).
 */
mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
	int ibitmap_reg = alloc_preg (cfg);
#ifdef COMPRESSED_INTERFACE_BITMAP
	/* Compressed bitmap: the bit test is done by a runtime helper. */
	MonoInst *res, *ins;
	NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
	MONO_ADD_INS (cfg->cbb, ins);
	if (cfg->compile_aot)
		EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
		EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
	res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
	MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
	/* Uncompressed bitmap: test one byte directly. */
	int ibitmap_byte_reg = alloc_preg (cfg);
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
	if (cfg->compile_aot) {
		/* AOT: iid known only at load time, so compute the byte index
		 * (iid >> 3) and the bit mask (1 << (iid & 7)) with emitted code. */
		int iid_reg = alloc_preg (cfg);
		int shifted_iid_reg = alloc_preg (cfg);
		int ibitmap_byte_address_reg = alloc_preg (cfg);
		int masked_iid_reg = alloc_preg (cfg);
		int iid_one_bit_reg = alloc_preg (cfg);
		int iid_bit_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
		MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
		MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
		MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
		MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
		/* JIT: byte index and bit mask are compile-time constants. */
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1474 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1475 * stored in "klass_reg" implements the interface "klass".
/* Bitmap check against a MonoClass's interface_bitmap. */
mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
	mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
1484 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1485 * stored in "vtable_reg" implements the interface "klass".
/* Bitmap check against a MonoVTable's interface_bitmap. */
mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
	mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
 * Emit code which checks whether the interface id of @klass is smaller
 * than the value given by max_iid_reg.
mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
						 MonoBasicBlock *false_target)
	if (cfg->compile_aot) {
		/* AOT: the interface id is a load-time patchable constant. */
		int iid_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
	/* On failure either branch to FALSE_TARGET or raise InvalidCastException
	 * (NOTE(review): choice presumably depends on false_target being NULL —
	 * the selecting condition is not visible in this chunk). */
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
	MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1514 /* Same as above, but obtains max_iid from a vtable */
/* Same check, reading max_interface_id from the vtable in VTABLE_REG. */
mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
						 MonoBasicBlock *false_target)
	int max_iid_reg = alloc_preg (cfg);
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
	mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1525 /* Same as above, but obtains max_iid from a klass */
/* Same check, reading max_interface_id from the MonoClass in KLASS_REG. */
mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
						 MonoBasicBlock *false_target)
	int max_iid_reg = alloc_preg (cfg);
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
	mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * Emit an isinst-style subclass test of the MonoClass in KLASS_REG against
 * KLASS (or, if non-NULL, the runtime class in KLASS_INS), using the
 * supertypes table. Branches to TRUE_TARGET on match, FALSE_TARGET otherwise.
 */
mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
	int idepth_reg = alloc_preg (cfg);
	int stypes_reg = alloc_preg (cfg);
	int stype = alloc_preg (cfg);
	/* The supertypes table has at least MONO_DEFAULT_SUPERTABLE_SIZE entries;
	 * only check idepth when the target class is deeper than that. */
	if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
	/* Load supertypes [idepth - 1] and compare it with the target class. */
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
	} else if (cfg->compile_aot) {
		int const_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/* Convenience wrapper: subclass test with a compile-time target class only. */
mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
	mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
/*
 * Emit an interface cast check against the vtable in VTABLE_REG:
 * first a max-iid range check, then the interface bitmap test.
 */
mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
	int intf_reg = alloc_preg (cfg);
	mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
	mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
	/* Either branch to TRUE_TARGET on a set bit, or throw when it is clear
	 * (NOTE(review): selecting condition elided in this view). */
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
	MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1583 * Variant of the above that takes a register to the class, not the vtable.
/* Interface cast check like mini_emit_iface_cast, but KLASS_REG holds a
 * MonoClass* rather than a vtable. */
mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
	int intf_bit_reg = alloc_preg (cfg);
	mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
	mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
	MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * Emit an exact-class equality check: compare KLASS_REG with KLASS (or the
 * runtime class in KLASS_INST when non-NULL) and throw InvalidCastException
 * on mismatch.
 */
mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
	} else if (cfg->compile_aot) {
		int const_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
	MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Exact-class check with a compile-time class only. */
mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
	return mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
/* Compare KLASS_REG with KLASS and branch to TARGET using BRANCH_OP
 * (e.g. OP_PBEQ/OP_PBNE_UN) instead of throwing. */
mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
	if (cfg->compile_aot) {
		/* AOT: the class pointer must come from a patchable constant. */
		int const_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
1634 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/*
 * Emit a castclass check of the object in OBJ_REG (whose MonoClass is in
 * KLASS_REG) against KLASS, throwing InvalidCastException on failure.
 * OBJ_REG may be -1 to skip the vector-shape check (used for nested arrays);
 * OBJECT_IS_NULL is the bblock to branch to when the object is null.
 */
mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
		/* Array case: compare ranks, then check the element class. */
		int rank_reg = alloc_preg (cfg);
		int eclass_reg = alloc_preg (cfg);
		g_assert (!klass_inst);
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
		MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
		//		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
		if (klass->cast_class == mono_defaults.object_class) {
			/* object[] also accepts arrays of enums whose basetype is object's
			 * sibling; special-case via the element's parent class. */
			int parent_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
			mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
			mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
		} else if (klass->cast_class == mono_defaults.enum_class->parent) {
			mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
			mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
		} else if (klass->cast_class == mono_defaults.enum_class) {
			mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
		} else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
			mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
			// Pass -1 as obj_reg to skip the check below for arrays of arrays
			mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
		if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
			/* Check that the object is a vector too */
			int bounds_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
			MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
		/* Non-array case: walk/index the supertypes table like isinst, but
		 * failures throw instead of branching. */
		int idepth_reg = alloc_preg (cfg);
		int stypes_reg = alloc_preg (cfg);
		int stype = alloc_preg (cfg);
		if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
			MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
			MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
		mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
/* castclass check with a compile-time class only. */
mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
	return mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/*
 * Emit inline code which sets SIZE bytes at DESTREG + OFFSET to VAL.
 * ALIGN is the guaranteed alignment of the destination. Only VAL == 0 is
 * supported (see the assert).
 */
mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
	g_assert (val == 0);
	/* Small, sufficiently aligned fills: a single immediate store. */
	if ((size <= 4) && (size <= align)) {
			MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
			MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
			MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
#if SIZEOF_REGISTER == 8
			MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
	/* General case: materialize VAL in a register and emit a run of stores,
	 * widest first, narrowing for the tail. */
	val_reg = alloc_preg (cfg);
	if (SIZEOF_REGISTER == 8)
		MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
		MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
		/* Unaligned destination: byte stores only.
		 * This could be optimized further if necessary */
			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
#if !NO_UNALIGNED_ACCESS
	if (SIZEOF_REGISTER == 8) {
			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/*
 * Emit inline code which copies SIZE bytes from SRCREG + SOFFSET to
 * DESTREG + DOFFSET. ALIGN is the guaranteed alignment of both regions.
 * Copies are fully unrolled, widest loads/stores first.
 */
mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
	/*FIXME arbitrary hack to avoid unbound code expansion.*/
	g_assert (size < 10000);
		/* Unaligned copy: byte-by-byte.
		 * This could be optimized further if necessary */
			cur_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
#if !NO_UNALIGNED_ACCESS
	if (SIZEOF_REGISTER == 8) {
		/* 8-byte chunks on 64-bit targets. */
			cur_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
		/* Then 4-, 2- and 1-byte chunks for the remainder. */
		cur_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
		cur_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
		cur_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * Map a method return type to the matching call opcode, selected by whether
 * the call is indirect (CALLI -> *_REG variants) or virtual (*VIRT variants).
 * Enums and generic instances are resolved to their underlying type first.
 */
ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
		/* Byref returns are pointer-sized. */
		return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
	type = mini_get_basic_type_from_generic (gsctx, type);
	switch (type->type) {
	case MONO_TYPE_VOID:
		return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
	case MONO_TYPE_BOOLEAN:
	case MONO_TYPE_CHAR:
		/* Small integer types use the plain (int-sized) call opcode. */
		return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
	case MONO_TYPE_FNPTR:
		return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
	case MONO_TYPE_CLASS:
	case MONO_TYPE_STRING:
	case MONO_TYPE_OBJECT:
	case MONO_TYPE_SZARRAY:
	case MONO_TYPE_ARRAY:
		return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
		return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
		return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
	case MONO_TYPE_VALUETYPE:
		if (type->data.klass->enumtype) {
			/* Enums: retry with the underlying integral type. */
			type = mono_class_enum_basetype (type->data.klass);
		return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
	case MONO_TYPE_TYPEDBYREF:
		return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
	case MONO_TYPE_GENERICINST:
		/* Retry with the generic container's open type. */
		type = &type->data.generic_class->container_class->byval_arg;
		g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
1890 * target_type_is_incompatible:
1891 * @cfg: MonoCompile context
1893 * Check that the item @arg on the evaluation stack can be stored
1894 * in the target type (can be a local, or field, etc).
1895 * The cfg arg can be used to check if we need verification or just
1898 * Returns: non-0 value if arg can't be stored on a target.
target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
	MonoType *simple_type;
	if (target->byref) {
		/* FIXME: check that the pointed to types match */
		if (arg->type == STACK_MP)
			return arg->klass != mono_class_from_mono_type (target);
		if (arg->type == STACK_PTR)
	/* Strip enum wrappers etc. before switching on the underlying type. */
	simple_type = mono_type_get_underlying_type (target);
	switch (simple_type->type) {
	case MONO_TYPE_VOID:
	case MONO_TYPE_BOOLEAN:
	case MONO_TYPE_CHAR:
		/* Small integers must come from an int32 or native-int stack slot. */
		if (arg->type != STACK_I4 && arg->type != STACK_PTR)
		/* STACK_MP is needed when setting pinned locals */
		if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
	case MONO_TYPE_FNPTR:
		if (arg->type != STACK_I4 && arg->type != STACK_PTR)
	case MONO_TYPE_CLASS:
	case MONO_TYPE_STRING:
	case MONO_TYPE_OBJECT:
	case MONO_TYPE_SZARRAY:
	case MONO_TYPE_ARRAY:
		if (arg->type != STACK_OBJ)
		/* FIXME: check type compatibility */
		if (arg->type != STACK_I8)
		if (arg->type != STACK_R8)
	case MONO_TYPE_VALUETYPE:
		/* Value types must match the exact class, not just the stack kind. */
		if (arg->type != STACK_VTYPE)
		klass = mono_class_from_mono_type (simple_type);
		if (klass != arg->klass)
	case MONO_TYPE_TYPEDBYREF:
		if (arg->type != STACK_VTYPE)
		klass = mono_class_from_mono_type (simple_type);
		if (klass != arg->klass)
	case MONO_TYPE_GENERICINST:
		if (mono_type_generic_inst_is_valuetype (simple_type)) {
			if (arg->type != STACK_VTYPE)
			klass = mono_class_from_mono_type (simple_type);
			if (klass != arg->klass)
			if (arg->type != STACK_OBJ)
			/* FIXME: check type compatibility */
	case MONO_TYPE_MVAR:
		/* FIXME: all the arguments must be references for now,
		 * later look inside cfg and see if the arg num is
		 * really a reference
		 */
		g_assert (cfg->generic_sharing_context);
		if (arg->type != STACK_OBJ)
		g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
2005 * Prepare arguments for passing to a function call.
2006 * Return a non-zero value if the arguments can't be passed to the given
2008 * The type checks are not yet complete and some conversions may need
2009 * casts on 32 or 64 bit architectures.
2011 * FIXME: implement this using target_type_is_incompatible ()
check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
	MonoType *simple_type;
	/* 'this' must be an object reference, managed pointer or native pointer. */
		if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
	/* Check each declared parameter against the stack type of its argument. */
	for (i = 0; i < sig->param_count; ++i) {
		if (sig->params [i]->byref) {
			if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
		simple_type = sig->params [i];
		simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
		switch (simple_type->type) {
		case MONO_TYPE_VOID:
		case MONO_TYPE_BOOLEAN:
		case MONO_TYPE_CHAR:
			if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
		case MONO_TYPE_FNPTR:
			if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
		case MONO_TYPE_CLASS:
		case MONO_TYPE_STRING:
		case MONO_TYPE_OBJECT:
		case MONO_TYPE_SZARRAY:
		case MONO_TYPE_ARRAY:
			if (args [i]->type != STACK_OBJ)
			if (args [i]->type != STACK_I8)
			if (args [i]->type != STACK_R8)
		case MONO_TYPE_VALUETYPE:
			if (simple_type->data.klass->enumtype) {
				/* Enums: retry with the underlying integral type. */
				simple_type = mono_class_enum_basetype (simple_type->data.klass);
			if (args [i]->type != STACK_VTYPE)
		case MONO_TYPE_TYPEDBYREF:
			if (args [i]->type != STACK_VTYPE)
		case MONO_TYPE_GENERICINST:
			/* Retry with the generic container's open type. */
			simple_type = &simple_type->data.generic_class->container_class->byval_arg;
			g_error ("unknown type 0x%02x in check_call_signature",
/* Map a *CALLVIRT opcode to the corresponding direct-call opcode. */
callvirt_to_call (int opcode)
	case OP_VOIDCALLVIRT:
	g_assert_not_reached ();
/* Map a *CALLVIRT opcode to the corresponding *CALL_MEMBASE opcode
 * (indirect call through a vtable/IMT slot). */
callvirt_to_call_membase (int opcode)
		return OP_CALL_MEMBASE;
	case OP_VOIDCALLVIRT:
		return OP_VOIDCALL_MEMBASE;
		return OP_FCALL_MEMBASE;
		return OP_LCALL_MEMBASE;
		return OP_VCALL_MEMBASE;
	g_assert_not_reached ();
2139 #ifdef MONO_ARCH_HAVE_IMT
/*
 * Emit the hidden IMT argument for an interface call: either the runtime
 * method (constant) or IMT_ARG when non-NULL. It is passed in
 * MONO_ARCH_IMT_REG when the architecture defines one.
 */
emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
	if (COMPILE_LLVM (cfg)) {
		/* LLVM backend: materialize the value and record it on the call. */
		method_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
		} else if (cfg->compile_aot) {
			MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
			MONO_INST_NEW (cfg, ins, OP_PCONST);
			ins->inst_p0 = call->method;
			ins->dreg = method_reg;
			MONO_ADD_INS (cfg->cbb, ins);
		call->imt_arg_reg = method_reg;
#ifdef MONO_ARCH_IMT_REG
		mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
		/* Need this to keep the IMT arg alive */
		mono_call_inst_add_outarg_reg (cfg, call, method_reg, 0, FALSE);
#ifdef MONO_ARCH_IMT_REG
	/* Non-LLVM path: same value, passed in the dedicated IMT register. */
	method_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
	} else if (cfg->compile_aot) {
		MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
		MONO_INST_NEW (cfg, ins, OP_PCONST);
		ins->inst_p0 = call->method;
		ins->dreg = method_reg;
		MONO_ADD_INS (cfg->cbb, ins);
	mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
	/* No IMT register: delegate to the architecture backend. */
	mono_arch_emit_imt_argument (cfg, call, imt_arg);
/* Allocate a MonoJumpInfo patch record from MP describing a patch of TYPE
 * at IL offset IP targeting TARGET. */
static MonoJumpInfo *
mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
	MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
	ji->data.target = target;
/*
 * Create a MonoCallInst for a call with signature SIG and arguments ARGS.
 * CALLI selects an indirect call, VIRTUAL a callvirt, TAIL a tailcall,
 * RGCTX whether an rgctx argument is passed. Handles vtype returns and
 * (on soft-float targets) r4 argument conversion; lowers the argument
 * passing via the backend. The call is NOT added to a bblock here.
 */
inline static MonoCallInst *
mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
					 MonoInst **args, int calli, int virtual, int tail, int rgctx)
#ifdef MONO_ARCH_SOFT_FLOAT
		MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
		MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
	call->signature = sig;
	call->rgctx_reg = rgctx;
	type_to_eval_stack_type ((cfg), sig->ret, &call->inst);
		/* Tail call with a struct return: reuse the caller's vret_addr. */
		if (MONO_TYPE_ISSTRUCT (sig->ret)) {
			call->vret_var = cfg->vret_addr;
			//g_assert_not_reached ();
	} else if (MONO_TYPE_ISSTRUCT (sig->ret)) {
		/* Struct return: allocate a temp to receive the value. */
		MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
		temp->backend.is_pinvoke = sig->pinvoke;
		/*
		 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
		 * address of return value to increase optimization opportunities.
		 * Before vtype decomposition, the dreg of the call ins itself represents the
		 * fact the call modifies the return value. After decomposition, the call will
		 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
		 * will be transformed into an LDADDR.
		 */
		MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
		loada->dreg = alloc_preg (cfg);
		loada->inst_p0 = temp;
		/* We reference the call too since call->dreg could change during optimization */
		loada->inst_p1 = call;
		MONO_ADD_INS (cfg->cbb, loada);
		call->inst.dreg = temp->dreg;
		call->vret_var = loada;
	} else if (!MONO_TYPE_IS_VOID (sig->ret))
		call->inst.dreg = alloc_dreg (cfg, call->inst.type);
#ifdef MONO_ARCH_SOFT_FLOAT
	if (COMPILE_SOFT_FLOAT (cfg)) {
		/*
		 * If the call has a float argument, we would need to do an r8->r4 conversion using
		 * an icall, but that cannot be done during the call sequence since it would clobber
		 * the call registers + the stack. So we do it before emitting the call.
		 */
		for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
			MonoInst *in = call->args [i];
			if (i >= sig->hasthis)
				t = sig->params [i - sig->hasthis];
				t = &mono_defaults.int_class->byval_arg;
			t = mono_type_get_underlying_type (t);
			if (!t->byref && t->type == MONO_TYPE_R4) {
				MonoInst *iargs [1];
				conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
				/* The result will be in an int vreg */
				call->args [i] = conv;
	/* Let the backend (LLVM or native) lower the argument passing. */
	if (COMPILE_LLVM (cfg))
		mono_llvm_emit_call (cfg, call);
		mono_arch_emit_call (cfg, call);
	mono_arch_emit_call (cfg, call);
	cfg->param_area = MAX (cfg->param_area, call->stack_usage);
	cfg->flags |= MONO_CFG_HAS_CALLS;
/* Attach the rgctx argument in RGCTX_REG to CALL; passed in
 * MONO_ARCH_RGCTX_REG when the architecture defines one. */
set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
#ifdef MONO_ARCH_RGCTX_REG
	mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
	cfg->uses_rgctx_reg = TRUE;
	call->rgctx_reg = TRUE;
	call->rgctx_arg_reg = rgctx_reg;
/* Emit an indirect call through ADDR with signature SIG, optionally
 * passing RGCTX_ARG. Returns the call instruction. */
inline static MonoInst*
mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *rgctx_arg)
		/* Copy the rgctx value to a fresh vreg so it survives until the call. */
		rgctx_reg = mono_alloc_preg (cfg);
		MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
	call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE);
	call->inst.sreg1 = addr->dreg;
	MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
		set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
	return (MonoInst*)call;
2343 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, int rgctx_type);
2345 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, int rgctx_type);
/*
 * Emit a (possibly virtual) managed call to METHOD with signature SIG and
 * arguments ARGS. THIS being non-NULL makes the call virtual; IMT_ARG and
 * RGCTX_ARG are optional hidden arguments. Handles remoting proxies,
 * string ctors, delegate Invoke, devirtualization of final/non-virtual
 * methods, and IMT/vtable slot dispatch. Returns the call instruction.
 */
mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
							MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *rgctx_arg)
	gboolean might_be_remote;
	gboolean virtual = this != NULL;
	gboolean enable_for_aot = TRUE;
		rgctx_reg = mono_alloc_preg (cfg);
		MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
	if (method->string_ctor) {
		/* Create the real signature */
		/* FIXME: Cache these */
		MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
		ctor_sig->ret = &mono_defaults.string_class->byval_arg;
		context_used = mono_method_check_context_used (method);
	/* A non-virtual call on a MarshalByRef (or object) 'this' may need a
	 * remoting check wrapper. */
	might_be_remote = this && sig->hasthis &&
		(method->klass->marshalbyref || method->klass == mono_defaults.object_class) &&
		!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this) || context_used);
	if (might_be_remote && context_used) {
		/* Gshared: fetch the remoting-invoke wrapper address from the rgctx
		 * and call indirectly. */
		g_assert (cfg->generic_sharing_context);
		addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
		return mono_emit_calli (cfg, sig, args, addr, NULL);
	call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, FALSE, rgctx_arg ? TRUE : FALSE);
	if (might_be_remote)
		call->method = mono_marshal_get_remoting_invoke_with_check (method);
		call->method = method;
	call->inst.flags |= MONO_INST_HAS_METHOD;
	call->inst.inst_left = this;
		int vtable_reg, slot_reg, this_reg;
		this_reg = this->dreg;
#ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
		/* Delegate Invoke: call through delegate->invoke_impl directly. */
		if ((method->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
			MonoInst *dummy_use;
			MONO_EMIT_NULL_CHECK (cfg, this_reg);
			/* Make a call to delegate->invoke_impl */
			call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
			call->inst.inst_basereg = this_reg;
			call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
			MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
			/* We must emit a dummy use here because the delegate trampoline will
			   replace the 'this' argument with the delegate target making this activation
			   no longer a root for the delegate.
			   This is an issue for delegates that target collectible code such as dynamic
			   methods of GC'able assemblies.

			   For a test case look into #667921.

			   FIXME: a dummy use is not the best way to do it as the local register allocator
			   will put it on a caller save register and spill it around the call.
			   Ideally, we would either put it on a callee save register or only do the store part.
			 */
			EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
			return (MonoInst*)call;
		if ((!cfg->compile_aot || enable_for_aot) &&
			(!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
			 (MONO_METHOD_IS_FINAL (method) &&
			  method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
			!(method->klass->marshalbyref && context_used)) {
			/*
			 * the method is not virtual, we just need to ensure this is not null
			 * and then we can call the method directly.
			 */
			if (method->klass->marshalbyref || method->klass == mono_defaults.object_class) {
				/*
				 * The check above ensures method is not gshared, this is needed since
				 * gshared methods can't have wrappers.
				 */
				method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
			if (!method->string_ctor)
				MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
			call->inst.opcode = callvirt_to_call (call->inst.opcode);
		} else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
			/*
			 * the method is virtual, but we can statically dispatch since either
			 * it's class or the method itself are sealed.
			 * But first we need to ensure it's not a null reference.
			 */
			MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
			call->inst.opcode = callvirt_to_call (call->inst.opcode);
			/* True virtual dispatch: load the vtable (faulting on null 'this'). */
			call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
			vtable_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
			if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
#ifdef MONO_ARCH_HAVE_IMT
					/* Interface call through the IMT: slots live before the vtable. */
					guint32 imt_slot = mono_method_get_imt_slot (method);
					emit_imt_argument (cfg, call, imt_arg);
					slot_reg = vtable_reg;
					call->inst.inst_offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
				if (slot_reg == -1) {
					/* No IMT: index the per-interface offsets table. */
					slot_reg = alloc_preg (cfg);
					mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
					call->inst.inst_offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
				/* Class virtual call: plain vtable slot. */
				slot_reg = vtable_reg;
				call->inst.inst_offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
					((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
#ifdef MONO_ARCH_HAVE_IMT
					g_assert (mono_method_signature (method)->generic_param_count);
					emit_imt_argument (cfg, call, imt_arg);
			call->inst.sreg1 = slot_reg;
			call->virtual = TRUE;
	MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
		set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
	return (MonoInst*)call;
/*
 * mono_emit_method_call:
 *   Convenience wrapper: emit a call to METHOD using the method's own
 *   signature, with no IMT argument and no RGCTX argument.
 */
2508 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2510 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), args, this, NULL, NULL);
/*
 * mono_emit_native_call:
 *   Emit a direct call to the native code at FUNC with signature SIG.
 *   NOTE(review): the four FALSE arguments select options in
 *   mono_emit_call_args () (calli/virtual/tail/rgctx flags, presumably) --
 *   confirm the exact flag order against the callee's prototype.
 */
2514 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2521 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE);
/* Append the call instruction to the current basic block. */
2524 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2526 return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 *   Emit a call to the registered JIT icall identified by its C function
 *   address FUNC.  The call goes through the icall's wrapper and uses the
 *   signature recorded when the icall was registered.
 */
2530 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2532 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2536 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2540 * mono_emit_abs_call:
2542 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2544 inline static MonoInst*
2545 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2546 MonoMethodSignature *sig, MonoInst **args)
/* Patch info is mempool-allocated, so it lives as long as the compile. */
2548 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2552 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
/* Lazily create the identity table (key == value == JI) used by the ABS
 * resolver to recognize JI when it shows up as a call target address. */
2555 if (cfg->abs_patches == NULL)
2556 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2557 g_hash_table_insert (cfg->abs_patches, ji, ji);
2558 ins = mono_emit_native_call (cfg, ji, sig, args);
/* Mark the fptr as a patch-info pointer, not a real native address. */
2559 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * mono_emit_widen_call_res:
 *   Widen the result of a call whose return type is smaller than a
 *   register.  Only applies to pinvoke (or LLVM-compiled) calls with a
 *   non-void, non-byref return: native code may leave the upper bits of
 *   the return register uninitialized, so emit an explicit sign/zero
 *   extension matching the declared return type.
 */
2564 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2566 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2567 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2571 * Native code might return non register sized integers
2572 * without initializing the upper bits.
/* Map the return type to its load opcode to find the needed extension. */
2574 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2575 case OP_LOADI1_MEMBASE:
2576 widen_op = OP_ICONV_TO_I1;
2578 case OP_LOADU1_MEMBASE:
2579 widen_op = OP_ICONV_TO_U1;
2581 case OP_LOADI2_MEMBASE:
2582 widen_op = OP_ICONV_TO_I2;
2584 case OP_LOADU2_MEMBASE:
2585 widen_op = OP_ICONV_TO_U2;
/* -1 means the return type needs no widening (>= register sized). */
2591 if (widen_op != -1) {
2592 int dreg = alloc_preg (cfg);
2595 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
/* Preserve the stack type of the original call result. */
2596 widen->type = ins->type;
/*
 * get_memcpy_method:
 *   Return (and cache in a static) the managed corlib helper
 *   String::memcpy (3 arguments), used as a fallback when a value-type
 *   copy is not expanded inline.  Aborts if the helper is missing,
 *   i.e. the runtime is paired with an old/mismatched corlib.
 */
2606 get_memcpy_method (void)
2608 static MonoMethod *memcpy_method = NULL;
2609 if (!memcpy_method) {
2610 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2612 g_error ("Old corlib found. Install a new one");
2614 return memcpy_method;
/*
 * create_write_barrier_bitmap:
 *   Recursively set one bit in *WB_BITMAP for each pointer-sized slot of
 *   KLASS (starting at byte OFFSET) that holds an object reference, so
 *   callers know which stores of an instance copy need a GC write barrier.
 */
2618 create_write_barrier_bitmap (MonoClass *klass, unsigned *wb_bitmap, int offset)
2620 MonoClassField *field;
2621 gpointer iter = NULL;
2623 while ((field = mono_class_get_fields (klass, &iter))) {
/* Static fields are not part of the instance layout being copied. */
2626 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
/* For valuetypes, field->offset includes the (absent) MonoObject header. */
2628 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
2629 if (mono_type_is_reference (field->type)) {
/* Reference fields must be pointer-aligned for the per-slot bitmap. */
2630 g_assert ((foffset % SIZEOF_VOID_P) == 0);
2631 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
2633 /*FIXME support nested value types so this works for: struct X { Y y; int z;} struct Y { object a,b; }*/
2634 MonoClass *field_class = mono_class_from_mono_type (field->type);
2635 if (field_class->has_references)
/* Recurse into embedded structs that themselves contain references. */
2636 create_write_barrier_bitmap (field_class, wb_bitmap, offset + foffset);
/*
 * emit_write_barrier:
 *   Emit a GC write barrier for a reference store through PTR.  The stored
 *   value is either the instruction VALUE or, when VALUE is NULL, the vreg
 *   VALUE_REG.  No-op unless cfg->gen_write_barriers is set.  Strategies,
 *   fastest first:
 *     1) a single OP_CARD_TABLE_WBARRIER instruction, when the backend
 *        supports it and we are JITting (not AOT);
 *     2) inline card marking: shift/mask the address into a card index and
 *        store 1 into the card byte;
 *     3) a call to the generic managed write-barrier method.
 */
2642 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value, int value_reg)
2644 int card_table_shift_bits;
2645 gpointer card_table_mask;
2647 MonoInst *dummy_use;
2648 int nursery_shift_bits;
2649 size_t nursery_size;
2650 gboolean has_card_table_wb = FALSE;
2652 if (!cfg->gen_write_barriers)
2655 card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
2657 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
2659 #ifdef MONO_ARCH_HAVE_CARD_TABLE_WBARRIER
2660 has_card_table_wb = TRUE;
/* Strategy 1: backend-implemented card table barrier (JIT only). */
2663 if (has_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0) {
2666 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
2667 wbarrier->sreg1 = ptr->dreg;
2669 wbarrier->sreg2 = value->dreg;
2671 wbarrier->sreg2 = value_reg;
2672 MONO_ADD_INS (cfg->cbb, wbarrier);
/* Strategy 2: mark the card byte inline. */
2673 } else if (card_table) {
2674 int offset_reg = alloc_preg (cfg);
2675 int card_reg = alloc_preg (cfg);
/* card index = address >> shift (optionally masked). */
2678 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
2679 if (card_table_mask)
2680 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
2682 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
2683 * IMM's larger than 32bits.
/* Under AOT the card table address must come from a runtime patch. */
2685 if (cfg->compile_aot) {
2686 MONO_EMIT_NEW_AOTCONST (cfg, card_reg, NULL, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR);
2688 MONO_INST_NEW (cfg, ins, OP_PCONST);
2689 ins->inst_p0 = card_table;
2690 ins->dreg = card_reg;
2691 MONO_ADD_INS (cfg->cbb, ins);
2694 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
2695 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
/* Strategy 3: fall back to the managed write-barrier helper. */
2697 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
2698 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
/* Keep the stored value live until the barrier has executed. */
2702 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
2704 MONO_INST_NEW (cfg, dummy_use, OP_DUMMY_USE);
2705 dummy_use->sreg1 = value_reg;
2706 MONO_ADD_INS (cfg->cbb, dummy_use);
/*
 * mono_emit_wb_aware_memcpy:
 *   Expand a copy of a value type KLASS (SIZE bytes, alignment ALIGN,
 *   destination/source addresses in iargs [0]/[1]) while honoring GC write
 *   barriers: per-slot reference bits come from create_write_barrier_bitmap.
 *   Large copies are routed to the mono_gc_wbarrier_value_copy_bitmap icall;
 *   small ones are unrolled into load/store pairs with a barrier after each
 *   pointer-sized store.
 *   NOTE(review): the return statements are elided in this listing;
 *   from the caller at mini_emit_stobj this presumably returns whether the
 *   copy was handled (FALSE when align/size preclude it) -- confirm.
 */
2711 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
2713 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
2714 unsigned need_wb = 0;
2719 /*types with references can't have alignment smaller than sizeof(void*) */
2720 if (align < SIZEOF_VOID_P)
2723 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
2724 if (size > 32 * SIZEOF_VOID_P)
2727 create_write_barrier_bitmap (klass, &need_wb, 0);
2729 /* We don't unroll more than 5 stores to avoid code bloat. */
2730 if (size > 5 * SIZEOF_VOID_P) {
2731 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
2732 size += (SIZEOF_VOID_P - 1);
2733 size &= ~(SIZEOF_VOID_P - 1);
2735 EMIT_NEW_ICONST (cfg, iargs [2], size);
2736 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
2737 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
2741 destreg = iargs [0]->dreg;
2742 srcreg = iargs [1]->dreg;
/* dest_ptr_reg advances through the destination as we unroll stores. */
2745 dest_ptr_reg = alloc_preg (cfg);
2746 tmp_reg = alloc_preg (cfg);
2749 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
/* Copy one pointer-sized slot per iteration, barrier where needed. */
2751 while (size >= SIZEOF_VOID_P) {
2752 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, tmp_reg, srcreg, offset);
2753 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
2756 emit_write_barrier (cfg, iargs [0], NULL, tmp_reg);
2758 offset += SIZEOF_VOID_P;
2759 size -= SIZEOF_VOID_P;
2762 /*tmp += sizeof (void*)*/
2763 if (size >= SIZEOF_VOID_P) {
2764 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
2765 MONO_ADD_INS (cfg->cbb, iargs [0]);
2769 /* Those cannot be references since size < sizeof (void*) */
/* Residual tail: plain 4/2/1-byte copies, no barriers needed. */
2771 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
2772 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
2778 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
2779 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
2785 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
2786 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
2795 * Emit code to copy a valuetype of type @klass whose address is stored in
2796 * @src->dreg to memory whose address is stored at @dest->dreg.
/* NATIVE selects the native (marshalled) layout size vs the managed one. */
2799 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
2801 MonoInst *iargs [4];
2804 MonoMethod *memcpy_method;
2808 * This check breaks with spilled vars... need to handle it during verification anyway.
2809 * g_assert (klass && klass == src->klass && klass == dest->klass);
2813 n = mono_class_native_size (klass, &align);
2815 n = mono_class_value_size (klass, &align);
2817 /* if native is true there should be no references in the struct */
/* Write-barrier path: needed for heap destinations of ref-carrying types. */
2818 if (cfg->gen_write_barriers && klass->has_references && !native) {
2819 /* Avoid barriers when storing to the stack */
2820 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
2821 (dest->opcode == OP_LDADDR))) {
2822 int context_used = 0;
2827 if (cfg->generic_sharing_context)
2828 context_used = mono_class_check_context_used (klass);
2830 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
/* Prefer the inline wb-aware copy; otherwise call mono_value_copy with
 * the class obtained from the rgctx / a constant. */
2831 if ((cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
2833 } else if (context_used) {
2834 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
2836 if (cfg->compile_aot) {
2837 EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
2839 EMIT_NEW_PCONST (cfg, iargs [2], klass);
/* Ensure the GC descriptor exists before the copy helper needs it. */
2840 mono_class_compute_gc_descriptor (klass);
2844 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* No-barrier path: inline memcpy for small sizes, managed helper otherwise. */
2849 if ((cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
2850 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
2851 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
2855 EMIT_NEW_ICONST (cfg, iargs [2], n);
2857 memcpy_method = get_memcpy_method ();
2858 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *   Return (and cache in a static) the managed corlib helper
 *   String::memset (3 arguments); companion to get_memcpy_method ().
 *   Aborts if the helper is missing (old/mismatched corlib).
 */
2863 get_memset_method (void)
2865 static MonoMethod *memset_method = NULL;
2866 if (!memset_method) {
2867 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
2869 g_error ("Old corlib found. Install a new one");
2871 return memset_method;
/*
 * mini_emit_initobj:
 *   Emit code to zero-initialize a value of type KLASS at the address in
 *   DEST->dreg (CIL initobj).  Small values are cleared with an inline
 *   memset; larger ones go through the managed memset helper.
 */
2875 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
2877 MonoInst *iargs [3];
2880 MonoMethod *memset_method;
2882 /* FIXME: Optimize this for the case when dest is an LDADDR */
2884 mono_class_init (klass);
2885 n = mono_class_value_size (klass, &align);
/* Inline threshold: up to 5 pointer-sized words. */
2887 if (n <= sizeof (gpointer) * 5) {
2888 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
2891 memset_method = get_memset_method ();
2893 EMIT_NEW_ICONST (cfg, iargs [1], 0);
2894 EMIT_NEW_ICONST (cfg, iargs [2], n);
2895 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 *   Emit IR to load the runtime generic context for shared METHOD:
 *     - generic methods (context_used has MONO_GENERIC_CONTEXT_USED_METHOD):
 *       load the MRGCTX from the hidden-argument variable;
 *     - static or valuetype methods: load the VTable hidden argument,
 *       dereferencing MonoMethodRuntimeGenericContext.class_vtable when an
 *       MRGCTX was passed instead;
 *     - otherwise: load `this` (argument 0) and read its vtable.
 */
2900 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
2902 MonoInst *this = NULL;
2904 g_assert (cfg->generic_sharing_context);
/* `this` is only available/needed for non-static reference-type methods. */
2906 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
2907 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
2908 !method->klass->valuetype)
2909 EMIT_NEW_ARGLOAD (cfg, this, 0);
2911 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
2912 MonoInst *mrgctx_loc, *mrgctx_var;
2915 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
2917 mrgctx_loc = mono_get_vtable_var (cfg);
2918 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
2921 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
2922 MonoInst *vtable_loc, *vtable_var;
2926 vtable_loc = mono_get_vtable_var (cfg);
2927 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
/* The hidden argument is actually an MRGCTX: get the vtable out of it. */
2929 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
2930 MonoInst *mrgctx_var = vtable_var;
2933 vtable_reg = alloc_preg (cfg);
2934 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
2935 vtable_var->type = STACK_PTR;
/* Instance method on a reference type: rgctx comes from this->vtable. */
2943 vtable_reg = alloc_preg (cfg);
2944 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 *   Allocate (from MP) and fill an RGCTX-fetch patch entry describing the
 *   slot to fetch: the requesting METHOD, whether the lookup goes through
 *   an MRGCTX (IN_MRGCTX), the patch describing the looked-up item
 *   (PATCH_TYPE/PATCH_DATA) and the kind of info wanted (INFO_TYPE).
 */
2949 static MonoJumpInfoRgctxEntry *
2950 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, int info_type)
2952 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
2953 res->method = method;
2954 res->in_mrgctx = in_mrgctx;
/* The embedded MonoJumpInfo describes the item being looked up. */
2955 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
2956 res->data->type = patch_type;
2957 res->data->data.target = patch_data;
2958 res->info_type = info_type;
/*
 * emit_rgctx_fetch:
 *   Emit a call to the lazy RGCTX fetch trampoline which resolves ENTRY
 *   from the runtime generic context RGCTX at run time.
 */
2963 static inline MonoInst*
2964 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
2966 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 *   Emit IR to load the info of kind RGCTX_TYPE (e.g. KLASS/VTABLE) for
 *   KLASS from the runtime generic context of the current method.
 */
2970 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
2971 MonoClass *klass, int rgctx_type)
2973 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
2974 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2976 return emit_rgctx_fetch (cfg, rgctx, entry);
2980 * emit_get_rgctx_method:
2982 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
2983 * normal constants, else emit a load from the rgctx.
2986 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
2987 MonoMethod *cmethod, int rgctx_type)
/* Non-shared code: the value is a compile-time constant. */
2989 if (!context_used) {
2992 switch (rgctx_type) {
2993 case MONO_RGCTX_INFO_METHOD:
2994 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
2996 case MONO_RGCTX_INFO_METHOD_RGCTX:
2997 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
/* Other rgctx_type values are not valid in the constant path. */
3000 g_assert_not_reached ();
/* Shared code: fetch the value from the rgctx at run time. */
3003 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
3004 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3006 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *   Emit IR to load the info of kind RGCTX_TYPE for FIELD from the runtime
 *   generic context of the current method.
 */
3011 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3012 MonoClassField *field, int rgctx_type)
3014 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3015 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3017 return emit_rgctx_fetch (cfg, rgctx, entry);
3021 * On return the caller must check @klass for load errors.
/*
 * emit_generic_class_init:
 *   Emit a call to the generic class-init trampoline for KLASS, passing the
 *   vtable either via the rgctx (shared code) or as a constant.
 */
3024 emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
3026 MonoInst *vtable_arg;
3028 int context_used = 0;
3030 if (cfg->generic_sharing_context)
3031 context_used = mono_class_check_context_used (klass);
/* Shared code: the vtable must be fetched from the rgctx at run time. */
3034 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
3035 klass, MONO_RGCTX_INFO_VTABLE);
3037 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3041 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
/* LLVM uses a distinct trampoline signature. */
3044 if (COMPILE_LLVM (cfg))
3045 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline_llvm, &vtable_arg);
3047 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
3048 #ifdef MONO_ARCH_VTABLE_REG
/* Pass the vtable in the dedicated arch register when available. */
3049 mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
3050 cfg->uses_vtable_reg = TRUE;
/*
 * save_cast_details:
 *   When running with --debug=casts, record the source class (read from the
 *   object's vtable) and the target KLASS into the JIT TLS area so a failed
 *   cast can produce a detailed error message.  No-op otherwise.
 */
3057 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg)
3059 if (mini_get_debug_options ()->better_cast_details) {
3060 int to_klass_reg = alloc_preg (cfg);
3061 int vtable_reg = alloc_preg (cfg);
3062 int klass_reg = alloc_preg (cfg);
3063 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
/* Platforms without the TLS intrinsic cannot support this feature. */
3066 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
3070 MONO_ADD_INS (cfg->cbb, tls_get);
/* obj -> vtable -> class of the object being cast. */
3071 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3072 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3074 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
3075 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
3076 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
/*
 * reset_cast_details:
 *   Clear the cast-details recorded by save_cast_details () once the cast
 *   has succeeded.  Only active with --debug=casts.
 */
3081 reset_cast_details (MonoCompile *cfg)
3083 /* Reset the variables holding the cast details */
3084 if (mini_get_debug_options ()->better_cast_details) {
3085 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3087 MONO_ADD_INS (cfg->cbb, tls_get);
3088 /* It is enough to reset the from field */
3089 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3094 * On return the caller must check @array_class for load errors
/*
 * mini_emit_check_array_type:
 *   Emit the array-covariance store check: verify that OBJ's exact runtime
 *   type is ARRAY_CLASS, throwing ArrayTypeMismatchException otherwise.
 *   The comparison uses the class under MONO_OPT_SHARED, the rgctx-fetched
 *   vtable under generic sharing, and a vtable constant otherwise.
 */
3097 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3099 int vtable_reg = alloc_preg (cfg);
3100 int context_used = 0;
3102 if (cfg->generic_sharing_context)
3103 context_used = mono_class_check_context_used (array_class);
3105 save_cast_details (cfg, array_class, obj->dreg);
/* Faulting load: also acts as the null check on OBJ. */
3107 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3109 if (cfg->opt & MONO_OPT_SHARED) {
3110 int class_reg = alloc_preg (cfg);
3111 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3112 if (cfg->compile_aot) {
3113 int klass_reg = alloc_preg (cfg);
3114 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
3115 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
3117 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
3119 } else if (context_used) {
3120 MonoInst *vtable_ins;
3122 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3123 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
3125 if (cfg->compile_aot) {
/* Caller detects the load failure via @array_class (see note above). */
3129 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3131 vt_reg = alloc_preg (cfg);
3132 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3133 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
3136 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3138 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
3142 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
3144 reset_cast_details (cfg);
3148 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3149 * generic code is generated.
3152 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
/* Nullable unboxing is implemented by the managed Nullable<T>.Unbox. */
3154 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3157 MonoInst *rgctx, *addr;
3159 /* FIXME: What if the class is shared? We might not
3160 have to get the address of the method from the
/* Shared code: resolve the Unbox method's code address via the rgctx
 * and call it indirectly. */
3162 addr = emit_get_rgctx_method (cfg, context_used, method,
3163 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3165 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3167 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
/* Non-shared code: plain direct call. */
3169 return mono_emit_method_call (cfg, method, &val, NULL);
/*
 * handle_unbox:
 *   Emit the CIL `unbox` check-and-address sequence for the object on top
 *   of the stack (sp [0]): null check, rank == 0 check, element-class
 *   check (rgctx-based under sharing), then compute the address of the
 *   boxed data as obj + sizeof (MonoObject).
 */
3174 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3178 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3179 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3180 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3181 int rank_reg = alloc_dreg (cfg ,STACK_I4);
3183 obj_reg = sp [0]->dreg;
/* Faulting load doubles as the null check. */
3184 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3185 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3187 /* FIXME: generics */
3188 g_assert (klass->rank == 0);
/* Arrays can never be unboxed: the object's rank must be zero. */
3191 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3192 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3194 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3195 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
/* Shared code: compare against the rgctx-resolved element class. */
3198 MonoInst *element_class;
3200 /* This assertion is from the unboxcast insn */
3201 g_assert (klass->rank == 0);
3203 element_class = emit_get_rgctx_klass (cfg, context_used,
3204 klass->element_class, MONO_RGCTX_INFO_KLASS);
3206 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3207 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-shared code: constant class check with better-cast-details support. */
3209 save_cast_details (cfg, klass->element_class, obj_reg);
3210 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
3211 reset_cast_details (cfg);
/* The unboxed data lives right after the MonoObject header. */
3214 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
3215 MONO_ADD_INS (cfg->cbb, add);
3216 add->type = STACK_MP;
3223 * Returns NULL and set the cfg exception on error.
/*
 * handle_alloc:
 *   Emit IR allocating a new object of type KLASS (FOR_BOX set when the
 *   allocation implements `box`).  Picks between:
 *     - rgctx-based allocation (shared generic code);
 *     - the generic mono_object_new under MONO_OPT_SHARED;
 *     - a token-based mscorlib helper for out-of-line AOT code;
 *     - the GC's managed allocator when available;
 *     - otherwise the per-class allocation function.
 */
3226 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
3228 MonoInst *iargs [2];
3234 MonoInst *iargs [2];
3237 FIXME: we cannot get managed_alloc here because we can't get
3238 the class's vtable (because it's not a closed class)
3240 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3241 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
/* Shared path: fetch the class (MONO_OPT_SHARED) or vtable via rgctx. */
3244 if (cfg->opt & MONO_OPT_SHARED)
3245 rgctx_info = MONO_RGCTX_INFO_KLASS;
3247 rgctx_info = MONO_RGCTX_INFO_VTABLE;
3248 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
3250 if (cfg->opt & MONO_OPT_SHARED) {
3251 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3253 alloc_ftn = mono_object_new;
3256 alloc_ftn = mono_object_new_specific;
3259 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/* Non-shared paths below. */
3262 if (cfg->opt & MONO_OPT_SHARED) {
3263 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3264 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
3266 alloc_ftn = mono_object_new;
3267 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
3268 /* This happens often in argument checking code, eg. throw new FooException... */
3269 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
3270 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
3271 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
3273 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3274 MonoMethod *managed_alloc = NULL;
/* mono_class_vtable () failed: surface it as a TypeLoadException. */
3278 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3279 cfg->exception_ptr = klass;
3283 #ifndef MONO_CROSS_COMPILE
3284 managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
3287 if (managed_alloc) {
3288 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3289 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3291 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* Some allocators take the instance size in words as first argument. */
3293 guint32 lw = vtable->klass->instance_size;
3294 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
3295 EMIT_NEW_ICONST (cfg, iargs [0], lw);
3296 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
3299 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3303 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3307 * Returns NULL and set the cfg exception on error.
/*
 * handle_box:
 *   Emit IR boxing VAL of type KLASS.  Nullable<T> is boxed by calling the
 *   managed Nullable<T>.Box (indirectly via the rgctx under sharing);
 *   everything else allocates via handle_alloc () and stores the value
 *   right after the MonoObject header.
 */
3310 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
3312 MonoInst *alloc, *ins;
3314 if (mono_class_is_nullable (klass)) {
3315 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3318 /* FIXME: What if the class is shared? We might not
3319 have to get the method address from the RGCTX. */
3320 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3321 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3322 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3324 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
3326 return mono_emit_method_call (cfg, method, &val, NULL);
3330 alloc = handle_alloc (cfg, klass, TRUE, context_used);
/* Copy the value into the freshly allocated box, past the header. */
3334 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * mini_class_has_reference_variant_generic_argument:
 *   Return whether KLASS instantiates a (co/contra)variant generic
 *   parameter with a reference type -- in which case isinst/castclass need
 *   the variance-aware managed helpers.  Under generic sharing
 *   (CONTEXT_USED != 0), open VAR/MVAR arguments also count, since the
 *   concrete type is only known at run time.
 */
3341 mini_class_has_reference_variant_generic_argument (MonoClass *klass, int context_used)
3344 MonoGenericContainer *container;
3345 MonoGenericInst *ginst;
3347 if (klass->generic_class) {
3348 container = klass->generic_class->container_class->generic_container;
3349 ginst = klass->generic_class->context.class_inst;
3350 } else if (klass->generic_container && context_used) {
3351 container = klass->generic_container;
3352 ginst = container->context.class_inst;
3357 for (i = 0; i < container->type_argc; ++i) {
/* Only variant parameters participate in variant conversions. */
3359 if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))
3361 type = ginst->type_argv [i];
3362 if (MONO_TYPE_IS_REFERENCE (type))
3365 if (context_used && (type->type == MONO_TYPE_VAR || type->type == MONO_TYPE_MVAR))
3371 // FIXME: This doesn't work yet (class libs tests fail?)
/* Decide whether isinst/castclass for KLASS needs the slow icall path.
 * NOTE: the leading `TRUE ||` currently forces the complex path for every
 * class; the rest of the expression is the intended precise test
 * (interfaces, arrays, nullables, MBR, sealed, open type variables). */
3372 #define is_complex_isinst(klass) (TRUE || (klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || klass->marshalbyref || (klass->flags & TYPE_ATTRIBUTE_SEALED) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
3375 * Returns NULL and set the cfg exception on error.
/*
 * handle_castclass:
 *   Emit IR for CIL `castclass` of SRC to KLASS.  Three strategies:
 *     1) variant generic interfaces -> managed castclass_with_cache helper
 *        (args: obj, cached klass, cache slot address);
 *     2) "complex" classes (see is_complex_isinst) -> mono_object_castclass
 *        icall;
 *     3) simple classes -> inline null check + vtable/class comparison,
 *        throwing InvalidCastException on mismatch.
 */
3378 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3380 MonoBasicBlock *is_null_bb;
3381 int obj_reg = src->dreg;
3382 int vtable_reg = alloc_preg (cfg);
3383 MonoInst *klass_inst = NULL;
3388 if(mini_class_has_reference_variant_generic_argument (klass, context_used)) {
3389 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
3390 MonoInst *cache_ins;
3392 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
3397 /* klass - it's the second element of the cache entry*/
3398 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
3401 args [2] = cache_ins;
3403 return mono_emit_method_call (cfg, mono_castclass, args, NULL);
3406 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3408 if (is_complex_isinst (klass)) {
3409 /* Complex case, handle by an icall */
3415 args [1] = klass_inst;
3417 return mono_emit_jit_icall (cfg, mono_object_castclass, args);
3419 /* Simple case, handled by the code below */
/* Inline path: casting null always succeeds, skip straight to the end. */
3423 NEW_BBLOCK (cfg, is_null_bb);
3425 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3426 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3428 save_cast_details (cfg, klass, obj_reg);
3430 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3431 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3432 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
3434 int klass_reg = alloc_preg (cfg);
3436 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed non-array classes only need an exact vtable/class match. */
3438 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3439 /* the remoting code is broken, access the class for now */
3440 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3441 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3443 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3444 cfg->exception_ptr = klass;
3447 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3449 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3450 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3452 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* General case: walk the class hierarchy. */
3454 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3455 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
3459 MONO_START_BB (cfg, is_null_bb);
3461 reset_cast_details (cfg);
3467 * Returns NULL and set the cfg exception on error.
3470 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3473 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
3474 int obj_reg = src->dreg;
3475 int vtable_reg = alloc_preg (cfg);
3476 int res_reg = alloc_ireg_ref (cfg);
3477 MonoInst *klass_inst = NULL;
3482 if(mini_class_has_reference_variant_generic_argument (klass, context_used)) {
3483 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
3484 MonoInst *cache_ins;
3486 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
3491 /* klass - it's the second element of the cache entry*/
3492 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
3495 args [2] = cache_ins;
3497 return mono_emit_method_call (cfg, mono_isinst, args, NULL);
3500 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3502 if (is_complex_isinst (klass)) {
3503 /* Complex case, handle by an icall */
3509 args [1] = klass_inst;
3511 return mono_emit_jit_icall (cfg, mono_object_isinst, args);
3513 /* Simple case, the code below can handle it */
3517 NEW_BBLOCK (cfg, is_null_bb);
3518 NEW_BBLOCK (cfg, false_bb);
3519 NEW_BBLOCK (cfg, end_bb);
3521 /* Do the assignment at the beginning, so the other assignment can be if converted */
3522 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
3523 ins->type = STACK_OBJ;
3526 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3527 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
3529 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3531 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3532 g_assert (!context_used);
3533 /* the is_null_bb target simply copies the input register to the output */
3534 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
3536 int klass_reg = alloc_preg (cfg);
3539 int rank_reg = alloc_preg (cfg);
3540 int eclass_reg = alloc_preg (cfg);
3542 g_assert (!context_used);
3543 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3544 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
3545 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3546 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3547 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
3548 if (klass->cast_class == mono_defaults.object_class) {
3549 int parent_reg = alloc_preg (cfg);
3550 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
3551 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
3552 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3553 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3554 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
3555 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
3556 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3557 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3558 } else if (klass->cast_class == mono_defaults.enum_class) {
3559 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3560 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3561 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
3562 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3564 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
3565 /* Check that the object is a vector too */
3566 int bounds_reg = alloc_preg (cfg);
3567 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
3568 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
3569 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3572 /* the is_null_bb target simply copies the input register to the output */
3573 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3575 } else if (mono_class_is_nullable (klass)) {
3576 g_assert (!context_used);
3577 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3578 /* the is_null_bb target simply copies the input register to the output */
3579 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
3581 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3582 g_assert (!context_used);
3583 /* the remoting code is broken, access the class for now */
3584 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3585 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3587 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3588 cfg->exception_ptr = klass;
3591 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3593 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3594 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3596 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3597 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
3599 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3600 /* the is_null_bb target simply copies the input register to the output */
3601 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
3606 MONO_START_BB (cfg, false_bb);
3608 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
3609 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3611 MONO_START_BB (cfg, is_null_bb);
3613 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *
 *   Emit IR for the internal CISINST opcode.  Unlike a plain isinst, this
 * produces a small integer result (see the comment below) so that managed
 * fallback code can handle transparent proxies whose type cannot be
 * decided at JIT time (result 2).
 */
3619 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3621 /* This opcode takes as input an object reference and a class, and returns:
3622 0) if the object is an instance of the class,
3623 1) if the object is not an instance of the class,
3624 2) if the object is a proxy whose type cannot be determined */
3627 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
3628 int obj_reg = src->dreg;
3629 int dreg = alloc_ireg (cfg);
3631 int klass_reg = alloc_preg (cfg);
3633 NEW_BBLOCK (cfg, true_bb);
3634 NEW_BBLOCK (cfg, false_bb);
3635 NEW_BBLOCK (cfg, false2_bb);
3636 NEW_BBLOCK (cfg, end_bb);
3637 NEW_BBLOCK (cfg, no_proxy_bb);
/* A null reference is "not an instance" (false_bb sets the result to 1). */
3639 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3640 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
3642 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3643 NEW_BBLOCK (cfg, interface_fail_bb);
/* Fast path: test the interface bitmap in the vtable. */
3645 tmp_reg = alloc_preg (cfg);
3646 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3647 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
3648 MONO_START_BB (cfg, interface_fail_bb);
3649 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Interface check failed: only a transparent proxy can still match. */
3651 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
/* Proxy with custom type info -> undecidable here, return 2 (false2_bb). */
3653 tmp_reg = alloc_preg (cfg);
3654 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3655 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3656 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
/* Non-interface case: inspect the object's class. */
3658 tmp_reg = alloc_preg (cfg);
3659 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3660 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3662 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* Transparent proxy: test against the remote class's proxy_class instead. */
3663 tmp_reg = alloc_preg (cfg);
3664 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3665 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
/* No custom type info -> the proxy class is authoritative, treat as plain object. */
3667 tmp_reg = alloc_preg (cfg);
3668 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3669 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3670 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
/* Custom type info present: a negative isinst is undecidable (result 2). */
3672 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
3673 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
3675 MONO_START_BB (cfg, no_proxy_bb);
3677 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
/* Result blocks: 1 = not an instance, 2 = undecidable proxy, 0 = instance. */
3680 MONO_START_BB (cfg, false_bb);
3682 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3683 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3685 MONO_START_BB (cfg, false2_bb);
3687 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
3688 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3690 MONO_START_BB (cfg, true_bb);
3692 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3694 MONO_START_BB (cfg, end_bb);
/* Materialize the result as an I4 stack value for the caller. */
3697 MONO_INST_NEW (cfg, ins, OP_ICONST);
3699 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *
 *   Emit IR for the internal CCASTCLASS opcode: a castclass variant that,
 * instead of failing on transparent proxies with custom type info, returns
 * 1 so that managed fallback code can complete the check (see the comment
 * below for the result encoding).
 */
3705 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3707 /* This opcode takes as input an object reference and a class, and returns:
3708 0) if the object is an instance of the class,
3709 1) if the object is a proxy whose type cannot be determined
3710 an InvalidCastException exception is thrown otherwise*/
3713 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
3714 int obj_reg = src->dreg;
3715 int dreg = alloc_ireg (cfg);
3716 int tmp_reg = alloc_preg (cfg);
3717 int klass_reg = alloc_preg (cfg);
3719 NEW_BBLOCK (cfg, end_bb);
3720 NEW_BBLOCK (cfg, ok_result_bb);
/* Casting a null reference always succeeds (result 0). */
3722 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3723 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
/* Record source/target class so a failure can raise a detailed exception. */
3725 save_cast_details (cfg, klass, obj_reg);
3727 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3728 NEW_BBLOCK (cfg, interface_fail_bb);
/* Fast path: interface bitmap test on the vtable. */
3730 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3731 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
3732 MONO_START_BB (cfg, interface_fail_bb);
3733 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Not a proxy -> this check throws InvalidCastException directly. */
3735 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
/* Proxy without custom type info cannot match either -> throw. */
3737 tmp_reg = alloc_preg (cfg);
3738 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3739 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3740 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/* Undecidable proxy: hand back 1 for the managed fallback. */
3742 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3743 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3746 NEW_BBLOCK (cfg, no_proxy_bb);
/* Non-interface case: inspect the object's class. */
3748 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3749 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3750 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* Transparent proxy: test against the remote class's proxy_class. */
3752 tmp_reg = alloc_preg (cfg);
3753 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3754 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
/* No custom type info -> the proxy class is authoritative, use normal castclass. */
3756 tmp_reg = alloc_preg (cfg);
3757 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3758 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3759 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3761 NEW_BBLOCK (cfg, fail_1_bb);
/* Custom type info present: a failed isinst yields 1 instead of throwing. */
3763 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
3765 MONO_START_BB (cfg, fail_1_bb);
3767 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3768 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3770 MONO_START_BB (cfg, no_proxy_bb);
/* Plain object: a failing castclass here throws InvalidCastException. */
3772 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
3775 MONO_START_BB (cfg, ok_result_bb);
3777 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3779 MONO_START_BB (cfg, end_bb);
/* Materialize the result as an I4 stack value for the caller. */
3782 MONO_INST_NEW (cfg, ins, OP_ICONST);
3784 ins->type = STACK_I4;
3790 * Returns NULL and set the cfg exception on error.
/*
 * handle_delegate_ctor:
 *
 *   Emit an inlined delegate construction: allocate the delegate object,
 * store its target/method fields (with write barriers when required),
 * optionally cache a per-domain code slot for the compiled target, and
 * install the delegate invoke trampoline.  TARGET is the bound "this"
 * (may be a null constant), METHOD the delegate target method.
 */
3792 static G_GNUC_UNUSED MonoInst*
3793 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used)
3797 gpointer *trampoline;
3798 MonoInst *obj, *method_ins, *tramp_ins;
3802 obj = handle_alloc (cfg, klass, FALSE, 0);
3806 /* Inline the contents of mono_delegate_ctor */
3808 /* Set target field */
3809 /* Optimize away setting of NULL target */
3810 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0)) {
3811 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
/* target is a managed reference stored into a heap object, so the
 * generational GC needs a write barrier on the slot address. */
3812 if (cfg->gen_write_barriers) {
3813 dreg = alloc_preg (cfg);
3814 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target));
3815 emit_write_barrier (cfg, ptr, target, 0);
3819 /* Set method field */
3820 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
3821 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
3822 if (cfg->gen_write_barriers) {
3823 dreg = alloc_preg (cfg);
3824 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), dreg
3825 emit_write_barrier (cfg, ptr, method_ins, 0);
3828 * To avoid looking up the compiled code belonging to the target method
3829 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
3830 * store it, and we fill it after the method has been compiled.
/* Dynamic methods are excluded because their code can be freed;
 * AOT is excluded because the slot address cannot be embedded. */
3832 if (!cfg->compile_aot && !method->dynamic) {
3833 MonoInst *code_slot_ins;
3836 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* Lazily create the per-domain method -> code-slot hash under the
 * domain lock; the slot itself is domain-allocated and zeroed. */
3838 domain = mono_domain_get ();
3839 mono_domain_lock (domain);
3840 if (!domain_jit_info (domain)->method_code_hash)
3841 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
3842 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
3844 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
3845 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
3847 mono_domain_unlock (domain);
3849 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
3851 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
3854 /* Set invoke_impl field */
3855 if (cfg->compile_aot) {
3856 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, klass);
3858 trampoline = mono_create_delegate_trampoline (klass);
3859 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
3861 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
3863 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit the call that allocates a multi-dimensional array of the given
 * RANK, with the size/bound arguments in SP.  Goes through the vararg
 * mono_array_new_va () icall, so the method is flagged as having varargs
 * and LLVM compilation is disabled.
 */
3869 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
3871 MonoJitICallInfo *info;
3873 /* Need to register the icall so it gets an icall wrapper */
3874 info = mono_get_array_new_va_icall (rank);
3876 cfg->flags |= MONO_CFG_HAS_VARARGS;
3878 /* mono_array_new_va () needs a vararg calling convention */
3879 cfg->disable_llvm = TRUE;
3881 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
3882 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * mono_emit_load_got_addr:
 *
 *   Materialize the GOT address into cfg->got_var at method entry (PIC
 * code).  No-op when there is no got_var or it was already allocated.
 * Also emits a dummy use in the exit block so liveness analysis keeps
 * the variable alive for the whole method.
 */
3886 mono_emit_load_got_addr (MonoCompile *cfg)
3888 MonoInst *getaddr, *dummy_use;
3890 if (!cfg->got_var || cfg->got_var_allocated)
3893 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
3894 getaddr->dreg = cfg->got_var->dreg;
3896 /* Add it to the start of the first bblock */
/* Prepend manually when code already exists, so the load executes
 * before any instruction that might need the GOT. */
3897 if (cfg->bb_entry->code) {
3898 getaddr->next = cfg->bb_entry->code;
3899 cfg->bb_entry->code = getaddr;
3902 MONO_ADD_INS (cfg->bb_entry, getaddr);
3904 cfg->got_var_allocated = TRUE;
3907 * Add a dummy use to keep the got_var alive, since real uses might
3908 * only be generated by the back ends.
3909 * Add it to end_bblock, so the variable's lifetime covers the whole
3911 * It would be better to make the usage of the got var explicit in all
3912 * cases when the backend needs it (i.e. calls, throw etc.), so this
3913 * wouldn't be needed.
3915 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
3916 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* IL code-size cap for inlining, read once from the MONO_INLINELIMIT env
 * var with INLINE_LENGTH_LIMIT as fallback (see mono_method_check_inlining). */
3919 static int inline_limit;
3920 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 *   Decide whether METHOD is eligible for inlining into the method being
 * compiled in CFG.  Rejects generic-sharing contexts, deep inline chains,
 * noinline/synchronized/marshalbyref methods, bodies over the size limit,
 * classes whose cctor would have to run inside the inlined code, and
 * methods with declarative security.
 */
3923 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
3925 MonoMethodHeaderSummary header;
3927 #ifdef MONO_ARCH_SOFT_FLOAT
3928 MonoMethodSignature *sig = mono_method_signature (method);
3932 if (cfg->generic_sharing_context)
/* Cap recursion depth to bound compile time and code growth. */
3935 if (cfg->inline_depth > 10)
3938 #ifdef MONO_ARCH_HAVE_LMF_OPS
/* NOTE(review): this arch-specific branch references `signature`, whose
 * declaration is not visible here — presumably guarded elsewhere; verify. */
3939 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3940 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
3941 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
3946 if (!mono_method_get_header_summary (method, &header))
3949 /*runtime, icall and pinvoke are checked by summary call*/
3950 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
3951 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
3952 (method->klass->marshalbyref) ||
3956 /* also consider num_locals? */
3957 /* Do the size check early to avoid creating vtables */
/* Lazy one-time init of the size limit from MONO_INLINELIMIT. */
3958 if (!inline_limit_inited) {
3959 if (getenv ("MONO_INLINELIMIT"))
3960 inline_limit = atoi (getenv ("MONO_INLINELIMIT"));
3962 inline_limit = INLINE_LENGTH_LIMIT;
3963 inline_limit_inited = TRUE;
3965 if (header.code_size >= inline_limit)
3969 * if we can initialize the class of the method right away, we do,
3970 * otherwise we don't allow inlining if the class needs initialization,
3971 * since it would mean inserting a call to mono_runtime_class_init()
3972 * inside the inlined code
3974 if (!(cfg->opt & MONO_OPT_SHARED)) {
3975 if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
3976 if (cfg->run_cctors && method->klass->has_cctor) {
3977 /*FIXME it would be easier and lazier to just use mono_class_try_get_vtable */
3978 if (!method->klass->runtime_info)
3979 /* No vtable created yet */
3981 vtable = mono_class_vtable (cfg->domain, method->klass);
3984 /* This makes so that inline cannot trigger */
3985 /* .cctors: too many apps depend on them */
3986 /* running with a specific order... */
3987 if (! vtable->initialized)
3989 mono_runtime_class_init (vtable);
3991 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
3992 if (!method->klass->runtime_info)
3993 /* No vtable created yet */
3995 vtable = mono_class_vtable (cfg->domain, method->klass);
3998 if (!vtable->initialized)
4003 * If we're compiling for shared code
4004 * the cctor will need to be run at aot method load time, for example,
4005 * or at the end of the compilation of the inlining method.
4007 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
4012 * CAS - do not inline methods with declarative security
4013 * Note: this has to be before any possible return TRUE;
4015 if (mono_method_has_declsec (method))
4018 #ifdef MONO_ARCH_SOFT_FLOAT
/* Soft-float targets cannot inline methods taking/returning R4. */
4020 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
4022 for (i = 0; i < sig->param_count; ++i)
4023 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Return whether a static field access to VTABLE's class must be
 * preceded by explicitly running the class constructor.  Initialized
 * vtables (outside AOT), beforefieldinit classes, and accesses from a
 * non-static method of the same class need no trigger.
 */
4031 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoVTable *vtable)
4033 if (vtable->initialized && !cfg->compile_aot)
4036 if (vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
4039 if (!mono_class_needs_cctor_run (vtable->klass, method))
4042 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (vtable->klass == method->klass))
4043 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit the address computation for an element of a one-dimensional
 * array: &arr->vector [index * element_size].  BCHECK controls whether a
 * bounds check against max_length is emitted (callers that have already
 * checked pass FALSE).  On x86/amd64, power-of-two element sizes use a
 * single LEA.
 */
4050 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
4054 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
4056 mono_class_init (klass);
4057 size = mono_class_array_element_size (klass);
4059 mult_reg = alloc_preg (cfg);
4060 array_reg = arr->dreg;
4061 index_reg = index->dreg;
4063 #if SIZEOF_REGISTER == 8
4064 /* The array reg is 64 bits but the index reg is only 32 */
4065 if (COMPILE_LLVM (cfg)) {
/* LLVM handles the widening itself. */
4067 index2_reg = index_reg;
4069 index2_reg = alloc_preg (cfg);
4070 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
/* 32-bit target: truncate an I8 index down to I4. */
4073 if (index->type == STACK_I8) {
4074 index2_reg = alloc_preg (cfg);
4075 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
4077 index2_reg = index_reg;
4082 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
4084 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Fast path: fold scale and vector offset into one LEA. */
4085 if (size == 1 || size == 2 || size == 4 || size == 8) {
4086 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
4088 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
4089 ins->klass = mono_class_get_element_class (klass);
4090 ins->type = STACK_MP;
/* Generic path: addr = arr + index * size + offsetof (vector). */
4096 add_reg = alloc_ireg_mp (cfg);
4098 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
4099 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
4100 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
4101 ins->klass = mono_class_get_element_class (klass);
4102 ins->type = STACK_MP;
4103 MONO_ADD_INS (cfg->cbb, ins);
4108 #ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit the address computation for an element of a rank-2 array with
 * bounds: both indexes are rebased against their dimension's lower_bound,
 * range-checked against its length, and combined as
 * (realidx1 * len2 + realidx2) * element_size + offsetof (vector).
 * Requires a real OP_LMUL (guarded by MONO_ARCH_EMULATE_MUL_DIV).
 */
4110 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
4112 int bounds_reg = alloc_preg (cfg);
4113 int add_reg = alloc_ireg_mp (cfg);
4114 int mult_reg = alloc_preg (cfg);
4115 int mult2_reg = alloc_preg (cfg);
4116 int low1_reg = alloc_preg (cfg);
4117 int low2_reg = alloc_preg (cfg);
4118 int high1_reg = alloc_preg (cfg);
4119 int high2_reg = alloc_preg (cfg);
4120 int realidx1_reg = alloc_preg (cfg);
4121 int realidx2_reg = alloc_preg (cfg);
4122 int sum_reg = alloc_preg (cfg);
4127 mono_class_init (klass);
4128 size = mono_class_array_element_size (klass);
4130 index1 = index_ins1->dreg;
4131 index2 = index_ins2->dreg;
4133 /* range checking */
4134 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
4135 arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
/* Dimension 1: realidx1 = index1 - lower_bound; unsigned compare against
 * length also catches realidx1 < 0 in a single check. */
4137 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
4138 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4139 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
4140 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
4141 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
4142 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
4143 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Dimension 2: second MonoArrayBounds entry follows the first in memory. */
4145 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
4146 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4147 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
4148 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
4149 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
4150 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
4151 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* addr = ((realidx1 * len2 + realidx2) * size) + arr + offsetof (vector) */
4153 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
4154 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
4155 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
4156 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
4157 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
4159 ins->type = STACK_MP;
4161 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 *   Dispatch the element-address computation for an array Address/Get/Set
 * helper CMETHOD.  Rank 1 — and rank 2 when native OP_LMUL is available —
 * are inlined; higher ranks call the generated array-address wrapper.
 * For setters the trailing value argument is excluded from the rank.
 */
4168 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
4172 MonoMethod *addr_method;
4175 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
4178 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], TRUE);
4180 #ifndef MONO_ARCH_EMULATE_MUL_DIV
4181 /* emit_ldelema_2 depends on OP_LMUL */
4182 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
4183 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
/* General case: go through the marshal-generated Address () wrapper. */
4187 element_size = mono_class_array_element_size (cmethod->klass->element_class);
4188 addr_method = mono_marshal_get_array_address (rank, element_size);
4189 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/* Default break policy: always honor break IL opcodes / Debugger.Break (). */
4194 static MonoBreakPolicy
4195 always_insert_breakpoint (MonoMethod *method)
4197 return MONO_BREAK_POLICY_ALWAYS;
4200 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
4203 * mono_set_break_policy:
4204 * policy_callback: the new callback function
4206 * Allow embedders to decide whether to actually obey breakpoint instructions
4207 * (both break IL instructions and Debugger.Break () method calls), for example
4208 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
4209 * untrusted or semi-trusted code.
4211 * @policy_callback will be called every time a break point instruction needs to
4212 * be inserted with the method argument being the method that calls Debugger.Break()
4213 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
4214 * if it wants the breakpoint to not be effective in the given method.
4215 * #MONO_BREAK_POLICY_ALWAYS is the default.
4218 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
/* A NULL callback restores the default (always insert breakpoints). */
4220 if (policy_callback)
4221 break_policy_func = policy_callback;
4223 break_policy_func = always_insert_breakpoint;
/* Query the installed break policy for METHOD.  (The historical
 * misspelling "brekpoint" is kept: callers reference this exact name.) */
4227 should_insert_brekpoint (MonoMethod *method) {
4228 switch (break_policy_func (method)) {
4229 case MONO_BREAK_POLICY_ALWAYS:
4231 case MONO_BREAK_POLICY_NEVER:
/* ON_DBG: only insert when running under the Mono debugger. */
4233 case MONO_BREAK_POLICY_ON_DBG:
4234 return mono_debug_using_mono_debugger ();
4236 g_warning ("Incorrect value returned from break policy callback");
4241 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
4243 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
/*
 * Inline Array.{Get,Set}GenericValueImpl: compute the element address,
 * then copy the element value between the array slot and the location
 * pointed to by args [2].  IS_SET selects the copy direction.
 */
4245 MonoInst *addr, *store, *load;
4246 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
4248 /* the bounds check is already done by the callers */
4249 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
/* SetGenericValueImpl: read through args [2], write into the array. */
4251 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
4252 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
/* GetGenericValueImpl: read the element, write it through args [2]. */
4254 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
4255 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
4261 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
/*
 * Give intrinsics a chance to replace a constructor call with inline IR.
 * Only SIMD ctors are considered here; a NULL result means no intrinsic
 * applied and the normal call path should be used.
 */
4263 MonoInst *ins = NULL;
4264 #ifdef MONO_ARCH_SIMD_INTRINSICS
4265 if (cfg->opt & MONO_OPT_SIMD) {
4266 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
4276 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4278 MonoInst *ins = NULL;
4280 static MonoClass *runtime_helpers_class = NULL;
4281 if (! runtime_helpers_class)
4282 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
4283 "System.Runtime.CompilerServices", "RuntimeHelpers");
4285 if (cmethod->klass == mono_defaults.string_class) {
4286 if (strcmp (cmethod->name, "get_Chars") == 0) {
4287 int dreg = alloc_ireg (cfg);
4288 int index_reg = alloc_preg (cfg);
4289 int mult_reg = alloc_preg (cfg);
4290 int add_reg = alloc_preg (cfg);
4292 #if SIZEOF_REGISTER == 8
4293 /* The array reg is 64 bits but the index reg is only 32 */
4294 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
4296 index_reg = args [1]->dreg;
4298 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
4300 #if defined(TARGET_X86) || defined(TARGET_AMD64)
4301 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
4302 add_reg = ins->dreg;
4303 /* Avoid a warning */
4305 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
4308 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
4309 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
4310 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
4311 add_reg, G_STRUCT_OFFSET (MonoString, chars));
4313 type_from_op (ins, NULL, NULL);
4315 } else if (strcmp (cmethod->name, "get_Length") == 0) {
4316 int dreg = alloc_ireg (cfg);
4317 /* Decompose later to allow more optimizations */
4318 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
4319 ins->type = STACK_I4;
4320 ins->flags |= MONO_INST_FAULT;
4321 cfg->cbb->has_array_access = TRUE;
4322 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
4325 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
4326 int mult_reg = alloc_preg (cfg);
4327 int add_reg = alloc_preg (cfg);
4329 /* The corlib functions check for oob already. */
4330 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
4331 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
4332 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
4333 return cfg->cbb->last_ins;
4336 } else if (cmethod->klass == mono_defaults.object_class) {
4338 if (strcmp (cmethod->name, "GetType") == 0) {
4339 int dreg = alloc_ireg_ref (cfg);
4340 int vt_reg = alloc_preg (cfg);
4341 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
4342 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
4343 type_from_op (ins, NULL, NULL);
4346 #if !defined(MONO_ARCH_EMULATE_MUL_DIV)
4347 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0 && !mono_gc_is_moving ()) {
4348 int dreg = alloc_ireg (cfg);
4349 int t1 = alloc_ireg (cfg);
4351 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
4352 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
4353 ins->type = STACK_I4;
4357 } else if (strcmp (cmethod->name, ".ctor") == 0) {
4358 MONO_INST_NEW (cfg, ins, OP_NOP);
4359 MONO_ADD_INS (cfg->cbb, ins);
4363 } else if (cmethod->klass == mono_defaults.array_class) {
4364 if (strcmp (cmethod->name + 1, "etGenericValueImpl") == 0)
4365 return emit_array_generic_access (cfg, fsig, args, *cmethod->name == 'S');
4367 #ifndef MONO_BIG_ARRAYS
4369 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
4372 if ((strcmp (cmethod->name, "GetLength") == 0 || strcmp (cmethod->name, "GetLowerBound") == 0) && args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
4373 int dreg = alloc_ireg (cfg);
4374 int bounds_reg = alloc_ireg_mp (cfg);
4375 MonoBasicBlock *end_bb, *szarray_bb;
4376 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
4378 NEW_BBLOCK (cfg, end_bb);
4379 NEW_BBLOCK (cfg, szarray_bb);
4381 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
4382 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
4383 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
4384 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
4385 /* Non-szarray case */
4387 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4388 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
4390 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4391 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4392 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4393 MONO_START_BB (cfg, szarray_bb);
4396 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4397 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
4399 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4400 MONO_START_BB (cfg, end_bb);
4402 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
4403 ins->type = STACK_I4;
4409 if (cmethod->name [0] != 'g')
4412 if (strcmp (cmethod->name, "get_Rank") == 0) {
4413 int dreg = alloc_ireg (cfg);
4414 int vtable_reg = alloc_preg (cfg);
4415 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
4416 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
4417 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
4418 vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
4419 type_from_op (ins, NULL, NULL);
4422 } else if (strcmp (cmethod->name, "get_Length") == 0) {
4423 int dreg = alloc_ireg (cfg);
4425 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4426 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
4427 type_from_op (ins, NULL, NULL);
4432 } else if (cmethod->klass == runtime_helpers_class) {
4434 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
4435 EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
4439 } else if (cmethod->klass == mono_defaults.thread_class) {
4440 if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
4441 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
4442 MONO_ADD_INS (cfg->cbb, ins);
4444 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
4445 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
4446 MONO_ADD_INS (cfg->cbb, ins);
4449 } else if (cmethod->klass == mono_defaults.monitor_class) {
4450 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
4451 if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) {
4454 if (COMPILE_LLVM (cfg)) {
4456 * Pass the argument normally, the LLVM backend will handle the
4457 * calling convention problems.
4459 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
4461 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
4462 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
4463 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
4464 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
4467 return (MonoInst*)call;
4468 } else if (strcmp (cmethod->name, "Exit") == 0) {
4471 if (COMPILE_LLVM (cfg)) {
4472 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
4474 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
4475 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
4476 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
4477 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
4480 return (MonoInst*)call;
4482 #elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
4483 MonoMethod *fast_method = NULL;
4485 /* Avoid infinite recursion */
4486 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
4487 (strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
4488 strcmp (cfg->method->name, "FastMonitorExit") == 0))
4491 if ((strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) ||
4492 strcmp (cmethod->name, "Exit") == 0)
4493 fast_method = mono_monitor_get_fast_path (cmethod);
4497 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
4499 } else if (cmethod->klass->image == mono_defaults.corlib &&
4500 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
4501 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
4504 #if SIZEOF_REGISTER == 8
4505 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
4506 /* 64 bit reads are already atomic */
4507 MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
4508 ins->dreg = mono_alloc_preg (cfg);
4509 ins->inst_basereg = args [0]->dreg;
4510 ins->inst_offset = 0;
4511 MONO_ADD_INS (cfg->cbb, ins);
4515 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
4516 if (strcmp (cmethod->name, "Increment") == 0) {
4517 MonoInst *ins_iconst;
4520 if (fsig->params [0]->type == MONO_TYPE_I4)
4521 opcode = OP_ATOMIC_ADD_NEW_I4;
4522 #if SIZEOF_REGISTER == 8
4523 else if (fsig->params [0]->type == MONO_TYPE_I8)
4524 opcode = OP_ATOMIC_ADD_NEW_I8;
4527 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4528 ins_iconst->inst_c0 = 1;
4529 ins_iconst->dreg = mono_alloc_ireg (cfg);
4530 MONO_ADD_INS (cfg->cbb, ins_iconst);
4532 MONO_INST_NEW (cfg, ins, opcode);
4533 ins->dreg = mono_alloc_ireg (cfg);
4534 ins->inst_basereg = args [0]->dreg;
4535 ins->inst_offset = 0;
4536 ins->sreg2 = ins_iconst->dreg;
4537 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4538 MONO_ADD_INS (cfg->cbb, ins);
4540 } else if (strcmp (cmethod->name, "Decrement") == 0) {
4541 MonoInst *ins_iconst;
4544 if (fsig->params [0]->type == MONO_TYPE_I4)
4545 opcode = OP_ATOMIC_ADD_NEW_I4;
4546 #if SIZEOF_REGISTER == 8
4547 else if (fsig->params [0]->type == MONO_TYPE_I8)
4548 opcode = OP_ATOMIC_ADD_NEW_I8;
4551 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4552 ins_iconst->inst_c0 = -1;
4553 ins_iconst->dreg = mono_alloc_ireg (cfg);
4554 MONO_ADD_INS (cfg->cbb, ins_iconst);
4556 MONO_INST_NEW (cfg, ins, opcode);
4557 ins->dreg = mono_alloc_ireg (cfg);
4558 ins->inst_basereg = args [0]->dreg;
4559 ins->inst_offset = 0;
4560 ins->sreg2 = ins_iconst->dreg;
4561 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4562 MONO_ADD_INS (cfg->cbb, ins);
4564 } else if (strcmp (cmethod->name, "Add") == 0) {
4567 if (fsig->params [0]->type == MONO_TYPE_I4)
4568 opcode = OP_ATOMIC_ADD_NEW_I4;
4569 #if SIZEOF_REGISTER == 8
4570 else if (fsig->params [0]->type == MONO_TYPE_I8)
4571 opcode = OP_ATOMIC_ADD_NEW_I8;
4575 MONO_INST_NEW (cfg, ins, opcode);
4576 ins->dreg = mono_alloc_ireg (cfg);
4577 ins->inst_basereg = args [0]->dreg;
4578 ins->inst_offset = 0;
4579 ins->sreg2 = args [1]->dreg;
4580 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4581 MONO_ADD_INS (cfg->cbb, ins);
4584 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
4586 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
4587 if (strcmp (cmethod->name, "Exchange") == 0) {
4589 gboolean is_ref = fsig->params [0]->type == MONO_TYPE_OBJECT;
4591 if (fsig->params [0]->type == MONO_TYPE_I4)
4592 opcode = OP_ATOMIC_EXCHANGE_I4;
4593 #if SIZEOF_REGISTER == 8
4594 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I8) ||
4595 (fsig->params [0]->type == MONO_TYPE_I))
4596 opcode = OP_ATOMIC_EXCHANGE_I8;
4598 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I))
4599 opcode = OP_ATOMIC_EXCHANGE_I4;
4604 MONO_INST_NEW (cfg, ins, opcode);
4605 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
4606 ins->inst_basereg = args [0]->dreg;
4607 ins->inst_offset = 0;
4608 ins->sreg2 = args [1]->dreg;
4609 MONO_ADD_INS (cfg->cbb, ins);
4611 switch (fsig->params [0]->type) {
4613 ins->type = STACK_I4;
4617 ins->type = STACK_I8;
4619 case MONO_TYPE_OBJECT:
4620 ins->type = STACK_OBJ;
4623 g_assert_not_reached ();
4626 if (cfg->gen_write_barriers && is_ref)
4627 emit_write_barrier (cfg, args [0], args [1], -1);
4629 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
4631 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS
4632 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
4634 gboolean is_ref = MONO_TYPE_IS_REFERENCE (fsig->params [1]);
4635 if (fsig->params [1]->type == MONO_TYPE_I4)
4637 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I)
4638 size = sizeof (gpointer);
4639 else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I8)
4642 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
4643 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
4644 ins->sreg1 = args [0]->dreg;
4645 ins->sreg2 = args [1]->dreg;
4646 ins->sreg3 = args [2]->dreg;
4647 ins->type = STACK_I4;
4648 MONO_ADD_INS (cfg->cbb, ins);
4649 } else if (size == 8) {
4650 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
4651 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
4652 ins->sreg1 = args [0]->dreg;
4653 ins->sreg2 = args [1]->dreg;
4654 ins->sreg3 = args [2]->dreg;
4655 ins->type = STACK_I8;
4656 MONO_ADD_INS (cfg->cbb, ins);
4658 /* g_assert_not_reached (); */
4660 if (cfg->gen_write_barriers && is_ref)
4661 emit_write_barrier (cfg, args [0], args [1], -1);
4663 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS */
4667 } else if (cmethod->klass->image == mono_defaults.corlib) {
4668 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
4669 && strcmp (cmethod->klass->name, "Debugger") == 0) {
4670 if (should_insert_brekpoint (cfg->method))
4671 MONO_INST_NEW (cfg, ins, OP_BREAK);
4673 MONO_INST_NEW (cfg, ins, OP_NOP);
4674 MONO_ADD_INS (cfg->cbb, ins);
4677 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
4678 && strcmp (cmethod->klass->name, "Environment") == 0) {
4680 EMIT_NEW_ICONST (cfg, ins, 1);
4682 EMIT_NEW_ICONST (cfg, ins, 0);
4686 } else if (cmethod->klass == mono_defaults.math_class) {
4688 * There is general branches code for Min/Max, but it does not work for
4690 * http://everything2.com/?node_id=1051618
4694 #ifdef MONO_ARCH_SIMD_INTRINSICS
4695 if (cfg->opt & MONO_OPT_SIMD) {
4696 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
4702 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
4706 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *   Give the JIT a chance to replace a call to METHOD with a specialized IR
 *   sequence.  The visible case redirects String.InternalAllocateStr to the
 *   GC's managed allocator, but only when allocation profiling is off and
 *   shared (domain-neutral) code is not being generated.
 *   NOTE(review): lines are elided in this excerpt — the cross-compile
 *   fallback and the "no redirection" return path are not visible here.
 */
4709 inline static MonoInst*
4710 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
4711 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
4713 if (method->klass == mono_defaults.string_class) {
4714 /* managed string allocation support */
4715 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
4716 MonoInst *iargs [2];
4717 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4718 MonoMethod *managed_alloc = NULL;
4720 g_assert (vtable); /* Should not fail since it is System.String */
4721 #ifndef MONO_CROSS_COMPILE
4722 managed_alloc = mono_gc_get_managed_allocator (vtable, FALSE);
/* Call the managed allocator with (vtable, length) in place of the icall. */
4726 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4727 iargs [1] = args [0];
4728 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
/*
 * mono_save_args:
 *   Spill the call arguments on the evaluation stack SP into freshly created
 *   local variables, recording them in cfg->args so an inlined callee can
 *   reference them as its own arguments.  For instance methods the implicit
 *   'this' (i == 0) takes its type from the stack entry itself.
 */
4735 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
4737 MonoInst *store, *temp;
4740 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
4741 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
4744 * FIXME: We should use *args++ = sp [0], but that would mean the arg
4745 * would be different than the MonoInst's used to represent arguments, and
4746 * the ldelema implementation can't deal with that.
4747 * Solution: When ldelema is used on an inline argument, create a var for
4748 * it, emit ldelema on that var, and emit the saving code below in
4749 * inline_method () if needed.
4751 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
4752 cfg->args [i] = temp;
4753 /* This uses cfg->args [i] which is set by the preceding line */
4754 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
4755 store->cil_code = sp [0]->cil_code;
/*
 * Debug knobs: when non-zero, inlining can be restricted by method-name
 * prefixes taken from environment variables (see the check_inline_*_limit
 * helpers below).
 */
4760 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
4761 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
4763 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *   Debug helper: only allow inlining of callees whose full name starts with
 *   the prefix given in MONO_INLINE_CALLED_METHOD_NAME_LIMIT.  The env var is
 *   read once and cached in a function-local static; an empty/unset limit
 *   permits everything.  NOTE(review): the result is the raw getenv() pointer,
 *   not a copy — acceptable for a debug-only path.
 */
4765 check_inline_called_method_name_limit (MonoMethod *called_method)
4768 static char *limit = NULL;
4770 if (limit == NULL) {
4771 char *limit_string = getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
4773 if (limit_string != NULL)
4774 limit = limit_string;
4776 limit = (char *) "";
4779 if (limit [0] != '\0') {
4780 char *called_method_name = mono_method_full_name (called_method, TRUE);
4782 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
4783 g_free (called_method_name);
4785 //return (strncmp_result <= 0);
4786 return (strncmp_result == 0);
4793 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *   Mirror of check_inline_called_method_name_limit for the CALLER side:
 *   only methods whose full name matches the prefix in
 *   MONO_INLINE_CALLER_METHOD_NAME_LIMIT may perform inlining.
 */
4795 check_inline_caller_method_name_limit (MonoMethod *caller_method)
4798 static char *limit = NULL;
4800 if (limit == NULL) {
4801 char *limit_string = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
4802 if (limit_string != NULL) {
4803 limit = limit_string;
4805 limit = (char *) "";
4809 if (limit [0] != '\0') {
4810 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
4812 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
4813 g_free (caller_method_name);
4815 //return (strncmp_result <= 0);
4816 return (strncmp_result == 0);
/*
 * inline_method:
 *   Try to inline CMETHOD into the current compile at IP.  Saves the pieces of
 *   cfg state that mono_method_to_ir() mutates (locals, args, bblock maps,
 *   generic context, ...), recursively converts the callee's IL between fresh
 *   start/end bblocks, then restores the saved state.  On success (cost below
 *   the threshold, or inline_always) the new bblocks are linked/merged into the
 *   caller's CFG; on failure everything added is discarded by resetting
 *   cfg->cbb.  Returns via elided paths not visible in this excerpt
 *   (presumably the inline cost / 0 on failure — TODO confirm against the
 *   full source).
 */
4824 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
4825 guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_always)
4827 MonoInst *ins, *rvar = NULL;
4828 MonoMethodHeader *cheader;
4829 MonoBasicBlock *ebblock, *sbblock;
4831 MonoMethod *prev_inlined_method;
4832 MonoInst **prev_locals, **prev_args;
4833 MonoType **prev_arg_types;
4834 guint prev_real_offset;
4835 GHashTable *prev_cbb_hash;
4836 MonoBasicBlock **prev_cil_offset_to_bb;
4837 MonoBasicBlock *prev_cbb;
4838 unsigned char* prev_cil_start;
4839 guint32 prev_cil_offset_to_bb_len;
4840 MonoMethod *prev_current_method;
4841 MonoGenericContext *prev_generic_context;
4842 gboolean ret_var_set, prev_ret_var_set, virtual = FALSE;
4844 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
/* Debug-only name filters; see check_inline_*_limit above. */
4846 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
4847 if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
4850 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
4851 if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
4855 if (cfg->verbose_level > 2)
4856 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4858 if (!cmethod->inline_info) {
4859 mono_jit_stats.inlineable_methods++;
4860 cmethod->inline_info = 1;
4863 /* allocate local variables */
4864 cheader = mono_method_get_header (cmethod);
4866 if (cheader == NULL || mono_loader_get_last_error ()) {
4867 MonoLoaderError *error = mono_loader_get_last_error ();
4870 mono_metadata_free_mh (cheader);
4871 if (inline_always && error)
4872 mono_cfg_set_exception (cfg, error->exception_type);
4874 mono_loader_clear_error ();
4878 /* Must verify before creating locals as it can cause the JIT to assert. */
4879 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
4880 mono_metadata_free_mh (cheader);
4884 /* allocate space to store the return value */
4885 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
4886 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
/* Swap in the callee's locals; the caller's are restored below. */
4889 prev_locals = cfg->locals;
4890 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
4891 for (i = 0; i < cheader->num_locals; ++i)
4892 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
4894 /* allocate start and end blocks */
4895 /* This is needed so if the inline is aborted, we can clean up */
4896 NEW_BBLOCK (cfg, sbblock);
4897 sbblock->real_offset = real_offset;
4899 NEW_BBLOCK (cfg, ebblock);
4900 ebblock->block_num = cfg->num_bblocks++;
4901 ebblock->real_offset = real_offset;
/* Save every cfg field that the recursive mono_method_to_ir call clobbers. */
4903 prev_args = cfg->args;
4904 prev_arg_types = cfg->arg_types;
4905 prev_inlined_method = cfg->inlined_method;
4906 cfg->inlined_method = cmethod;
4907 cfg->ret_var_set = FALSE;
4908 cfg->inline_depth ++;
4909 prev_real_offset = cfg->real_offset;
4910 prev_cbb_hash = cfg->cbb_hash;
4911 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
4912 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
4913 prev_cil_start = cfg->cil_start;
4914 prev_cbb = cfg->cbb;
4915 prev_current_method = cfg->current_method;
4916 prev_generic_context = cfg->generic_context;
4917 prev_ret_var_set = cfg->ret_var_set;
4919 if (*ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
/* Recursively convert the callee's IL into IR between sbblock/ebblock. */
4922 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, virtual);
4924 ret_var_set = cfg->ret_var_set;
/* Restore the caller's cfg state regardless of success or failure. */
4926 cfg->inlined_method = prev_inlined_method;
4927 cfg->real_offset = prev_real_offset;
4928 cfg->cbb_hash = prev_cbb_hash;
4929 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
4930 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
4931 cfg->cil_start = prev_cil_start;
4932 cfg->locals = prev_locals;
4933 cfg->args = prev_args;
4934 cfg->arg_types = prev_arg_types;
4935 cfg->current_method = prev_current_method;
4936 cfg->generic_context = prev_generic_context;
4937 cfg->ret_var_set = prev_ret_var_set;
4938 cfg->inline_depth --;
/* Inline accepted when the body was cheap enough (cost < 60) or forced. */
4940 if ((costs >= 0 && costs < 60) || inline_always) {
4941 if (cfg->verbose_level > 2)
4942 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4944 mono_jit_stats.inlined_methods++;
4946 /* always add some code to avoid block split failures */
4947 MONO_INST_NEW (cfg, ins, OP_NOP);
4948 MONO_ADD_INS (prev_cbb, ins);
4950 prev_cbb->next_bb = sbblock;
4951 link_bblock (cfg, prev_cbb, sbblock);
4954 * Get rid of the begin and end bblocks if possible to aid local
4957 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
4959 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
4960 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
4962 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
4963 MonoBasicBlock *prev = ebblock->in_bb [0];
4964 mono_merge_basic_blocks (cfg, prev, ebblock);
4966 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
4967 mono_merge_basic_blocks (cfg, prev_cbb, prev);
4968 cfg->cbb = prev_cbb;
4976 * If the inlined method contains only a throw, then the ret var is not
4977 * set, so set it to a dummy value.
4980 static double r8_0 = 0.0;
4982 switch (rvar->type) {
4984 MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
4987 MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
4992 MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
4995 MONO_INST_NEW (cfg, ins, OP_R8CONST);
4996 ins->type = STACK_R8;
4997 ins->inst_p0 = (void*)&r8_0;
4998 ins->dreg = rvar->dreg;
4999 MONO_ADD_INS (cfg->cbb, ins);
5002 MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (fsig->ret));
5005 g_assert_not_reached ();
5009 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
/* Header is kept alive until the compile finishes; freed with the mempool. */
5012 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
/* Inline rejected: clear any pending exception and drop the new bblocks. */
5015 if (cfg->verbose_level > 2)
5016 printf ("INLINE ABORTED %s\n", mono_method_full_name (cmethod, TRUE));
5017 cfg->exception_type = MONO_EXCEPTION_NONE;
5018 mono_loader_clear_error ();
5020 /* This gets rid of the newly added bblocks */
5021 cfg->cbb = prev_cbb;
5023 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
5028 * Some of these comments may well be out-of-date.
5029 * Design decisions: we do a single pass over the IL code (and we do bblock
5030 * splitting/merging in the few cases when it's required: a back jump to an IL
5031 * address that was not already seen as bblock starting point).
5032 * Code is validated as we go (full verification is still better left to metadata/verify.c).
5033 * Complex operations are decomposed in simpler ones right away. We need to let the
5034 * arch-specific code peek and poke inside this process somehow (except when the
5035 * optimizations can take advantage of the full semantic info of coarse opcodes).
5036 * All the opcodes of the form opcode.s are 'normalized' to opcode.
5037 * MonoInst->opcode initially is the IL opcode or some simplification of that
5038 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
5039 * opcode with value bigger than OP_LAST.
5040 * At this point the IR can be handed over to an interpreter, a dumb code generator
5041 * or to the optimizing code generator that will translate it to SSA form.
5043 * Profiling directed optimizations.
5044 * We may compile by default with few or no optimizations and instrument the code
5045 * or the user may indicate what methods to optimize the most either in a config file
5046 * or through repeated runs where the compiler applies offline the optimizations to
5047 * each method and then decides if it was worth it.
/*
 * Verification helper macros used inside mono_method_to_ir ().  They rely on
 * locals of that function (sp, stack_start, header, num_args, ip, end, cfg)
 * and bail out through UNVERIFIED / LOAD_ERROR on invalid IL.
 */
5050 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
5051 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
5052 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
5053 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
5054 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
5055 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
5056 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
5057 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; LOAD_ERROR;}
5059 /* offset from br.s -> br like opcodes */
5060 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *   Return TRUE when the IL address IP does not start a different basic block
 *   than BB, i.e. no bblock boundary has been recorded at that offset or the
 *   recorded bblock is BB itself.
 */
5063 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
5065 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
5067 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *   First pass over the IL in [start, end): decode each opcode's immediate to
 *   find every branch target and create (via GET_BBLOCK) a basic block at each
 *   target and at each fall-through point after a branch/switch.  Blocks whose
 *   contents lead up to a CEE_THROW are marked out_of_line so later passes can
 *   move them off the hot path.
 */
5071 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
5073 unsigned char *ip = start;
5074 unsigned char *target;
5077 MonoBasicBlock *bblock;
5078 const MonoOpcode *opcode;
5081 cli_addr = ip - start;
5082 i = mono_opcode_value ((const guint8 **)&ip, end);
5085 opcode = &mono_opcodes [i];
/* Advance over the opcode's inline operand; only branch operands create blocks. */
5086 switch (opcode->argument) {
5087 case MonoInlineNone:
5090 case MonoInlineString:
5091 case MonoInlineType:
5092 case MonoInlineField:
5093 case MonoInlineMethod:
5096 case MonoShortInlineR:
5103 case MonoShortInlineVar:
5104 case MonoShortInlineI:
5107 case MonoShortInlineBrTarget:
/* 1-byte signed displacement, relative to the end of the 2-byte instruction. */
5108 target = start + cli_addr + 2 + (signed char)ip [1];
5109 GET_BBLOCK (cfg, bblock, target);
5112 GET_BBLOCK (cfg, bblock, ip);
5114 case MonoInlineBrTarget:
/* 4-byte signed displacement, relative to the end of the 5-byte instruction. */
5115 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
5116 GET_BBLOCK (cfg, bblock, target);
5119 GET_BBLOCK (cfg, bblock, ip);
5121 case MonoInlineSwitch: {
5122 guint32 n = read32 (ip + 1);
/* Switch targets are relative to the first byte after the jump table. */
5125 cli_addr += 5 + 4 * n;
5126 target = start + cli_addr;
5127 GET_BBLOCK (cfg, bblock, target);
5129 for (j = 0; j < n; ++j) {
5130 target = start + cli_addr + (gint32)read32 (ip);
5131 GET_BBLOCK (cfg, bblock, target);
5141 g_assert_not_reached ();
5144 if (i == CEE_THROW) {
5145 unsigned char *bb_start = ip - 1;
5147 /* Find the start of the bblock containing the throw */
5149 while ((bb_start >= start) && !bblock) {
5150 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
5154 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *   Resolve TOKEN to a MonoMethod.  For wrapper methods the token indexes the
 *   wrapper's own data table instead of the image metadata.  "allow_open"
 *   means open constructed generic methods are not rejected here.
 */
5163 static inline MonoMethod *
5164 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
5168 if (m->wrapper_type != MONO_WRAPPER_NONE)
5169 return mono_method_get_wrapper_data (m, token);
5171 method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 *   Like mini_get_method_allow_open, but when not compiling with generic
 *   sharing a method on an open constructed type is rejected (handling on the
 *   elided line — presumably method is set to NULL; confirm in full source).
 */
5176 static inline MonoMethod *
5177 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
5179 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
5181 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 *   Resolve TOKEN to a MonoClass, consulting the wrapper data table for
 *   wrapper methods, and ensure the class is initialized before returning.
 */
5187 static inline MonoClass*
5188 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
5192 if (method->wrapper_type != MONO_WRAPPER_NONE)
5193 klass = mono_method_get_wrapper_data (method, token);
5195 klass = mono_class_get_full (method->klass->image, token, context);
5197 mono_class_init (klass);
5202 * Returns TRUE if the JIT should abort inlining because "callee"
5203 * is influenced by security attributes.
/*
 * check_linkdemand:
 *   Evaluate CAS link-demands of CALLEE against CALLER.  An ECMA-signed
 *   demand emits code that throws a SecurityException at the call site;
 *   any other failed demand records MONO_EXCEPTION_SECURITY_LINKDEMAND on
 *   the cfg (without hiding an earlier recorded exception).
 */
5206 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
5210 if ((cfg->method != caller) && mono_method_has_declsec (callee)) {
5214 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
5215 if (result == MONO_JIT_SECURITY_OK)
5218 if (result == MONO_JIT_LINKDEMAND_ECMA) {
5219 /* Generate code to throw a SecurityException before the actual call/link */
5220 MonoSecurityManager *secman = mono_security_manager_get_methods ();
5223 NEW_ICONST (cfg, args [0], 4);
5224 NEW_METHODCONST (cfg, args [1], caller);
5225 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
5226 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
5227 /* don't hide previous results */
5228 mono_cfg_set_exception (cfg, MONO_EXCEPTION_SECURITY_LINKDEMAND);
5229 cfg->exception_data = result;
/*
 * throw_exception:
 *   Return SecurityManager.ThrowException(Exception), looked up once and
 *   cached in a function-local static.
 */
5237 throw_exception (void)
5239 static MonoMethod *method = NULL;
5242 MonoSecurityManager *secman = mono_security_manager_get_methods ();
5243 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *   Emit IR that throws the pre-allocated exception object EX at runtime, by
 *   calling SecurityManager.ThrowException with EX as a pointer constant.
 */
5250 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
5252 MonoMethod *thrower = throw_exception ();
5255 EMIT_NEW_PCONST (cfg, args [0], ex);
5256 mono_emit_method_call (cfg, thrower, args, NULL);
5260 * Return the original method if a wrapper is specified. We can only access
5261 * the custom attributes from the original method.
5264 get_original_method (MonoMethod *method)
5266 if (method->wrapper_type == MONO_WRAPPER_NONE)
5269 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
5270 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
5273 /* in other cases we need to find the original method */
5274 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *   CoreCLR security check: if FIELD access is not permitted for CALLER
 *   (resolved through wrappers to the original method), emit code that throws
 *   the returned exception at runtime instead of failing the compile.
 */
5278 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
5279 MonoBasicBlock *bblock, unsigned char *ip)
5281 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
5282 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
5284 emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *   CoreCLR security check for calls: mirrors the field-access variant above,
 *   emitting a runtime throw when CALLER may not invoke CALLEE.
 */
5288 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
5289 MonoBasicBlock *bblock, unsigned char *ip)
5291 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
5292 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
5294 emit_throw_exception (cfg, ex);
5298 * Check that the IL instructions at ip are the array initialization
5299 * sequence and return the pointer to the data and the size.
/*
 * initialize_array_data:
 *   Pattern-match the canonical "dup; ldtoken <fld>; call RuntimeHelpers::
 *   InitializeArray" sequence following a newarr, and if the element type is
 *   one the JIT can copy directly (little-endian only for multi-byte types),
 *   return a pointer to the field's RVA data so the call can be replaced by a
 *   memcpy.  For AOT the RVA itself is returned and resolved at load time.
 */
5302 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
5305 * newarr[System.Int32]
5307 * ldtoken field valuetype ...
5308 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* ip[5] == 0x4 checks the ldtoken operand is a Field token (table 0x04). */
5310 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
5311 guint32 token = read32 (ip + 7);
5312 guint32 field_token = read32 (ip + 2);
5313 guint32 field_index = field_token & 0xffffff;
5315 const char *data_ptr;
5317 MonoMethod *cmethod;
5318 MonoClass *dummy_class;
5319 MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
5325 *out_field_token = field_token;
5327 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* Only the exact corlib RuntimeHelpers.InitializeArray qualifies. */
5330 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
5332 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
5333 case MONO_TYPE_BOOLEAN:
5337 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
5338 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
5339 case MONO_TYPE_CHAR:
5349 return NULL; /* stupid ARM FP swapped format */
/* Sanity: the computed blob size must fit in the field's declared type. */
5359 if (size > mono_type_size (field->type, &dummy_align))
5362 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
5363 if (!method->klass->image->dynamic) {
5364 field_index = read32 (ip + 2) & 0xffffff;
5365 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
5366 data_ptr = mono_image_rva_map (method->klass->image, rva);
5367 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
5368 /* for aot code we do the lookup on load */
5369 if (aot && data_ptr)
5370 return GUINT_TO_POINTER (rva);
5372 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
5374 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *   Record an InvalidProgramException on the cfg, with a message that embeds
 *   the method name and a disassembly of the offending instruction (or a note
 *   that the body is empty).  The header is queued for later freeing.
 */
5382 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
5384 char *method_fname = mono_method_full_name (method, TRUE);
5386 MonoMethodHeader *header = mono_method_get_header (method);
5388 if (header->code_size == 0)
5389 method_code = g_strdup ("method body is empty.");
5391 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
5392 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
5393 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
5394 g_free (method_fname);
5395 g_free (method_code);
5396 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * set_exception_object:
 *   Record a pre-built managed exception object on the cfg; the pointer is
 *   registered as a GC root so the object survives until it is thrown.
 */
5400 set_exception_object (MonoCompile *cfg, MonoException *exception)
5402 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OBJECT_SUPPLIED);
5403 MONO_GC_REGISTER_ROOT_SINGLE (cfg->exception_ptr);
5404 cfg->exception_ptr = exception;
/*
 * generic_class_is_reference_type:
 *   Like MONO_TYPE_IS_REFERENCE, but under generic sharing the type is first
 *   reduced to its basic (shared) representation so type variables
 *   constrained to reference types are classified correctly.
 */
5408 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
5412 if (cfg->generic_sharing_context)
5413 type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, &klass->byval_arg);
5415 type = &klass->byval_arg;
5416 return MONO_TYPE_IS_REFERENCE (type);
/*
 * emit_stloc_ir:
 *   Emit a store of the top-of-stack value *SP into local N.  When the store
 *   would be a plain reg-reg move and *SP is the constant instruction just
 *   emitted, retarget that instruction's dreg instead of emitting a move.
 */
5420 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
5423 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
5424 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
5425 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
5426 /* Optimize reg-reg moves away */
5428 * Can't optimize other opcodes, since sp[0] might point to
5429 * the last ins of a decomposed opcode.
5431 sp [0]->dreg = (cfg)->locals [n]->dreg;
5433 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
5438 * ldloca inhibits many optimizations so try to get rid of it in common
/*
 * emit_optimized_ldloca_ir:
 *   Peephole for "ldloca N; initobj <T>": instead of taking the local's
 *   address, directly zero the local (PCONST NULL for reference types, VZERO
 *   for value types), which keeps the local address-free.  Returns the new IP
 *   past the consumed sequence, or (on elided paths) NULL when the pattern
 *   does not apply — confirm against the full source.
 */
5441 static inline unsigned char *
5442 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
5451 local = read16 (ip + 2);
/* Only fire when the following INITOBJ is in the same basic block. */
5455 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
5456 gboolean skip = FALSE;
5458 /* From the INITOBJ case */
5459 token = read32 (ip + 2);
5460 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
5461 CHECK_TYPELOAD (klass);
5462 if (generic_class_is_reference_type (cfg, klass)) {
5463 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5464 } else if (MONO_TYPE_IS_REFERENCE (&klass->byval_arg)) {
5465 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5466 } else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
5467 MONO_EMIT_NEW_VZERO (cfg, cfg->locals [local]->dreg, klass);
/*
 * is_exception_class:
 *   Walk the inheritance chain and return whether CLASS derives from (or is)
 *   System.Exception.
 */
5480 is_exception_class (MonoClass *class)
5483 if (class == mono_defaults.exception_class)
5485 class = class->parent;
5491 * is_jit_optimizer_disabled:
5493 * Determine whether M's assembly has a DebuggableAttribute with the
5494 * IsJITOptimizerDisabled flag set.
/*
 * The result is computed once per assembly and cached in
 * ass->jit_optimizer_disabled, published with a memory barrier before the
 * _inited flag so concurrent readers see a consistent pair.
 */
5497 is_jit_optimizer_disabled (MonoMethod *m)
5499 MonoAssembly *ass = m->klass->image->assembly;
5500 MonoCustomAttrInfo* attrs;
5501 static MonoClass *klass;
5503 gboolean val = FALSE;
5506 if (ass->jit_optimizer_disabled_inited)
5507 return ass->jit_optimizer_disabled;
5510 klass = mono_class_from_name (mono_defaults.corlib, "System.Diagnostics", "DebuggableAttribute");
/* No DebuggableAttribute type available: optimizer stays enabled. */
5513 ass->jit_optimizer_disabled = FALSE;
5514 mono_memory_barrier ();
5515 ass->jit_optimizer_disabled_inited = TRUE;
5519 attrs = mono_custom_attrs_from_assembly (ass);
5521 for (i = 0; i < attrs->num_attrs; ++i) {
5522 MonoCustomAttrEntry *attr = &attrs->attrs [i];
5525 MonoMethodSignature *sig;
5527 if (!attr->ctor || attr->ctor->klass != klass)
5529 /* Decode the attribute. See reflection.c */
5530 len = attr->data_size;
5531 p = (const char*)attr->data;
/* 0x0001 is the custom-attribute blob prolog (ECMA-335 II.23.3). */
5532 g_assert (read16 (p) == 0x0001);
5535 // FIXME: Support named parameters
5536 sig = mono_method_signature (attr->ctor);
/* Only the DebuggableAttribute(bool, bool) ctor overload is decoded. */
5537 if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
5539 /* Two boolean arguments */
5543 mono_custom_attrs_free (attrs);
5546 ass->jit_optimizer_disabled = val;
5547 mono_memory_barrier ();
5548 ass->jit_optimizer_disabled_inited = TRUE;
/*
 * is_supported_tail_call:
 *   Decide whether a call from METHOD to CMETHOD with signature FSIG can be
 *   compiled as a real tail call on this architecture/runtime.  Starts from
 *   the arch-specific signature compatibility check, then vetoes cases where
 *   the callee could observe the caller's (about to be destroyed) stack
 *   frame: byref/ptr/fnptr arguments, valuetype 'this', pinvokes, save_lmf
 *   frames, and most wrappers.
 */
5554 is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig)
5556 gboolean supported_tail_call;
5559 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
5560 supported_tail_call = MONO_ARCH_USE_OP_TAIL_CALL (mono_method_signature (method), mono_method_signature (cmethod));
5562 supported_tail_call = mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
5565 for (i = 0; i < fsig->param_count; ++i) {
5566 if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
5567 /* These can point to the current method's stack */
5568 supported_tail_call = FALSE;
5570 if (fsig->hasthis && cmethod->klass->valuetype)
5571 /* this might point to the current method's stack */
5572 supported_tail_call = FALSE;
5573 if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
5574 supported_tail_call = FALSE;
5575 if (cfg->method->save_lmf)
5576 supported_tail_call = FALSE;
5577 if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
5578 supported_tail_call = FALSE;
5580 /* Debugging support */
/* Bisection aid: COUNT env var disables tail calls past the Nth site. */
5582 if (supported_tail_call) {
5583 static int count = 0;
5585 if (getenv ("COUNT")) {
5586 if (count == atoi (getenv ("COUNT")))
5587 printf ("LAST: %s\n", mono_method_full_name (cmethod, TRUE));
5588 if (count > atoi (getenv ("COUNT")))
5589 supported_tail_call = FALSE;
5594 return supported_tail_call;
5598 * mono_method_to_ir:
5600 * Translate the .net IL into linear IR.
5603 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
5604 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
5605 guint inline_offset, gboolean is_virtual_call)
5608 MonoInst *ins, **sp, **stack_start;
5609 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
5610 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
5611 MonoMethod *cmethod, *method_definition;
5612 MonoInst **arg_array;
5613 MonoMethodHeader *header;
5615 guint32 token, ins_flag;
5617 MonoClass *constrained_call = NULL;
5618 unsigned char *ip, *end, *target, *err_pos;
5619 static double r8_0 = 0.0;
5620 MonoMethodSignature *sig;
5621 MonoGenericContext *generic_context = NULL;
5622 MonoGenericContainer *generic_container = NULL;
5623 MonoType **param_types;
5624 int i, n, start_new_bblock, dreg;
5625 int num_calls = 0, inline_costs = 0;
5626 int breakpoint_id = 0;
5628 MonoBoolean security, pinvoke;
5629 MonoSecurityManager* secman = NULL;
5630 MonoDeclSecurityActions actions;
5631 GSList *class_inits = NULL;
5632 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
5634 gboolean init_locals, seq_points, skip_dead_blocks;
5635 gboolean disable_inline;
5637 disable_inline = is_jit_optimizer_disabled (method);
5639 /* serialization and xdomain stuff may need access to private fields and methods */
5640 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
5641 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
5642 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
5643 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
5644 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
5645 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
5647 dont_verify |= mono_security_get_mode () == MONO_SECURITY_MODE_SMCS_HACK;
5649 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
5650 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
5651 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
5652 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
5653 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
5655 image = method->klass->image;
5656 header = mono_method_get_header (method);
5658 MonoLoaderError *error;
5660 if ((error = mono_loader_get_last_error ())) {
5661 mono_cfg_set_exception (cfg, error->exception_type);
5663 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
5664 cfg->exception_message = g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name);
5666 goto exception_exit;
5668 generic_container = mono_method_get_generic_container (method);
5669 sig = mono_method_signature (method);
5670 num_args = sig->hasthis + sig->param_count;
5671 ip = (unsigned char*)header->code;
5672 cfg->cil_start = ip;
5673 end = ip + header->code_size;
5674 mono_jit_stats.cil_code_size += header->code_size;
5675 init_locals = header->init_locals;
5677 seq_points = cfg->gen_seq_points && cfg->method == method;
5680 * Methods without init_locals set could cause asserts in various passes
5685 method_definition = method;
5686 while (method_definition->is_inflated) {
5687 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
5688 method_definition = imethod->declaring;
5691 /* SkipVerification is not allowed if core-clr is enabled */
5692 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
5694 dont_verify_stloc = TRUE;
5697 if (mono_debug_using_mono_debugger ())
5698 cfg->keep_cil_nops = TRUE;
5700 if (sig->is_inflated)
5701 generic_context = mono_method_get_context (method);
5702 else if (generic_container)
5703 generic_context = &generic_container->context;
5704 cfg->generic_context = generic_context;
5706 if (!cfg->generic_sharing_context)
5707 g_assert (!sig->has_type_parameters);
5709 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
5710 g_assert (method->is_inflated);
5711 g_assert (mono_method_get_context (method)->method_inst);
5713 if (method->is_inflated && mono_method_get_context (method)->method_inst)
5714 g_assert (sig->generic_param_count);
5716 if (cfg->method == method) {
5717 cfg->real_offset = 0;
5719 cfg->real_offset = inline_offset;
5722 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
5723 cfg->cil_offset_to_bb_len = header->code_size;
5725 cfg->current_method = method;
5727 if (cfg->verbose_level > 2)
5728 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
5730 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
5732 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
5733 for (n = 0; n < sig->param_count; ++n)
5734 param_types [n + sig->hasthis] = sig->params [n];
5735 cfg->arg_types = param_types;
5737 dont_inline = g_list_prepend (dont_inline, method);
5738 if (cfg->method == method) {
5740 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
5741 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
5744 NEW_BBLOCK (cfg, start_bblock);
5745 cfg->bb_entry = start_bblock;
5746 start_bblock->cil_code = NULL;
5747 start_bblock->cil_length = 0;
5748 #if defined(__native_client_codegen__)
5749 MONO_INST_NEW (cfg, ins, OP_NACL_GC_SAFE_POINT);
5750 ins->dreg = alloc_dreg (cfg, STACK_I4);
5751 MONO_ADD_INS (start_bblock, ins);
5755 NEW_BBLOCK (cfg, end_bblock);
5756 cfg->bb_exit = end_bblock;
5757 end_bblock->cil_code = NULL;
5758 end_bblock->cil_length = 0;
5759 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
5760 g_assert (cfg->num_bblocks == 2);
5762 arg_array = cfg->args;
5764 if (header->num_clauses) {
5765 cfg->spvars = g_hash_table_new (NULL, NULL);
5766 cfg->exvars = g_hash_table_new (NULL, NULL);
5768 /* handle exception clauses */
5769 for (i = 0; i < header->num_clauses; ++i) {
5770 MonoBasicBlock *try_bb;
5771 MonoExceptionClause *clause = &header->clauses [i];
5772 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
5773 try_bb->real_offset = clause->try_offset;
5774 try_bb->try_start = TRUE;
5775 try_bb->region = ((i + 1) << 8) | clause->flags;
5776 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
5777 tblock->real_offset = clause->handler_offset;
5778 tblock->flags |= BB_EXCEPTION_HANDLER;
5780 link_bblock (cfg, try_bb, tblock);
5782 if (*(ip + clause->handler_offset) == CEE_POP)
5783 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
5785 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
5786 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
5787 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
5788 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5789 MONO_ADD_INS (tblock, ins);
5792 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
5793 MONO_ADD_INS (tblock, ins);
5796 /* todo: is a fault block unsafe to optimize? */
5797 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
5798 tblock->flags |= BB_EXCEPTION_UNSAFE;
5802 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
5804 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
5806 /* catch and filter blocks get the exception object on the stack */
5807 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
5808 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5809 MonoInst *dummy_use;
5811 /* mostly like handle_stack_args (), but just sets the input args */
5812 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
5813 tblock->in_scount = 1;
5814 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5815 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5818 * Add a dummy use for the exvar so its liveness info will be
5822 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
5824 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5825 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
5826 tblock->flags |= BB_EXCEPTION_HANDLER;
5827 tblock->real_offset = clause->data.filter_offset;
5828 tblock->in_scount = 1;
5829 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5830 /* The filter block shares the exvar with the handler block */
5831 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5832 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5833 MONO_ADD_INS (tblock, ins);
5837 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
5838 clause->data.catch_class &&
5839 cfg->generic_sharing_context &&
5840 mono_class_check_context_used (clause->data.catch_class)) {
5842 * In shared generic code with catch
5843 * clauses containing type variables
5844 * the exception handling code has to
5845 * be able to get to the rgctx.
5846 * Therefore we have to make sure that
5847 * the vtable/mrgctx argument (for
5848 * static or generic methods) or the
5849 * "this" argument (for non-static
5850 * methods) are live.
5852 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5853 mini_method_get_context (method)->method_inst ||
5854 method->klass->valuetype) {
5855 mono_get_vtable_var (cfg);
5857 MonoInst *dummy_use;
5859 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
5864 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
5865 cfg->cbb = start_bblock;
5866 cfg->args = arg_array;
5867 mono_save_args (cfg, sig, inline_args);
5870 /* FIRST CODE BLOCK */
5871 NEW_BBLOCK (cfg, bblock);
5872 bblock->cil_code = ip;
5876 ADD_BBLOCK (cfg, bblock);
5878 if (cfg->method == method) {
5879 breakpoint_id = mono_debugger_method_has_breakpoint (method);
5880 if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
5881 MONO_INST_NEW (cfg, ins, OP_BREAK);
5882 MONO_ADD_INS (bblock, ins);
5886 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5887 secman = mono_security_manager_get_methods ();
5889 security = (secman && mono_method_has_declsec (method));
5890 /* at this point having security doesn't mean we have any code to generate */
5891 if (security && (cfg->method == method)) {
5892 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
5893 * And we do not want to enter the next section (with allocation) if we
5894 * have nothing to generate */
5895 security = mono_declsec_get_demands (method, &actions);
5898 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
5899 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
5901 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5902 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5903 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
5905 /* unless the method or it's class has the [SuppressUnmanagedCodeSecurity] attribute */
5906 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5910 mono_custom_attrs_free (custom);
5913 custom = mono_custom_attrs_from_class (wrapped->klass);
5914 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5918 mono_custom_attrs_free (custom);
5921 /* not a P/Invoke after all */
5926 if ((init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
5927 /* we use a separate basic block for the initialization code */
5928 NEW_BBLOCK (cfg, init_localsbb);
5929 cfg->bb_init = init_localsbb;
5930 init_localsbb->real_offset = cfg->real_offset;
5931 start_bblock->next_bb = init_localsbb;
5932 init_localsbb->next_bb = bblock;
5933 link_bblock (cfg, start_bblock, init_localsbb);
5934 link_bblock (cfg, init_localsbb, bblock);
5936 cfg->cbb = init_localsbb;
5938 start_bblock->next_bb = bblock;
5939 link_bblock (cfg, start_bblock, bblock);
5942 /* at this point we know, if security is TRUE, that some code needs to be generated */
5943 if (security && (cfg->method == method)) {
5946 mono_jit_stats.cas_demand_generation++;
5948 if (actions.demand.blob) {
5949 /* Add code for SecurityAction.Demand */
5950 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
5951 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
5952 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5953 mono_emit_method_call (cfg, secman->demand, args, NULL);
5955 if (actions.noncasdemand.blob) {
5956 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
5957 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
5958 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
5959 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
5960 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5961 mono_emit_method_call (cfg, secman->demand, args, NULL);
5963 if (actions.demandchoice.blob) {
5964 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
5965 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
5966 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
5967 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
5968 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
5972 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
5974 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
5977 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
5978 /* check if this is native code, e.g. an icall or a p/invoke */
5979 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
5980 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5982 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
5983 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
5985 /* if this ia a native call then it can only be JITted from platform code */
5986 if ((icall || pinvk) && method->klass && method->klass->image) {
5987 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
5988 MonoException *ex = icall ? mono_get_exception_security () :
5989 mono_get_exception_method_access ();
5990 emit_throw_exception (cfg, ex);
5997 if (header->code_size == 0)
6000 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
6005 if (cfg->method == method)
6006 mono_debug_init_method (cfg, bblock, breakpoint_id);
6008 for (n = 0; n < header->num_locals; ++n) {
6009 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
6014 /* We force the vtable variable here for all shared methods
6015 for the possibility that they might show up in a stack
6016 trace where their exact instantiation is needed. */
6017 if (cfg->generic_sharing_context && method == cfg->method) {
6018 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
6019 mini_method_get_context (method)->method_inst ||
6020 method->klass->valuetype) {
6021 mono_get_vtable_var (cfg);
6023 /* FIXME: Is there a better way to do this?
6024 We need the variable live for the duration
6025 of the whole method. */
6026 cfg->args [0]->flags |= MONO_INST_INDIRECT;
6030 /* add a check for this != NULL to inlined methods */
6031 if (is_virtual_call) {
6034 NEW_ARGLOAD (cfg, arg_ins, 0);
6035 MONO_ADD_INS (cfg->cbb, arg_ins);
6036 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
6039 skip_dead_blocks = !dont_verify;
6040 if (skip_dead_blocks) {
6041 original_bb = bb = mono_basic_block_split (method, &error);
6042 if (!mono_error_ok (&error)) {
6043 mono_error_cleanup (&error);
6049 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
6050 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
6053 start_new_bblock = 0;
6056 if (cfg->method == method)
6057 cfg->real_offset = ip - header->code;
6059 cfg->real_offset = inline_offset;
6064 if (start_new_bblock) {
6065 bblock->cil_length = ip - bblock->cil_code;
6066 if (start_new_bblock == 2) {
6067 g_assert (ip == tblock->cil_code);
6069 GET_BBLOCK (cfg, tblock, ip);
6071 bblock->next_bb = tblock;
6074 start_new_bblock = 0;
6075 for (i = 0; i < bblock->in_scount; ++i) {
6076 if (cfg->verbose_level > 3)
6077 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
6078 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
6082 g_slist_free (class_inits);
6085 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
6086 link_bblock (cfg, bblock, tblock);
6087 if (sp != stack_start) {
6088 handle_stack_args (cfg, stack_start, sp - stack_start);
6090 CHECK_UNVERIFIABLE (cfg);
6092 bblock->next_bb = tblock;
6095 for (i = 0; i < bblock->in_scount; ++i) {
6096 if (cfg->verbose_level > 3)
6097 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
6098 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
6101 g_slist_free (class_inits);
6106 if (skip_dead_blocks) {
6107 int ip_offset = ip - header->code;
6109 if (ip_offset == bb->end)
6113 int op_size = mono_opcode_size (ip, end);
6114 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
6116 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
6118 if (ip_offset + op_size == bb->end) {
6119 MONO_INST_NEW (cfg, ins, OP_NOP);
6120 MONO_ADD_INS (bblock, ins);
6121 start_new_bblock = 1;
6129 * Sequence points are points where the debugger can place a breakpoint.
6130 * Currently, we generate these automatically at points where the IL
6133 if (seq_points && sp == stack_start) {
6134 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
6135 MONO_ADD_INS (cfg->cbb, ins);
6138 bblock->real_offset = cfg->real_offset;
6140 if ((cfg->method == method) && cfg->coverage_info) {
6141 guint32 cil_offset = ip - header->code;
6142 cfg->coverage_info->data [cil_offset].cil_code = ip;
6144 /* TODO: Use an increment here */
6145 #if defined(TARGET_X86)
6146 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
6147 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
6149 MONO_ADD_INS (cfg->cbb, ins);
6151 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
6152 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
6156 if (cfg->verbose_level > 3)
6157 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
6161 if (cfg->keep_cil_nops)
6162 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
6164 MONO_INST_NEW (cfg, ins, OP_NOP);
6166 MONO_ADD_INS (bblock, ins);
6169 if (should_insert_brekpoint (cfg->method))
6170 MONO_INST_NEW (cfg, ins, OP_BREAK);
6172 MONO_INST_NEW (cfg, ins, OP_NOP);
6174 MONO_ADD_INS (bblock, ins);
6180 CHECK_STACK_OVF (1);
6181 n = (*ip)-CEE_LDARG_0;
6183 EMIT_NEW_ARGLOAD (cfg, ins, n);
6191 CHECK_STACK_OVF (1);
6192 n = (*ip)-CEE_LDLOC_0;
6194 EMIT_NEW_LOCLOAD (cfg, ins, n);
6203 n = (*ip)-CEE_STLOC_0;
6206 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
6208 emit_stloc_ir (cfg, sp, header, n);
6215 CHECK_STACK_OVF (1);
6218 EMIT_NEW_ARGLOAD (cfg, ins, n);
6224 CHECK_STACK_OVF (1);
6227 NEW_ARGLOADA (cfg, ins, n);
6228 MONO_ADD_INS (cfg->cbb, ins);
6238 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
6240 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
6245 CHECK_STACK_OVF (1);
6248 EMIT_NEW_LOCLOAD (cfg, ins, n);
6252 case CEE_LDLOCA_S: {
6253 unsigned char *tmp_ip;
6255 CHECK_STACK_OVF (1);
6256 CHECK_LOCAL (ip [1]);
6258 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
6264 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
6273 CHECK_LOCAL (ip [1]);
6274 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
6276 emit_stloc_ir (cfg, sp, header, ip [1]);
6281 CHECK_STACK_OVF (1);
6282 EMIT_NEW_PCONST (cfg, ins, NULL);
6283 ins->type = STACK_OBJ;
6288 CHECK_STACK_OVF (1);
6289 EMIT_NEW_ICONST (cfg, ins, -1);
6302 CHECK_STACK_OVF (1);
6303 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
6309 CHECK_STACK_OVF (1);
6311 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
6317 CHECK_STACK_OVF (1);
6318 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
6324 CHECK_STACK_OVF (1);
6325 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6326 ins->type = STACK_I8;
6327 ins->dreg = alloc_dreg (cfg, STACK_I8);
6329 ins->inst_l = (gint64)read64 (ip);
6330 MONO_ADD_INS (bblock, ins);
6336 gboolean use_aotconst = FALSE;
6338 #ifdef TARGET_POWERPC
6339 /* FIXME: Clean this up */
6340 if (cfg->compile_aot)
6341 use_aotconst = TRUE;
6344 /* FIXME: we should really allocate this only late in the compilation process */
6345 f = mono_domain_alloc (cfg->domain, sizeof (float));
6347 CHECK_STACK_OVF (1);
6353 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
6355 dreg = alloc_freg (cfg);
6356 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
6357 ins->type = STACK_R8;
6359 MONO_INST_NEW (cfg, ins, OP_R4CONST);
6360 ins->type = STACK_R8;
6361 ins->dreg = alloc_dreg (cfg, STACK_R8);
6363 MONO_ADD_INS (bblock, ins);
6373 gboolean use_aotconst = FALSE;
6375 #ifdef TARGET_POWERPC
6376 /* FIXME: Clean this up */
6377 if (cfg->compile_aot)
6378 use_aotconst = TRUE;
6381 /* FIXME: we should really allocate this only late in the compilation process */
6382 d = mono_domain_alloc (cfg->domain, sizeof (double));
6384 CHECK_STACK_OVF (1);
6390 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
6392 dreg = alloc_freg (cfg);
6393 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
6394 ins->type = STACK_R8;
6396 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6397 ins->type = STACK_R8;
6398 ins->dreg = alloc_dreg (cfg, STACK_R8);
6400 MONO_ADD_INS (bblock, ins);
6409 MonoInst *temp, *store;
6411 CHECK_STACK_OVF (1);
6415 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
6416 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
6418 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6421 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6434 if (sp [0]->type == STACK_R8)
6435 /* we need to pop the value from the x86 FP stack */
6436 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
6445 if (stack_start != sp)
6447 token = read32 (ip + 1);
6448 /* FIXME: check the signature matches */
6449 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6451 if (!cmethod || mono_loader_get_last_error ())
6454 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
6455 GENERIC_SHARING_FAILURE (CEE_JMP);
6457 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
6458 CHECK_CFG_EXCEPTION;
6460 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6462 MonoMethodSignature *fsig = mono_method_signature (cmethod);
6465 /* Handle tail calls similarly to calls */
6466 n = fsig->param_count + fsig->hasthis;
6468 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
6469 call->method = cmethod;
6470 call->tail_call = TRUE;
6471 call->signature = mono_method_signature (cmethod);
6472 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
6473 call->inst.inst_p0 = cmethod;
6474 for (i = 0; i < n; ++i)
6475 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
6477 mono_arch_emit_call (cfg, call);
6478 MONO_ADD_INS (bblock, (MonoInst*)call);
6481 for (i = 0; i < num_args; ++i)
6482 /* Prevent arguments from being optimized away */
6483 arg_array [i]->flags |= MONO_INST_VOLATILE;
6485 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6486 ins = (MonoInst*)call;
6487 ins->inst_p0 = cmethod;
6488 MONO_ADD_INS (bblock, ins);
6492 start_new_bblock = 1;
6497 case CEE_CALLVIRT: {
6498 MonoInst *addr = NULL;
6499 MonoMethodSignature *fsig = NULL;
6501 int virtual = *ip == CEE_CALLVIRT;
6502 int calli = *ip == CEE_CALLI;
6503 gboolean pass_imt_from_rgctx = FALSE;
6504 MonoInst *imt_arg = NULL;
6505 gboolean pass_vtable = FALSE;
6506 gboolean pass_mrgctx = FALSE;
6507 MonoInst *vtable_arg = NULL;
6508 gboolean check_this = FALSE;
6509 gboolean supported_tail_call = FALSE;
6512 token = read32 (ip + 1);
6519 if (method->wrapper_type != MONO_WRAPPER_NONE)
6520 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
6522 fsig = mono_metadata_parse_signature (image, token);
6524 n = fsig->param_count + fsig->hasthis;
6526 if (method->dynamic && fsig->pinvoke) {
6530 * This is a call through a function pointer using a pinvoke
6531 * signature. Have to create a wrapper and call that instead.
6532 * FIXME: This is very slow, need to create a wrapper at JIT time
6533 * instead based on the signature.
6535 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
6536 EMIT_NEW_PCONST (cfg, args [1], fsig);
6538 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
6541 MonoMethod *cil_method;
6543 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6544 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
6545 cil_method = cmethod;
6546 } else if (constrained_call) {
6547 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
6549 * This is needed since get_method_constrained can't find
6550 * the method in klass representing a type var.
6551 * The type var is guaranteed to be a reference type in this
6554 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6555 cil_method = cmethod;
6556 g_assert (!cmethod->klass->valuetype);
6558 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
6561 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6562 cil_method = cmethod;
6565 if (!cmethod || mono_loader_get_last_error ())
6567 if (!dont_verify && !cfg->skip_visibility) {
6568 MonoMethod *target_method = cil_method;
6569 if (method->is_inflated) {
6570 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
6572 if (!mono_method_can_access_method (method_definition, target_method) &&
6573 !mono_method_can_access_method (method, cil_method))
6574 METHOD_ACCESS_FAILURE;
6577 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
6578 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
6580 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
6581 /* MS.NET seems to silently convert this to a callvirt */
6586 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
6587 * converts to a callvirt.
6589 * tests/bug-515884.il is an example of this behavior
6591 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
6592 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
6593 if (!virtual && cmethod->klass->marshalbyref && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
6597 if (!cmethod->klass->inited)
6598 if (!mono_class_init (cmethod->klass))
6601 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
6602 mini_class_is_system_array (cmethod->klass)) {
6603 array_rank = cmethod->klass->rank;
6604 fsig = mono_method_signature (cmethod);
6606 fsig = mono_method_signature (cmethod);
6611 if (fsig->pinvoke) {
6612 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
6613 check_for_pending_exc, FALSE);
6614 fsig = mono_method_signature (wrapper);
6615 } else if (constrained_call) {
6616 fsig = mono_method_signature (cmethod);
6618 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
6622 mono_save_token_info (cfg, image, token, cil_method);
6624 n = fsig->param_count + fsig->hasthis;
6626 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
6627 if (check_linkdemand (cfg, method, cmethod))
6629 CHECK_CFG_EXCEPTION;
6632 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
6633 g_assert_not_reached ();
6636 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
6639 if (!cfg->generic_sharing_context && cmethod)
6640 g_assert (!mono_method_check_context_used (cmethod));
6644 //g_assert (!virtual || fsig->hasthis);
6648 if (constrained_call) {
6650 * We have the `constrained.' prefix opcode.
6652 if (constrained_call->valuetype && !cmethod->klass->valuetype) {
6654 * The type parameter is instantiated as a valuetype,
6655 * but that type doesn't override the method we're
6656 * calling, so we need to box `this'.
6658 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
6659 ins->klass = constrained_call;
6660 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call));
6661 CHECK_CFG_EXCEPTION;
6662 } else if (!constrained_call->valuetype) {
6663 int dreg = alloc_ireg_ref (cfg);
6666 * The type parameter is instantiated as a reference
6667 * type. We have a managed pointer on the stack, so
6668 * we need to dereference it here.
6670 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
6671 ins->type = STACK_OBJ;
6673 } else if (cmethod->klass->valuetype)
6675 constrained_call = NULL;
6678 if (*ip != CEE_CALLI && check_call_signature (cfg, fsig, sp))
6682 * If the callee is a shared method, then its static cctor
6683 * might not get called after the call was patched.
6685 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
6686 emit_generic_class_init (cfg, cmethod->klass);
6687 CHECK_TYPELOAD (cmethod->klass);
6690 if (cmethod && ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
6691 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
6692 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6693 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
6694 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6697 * Pass vtable iff target method might
6698 * be shared, which means that sharing
6699 * is enabled for its class and its
6700 * context is sharable (and it's not a
6703 if (sharing_enabled && context_sharable &&
6704 !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
6708 if (cmethod && mini_method_get_context (cmethod) &&
6709 mini_method_get_context (cmethod)->method_inst) {
6710 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6711 MonoGenericContext *context = mini_method_get_context (cmethod);
6712 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6714 g_assert (!pass_vtable);
6716 if (sharing_enabled && context_sharable)
6720 if (cfg->generic_sharing_context && cmethod) {
6721 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
6723 context_used = mono_method_check_context_used (cmethod);
6725 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
6726 /* Generic method interface
6727 calls are resolved via a
6728 helper function and don't
6730 if (!cmethod_context || !cmethod_context->method_inst)
6731 pass_imt_from_rgctx = TRUE;
6735 * If a shared method calls another
6736 * shared method then the caller must
6737 * have a generic sharing context
6738 * because the magic trampoline
6739 * requires it. FIXME: We shouldn't
6740 * have to force the vtable/mrgctx
6741 * variable here. Instead there
6742 * should be a flag in the cfg to
6743 * request a generic sharing context.
6746 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
6747 mono_get_vtable_var (cfg);
6752 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
6754 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
6756 CHECK_TYPELOAD (cmethod->klass);
6757 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
6762 g_assert (!vtable_arg);
6764 if (!cfg->compile_aot) {
6766 * emit_get_rgctx_method () calls mono_class_vtable () so check
6767 * for type load errors before.
6769 mono_class_setup_vtable (cmethod->klass);
6770 CHECK_TYPELOAD (cmethod->klass);
6773 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
6775 /* !marshalbyref is needed to properly handle generic methods + remoting */
6776 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
6777 MONO_METHOD_IS_FINAL (cmethod)) &&
6778 !cmethod->klass->marshalbyref) {
6785 if (pass_imt_from_rgctx) {
6786 g_assert (!pass_vtable);
6789 imt_arg = emit_get_rgctx_method (cfg, context_used,
6790 cmethod, MONO_RGCTX_INFO_METHOD);
6794 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
6796 /* Calling virtual generic methods */
6797 if (cmethod && virtual &&
6798 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
6799 !(MONO_METHOD_IS_FINAL (cmethod) &&
6800 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
6801 mono_method_signature (cmethod)->generic_param_count) {
6802 MonoInst *this_temp, *this_arg_temp, *store;
6803 MonoInst *iargs [4];
6805 g_assert (mono_method_signature (cmethod)->is_inflated);
6807 /* Prevent inlining of methods that contain indirect calls */
6810 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
6811 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt) {
6812 g_assert (!imt_arg);
6814 g_assert (cmethod->is_inflated);
6815 imt_arg = emit_get_rgctx_method (cfg, context_used,
6816 cmethod, MONO_RGCTX_INFO_METHOD);
6817 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, sp [0], imt_arg, NULL);
6821 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
6822 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
6823 MONO_ADD_INS (bblock, store);
6825 /* FIXME: This should be a managed pointer */
6826 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
6828 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
6829 iargs [1] = emit_get_rgctx_method (cfg, context_used,
6830 cmethod, MONO_RGCTX_INFO_METHOD);
6831 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
6832 addr = mono_emit_jit_icall (cfg,
6833 mono_helper_compile_generic_method, iargs);
6835 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
6837 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL);
6840 if (!MONO_TYPE_IS_VOID (fsig->ret))
6841 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6843 CHECK_CFG_EXCEPTION;
6851 * Implement a workaround for the inherent races involved in locking:
6857 * If a thread abort happens between the call to Monitor.Enter () and the start of the
6858 * try block, the Exit () won't be executed, see:
6859 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
6860 * To work around this, we extend such try blocks to include the last x bytes
6861 * of the Monitor.Enter () call.
6863 if (cmethod && cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
6864 MonoBasicBlock *tbb;
6866 GET_BBLOCK (cfg, tbb, ip + 5);
6868 * Only extend try blocks with a finally, to avoid catching exceptions thrown
6869 * from Monitor.Enter like ArgumentNullException.
6871 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
6872 /* Mark this bblock as needing to be extended */
6873 tbb->extend_try_block = TRUE;
6877 /* Conversion to a JIT intrinsic */
6878 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
6880 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6881 type_to_eval_stack_type ((cfg), fsig->ret, ins);
6886 CHECK_CFG_EXCEPTION;
6894 if ((cfg->opt & MONO_OPT_INLINE) && cmethod &&
6895 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
6896 !disable_inline && mono_method_check_inlining (cfg, cmethod) &&
6897 !g_list_find (dont_inline, cmethod)) {
6899 gboolean always = FALSE;
6901 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
6902 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
6903 /* Prevent inlining of methods that call wrappers */
6905 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
6909 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, always))) {
6911 cfg->real_offset += 5;
6914 if (!MONO_TYPE_IS_VOID (fsig->ret))
6915 /* *sp is already set by inline_method */
6918 inline_costs += costs;
6924 inline_costs += 10 * num_calls++;
6926 /* Tail recursion elimination */
6927 if ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
6928 gboolean has_vtargs = FALSE;
6931 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6934 /* keep it simple */
6935 for (i = fsig->param_count - 1; i >= 0; i--) {
6936 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
6941 for (i = 0; i < n; ++i)
6942 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6943 MONO_INST_NEW (cfg, ins, OP_BR);
6944 MONO_ADD_INS (bblock, ins);
6945 tblock = start_bblock->out_bb [0];
6946 link_bblock (cfg, bblock, tblock);
6947 ins->inst_target_bb = tblock;
6948 start_new_bblock = 1;
6950 /* skip the CEE_RET, too */
6951 if (ip_in_bb (cfg, bblock, ip + 5))
6961 /* Generic sharing */
6962 /* FIXME: only do this for generic methods if
6963 they are not shared! */
6964 if (context_used && !imt_arg && !array_rank &&
6965 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
6966 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
6967 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
6968 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
6971 g_assert (cfg->generic_sharing_context && cmethod);
6975 * We are compiling a call to a
6976 * generic method from shared code,
6977 * which means that we have to look up
6978 * the method in the rgctx and do an
6981 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
6984 /* Indirect calls */
6986 g_assert (!imt_arg);
6988 if (*ip == CEE_CALL)
6989 g_assert (context_used);
6990 else if (*ip == CEE_CALLI)
6991 g_assert (!vtable_arg);
6993 /* FIXME: what the hell is this??? */
6994 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
6995 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
6997 /* Prevent inlining of methods with indirect calls */
7003 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, vtable_arg);
7004 call = (MonoCallInst*)ins;
7006 if (addr->opcode == OP_AOTCONST && addr->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
7008 * Instead of emitting an indirect call, emit a direct call
7009 * with the contents of the aotconst as the patch info.
7011 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_p0, fsig, sp);
7013 } else if (addr->opcode == OP_GOT_ENTRY && addr->inst_right->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
7014 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_right->inst_left, fsig, sp);
7017 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL);
7020 if (!MONO_TYPE_IS_VOID (fsig->ret))
7021 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
7023 CHECK_CFG_EXCEPTION;
7034 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
7035 MonoInst *val = sp [fsig->param_count];
7037 if (val->type == STACK_OBJ) {
7038 MonoInst *iargs [2];
7043 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
7046 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
7047 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
7048 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !(val->opcode == OP_PCONST && val->inst_c0 == 0))
7049 emit_write_barrier (cfg, addr, val, 0);
7050 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
7051 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
7053 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
7056 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
7057 if (!cmethod->klass->element_class->valuetype && !readonly)
7058 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
7059 CHECK_TYPELOAD (cmethod->klass);
7062 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
7065 g_assert_not_reached ();
7068 CHECK_CFG_EXCEPTION;
7075 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
7077 if (!MONO_TYPE_IS_VOID (fsig->ret))
7078 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
7080 CHECK_CFG_EXCEPTION;
7087 /* Tail prefix / tail call optimization */
7089 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
7090 /* FIXME: runtime generic context pointer for jumps? */
7091 /* FIXME: handle this for generic sharing eventually */
7092 supported_tail_call = cmethod &&
7093 ((((ins_flag & MONO_INST_TAILCALL) && (*ip == CEE_CALL))
7094 ))//|| ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && ip [5] == CEE_RET))
7095 && !vtable_arg && !cfg->generic_sharing_context && is_supported_tail_call (cfg, method, cmethod, fsig);
7097 if (supported_tail_call) {
7100 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
7103 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
7105 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
7106 /* Handle tail calls similarly to calls */
7107 call = mono_emit_call_args (cfg, mono_method_signature (cmethod), sp, FALSE, FALSE, TRUE, FALSE);
7109 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
7110 call->tail_call = TRUE;
7111 call->method = cmethod;
7112 call->signature = mono_method_signature (cmethod);
7115 * We implement tail calls by storing the actual arguments into the
7116 * argument variables, then emitting a CEE_JMP.
7118 for (i = 0; i < n; ++i) {
7119 /* Prevent argument from being register allocated */
7120 arg_array [i]->flags |= MONO_INST_VOLATILE;
7121 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
7125 ins = (MonoInst*)call;
7126 ins->inst_p0 = cmethod;
7127 ins->inst_p1 = arg_array [0];
7128 MONO_ADD_INS (bblock, ins);
7129 link_bblock (cfg, bblock, end_bblock);
7130 start_new_bblock = 1;
7132 CHECK_CFG_EXCEPTION;
7137 // FIXME: Eliminate unreachable epilogs
7140 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
7141 * only reachable from this call.
7143 GET_BBLOCK (cfg, tblock, ip);
7144 if (tblock == bblock || tblock->in_count == 0)
7151 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL,
7152 imt_arg, vtable_arg);
7154 if (!MONO_TYPE_IS_VOID (fsig->ret))
7155 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
7157 CHECK_CFG_EXCEPTION;
7164 if (cfg->method != method) {
7165 /* return from inlined method */
7167 * If in_count == 0, that means the ret is unreachable due to
7168 * being preceded by a throw. In that case, inline_method () will
7169 * handle setting the return value
7170 * (test case: test_0_inline_throw ()).
7172 if (return_var && cfg->cbb->in_count) {
7176 //g_assert (returnvar != -1);
7177 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
7178 cfg->ret_var_set = TRUE;
7182 MonoType *ret_type = mono_method_signature (method)->ret;
7186 * Place a seq point here too even though the IL stack is not
7187 * empty, so a step over on
7190 * will work correctly.
7192 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
7193 MONO_ADD_INS (cfg->cbb, ins);
7196 g_assert (!return_var);
7200 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
7203 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
7206 if (!cfg->vret_addr) {
7209 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
7211 EMIT_NEW_RETLOADA (cfg, ret_addr);
7213 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
7214 ins->klass = mono_class_from_mono_type (ret_type);
7217 #ifdef MONO_ARCH_SOFT_FLOAT
7218 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
7219 MonoInst *iargs [1];
7223 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
7224 mono_arch_emit_setret (cfg, method, conv);
7226 mono_arch_emit_setret (cfg, method, *sp);
7229 mono_arch_emit_setret (cfg, method, *sp);
7234 if (sp != stack_start)
7236 MONO_INST_NEW (cfg, ins, OP_BR);
7238 ins->inst_target_bb = end_bblock;
7239 MONO_ADD_INS (bblock, ins);
7240 link_bblock (cfg, bblock, end_bblock);
7241 start_new_bblock = 1;
7245 MONO_INST_NEW (cfg, ins, OP_BR);
7247 target = ip + 1 + (signed char)(*ip);
7249 GET_BBLOCK (cfg, tblock, target);
7250 link_bblock (cfg, bblock, tblock);
7251 ins->inst_target_bb = tblock;
7252 if (sp != stack_start) {
7253 handle_stack_args (cfg, stack_start, sp - stack_start);
7255 CHECK_UNVERIFIABLE (cfg);
7257 MONO_ADD_INS (bblock, ins);
7258 start_new_bblock = 1;
7259 inline_costs += BRANCH_COST;
7273 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
7275 target = ip + 1 + *(signed char*)ip;
7281 inline_costs += BRANCH_COST;
7285 MONO_INST_NEW (cfg, ins, OP_BR);
7288 target = ip + 4 + (gint32)read32(ip);
7290 GET_BBLOCK (cfg, tblock, target);
7291 link_bblock (cfg, bblock, tblock);
7292 ins->inst_target_bb = tblock;
7293 if (sp != stack_start) {
7294 handle_stack_args (cfg, stack_start, sp - stack_start);
7296 CHECK_UNVERIFIABLE (cfg);
7299 MONO_ADD_INS (bblock, ins);
7301 start_new_bblock = 1;
7302 inline_costs += BRANCH_COST;
7309 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
7310 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
7311 guint32 opsize = is_short ? 1 : 4;
7313 CHECK_OPSIZE (opsize);
7315 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
7318 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
7323 GET_BBLOCK (cfg, tblock, target);
7324 link_bblock (cfg, bblock, tblock);
7325 GET_BBLOCK (cfg, tblock, ip);
7326 link_bblock (cfg, bblock, tblock);
7328 if (sp != stack_start) {
7329 handle_stack_args (cfg, stack_start, sp - stack_start);
7330 CHECK_UNVERIFIABLE (cfg);
7333 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
7334 cmp->sreg1 = sp [0]->dreg;
7335 type_from_op (cmp, sp [0], NULL);
7338 #if SIZEOF_REGISTER == 4
7339 if (cmp->opcode == OP_LCOMPARE_IMM) {
7340 /* Convert it to OP_LCOMPARE */
7341 MONO_INST_NEW (cfg, ins, OP_I8CONST);
7342 ins->type = STACK_I8;
7343 ins->dreg = alloc_dreg (cfg, STACK_I8);
7345 MONO_ADD_INS (bblock, ins);
7346 cmp->opcode = OP_LCOMPARE;
7347 cmp->sreg2 = ins->dreg;
7350 MONO_ADD_INS (bblock, cmp);
7352 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
7353 type_from_op (ins, sp [0], NULL);
7354 MONO_ADD_INS (bblock, ins);
7355 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
7356 GET_BBLOCK (cfg, tblock, target);
7357 ins->inst_true_bb = tblock;
7358 GET_BBLOCK (cfg, tblock, ip);
7359 ins->inst_false_bb = tblock;
7360 start_new_bblock = 2;
7363 inline_costs += BRANCH_COST;
7378 MONO_INST_NEW (cfg, ins, *ip);
7380 target = ip + 4 + (gint32)read32(ip);
7386 inline_costs += BRANCH_COST;
7390 MonoBasicBlock **targets;
7391 MonoBasicBlock *default_bblock;
7392 MonoJumpInfoBBTable *table;
7393 int offset_reg = alloc_preg (cfg);
7394 int target_reg = alloc_preg (cfg);
7395 int table_reg = alloc_preg (cfg);
7396 int sum_reg = alloc_preg (cfg);
7397 gboolean use_op_switch;
7401 n = read32 (ip + 1);
7404 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
7408 CHECK_OPSIZE (n * sizeof (guint32));
7409 target = ip + n * sizeof (guint32);
7411 GET_BBLOCK (cfg, default_bblock, target);
7412 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
7414 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
7415 for (i = 0; i < n; ++i) {
7416 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
7417 targets [i] = tblock;
7418 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
7422 if (sp != stack_start) {
7424 * Link the current bb with the targets as well, so handle_stack_args
7425 * will set their in_stack correctly.
7427 link_bblock (cfg, bblock, default_bblock);
7428 for (i = 0; i < n; ++i)
7429 link_bblock (cfg, bblock, targets [i]);
7431 handle_stack_args (cfg, stack_start, sp - stack_start);
7433 CHECK_UNVERIFIABLE (cfg);
7436 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
7437 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
7440 for (i = 0; i < n; ++i)
7441 link_bblock (cfg, bblock, targets [i]);
7443 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
7444 table->table = targets;
7445 table->table_size = n;
7447 use_op_switch = FALSE;
7449 /* ARM implements SWITCH statements differently */
7450 /* FIXME: Make it use the generic implementation */
7451 if (!cfg->compile_aot)
7452 use_op_switch = TRUE;
7455 if (COMPILE_LLVM (cfg))
7456 use_op_switch = TRUE;
7458 cfg->cbb->has_jump_table = 1;
7460 if (use_op_switch) {
7461 MONO_INST_NEW (cfg, ins, OP_SWITCH);
7462 ins->sreg1 = src1->dreg;
7463 ins->inst_p0 = table;
7464 ins->inst_many_bb = targets;
7465 ins->klass = GUINT_TO_POINTER (n);
7466 MONO_ADD_INS (cfg->cbb, ins);
7468 if (sizeof (gpointer) == 8)
7469 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
7471 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
7473 #if SIZEOF_REGISTER == 8
7474 /* The upper word might not be zero, and we add it to a 64 bit address later */
7475 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
7478 if (cfg->compile_aot) {
7479 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
7481 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
7482 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
7483 ins->inst_p0 = table;
7484 ins->dreg = table_reg;
7485 MONO_ADD_INS (cfg->cbb, ins);
7488 /* FIXME: Use load_memindex */
7489 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
7490 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
7491 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
7493 start_new_bblock = 1;
7494 inline_costs += (BRANCH_COST * 2);
7514 dreg = alloc_freg (cfg);
7517 dreg = alloc_lreg (cfg);
7520 dreg = alloc_ireg_ref (cfg);
7523 dreg = alloc_preg (cfg);
7526 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
7527 ins->type = ldind_type [*ip - CEE_LDIND_I1];
7528 ins->flags |= ins_flag;
7530 MONO_ADD_INS (bblock, ins);
7545 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
7546 ins->flags |= ins_flag;
7548 MONO_ADD_INS (bblock, ins);
7550 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
7551 emit_write_barrier (cfg, sp [0], sp [1], -1);
7560 MONO_INST_NEW (cfg, ins, (*ip));
7562 ins->sreg1 = sp [0]->dreg;
7563 ins->sreg2 = sp [1]->dreg;
7564 type_from_op (ins, sp [0], sp [1]);
7566 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7568 /* Use the immediate opcodes if possible */
7569 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
7570 int imm_opcode = mono_op_to_op_imm (ins->opcode);
7571 if (imm_opcode != -1) {
7572 ins->opcode = imm_opcode;
7573 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
7576 sp [1]->opcode = OP_NOP;
7580 MONO_ADD_INS ((cfg)->cbb, (ins));
7582 *sp++ = mono_decompose_opcode (cfg, ins);
7599 MONO_INST_NEW (cfg, ins, (*ip));
7601 ins->sreg1 = sp [0]->dreg;
7602 ins->sreg2 = sp [1]->dreg;
7603 type_from_op (ins, sp [0], sp [1]);
7605 ADD_WIDEN_OP (ins, sp [0], sp [1]);
7606 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7608 /* FIXME: Pass opcode to is_inst_imm */
7610 /* Use the immediate opcodes if possible */
7611 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
7614 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
7615 if (imm_opcode != -1) {
7616 ins->opcode = imm_opcode;
7617 if (sp [1]->opcode == OP_I8CONST) {
7618 #if SIZEOF_REGISTER == 8
7619 ins->inst_imm = sp [1]->inst_l;
7621 ins->inst_ls_word = sp [1]->inst_ls_word;
7622 ins->inst_ms_word = sp [1]->inst_ms_word;
7626 ins->inst_imm = (gssize)(sp [1]->inst_c0);
7629 /* Might be followed by an instruction added by ADD_WIDEN_OP */
7630 if (sp [1]->next == NULL)
7631 sp [1]->opcode = OP_NOP;
7634 MONO_ADD_INS ((cfg)->cbb, (ins));
7636 *sp++ = mono_decompose_opcode (cfg, ins);
7649 case CEE_CONV_OVF_I8:
7650 case CEE_CONV_OVF_U8:
7654 /* Special case this earlier so we have long constants in the IR */
7655 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
7656 int data = sp [-1]->inst_c0;
7657 sp [-1]->opcode = OP_I8CONST;
7658 sp [-1]->type = STACK_I8;
7659 #if SIZEOF_REGISTER == 8
7660 if ((*ip) == CEE_CONV_U8)
7661 sp [-1]->inst_c0 = (guint32)data;
7663 sp [-1]->inst_c0 = data;
7665 sp [-1]->inst_ls_word = data;
7666 if ((*ip) == CEE_CONV_U8)
7667 sp [-1]->inst_ms_word = 0;
7669 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
7671 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
7678 case CEE_CONV_OVF_I4:
7679 case CEE_CONV_OVF_I1:
7680 case CEE_CONV_OVF_I2:
7681 case CEE_CONV_OVF_I:
7682 case CEE_CONV_OVF_U:
7685 if (sp [-1]->type == STACK_R8) {
7686 ADD_UNOP (CEE_CONV_OVF_I8);
7693 case CEE_CONV_OVF_U1:
7694 case CEE_CONV_OVF_U2:
7695 case CEE_CONV_OVF_U4:
7698 if (sp [-1]->type == STACK_R8) {
7699 ADD_UNOP (CEE_CONV_OVF_U8);
7706 case CEE_CONV_OVF_I1_UN:
7707 case CEE_CONV_OVF_I2_UN:
7708 case CEE_CONV_OVF_I4_UN:
7709 case CEE_CONV_OVF_I8_UN:
7710 case CEE_CONV_OVF_U1_UN:
7711 case CEE_CONV_OVF_U2_UN:
7712 case CEE_CONV_OVF_U4_UN:
7713 case CEE_CONV_OVF_U8_UN:
7714 case CEE_CONV_OVF_I_UN:
7715 case CEE_CONV_OVF_U_UN:
7722 CHECK_CFG_EXCEPTION;
7726 case CEE_ADD_OVF_UN:
7728 case CEE_MUL_OVF_UN:
7730 case CEE_SUB_OVF_UN:
7738 token = read32 (ip + 1);
7739 klass = mini_get_class (method, token, generic_context);
7740 CHECK_TYPELOAD (klass);
7742 if (generic_class_is_reference_type (cfg, klass)) {
7743 MonoInst *store, *load;
7744 int dreg = alloc_ireg_ref (cfg);
7746 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
7747 load->flags |= ins_flag;
7748 MONO_ADD_INS (cfg->cbb, load);
7750 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
7751 store->flags |= ins_flag;
7752 MONO_ADD_INS (cfg->cbb, store);
7754 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
7755 emit_write_barrier (cfg, sp [0], sp [1], -1);
7757 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7769 token = read32 (ip + 1);
7770 klass = mini_get_class (method, token, generic_context);
7771 CHECK_TYPELOAD (klass);
7773 /* Optimize the common ldobj+stloc combination */
7783 loc_index = ip [5] - CEE_STLOC_0;
7790 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
7791 CHECK_LOCAL (loc_index);
7793 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7794 ins->dreg = cfg->locals [loc_index]->dreg;
7800 /* Optimize the ldobj+stobj combination */
7801 /* The reference case ends up being a load+store anyway */
7802 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
7807 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7814 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7823 CHECK_STACK_OVF (1);
7825 n = read32 (ip + 1);
7827 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
7828 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
7829 ins->type = STACK_OBJ;
7832 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
7833 MonoInst *iargs [1];
7835 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
7836 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
7838 if (cfg->opt & MONO_OPT_SHARED) {
7839 MonoInst *iargs [3];
7841 if (cfg->compile_aot) {
7842 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
7844 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7845 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
7846 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
7847 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
7848 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7850 if (bblock->out_of_line) {
7851 MonoInst *iargs [2];
7853 if (image == mono_defaults.corlib) {
7855 * Avoid relocations in AOT and save some space by using a
7856 * version of helper_ldstr specialized to mscorlib.
7858 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
7859 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
7861 /* Avoid creating the string object */
7862 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
7863 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
7864 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
7868 if (cfg->compile_aot) {
7869 NEW_LDSTRCONST (cfg, ins, image, n);
7871 MONO_ADD_INS (bblock, ins);
7874 NEW_PCONST (cfg, ins, NULL);
7875 ins->type = STACK_OBJ;
7876 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7878 OUT_OF_MEMORY_FAILURE;
7881 MONO_ADD_INS (bblock, ins);
7890 MonoInst *iargs [2];
7891 MonoMethodSignature *fsig;
7894 MonoInst *vtable_arg = NULL;
7897 token = read32 (ip + 1);
7898 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7899 if (!cmethod || mono_loader_get_last_error ())
7901 fsig = mono_method_get_signature (cmethod, image, token);
7905 mono_save_token_info (cfg, image, token, cmethod);
7907 if (!mono_class_init (cmethod->klass))
7910 if (cfg->generic_sharing_context)
7911 context_used = mono_method_check_context_used (cmethod);
7913 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
7914 if (check_linkdemand (cfg, method, cmethod))
7916 CHECK_CFG_EXCEPTION;
7917 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
7918 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
7921 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
7922 emit_generic_class_init (cfg, cmethod->klass);
7923 CHECK_TYPELOAD (cmethod->klass);
7926 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7927 mono_method_is_generic_sharable_impl (cmethod, TRUE)) {
7928 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7929 mono_class_vtable (cfg->domain, cmethod->klass);
7930 CHECK_TYPELOAD (cmethod->klass);
7932 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7933 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7936 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
7937 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7939 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7941 CHECK_TYPELOAD (cmethod->klass);
7942 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7947 n = fsig->param_count;
7951 * Generate smaller code for the common newobj <exception> instruction in
7952 * argument checking code.
7954 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
7955 is_exception_class (cmethod->klass) && n <= 2 &&
7956 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
7957 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
7958 MonoInst *iargs [3];
7960 g_assert (!vtable_arg);
7964 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
7967 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
7971 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
7976 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
7979 g_assert_not_reached ();
7987 /* move the args to allow room for 'this' in the first position */
7993 /* check_call_signature () requires sp[0] to be set */
7994 this_ins.type = STACK_OBJ;
7996 if (check_call_signature (cfg, fsig, sp))
8001 if (mini_class_is_system_array (cmethod->klass)) {
8002 g_assert (!vtable_arg);
8004 *sp = emit_get_rgctx_method (cfg, context_used,
8005 cmethod, MONO_RGCTX_INFO_METHOD);
8007 /* Avoid varargs in the common case */
8008 if (fsig->param_count == 1)
8009 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
8010 else if (fsig->param_count == 2)
8011 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
8012 else if (fsig->param_count == 3)
8013 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
8015 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
8016 } else if (cmethod->string_ctor) {
8017 g_assert (!context_used);
8018 g_assert (!vtable_arg);
8019 /* we simply pass a null pointer */
8020 EMIT_NEW_PCONST (cfg, *sp, NULL);
8021 /* now call the string ctor */
8022 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, sp, NULL, NULL, NULL);
8024 MonoInst* callvirt_this_arg = NULL;
8026 if (cmethod->klass->valuetype) {
8027 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
8028 MONO_EMIT_NEW_VZERO (cfg, iargs [0]->dreg, cmethod->klass);
8029 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
8034 * The code generated by mini_emit_virtual_call () expects
8035 * iargs [0] to be a boxed instance, but luckily the vcall
8036 * will be transformed into a normal call there.
8038 } else if (context_used) {
8039 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
8042 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
8044 CHECK_TYPELOAD (cmethod->klass);
8047 * TypeInitializationExceptions thrown from the mono_runtime_class_init
8048 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
8049 * As a workaround, we call class cctors before allocating objects.
8051 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
8052 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
8053 if (cfg->verbose_level > 2)
8054 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
8055 class_inits = g_slist_prepend (class_inits, vtable);
8058 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
8061 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
8064 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
8066 /* Now call the actual ctor */
8067 /* Avoid virtual calls to ctors if possible */
8068 if (cmethod->klass->marshalbyref)
8069 callvirt_this_arg = sp [0];
8072 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
8073 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8074 type_to_eval_stack_type ((cfg), fsig->ret, ins);
8079 CHECK_CFG_EXCEPTION;
8080 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
8081 !disable_inline && mono_method_check_inlining (cfg, cmethod) &&
8082 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
8083 !g_list_find (dont_inline, cmethod)) {
8086 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
8087 cfg->real_offset += 5;
8090 inline_costs += costs - 5;
8093 mono_emit_method_call_full (cfg, cmethod, fsig, sp, callvirt_this_arg, NULL, NULL);
8095 } else if (context_used &&
8096 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
8097 !mono_class_generic_sharing_enabled (cmethod->klass))) {
8098 MonoInst *cmethod_addr;
8100 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
8101 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
8103 mono_emit_calli (cfg, fsig, sp, cmethod_addr, vtable_arg);
8106 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp,
8107 callvirt_this_arg, NULL, vtable_arg);
8111 if (alloc == NULL) {
8113 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
8114 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
8128 token = read32 (ip + 1);
8129 klass = mini_get_class (method, token, generic_context);
8130 CHECK_TYPELOAD (klass);
8131 if (sp [0]->type != STACK_OBJ)
8134 if (cfg->generic_sharing_context)
8135 context_used = mono_class_check_context_used (klass);
8137 if (!context_used && mini_class_has_reference_variant_generic_argument (klass, context_used)) {
8138 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
8145 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
8148 /*FIXME AOT support*/
8149 if (cfg->compile_aot)
8150 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
8152 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
8154 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
8155 *sp++ = mono_emit_method_call (cfg, mono_castclass, args, NULL);
8158 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
8159 MonoMethod *mono_castclass;
8160 MonoInst *iargs [1];
8163 mono_castclass = mono_marshal_get_castclass (klass);
8166 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
8167 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8168 CHECK_CFG_EXCEPTION;
8169 g_assert (costs > 0);
8172 cfg->real_offset += 5;
8177 inline_costs += costs;
8180 ins = handle_castclass (cfg, klass, *sp, context_used);
8181 CHECK_CFG_EXCEPTION;
8191 token = read32 (ip + 1);
8192 klass = mini_get_class (method, token, generic_context);
8193 CHECK_TYPELOAD (klass);
8194 if (sp [0]->type != STACK_OBJ)
8197 if (cfg->generic_sharing_context)
8198 context_used = mono_class_check_context_used (klass);
8200 if (!context_used && mini_class_has_reference_variant_generic_argument (klass, context_used)) {
8201 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
8208 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
8211 /*FIXME AOT support*/
8212 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
8214 *sp++ = mono_emit_method_call (cfg, mono_isinst, args, NULL);
8217 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
8218 MonoMethod *mono_isinst;
8219 MonoInst *iargs [1];
8222 mono_isinst = mono_marshal_get_isinst (klass);
8225 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
8226 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8227 CHECK_CFG_EXCEPTION;
8228 g_assert (costs > 0);
8231 cfg->real_offset += 5;
8236 inline_costs += costs;
8239 ins = handle_isinst (cfg, klass, *sp, context_used);
8240 CHECK_CFG_EXCEPTION;
8247 case CEE_UNBOX_ANY: {
8251 token = read32 (ip + 1);
8252 klass = mini_get_class (method, token, generic_context);
8253 CHECK_TYPELOAD (klass);
8255 mono_save_token_info (cfg, image, token, klass);
8257 if (cfg->generic_sharing_context)
8258 context_used = mono_class_check_context_used (klass);
8260 if (generic_class_is_reference_type (cfg, klass)) {
8261 /* CASTCLASS FIXME kill this huge slice of duplicated code*/
8262 if (!context_used && mini_class_has_reference_variant_generic_argument (klass, context_used)) {
8263 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
8270 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
8273 /*FIXME AOT support*/
8274 if (cfg->compile_aot)
8275 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
8277 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
8279 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
8280 *sp++ = mono_emit_method_call (cfg, mono_castclass, args, NULL);
8283 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
8284 MonoMethod *mono_castclass;
8285 MonoInst *iargs [1];
8288 mono_castclass = mono_marshal_get_castclass (klass);
8291 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
8292 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8293 CHECK_CFG_EXCEPTION;
8294 g_assert (costs > 0);
8297 cfg->real_offset += 5;
8301 inline_costs += costs;
8303 ins = handle_castclass (cfg, klass, *sp, context_used);
8304 CHECK_CFG_EXCEPTION;
8312 if (mono_class_is_nullable (klass)) {
8313 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
8320 ins = handle_unbox (cfg, klass, sp, context_used);
8326 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
8339 token = read32 (ip + 1);
8340 klass = mini_get_class (method, token, generic_context);
8341 CHECK_TYPELOAD (klass);
8343 mono_save_token_info (cfg, image, token, klass);
8345 if (cfg->generic_sharing_context)
8346 context_used = mono_class_check_context_used (klass);
8348 if (generic_class_is_reference_type (cfg, klass)) {
8354 if (klass == mono_defaults.void_class)
8356 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
8358 /* frequent check in generic code: box (struct), brtrue */
8360 // FIXME: LLVM can't handle the inconsistent bb linking
8361 if (!mono_class_is_nullable (klass) &&
8362 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) &&
8363 (ip [5] == CEE_BRTRUE ||
8364 ip [5] == CEE_BRTRUE_S ||
8365 ip [5] == CEE_BRFALSE ||
8366 ip [5] == CEE_BRFALSE_S)) {
8367 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
8369 MonoBasicBlock *true_bb, *false_bb;
8373 if (cfg->verbose_level > 3) {
8374 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
8375 printf ("<box+brtrue opt>\n");
8383 target = ip + 1 + (signed char)(*ip);
8390 target = ip + 4 + (gint)(read32 (ip));
8394 g_assert_not_reached ();
8398 * We need to link both bblocks, since it is needed for handling stack
8399 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
8400 * Branching to only one of them would lead to inconsistencies, so
8401 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
8403 GET_BBLOCK (cfg, true_bb, target);
8404 GET_BBLOCK (cfg, false_bb, ip);
8406 mono_link_bblock (cfg, cfg->cbb, true_bb);
8407 mono_link_bblock (cfg, cfg->cbb, false_bb);
8409 if (sp != stack_start) {
8410 handle_stack_args (cfg, stack_start, sp - stack_start);
8412 CHECK_UNVERIFIABLE (cfg);
8415 if (COMPILE_LLVM (cfg)) {
8416 dreg = alloc_ireg (cfg);
8417 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
8418 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
8420 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
8422 /* The JIT can't eliminate the iconst+compare */
8423 MONO_INST_NEW (cfg, ins, OP_BR);
8424 ins->inst_target_bb = is_true ? true_bb : false_bb;
8425 MONO_ADD_INS (cfg->cbb, ins);
8428 start_new_bblock = 1;
8432 *sp++ = handle_box (cfg, val, klass, context_used);
8434 CHECK_CFG_EXCEPTION;
8443 token = read32 (ip + 1);
8444 klass = mini_get_class (method, token, generic_context);
8445 CHECK_TYPELOAD (klass);
8447 mono_save_token_info (cfg, image, token, klass);
8449 if (cfg->generic_sharing_context)
8450 context_used = mono_class_check_context_used (klass);
8452 if (mono_class_is_nullable (klass)) {
8455 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
8456 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
8460 ins = handle_unbox (cfg, klass, sp, context_used);
8470 MonoClassField *field;
8474 if (*ip == CEE_STFLD) {
8481 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
8483 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
8486 token = read32 (ip + 1);
8487 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8488 field = mono_method_get_wrapper_data (method, token);
8489 klass = field->parent;
8492 field = mono_field_from_token (image, token, &klass, generic_context);
8496 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
8497 FIELD_ACCESS_FAILURE;
8498 mono_class_init (klass);
8500 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exist) have
8501 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
8502 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8503 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8506 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
8507 if (*ip == CEE_STFLD) {
8508 if (target_type_is_incompatible (cfg, field->type, sp [1]))
8510 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
8511 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
8512 MonoInst *iargs [5];
8515 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8516 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
8517 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
8521 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
8522 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
8523 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8524 CHECK_CFG_EXCEPTION;
8525 g_assert (costs > 0);
8527 cfg->real_offset += 5;
8530 inline_costs += costs;
8532 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
8537 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
8539 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
8540 if (sp [0]->opcode != OP_LDADDR)
8541 store->flags |= MONO_INST_FAULT;
8543 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
8544 /* insert call to write barrier */
8548 dreg = alloc_ireg_mp (cfg);
8549 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
8550 emit_write_barrier (cfg, ptr, sp [1], -1);
8553 store->flags |= ins_flag;
8560 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
8561 MonoMethod *wrapper = (*ip == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
8562 MonoInst *iargs [4];
8565 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8566 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
8567 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
8568 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
8569 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
8570 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8571 CHECK_CFG_EXCEPTION;
8573 g_assert (costs > 0);
8575 cfg->real_offset += 5;
8579 inline_costs += costs;
8581 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
8585 if (sp [0]->type == STACK_VTYPE) {
8588 /* Have to compute the address of the variable */
8590 var = get_vreg_to_inst (cfg, sp [0]->dreg);
8592 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
8594 g_assert (var->klass == klass);
8596 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
8600 if (*ip == CEE_LDFLDA) {
8601 if (sp [0]->type == STACK_OBJ) {
8602 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
8603 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
8606 dreg = alloc_ireg_mp (cfg);
8608 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
8609 ins->klass = mono_class_from_mono_type (field->type);
8610 ins->type = STACK_MP;
8615 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
8617 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
8618 load->flags |= ins_flag;
8619 if (sp [0]->opcode != OP_LDADDR)
8620 load->flags |= MONO_INST_FAULT;
8631 MonoClassField *field;
8632 gpointer addr = NULL;
8633 gboolean is_special_static;
8637 token = read32 (ip + 1);
8639 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8640 field = mono_method_get_wrapper_data (method, token);
8641 klass = field->parent;
8644 field = mono_field_from_token (image, token, &klass, generic_context);
8647 mono_class_init (klass);
8648 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
8649 FIELD_ACCESS_FAILURE;
8651 /* if the class is Critical then transparent code cannot access its fields */
8652 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8653 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8656 * We can only support shared generic static
8657 * field access on architectures where the
8658 * trampoline code has been extended to handle
8659 * the generic class init.
8661 #ifndef MONO_ARCH_VTABLE_REG
8662 GENERIC_SHARING_FAILURE (*ip);
8665 if (cfg->generic_sharing_context)
8666 context_used = mono_class_check_context_used (klass);
8668 ftype = mono_field_get_type (field);
8670 g_assert (!(ftype->attrs & FIELD_ATTRIBUTE_LITERAL));
8672 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
8673 * to be called here.
8675 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
8676 mono_class_vtable (cfg->domain, klass);
8677 CHECK_TYPELOAD (klass);
8679 mono_domain_lock (cfg->domain);
8680 if (cfg->domain->special_static_fields)
8681 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
8682 mono_domain_unlock (cfg->domain);
8684 is_special_static = mono_class_field_is_special_static (field);
8686 /* Generate IR to compute the field address */
8687 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && mono_get_thread_intrinsic (cfg) && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
8689 * Fast access to TLS data
8690 * Inline version of get_thread_static_data () in
8694 int idx, static_data_reg, array_reg, dreg;
8695 MonoInst *thread_ins;
8697 // offset &= 0x7fffffff;
8698 // idx = (offset >> 24) - 1;
8699 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
8701 thread_ins = mono_get_thread_intrinsic (cfg);
8702 MONO_ADD_INS (cfg->cbb, thread_ins);
8703 static_data_reg = alloc_ireg (cfg);
8704 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
8706 if (cfg->compile_aot) {
8707 int offset_reg, offset2_reg, idx_reg;
8709 /* For TLS variables, this will return the TLS offset */
8710 EMIT_NEW_SFLDACONST (cfg, ins, field);
8711 offset_reg = ins->dreg;
8712 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
8713 idx_reg = alloc_ireg (cfg);
8714 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
8715 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
8716 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
8717 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
8718 array_reg = alloc_ireg (cfg);
8719 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
8720 offset2_reg = alloc_ireg (cfg);
8721 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
8722 dreg = alloc_ireg (cfg);
8723 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
8725 offset = (gsize)addr & 0x7fffffff;
8726 idx = (offset >> 24) - 1;
8728 array_reg = alloc_ireg (cfg);
8729 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
8730 dreg = alloc_ireg (cfg);
8731 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, (offset & 0xffffff));
8733 } else if ((cfg->opt & MONO_OPT_SHARED) ||
8734 (cfg->compile_aot && is_special_static) ||
8735 (context_used && is_special_static)) {
8736 MonoInst *iargs [2];
8738 g_assert (field->parent);
8739 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8741 iargs [1] = emit_get_rgctx_field (cfg, context_used,
8742 field, MONO_RGCTX_INFO_CLASS_FIELD);
8744 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8746 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8747 } else if (context_used) {
8748 MonoInst *static_data;
8751 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
8752 method->klass->name_space, method->klass->name, method->name,
8753 depth, field->offset);
8756 if (mono_class_needs_cctor_run (klass, method))
8757 emit_generic_class_init (cfg, klass);
8760 * The pointer we're computing here is
8762 * super_info.static_data + field->offset
8764 static_data = emit_get_rgctx_klass (cfg, context_used,
8765 klass, MONO_RGCTX_INFO_STATIC_DATA);
8767 if (field->offset == 0) {
8770 int addr_reg = mono_alloc_preg (cfg);
8771 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
8773 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
8774 MonoInst *iargs [2];
8776 g_assert (field->parent);
8777 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8778 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8779 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8781 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
8783 CHECK_TYPELOAD (klass);
8785 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
8786 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
8787 if (cfg->verbose_level > 2)
8788 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
8789 class_inits = g_slist_prepend (class_inits, vtable);
8791 if (cfg->run_cctors) {
8793 /* This makes so that inline cannot trigger */
8794 /* .cctors: too many apps depend on them */
8795 /* running with a specific order... */
8796 if (! vtable->initialized)
8798 ex = mono_runtime_class_init_full (vtable, FALSE);
8800 set_exception_object (cfg, ex);
8801 goto exception_exit;
8805 addr = (char*)vtable->data + field->offset;
8807 if (cfg->compile_aot)
8808 EMIT_NEW_SFLDACONST (cfg, ins, field);
8810 EMIT_NEW_PCONST (cfg, ins, addr);
8812 MonoInst *iargs [1];
8813 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
8814 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
8818 /* Generate IR to do the actual load/store operation */
8820 if (*ip == CEE_LDSFLDA) {
8821 ins->klass = mono_class_from_mono_type (ftype);
8822 ins->type = STACK_PTR;
8824 } else if (*ip == CEE_STSFLD) {
8829 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, sp [0]->dreg);
8830 store->flags |= ins_flag;
8832 gboolean is_const = FALSE;
8833 MonoVTable *vtable = NULL;
8835 if (!context_used) {
8836 vtable = mono_class_vtable (cfg->domain, klass);
8837 CHECK_TYPELOAD (klass);
8839 if (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) &&
8840 vtable->initialized && (ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY)) {
8841 gpointer addr = (char*)vtable->data + field->offset;
8842 int ro_type = ftype->type;
8843 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
8844 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
8846 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
8849 case MONO_TYPE_BOOLEAN:
8851 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
8855 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
8858 case MONO_TYPE_CHAR:
8860 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
8864 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
8869 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
8873 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
8879 case MONO_TYPE_FNPTR:
8880 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
8881 type_to_eval_stack_type ((cfg), field->type, *sp);
8884 case MONO_TYPE_STRING:
8885 case MONO_TYPE_OBJECT:
8886 case MONO_TYPE_CLASS:
8887 case MONO_TYPE_SZARRAY:
8888 case MONO_TYPE_ARRAY:
8889 if (!mono_gc_is_moving ()) {
8890 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
8891 type_to_eval_stack_type ((cfg), field->type, *sp);
8899 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
8904 case MONO_TYPE_VALUETYPE:
8914 CHECK_STACK_OVF (1);
8916 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
8917 load->flags |= ins_flag;
8930 token = read32 (ip + 1);
8931 klass = mini_get_class (method, token, generic_context);
8932 CHECK_TYPELOAD (klass);
8933 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
8934 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
8935 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
8936 generic_class_is_reference_type (cfg, klass)) {
8937 /* insert call to write barrier */
8938 emit_write_barrier (cfg, sp [0], sp [1], -1);
8950 const char *data_ptr;
8952 guint32 field_token;
8958 token = read32 (ip + 1);
8960 klass = mini_get_class (method, token, generic_context);
8961 CHECK_TYPELOAD (klass);
8963 if (cfg->generic_sharing_context)
8964 context_used = mono_class_check_context_used (klass);
8966 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
8967 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_I4);
8968 ins->sreg1 = sp [0]->dreg;
8969 ins->type = STACK_I4;
8970 ins->dreg = alloc_ireg (cfg);
8971 MONO_ADD_INS (cfg->cbb, ins);
8972 *sp = mono_decompose_opcode (cfg, ins);
8977 MonoClass *array_class = mono_array_class_get (klass, 1);
8978 /* FIXME: we cannot get a managed
8979 allocator because we can't get the
8980 open generic class's vtable. We
8981 have the same problem in
8982 handle_alloc(). This
8983 needs to be solved so that we can
8984 have managed allocs of shared
8987 MonoVTable *array_class_vtable = mono_class_vtable (cfg->domain, array_class);
8988 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class_vtable, 1);
8990 MonoMethod *managed_alloc = NULL;
8992 /* FIXME: Decompose later to help abcrem */
8995 args [0] = emit_get_rgctx_klass (cfg, context_used,
8996 array_class, MONO_RGCTX_INFO_VTABLE);
9001 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
9003 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
9005 if (cfg->opt & MONO_OPT_SHARED) {
9006 /* Decompose now to avoid problems with references to the domainvar */
9007 MonoInst *iargs [3];
9009 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
9010 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
9013 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
9015 /* Decompose later since it is needed by abcrem */
9016 MonoClass *array_type = mono_array_class_get (klass, 1);
9017 mono_class_vtable (cfg->domain, array_type);
9018 CHECK_TYPELOAD (array_type);
9020 MONO_INST_NEW (cfg, ins, OP_NEWARR);
9021 ins->dreg = alloc_ireg_ref (cfg);
9022 ins->sreg1 = sp [0]->dreg;
9023 ins->inst_newa_class = klass;
9024 ins->type = STACK_OBJ;
9026 MONO_ADD_INS (cfg->cbb, ins);
9027 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
9028 cfg->cbb->has_array_access = TRUE;
9030 /* Needed so mono_emit_load_get_addr () gets called */
9031 mono_get_got_var (cfg);
9041 * we inline/optimize the initialization sequence if possible.
9042 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
9043 * for small sizes open code the memcpy
9044 * ensure the rva field is big enough
9046 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
9047 MonoMethod *memcpy_method = get_memcpy_method ();
9048 MonoInst *iargs [3];
9049 int add_reg = alloc_ireg_mp (cfg);
9051 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
9052 if (cfg->compile_aot) {
9053 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
9055 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
9057 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
9058 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
9067 if (sp [0]->type != STACK_OBJ)
9070 MONO_INST_NEW (cfg, ins, OP_LDLEN);
9071 ins->dreg = alloc_preg (cfg);
9072 ins->sreg1 = sp [0]->dreg;
9073 ins->type = STACK_I4;
9074 /* This flag will be inherited by the decomposition */
9075 ins->flags |= MONO_INST_FAULT;
9076 MONO_ADD_INS (cfg->cbb, ins);
9077 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
9078 cfg->cbb->has_array_access = TRUE;
9086 if (sp [0]->type != STACK_OBJ)
9089 cfg->flags |= MONO_CFG_HAS_LDELEMA;
9091 klass = mini_get_class (method, read32 (ip + 1), generic_context);
9092 CHECK_TYPELOAD (klass);
9093 /* we need to make sure that this array is exactly the type it needs
9094 * to be for correctness. the wrappers are lax with their usage
9095 * so we need to ignore them here
9097 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
9098 MonoClass *array_class = mono_array_class_get (klass, 1);
9099 mini_emit_check_array_type (cfg, sp [0], array_class);
9100 CHECK_TYPELOAD (array_class);
9104 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
9119 case CEE_LDELEM_REF: {
9125 if (*ip == CEE_LDELEM) {
9127 token = read32 (ip + 1);
9128 klass = mini_get_class (method, token, generic_context);
9129 CHECK_TYPELOAD (klass);
9130 mono_class_init (klass);
9133 klass = array_access_to_klass (*ip);
9135 if (sp [0]->type != STACK_OBJ)
9138 cfg->flags |= MONO_CFG_HAS_LDELEMA;
9140 if (sp [1]->opcode == OP_ICONST) {
9141 int array_reg = sp [0]->dreg;
9142 int index_reg = sp [1]->dreg;
9143 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
9145 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
9146 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
9148 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
9149 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
9152 if (*ip == CEE_LDELEM)
9165 case CEE_STELEM_REF:
9172 cfg->flags |= MONO_CFG_HAS_LDELEMA;
9174 if (*ip == CEE_STELEM) {
9176 token = read32 (ip + 1);
9177 klass = mini_get_class (method, token, generic_context);
9178 CHECK_TYPELOAD (klass);
9179 mono_class_init (klass);
9182 klass = array_access_to_klass (*ip);
9184 if (sp [0]->type != STACK_OBJ)
9187 /* storing a NULL doesn't need any of the complex checks in stelemref */
9188 if (generic_class_is_reference_type (cfg, klass) &&
9189 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
9190 MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
9191 MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
9192 MonoInst *iargs [3];
9195 mono_class_setup_vtable (obj_array);
9196 g_assert (helper->slot);
9198 if (sp [0]->type != STACK_OBJ)
9200 if (sp [2]->type != STACK_OBJ)
9207 mono_emit_method_call (cfg, helper, iargs, sp [0]);
9209 if (sp [1]->opcode == OP_ICONST) {
9210 int array_reg = sp [0]->dreg;
9211 int index_reg = sp [1]->dreg;
9212 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
9214 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
9215 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
9217 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
9218 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
9222 if (*ip == CEE_STELEM)
9229 case CEE_CKFINITE: {
9233 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
9234 ins->sreg1 = sp [0]->dreg;
9235 ins->dreg = alloc_freg (cfg);
9236 ins->type = STACK_R8;
9237 MONO_ADD_INS (bblock, ins);
9239 *sp++ = mono_decompose_opcode (cfg, ins);
9244 case CEE_REFANYVAL: {
9245 MonoInst *src_var, *src;
9247 int klass_reg = alloc_preg (cfg);
9248 int dreg = alloc_preg (cfg);
9251 MONO_INST_NEW (cfg, ins, *ip);
9254 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
9255 CHECK_TYPELOAD (klass);
9256 mono_class_init (klass);
9258 if (cfg->generic_sharing_context)
9259 context_used = mono_class_check_context_used (klass);
9262 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9264 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
9265 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
9266 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
9269 MonoInst *klass_ins;
9271 klass_ins = emit_get_rgctx_klass (cfg, context_used,
9272 klass, MONO_RGCTX_INFO_KLASS);
9275 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
9276 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
9278 mini_emit_class_check (cfg, klass_reg, klass);
9280 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
9281 ins->type = STACK_MP;
9286 case CEE_MKREFANY: {
9287 MonoInst *loc, *addr;
9290 MONO_INST_NEW (cfg, ins, *ip);
9293 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
9294 CHECK_TYPELOAD (klass);
9295 mono_class_init (klass);
9297 if (cfg->generic_sharing_context)
9298 context_used = mono_class_check_context_used (klass);
9300 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
9301 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
9304 MonoInst *const_ins;
9305 int type_reg = alloc_preg (cfg);
9307 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
9308 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
9309 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
9310 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
9311 } else if (cfg->compile_aot) {
9312 int const_reg = alloc_preg (cfg);
9313 int type_reg = alloc_preg (cfg);
9315 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
9316 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
9317 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
9318 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
9320 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
9321 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
9323 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
9325 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
9326 ins->type = STACK_VTYPE;
9327 ins->klass = mono_defaults.typed_reference_class;
9334 MonoClass *handle_class;
9336 CHECK_STACK_OVF (1);
9339 n = read32 (ip + 1);
9341 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
9342 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
9343 handle = mono_method_get_wrapper_data (method, n);
9344 handle_class = mono_method_get_wrapper_data (method, n + 1);
9345 if (handle_class == mono_defaults.typehandle_class)
9346 handle = &((MonoClass*)handle)->byval_arg;
9349 handle = mono_ldtoken (image, n, &handle_class, generic_context);
9353 mono_class_init (handle_class);
9354 if (cfg->generic_sharing_context) {
9355 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
9356 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
9357 /* This case handles ldtoken
9358 of an open type, like for
9361 } else if (handle_class == mono_defaults.typehandle_class) {
9362 /* If we get a MONO_TYPE_CLASS
9363 then we need to provide the
9365 instantiation of it. */
9366 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
9369 context_used = mono_class_check_context_used (mono_class_from_mono_type (handle));
9370 } else if (handle_class == mono_defaults.fieldhandle_class)
9371 context_used = mono_class_check_context_used (((MonoClassField*)handle)->parent);
9372 else if (handle_class == mono_defaults.methodhandle_class)
9373 context_used = mono_method_check_context_used (handle);
9375 g_assert_not_reached ();
9378 if ((cfg->opt & MONO_OPT_SHARED) &&
9379 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
9380 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
9381 MonoInst *addr, *vtvar, *iargs [3];
9382 int method_context_used;
9384 if (cfg->generic_sharing_context)
9385 method_context_used = mono_method_check_context_used (method);
9387 method_context_used = 0;
9389 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
9391 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
9392 EMIT_NEW_ICONST (cfg, iargs [1], n);
9393 if (method_context_used) {
9394 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
9395 method, MONO_RGCTX_INFO_METHOD);
9396 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
9398 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
9399 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
9401 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9403 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
9405 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9407 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
9408 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
9409 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
9410 (cmethod->klass == mono_defaults.monotype_class->parent) &&
9411 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
9412 MonoClass *tclass = mono_class_from_mono_type (handle);
9414 mono_class_init (tclass);
9416 ins = emit_get_rgctx_klass (cfg, context_used,
9417 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
9418 } else if (cfg->compile_aot) {
9419 if (method->wrapper_type) {
9420 if (mono_class_get (tclass->image, tclass->type_token) == tclass && !generic_context) {
9421 /* Special case for static synchronized wrappers */
9422 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
9424 /* FIXME: n is not a normal token */
9425 cfg->disable_aot = TRUE;
9426 EMIT_NEW_PCONST (cfg, ins, NULL);
9429 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
9432 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
9434 ins->type = STACK_OBJ;
9435 ins->klass = cmethod->klass;
9438 MonoInst *addr, *vtvar;
9440 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
9443 if (handle_class == mono_defaults.typehandle_class) {
9444 ins = emit_get_rgctx_klass (cfg, context_used,
9445 mono_class_from_mono_type (handle),
9446 MONO_RGCTX_INFO_TYPE);
9447 } else if (handle_class == mono_defaults.methodhandle_class) {
9448 ins = emit_get_rgctx_method (cfg, context_used,
9449 handle, MONO_RGCTX_INFO_METHOD);
9450 } else if (handle_class == mono_defaults.fieldhandle_class) {
9451 ins = emit_get_rgctx_field (cfg, context_used,
9452 handle, MONO_RGCTX_INFO_CLASS_FIELD);
9454 g_assert_not_reached ();
9456 } else if (cfg->compile_aot) {
9457 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n);
9459 EMIT_NEW_PCONST (cfg, ins, handle);
9461 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9462 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
9463 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9473 MONO_INST_NEW (cfg, ins, OP_THROW);
9475 ins->sreg1 = sp [0]->dreg;
9477 bblock->out_of_line = TRUE;
9478 MONO_ADD_INS (bblock, ins);
9479 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
9480 MONO_ADD_INS (bblock, ins);
9483 link_bblock (cfg, bblock, end_bblock);
9484 start_new_bblock = 1;
9486 case CEE_ENDFINALLY:
9487 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
9488 MONO_ADD_INS (bblock, ins);
9490 start_new_bblock = 1;
9493 * Control will leave the method so empty the stack, otherwise
9494 * the next basic block will start with a nonempty stack.
9496 while (sp != stack_start) {
9504 if (*ip == CEE_LEAVE) {
9506 target = ip + 5 + (gint32)read32(ip + 1);
9509 target = ip + 2 + (signed char)(ip [1]);
9512 /* empty the stack */
9513 while (sp != stack_start) {
9518 * If this leave statement is in a catch block, check for a
9519 * pending exception, and rethrow it if necessary.
9520 * We avoid doing this in runtime invoke wrappers, since those are called
9521 * by native code which excepts the wrapper to catch all exceptions.
9523 for (i = 0; i < header->num_clauses; ++i) {
9524 MonoExceptionClause *clause = &header->clauses [i];
9527 * Use <= in the final comparison to handle clauses with multiple
9528 * leave statements, like in bug #78024.
9529 * The ordering of the exception clauses guarantees that we find the
9532 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
9534 MonoBasicBlock *dont_throw;
9539 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
9542 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
9544 NEW_BBLOCK (cfg, dont_throw);
9547 * Currently, we always rethrow the abort exception, despite the
9548 * fact that this is not correct. See thread6.cs for an example.
9549 * But propagating the abort exception is more important than
9550 * getting the sematics right.
9552 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
9553 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
9554 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
9556 MONO_START_BB (cfg, dont_throw);
9561 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
9563 MonoExceptionClause *clause;
9565 for (tmp = handlers; tmp; tmp = tmp->next) {
9567 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
9569 link_bblock (cfg, bblock, tblock);
9570 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
9571 ins->inst_target_bb = tblock;
9572 ins->inst_eh_block = clause;
9573 MONO_ADD_INS (bblock, ins);
9574 bblock->has_call_handler = 1;
9575 if (COMPILE_LLVM (cfg)) {
9576 MonoBasicBlock *target_bb;
9579 * Link the finally bblock with the target, since it will
9580 * conceptually branch there.
9581 * FIXME: Have to link the bblock containing the endfinally.
9583 GET_BBLOCK (cfg, target_bb, target);
9584 link_bblock (cfg, tblock, target_bb);
9587 g_list_free (handlers);
9590 MONO_INST_NEW (cfg, ins, OP_BR);
9591 MONO_ADD_INS (bblock, ins);
9592 GET_BBLOCK (cfg, tblock, target);
9593 link_bblock (cfg, bblock, tblock);
9594 ins->inst_target_bb = tblock;
9595 start_new_bblock = 1;
9597 if (*ip == CEE_LEAVE)
9606 * Mono specific opcodes
9608 case MONO_CUSTOM_PREFIX: {
9610 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
9614 case CEE_MONO_ICALL: {
9616 MonoJitICallInfo *info;
9618 token = read32 (ip + 2);
9619 func = mono_method_get_wrapper_data (method, token);
9620 info = mono_find_jit_icall_by_addr (func);
9623 CHECK_STACK (info->sig->param_count);
9624 sp -= info->sig->param_count;
9626 ins = mono_emit_jit_icall (cfg, info->func, sp);
9627 if (!MONO_TYPE_IS_VOID (info->sig->ret))
9631 inline_costs += 10 * num_calls++;
9635 case CEE_MONO_LDPTR: {
9638 CHECK_STACK_OVF (1);
9640 token = read32 (ip + 2);
9642 ptr = mono_method_get_wrapper_data (method, token);
9643 if (cfg->compile_aot && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (strstr (method->name, "__icall_wrapper_") == method->name)) {
9644 MonoJitICallInfo *callinfo;
9645 const char *icall_name;
9647 icall_name = method->name + strlen ("__icall_wrapper_");
9648 g_assert (icall_name);
9649 callinfo = mono_find_jit_icall_by_name (icall_name);
9650 g_assert (callinfo);
9652 if (ptr == callinfo->func) {
9653 /* Will be transformed into an AOTCONST later */
9654 EMIT_NEW_PCONST (cfg, ins, ptr);
9660 /* FIXME: Generalize this */
9661 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
9662 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
9667 EMIT_NEW_PCONST (cfg, ins, ptr);
9670 inline_costs += 10 * num_calls++;
9671 /* Can't embed random pointers into AOT code */
9672 cfg->disable_aot = 1;
9675 case CEE_MONO_ICALL_ADDR: {
9676 MonoMethod *cmethod;
9679 CHECK_STACK_OVF (1);
9681 token = read32 (ip + 2);
9683 cmethod = mono_method_get_wrapper_data (method, token);
9685 if (cfg->compile_aot) {
9686 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
9688 ptr = mono_lookup_internal_call (cmethod);
9690 EMIT_NEW_PCONST (cfg, ins, ptr);
9696 case CEE_MONO_VTADDR: {
9697 MonoInst *src_var, *src;
9703 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9704 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
9709 case CEE_MONO_NEWOBJ: {
9710 MonoInst *iargs [2];
9712 CHECK_STACK_OVF (1);
9714 token = read32 (ip + 2);
9715 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9716 mono_class_init (klass);
9717 NEW_DOMAINCONST (cfg, iargs [0]);
9718 MONO_ADD_INS (cfg->cbb, iargs [0]);
9719 NEW_CLASSCONST (cfg, iargs [1], klass);
9720 MONO_ADD_INS (cfg->cbb, iargs [1]);
9721 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
9723 inline_costs += 10 * num_calls++;
9726 case CEE_MONO_OBJADDR:
9729 MONO_INST_NEW (cfg, ins, OP_MOVE);
9730 ins->dreg = alloc_ireg_mp (cfg);
9731 ins->sreg1 = sp [0]->dreg;
9732 ins->type = STACK_MP;
9733 MONO_ADD_INS (cfg->cbb, ins);
9737 case CEE_MONO_LDNATIVEOBJ:
9739 * Similar to LDOBJ, but instead load the unmanaged
9740 * representation of the vtype to the stack.
9745 token = read32 (ip + 2);
9746 klass = mono_method_get_wrapper_data (method, token);
9747 g_assert (klass->valuetype);
9748 mono_class_init (klass);
9751 MonoInst *src, *dest, *temp;
9754 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
9755 temp->backend.is_pinvoke = 1;
9756 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
9757 mini_emit_stobj (cfg, dest, src, klass, TRUE);
9759 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
9760 dest->type = STACK_VTYPE;
9761 dest->klass = klass;
9767 case CEE_MONO_RETOBJ: {
9769 * Same as RET, but return the native representation of a vtype
9772 g_assert (cfg->ret);
9773 g_assert (mono_method_signature (method)->pinvoke);
9778 token = read32 (ip + 2);
9779 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9781 if (!cfg->vret_addr) {
9782 g_assert (cfg->ret_var_is_local);
9784 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
9786 EMIT_NEW_RETLOADA (cfg, ins);
9788 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
9790 if (sp != stack_start)
9793 MONO_INST_NEW (cfg, ins, OP_BR);
9794 ins->inst_target_bb = end_bblock;
9795 MONO_ADD_INS (bblock, ins);
9796 link_bblock (cfg, bblock, end_bblock);
9797 start_new_bblock = 1;
9801 case CEE_MONO_CISINST:
9802 case CEE_MONO_CCASTCLASS: {
9807 token = read32 (ip + 2);
9808 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9809 if (ip [1] == CEE_MONO_CISINST)
9810 ins = handle_cisinst (cfg, klass, sp [0]);
9812 ins = handle_ccastclass (cfg, klass, sp [0]);
9818 case CEE_MONO_SAVE_LMF:
9819 case CEE_MONO_RESTORE_LMF:
9820 #ifdef MONO_ARCH_HAVE_LMF_OPS
9821 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
9822 MONO_ADD_INS (bblock, ins);
9823 cfg->need_lmf_area = TRUE;
9827 case CEE_MONO_CLASSCONST:
9828 CHECK_STACK_OVF (1);
9830 token = read32 (ip + 2);
9831 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
9834 inline_costs += 10 * num_calls++;
9836 case CEE_MONO_NOT_TAKEN:
9837 bblock->out_of_line = TRUE;
9841 CHECK_STACK_OVF (1);
9843 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
9844 ins->dreg = alloc_preg (cfg);
9845 ins->inst_offset = (gint32)read32 (ip + 2);
9846 ins->type = STACK_PTR;
9847 MONO_ADD_INS (bblock, ins);
9851 case CEE_MONO_DYN_CALL: {
9854 /* It would be easier to call a trampoline, but that would put an
9855 * extra frame on the stack, confusing exception handling. So
9856 * implement it inline using an opcode for now.
9859 if (!cfg->dyn_call_var) {
9860 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
9861 /* prevent it from being register allocated */
9862 cfg->dyn_call_var->flags |= MONO_INST_INDIRECT;
9865 /* Has to use a call inst since it local regalloc expects it */
9866 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
9867 ins = (MonoInst*)call;
9869 ins->sreg1 = sp [0]->dreg;
9870 ins->sreg2 = sp [1]->dreg;
9871 MONO_ADD_INS (bblock, ins);
9873 #ifdef MONO_ARCH_DYN_CALL_PARAM_AREA
9874 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
9878 inline_costs += 10 * num_calls++;
9883 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
9893 /* somewhat similar to LDTOKEN */
9894 MonoInst *addr, *vtvar;
9895 CHECK_STACK_OVF (1);
9896 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
9898 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9899 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
9901 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9902 ins->type = STACK_VTYPE;
9903 ins->klass = mono_defaults.argumenthandle_class;
9916 * The following transforms:
9917 * CEE_CEQ into OP_CEQ
9918 * CEE_CGT into OP_CGT
9919 * CEE_CGT_UN into OP_CGT_UN
9920 * CEE_CLT into OP_CLT
9921 * CEE_CLT_UN into OP_CLT_UN
9923 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
9925 MONO_INST_NEW (cfg, ins, cmp->opcode);
9927 cmp->sreg1 = sp [0]->dreg;
9928 cmp->sreg2 = sp [1]->dreg;
9929 type_from_op (cmp, sp [0], sp [1]);
9931 if ((sp [0]->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
9932 cmp->opcode = OP_LCOMPARE;
9933 else if (sp [0]->type == STACK_R8)
9934 cmp->opcode = OP_FCOMPARE;
9936 cmp->opcode = OP_ICOMPARE;
9937 MONO_ADD_INS (bblock, cmp);
9938 ins->type = STACK_I4;
9939 ins->dreg = alloc_dreg (cfg, ins->type);
9940 type_from_op (ins, sp [0], sp [1]);
9942 if (cmp->opcode == OP_FCOMPARE) {
9944 * The backends expect the fceq opcodes to do the
9947 cmp->opcode = OP_NOP;
9948 ins->sreg1 = cmp->sreg1;
9949 ins->sreg2 = cmp->sreg2;
9951 MONO_ADD_INS (bblock, ins);
9958 MonoMethod *cil_method;
9959 gboolean needs_static_rgctx_invoke;
9961 CHECK_STACK_OVF (1);
9963 n = read32 (ip + 2);
9964 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9965 if (!cmethod || mono_loader_get_last_error ())
9967 mono_class_init (cmethod->klass);
9969 mono_save_token_info (cfg, image, n, cmethod);
9971 if (cfg->generic_sharing_context)
9972 context_used = mono_method_check_context_used (cmethod);
9974 needs_static_rgctx_invoke = mono_method_needs_static_rgctx_invoke (cmethod, TRUE);
9976 cil_method = cmethod;
9977 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
9978 METHOD_ACCESS_FAILURE;
9980 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9981 if (check_linkdemand (cfg, method, cmethod))
9983 CHECK_CFG_EXCEPTION;
9984 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9985 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9989 * Optimize the common case of ldftn+delegate creation
9991 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
9992 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
9993 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
9994 MonoInst *target_ins;
9996 int invoke_context_used = 0;
9998 invoke = mono_get_delegate_invoke (ctor_method->klass);
9999 if (!invoke || !mono_method_signature (invoke))
10002 if (cfg->generic_sharing_context)
10003 invoke_context_used = mono_method_check_context_used (invoke);
10005 target_ins = sp [-1];
10007 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
10008 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
10009 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
10010 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
10011 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
10015 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
10016 /* FIXME: SGEN support */
10017 if (invoke_context_used == 0) {
10019 if (cfg->verbose_level > 3)
10020 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
10022 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used);
10023 CHECK_CFG_EXCEPTION;
10032 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
10033 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
10037 inline_costs += 10 * num_calls++;
10040 case CEE_LDVIRTFTN: {
10041 MonoInst *args [2];
10045 n = read32 (ip + 2);
10046 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
10047 if (!cmethod || mono_loader_get_last_error ())
10049 mono_class_init (cmethod->klass);
10051 if (cfg->generic_sharing_context)
10052 context_used = mono_method_check_context_used (cmethod);
10054 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
10055 if (check_linkdemand (cfg, method, cmethod))
10057 CHECK_CFG_EXCEPTION;
10058 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
10059 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
10065 args [1] = emit_get_rgctx_method (cfg, context_used,
10066 cmethod, MONO_RGCTX_INFO_METHOD);
10069 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
10071 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
10074 inline_costs += 10 * num_calls++;
10078 CHECK_STACK_OVF (1);
10080 n = read16 (ip + 2);
10082 EMIT_NEW_ARGLOAD (cfg, ins, n);
10087 CHECK_STACK_OVF (1);
10089 n = read16 (ip + 2);
10091 NEW_ARGLOADA (cfg, ins, n);
10092 MONO_ADD_INS (cfg->cbb, ins);
10100 n = read16 (ip + 2);
10102 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
10104 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
10108 CHECK_STACK_OVF (1);
10110 n = read16 (ip + 2);
10112 EMIT_NEW_LOCLOAD (cfg, ins, n);
10117 unsigned char *tmp_ip;
10118 CHECK_STACK_OVF (1);
10120 n = read16 (ip + 2);
10123 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
10129 EMIT_NEW_LOCLOADA (cfg, ins, n);
10138 n = read16 (ip + 2);
10140 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
10142 emit_stloc_ir (cfg, sp, header, n);
10149 if (sp != stack_start)
10151 if (cfg->method != method)
10153 * Inlining this into a loop in a parent could lead to
10154 * stack overflows which is different behavior than the
10155 * non-inlined case, thus disable inlining in this case.
10157 goto inline_failure;
10159 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
10160 ins->dreg = alloc_preg (cfg);
10161 ins->sreg1 = sp [0]->dreg;
10162 ins->type = STACK_PTR;
10163 MONO_ADD_INS (cfg->cbb, ins);
10165 cfg->flags |= MONO_CFG_HAS_ALLOCA;
10167 ins->flags |= MONO_INST_INIT;
10172 case CEE_ENDFILTER: {
10173 MonoExceptionClause *clause, *nearest;
10174 int cc, nearest_num;
10178 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
10180 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
10181 ins->sreg1 = (*sp)->dreg;
10182 MONO_ADD_INS (bblock, ins);
10183 start_new_bblock = 1;
10188 for (cc = 0; cc < header->num_clauses; ++cc) {
10189 clause = &header->clauses [cc];
10190 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
10191 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
10192 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
10197 g_assert (nearest);
10198 if ((ip - header->code) != nearest->handler_offset)
10203 case CEE_UNALIGNED_:
10204 ins_flag |= MONO_INST_UNALIGNED;
10205 /* FIXME: record alignment? we can assume 1 for now */
10209 case CEE_VOLATILE_:
10210 ins_flag |= MONO_INST_VOLATILE;
10214 ins_flag |= MONO_INST_TAILCALL;
10215 cfg->flags |= MONO_CFG_HAS_TAIL;
10216 /* Can't inline tail calls at this time */
10217 inline_costs += 100000;
10224 token = read32 (ip + 2);
10225 klass = mini_get_class (method, token, generic_context);
10226 CHECK_TYPELOAD (klass);
10227 if (generic_class_is_reference_type (cfg, klass))
10228 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
10230 mini_emit_initobj (cfg, *sp, NULL, klass);
10234 case CEE_CONSTRAINED_:
10236 token = read32 (ip + 2);
10237 if (method->wrapper_type != MONO_WRAPPER_NONE)
10238 constrained_call = (MonoClass *)mono_method_get_wrapper_data (method, token);
10240 constrained_call = mono_class_get_full (image, token, generic_context);
10241 CHECK_TYPELOAD (constrained_call);
10245 case CEE_INITBLK: {
10246 MonoInst *iargs [3];
10250 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
10251 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
10252 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
10253 /* emit_memset only works when val == 0 */
10254 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
10256 iargs [0] = sp [0];
10257 iargs [1] = sp [1];
10258 iargs [2] = sp [2];
10259 if (ip [1] == CEE_CPBLK) {
10260 MonoMethod *memcpy_method = get_memcpy_method ();
10261 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
10263 MonoMethod *memset_method = get_memset_method ();
10264 mono_emit_method_call (cfg, memset_method, iargs, NULL);
10274 ins_flag |= MONO_INST_NOTYPECHECK;
10276 ins_flag |= MONO_INST_NORANGECHECK;
10277 /* we ignore the no-nullcheck for now since we
10278 * really do it explicitly only when doing callvirt->call
10282 case CEE_RETHROW: {
10284 int handler_offset = -1;
10286 for (i = 0; i < header->num_clauses; ++i) {
10287 MonoExceptionClause *clause = &header->clauses [i];
10288 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
10289 handler_offset = clause->handler_offset;
10294 bblock->flags |= BB_EXCEPTION_UNSAFE;
10296 g_assert (handler_offset != -1);
10298 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
10299 MONO_INST_NEW (cfg, ins, OP_RETHROW);
10300 ins->sreg1 = load->dreg;
10301 MONO_ADD_INS (bblock, ins);
10303 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
10304 MONO_ADD_INS (bblock, ins);
10307 link_bblock (cfg, bblock, end_bblock);
10308 start_new_bblock = 1;
10316 CHECK_STACK_OVF (1);
10318 token = read32 (ip + 2);
10319 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !method->klass->image->dynamic && !generic_context) {
10320 MonoType *type = mono_type_create_from_typespec (image, token);
10321 token = mono_type_size (type, &ialign);
10323 MonoClass *klass = mono_class_get_full (image, token, generic_context);
10324 CHECK_TYPELOAD (klass);
10325 mono_class_init (klass);
10326 token = mono_class_value_size (klass, &align);
10328 EMIT_NEW_ICONST (cfg, ins, token);
10333 case CEE_REFANYTYPE: {
10334 MonoInst *src_var, *src;
10340 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
10342 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
10343 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
10344 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
10349 case CEE_READONLY_:
10362 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
10372 g_warning ("opcode 0x%02x not handled", *ip);
10376 if (start_new_bblock != 1)
10379 bblock->cil_length = ip - bblock->cil_code;
10380 bblock->next_bb = end_bblock;
10382 if (cfg->method == method && cfg->domainvar) {
10384 MonoInst *get_domain;
10386 cfg->cbb = init_localsbb;
10388 if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
10389 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
10392 get_domain->dreg = alloc_preg (cfg);
10393 MONO_ADD_INS (cfg->cbb, get_domain);
10395 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
10396 MONO_ADD_INS (cfg->cbb, store);
10399 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
10400 if (cfg->compile_aot)
10401 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
10402 mono_get_got_var (cfg);
10405 if (cfg->method == method && cfg->got_var)
10406 mono_emit_load_got_addr (cfg);
10411 cfg->cbb = init_localsbb;
10413 for (i = 0; i < header->num_locals; ++i) {
10414 MonoType *ptype = header->locals [i];
10415 int t = ptype->type;
10416 dreg = cfg->locals [i]->dreg;
10418 if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
10419 t = mono_class_enum_basetype (ptype->data.klass)->type;
10420 if (ptype->byref) {
10421 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
10422 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
10423 MONO_EMIT_NEW_ICONST (cfg, cfg->locals [i]->dreg, 0);
10424 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
10425 MONO_EMIT_NEW_I8CONST (cfg, cfg->locals [i]->dreg, 0);
10426 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
10427 MONO_INST_NEW (cfg, ins, OP_R8CONST);
10428 ins->type = STACK_R8;
10429 ins->inst_p0 = (void*)&r8_0;
10430 ins->dreg = alloc_dreg (cfg, STACK_R8);
10431 MONO_ADD_INS (init_localsbb, ins);
10432 EMIT_NEW_LOCSTORE (cfg, store, i, ins);
10433 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
10434 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
10435 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
10437 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
10442 if (cfg->init_ref_vars && cfg->method == method) {
10443 /* Emit initialization for ref vars */
10444 // FIXME: Avoid duplication initialization for IL locals.
10445 for (i = 0; i < cfg->num_varinfo; ++i) {
10446 MonoInst *ins = cfg->varinfo [i];
10448 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
10449 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
10453 /* Add a sequence point for method entry/exit events */
10455 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
10456 MONO_ADD_INS (init_localsbb, ins);
10457 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
10458 MONO_ADD_INS (cfg->bb_exit, ins);
10463 if (cfg->method == method) {
10464 MonoBasicBlock *bb;
10465 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10466 bb->region = mono_find_block_region (cfg, bb->real_offset);
10468 mono_create_spvar_for_region (cfg, bb->region);
10469 if (cfg->verbose_level > 2)
10470 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
10474 g_slist_free (class_inits);
10475 dont_inline = g_list_remove (dont_inline, method);
10477 if (inline_costs < 0) {
10480 /* Method is too large */
10481 mname = mono_method_full_name (method, TRUE);
10482 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
10483 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
10485 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
10486 mono_basic_block_free (original_bb);
10490 if ((cfg->verbose_level > 2) && (cfg->method == method))
10491 mono_print_code (cfg, "AFTER METHOD-TO-IR");
10493 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
10494 mono_basic_block_free (original_bb);
10495 return inline_costs;
10498 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
10505 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
10509 set_exception_type_from_invalid_il (cfg, method, ip);
10513 g_slist_free (class_inits);
10514 mono_basic_block_free (original_bb);
10515 dont_inline = g_list_remove (dont_inline, method);
10516 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
10521 store_membase_reg_to_store_membase_imm (int opcode)
10524 case OP_STORE_MEMBASE_REG:
10525 return OP_STORE_MEMBASE_IMM;
10526 case OP_STOREI1_MEMBASE_REG:
10527 return OP_STOREI1_MEMBASE_IMM;
10528 case OP_STOREI2_MEMBASE_REG:
10529 return OP_STOREI2_MEMBASE_IMM;
10530 case OP_STOREI4_MEMBASE_REG:
10531 return OP_STOREI4_MEMBASE_IMM;
10532 case OP_STOREI8_MEMBASE_REG:
10533 return OP_STOREI8_MEMBASE_IMM;
10535 g_assert_not_reached ();
10541 #endif /* DISABLE_JIT */
10544 mono_op_to_op_imm (int opcode)
10548 return OP_IADD_IMM;
10550 return OP_ISUB_IMM;
10552 return OP_IDIV_IMM;
10554 return OP_IDIV_UN_IMM;
10556 return OP_IREM_IMM;
10558 return OP_IREM_UN_IMM;
10560 return OP_IMUL_IMM;
10562 return OP_IAND_IMM;
10566 return OP_IXOR_IMM;
10568 return OP_ISHL_IMM;
10570 return OP_ISHR_IMM;
10572 return OP_ISHR_UN_IMM;
10575 return OP_LADD_IMM;
10577 return OP_LSUB_IMM;
10579 return OP_LAND_IMM;
10583 return OP_LXOR_IMM;
10585 return OP_LSHL_IMM;
10587 return OP_LSHR_IMM;
10589 return OP_LSHR_UN_IMM;
10592 return OP_COMPARE_IMM;
10594 return OP_ICOMPARE_IMM;
10596 return OP_LCOMPARE_IMM;
10598 case OP_STORE_MEMBASE_REG:
10599 return OP_STORE_MEMBASE_IMM;
10600 case OP_STOREI1_MEMBASE_REG:
10601 return OP_STOREI1_MEMBASE_IMM;
10602 case OP_STOREI2_MEMBASE_REG:
10603 return OP_STOREI2_MEMBASE_IMM;
10604 case OP_STOREI4_MEMBASE_REG:
10605 return OP_STOREI4_MEMBASE_IMM;
10607 #if defined(TARGET_X86) || defined (TARGET_AMD64)
10609 return OP_X86_PUSH_IMM;
10610 case OP_X86_COMPARE_MEMBASE_REG:
10611 return OP_X86_COMPARE_MEMBASE_IMM;
10613 #if defined(TARGET_AMD64)
10614 case OP_AMD64_ICOMPARE_MEMBASE_REG:
10615 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
10617 case OP_VOIDCALL_REG:
10618 return OP_VOIDCALL;
10626 return OP_LOCALLOC_IMM;
10633 ldind_to_load_membase (int opcode)
10637 return OP_LOADI1_MEMBASE;
10639 return OP_LOADU1_MEMBASE;
10641 return OP_LOADI2_MEMBASE;
10643 return OP_LOADU2_MEMBASE;
10645 return OP_LOADI4_MEMBASE;
10647 return OP_LOADU4_MEMBASE;
10649 return OP_LOAD_MEMBASE;
10650 case CEE_LDIND_REF:
10651 return OP_LOAD_MEMBASE;
10653 return OP_LOADI8_MEMBASE;
10655 return OP_LOADR4_MEMBASE;
10657 return OP_LOADR8_MEMBASE;
10659 g_assert_not_reached ();
10666 stind_to_store_membase (int opcode)
10670 return OP_STOREI1_MEMBASE_REG;
10672 return OP_STOREI2_MEMBASE_REG;
10674 return OP_STOREI4_MEMBASE_REG;
10676 case CEE_STIND_REF:
10677 return OP_STORE_MEMBASE_REG;
10679 return OP_STOREI8_MEMBASE_REG;
10681 return OP_STORER4_MEMBASE_REG;
10683 return OP_STORER8_MEMBASE_REG;
10685 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Return the OP_LOAD*_MEM variant of OPCODE, which loads from a constant
 * address instead of base register + offset, or -1 if the target does not
 * support such a form.
 */
int
mono_load_membase_to_load_mem (int opcode)
{
	// FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
#if defined(TARGET_X86) || defined(TARGET_AMD64)
	switch (opcode) {
	case OP_LOAD_MEMBASE:
		return OP_LOAD_MEM;
	case OP_LOADU1_MEMBASE:
		return OP_LOADU1_MEM;
	case OP_LOADU2_MEMBASE:
		return OP_LOADU2_MEM;
	case OP_LOADI4_MEMBASE:
		return OP_LOADI4_MEM;
	case OP_LOADU4_MEMBASE:
		return OP_LOADU4_MEM;
#if SIZEOF_REGISTER == 8
	case OP_LOADI8_MEMBASE:
		return OP_LOADI8_MEM;
#endif
	}
#endif

	return -1;
}
/*
 * op_to_op_dest_membase:
 *
 *   Return an x86/amd64 read-modify-write opcode which folds OPCODE into the
 * store described by STORE_OPCODE, i.e. an 'op [basereg + offset], reg/imm'
 * instruction, or -1 if the combination is not possible on this target.
 * OP_MOVE maps to OP_NOP since the store itself already performs the move.
 */
static inline int
op_to_op_dest_membase (int store_opcode, int opcode)
{
#if defined(TARGET_X86)
	/* Only pointer sized / 32 bit stores can be combined on x86 */
	if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
		return -1;

	switch (opcode) {
	case OP_IADD:
		return OP_X86_ADD_MEMBASE_REG;
	case OP_ISUB:
		return OP_X86_SUB_MEMBASE_REG;
	case OP_IAND:
		return OP_X86_AND_MEMBASE_REG;
	case OP_IOR:
		return OP_X86_OR_MEMBASE_REG;
	case OP_IXOR:
		return OP_X86_XOR_MEMBASE_REG;
	case OP_ADD_IMM:
	case OP_IADD_IMM:
		return OP_X86_ADD_MEMBASE_IMM;
	case OP_SUB_IMM:
	case OP_ISUB_IMM:
		return OP_X86_SUB_MEMBASE_IMM;
	case OP_AND_IMM:
	case OP_IAND_IMM:
		return OP_X86_AND_MEMBASE_IMM;
	case OP_OR_IMM:
	case OP_IOR_IMM:
		return OP_X86_OR_MEMBASE_IMM;
	case OP_XOR_IMM:
	case OP_IXOR_IMM:
		return OP_X86_XOR_MEMBASE_IMM;
	case OP_MOVE:
		return OP_NOP;
	}
#endif

#if defined(TARGET_AMD64)
	/* Pointer sized, 32 bit and 64 bit stores can all be combined */
	if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
		return -1;

	switch (opcode) {
	case OP_IADD:
		return OP_X86_ADD_MEMBASE_REG;
	case OP_ISUB:
		return OP_X86_SUB_MEMBASE_REG;
	case OP_IAND:
		return OP_X86_AND_MEMBASE_REG;
	case OP_IOR:
		return OP_X86_OR_MEMBASE_REG;
	case OP_IXOR:
		return OP_X86_XOR_MEMBASE_REG;
	case OP_IADD_IMM:
		return OP_X86_ADD_MEMBASE_IMM;
	case OP_ISUB_IMM:
		return OP_X86_SUB_MEMBASE_IMM;
	case OP_IAND_IMM:
		return OP_X86_AND_MEMBASE_IMM;
	case OP_IOR_IMM:
		return OP_X86_OR_MEMBASE_IMM;
	case OP_IXOR_IMM:
		return OP_X86_XOR_MEMBASE_IMM;
	case OP_LADD:
		return OP_AMD64_ADD_MEMBASE_REG;
	case OP_LSUB:
		return OP_AMD64_SUB_MEMBASE_REG;
	case OP_LAND:
		return OP_AMD64_AND_MEMBASE_REG;
	case OP_LOR:
		return OP_AMD64_OR_MEMBASE_REG;
	case OP_LXOR:
		return OP_AMD64_XOR_MEMBASE_REG;
	case OP_ADD_IMM:
	case OP_LADD_IMM:
		return OP_AMD64_ADD_MEMBASE_IMM;
	case OP_SUB_IMM:
	case OP_LSUB_IMM:
		return OP_AMD64_SUB_MEMBASE_IMM;
	case OP_AND_IMM:
	case OP_LAND_IMM:
		return OP_AMD64_AND_MEMBASE_IMM;
	case OP_OR_IMM:
	case OP_LOR_IMM:
		return OP_AMD64_OR_MEMBASE_IMM;
	case OP_XOR_IMM:
	case OP_LXOR_IMM:
		return OP_AMD64_XOR_MEMBASE_IMM;
	case OP_MOVE:
		return OP_NOP;
	}
#endif

	return -1;
}
/*
 * op_to_op_store_membase:
 *
 *   Fuse a compare-result opcode (the case labels — presumably OP_ICEQ /
 * OP_ICNEQ or similar — are not visible in this excerpt) followed by a
 * byte store into a single x86 SETcc-to-memory opcode.  Returns -1 when
 * no fusion applies (default path also outside this excerpt).
 */
10814 op_to_op_store_membase (int store_opcode, int opcode)
10816 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Only byte-sized stores can take a SETcc result directly. */
10819 if (store_opcode == OP_STOREI1_MEMBASE_REG)
10820 return OP_X86_SETEQ_MEMBASE;
10822 if (store_opcode == OP_STOREI1_MEMBASE_REG)
10823 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Given the load opcode which would be used to read a stack-allocated
 * variable, return an opcode which folds that load of sreg1 directly into
 * the consuming instruction (push/compare), or -1 if no such fused form
 * exists.  x86 section first, amd64 below.
 */
10831 op_to_op_src1_membase (int load_opcode, int opcode)
10834 /* FIXME: This has sign extension issues */
10836 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
10837 return OP_X86_COMPARE_MEMBASE8_IMM;
/* Other fusions require a full-width (pointer/32 bit) load. */
10840 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
10845 return OP_X86_PUSH_MEMBASE;
10846 case OP_COMPARE_IMM:
10847 case OP_ICOMPARE_IMM:
10848 return OP_X86_COMPARE_MEMBASE_IMM;
10851 return OP_X86_COMPARE_MEMBASE_REG;
10855 #ifdef TARGET_AMD64
10856 /* FIXME: This has sign extension issues */
10858 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
10859 return OP_X86_COMPARE_MEMBASE8_IMM;
/* Under ILP32 (x32), OP_LOAD_MEMBASE is 4 bytes, not 8. */
10864 #ifdef __mono_ilp32__
10865 if (load_opcode == OP_LOADI8_MEMBASE)
10867 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10869 return OP_X86_PUSH_MEMBASE;
10871 /* FIXME: This only works for 32 bit immediates
10872 case OP_COMPARE_IMM:
10873 case OP_LCOMPARE_IMM:
10874 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10875 return OP_AMD64_COMPARE_MEMBASE_IMM;
10877 case OP_ICOMPARE_IMM:
10878 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10879 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
10883 #ifdef __mono_ilp32__
10884 if (load_opcode == OP_LOAD_MEMBASE)
10885 return OP_AMD64_ICOMPARE_MEMBASE_REG;
10886 if (load_opcode == OP_LOADI8_MEMBASE)
10888 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10890 return OP_AMD64_COMPARE_MEMBASE_REG;
10893 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10894 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Companion to op_to_op_src1_membase: fold a load of sreg2 into the
 * consuming compare/ALU instruction, yielding a <op>_REG_MEMBASE opcode,
 * or -1 when no fused form exists.
 */
10903 op_to_op_src2_membase (int load_opcode, int opcode)
/* x86: only full-width loads can be folded into the second operand. */
10906 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
10912 return OP_X86_COMPARE_REG_MEMBASE;
10914 return OP_X86_ADD_REG_MEMBASE;
10916 return OP_X86_SUB_REG_MEMBASE;
10918 return OP_X86_AND_REG_MEMBASE;
10920 return OP_X86_OR_REG_MEMBASE;
10922 return OP_X86_XOR_REG_MEMBASE;
10926 #ifdef TARGET_AMD64
/* Under ILP32, OP_LOAD_MEMBASE counts as a 32 bit load. */
10927 #ifdef __mono_ilp32__
10928 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE) ) {
10930 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)) {
10934 return OP_AMD64_ICOMPARE_REG_MEMBASE;
10936 return OP_X86_ADD_REG_MEMBASE;
10938 return OP_X86_SUB_REG_MEMBASE;
10940 return OP_X86_AND_REG_MEMBASE;
10942 return OP_X86_OR_REG_MEMBASE;
10944 return OP_X86_XOR_REG_MEMBASE;
10946 #ifdef __mono_ilp32__
10947 } else if (load_opcode == OP_LOADI8_MEMBASE) {
10949 } else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE)) {
10954 return OP_AMD64_COMPARE_REG_MEMBASE;
10956 return OP_AMD64_ADD_REG_MEMBASE;
10958 return OP_AMD64_SUB_REG_MEMBASE;
10960 return OP_AMD64_AND_REG_MEMBASE;
10962 return OP_AMD64_OR_REG_MEMBASE;
10964 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but refuse the conversion (the case labels
 * and their returns — presumably -1 — are not visible in this excerpt) for
 * opcodes which are software-emulated on this target: long shifts on
 * 32 bit registers, and mul/div when the arch emulates them.  Converting
 * those to _IMM forms would bypass the emulation.
 */
10973 mono_op_to_op_imm_noemul (int opcode)
10976 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
10982 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
10990 return mono_op_to_op_imm (opcode);
10994 #ifndef DISABLE_JIT
10997 * mono_handle_global_vregs:
10999 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
11003 mono_handle_global_vregs (MonoCompile *cfg)
11005 gint32 *vreg_to_bb;
11006 MonoBasicBlock *bb;
/*
 * vreg_to_bb [vreg] holds: 0 = unseen, block_num+1 = seen in exactly that
 * bblock, -1 = seen in more than one bblock.
 * NOTE(review): sizeof (gint32*) sizes a gint32 array by pointer size
 * (over-allocates on 64 bit), and "* cfg->next_vreg + 1" adds 1 byte, not
 * 1 element, due to precedence. Both are benign but presumably unintended
 * — confirm upstream.
 */
11009 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
11011 #ifdef MONO_ARCH_SIMD_INTRINSICS
11012 if (cfg->uses_simd_intrinsics)
11013 mono_simd_simplify_indirection (cfg);
11016 /* Find local vregs used in more than one bb */
11017 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
11018 MonoInst *ins = bb->code;
11019 int block_num = bb->block_num;
11021 if (cfg->verbose_level > 2)
11022 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
11025 for (; ins; ins = ins->next) {
11026 const char *spec = INS_INFO (ins->opcode);
11027 int regtype = 0, regindex;
11030 if (G_UNLIKELY (cfg->verbose_level > 2))
11031 mono_print_ins (ins);
11033 g_assert (ins->opcode >= MONO_CEE_LAST);
/* Walk dreg, sreg1, sreg2, sreg3 of this instruction. */
11035 for (regindex = 0; regindex < 4; regindex ++) {
11038 if (regindex == 0) {
11039 regtype = spec [MONO_INST_DEST];
11040 if (regtype == ' ')
11043 } else if (regindex == 1) {
11044 regtype = spec [MONO_INST_SRC1];
11045 if (regtype == ' ')
11048 } else if (regindex == 2) {
11049 regtype = spec [MONO_INST_SRC2];
11050 if (regtype == ' ')
11053 } else if (regindex == 3) {
11054 regtype = spec [MONO_INST_SRC3];
11055 if (regtype == ' ')
11060 #if SIZEOF_REGISTER == 4
11061 /* In the LLVM case, the long opcodes are not decomposed */
11062 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
11064 * Since some instructions reference the original long vreg,
11065 * and some reference the two component vregs, it is quite hard
11066 * to determine when it needs to be global. So be conservative.
11068 if (!get_vreg_to_inst (cfg, vreg)) {
11069 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
11071 if (cfg->verbose_level > 2)
11072 printf ("LONG VREG R%d made global.\n", vreg);
11076 * Make the component vregs volatile since the optimizations can
11077 * get confused otherwise.
/* vreg+1 / vreg+2 are the low / high word components of the long. */
11079 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
11080 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
11084 g_assert (vreg != -1);
11086 prev_bb = vreg_to_bb [vreg];
11087 if (prev_bb == 0) {
11088 /* 0 is a valid block num */
11089 vreg_to_bb [vreg] = block_num + 1;
11090 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* Hard registers never need to become variables. */
11091 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
11094 if (!get_vreg_to_inst (cfg, vreg)) {
11095 if (G_UNLIKELY (cfg->verbose_level > 2))
11096 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
/* Create a variable of the appropriate type for the vreg. */
11100 if (vreg_is_ref (cfg, vreg))
11101 mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
11103 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
11106 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
11109 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
11112 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
11115 g_assert_not_reached ();
11119 /* Flag as having been used in more than one bb */
11120 vreg_to_bb [vreg] = -1;
11126 /* If a variable is used in only one bblock, convert it into a local vreg */
11127 for (i = 0; i < cfg->num_varinfo; i++) {
11128 MonoInst *var = cfg->varinfo [i];
11129 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
11131 switch (var->type) {
11137 #if SIZEOF_REGISTER == 8
11140 #if !defined(TARGET_X86) && !defined(MONO_ARCH_SOFT_FLOAT)
11141 /* Enabling this screws up the fp stack on x86 */
11144 /* Arguments are implicitly global */
11145 /* Putting R4 vars into registers doesn't work currently */
11146 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg) {
11148 * Make that the variable's liveness interval doesn't contain a call, since
11149 * that would cause the lvreg to be spilled, making the whole optimization
11152 /* This is too slow for JIT compilation */
11154 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
11156 int def_index, call_index, ins_index;
11157 gboolean spilled = FALSE;
11162 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
11163 const char *spec = INS_INFO (ins->opcode);
11165 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
11166 def_index = ins_index;
/*
 * NOTE(review): both arms of this || test SRC1/sreg1 — the second
 * arm was presumably meant to test SRC2/sreg2, so uses through
 * sreg2 are currently missed here. Confirm against upstream
 * before changing (the surrounding lines are elided in this
 * excerpt).
 */
11168 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
11169 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
11170 if (call_index > def_index) {
11176 if (MONO_IS_CALL (ins))
11177 call_index = ins_index;
11187 if (G_UNLIKELY (cfg->verbose_level > 2))
11188 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
/* Mark the variable dead; the vreg becomes a plain local vreg. */
11189 var->flags |= MONO_INST_IS_DEAD;
11190 cfg->vreg_to_inst [var->dreg] = NULL;
11197 * Compress the varinfo and vars tables so the liveness computation is faster and
11198 * takes up less space.
11201 for (i = 0; i < cfg->num_varinfo; ++i) {
11202 MonoInst *var = cfg->varinfo [i];
11203 if (pos < i && cfg->locals_start == i)
11204 cfg->locals_start = pos;
11205 if (!(var->flags & MONO_INST_IS_DEAD)) {
11207 cfg->varinfo [pos] = cfg->varinfo [i];
11208 cfg->varinfo [pos]->inst_c0 = pos;
11209 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
11210 cfg->vars [pos].idx = pos;
11211 #if SIZEOF_REGISTER == 4
11212 if (cfg->varinfo [pos]->type == STACK_I8) {
11213 /* Modify the two component vars too */
11216 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
11217 var1->inst_c0 = pos;
11218 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
11219 var1->inst_c0 = pos;
11226 cfg->num_varinfo = pos;
11227 if (cfg->locals_start > cfg->num_varinfo)
11228 cfg->locals_start = cfg->num_varinfo;
11232 * mono_spill_global_vars:
11234 * Generate spill code for variables which are not allocated to registers,
11235 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
11236 * code is generated which could be optimized by the local optimization passes.
11239 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
11241 MonoBasicBlock *bb;
11243 int orig_next_vreg;
11244 guint32 *vreg_to_lvreg;
11246 guint32 i, lvregs_len;
11247 gboolean dest_has_lvreg = FALSE;
11248 guint32 stacktypes [128];
11249 MonoInst **live_range_start, **live_range_end;
11250 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
11252 *need_local_opts = FALSE;
11254 memset (spec2, 0, sizeof (spec2));
/* Map INS_INFO regtype characters to the stack type used by alloc_dreg (). */
11256 /* FIXME: Move this function to mini.c */
11257 stacktypes ['i'] = STACK_PTR;
11258 stacktypes ['l'] = STACK_I8;
11259 stacktypes ['f'] = STACK_R8;
11260 #ifdef MONO_ARCH_SIMD_INTRINSICS
11261 stacktypes ['x'] = STACK_VTYPE;
11264 #if SIZEOF_REGISTER == 4
11265 /* Create MonoInsts for longs */
11266 for (i = 0; i < cfg->num_varinfo; i++) {
11267 MonoInst *ins = cfg->varinfo [i];
11269 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
11270 switch (ins->type) {
11275 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
11278 g_assert (ins->opcode == OP_REGOFFSET);
/* Give the low word (dreg+1) and high word (dreg+2) their own slots. */
11280 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
11282 tree->opcode = OP_REGOFFSET;
11283 tree->inst_basereg = ins->inst_basereg;
11284 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
11286 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
11288 tree->opcode = OP_REGOFFSET;
11289 tree->inst_basereg = ins->inst_basereg;
11290 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
11300 if (cfg->compute_gc_maps) {
11301 /* registers need liveness info even for !non refs */
11302 for (i = 0; i < cfg->num_varinfo; i++) {
11303 MonoInst *ins = cfg->varinfo [i];
11305 if (ins->opcode == OP_REGVAR)
11306 ins->flags |= MONO_INST_GC_TRACK;
11310 /* FIXME: widening and truncation */
11313 * As an optimization, when a variable allocated to the stack is first loaded into
11314 * an lvreg, we will remember the lvreg and use it the next time instead of loading
11315 * the variable again.
11317 orig_next_vreg = cfg->next_vreg;
11318 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
11319 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
11323 * These arrays contain the first and last instructions accessing a given
11325 * Since we emit bblocks in the same order we process them here, and we
11326 * don't split live ranges, these will precisely describe the live range of
11327 * the variable, i.e. the instruction range where a valid value can be found
11328 * in the variables location.
11329 * The live range is computed using the liveness info computed by the liveness pass.
11330 * We can't use vmv->range, since that is an abstract live range, and we need
11331 * one which is instruction precise.
11332 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
11334 /* FIXME: Only do this if debugging info is requested */
11335 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
11336 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
11337 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
11338 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
11340 /* Add spill loads/stores */
11341 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
11344 if (cfg->verbose_level > 2)
11345 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
/* lvregs are only valid within one bblock, so reset at each bb. */
11347 /* Clear vreg_to_lvreg array */
11348 for (i = 0; i < lvregs_len; i++)
11349 vreg_to_lvreg [lvregs [i]] = 0;
11353 MONO_BB_FOR_EACH_INS (bb, ins) {
11354 const char *spec = INS_INFO (ins->opcode);
11355 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
11356 gboolean store, no_lvreg;
11357 int sregs [MONO_MAX_SRC_REGS];
11359 if (G_UNLIKELY (cfg->verbose_level > 2))
11360 mono_print_ins (ins);
11362 if (ins->opcode == OP_NOP)
11366 * We handle LDADDR here as well, since it can only be decomposed
11367 * when variable addresses are known.
11369 if (ins->opcode == OP_LDADDR) {
11370 MonoInst *var = ins->inst_p0;
11372 if (var->opcode == OP_VTARG_ADDR) {
11373 /* Happens on SPARC/S390 where vtypes are passed by reference */
11374 MonoInst *vtaddr = var->inst_left;
11375 if (vtaddr->opcode == OP_REGVAR) {
11376 ins->opcode = OP_MOVE;
11377 ins->sreg1 = vtaddr->dreg;
/* NOTE(review): tests var->inst_left while the body uses vtaddr —
 * they are the same object, but using vtaddr in both places would
 * be clearer. */
11379 else if (var->inst_left->opcode == OP_REGOFFSET) {
11380 ins->opcode = OP_LOAD_MEMBASE;
11381 ins->inst_basereg = vtaddr->inst_basereg;
11382 ins->inst_offset = vtaddr->inst_offset;
11386 g_assert (var->opcode == OP_REGOFFSET);
/* Decompose LDADDR into basereg + offset. */
11388 ins->opcode = OP_ADD_IMM;
11389 ins->sreg1 = var->inst_basereg;
11390 ins->inst_imm = var->inst_offset;
11393 *need_local_opts = TRUE;
11394 spec = INS_INFO (ins->opcode);
/* By this pass, no CIL-level opcodes should remain. */
11397 if (ins->opcode < MONO_CEE_LAST) {
11398 mono_print_ins (ins);
11399 g_assert_not_reached ();
11403 * Store opcodes have destbasereg in the dreg, but in reality, it is an
11407 if (MONO_IS_STORE_MEMBASE (ins)) {
/* Temporarily swap dreg<->sreg2 so the generic reg-walk below
 * treats the base register as a source; swapped back later. */
11408 tmp_reg = ins->dreg;
11409 ins->dreg = ins->sreg2;
11410 ins->sreg2 = tmp_reg;
11413 spec2 [MONO_INST_DEST] = ' ';
11414 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
11415 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
11416 spec2 [MONO_INST_SRC3] = ' ';
11418 } else if (MONO_IS_STORE_MEMINDEX (ins))
11419 g_assert_not_reached ();
11424 if (G_UNLIKELY (cfg->verbose_level > 2)) {
11425 printf ("\t %.3s %d", spec, ins->dreg);
11426 num_sregs = mono_inst_get_src_registers (ins, sregs);
11427 for (srcindex = 0; srcindex < 3; ++srcindex)
11428 printf (" %d", sregs [srcindex]);
/***************/
/*    DREG     */
/***************/
11435 regtype = spec [MONO_INST_DEST];
11436 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
11439 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
11440 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
11441 MonoInst *store_ins;
11443 MonoInst *def_ins = ins;
11444 int dreg = ins->dreg; /* The original vreg */
11446 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
11448 if (var->opcode == OP_REGVAR) {
/* Variable lives in a hard register: just substitute it. */
11449 ins->dreg = var->dreg;
11450 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
11452 * Instead of emitting a load+store, use a _membase opcode.
11454 g_assert (var->opcode == OP_REGOFFSET);
11455 if (ins->opcode == OP_MOVE) {
11459 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
11460 ins->inst_basereg = var->inst_basereg;
11461 ins->inst_offset = var->inst_offset;
11464 spec = INS_INFO (ins->opcode);
11468 g_assert (var->opcode == OP_REGOFFSET);
11470 prev_dreg = ins->dreg;
11472 /* Invalidate any previous lvreg for this vreg */
11473 vreg_to_lvreg [ins->dreg] = 0;
/* Soft float stores doubles through integer registers. */
11477 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
11479 store_opcode = OP_STOREI8_MEMBASE_REG;
11482 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
11484 if (regtype == 'l') {
/* 32 bit target: store the long as two 4 byte halves. */
11485 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
11486 mono_bblock_insert_after_ins (bb, ins, store_ins);
11487 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
11488 mono_bblock_insert_after_ins (bb, ins, store_ins);
11489 def_ins = store_ins;
11492 g_assert (store_opcode != OP_STOREV_MEMBASE);
11494 /* Try to fuse the store into the instruction itself */
11495 /* FIXME: Add more instructions */
11496 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
11497 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
11498 ins->inst_imm = ins->inst_c0;
11499 ins->inst_destbasereg = var->inst_basereg;
11500 ins->inst_offset = var->inst_offset;
11501 spec = INS_INFO (ins->opcode);
11502 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
/* A move into a spilled var becomes a direct store. */
11503 ins->opcode = store_opcode;
11504 ins->inst_destbasereg = var->inst_basereg;
11505 ins->inst_offset = var->inst_offset;
11509 tmp_reg = ins->dreg;
11510 ins->dreg = ins->sreg2;
11511 ins->sreg2 = tmp_reg;
11514 spec2 [MONO_INST_DEST] = ' ';
11515 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
11516 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
11517 spec2 [MONO_INST_SRC3] = ' ';
11519 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
11520 // FIXME: The backends expect the base reg to be in inst_basereg
11521 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
11523 ins->inst_basereg = var->inst_basereg;
11524 ins->inst_offset = var->inst_offset;
11525 spec = INS_INFO (ins->opcode);
11527 /* printf ("INS: "); mono_print_ins (ins); */
11528 /* Create a store instruction */
11529 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
11531 /* Insert it after the instruction */
11532 mono_bblock_insert_after_ins (bb, ins, store_ins);
11534 def_ins = store_ins;
11537 * We can't assign ins->dreg to var->dreg here, since the
11538 * sregs could use it. So set a flag, and do it after
11541 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
11542 dest_has_lvreg = TRUE;
/* Record the start of the variable's precise live range. */
11547 if (def_ins && !live_range_start [dreg]) {
11548 live_range_start [dreg] = def_ins;
11549 live_range_start_bb [dreg] = bb;
11552 if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
11555 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
11556 tmp->inst_c1 = dreg;
11557 mono_bblock_insert_after_ins (bb, def_ins, tmp);
/************/
/*  SREGS   */
/************/
11564 num_sregs = mono_inst_get_src_registers (ins, sregs);
11565 for (srcindex = 0; srcindex < 3; ++srcindex) {
11566 regtype = spec [MONO_INST_SRC1 + srcindex];
11567 sreg = sregs [srcindex];
11569 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
11570 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
11571 MonoInst *var = get_vreg_to_inst (cfg, sreg);
11572 MonoInst *use_ins = ins;
11573 MonoInst *load_ins;
11574 guint32 load_opcode;
11576 if (var->opcode == OP_REGVAR) {
/* Variable lives in a hard register: substitute directly. */
11577 sregs [srcindex] = var->dreg;
11578 //mono_inst_set_src_registers (ins, sregs);
11579 live_range_end [sreg] = use_ins;
11580 live_range_end_bb [sreg] = bb;
11582 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
11585 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
11586 /* var->dreg is a hreg */
11587 tmp->inst_c1 = sreg;
11588 mono_bblock_insert_after_ins (bb, ins, tmp);
11594 g_assert (var->opcode == OP_REGOFFSET);
11596 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
11598 g_assert (load_opcode != OP_LOADV_MEMBASE);
11600 if (vreg_to_lvreg [sreg]) {
11601 g_assert (vreg_to_lvreg [sreg] != -1);
11603 /* The variable is already loaded to an lvreg */
11604 if (G_UNLIKELY (cfg->verbose_level > 2))
11605 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
11606 sregs [srcindex] = vreg_to_lvreg [sreg];
11607 //mono_inst_set_src_registers (ins, sregs);
11611 /* Try to fuse the load into the instruction */
11612 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
11613 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
11614 sregs [0] = var->inst_basereg;
11615 //mono_inst_set_src_registers (ins, sregs);
11616 ins->inst_offset = var->inst_offset;
11617 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
11618 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
11619 sregs [1] = var->inst_basereg;
11620 //mono_inst_set_src_registers (ins, sregs);
11621 ins->inst_offset = var->inst_offset;
11623 if (MONO_IS_REAL_MOVE (ins)) {
11624 ins->opcode = OP_NOP;
11627 //printf ("%d ", srcindex); mono_print_ins (ins);
11629 sreg = alloc_dreg (cfg, stacktypes [regtype]);
/* Remember the lvreg so later uses in this bb can skip the reload. */
11631 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
11632 if (var->dreg == prev_dreg) {
11634 * sreg refers to the value loaded by the load
11635 * emitted below, but we need to use ins->dreg
11636 * since it refers to the store emitted earlier.
11640 g_assert (sreg != -1);
11641 vreg_to_lvreg [var->dreg] = sreg;
11642 g_assert (lvregs_len < 1024);
11643 lvregs [lvregs_len ++] = var->dreg;
11647 sregs [srcindex] = sreg;
11648 //mono_inst_set_src_registers (ins, sregs);
11650 if (regtype == 'l') {
/* 32 bit target: load the long as two 4 byte halves. */
11651 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
11652 mono_bblock_insert_before_ins (bb, ins, load_ins);
11653 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
11654 mono_bblock_insert_before_ins (bb, ins, load_ins);
11655 use_ins = load_ins;
11658 #if SIZEOF_REGISTER == 4
11659 g_assert (load_opcode != OP_LOADI8_MEMBASE);
11661 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
11662 mono_bblock_insert_before_ins (bb, ins, load_ins);
11663 use_ins = load_ins;
11667 if (var->dreg < orig_next_vreg) {
11668 live_range_end [var->dreg] = use_ins;
11669 live_range_end_bb [var->dreg] = bb;
11672 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
11675 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
11676 tmp->inst_c1 = var->dreg;
11677 mono_bblock_insert_after_ins (bb, ins, tmp);
11681 mono_inst_set_src_registers (ins, sregs);
/* Deferred from the DREG section: now that sregs are processed it is
 * safe to publish ins->dreg as the lvreg for prev_dreg. */
11683 if (dest_has_lvreg) {
11684 g_assert (ins->dreg != -1);
11685 vreg_to_lvreg [prev_dreg] = ins->dreg;
11686 g_assert (lvregs_len < 1024);
11687 lvregs [lvregs_len ++] = prev_dreg;
11688 dest_has_lvreg = FALSE;
/* Undo the earlier dreg<->sreg2 swap for store opcodes. */
11692 tmp_reg = ins->dreg;
11693 ins->dreg = ins->sreg2;
11694 ins->sreg2 = tmp_reg;
/* Calls clobber the lvregs (caller-saved regs may be trashed). */
11697 if (MONO_IS_CALL (ins)) {
11698 /* Clear vreg_to_lvreg array */
11699 for (i = 0; i < lvregs_len; i++)
11700 vreg_to_lvreg [lvregs [i]] = 0;
11702 } else if (ins->opcode == OP_NOP) {
11704 MONO_INST_NULLIFY_SREGS (ins);
11707 if (cfg->verbose_level > 2)
11708 mono_print_ins_index (1, ins);
11711 /* Extend the live range based on the liveness info */
11712 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
11713 for (i = 0; i < cfg->num_varinfo; i ++) {
11714 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
11716 if (vreg_is_volatile (cfg, vi->vreg))
11717 /* The liveness info is incomplete */
11720 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
11721 /* Live from at least the first ins of this bb */
11722 live_range_start [vi->vreg] = bb->code;
11723 live_range_start_bb [vi->vreg] = bb;
11726 if (mono_bitset_test_fast (bb->live_out_set, i)) {
11727 /* Live at least until the last ins of this bb */
11728 live_range_end [vi->vreg] = bb->last_ins;
11729 live_range_end_bb [vi->vreg] = bb;
11735 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
11737 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
11738 * by storing the current native offset into MonoMethodVar->live_range_start/end.
11740 if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
11741 for (i = 0; i < cfg->num_varinfo; ++i) {
11742 int vreg = MONO_VARINFO (cfg, i)->vreg;
11745 if (live_range_start [vreg]) {
11746 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
11748 ins->inst_c1 = vreg;
11749 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
11751 if (live_range_end [vreg]) {
11752 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
11754 ins->inst_c1 = vreg;
11755 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
11756 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
11758 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
/* The live range arrays were g_new'd (not mempool'd), so free them. */
11764 g_free (live_range_start);
11765 g_free (live_range_end);
11766 g_free (live_range_start_bb);
11767 g_free (live_range_end_bb);
11772 * - use 'iadd' instead of 'int_add'
11773 * - handling ovf opcodes: decompose in method_to_ir.
11774 * - unify iregs/fregs
11775 * -> partly done, the missing parts are:
11776 * - a more complete unification would involve unifying the hregs as well, so
11777 * code wouldn't need if (fp) all over the place. but that would mean the hregs
11778 * would no longer map to the machine hregs, so the code generators would need to
11779 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
11780 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
11781 * fp/non-fp branches speeds it up by about 15%.
11782 * - use sext/zext opcodes instead of shifts
11784 * - get rid of TEMPLOADs if possible and use vregs instead
11785 * - clean up usage of OP_P/OP_ opcodes
11786 * - cleanup usage of DUMMY_USE
11787 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
11789 * - set the stack type and allocate a dreg in the EMIT_NEW macros
11790 * - get rid of all the <foo>2 stuff when the new JIT is ready.
11791 * - make sure handle_stack_args () is called before the branch is emitted
11792 * - when the new IR is done, get rid of all unused stuff
11793 * - COMPARE/BEQ as separate instructions or unify them ?
11794 * - keeping them separate allows specialized compare instructions like
11795 * compare_imm, compare_membase
11796 * - most back ends unify fp compare+branch, fp compare+ceq
11797 * - integrate mono_save_args into inline_method
11798 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRACH_BLOCK2
11799 * - handle long shift opts on 32 bit platforms somehow: they require
11800 * 3 sregs (2 for arg1 and 1 for arg2)
11801 * - make byref a 'normal' type.
11802 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
11803 * variable if needed.
11804 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
11805 * like inline_method.
11806 * - remove inlining restrictions
11807 * - fix LNEG and enable cfold of INEG
11808 * - generalize x86 optimizations like ldelema as a peephole optimization
11809 * - add store_mem_imm for amd64
11810 * - optimize the loading of the interruption flag in the managed->native wrappers
11811 * - avoid special handling of OP_NOP in passes
11812 * - move code inserting instructions into one function/macro.
11813 * - try a coalescing phase after liveness analysis
11814 * - add float -> vreg conversion + local optimizations on !x86
11815 * - figure out how to handle decomposed branches during optimizations, ie.
11816 * compare+branch, op_jump_table+op_br etc.
11817 * - promote RuntimeXHandles to vregs
11818 * - vtype cleanups:
11819 * - add a NEW_VARLOADA_VREG macro
11820 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
11821 * accessing vtype fields.
11822 * - get rid of I8CONST on 64 bit platforms
11823 * - dealing with the increase in code size due to branches created during opcode
11825 * - use extended basic blocks
11826 * - all parts of the JIT
11827 * - handle_global_vregs () && local regalloc
11828 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
11829 * - sources of increase in code size:
11832 * - isinst and castclass
11833 * - lvregs not allocated to global registers even if used multiple times
11834 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
11836 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
11837 * - add all micro optimizations from the old JIT
11838 * - put tree optimizations into the deadce pass
11839 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
11840 * specific function.
11841 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
11842 * fcompare + branchCC.
11843 * - create a helper function for allocating a stack slot, taking into account
11844 * MONO_CFG_HAS_SPILLUP.
11846 * - merge the ia64 switch changes.
11847 * - optimize mono_regstate2_alloc_int/float.
11848 * - fix the pessimistic handling of variables accessed in exception handler blocks.
11849 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
11850 * parts of the tree could be separated by other instructions, killing the tree
11851 * arguments, or stores killing loads etc. Also, should we fold loads into other
11852 * instructions if the result of the load is used multiple times ?
11853 * - make the REM_IMM optimization in mini-x86.c arch-independent.
11854 * - LAST MERGE: 108395.
11855 * - when returning vtypes in registers, generate IR and append it to the end of the
11856 * last bb instead of doing it in the epilog.
11857 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
11865 - When to decompose opcodes:
11866 - earlier: this makes some optimizations hard to implement, since the low level IR
11867 no longer contains the necessary information. But it is easier to do.
11868 - later: harder to implement, enables more optimizations.
11869 - Branches inside bblocks:
11870 - created when decomposing complex opcodes.
11871 - branches to another bblock: harmless, but not tracked by the branch
11872 optimizations, so need to branch to a label at the start of the bblock.
11873 - branches to inside the same bblock: very problematic, trips up the local
11874 reg allocator. Can be fixed by splitting the current bblock, but that is a
11875 complex operation, since some local vregs can become global vregs etc.
11876 - Local/global vregs:
11877 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
11878 local register allocator.
11879 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
11880 structure, created by mono_create_var (). Assigned to hregs or the stack by
11881 the global register allocator.
11882 - When to do optimizations like alu->alu_imm:
11883 - earlier -> saves work later on since the IR will be smaller/simpler
11884 - later -> can work on more instructions
11885 - Handling of valuetypes:
11886 - When a vtype is pushed on the stack, a new temporary is created, an
11887 instruction computing its address (LDADDR) is emitted and pushed on
11888 the stack. Need to optimize cases when the vtype is used immediately as in
11889 argument passing, stloc etc.
11890 - Instead of the to_end stuff in the old JIT, simply call the function handling
11891 the values on the stack before emitting the last instruction of the bb.
11894 #endif /* DISABLE_JIT */