2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
22 #ifdef HAVE_SYS_TIME_H
30 #include <mono/utils/memcheck.h>
32 #include <mono/metadata/assembly.h>
33 #include <mono/metadata/attrdefs.h>
34 #include <mono/metadata/loader.h>
35 #include <mono/metadata/tabledefs.h>
36 #include <mono/metadata/class.h>
37 #include <mono/metadata/object.h>
38 #include <mono/metadata/exception.h>
39 #include <mono/metadata/opcodes.h>
40 #include <mono/metadata/mono-endian.h>
41 #include <mono/metadata/tokentype.h>
42 #include <mono/metadata/tabledefs.h>
43 #include <mono/metadata/marshal.h>
44 #include <mono/metadata/debug-helpers.h>
45 #include <mono/metadata/mono-debug.h>
46 #include <mono/metadata/gc-internal.h>
47 #include <mono/metadata/security-manager.h>
48 #include <mono/metadata/threads-types.h>
49 #include <mono/metadata/security-core-clr.h>
50 #include <mono/metadata/monitor.h>
51 #include <mono/metadata/profiler-private.h>
52 #include <mono/metadata/profiler.h>
53 #include <mono/utils/mono-compiler.h>
54 #include <mono/metadata/mono-basic-block.h>
61 #include "jit-icalls.h"
63 #include "debugger-agent.h"
65 #define BRANCH_COST 10
66 #define INLINE_LENGTH_LIMIT 20
67 #define INLINE_FAILURE do {\
68 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE))\
71 #define CHECK_CFG_EXCEPTION do {\
72 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
75 #define METHOD_ACCESS_FAILURE do { \
76 char *method_fname = mono_method_full_name (method, TRUE); \
77 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
78 mono_cfg_set_exception (cfg, MONO_EXCEPTION_METHOD_ACCESS); \
79 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
80 g_free (method_fname); \
81 g_free (cil_method_fname); \
82 goto exception_exit; \
84 #define FIELD_ACCESS_FAILURE do { \
85 char *method_fname = mono_method_full_name (method, TRUE); \
86 char *field_fname = mono_field_full_name (field); \
87 mono_cfg_set_exception (cfg, MONO_EXCEPTION_FIELD_ACCESS); \
88 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
89 g_free (method_fname); \
90 g_free (field_fname); \
91 goto exception_exit; \
93 #define GENERIC_SHARING_FAILURE(opcode) do { \
94 if (cfg->generic_sharing_context) { \
95 if (cfg->verbose_level > 2) \
96 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
97 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
98 goto exception_exit; \
101 #define OUT_OF_MEMORY_FAILURE do { \
102 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OUT_OF_MEMORY); \
103 goto exception_exit; \
105 /* Determine whether 'ins' represents a load of the 'this' argument */
106 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
108 static int ldind_to_load_membase (int opcode);
109 static int stind_to_store_membase (int opcode);
111 int mono_op_to_op_imm (int opcode);
112 int mono_op_to_op_imm_noemul (int opcode);
114 MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
115 void mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native);
116 void mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass);
118 /* helper methods signatures */
119 static MonoMethodSignature *helper_sig_class_init_trampoline = NULL;
120 static MonoMethodSignature *helper_sig_domain_get = NULL;
121 static MonoMethodSignature *helper_sig_generic_class_init_trampoline = NULL;
122 static MonoMethodSignature *helper_sig_generic_class_init_trampoline_llvm = NULL;
123 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline = NULL;
124 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline = NULL;
125 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm = NULL;
128 * Instruction metadata
136 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
137 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
143 #if SIZEOF_REGISTER == 8
148 /* keep in sync with the enum in mini.h */
151 #include "mini-ops.h"
156 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
157 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
159 * This should contain the index of the last sreg + 1. This is not the same
160 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
162 const gint8 ins_sreg_counts[] = {
163 #include "mini-ops.h"
168 #define MONO_INIT_VARINFO(vi,id) do { \
169 (vi)->range.first_use.pos.bid = 0xffff; \
175 mono_inst_set_src_registers (MonoInst *ins, int *regs)
177 ins->sreg1 = regs [0];
178 ins->sreg2 = regs [1];
179 ins->sreg3 = regs [2];
183 mono_alloc_ireg (MonoCompile *cfg)
185 return alloc_ireg (cfg);
189 mono_alloc_freg (MonoCompile *cfg)
191 return alloc_freg (cfg);
195 mono_alloc_preg (MonoCompile *cfg)
197 return alloc_preg (cfg);
201 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
203 return alloc_dreg (cfg, stack_type);
207 * mono_alloc_ireg_ref:
209 * Allocate an IREG, and mark it as holding a GC ref.
212 mono_alloc_ireg_ref (MonoCompile *cfg)
214 return alloc_ireg_ref (cfg);
218 * mono_alloc_ireg_mp:
220 * Allocate an IREG, and mark it as holding a managed pointer.
223 mono_alloc_ireg_mp (MonoCompile *cfg)
225 return alloc_ireg_mp (cfg);
229 * mono_alloc_ireg_copy:
231 * Allocate an IREG with the same GC type as VREG.
234 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
236 if (vreg_is_ref (cfg, vreg))
237 return alloc_ireg_ref (cfg);
238 else if (vreg_is_mp (cfg, vreg))
239 return alloc_ireg_mp (cfg);
241 return alloc_ireg (cfg);
/*
 * mono_type_to_regmove:
 * Pick the move opcode used when a value of @type is copied between
 * registers (the g_error below calls this "type_to_regstore").
 * Enums are replaced by their underlying type; generic instances by
 * their container's byval type; shared type params assert a
 * generic_sharing_context.
 * NOTE(review): many interior lines of this switch were lost in
 * extraction — several case labels, return statements and closing
 * braces are missing, so the fall-through structure shown here is
 * NOT trustworthy.
 */
245 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
251 switch (type->type) {
254 case MONO_TYPE_BOOLEAN:
266 case MONO_TYPE_FNPTR:
268 case MONO_TYPE_CLASS:
269 case MONO_TYPE_STRING:
270 case MONO_TYPE_OBJECT:
271 case MONO_TYPE_SZARRAY:
272 case MONO_TYPE_ARRAY:
276 #if SIZEOF_REGISTER == 8
285 case MONO_TYPE_VALUETYPE:
/* enums move like their underlying primitive type */
286 if (type->data.klass->enumtype) {
287 type = mono_class_enum_basetype (type->data.klass);
/* presumably SIMD vtypes get a dedicated move — TODO confirm against full source */
290 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
293 case MONO_TYPE_TYPEDBYREF:
295 case MONO_TYPE_GENERICINST:
296 type = &type->data.generic_class->container_class->byval_arg;
/* shared generic params are only valid under generic sharing */
300 g_assert (cfg->generic_sharing_context);
303 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/*
 * mono_print_bb:
 * Debug helper: dump a basic block — its block number, the list of
 * predecessor blocks (in_bb) and successor blocks (out_bb) with their
 * depth-first numbers, then every instruction in the block via
 * mono_print_ins_index ().
 * NOTE(review): the function scaffolding (return type, braces, local
 * declarations) and at least one printf separating IN from OUT were
 * lost in extraction.
 */
309 mono_print_bb (MonoBasicBlock *bb, const char *msg)
314 printf ("\n%s %d: [IN: ", msg, bb->block_num);
315 for (i = 0; i < bb->in_count; ++i)
316 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
318 for (i = 0; i < bb->out_count; ++i)
319 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
/* finally dump the instruction list of the block */
321 for (tree = bb->code; tree; tree = tree->next)
322 mono_print_ins_index (-1, tree);
326 mono_create_helper_signatures (void)
328 helper_sig_domain_get = mono_create_icall_signature ("ptr");
329 helper_sig_class_init_trampoline = mono_create_icall_signature ("void");
330 helper_sig_generic_class_init_trampoline = mono_create_icall_signature ("void");
331 helper_sig_generic_class_init_trampoline_llvm = mono_create_icall_signature ("void ptr");
332 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
333 helper_sig_monitor_enter_exit_trampoline = mono_create_icall_signature ("void");
334 helper_sig_monitor_enter_exit_trampoline_llvm = mono_create_icall_signature ("void object");
338 * Can't put this at the beginning, since other files reference stuff from this
343 #define UNVERIFIED do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto unverified; } while (0)
345 #define LOAD_ERROR do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto load_error; } while (0)
347 #define GET_BBLOCK(cfg,tblock,ip) do { \
348 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
350 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
351 NEW_BBLOCK (cfg, (tblock)); \
352 (tblock)->cil_code = (ip); \
353 ADD_BBLOCK (cfg, (tblock)); \
357 #if defined(TARGET_X86) || defined(TARGET_AMD64)
358 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
359 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
360 (dest)->dreg = alloc_ireg_mp ((cfg)); \
361 (dest)->sreg1 = (sr1); \
362 (dest)->sreg2 = (sr2); \
363 (dest)->inst_imm = (imm); \
364 (dest)->backend.shift_amount = (shift); \
365 MONO_ADD_INS ((cfg)->cbb, (dest)); \
369 #if SIZEOF_REGISTER == 8
370 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
371 /* FIXME: Need to add many more cases */ \
372 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
374 int dr = alloc_preg (cfg); \
375 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
376 (ins)->sreg2 = widen->dreg; \
380 #define ADD_WIDEN_OP(ins, arg1, arg2)
383 #define ADD_BINOP(op) do { \
384 MONO_INST_NEW (cfg, ins, (op)); \
386 ins->sreg1 = sp [0]->dreg; \
387 ins->sreg2 = sp [1]->dreg; \
388 type_from_op (ins, sp [0], sp [1]); \
390 /* Have to insert a widening op */ \
391 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
392 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
393 MONO_ADD_INS ((cfg)->cbb, (ins)); \
394 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
397 #define ADD_UNOP(op) do { \
398 MONO_INST_NEW (cfg, ins, (op)); \
400 ins->sreg1 = sp [0]->dreg; \
401 type_from_op (ins, sp [0], NULL); \
403 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
404 MONO_ADD_INS ((cfg)->cbb, (ins)); \
405 *sp++ = mono_decompose_opcode (cfg, ins); \
408 #define ADD_BINCOND(next_block) do { \
411 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
412 cmp->sreg1 = sp [0]->dreg; \
413 cmp->sreg2 = sp [1]->dreg; \
414 type_from_op (cmp, sp [0], sp [1]); \
416 type_from_op (ins, sp [0], sp [1]); \
417 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
418 GET_BBLOCK (cfg, tblock, target); \
419 link_bblock (cfg, bblock, tblock); \
420 ins->inst_true_bb = tblock; \
421 if ((next_block)) { \
422 link_bblock (cfg, bblock, (next_block)); \
423 ins->inst_false_bb = (next_block); \
424 start_new_bblock = 1; \
426 GET_BBLOCK (cfg, tblock, ip); \
427 link_bblock (cfg, bblock, tblock); \
428 ins->inst_false_bb = tblock; \
429 start_new_bblock = 2; \
431 if (sp != stack_start) { \
432 handle_stack_args (cfg, stack_start, sp - stack_start); \
433 CHECK_UNVERIFIABLE (cfg); \
435 MONO_ADD_INS (bblock, cmp); \
436 MONO_ADD_INS (bblock, ins); \
440 * link_bblock: Links two basic blocks
442 * links two basic blocks in the control flow graph, the 'from'
443 * argument is the starting block and the 'to' argument is the block
444 * the control flow ends to after 'from'.
/*
 * NOTE(review): this body is incomplete in this extraction — the
 * duplicate-edge early-outs, the appends of the new entry to the
 * grown arrays, the count increments and all closing braces are
 * missing. The visible lines only show the verbose tracing and the
 * grow-and-copy halves of the out_bb/in_bb updates.
 */
447 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
449 MonoBasicBlock **newa;
/* verbose tracing of the edge being added, by IL offset */
453 if (from->cil_code) {
455 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
457 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
460 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
462 printf ("edge from entry to exit\n");
/* skip if the edge already exists in from->out_bb */
467 for (i = 0; i < from->out_count; ++i) {
468 if (to == from->out_bb [i]) {
/* grow from->out_bb by one (arrays live in the cfg mempool, never freed) */
474 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
475 for (i = 0; i < from->out_count; ++i) {
476 newa [i] = from->out_bb [i];
/* same dance for the reverse edge in to->in_bb */
484 for (i = 0; i < to->in_count; ++i) {
485 if (from == to->in_bb [i]) {
491 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
492 for (i = 0; i < to->in_count; ++i) {
493 newa [i] = to->in_bb [i];
502 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
504 link_bblock (cfg, from, to);
508 * mono_find_block_region:
510 * We mark each basic block with a region ID. We use that to avoid BB
511 * optimizations when blocks are in different regions.
514 * A region token that encodes where this region is, and information
515 * about the clause owner for this block.
517 * The region encodes the try/catch/filter clause that owns this block
518 * as well as the type. -1 is a special value that represents a block
519 * that is in none of try/catch/filter.
/*
 * NOTE(review): the loop-variable declaration, a closing brace and the
 * final fallback return (presumably -1, per the comment above — TODO
 * confirm) are missing from this extraction.
 */
522 mono_find_block_region (MonoCompile *cfg, int offset)
524 MonoMethodHeader *header = cfg->header;
525 MonoExceptionClause *clause;
/* scan every EH clause; region id is (clause index + 1) << 8 | kind | flags */
528 for (i = 0; i < header->num_clauses; ++i) {
529 clause = &header->clauses [i];
/* filter blocks live between filter_offset and handler_offset */
530 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
531 (offset < (clause->handler_offset)))
532 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
/* inside the handler: distinguish finally / fault / catch */
534 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
535 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
536 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
537 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
538 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
540 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
/* inside the protected (try) range of the clause */
543 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
544 return ((i + 1) << 8) | clause->flags;
/*
 * mono_find_final_block:
 * Collect (via g_list_append) every EH clause of kind @type whose
 * protected range contains @ip but NOT @target — i.e. the clauses a
 * branch from ip to target would leave, whose handlers (e.g. finally
 * blocks) must therefore be run.
 * NOTE(review): the declaration/initialization of 'res', the result
 * return and the closing braces are missing from this extraction.
 */
551 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
553 MonoMethodHeader *header = cfg->header;
554 MonoExceptionClause *clause;
558 for (i = 0; i < header->num_clauses; ++i) {
559 clause = &header->clauses [i];
/* clause covers the branch source but not its destination */
560 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
561 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
562 if (clause->flags == type)
563 res = g_list_append (res, clause);
/*
 * mono_create_spvar_for_region:
 * Return the stack-pointer variable associated with EH region @region,
 * creating and caching it in cfg->spvars on first use. The variable is
 * an int-typed OP_LOCAL.
 * NOTE(review): the early return on a cache hit, the 'var' declaration
 * and the final return are missing from this extraction.
 */
570 mono_create_spvar_for_region (MonoCompile *cfg, int region)
574 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
578 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
579 /* prevent it from being register allocated */
580 var->flags |= MONO_INST_INDIRECT;
/* cache for subsequent lookups of the same region */
582 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
586 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
588 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/*
 * mono_create_exvar_for_offset:
 * Return the exception variable for IL offset @offset, creating and
 * caching it in cfg->exvars on first use. The variable is an
 * object-typed OP_LOCAL.
 * NOTE(review): the early return on a cache hit, the 'var' declaration
 * and the final return are missing from this extraction.
 */
592 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
596 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
600 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
601 /* prevent it from being register allocated */
602 var->flags |= MONO_INST_INDIRECT;
/* cache for subsequent lookups of the same offset */
604 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
610 * Returns the type used in the eval stack when @type is loaded.
611 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
/*
 * NOTE(review): this switch is incomplete in this extraction — many
 * case labels (the integral/I8/R8 groups), 'break' statements, the
 * byref early-out body and the closing braces are missing; the
 * pairings of labels to assignments below cannot be fully trusted.
 */
614 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
/* record the class of the value on INST as a side effect */
618 inst->klass = klass = mono_class_from_mono_type (type);
/* byref values are managed pointers on the eval stack */
620 inst->type = STACK_MP;
625 switch (type->type) {
627 inst->type = STACK_INV;
631 case MONO_TYPE_BOOLEAN:
637 inst->type = STACK_I4;
642 case MONO_TYPE_FNPTR:
643 inst->type = STACK_PTR;
645 case MONO_TYPE_CLASS:
646 case MONO_TYPE_STRING:
647 case MONO_TYPE_OBJECT:
648 case MONO_TYPE_SZARRAY:
649 case MONO_TYPE_ARRAY:
650 inst->type = STACK_OBJ;
654 inst->type = STACK_I8;
658 inst->type = STACK_R8;
660 case MONO_TYPE_VALUETYPE:
/* enums load as their underlying primitive type */
661 if (type->data.klass->enumtype) {
662 type = mono_class_enum_basetype (type->data.klass);
666 inst->type = STACK_VTYPE;
669 case MONO_TYPE_TYPEDBYREF:
670 inst->klass = mono_defaults.typed_reference_class;
671 inst->type = STACK_VTYPE;
673 case MONO_TYPE_GENERICINST:
674 type = &type->data.generic_class->container_class->byval_arg;
677 case MONO_TYPE_MVAR :
678 /* FIXME: all the arguments must be references for now,
679 * later look inside cfg and see if the arg num is
/* shared type params only valid under generic sharing */
682 g_assert (cfg->generic_sharing_context);
683 inst->type = STACK_OBJ;
686 g_error ("unknown type 0x%02x in eval stack type", type->type);
691 * The following tables are used to quickly validate the IL code in type_from_op ().
694 bin_num_table [STACK_MAX] [STACK_MAX] = {
695 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
696 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
697 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
698 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
699 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
700 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
701 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
702 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
707 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
710 /* reduce the size of this table */
712 bin_int_table [STACK_MAX] [STACK_MAX] = {
713 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
714 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
715 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
716 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
717 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
718 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
719 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
720 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
724 bin_comp_table [STACK_MAX] [STACK_MAX] = {
725 /* Inv i L p F & O vt */
727 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
728 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
729 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
730 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
731 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
732 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
733 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
736 /* reduce the size of this table */
738 shift_table [STACK_MAX] [STACK_MAX] = {
739 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
740 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
741 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
742 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
743 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
744 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
745 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
746 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
750 * Tables to map from the non-specific opcode to the matching
751 * type-specific opcode.
753 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
755 binops_op_map [STACK_MAX] = {
756 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
759 /* handles from CEE_NEG to CEE_CONV_U8 */
761 unops_op_map [STACK_MAX] = {
762 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
765 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
767 ovfops_op_map [STACK_MAX] = {
768 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
771 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
773 ovf2ops_op_map [STACK_MAX] = {
774 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
777 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
779 ovf3ops_op_map [STACK_MAX] = {
780 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
783 /* handles from CEE_BEQ to CEE_BLT_UN */
785 beqops_op_map [STACK_MAX] = {
786 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
789 /* handles from CEE_CEQ to CEE_CLT_UN */
791 ceqops_op_map [STACK_MAX] = {
792 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
796 * Sets ins->type (the type on the eval stack) according to the
797 * type of the opcode and the arguments to it.
798 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
800 * FIXME: this function sets ins->type unconditionally in some cases, but
801 * it should set it to invalid for some types (a conv.x on an object)
804 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
806 switch (ins->opcode) {
813 /* FIXME: check unverifiable args for STACK_MP */
814 ins->type = bin_num_table [src1->type] [src2->type];
815 ins->opcode += binops_op_map [ins->type];
822 ins->type = bin_int_table [src1->type] [src2->type];
823 ins->opcode += binops_op_map [ins->type];
828 ins->type = shift_table [src1->type] [src2->type];
829 ins->opcode += binops_op_map [ins->type];
834 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
835 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
836 ins->opcode = OP_LCOMPARE;
837 else if (src1->type == STACK_R8)
838 ins->opcode = OP_FCOMPARE;
840 ins->opcode = OP_ICOMPARE;
842 case OP_ICOMPARE_IMM:
843 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
844 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
845 ins->opcode = OP_LCOMPARE_IMM;
857 ins->opcode += beqops_op_map [src1->type];
860 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
861 ins->opcode += ceqops_op_map [src1->type];
867 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
868 ins->opcode += ceqops_op_map [src1->type];
872 ins->type = neg_table [src1->type];
873 ins->opcode += unops_op_map [ins->type];
876 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
877 ins->type = src1->type;
879 ins->type = STACK_INV;
880 ins->opcode += unops_op_map [ins->type];
886 ins->type = STACK_I4;
887 ins->opcode += unops_op_map [src1->type];
890 ins->type = STACK_R8;
891 switch (src1->type) {
894 ins->opcode = OP_ICONV_TO_R_UN;
897 ins->opcode = OP_LCONV_TO_R_UN;
901 case CEE_CONV_OVF_I1:
902 case CEE_CONV_OVF_U1:
903 case CEE_CONV_OVF_I2:
904 case CEE_CONV_OVF_U2:
905 case CEE_CONV_OVF_I4:
906 case CEE_CONV_OVF_U4:
907 ins->type = STACK_I4;
908 ins->opcode += ovf3ops_op_map [src1->type];
910 case CEE_CONV_OVF_I_UN:
911 case CEE_CONV_OVF_U_UN:
912 ins->type = STACK_PTR;
913 ins->opcode += ovf2ops_op_map [src1->type];
915 case CEE_CONV_OVF_I1_UN:
916 case CEE_CONV_OVF_I2_UN:
917 case CEE_CONV_OVF_I4_UN:
918 case CEE_CONV_OVF_U1_UN:
919 case CEE_CONV_OVF_U2_UN:
920 case CEE_CONV_OVF_U4_UN:
921 ins->type = STACK_I4;
922 ins->opcode += ovf2ops_op_map [src1->type];
925 ins->type = STACK_PTR;
926 switch (src1->type) {
928 ins->opcode = OP_ICONV_TO_U;
932 #if SIZEOF_VOID_P == 8
933 ins->opcode = OP_LCONV_TO_U;
935 ins->opcode = OP_MOVE;
939 ins->opcode = OP_LCONV_TO_U;
942 ins->opcode = OP_FCONV_TO_U;
948 ins->type = STACK_I8;
949 ins->opcode += unops_op_map [src1->type];
951 case CEE_CONV_OVF_I8:
952 case CEE_CONV_OVF_U8:
953 ins->type = STACK_I8;
954 ins->opcode += ovf3ops_op_map [src1->type];
956 case CEE_CONV_OVF_U8_UN:
957 case CEE_CONV_OVF_I8_UN:
958 ins->type = STACK_I8;
959 ins->opcode += ovf2ops_op_map [src1->type];
963 ins->type = STACK_R8;
964 ins->opcode += unops_op_map [src1->type];
967 ins->type = STACK_R8;
971 ins->type = STACK_I4;
972 ins->opcode += ovfops_op_map [src1->type];
977 ins->type = STACK_PTR;
978 ins->opcode += ovfops_op_map [src1->type];
986 ins->type = bin_num_table [src1->type] [src2->type];
987 ins->opcode += ovfops_op_map [src1->type];
988 if (ins->type == STACK_R8)
989 ins->type = STACK_INV;
991 case OP_LOAD_MEMBASE:
992 ins->type = STACK_PTR;
994 case OP_LOADI1_MEMBASE:
995 case OP_LOADU1_MEMBASE:
996 case OP_LOADI2_MEMBASE:
997 case OP_LOADU2_MEMBASE:
998 case OP_LOADI4_MEMBASE:
999 case OP_LOADU4_MEMBASE:
1000 ins->type = STACK_PTR;
1002 case OP_LOADI8_MEMBASE:
1003 ins->type = STACK_I8;
1005 case OP_LOADR4_MEMBASE:
1006 case OP_LOADR8_MEMBASE:
1007 ins->type = STACK_R8;
1010 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
1014 if (ins->type == STACK_MP)
1015 ins->klass = mono_defaults.object_class;
1020 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
1026 param_table [STACK_MAX] [STACK_MAX] = {
1031 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
1035 switch (args->type) {
1045 for (i = 0; i < sig->param_count; ++i) {
1046 switch (args [i].type) {
1050 if (!sig->params [i]->byref)
1054 if (sig->params [i]->byref)
1056 switch (sig->params [i]->type) {
1057 case MONO_TYPE_CLASS:
1058 case MONO_TYPE_STRING:
1059 case MONO_TYPE_OBJECT:
1060 case MONO_TYPE_SZARRAY:
1061 case MONO_TYPE_ARRAY:
1068 if (sig->params [i]->byref)
1070 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1079 /*if (!param_table [args [i].type] [sig->params [i]->type])
1087 * When we need a pointer to the current domain many times in a method, we
1088 * call mono_domain_get() once and we store the result in a local variable.
1089 * This function returns the variable that represents the MonoDomain*.
1091 inline static MonoInst *
1092 mono_get_domainvar (MonoCompile *cfg)
1094 if (!cfg->domainvar)
1095 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1096 return cfg->domainvar;
1100 * The got_var contains the address of the Global Offset Table when AOT
/*
 * Returns cfg->got_var, lazily creating it (int-sized OP_LOCAL) when
 * the architecture needs an explicit GOT variable and we are compiling
 * AOT code.
 * NOTE(review): the non-AOT early return, the closing brace of the
 * creation branch, and the #else/#endif of MONO_ARCH_NEED_GOT_VAR
 * (presumably returning NULL on other architectures — TODO confirm)
 * are missing from this extraction.
 */
1104 mono_get_got_var (MonoCompile *cfg)
1106 #ifdef MONO_ARCH_NEED_GOT_VAR
1107 if (!cfg->compile_aot)
1109 if (!cfg->got_var) {
1110 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1112 return cfg->got_var;
1119 mono_get_vtable_var (MonoCompile *cfg)
1121 g_assert (cfg->generic_sharing_context);
1123 if (!cfg->rgctx_var) {
1124 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1125 /* force the var to be stack allocated */
1126 cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
1129 return cfg->rgctx_var;
/*
 * type_from_stack_type:
 * Map an instruction's eval-stack type (ins->type) back to a MonoType*.
 * NOTE(review): the return type, braces, 'default:' arm and at least
 * one case label are missing from this extraction — the bare
 * 'return &ins->klass->this_arg;' line below presumably belongs to a
 * lost STACK_MP case; TODO confirm against the full source.
 */
1133 type_from_stack_type (MonoInst *ins) {
1134 switch (ins->type) {
1135 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1136 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1137 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1138 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1140 return &ins->klass->this_arg;
1141 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1142 case STACK_VTYPE: return &ins->klass->byval_arg;
1144 g_error ("stack type %d to monotype not handled\n", ins->type);
/*
 * type_to_stack_type:
 * Map a MonoType to its eval-stack type constant (STACK_I4, STACK_OBJ,
 * ...), after stripping enum wrappers via mono_type_get_underlying_type.
 * NOTE(review): nearly every 'return STACK_*' statement, the byref
 * check and several case labels are missing from this extraction —
 * only the case-label skeleton survives.
 */
1149 static G_GNUC_UNUSED int
1150 type_to_stack_type (MonoType *t)
1152 t = mono_type_get_underlying_type (t);
1156 case MONO_TYPE_BOOLEAN:
1159 case MONO_TYPE_CHAR:
1166 case MONO_TYPE_FNPTR:
1168 case MONO_TYPE_CLASS:
1169 case MONO_TYPE_STRING:
1170 case MONO_TYPE_OBJECT:
1171 case MONO_TYPE_SZARRAY:
1172 case MONO_TYPE_ARRAY:
1180 case MONO_TYPE_VALUETYPE:
1181 case MONO_TYPE_TYPEDBYREF:
1183 case MONO_TYPE_GENERICINST:
/* generic insts over value types behave like value types */
1184 if (mono_type_generic_inst_is_valuetype (t))
1190 g_assert_not_reached ();
/*
 * array_access_to_klass:
 * Map a CIL ldelem.*/stelem.* opcode to the MonoClass of the array
 * element it accesses.
 * NOTE(review): all case labels except CEE_LDELEM_REF/CEE_STELEM_REF
 * were lost in extraction; each bare 'return' below belongs to a
 * missing case (e.g. the byte_class return presumably to
 * CEE_LDELEM_U1/CEE_STELEM_I1 variants — TODO confirm).
 */
1197 array_access_to_klass (int opcode)
1201 return mono_defaults.byte_class;
1203 return mono_defaults.uint16_class;
1206 return mono_defaults.int_class;
1209 return mono_defaults.sbyte_class;
1212 return mono_defaults.int16_class;
1215 return mono_defaults.int32_class;
1217 return mono_defaults.uint32_class;
1220 return mono_defaults.int64_class;
1223 return mono_defaults.single_class;
1226 return mono_defaults.double_class;
1227 case CEE_LDELEM_REF:
1228 case CEE_STELEM_REF:
1229 return mono_defaults.object_class;
1231 g_assert_not_reached ();
1237 * We try to share variables when possible
/*
 * mono_compile_get_interface_var:
 * Return a local variable for eval-stack slot @slot holding a value of
 * @ins's stack type, reusing a previously created one from
 * cfg->intvars when possible (keyed by stack type + slot). Slots past
 * the declared max_stack (from inlining) always get a fresh variable.
 * NOTE(review): declarations of 'pos'/'res'/'vnum', the case labels of
 * the switch, the default arm and the final returns are missing from
 * this extraction.
 */
1240 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1245 /* inlining can result in deeper stacks */
1246 if (slot >= cfg->header->max_stack)
1247 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* index into the shared-variable cache: one entry per (type, slot) */
1249 pos = ins->type - 1 + slot * STACK_MAX;
1251 switch (ins->type) {
1258 if ((vnum = cfg->intvars [pos]))
1259 return cfg->varinfo [vnum];
1260 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1261 cfg->intvars [pos] = res->inst_c0;
1264 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1270 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1273 * Don't use this if a generic_context is set, since that means AOT can't
1274 * look up the method using just the image+token.
1275 * table == 0 means this is a reference made from a wrapper.
1277 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1278 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1279 jump_info_token->image = image;
1280 jump_info_token->token = token;
1281 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1286 * This function is called to handle items that are left on the evaluation stack
1287 * at basic block boundaries. What happens is that we save the values to local variables
1288 * and we reload them later when first entering the target basic block (with the
1289 * handle_loaded_temps () function).
1290 * A single joint point will use the same variables (stored in the array bb->out_stack or
1291 * bb->in_stack, if the basic block is before or after the joint point).
1293 * This function needs to be called _before_ emitting the last instruction of
1294 * the bb (i.e. before emitting a branch).
1295 * If the stack merge fails at a join point, cfg->unverifiable is set.
/* NOTE(review): SOURCE is line-elided here (embedded original line numbers
 * jump), so braces/continue statements between the visible lines are not
 * shown.  Code left byte-identical; only comments added. */
1298 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1301 MonoBasicBlock *bb = cfg->cbb;
1302 MonoBasicBlock *outb;
1303 MonoInst *inst, **locals;
1308 if (cfg->verbose_level > 3)
1309 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* Phase 1: decide which variables hold the spilled stack (bb->out_stack).
 * Reuse an out-edge target's in_stack when one already exists. */
1310 if (!bb->out_scount) {
1311 bb->out_scount = count;
1312 //printf ("bblock %d has out:", bb->block_num);
1314 for (i = 0; i < bb->out_count; ++i) {
1315 outb = bb->out_bb [i];
1316 /* exception handlers are linked, but they should not be considered for stack args */
1317 if (outb->flags & BB_EXCEPTION_HANDLER)
1319 //printf (" %d", outb->block_num);
1320 if (outb->in_stack) {
1322 bb->out_stack = outb->in_stack;
1328 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1329 for (i = 0; i < count; ++i) {
1331 * try to reuse temps already allocated for this purpose, if they occupy the same
1332 * stack slot and if they are of the same type.
1333 * This won't cause conflicts since if 'local' is used to
1334 * store one of the values in the in_stack of a bblock, then
1335 * the same variable will be used for the same outgoing stack
1337 * This doesn't work when inlining methods, since the bblocks
1338 * in the inlined methods do not inherit their in_stack from
1339 * the bblock they are inlined to. See bug #58863 for an
1342 if (cfg->inlined_method)
1343 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1345 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Phase 2: propagate out_stack to each successor's in_stack, flagging the
 * method as unverifiable when the stack depths disagree at a join point. */
1350 for (i = 0; i < bb->out_count; ++i) {
1351 outb = bb->out_bb [i];
1352 /* exception handlers are linked, but they should not be considered for stack args */
1353 if (outb->flags & BB_EXCEPTION_HANDLER)
1355 if (outb->in_scount) {
1356 if (outb->in_scount != bb->out_scount) {
1357 cfg->unverifiable = TRUE;
1360 continue; /* check they are the same locals */
1362 outb->in_scount = count;
1363 outb->in_stack = bb->out_stack;
/* Phase 3: emit the actual stores of the stack items into the chosen temps. */
1366 locals = bb->out_stack;
1368 for (i = 0; i < count; ++i) {
1369 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1370 inst->cil_code = sp [i]->cil_code;
1371 sp [i] = locals [i];
1372 if (cfg->verbose_level > 3)
1373 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1377 * It is possible that the out bblocks already have in_stack assigned, and
1378 * the in_stacks differ. In this case, we will store to all the different
1385 /* Find a bblock which has a different in_stack */
1387 while (bindex < bb->out_count) {
1388 outb = bb->out_bb [bindex];
1389 /* exception handlers are linked, but they should not be considered for stack args */
1390 if (outb->flags & BB_EXCEPTION_HANDLER) {
1394 if (outb->in_stack != locals) {
1395 for (i = 0; i < count; ++i) {
1396 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1397 inst->cil_code = sp [i]->cil_code;
1398 sp [i] = locals [i];
1399 if (cfg->verbose_level > 3)
1400 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1402 locals = outb->in_stack;
1411 /* Emit code which loads interface_offsets [klass->interface_id]
1412 * The array is stored in memory before vtable.
/* Load interface_offsets [klass->interface_id] into intf_reg; per the comment
 * above, the offsets array is stored in memory immediately before the vtable,
 * hence the negative membase offset in the non-AOT path. */
1415 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1417 if (cfg->compile_aot) {
1418 int ioffset_reg = alloc_preg (cfg);
1419 int iid_reg = alloc_preg (cfg);
/* AOT: the IID is not known at compile time, so load a patched, pre-scaled
 * "adjusted IID" constant and add it to the vtable pointer. */
1421 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1422 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1423 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
1426 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
/* Set intf_bit_reg to a nonzero value iff the interface bitmap found at
 * [base_reg + offset] has the bit for klass->interface_id set.  base_reg may
 * point at a MonoClass or MonoVTable (see the two wrappers below). */
1431 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1433 int ibitmap_reg = alloc_preg (cfg);
1434 #ifdef COMPRESSED_INTERFACE_BITMAP
/* Compressed bitmap: decoding inline is not practical, so call the
 * mono_class_interface_match icall with the bitmap pointer and the IID. */
1436 MonoInst *res, *ins;
1437 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1438 MONO_ADD_INS (cfg->cbb, ins);
1440 if (cfg->compile_aot)
1441 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
1443 EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
1444 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1445 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
1447 int ibitmap_byte_reg = alloc_preg (cfg);
1449 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
1451 if (cfg->compile_aot) {
/* AOT: IID unknown at compile time; compute byte index (iid >> 3) and bit
 * mask (1 << (iid & 7)) at runtime from a patched IID constant. */
1452 int iid_reg = alloc_preg (cfg);
1453 int shifted_iid_reg = alloc_preg (cfg);
1454 int ibitmap_byte_address_reg = alloc_preg (cfg);
1455 int masked_iid_reg = alloc_preg (cfg);
1456 int iid_one_bit_reg = alloc_preg (cfg);
1457 int iid_bit_reg = alloc_preg (cfg);
1458 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1459 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1460 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1461 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1462 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1463 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1464 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1465 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT: byte index and mask are compile-time constants. */
1467 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1468 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1474 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1475 * stored in "klass_reg" implements the interface "klass".
/* Bitmap check against the MonoClass stored in klass_reg (see comment above). */
1478 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1480 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
1484 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1485 * stored in "vtable_reg" implements the interface "klass".
/* Bitmap check against the MonoVTable stored in vtable_reg (see comment above). */
1488 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1490 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
1494 * Emit code which checks whenever the interface id of @klass is smaller than
1495 * than the value given by max_iid_reg.
/* Compare max_iid_reg against klass's interface id.  On out-of-range, either
 * branch to false_target or throw InvalidCastException; the conditional that
 * selects between the two is elided from this view — presumably it tests
 * whether false_target is non-NULL (TODO confirm against full source). */
1498 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1499 MonoBasicBlock *false_target)
1501 if (cfg->compile_aot) {
1502 int iid_reg = alloc_preg (cfg);
1503 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1504 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1507 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1509 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1511 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1514 /* Same as above, but obtains max_iid from a vtable */
/* Load max_interface_id from the MonoVTable in vtable_reg, then range-check. */
1516 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1517 MonoBasicBlock *false_target)
1519 int max_iid_reg = alloc_preg (cfg);
1521 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1522 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1525 /* Same as above, but obtains max_iid from a klass */
/* Load max_interface_id from the MonoClass in klass_reg, then range-check. */
1527 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1528 MonoBasicBlock *false_target)
1530 int max_iid_reg = alloc_preg (cfg);
1532 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1533 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/* "isinst"-style subclass test: check the class in klass_reg has klass among
 * its supertypes by indexing supertypes [klass->idepth - 1] and comparing it
 * with klass (taken from klass_ins when given, else an AOT/immediate const).
 * Branches to true_target on match; the idepth guard branches to false_target
 * when the supertypes table is too shallow. */
1537 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1539 int idepth_reg = alloc_preg (cfg);
1540 int stypes_reg = alloc_preg (cfg);
1541 int stype = alloc_preg (cfg);
/* Only emit the runtime idepth check when the static depth exceeds the
 * guaranteed minimum supertable size. */
1543 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1544 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1545 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1546 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1548 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1549 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1551 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1552 } else if (cfg->compile_aot) {
1553 int const_reg = alloc_preg (cfg);
1554 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1555 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1557 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1559 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/* Convenience wrapper: subclass test with the klass given only statically. */
1563 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1565 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
/* Interface cast test against a vtable: range-check the IID, load the bitmap
 * bit, then branch to true_target or throw InvalidCastException on zero (the
 * conditional selecting between branch and throw is elided from this view). */
1569 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1571 int intf_reg = alloc_preg (cfg);
1573 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1574 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1575 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1577 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1579 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1583 * Variant of the above that takes a register to the class, not the vtable.
/* Same as mini_emit_iface_cast but takes a MonoClass register, not a vtable. */
1586 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1588 int intf_bit_reg = alloc_preg (cfg);
1590 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1591 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1592 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1594 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1596 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/* Exact class check: compare klass_reg against klass — via klass_inst->dreg
 * when supplied (the guarding conditional is elided here), an AOT class
 * constant, or an immediate — and throw InvalidCastException on mismatch. */
1600 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1603 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1604 } else if (cfg->compile_aot) {
1605 int const_reg = alloc_preg (cfg);
1606 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1607 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1609 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1611 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Exact class check with the klass given only statically. */
1615 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1617 return mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
/* Compare klass_reg against klass and branch to target with branch_op,
 * instead of throwing like mini_emit_class_check_inst does. */
1621 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1623 if (cfg->compile_aot) {
1624 int const_reg = alloc_preg (cfg);
1625 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1626 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1628 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1630 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
1634 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/* castclass-style check of the class in klass_reg against the static klass.
 * The branch that separates the array path from the non-array path is elided
 * from this view; the visible array path checks rank and element class (with
 * special cases around System.Enum), the non-array path walks supertypes. */
1637 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1640 int rank_reg = alloc_preg (cfg);
1641 int eclass_reg = alloc_preg (cfg);
1643 g_assert (!klass_inst);
/* Array path: the rank must match exactly, else InvalidCastException. */
1644 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
1645 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1646 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1647 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1648 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
/* Element-class dispatch; the enum/Enum special cases mirror the runtime's
 * array covariance rules for enum element types. */
1649 if (klass->cast_class == mono_defaults.object_class) {
1650 int parent_reg = alloc_preg (cfg);
1651 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
1652 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1653 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1654 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1655 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1656 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1657 } else if (klass->cast_class == mono_defaults.enum_class) {
1658 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1659 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1660 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1662 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1663 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1666 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1667 /* Check that the object is a vector too */
1668 int bounds_reg = alloc_preg (cfg);
1669 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
1670 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1671 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-array path: index supertypes [klass->idepth - 1] and require an
 * exact match, throwing on failure via mini_emit_class_check_inst. */
1674 int idepth_reg = alloc_preg (cfg);
1675 int stypes_reg = alloc_preg (cfg);
1676 int stype = alloc_preg (cfg);
1678 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1679 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1680 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1681 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1683 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1684 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1685 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
/* Convenience wrapper: castclass with the klass given only statically. */
1690 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1692 return mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/* Emit an unrolled memset of [destreg + offset, size) with value val.
 * Only val == 0 is supported (asserted below).  Small aligned sizes use
 * immediate stores; otherwise a register holding val is stored in the
 * widest chunks allowed by 'align' (loop structure partly elided here). */
1696 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1700 g_assert (val == 0);
/* Fast path: a single immediate store when size fits in one access. */
1705 if ((size <= 4) && (size <= align)) {
1708 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1711 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1714 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1716 #if SIZEOF_REGISTER == 8
1718 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* General path: materialize val in a register once, then store repeatedly. */
1724 val_reg = alloc_preg (cfg);
1726 if (SIZEOF_REGISTER == 8)
1727 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1729 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
1732 /* This could be optimized further if neccesary */
1734 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1741 #if !NO_UNALIGNED_ACCESS
1742 if (SIZEOF_REGISTER == 8) {
/* Handle a leading unaligned 4-byte chunk, then 8-byte stores. */
1744 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1749 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
/* Tail: progressively narrower stores for the remaining bytes. */
1757 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1762 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1767 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/* Emit an unrolled memcpy of 'size' bytes from [srcreg + soffset] to
 * [destreg + doffset], in the widest load/store pairs 'align' allows
 * (loop structure partly elided in this view). */
1774 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1781 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1782 g_assert (size < 10000);
/* Unaligned case: byte-by-byte copy. */
1785 /* This could be optimized further if neccesary */
1787 cur_reg = alloc_preg (cfg);
1788 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1789 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1796 #if !NO_UNALIGNED_ACCESS
1797 if (SIZEOF_REGISTER == 8) {
/* 8-byte chunks on 64-bit targets that tolerate unaligned access. */
1799 cur_reg = alloc_preg (cfg);
1800 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1801 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
/* Tail: 4-, 2-, then 1-byte copies for the remainder. */
1810 cur_reg = alloc_preg (cfg);
1811 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1812 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1818 cur_reg = alloc_preg (cfg);
1819 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1820 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1826 cur_reg = alloc_preg (cfg);
1827 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1828 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/* Map a method's return type to the matching call opcode, selecting the
 * CALL / CALL_REG / CALLVIRT family by the calli/virt flags and the
 * VOID/F/L/V variant by the (generic-sharing-resolved) return type.
 * Enums and generic instances loop back with the underlying type
 * (the 'goto'/loop construct is elided from this view). */
1836 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
1839 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1842 type = mini_get_basic_type_from_generic (gsctx, type);
1843 switch (type->type) {
1844 case MONO_TYPE_VOID:
1845 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
1848 case MONO_TYPE_BOOLEAN:
1851 case MONO_TYPE_CHAR:
1854 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1858 case MONO_TYPE_FNPTR:
1859 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1860 case MONO_TYPE_CLASS:
1861 case MONO_TYPE_STRING:
1862 case MONO_TYPE_OBJECT:
1863 case MONO_TYPE_SZARRAY:
1864 case MONO_TYPE_ARRAY:
1865 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1868 return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
1871 return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
1872 case MONO_TYPE_VALUETYPE:
1873 if (type->data.klass->enumtype) {
/* Enum: retry with its underlying integral type. */
1874 type = mono_class_enum_basetype (type->data.klass);
1877 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1878 case MONO_TYPE_TYPEDBYREF:
1879 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1880 case MONO_TYPE_GENERICINST:
1881 type = &type->data.generic_class->container_class->byval_arg;
1884 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
1890 * target_type_is_incompatible:
1891 * @cfg: MonoCompile context
1893 * Check that the item @arg on the evaluation stack can be stored
1894 * in the target type (can be a local, or field, etc).
1895 * The cfg arg can be used to check if we need verification or just
1898 * Returns: non-0 value if arg can't be stored on a target.
/* See comment above: returns non-0 when the stack item 'arg' cannot be
 * stored into 'target'.  Each case validates arg->type (the evaluation-stack
 * type) against the target's underlying MonoType; return statements between
 * the visible lines are elided in this view. */
1901 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1903 MonoType *simple_type;
1906 if (target->byref) {
1907 /* FIXME: check that the pointed to types match */
1908 if (arg->type == STACK_MP)
1909 return arg->klass != mono_class_from_mono_type (target);
1910 if (arg->type == STACK_PTR)
1915 simple_type = mono_type_get_underlying_type (target);
1916 switch (simple_type->type) {
1917 case MONO_TYPE_VOID:
1921 case MONO_TYPE_BOOLEAN:
1924 case MONO_TYPE_CHAR:
1927 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1931 /* STACK_MP is needed when setting pinned locals */
1932 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1937 case MONO_TYPE_FNPTR:
1938 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1941 case MONO_TYPE_CLASS:
1942 case MONO_TYPE_STRING:
1943 case MONO_TYPE_OBJECT:
1944 case MONO_TYPE_SZARRAY:
1945 case MONO_TYPE_ARRAY:
1946 if (arg->type != STACK_OBJ)
1948 /* FIXME: check type compatibility */
1952 if (arg->type != STACK_I8)
1957 if (arg->type != STACK_R8)
1960 case MONO_TYPE_VALUETYPE:
/* Value types must match both the stack kind and the exact klass. */
1961 if (arg->type != STACK_VTYPE)
1963 klass = mono_class_from_mono_type (simple_type);
1964 if (klass != arg->klass)
1967 case MONO_TYPE_TYPEDBYREF:
1968 if (arg->type != STACK_VTYPE)
1970 klass = mono_class_from_mono_type (simple_type);
1971 if (klass != arg->klass)
1974 case MONO_TYPE_GENERICINST:
1975 if (mono_type_generic_inst_is_valuetype (simple_type)) {
1976 if (arg->type != STACK_VTYPE)
1978 klass = mono_class_from_mono_type (simple_type);
1979 if (klass != arg->klass)
1983 if (arg->type != STACK_OBJ)
1985 /* FIXME: check type compatibility */
1989 case MONO_TYPE_MVAR:
1990 /* FIXME: all the arguments must be references for now,
1991 * later look inside cfg and see if the arg num is
1992 * really a reference
1994 g_assert (cfg->generic_sharing_context);
1995 if (arg->type != STACK_OBJ)
1999 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
2005 * Prepare arguments for passing to a function call.
2006 * Return a non-zero value if the arguments can't be passed to the given
2008 * The type checks are not yet complete and some conversions may need
2009 * casts on 32 or 64 bit architectures.
2011 * FIXME: implement this using target_type_is_incompatible ()
/* See comment above: returns non-zero when 'args' cannot be passed to a call
 * with signature 'sig'.  Per-parameter stack-type checks mirror
 * target_type_is_incompatible; returns between visible lines are elided. */
2014 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
2016 MonoType *simple_type;
/* Implicit 'this' (args [0]) must be an object, managed ptr or native ptr. */
2020 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
2024 for (i = 0; i < sig->param_count; ++i) {
2025 if (sig->params [i]->byref) {
2026 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2030 simple_type = sig->params [i];
2031 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
2033 switch (simple_type->type) {
2034 case MONO_TYPE_VOID:
2039 case MONO_TYPE_BOOLEAN:
2042 case MONO_TYPE_CHAR:
2045 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2051 case MONO_TYPE_FNPTR:
2052 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2055 case MONO_TYPE_CLASS:
2056 case MONO_TYPE_STRING:
2057 case MONO_TYPE_OBJECT:
2058 case MONO_TYPE_SZARRAY:
2059 case MONO_TYPE_ARRAY:
2060 if (args [i]->type != STACK_OBJ)
2065 if (args [i]->type != STACK_I8)
2070 if (args [i]->type != STACK_R8)
2073 case MONO_TYPE_VALUETYPE:
2074 if (simple_type->data.klass->enumtype) {
/* Enum: re-check against the underlying integral type. */
2075 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2078 if (args [i]->type != STACK_VTYPE)
2081 case MONO_TYPE_TYPEDBYREF:
2082 if (args [i]->type != STACK_VTYPE)
2085 case MONO_TYPE_GENERICINST:
2086 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2090 g_error ("unknown type 0x%02x in check_call_signature",
/* Map a *CALLVIRT opcode to the corresponding direct *CALL opcode.
 * NOTE(review): most case/return lines are elided from this view;
 * only the VOIDCALLVIRT case and the unreachable default are visible. */
2098 callvirt_to_call (int opcode)
2103 case OP_VOIDCALLVIRT:
2112 g_assert_not_reached ();
/* Map a *CALLVIRT opcode to the corresponding *CALL_MEMBASE opcode, used
 * when the call target is loaded from a vtable/IMT slot.  The switch header
 * and some case labels are elided from this view. */
2119 callvirt_to_call_membase (int opcode)
2123 return OP_CALL_MEMBASE;
2124 case OP_VOIDCALLVIRT:
2125 return OP_VOIDCALL_MEMBASE;
2127 return OP_FCALL_MEMBASE;
2129 return OP_LCALL_MEMBASE;
2131 return OP_VCALL_MEMBASE;
2133 g_assert_not_reached ();
2139 #ifdef MONO_ARCH_HAVE_IMT
/* Materialize the IMT "method" argument for an interface call: either the
 * incoming imt_arg, an AOT method constant, or a raw method pointer.  The
 * LLVM path records the register on the call; the JIT path pins it to
 * MONO_ARCH_IMT_REG (or keeps it alive via a dummy outarg). */
2141 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
2145 if (COMPILE_LLVM (cfg)) {
2146 method_reg = alloc_preg (cfg);
2149 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2150 } else if (cfg->compile_aot) {
2151 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
2154 MONO_INST_NEW (cfg, ins, OP_PCONST);
2155 ins->inst_p0 = call->method;
2156 ins->dreg = method_reg;
2157 MONO_ADD_INS (cfg->cbb, ins);
2161 call->imt_arg_reg = method_reg;
2163 #ifdef MONO_ARCH_IMT_REG
2164 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2166 /* Need this to keep the IMT arg alive */
2167 mono_call_inst_add_outarg_reg (cfg, call, method_reg, 0, FALSE);
/* Non-LLVM path: same three sources for the method value. */
2172 #ifdef MONO_ARCH_IMT_REG
2173 method_reg = alloc_preg (cfg);
2176 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2177 } else if (cfg->compile_aot) {
2178 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
2181 MONO_INST_NEW (cfg, ins, OP_PCONST);
2182 ins->inst_p0 = call->method;
2183 ins->dreg = method_reg;
2184 MONO_ADD_INS (cfg->cbb, ins);
2187 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/* Arch fallback when no dedicated IMT register exists. */
2189 mono_arch_emit_imt_argument (cfg, call, imt_arg);
/* Allocate a MonoJumpInfo from 'mp' describing a patch of 'type' at 'ip'
 * targeting 'target'.  (Field assignments for ip/type are elided here.) */
2194 static MonoJumpInfo *
2195 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2197 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2201 ji->data.target = target;
/* Build a MonoCallInst for a call with signature 'sig' and arguments 'args'.
 * Selects the opcode (tail call vs. ret-type-derived), sets up the vtype
 * return (vret_var or an OP_OUTARG_VTRETADDR temp), converts R4 args for
 * soft-float targets, and lets the backend (LLVM or arch) emit the arg
 * moves.  Several guards between visible lines are elided in this view. */
2206 inline static MonoCallInst *
2207 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2208 MonoInst **args, int calli, int virtual, int tail, int rgctx)
2211 #ifdef MONO_ARCH_SOFT_FLOAT
2216 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2218 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2221 call->signature = sig;
2222 call->rgctx_reg = rgctx;
2224 type_to_eval_stack_type ((cfg), sig->ret, &call->inst);
/* Struct return: first branch presumably the arch-provided vret_addr case,
 * second the OUTARG_VTRETADDR temp case — the guard distinguishing them is
 * elided here (TODO confirm against full source). */
2227 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2228 call->vret_var = cfg->vret_addr;
2229 //g_assert_not_reached ();
2231 } else if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2232 MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
2235 temp->backend.is_pinvoke = sig->pinvoke;
2238 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2239 * address of return value to increase optimization opportunities.
2240 * Before vtype decomposition, the dreg of the call ins itself represents the
2241 * fact the call modifies the return value. After decomposition, the call will
2242 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2243 * will be transformed into an LDADDR.
2245 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2246 loada->dreg = alloc_preg (cfg);
2247 loada->inst_p0 = temp;
2248 /* We reference the call too since call->dreg could change during optimization */
2249 loada->inst_p1 = call;
2250 MONO_ADD_INS (cfg->cbb, loada);
2252 call->inst.dreg = temp->dreg;
2254 call->vret_var = loada;
2255 } else if (!MONO_TYPE_IS_VOID (sig->ret))
2256 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2258 #ifdef MONO_ARCH_SOFT_FLOAT
2259 if (COMPILE_SOFT_FLOAT (cfg)) {
2261 * If the call has a float argument, we would need to do an r8->r4 conversion using
2262 * an icall, but that cannot be done during the call sequence since it would clobber
2263 * the call registers + the stack. So we do it before emitting the call.
2265 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2267 MonoInst *in = call->args [i];
2269 if (i >= sig->hasthis)
2270 t = sig->params [i - sig->hasthis];
2272 t = &mono_defaults.int_class->byval_arg;
2273 t = mono_type_get_underlying_type (t);
2275 if (!t->byref && t->type == MONO_TYPE_R4) {
2276 MonoInst *iargs [1];
2280 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2282 /* The result will be in an int vreg */
2283 call->args [i] = conv;
/* Hand the call to the backend to place the arguments. */
2290 if (COMPILE_LLVM (cfg))
2291 mono_llvm_emit_call (cfg, call);
2293 mono_arch_emit_call (cfg, call);
2295 mono_arch_emit_call (cfg, call);
2298 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2299 cfg->flags |= MONO_CFG_HAS_CALLS;
/* Attach the runtime-generic-context argument to 'call': via the dedicated
 * MONO_ARCH_RGCTX_REG when the target defines one, otherwise by recording
 * the vreg in call->rgctx_arg_reg. */
2305 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2307 #ifdef MONO_ARCH_RGCTX_REG
2308 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2309 cfg->uses_rgctx_reg = TRUE;
2310 call->rgctx_reg = TRUE;
2312 call->rgctx_arg_reg = rgctx_reg;
/* Emit an indirect call through 'addr', optionally carrying an rgctx
 * argument.  The guards around the rgctx handling are elided here. */
2319 inline static MonoInst*
2320 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *rgctx_arg)
2326 rgctx_reg = mono_alloc_preg (cfg);
2327 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2330 call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE);
2332 call->inst.sreg1 = addr->dreg;
2334 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2337 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2339 return (MonoInst*)call;
2343 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, int rgctx_type);
2345 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, int rgctx_type);
/* Emit a (possibly virtual) managed call to 'method'.  Handles remoting
 * wrappers, string ctors, delegate Invoke fast path, devirtualization of
 * non-virtual/final methods, and IMT/vtable dispatch.  Numerous guards
 * between the visible lines are elided in this view; comments below only
 * describe what the visible code establishes. */
2348 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2349 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *rgctx_arg)
2351 gboolean might_be_remote;
2352 gboolean virtual = this != NULL;
2353 gboolean enable_for_aot = TRUE;
2359 rgctx_reg = mono_alloc_preg (cfg);
2360 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2363 if (method->string_ctor) {
2364 /* Create the real signature */
2365 /* FIXME: Cache these */
2366 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2367 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
/* Remoting: transparent-proxy targets need the remoting-invoke wrapper. */
2372 might_be_remote = this && sig->hasthis &&
2373 (method->klass->marshalbyref || method->klass == mono_defaults.object_class) &&
2374 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && !MONO_CHECK_THIS (this);
2376 context_used = mono_method_check_context_used (method);
2377 if (might_be_remote && context_used) {
2380 g_assert (cfg->generic_sharing_context);
2382 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2384 return mono_emit_calli (cfg, sig, args, addr, NULL);
2387 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, FALSE, rgctx_arg ? TRUE : FALSE);
2389 if (might_be_remote)
2390 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2392 call->method = method;
2393 call->inst.flags |= MONO_INST_HAS_METHOD;
2394 call->inst.inst_left = this;
2397 int vtable_reg, slot_reg, this_reg;
2399 this_reg = this->dreg;
2401 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
/* Fast path: MulticastDelegate.Invoke calls through delegate->invoke_impl. */
2402 if ((method->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
2403 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2405 /* Make a call to delegate->invoke_impl */
2406 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2407 call->inst.inst_basereg = this_reg;
2408 call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2409 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2411 return (MonoInst*)call;
/* Devirtualize: non-virtual, or final and not a remoting wrapper. */
2415 if ((!cfg->compile_aot || enable_for_aot) &&
2416 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2417 (MONO_METHOD_IS_FINAL (method) &&
2418 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2419 !(method->klass->marshalbyref && context_used)) {
2421 * the method is not virtual, we just need to ensure this is not null
2422 * and then we can call the method directly.
2424 if (method->klass->marshalbyref || method->klass == mono_defaults.object_class) {
2426 * The check above ensures method is not gshared, this is needed since
2427 * gshared methods can't have wrappers.
2429 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2432 if (!method->string_ctor)
2433 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2435 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2437 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2439 return (MonoInst*)call;
2442 if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2444 * the method is virtual, but we can statically dispatch since either
2445 * it's class or the method itself are sealed.
2446 * But first we need to ensure it's not a null reference.
2448 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2450 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2451 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2453 return (MonoInst*)call;
/* True virtual dispatch: load the vtable, then the slot. */
2456 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2458 vtable_reg = alloc_preg (cfg);
2459 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2460 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2462 #ifdef MONO_ARCH_HAVE_IMT
/* IMT dispatch: slot is at a negative offset before the vtable. */
2464 guint32 imt_slot = mono_method_get_imt_slot (method);
2465 emit_imt_argument (cfg, call, imt_arg);
2466 slot_reg = vtable_reg;
2467 call->inst.inst_offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2470 if (slot_reg == -1) {
/* No IMT: use the interface-offsets table to locate the slot. */
2471 slot_reg = alloc_preg (cfg);
2472 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2473 call->inst.inst_offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
2476 slot_reg = vtable_reg;
2477 call->inst.inst_offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
2478 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2479 #ifdef MONO_ARCH_HAVE_IMT
2481 g_assert (mono_method_signature (method)->generic_param_count);
2482 emit_imt_argument (cfg, call, imt_arg);
2487 call->inst.sreg1 = slot_reg;
2488 call->virtual = TRUE;
2491 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2494 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2496 return (MonoInst*)call;
/*
 * mono_emit_method_call:
 *   Convenience wrapper: emit a call to METHOD using its own signature,
 *   with no imt/rgctx arguments (the two trailing NULLs).
 */
2500 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2502 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), args, this, NULL, NULL);
/*
 * mono_emit_native_call:
 *   Emit a direct call to the native function FUNC with signature SIG.
 *   The call instruction is appended to the current basic block and
 *   returned as a MonoInst*.
 */
2506 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
/* All four flag arguments (virtual/calli/tail/rgctx - TODO confirm meaning against
 * mono_emit_call_args's declaration, which is outside this chunk) are FALSE. */
2513 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE);
2516 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2518 return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 *   Emit a call to the JIT icall registered for the address FUNC.
 *   The icall's wrapper and signature are looked up from the icall table.
 */
2522 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2524 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2528 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2532 * mono_emit_abs_call:
2534 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2536 inline static MonoInst*
2537 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2538 MonoMethodSignature *sig, MonoInst **args)
2540 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2544 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
/* Lazily create the table mapping patch-info "addresses" back to their
 * MonoJumpInfo; pointer identity hashing (NULL, NULL) is sufficient since
 * the key is the mempool-allocated ji itself. */
2547 if (cfg->abs_patches == NULL)
2548 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2549 g_hash_table_insert (cfg->abs_patches, ji, ji);
2550 ins = mono_emit_native_call (cfg, ji, sig, args);
/* Mark the call target as a patch info rather than a real fptr, so later
 * passes know to resolve it through cfg->abs_patches. */
2551 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * mono_emit_widen_call_res:
 *   Sign/zero-extend a small-integer call result INS when needed.
 *   Native (pinvoke) code — and LLVM-compiled code — may return sub-register
 *   sized integers with garbage in the upper bits, so an explicit widening
 *   conversion is emitted based on the return type's load opcode.
 */
2556 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2558 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2559 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2563 * Native code might return non register sized integers
2564 * without initializing the upper bits.
/* Map the return type to its membase load opcode to decide which
 * widening conversion (if any) is required. */
2566 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2567 case OP_LOADI1_MEMBASE:
2568 widen_op = OP_ICONV_TO_I1;
2570 case OP_LOADU1_MEMBASE:
2571 widen_op = OP_ICONV_TO_U1;
2573 case OP_LOADI2_MEMBASE:
2574 widen_op = OP_ICONV_TO_I2;
2576 case OP_LOADU2_MEMBASE:
2577 widen_op = OP_ICONV_TO_U2;
/* -1 means the result is already register sized; nothing to emit. */
2583 if (widen_op != -1) {
2584 int dreg = alloc_preg (cfg);
2587 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
/* Preserve the stack type of the original call result. */
2588 widen->type = ins->type;
/*
 * get_memcpy_method:
 *   Return the managed String.memcpy(,,) helper from corlib, caching it in a
 *   static on first use. Aborts with g_error if the corlib is too old to
 *   provide it.
 */
2598 get_memcpy_method (void)
2600 static MonoMethod *memcpy_method = NULL;
2601 if (!memcpy_method) {
2602 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2604 g_error ("Old corlib found. Install a new one");
2606 return memcpy_method;
/*
 * create_write_barrier_bitmap:
 *   Fill WB_BITMAP with one bit per pointer-sized slot of KLASS (starting at
 *   byte OFFSET) marking which slots hold object references and therefore
 *   need a write barrier. Recurses into embedded value types that contain
 *   references. Static fields are skipped.
 */
2610 create_write_barrier_bitmap (MonoClass *klass, unsigned *wb_bitmap, int offset)
2612 MonoClassField *field;
2613 gpointer iter = NULL;
2615 while ((field = mono_class_get_fields (klass, &iter))) {
2618 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
/* For valuetypes field->offset includes the (absent) object header;
 * subtract it to get the offset within the unboxed data. */
2620 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
2621 if (mono_type_is_reference (field->type)) {
/* Reference fields must be pointer aligned for the bitmap encoding. */
2622 g_assert ((foffset % SIZEOF_VOID_P) == 0);
2623 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
2625 /*FIXME support nested value types so this works for: struct X { Y y; int z;} struct Y { object a,b; }*/
2626 MonoClass *field_class = mono_class_from_mono_type (field->type);
2627 if (field_class->has_references)
2628 create_write_barrier_bitmap (field_class, wb_bitmap, offset + foffset);
/*
 * emit_write_barrier:
 *   Emit a GC write barrier for a reference store through PTR. The stored
 *   value is either VALUE (a MonoInst*) or, when VALUE is NULL, the raw
 *   register VALUE_REG. No-op unless cfg->gen_write_barriers is set.
 *   Three strategies, in order of preference:
 *     1) a single OP_CARD_TABLE_WBARRIER instruction (arch support, JIT only);
 *     2) inline card-table marking (shift, optional mask, store 1 byte);
 *     3) a call to the GC's managed write-barrier method.
 */
2634 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value, int value_reg)
2636 int card_table_shift_bits;
2637 gpointer card_table_mask;
2639 MonoInst *dummy_use;
2640 int nursery_shift_bits;
2641 size_t nursery_size;
2642 gboolean has_card_table_wb = FALSE;
2644 if (!cfg->gen_write_barriers)
2647 card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
2649 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
2651 #ifdef MONO_ARCH_HAVE_CARD_TABLE_WBARRIER
2652 has_card_table_wb = TRUE;
/* Strategy 1: dedicated arch instruction (not available under AOT). */
2655 if (has_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0) {
2658 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
2659 wbarrier->sreg1 = ptr->dreg;
2661 wbarrier->sreg2 = value->dreg;
2663 wbarrier->sreg2 = value_reg;
2664 MONO_ADD_INS (cfg->cbb, wbarrier);
/* Strategy 2: open-coded card marking. */
2665 } else if (card_table) {
2666 int offset_reg = alloc_preg (cfg);
2667 int card_reg = alloc_preg (cfg);
/* card index = ptr >> shift, optionally masked (wrapping card tables). */
2670 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
2671 if (card_table_mask)
2672 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
2674 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
2675 * IMM's larger than 32bits.
2677 if (cfg->compile_aot) {
2678 MONO_EMIT_NEW_AOTCONST (cfg, card_reg, NULL, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR);
2680 MONO_INST_NEW (cfg, ins, OP_PCONST);
2681 ins->inst_p0 = card_table;
2682 ins->dreg = card_reg;
2683 MONO_ADD_INS (cfg->cbb, ins);
/* Mark the card dirty: *(card_table + index) = 1. */
2686 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
2687 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
/* Strategy 3: fall back to the GC-provided managed barrier method. */
2689 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
2690 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
/* Keep the stored value alive across the barrier with a dummy use. */
2694 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
2696 MONO_INST_NEW (cfg, dummy_use, OP_DUMMY_USE);
2697 dummy_use->sreg1 = value_reg;
2698 MONO_ADD_INS (cfg->cbb, dummy_use);
/*
 * mono_emit_wb_aware_memcpy:
 *   Try to emit an unrolled, write-barrier-aware copy of a valuetype of type
 *   KLASS. iargs[0]/iargs[1] hold the dest/src addresses. Returns early
 *   (presumably FALSE — the return statements are elided in this view) when
 *   the shape is unsuitable: alignment below pointer size, or size above
 *   32 pointer words (the wb bitmap limit). For sizes above 5 pointer words
 *   a single icall to mono_gc_wbarrier_value_copy_bitmap is emitted instead
 *   of unrolling.
 */
2703 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
2705 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
2706 unsigned need_wb = 0;
2711 /*types with references can't have alignment smaller than sizeof(void*) */
2712 if (align < SIZEOF_VOID_P)
2715 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
2716 if (size > 32 * SIZEOF_VOID_P)
/* Compute which pointer slots need barriers. */
2719 create_write_barrier_bitmap (klass, &need_wb, 0);
2721 /* We don't unroll more than 5 stores to avoid code bloat. */
2722 if (size > 5 * SIZEOF_VOID_P) {
2723 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
/* Round size up to a multiple of the pointer size. */
2724 size += (SIZEOF_VOID_P - 1);
2725 size &= ~(SIZEOF_VOID_P - 1);
2727 EMIT_NEW_ICONST (cfg, iargs [2], size);
2728 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
2729 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
2733 destreg = iargs [0]->dreg;
2734 srcreg = iargs [1]->dreg;
2737 dest_ptr_reg = alloc_preg (cfg);
2738 tmp_reg = alloc_preg (cfg);
/* Running destination pointer, advanced one word per store. */
2741 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
/* Unrolled pointer-sized copy loop; barrier only for slots flagged in need_wb
 * (the bit test is elided in this view — TODO confirm against full source). */
2743 while (size >= SIZEOF_VOID_P) {
2744 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, tmp_reg, srcreg, offset);
2745 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
2748 emit_write_barrier (cfg, iargs [0], NULL, tmp_reg);
2750 offset += SIZEOF_VOID_P;
2751 size -= SIZEOF_VOID_P;
2754 /*tmp += sizeof (void*)*/
2755 if (size >= SIZEOF_VOID_P) {
2756 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
2757 MONO_ADD_INS (cfg->cbb, iargs [0]);
2761 /* Those cannot be references since size < sizeof (void*) */
/* Copy the sub-pointer-sized tail with progressively smaller loads/stores. */
2763 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
2764 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
2770 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
2771 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
2777 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
2778 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
2787 * Emit code to copy a valuetype of type @klass whose address is stored in
2788 * @src->dreg to memory whose address is stored at @dest->dreg.
2791 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
2793 MonoInst *iargs [4];
2796 MonoMethod *memcpy_method;
2800 * This check breaks with spilled vars... need to handle it during verification anyway.
2801 * g_assert (klass && klass == src->klass && klass == dest->klass);
/* Size/alignment differ for the native (marshalled) vs managed layout. */
2805 n = mono_class_native_size (klass, &align);
2807 n = mono_class_value_size (klass, &align);
2809 /* if native is true there should be no references in the struct */
/* Reference-carrying structs need write barriers unless storing to the stack. */
2810 if (cfg->gen_write_barriers && klass->has_references && !native) {
2811 /* Avoid barriers when storing to the stack */
2812 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
2813 (dest->opcode == OP_LDADDR))) {
2814 int context_used = 0;
2819 if (cfg->generic_sharing_context)
2820 context_used = mono_class_check_context_used (klass);
2822 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
2823 if ((cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
/* Shared generic code: load the klass from the rgctx at runtime. */
2825 } else if (context_used) {
2826 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
2828 if (cfg->compile_aot) {
2829 EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
2831 EMIT_NEW_PCONST (cfg, iargs [2], klass);
/* Ensure the GC descriptor exists before mono_value_copy uses it. */
2832 mono_class_compute_gc_descriptor (klass);
2836 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* No-barrier path: small copies are inlined, larger ones call String.memcpy. */
2841 if ((cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
2842 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
2843 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
2847 EMIT_NEW_ICONST (cfg, iargs [2], n);
2849 memcpy_method = get_memcpy_method ();
2850 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *   Return the managed String.memset(,,) helper from corlib, caching it in a
 *   static on first use. Aborts with g_error if the corlib lacks it.
 */
2855 get_memset_method (void)
2857 static MonoMethod *memset_method = NULL;
2858 if (!memset_method) {
2859 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
2861 g_error ("Old corlib found. Install a new one");
2863 return memset_method;
/*
 * mini_emit_initobj:
 *   Emit code zero-initializing a valuetype of type KLASS at address
 *   DEST->dreg. Small types (<= 5 pointers) are memset inline; larger ones
 *   call the managed String.memset helper.
 */
2867 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
2869 MonoInst *iargs [3];
2872 MonoMethod *memset_method;
2874 /* FIXME: Optimize this for the case when dest is an LDADDR */
2876 mono_class_init (klass);
2877 n = mono_class_value_size (klass, &align);
2879 if (n <= sizeof (gpointer) * 5) {
2880 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
2883 memset_method = get_memset_method ();
/* args: [0] = dest (set in elided code), [1] = fill byte 0, [2] = size. */
2885 EMIT_NEW_ICONST (cfg, iargs [1], 0);
2886 EMIT_NEW_ICONST (cfg, iargs [2], n);
2887 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 *   Emit IR loading the runtime generic context for METHOD. The source
 *   depends on how the method is shared:
 *     - method-inflated sharing: load the MRGCTX from the vtable var;
 *     - static/valuetype methods: load the vtable var (deref to the class
 *       vtable when the method also has a method context);
 *     - otherwise: load 'this' and read the vtable from the object header.
 */
2892 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
2894 MonoInst *this = NULL;
2896 g_assert (cfg->generic_sharing_context);
2898 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
2899 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
2900 !method->klass->valuetype)
2901 EMIT_NEW_ARGLOAD (cfg, this, 0);
2903 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
2904 MonoInst *mrgctx_loc, *mrgctx_var;
2907 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
2909 mrgctx_loc = mono_get_vtable_var (cfg);
2910 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
2913 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
2914 MonoInst *vtable_loc, *vtable_var;
2918 vtable_loc = mono_get_vtable_var (cfg);
2919 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
2921 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
/* The var actually holds an MRGCTX; fetch its class_vtable field. */
2922 MonoInst *mrgctx_var = vtable_var;
2925 vtable_reg = alloc_preg (cfg);
2926 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
2927 vtable_var->type = STACK_PTR;
/* Instance method on a reference type: vtable comes from 'this'. */
2935 vtable_reg = alloc_preg (cfg);
2936 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 *   Allocate (from MP) and populate a MonoJumpInfoRgctxEntry describing a
 *   lazy rgctx fetch: which method, whether the context lives in an MRGCTX,
 *   the patch describing the data to fetch, and the rgctx info slot type.
 */
2941 static MonoJumpInfoRgctxEntry *
2942 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, int info_type)
2944 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
2945 res->method = method;
2946 res->in_mrgctx = in_mrgctx;
2947 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
2948 res->data->type = patch_type;
2949 res->data->data.target = patch_data;
2950 res->info_type = info_type;
/*
 * emit_rgctx_fetch:
 *   Emit a call to the lazy rgctx fetch trampoline, passing RGCTX as the
 *   argument; ENTRY describes what to fetch and is resolved via the
 *   MONO_PATCH_INFO_RGCTX_FETCH patch.
 */
2955 static inline MonoInst*
2956 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
2958 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 *   Emit IR fetching the rgctx slot of type RGCTX_TYPE for KLASS from the
 *   current method's runtime generic context.
 */
2962 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
2963 MonoClass *klass, int rgctx_type)
2965 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
2966 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2968 return emit_rgctx_fetch (cfg, rgctx, entry);
2972 * emit_get_rgctx_method:
2974 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
2975 * normal constants, else emit a load from the rgctx.
2978 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
2979 MonoMethod *cmethod, int rgctx_type)
2981 if (!context_used) {
/* Non-shared code: the method is known at compile time, emit a constant. */
2984 switch (rgctx_type) {
2985 case MONO_RGCTX_INFO_METHOD:
2986 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
2988 case MONO_RGCTX_INFO_METHOD_RGCTX:
2989 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
2992 g_assert_not_reached ();
/* Shared code: fetch the slot lazily through the rgctx trampoline. */
2995 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
2996 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2998 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *   Emit IR fetching the rgctx slot of type RGCTX_TYPE for FIELD from the
 *   current method's runtime generic context.
 */
3003 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3004 MonoClassField *field, int rgctx_type)
3006 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3007 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3009 return emit_rgctx_fetch (cfg, rgctx, entry);
3013 * On return the caller must check @klass for load errors.
/*
 * emit_generic_class_init:
 *   Emit a call to the generic class-init trampoline for KLASS, passing the
 *   vtable (from the rgctx under generic sharing, as a constant otherwise).
 *   On architectures with MONO_ARCH_VTABLE_REG the vtable is passed in that
 *   fixed register.
 */
3016 emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
3018 MonoInst *vtable_arg;
3020 int context_used = 0;
3022 if (cfg->generic_sharing_context)
3023 context_used = mono_class_check_context_used (klass);
3026 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
3027 klass, MONO_RGCTX_INFO_VTABLE);
3029 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3033 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
/* LLVM needs a different trampoline signature. */
3036 if (COMPILE_LLVM (cfg))
3037 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline_llvm, &vtable_arg);
3039 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
3040 #ifdef MONO_ARCH_VTABLE_REG
3041 mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
3042 cfg->uses_vtable_reg = TRUE;
/*
 * save_cast_details:
 *   When --debug=casts is enabled, record the source class (read from the
 *   object in OBJ_REG) and the target KLASS into the thread's MonoJitTlsData
 *   so a subsequent InvalidCastException can report both types.
 */
3049 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg)
3051 if (mini_get_debug_options ()->better_cast_details) {
3052 int to_klass_reg = alloc_preg (cfg);
3053 int vtable_reg = alloc_preg (cfg);
3054 int klass_reg = alloc_preg (cfg);
3055 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
/* No TLS intrinsic on this platform: the feature cannot work. */
3058 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
3062 MONO_ADD_INS (cfg->cbb, tls_get);
/* obj -> vtable -> klass, then store from/to classes into the TLS struct. */
3063 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3064 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3066 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
3067 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
3068 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
3073 reset_cast_details (MonoCompile *cfg)
3075 /* Reset the variables holding the cast details */
3076 if (mini_get_debug_options ()->better_cast_details) {
3077 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3079 MONO_ADD_INS (cfg->cbb, tls_get);
3080 /* It is enough to reset the from field */
3081 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3086 * On return the caller must check @array_class for load errors
/*
 * mini_emit_check_array_type:
 *   Emit a runtime check that OBJ's dynamic type is exactly ARRAY_CLASS,
 *   throwing ArrayTypeMismatchException otherwise. The comparison strategy
 *   varies: class pointers under MONO_OPT_SHARED, an rgctx-loaded vtable
 *   under generic sharing, and vtable constants/immediates otherwise.
 */
3089 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3091 int vtable_reg = alloc_preg (cfg);
3092 int context_used = 0;
3094 if (cfg->generic_sharing_context)
3095 context_used = mono_class_check_context_used (array_class);
3097 save_cast_details (cfg, array_class, obj->dreg);
/* Faulting load: also acts as the null check on obj. */
3099 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3101 if (cfg->opt & MONO_OPT_SHARED) {
3102 int class_reg = alloc_preg (cfg);
3103 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3104 if (cfg->compile_aot) {
3105 int klass_reg = alloc_preg (cfg);
3106 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
3107 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
3109 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
3111 } else if (context_used) {
3112 MonoInst *vtable_ins;
3114 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3115 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
3117 if (cfg->compile_aot) {
/* Under AOT the vtable pointer cannot be an immediate; materialize it. */
3121 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3123 vt_reg = alloc_preg (cfg);
3124 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3125 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
3128 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3130 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
3134 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
3136 reset_cast_details (cfg);
3140 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3141 * generic code is generated.
3144 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
3146 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3149 MonoInst *rgctx, *addr;
3151 /* FIXME: What if the class is shared? We might not
3152 have to get the address of the method from the
/* Shared generic code: look up Unbox's code address via the rgctx and
 * perform an indirect call. */
3154 addr = emit_get_rgctx_method (cfg, context_used, method,
3155 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3157 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3159 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
/* Non-shared: direct call to Nullable<T>.Unbox. */
3161 return mono_emit_method_call (cfg, method, &val, NULL);
/*
 * handle_unbox:
 *   Emit the unbox sequence for sp[0]: verify the object is a non-array
 *   instance whose element class matches KLASS (InvalidCastException
 *   otherwise), then return the address of the boxed payload
 *   (obj + sizeof (MonoObject)) as a STACK_MP value.
 */
3166 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3170 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3171 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3172 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3173 int rank_reg = alloc_dreg (cfg ,STACK_I4);
3175 obj_reg = sp [0]->dreg;
/* Faulting load doubles as the null check. */
3176 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3177 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3179 /* FIXME: generics */
3180 g_assert (klass->rank == 0);
/* Arrays can never unbox to a valuetype: rank must be zero. */
3183 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3184 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3186 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3187 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
/* Shared generic code: compare against the rgctx-loaded element class. */
3190 MonoInst *element_class;
3192 /* This assertion is from the unboxcast insn */
3193 g_assert (klass->rank == 0);
3195 element_class = emit_get_rgctx_klass (cfg, context_used,
3196 klass->element_class, MONO_RGCTX_INFO_KLASS);
3198 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3199 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3201 save_cast_details (cfg, klass->element_class, obj_reg);
3202 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
3203 reset_cast_details (cfg);
/* Result: pointer past the object header, i.e. the unboxed data. */
3206 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
3207 MONO_ADD_INS (cfg->cbb, add);
3208 add->type = STACK_MP;
3215 * Returns NULL and set the cfg exception on error.
/*
 * handle_alloc:
 *   Emit object allocation for KLASS. FOR_BOX selects the boxing allocator
 *   variant. Picks among: rgctx-driven allocation under generic sharing,
 *   domain-based mono_object_new under MONO_OPT_SHARED, a compact mscorlib
 *   helper for AOT out-of-line paths, a GC managed allocator when available,
 *   and the generic allocation function otherwise.
 */
3218 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
3220 MonoInst *iargs [2];
3226 MonoInst *iargs [2];
3229 FIXME: we cannot get managed_alloc here because we can't get
3230 the class's vtable (because it's not a closed class)
3232 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3233 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
/* Generic-sharing path: the klass/vtable comes from the rgctx at runtime. */
3236 if (cfg->opt & MONO_OPT_SHARED)
3237 rgctx_info = MONO_RGCTX_INFO_KLASS;
3239 rgctx_info = MONO_RGCTX_INFO_VTABLE;
3240 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
3242 if (cfg->opt & MONO_OPT_SHARED) {
3243 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3245 alloc_ftn = mono_object_new;
3248 alloc_ftn = mono_object_new_specific;
3251 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/* Non-shared paths below. */
3254 if (cfg->opt & MONO_OPT_SHARED) {
3255 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3256 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
3258 alloc_ftn = mono_object_new;
3259 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
3260 /* This happens often in argument checking code, eg. throw new FooException... */
3261 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
3262 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
3263 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
3265 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3266 MonoMethod *managed_alloc = NULL;
/* Vtable creation failed: report a type-load error via the cfg. */
3270 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3271 cfg->exception_ptr = klass;
3275 #ifndef MONO_CROSS_COMPILE
3276 managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
3279 if (managed_alloc) {
3280 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3281 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3283 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* Some allocators want the instance size in pointer words as first arg. */
3285 guint32 lw = vtable->klass->instance_size;
3286 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
3287 EMIT_NEW_ICONST (cfg, iargs [0], lw);
3288 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
3291 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3295 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3299 * Returns NULL and set the cfg exception on error.
/*
 * handle_box:
 *   Emit the box sequence for VAL of type KLASS. Nullable<T> delegates to
 *   the managed Nullable<T>.Box method (indirectly via the rgctx under
 *   generic sharing); otherwise allocate via handle_alloc and store the
 *   value into the new object's payload.
 */
3302 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
3304 MonoInst *alloc, *ins;
3306 if (mono_class_is_nullable (klass)) {
3307 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3310 /* FIXME: What if the class is shared? We might not
3311 have to get the method address from the RGCTX. */
3312 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3313 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3314 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3316 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
3318 return mono_emit_method_call (cfg, method, &val, NULL);
3322 alloc = handle_alloc (cfg, klass, TRUE, context_used);
/* Copy the value just past the object header of the freshly boxed object. */
3326 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * mini_class_has_reference_variant_generic_argument:
 *   Return whether KLASS is a generic instance (or, under generic sharing,
 *   an open generic) with at least one variant/covariant type parameter
 *   instantiated with a reference type (or a VAR/MVAR when context_used).
 *   Such casts need the variance-aware cast helpers.
 */
3333 mini_class_has_reference_variant_generic_argument (MonoClass *klass, int context_used)
3336 MonoGenericContainer *container;
3337 MonoGenericInst *ginst;
3339 if (klass->generic_class) {
3340 container = klass->generic_class->container_class->generic_container;
3341 ginst = klass->generic_class->context.class_inst;
3342 } else if (klass->generic_container && context_used) {
3343 container = klass->generic_container;
3344 ginst = container->context.class_inst;
3349 for (i = 0; i < container->type_argc; ++i) {
/* Only variant (in/out) parameters are relevant to cast variance. */
3351 if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))
3353 type = ginst->type_argv [i];
3354 if (MONO_TYPE_IS_REFERENCE (type))
/* Open type parameters may be instantiated with references at runtime. */
3357 if (context_used && (type->type == MONO_TYPE_VAR || type->type == MONO_TYPE_MVAR))
3363 // FIXME: This doesn't work yet (class libs tests fail?)
/* NOTE: the leading TRUE forces the "complex" icall path for ALL classes;
 * the remaining conditions are dead until the FIXME above is resolved. */
3364 #define is_complex_isinst(klass) (TRUE || (klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || klass->marshalbyref || (klass->flags & TYPE_ATTRIBUTE_SEALED) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
3367 * Returns NULL and set the cfg exception on error.
/*
 * handle_castclass:
 *   Emit the castclass sequence for SRC to type KLASS, throwing
 *   InvalidCastException on failure. Variant generic interfaces use the
 *   cached managed castclass helper; shared generics with complex classes
 *   use the mono_object_castclass icall; otherwise an inline null-check +
 *   vtable/klass comparison (or interface/inst cast helpers) is emitted.
 */
3370 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3372 MonoBasicBlock *is_null_bb;
3373 int obj_reg = src->dreg;
3374 int vtable_reg = alloc_preg (cfg);
3375 MonoInst *klass_inst = NULL;
/* Variance-aware path: call the managed castclass-with-cache helper. */
3380 if(mini_class_has_reference_variant_generic_argument (klass, context_used)) {
3381 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
3382 MonoInst *cache_ins;
3384 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
3389 /* klass - it's the second element of the cache entry*/
3390 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
3393 args [2] = cache_ins;
3395 return mono_emit_method_call (cfg, mono_castclass, args, NULL);
3398 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3400 if (is_complex_isinst (klass)) {
3401 /* Complex case, handle by an icall */
3407 args [1] = klass_inst;
3409 return mono_emit_jit_icall (cfg, mono_object_castclass, args);
3411 /* Simple case, handled by the code below */
/* null always casts successfully: branch straight to the merge block. */
3415 NEW_BBLOCK (cfg, is_null_bb);
3417 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3418 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3420 save_cast_details (cfg, klass, obj_reg);
3422 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3423 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3424 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
3426 int klass_reg = alloc_preg (cfg);
3428 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed non-array class: a single klass (or vtable) equality test suffices. */
3430 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3431 /* the remoting code is broken, access the class for now */
3432 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3433 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3435 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3436 cfg->exception_ptr = klass;
3439 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3441 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3442 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3444 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3446 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3447 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
3451 MONO_START_BB (cfg, is_null_bb);
3453 reset_cast_details (cfg);
3459 * Returns NULL and set the cfg exception on error.
/*
 * handle_isinst:
 *   Emit the isinst sequence for SRC against KLASS: result is the object
 *   itself on success and NULL (0) on failure. Variant generic interfaces
 *   use the cached managed isinst helper; shared generics with complex
 *   classes use the mono_object_isinst icall; otherwise inline tests are
 *   emitted across three basic blocks (success copies the input, false_bb
 *   yields 0, end_bb merges).
 */
3462 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3465 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
3466 int obj_reg = src->dreg;
3467 int vtable_reg = alloc_preg (cfg);
3468 int res_reg = alloc_ireg_ref (cfg);
3469 MonoInst *klass_inst = NULL;
/* Variance-aware path: call the managed isinst-with-cache helper. */
3474 if(mini_class_has_reference_variant_generic_argument (klass, context_used)) {
3475 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
3476 MonoInst *cache_ins;
3478 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
3483 /* klass - it's the second element of the cache entry*/
3484 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
3487 args [2] = cache_ins;
3489 return mono_emit_method_call (cfg, mono_isinst, args, NULL);
3492 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3494 if (is_complex_isinst (klass)) {
3495 /* Complex case, handle by an icall */
3501 args [1] = klass_inst;
3503 return mono_emit_jit_icall (cfg, mono_object_isinst, args);
3505 /* Simple case, the code below can handle it */
3509 NEW_BBLOCK (cfg, is_null_bb);
3510 NEW_BBLOCK (cfg, false_bb);
3511 NEW_BBLOCK (cfg, end_bb);
3513 /* Do the assignment at the beginning, so the other assignment can be if converted */
3514 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
3515 ins->type = STACK_OBJ;
/* isinst(null) is null, which counts as "success" carrying the null. */
3518 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3519 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
3521 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3523 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3524 g_assert (!context_used);
3525 /* the is_null_bb target simply copies the input register to the output */
3526 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
3528 int klass_reg = alloc_preg (cfg);
/* Array case: check rank first, then compare the element (cast) class,
 * with special handling for object/enum-base/enum element types. */
3531 int rank_reg = alloc_preg (cfg);
3532 int eclass_reg = alloc_preg (cfg);
3534 g_assert (!context_used);
3535 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3536 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
3537 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3538 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3539 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
3540 if (klass->cast_class == mono_defaults.object_class) {
3541 int parent_reg = alloc_preg (cfg);
3542 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
3543 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
3544 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3545 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3546 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
3547 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
3548 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3549 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3550 } else if (klass->cast_class == mono_defaults.enum_class) {
3551 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3552 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3553 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
3554 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3556 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
3557 /* Check that the object is a vector too */
3558 int bounds_reg = alloc_preg (cfg);
3559 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
3560 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
3561 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3564 /* the is_null_bb target simply copies the input register to the output */
3565 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3567 } else if (mono_class_is_nullable (klass)) {
3568 g_assert (!context_used);
3569 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3570 /* the is_null_bb target simply copies the input register to the output */
3571 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
3573 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3574 g_assert (!context_used);
3575 /* the remoting code is broken, access the class for now */
3576 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3577 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3579 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3580 cfg->exception_ptr = klass;
3583 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3585 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3586 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3588 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3589 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
3591 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3592 /* the is_null_bb target simply copies the input register to the output */
3593 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
/* Merge: false_bb produces 0, is_null_bb keeps the copied object. */
3598 MONO_START_BB (cfg, false_bb);
3600 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
3601 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3603 MONO_START_BB (cfg, is_null_bb);
3605 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *
 *   Emit IR for the remoting-aware isinst opcode (CEE_MONO_CISINST).
 * SRC is the object reference; the result is an I4 in a fresh register:
 * 0 = instance of KLASS, 1 = not an instance, 2 = transparent proxy whose
 * type cannot be decided at JIT time (caller must do a runtime check).
 * NOTE(review): this chunk is missing some original lines (return type,
 * braces, else arms, trailing return) — comments describe only the visible
 * emission sequence.
 */
3611 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3613 /* This opcode takes as input an object reference and a class, and returns:
3614 0) if the object is an instance of the class,
3615 1) if the object is not an instance of the class,
3616 2) if the object is a proxy whose type cannot be determined */
3619 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
3620 int obj_reg = src->dreg;
3621 int dreg = alloc_ireg (cfg);
3623 int klass_reg = alloc_preg (cfg);
3625 NEW_BBLOCK (cfg, true_bb);
3626 NEW_BBLOCK (cfg, false_bb);
3627 NEW_BBLOCK (cfg, false2_bb);
3628 NEW_BBLOCK (cfg, end_bb);
3629 NEW_BBLOCK (cfg, no_proxy_bb);
/* A null reference is "not an instance" (result 1). */
3631 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3632 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
3634 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3635 NEW_BBLOCK (cfg, interface_fail_bb);
/* Fast path: check the interface bitmap in the vtable. */
3637 tmp_reg = alloc_preg (cfg);
3638 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3639 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
3640 MONO_START_BB (cfg, interface_fail_bb);
3641 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Interface check failed: unless it is a transparent proxy, it is a plain failure. */
3643 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
/* Proxy with custom type info cannot be decided here -> result 2. */
3645 tmp_reg = alloc_preg (cfg);
3646 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3647 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3648 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
/* Non-interface case: load the object's class from its vtable. */
3650 tmp_reg = alloc_preg (cfg);
3651 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3652 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3654 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* Transparent proxy: test against the remote class's proxy_class instead. */
3655 tmp_reg = alloc_preg (cfg);
3656 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3657 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
/* No custom type info -> treat like a normal object (no_proxy_bb). */
3659 tmp_reg = alloc_preg (cfg);
3660 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3661 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3662 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
/* Custom type info present: a cast miss means "undecidable" (2), not "false". */
3664 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
3665 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
3667 MONO_START_BB (cfg, no_proxy_bb);
3669 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
/* Result blocks: materialize 1 / 2 / 0 into dreg and converge on end_bb. */
3672 MONO_START_BB (cfg, false_bb);
3674 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3675 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3677 MONO_START_BB (cfg, false2_bb);
3679 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
3680 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3682 MONO_START_BB (cfg, true_bb);
3684 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3686 MONO_START_BB (cfg, end_bb);
/* Wrap the result register in an OP_ICONST-typed instruction for the eval stack. */
3689 MONO_INST_NEW (cfg, ins, OP_ICONST);
3691 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *
 *   Emit IR for the remoting-aware castclass opcode (CEE_MONO_CCASTCLASS).
 * SRC is the object reference; the result is an I4: 0 = cast succeeded,
 * 1 = transparent proxy whose type cannot be decided at JIT time; any
 * other case raises InvalidCastException via the emitted checks.
 * NOTE(review): some original lines (return type, braces, trailing return)
 * are missing from this chunk.
 */
3697 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3699 /* This opcode takes as input an object reference and a class, and returns:
3700 0) if the object is an instance of the class,
3701 1) if the object is a proxy whose type cannot be determined
3702 an InvalidCastException exception is thrown otherwise*/
3705 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
3706 int obj_reg = src->dreg;
3707 int dreg = alloc_ireg (cfg);
3708 int tmp_reg = alloc_preg (cfg);
3709 int klass_reg = alloc_preg (cfg);
3711 NEW_BBLOCK (cfg, end_bb);
3712 NEW_BBLOCK (cfg, ok_result_bb);
/* Casting a null reference always succeeds. */
3714 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3715 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
/* Record class/object for the exception message should the cast fail. */
3717 save_cast_details (cfg, klass, obj_reg);
3719 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3720 NEW_BBLOCK (cfg, interface_fail_bb);
/* Fast path: interface bitmap check on the vtable. */
3722 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3723 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
3724 MONO_START_BB (cfg, interface_fail_bb);
3725 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Only a transparent proxy may survive an interface-check miss; otherwise this throws. */
3727 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
/* Proxy without custom type info -> InvalidCastException; with it -> result 1. */
3729 tmp_reg = alloc_preg (cfg);
3730 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3731 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3732 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
3734 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3735 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3738 NEW_BBLOCK (cfg, no_proxy_bb);
/* Non-interface case: load the object's class and detect proxies. */
3740 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3741 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3742 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* Transparent proxy: check against the remote class's proxy_class. */
3744 tmp_reg = alloc_preg (cfg);
3745 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3746 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
/* No custom type info -> fall through to the normal castclass path. */
3748 tmp_reg = alloc_preg (cfg);
3749 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3750 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3751 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3753 NEW_BBLOCK (cfg, fail_1_bb);
/* Custom type info present: a miss yields "undecidable" (1), not an exception. */
3755 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
3757 MONO_START_BB (cfg, fail_1_bb);
3759 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3760 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3762 MONO_START_BB (cfg, no_proxy_bb);
/* Plain object: a throwing castclass check; falls through to ok_result_bb on success. */
3764 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
3767 MONO_START_BB (cfg, ok_result_bb);
3769 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3771 MONO_START_BB (cfg, end_bb);
/* Wrap the result register for the eval stack. */
3774 MONO_INST_NEW (cfg, ins, OP_ICONST);
3776 ins->type = STACK_I4;
3782  * Returns NULL and sets the cfg exception on error.
/*
 * handle_delegate_ctor:
 *
 *   Inline the work of mono_delegate_ctor (): allocate the delegate object,
 * store its target/method fields (with write barriers when enabled), set up
 * the per-domain compiled-code slot, and install the delegate trampoline as
 * invoke_impl.  CONTEXT_USED selects rgctx-based lookups for shared code.
 * NOTE(review): some original lines (braces, `if` arms, trailing return)
 * are missing from this chunk.
 */
3784 static G_GNUC_UNUSED MonoInst*
3785 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used)
3789 gpointer *trampoline;
3790 MonoInst *obj, *method_ins, *tramp_ins;
3794 obj = handle_alloc (cfg, klass, FALSE, 0);
3798 /* Inline the contents of mono_delegate_ctor */
3800 /* Set target field */
3801 /* Optimize away setting of NULL target */
3802 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0)) {
3803 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
/* GC write barrier for the object-reference store into the heap. */
3804 if (cfg->gen_write_barriers) {
3805 dreg = alloc_preg (cfg);
3806 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target));
3807 emit_write_barrier (cfg, ptr, target, 0);
3811 /* Set method field */
3812 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
3813 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
3814 if (cfg->gen_write_barriers) {
3815 dreg = alloc_preg (cfg);
3816 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method));
3817 emit_write_barrier (cfg, ptr, method_ins, 0);
3820 * To avoid looking up the compiled code belonging to the target method
3821 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
3822 * store it, and we fill it after the method has been compiled.
/* Dynamic methods are excluded: their code can be freed, so no cached slot. */
3824 if (!cfg->compile_aot && !method->dynamic) {
3825 MonoInst *code_slot_ins;
3828 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* Lazily create the domain's method -> code-slot hash under the domain lock. */
3830 domain = mono_domain_get ();
3831 mono_domain_lock (domain);
3832 if (!domain_jit_info (domain)->method_code_hash)
3833 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
3834 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
3836 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
3837 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
3839 mono_domain_unlock (domain);
3841 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
3843 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
3846 /* Set invoke_impl field */
/* Under AOT the trampoline address is a patch resolved at load time. */
3847 if (cfg->compile_aot) {
3848 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, klass);
3850 trampoline = mono_create_delegate_trampoline (klass);
3851 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
3853 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
3855 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit a call to the rank-specific mono_array_new_va icall to allocate a
 * multi-dimensional array; SP holds the dimension arguments from the stack.
 * The icall is vararg, so vararg support is flagged and LLVM is disabled.
 */
3861 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
3863 MonoJitICallInfo *info;
3865 /* Need to register the icall so it gets an icall wrapper */
3866 info = mono_get_array_new_va_icall (rank);
3868 cfg->flags |= MONO_CFG_HAS_VARARGS;
3870 /* mono_array_new_va () needs a vararg calling convention */
3871 cfg->disable_llvm = TRUE;
3873 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
3874 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * mono_emit_load_got_addr:
 *
 *   Materialize the GOT address into cfg->got_var at the very start of the
 * entry basic block (once per method), and add a dummy use in the exit
 * block so liveness analysis keeps the variable alive for the whole method.
 * No-op when there is no got_var or it was already allocated.
 */
3878 mono_emit_load_got_addr (MonoCompile *cfg)
3880 MonoInst *getaddr, *dummy_use;
3882 if (!cfg->got_var || cfg->got_var_allocated)
3885 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
3886 getaddr->dreg = cfg->got_var->dreg;
3888 /* Add it to the start of the first bblock */
/* Prepend by hand so it precedes any already-emitted entry code. */
3889 if (cfg->bb_entry->code) {
3890 getaddr->next = cfg->bb_entry->code;
3891 cfg->bb_entry->code = getaddr;
3894 MONO_ADD_INS (cfg->bb_entry, getaddr);
3896 cfg->got_var_allocated = TRUE;
3899 * Add a dummy use to keep the got_var alive, since real uses might
3900 * only be generated by the back ends.
3901 * Add it to end_bblock, so the variable's lifetime covers the whole
3903 * It would be better to make the usage of the got var explicit in all
3904 * cases when the backend needs it (i.e. calls, throw etc.), so this
3905 * wouldn't be needed.
3907 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
3908 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Inlining size threshold: initialized lazily from the MONO_INLINELIMIT
 * environment variable (default INLINE_LENGTH_LIMIT) on first use in
 * mono_method_check_inlining (). */
3911 static int inline_limit;
3912 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 *   Decide whether METHOD may be inlined into the method being compiled.
 * Rejects: generic-sharing contexts, inline depth > 10, noinline /
 * synchronized / marshalbyref methods, bodies at or above the size limit,
 * classes whose cctor would have to run inside the inlined code, and
 * methods with declarative security.  Under soft-float, also rejects R4
 * parameters/returns.
 * NOTE(review): several return statements and braces are missing from this
 * chunk; comments describe only the visible checks.
 */
3915 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
3917 MonoMethodHeaderSummary header;
3919 #ifdef MONO_ARCH_SOFT_FLOAT
3920 MonoMethodSignature *sig = mono_method_signature (method);
3924 if (cfg->generic_sharing_context)
/* Guard against pathological/recursive inlining. */
3927 if (cfg->inline_depth > 10)
3930 #ifdef MONO_ARCH_HAVE_LMF_OPS
/* With cheap LMF ops, small icalls/pinvokes are inline candidates too. */
3931 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3932 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
3933 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
/* Obtain code size etc. without fully decoding the method header. */
3938 if (!mono_method_get_header_summary (method, &header))
3941 /*runtime, icall and pinvoke are checked by summary call*/
3942 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
3943 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
3944 (method->klass->marshalbyref) ||
3948 /* also consider num_locals? */
3949 /* Do the size check early to avoid creating vtables */
3950 if (!inline_limit_inited) {
3951 if (getenv ("MONO_INLINELIMIT"))
3952 inline_limit = atoi (getenv ("MONO_INLINELIMIT"));
3954 inline_limit = INLINE_LENGTH_LIMIT;
3955 inline_limit_inited = TRUE;
3957 if (header.code_size >= inline_limit)
3961 * if we can initialize the class of the method right away, we do,
3962 * otherwise we don't allow inlining if the class needs initialization,
3963 * since it would mean inserting a call to mono_runtime_class_init()
3964 * inside the inlined code
3966 if (!(cfg->opt & MONO_OPT_SHARED)) {
3967 if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
3968 if (cfg->run_cctors && method->klass->has_cctor) {
3969 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
3970 if (!method->klass->runtime_info)
3971 /* No vtable created yet */
3973 vtable = mono_class_vtable (cfg->domain, method->klass);
3976 /* This makes so that inline cannot trigger */
3977 /* .cctors: too many apps depend on them */
3978 /* running with a specific order... */
3979 if (! vtable->initialized)
3981 mono_runtime_class_init (vtable);
3983 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
3984 if (!method->klass->runtime_info)
3985 /* No vtable created yet */
3987 vtable = mono_class_vtable (cfg->domain, method->klass);
3990 if (!vtable->initialized)
3995 * If we're compiling for shared code
3996 * the cctor will need to be run at aot method load time, for example,
3997 * or at the end of the compilation of the inlining method.
3999 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
4004 * CAS - do not inline methods with declarative security
4005 * Note: this has to be before any possible return TRUE;
4007 if (mono_method_has_declsec (method))
4010 #ifdef MONO_ARCH_SOFT_FLOAT
/* Soft-float cannot handle inlined R4 values in parameters or return. */
4012 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
4014 for (i = 0; i < sig->param_count; ++i)
4015 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Decide whether a static field access compiled into METHOD must be
 * preceded by a class-initialization check for VTABLE's class.
 * NOTE(review): the return statements between these conditions are missing
 * from this chunk — each visible condition presumably short-circuits with
 * a return; confirm against the full file.
 */
4023 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoVTable *vtable)
/* Already initialized (and not AOT, where init state can't be baked in). */
4025 if (vtable->initialized && !cfg->compile_aot)
/* beforefieldinit classes allow lazy initialization at any time. */
4028 if (vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
4031 if (!mono_class_needs_cctor_run (vtable->klass, method))
4034 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (vtable->klass == method->klass))
4035 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit IR computing the address of ARR [INDEX] for a one-dimensional
 * array of KLASS elements, with an optional bounds check (BCHECK).
 * Returns the address instruction (type STACK_MP).  On x86/amd64, element
 * sizes of 1/2/4/8 use a single LEA.
 */
4042 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
4046 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
4048 mono_class_init (klass);
4049 size = mono_class_array_element_size (klass);
4051 mult_reg = alloc_preg (cfg);
4052 array_reg = arr->dreg;
4053 index_reg = index->dreg;
4055 #if SIZEOF_REGISTER == 8
4056 /* The array reg is 64 bits but the index reg is only 32 */
/* LLVM handles the widening itself; otherwise sign-extend the index. */
4057 if (COMPILE_LLVM (cfg)) {
4059 index2_reg = index_reg;
4061 index2_reg = alloc_preg (cfg);
4062 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
/* 32-bit target: truncate an I8 index down to I4. */
4065 if (index->type == STACK_I8) {
4066 index2_reg = alloc_preg (cfg);
4067 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
4069 index2_reg = index_reg;
4074 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
4076 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Power-of-two element sizes map onto a single scaled LEA. */
4077 if (size == 1 || size == 2 || size == 4 || size == 8) {
4078 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
4080 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
4081 ins->klass = mono_class_get_element_class (klass);
4082 ins->type = STACK_MP;
/* Generic path: addr = arr + index * size + offsetof (vector). */
4088 add_reg = alloc_ireg_mp (cfg);
4090 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
4091 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
4092 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
4093 ins->klass = mono_class_get_element_class (klass);
4094 ins->type = STACK_MP;
4095 MONO_ADD_INS (cfg->cbb, ins);
4100 #ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit IR computing the address of a 2-dimensional array element
 * ARR [index1, index2], including per-dimension lower-bound adjustment
 * and range checks against MonoArrayBounds.  Only compiled on targets
 * with native multiply (depends on OP_PMUL).
 */
4102 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
4104 int bounds_reg = alloc_preg (cfg);
4105 int add_reg = alloc_ireg_mp (cfg);
4106 int mult_reg = alloc_preg (cfg);
4107 int mult2_reg = alloc_preg (cfg);
4108 int low1_reg = alloc_preg (cfg);
4109 int low2_reg = alloc_preg (cfg);
4110 int high1_reg = alloc_preg (cfg);
4111 int high2_reg = alloc_preg (cfg);
4112 int realidx1_reg = alloc_preg (cfg);
4113 int realidx2_reg = alloc_preg (cfg);
4114 int sum_reg = alloc_preg (cfg);
4119 mono_class_init (klass);
4120 size = mono_class_array_element_size (klass);
4122 index1 = index_ins1->dreg;
4123 index2 = index_ins2->dreg;
4125 /* range checking */
4126 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
4127 arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
/* Dimension 1: realidx = index - lower_bound; unsigned compare vs length. */
4129 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
4130 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4131 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
4132 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
4133 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
4134 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
4135 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Dimension 2: bounds [1] lives sizeof (MonoArrayBounds) past bounds [0]. */
4137 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
4138 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4139 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
4140 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
4141 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
4142 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
4143 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* addr = arr + (realidx1 * length2 + realidx2) * size + offsetof (vector). */
4145 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
4146 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
4147 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
4148 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
4149 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
4151 ins->type = STACK_MP;
4153 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 *   Dispatch element-address computation for an Array Get/Set/Address
 * accessor CMETHOD: rank 1 and (with intrinsics) rank 2 are emitted
 * inline; higher ranks call the generated Address marshalling method.
 * IS_SET drops the trailing value argument when counting the rank.
 */
4160 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
4164 MonoMethod *addr_method;
4167 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
4170 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], TRUE);
4172 #ifndef MONO_ARCH_EMULATE_MUL_DIV
4173 /* emit_ldelema_2 depends on OP_LMUL */
4174 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
4175 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
/* General case: call the rank/element-size specific Address helper. */
4179 element_size = mono_class_array_element_size (cmethod->klass->element_class);
4180 addr_method = mono_marshal_get_array_address (rank, element_size);
4181 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/* Default MonoBreakPolicyFunc: honor every breakpoint request. */
4186 static MonoBreakPolicy
4187 always_insert_breakpoint (MonoMethod *method)
4189 return MONO_BREAK_POLICY_ALWAYS;
/* Embedder-overridable breakpoint policy; see mono_set_break_policy (). */
4192 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
4195 * mono_set_break_policy:
4196 * policy_callback: the new callback function
4198  * Allow embedders to decide whether to actually obey breakpoint instructions
4199 * (both break IL instructions and Debugger.Break () method calls), for example
4200 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
4201 * untrusted or semi-trusted code.
4203 * @policy_callback will be called every time a break point instruction needs to
4204 * be inserted with the method argument being the method that calls Debugger.Break()
4205 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
4206 * if it wants the breakpoint to not be effective in the given method.
4207 * #MONO_BREAK_POLICY_ALWAYS is the default.
/* Install POLICY_CALLBACK as the breakpoint policy; NULL restores the
 * default (always_insert_breakpoint). */
4210 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
4212 if (policy_callback)
4213 break_policy_func = policy_callback;
4215 break_policy_func = always_insert_breakpoint;
/*
 * should_insert_brekpoint: (name misspelling is historical; kept for callers)
 *
 *   Consult the installed break policy for METHOD.  ON_DBG defers to
 * whether the Mono debugger is attached; an out-of-range policy value is
 * warned about.
 * NOTE(review): the return statements for the ALWAYS/NEVER cases and the
 * default label are missing from this chunk.
 */
4219 should_insert_brekpoint (MonoMethod *method) {
4220 switch (break_policy_func (method)) {
4221 case MONO_BREAK_POLICY_ALWAYS:
4223 case MONO_BREAK_POLICY_NEVER:
4225 case MONO_BREAK_POLICY_ON_DBG:
4226 return mono_debug_using_mono_debugger ();
4228 g_warning ("Incorrect value returned from break policy callback");
4233 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 *
 *   Inline Array.Get/SetGenericValueImpl: compute the element address
 * (bounds already validated by the caller) and copy the value between
 * the element slot and the by-ref argument args [2].
 * NOTE(review): the `if (is_set)` / `else` lines selecting between the
 * two load/store pairs are missing from this chunk.
 */
4235 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
4237 MonoInst *addr, *store, *load;
4238 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
4240 /* the bounds check is already done by the callers */
4241 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
/* set: value -> element slot */
4243 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
4244 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
/* get: element slot -> value */
4246 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
4247 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/*
 * mini_emit_inst_for_ctor:
 *
 *   Intrinsic replacement hook for constructor calls; currently only SIMD
 * ctors are handled (when MONO_OPT_SIMD is enabled on SIMD-capable
 * builds).  Returns the replacement instruction or NULL.
 */
4253 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4255 MonoInst *ins = NULL;
4256 #ifdef MONO_ARCH_SIMD_INTRINSICS
4257 if (cfg->opt & MONO_OPT_SIMD) {
4258 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
4268 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4270 MonoInst *ins = NULL;
4272 static MonoClass *runtime_helpers_class = NULL;
4273 if (! runtime_helpers_class)
4274 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
4275 "System.Runtime.CompilerServices", "RuntimeHelpers");
4277 if (cmethod->klass == mono_defaults.string_class) {
4278 if (strcmp (cmethod->name, "get_Chars") == 0) {
4279 int dreg = alloc_ireg (cfg);
4280 int index_reg = alloc_preg (cfg);
4281 int mult_reg = alloc_preg (cfg);
4282 int add_reg = alloc_preg (cfg);
4284 #if SIZEOF_REGISTER == 8
4285 /* The array reg is 64 bits but the index reg is only 32 */
4286 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
4288 index_reg = args [1]->dreg;
4290 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
4292 #if defined(TARGET_X86) || defined(TARGET_AMD64)
4293 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
4294 add_reg = ins->dreg;
4295 /* Avoid a warning */
4297 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
4300 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
4301 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
4302 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
4303 add_reg, G_STRUCT_OFFSET (MonoString, chars));
4305 type_from_op (ins, NULL, NULL);
4307 } else if (strcmp (cmethod->name, "get_Length") == 0) {
4308 int dreg = alloc_ireg (cfg);
4309 /* Decompose later to allow more optimizations */
4310 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
4311 ins->type = STACK_I4;
4312 ins->flags |= MONO_INST_FAULT;
4313 cfg->cbb->has_array_access = TRUE;
4314 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
4317 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
4318 int mult_reg = alloc_preg (cfg);
4319 int add_reg = alloc_preg (cfg);
4321 /* The corlib functions check for oob already. */
4322 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
4323 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
4324 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
4325 return cfg->cbb->last_ins;
4328 } else if (cmethod->klass == mono_defaults.object_class) {
4330 if (strcmp (cmethod->name, "GetType") == 0) {
4331 int dreg = alloc_ireg_ref (cfg);
4332 int vt_reg = alloc_preg (cfg);
4333 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
4334 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
4335 type_from_op (ins, NULL, NULL);
4338 #if !defined(MONO_ARCH_EMULATE_MUL_DIV)
4339 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0 && !mono_gc_is_moving ()) {
4340 int dreg = alloc_ireg (cfg);
4341 int t1 = alloc_ireg (cfg);
4343 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
4344 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
4345 ins->type = STACK_I4;
4349 } else if (strcmp (cmethod->name, ".ctor") == 0) {
4350 MONO_INST_NEW (cfg, ins, OP_NOP);
4351 MONO_ADD_INS (cfg->cbb, ins);
4355 } else if (cmethod->klass == mono_defaults.array_class) {
4356 if (strcmp (cmethod->name + 1, "etGenericValueImpl") == 0)
4357 return emit_array_generic_access (cfg, fsig, args, *cmethod->name == 'S');
4359 #ifndef MONO_BIG_ARRAYS
4361 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
4364 if ((strcmp (cmethod->name, "GetLength") == 0 || strcmp (cmethod->name, "GetLowerBound") == 0) && args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
4365 int dreg = alloc_ireg (cfg);
4366 int bounds_reg = alloc_ireg_mp (cfg);
4367 MonoBasicBlock *end_bb, *szarray_bb;
4368 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
4370 NEW_BBLOCK (cfg, end_bb);
4371 NEW_BBLOCK (cfg, szarray_bb);
4373 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
4374 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
4375 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
4376 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
4377 /* Non-szarray case */
4379 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4380 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
4382 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4383 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4384 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4385 MONO_START_BB (cfg, szarray_bb);
4388 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4389 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
4391 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4392 MONO_START_BB (cfg, end_bb);
4394 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
4395 ins->type = STACK_I4;
4401 if (cmethod->name [0] != 'g')
4404 if (strcmp (cmethod->name, "get_Rank") == 0) {
4405 int dreg = alloc_ireg (cfg);
4406 int vtable_reg = alloc_preg (cfg);
4407 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
4408 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
4409 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
4410 vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
4411 type_from_op (ins, NULL, NULL);
4414 } else if (strcmp (cmethod->name, "get_Length") == 0) {
4415 int dreg = alloc_ireg (cfg);
4417 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4418 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
4419 type_from_op (ins, NULL, NULL);
4424 } else if (cmethod->klass == runtime_helpers_class) {
4426 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
4427 EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
4431 } else if (cmethod->klass == mono_defaults.thread_class) {
4432 if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
4433 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
4434 MONO_ADD_INS (cfg->cbb, ins);
4436 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
4437 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
4438 MONO_ADD_INS (cfg->cbb, ins);
4441 } else if (cmethod->klass == mono_defaults.monitor_class) {
4442 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
4443 if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) {
4446 if (COMPILE_LLVM (cfg)) {
4448 * Pass the argument normally, the LLVM backend will handle the
4449 * calling convention problems.
4451 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
4453 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
4454 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
4455 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
4456 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
4459 return (MonoInst*)call;
4460 } else if (strcmp (cmethod->name, "Exit") == 0) {
4463 if (COMPILE_LLVM (cfg)) {
4464 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
4466 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
4467 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
4468 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
4469 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
4472 return (MonoInst*)call;
4474 #elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
4475 MonoMethod *fast_method = NULL;
4477 /* Avoid infinite recursion */
4478 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
4479 (strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
4480 strcmp (cfg->method->name, "FastMonitorExit") == 0))
4483 if ((strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) ||
4484 strcmp (cmethod->name, "Exit") == 0)
4485 fast_method = mono_monitor_get_fast_path (cmethod);
4489 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
4491 } else if (cmethod->klass->image == mono_defaults.corlib &&
4492 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
4493 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
4496 #if SIZEOF_REGISTER == 8
4497 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
4498 /* 64 bit reads are already atomic */
4499 MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
4500 ins->dreg = mono_alloc_preg (cfg);
4501 ins->inst_basereg = args [0]->dreg;
4502 ins->inst_offset = 0;
4503 MONO_ADD_INS (cfg->cbb, ins);
4507 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
4508 if (strcmp (cmethod->name, "Increment") == 0) {
4509 MonoInst *ins_iconst;
4512 if (fsig->params [0]->type == MONO_TYPE_I4)
4513 opcode = OP_ATOMIC_ADD_NEW_I4;
4514 #if SIZEOF_REGISTER == 8
4515 else if (fsig->params [0]->type == MONO_TYPE_I8)
4516 opcode = OP_ATOMIC_ADD_NEW_I8;
4519 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4520 ins_iconst->inst_c0 = 1;
4521 ins_iconst->dreg = mono_alloc_ireg (cfg);
4522 MONO_ADD_INS (cfg->cbb, ins_iconst);
4524 MONO_INST_NEW (cfg, ins, opcode);
4525 ins->dreg = mono_alloc_ireg (cfg);
4526 ins->inst_basereg = args [0]->dreg;
4527 ins->inst_offset = 0;
4528 ins->sreg2 = ins_iconst->dreg;
4529 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4530 MONO_ADD_INS (cfg->cbb, ins);
4532 } else if (strcmp (cmethod->name, "Decrement") == 0) {
4533 MonoInst *ins_iconst;
4536 if (fsig->params [0]->type == MONO_TYPE_I4)
4537 opcode = OP_ATOMIC_ADD_NEW_I4;
4538 #if SIZEOF_REGISTER == 8
4539 else if (fsig->params [0]->type == MONO_TYPE_I8)
4540 opcode = OP_ATOMIC_ADD_NEW_I8;
4543 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4544 ins_iconst->inst_c0 = -1;
4545 ins_iconst->dreg = mono_alloc_ireg (cfg);
4546 MONO_ADD_INS (cfg->cbb, ins_iconst);
4548 MONO_INST_NEW (cfg, ins, opcode);
4549 ins->dreg = mono_alloc_ireg (cfg);
4550 ins->inst_basereg = args [0]->dreg;
4551 ins->inst_offset = 0;
4552 ins->sreg2 = ins_iconst->dreg;
4553 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4554 MONO_ADD_INS (cfg->cbb, ins);
4556 } else if (strcmp (cmethod->name, "Add") == 0) {
4559 if (fsig->params [0]->type == MONO_TYPE_I4)
4560 opcode = OP_ATOMIC_ADD_NEW_I4;
4561 #if SIZEOF_REGISTER == 8
4562 else if (fsig->params [0]->type == MONO_TYPE_I8)
4563 opcode = OP_ATOMIC_ADD_NEW_I8;
4567 MONO_INST_NEW (cfg, ins, opcode);
4568 ins->dreg = mono_alloc_ireg (cfg);
4569 ins->inst_basereg = args [0]->dreg;
4570 ins->inst_offset = 0;
4571 ins->sreg2 = args [1]->dreg;
4572 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4573 MONO_ADD_INS (cfg->cbb, ins);
4576 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
4578 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
4579 if (strcmp (cmethod->name, "Exchange") == 0) {
4581 gboolean is_ref = fsig->params [0]->type == MONO_TYPE_OBJECT;
4583 if (fsig->params [0]->type == MONO_TYPE_I4)
4584 opcode = OP_ATOMIC_EXCHANGE_I4;
4585 #if SIZEOF_REGISTER == 8
4586 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I8) ||
4587 (fsig->params [0]->type == MONO_TYPE_I))
4588 opcode = OP_ATOMIC_EXCHANGE_I8;
4590 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I))
4591 opcode = OP_ATOMIC_EXCHANGE_I4;
4596 MONO_INST_NEW (cfg, ins, opcode);
4597 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
4598 ins->inst_basereg = args [0]->dreg;
4599 ins->inst_offset = 0;
4600 ins->sreg2 = args [1]->dreg;
4601 MONO_ADD_INS (cfg->cbb, ins);
4603 switch (fsig->params [0]->type) {
4605 ins->type = STACK_I4;
4609 ins->type = STACK_I8;
4611 case MONO_TYPE_OBJECT:
4612 ins->type = STACK_OBJ;
4615 g_assert_not_reached ();
4618 if (cfg->gen_write_barriers && is_ref)
4619 emit_write_barrier (cfg, args [0], args [1], -1);
4621 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
4623 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS
4624 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
4626 gboolean is_ref = MONO_TYPE_IS_REFERENCE (fsig->params [1]);
4627 if (fsig->params [1]->type == MONO_TYPE_I4)
4629 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I)
4630 size = sizeof (gpointer);
4631 else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I8)
4634 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
4635 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
4636 ins->sreg1 = args [0]->dreg;
4637 ins->sreg2 = args [1]->dreg;
4638 ins->sreg3 = args [2]->dreg;
4639 ins->type = STACK_I4;
4640 MONO_ADD_INS (cfg->cbb, ins);
4641 } else if (size == 8) {
4642 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
4643 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
4644 ins->sreg1 = args [0]->dreg;
4645 ins->sreg2 = args [1]->dreg;
4646 ins->sreg3 = args [2]->dreg;
4647 ins->type = STACK_I8;
4648 MONO_ADD_INS (cfg->cbb, ins);
4650 /* g_assert_not_reached (); */
4652 if (cfg->gen_write_barriers && is_ref)
4653 emit_write_barrier (cfg, args [0], args [1], -1);
4655 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS */
4659 } else if (cmethod->klass->image == mono_defaults.corlib) {
4660 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
4661 && strcmp (cmethod->klass->name, "Debugger") == 0) {
4662 if (should_insert_brekpoint (cfg->method))
4663 MONO_INST_NEW (cfg, ins, OP_BREAK);
4665 MONO_INST_NEW (cfg, ins, OP_NOP);
4666 MONO_ADD_INS (cfg->cbb, ins);
4669 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
4670 && strcmp (cmethod->klass->name, "Environment") == 0) {
4672 EMIT_NEW_ICONST (cfg, ins, 1);
4674 EMIT_NEW_ICONST (cfg, ins, 0);
4678 } else if (cmethod->klass == mono_defaults.math_class) {
4680 * There is general branches code for Min/Max, but it does not work for
4682 * http://everything2.com/?node_id=1051618
4686 #ifdef MONO_ARCH_SIMD_INTRINSICS
4687 if (cfg->opt & MONO_OPT_SIMD) {
4688 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
4694 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
/*
 * mini_redirect_call:
 * NOTE(review): this extract has elided lines (see gaps in the stale inline
 * numbering); only the visible logic is described here.
 * Redirects selected internal calls to faster equivalents. The visible case
 * replaces String.InternalAllocateStr with the GC's managed allocator when
 * allocation profiling is off and MONO_OPT_SHARED is not in effect.
 */
4698 * This entry point could be used later for arbitrary method
4701 inline static MonoInst*
4702 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
4703 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
4705 if (method->klass == mono_defaults.string_class) {
4706 /* managed string allocation support */
4707 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
4708 MonoInst *iargs [2];
4709 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4710 MonoMethod *managed_alloc = NULL;
4712 g_assert (vtable); /* Should not fail since it is System.String */
4713 #ifndef MONO_CROSS_COMPILE
4714 managed_alloc = mono_gc_get_managed_allocator (vtable, FALSE);
/* call the managed allocator with (vtable, length) as arguments */
4718 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4719 iargs [1] = args [0];
4720 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
/*
 * mono_save_args:
 * For inlining: create an OP_LOCAL variable for each incoming argument
 * (including the implicit 'this') and emit a store of the corresponding
 * stack value into it, so the inlined body reads from cfg->args [] vars.
 * NOTE(review): some original lines are elided in this extract.
 */
4727 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
4729 MonoInst *store, *temp;
4732 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
/* the 'this' slot (i == 0) takes its type from the stack entry */
4733 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
4736 * FIXME: We should use *args++ = sp [0], but that would mean the arg
4737 * would be different than the MonoInst's used to represent arguments, and
4738 * the ldelema implementation can't deal with that.
4739 * Solution: When ldelema is used on an inline argument, create a var for
4740 * it, emit ldelema on that var, and emit the saving code below in
4741 * inline_method () if needed.
4743 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
4744 cfg->args [i] = temp;
4745 /* This uses cfg->args [i] which is set by the preceding line */
4746 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
4747 store->cil_code = sp [0]->cil_code;
/*
 * Debugging toggles: when non-zero, inlining can be restricted to methods
 * whose full name matches the MONO_INLINE_{CALLED,CALLER}_METHOD_NAME_LIMIT
 * environment variables (see the check_inline_*_name_limit helpers below).
 */
4752 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
4753 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
4755 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 * Debug aid: returns whether CALLED_METHOD's full name starts with the
 * prefix in the MONO_INLINE_CALLED_METHOD_NAME_LIMIT env var. The limit is
 * read once and cached in a static; an unset env var caches "" (no limit).
 * NOTE(review): the "limit empty" return path is elided in this extract.
 */
4757 check_inline_called_method_name_limit (MonoMethod *called_method)
4760 static char *limit = NULL;
4762 if (limit == NULL) {
4763 char *limit_string = getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
4765 if (limit_string != NULL)
4766 limit = limit_string;
4768 limit = (char *) "";
4771 if (limit [0] != '\0') {
4772 char *called_method_name = mono_method_full_name (called_method, TRUE);
/* prefix comparison: only the first strlen (limit) chars are compared */
4774 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
4775 g_free (called_method_name);
4777 //return (strncmp_result <= 0);
4778 return (strncmp_result == 0);
4785 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 * Debug aid, mirror of check_inline_called_method_name_limit but applied to
 * the CALLER: compares its full name against the prefix in the
 * MONO_INLINE_CALLER_METHOD_NAME_LIMIT env var (cached in a static).
 * NOTE(review): the "limit empty" return path is elided in this extract.
 */
4787 check_inline_caller_method_name_limit (MonoMethod *caller_method)
4790 static char *limit = NULL;
4792 if (limit == NULL) {
4793 char *limit_string = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
4794 if (limit_string != NULL) {
4795 limit = limit_string;
4797 limit = (char *) "";
4801 if (limit [0] != '\0') {
4802 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
/* prefix comparison: only the first strlen (limit) chars are compared */
4804 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
4805 g_free (caller_method_name);
4807 //return (strncmp_result <= 0);
4808 return (strncmp_result == 0);
/*
 * inline_method:
 * Inline CMETHOD into the current compile at the current cfg->cbb.
 * Visible flow: save the parts of CFG state that mono_method_to_ir mutates
 * (locals, args, cil offsets, cbb, generic context, ...), allocate fresh
 * locals and start/end bblocks for the inlinee, recursively run
 * mono_method_to_ir over its body, then restore the saved state. If the
 * cost returned is acceptable (< 60) or inline_always is set, the new
 * bblocks are linked/merged in and the return value is loaded from RVAR;
 * otherwise the new bblocks are discarded by resetting cfg->cbb.
 * NOTE(review): this extract is heavily elided (many original lines are
 * missing between the stale inline numbers), so the comments above describe
 * only the visible structure — verify against the full source.
 */
4816 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
4817 guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_always)
4819 MonoInst *ins, *rvar = NULL;
4820 MonoMethodHeader *cheader;
4821 MonoBasicBlock *ebblock, *sbblock;
4823 MonoMethod *prev_inlined_method;
4824 MonoInst **prev_locals, **prev_args;
4825 MonoType **prev_arg_types;
4826 guint prev_real_offset;
4827 GHashTable *prev_cbb_hash;
4828 MonoBasicBlock **prev_cil_offset_to_bb;
4829 MonoBasicBlock *prev_cbb;
4830 unsigned char* prev_cil_start;
4831 guint32 prev_cil_offset_to_bb_len;
4832 MonoMethod *prev_current_method;
4833 MonoGenericContext *prev_generic_context;
4834 gboolean ret_var_set, prev_ret_var_set;
4836 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
4838 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
4839 if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
4842 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
4843 if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
4847 if (cfg->verbose_level > 2)
4848 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4850 if (!cmethod->inline_info) {
4851 mono_jit_stats.inlineable_methods++;
4852 cmethod->inline_info = 1;
4855 /* allocate local variables */
4856 cheader = mono_method_get_header (cmethod);
4858 if (cheader == NULL || mono_loader_get_last_error ()) {
4859 MonoLoaderError *error = mono_loader_get_last_error ();
4862 mono_metadata_free_mh (cheader);
4863 if (inline_always && error)
4864 mono_cfg_set_exception (cfg, error->exception_type);
4866 mono_loader_clear_error ();
4870 /*Must verify before creating locals as it can cause the JIT to assert.*/
4871 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
4872 mono_metadata_free_mh (cheader);
4876 /* allocate space to store the return value */
4877 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
4878 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
4882 prev_locals = cfg->locals;
4883 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
4884 for (i = 0; i < cheader->num_locals; ++i)
4885 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
4887 /* allocate start and end blocks */
4888 /* This is needed so if the inline is aborted, we can clean up */
4889 NEW_BBLOCK (cfg, sbblock);
4890 sbblock->real_offset = real_offset;
4892 NEW_BBLOCK (cfg, ebblock);
4893 ebblock->block_num = cfg->num_bblocks++;
4894 ebblock->real_offset = real_offset;
/* save all CFG state that the recursive mono_method_to_ir call clobbers */
4896 prev_args = cfg->args;
4897 prev_arg_types = cfg->arg_types;
4898 prev_inlined_method = cfg->inlined_method;
4899 cfg->inlined_method = cmethod;
4900 cfg->ret_var_set = FALSE;
4901 cfg->inline_depth ++;
4902 prev_real_offset = cfg->real_offset;
4903 prev_cbb_hash = cfg->cbb_hash;
4904 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
4905 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
4906 prev_cil_start = cfg->cil_start;
4907 prev_cbb = cfg->cbb;
4908 prev_current_method = cfg->current_method;
4909 prev_generic_context = cfg->generic_context;
4910 prev_ret_var_set = cfg->ret_var_set;
4912 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, *ip == CEE_CALLVIRT);
4914 ret_var_set = cfg->ret_var_set;
/* restore the saved CFG state */
4916 cfg->inlined_method = prev_inlined_method;
4917 cfg->real_offset = prev_real_offset;
4918 cfg->cbb_hash = prev_cbb_hash;
4919 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
4920 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
4921 cfg->cil_start = prev_cil_start;
4922 cfg->locals = prev_locals;
4923 cfg->args = prev_args;
4924 cfg->arg_types = prev_arg_types;
4925 cfg->current_method = prev_current_method;
4926 cfg->generic_context = prev_generic_context;
4927 cfg->ret_var_set = prev_ret_var_set;
4928 cfg->inline_depth --;
4930 if ((costs >= 0 && costs < 60) || inline_always) {
4931 if (cfg->verbose_level > 2)
4932 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4934 mono_jit_stats.inlined_methods++;
4936 /* always add some code to avoid block split failures */
4937 MONO_INST_NEW (cfg, ins, OP_NOP);
4938 MONO_ADD_INS (prev_cbb, ins);
4940 prev_cbb->next_bb = sbblock;
4941 link_bblock (cfg, prev_cbb, sbblock);
4944 * Get rid of the begin and end bblocks if possible to aid local
4947 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
4949 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
4950 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
4952 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
4953 MonoBasicBlock *prev = ebblock->in_bb [0];
4954 mono_merge_basic_blocks (cfg, prev, ebblock);
4956 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
4957 mono_merge_basic_blocks (cfg, prev_cbb, prev);
4958 cfg->cbb = prev_cbb;
4966 * If the inlined method contains only a throw, then the ret var is not
4967 * set, so set it to a dummy value.
4970 static double r8_0 = 0.0;
4972 switch (rvar->type) {
4974 MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
4977 MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
4982 MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
4985 MONO_INST_NEW (cfg, ins, OP_R8CONST);
4986 ins->type = STACK_R8;
4987 ins->inst_p0 = (void*)&r8_0;
4988 ins->dreg = rvar->dreg;
4989 MONO_ADD_INS (cfg->cbb, ins);
4992 MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (fsig->ret));
4995 g_assert_not_reached ();
4999 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
5002 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
/* inline aborted: cost too high or an exception was recorded */
5005 if (cfg->verbose_level > 2)
5006 printf ("INLINE ABORTED %s\n", mono_method_full_name (cmethod, TRUE));
5007 cfg->exception_type = MONO_EXCEPTION_NONE;
5008 mono_loader_clear_error ();
5010 /* This gets rid of the newly added bblocks */
5011 cfg->cbb = prev_cbb;
5013 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
5018 * Some of these comments may well be out-of-date.
5019 * Design decisions: we do a single pass over the IL code (and we do bblock
5020 * splitting/merging in the few cases when it's required: a back jump to an IL
5021 * address that was not already seen as bblock starting point).
5022 * Code is validated as we go (full verification is still better left to metadata/verify.c).
5023 * Complex operations are decomposed in simpler ones right away. We need to let the
5024 * arch-specific code peek and poke inside this process somehow (except when the
5025 * optimizations can take advantage of the full semantic info of coarse opcodes).
5026 * All the opcodes of the form opcode.s are 'normalized' to opcode.
5027 * MonoInst->opcode initially is the IL opcode or some simplification of that
5028 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
5029 * opcode with value bigger than OP_LAST.
5030 * At this point the IR can be handed over to an interpreter, a dumb code generator
5031 * or to the optimizing code generator that will translate it to SSA form.
5033 * Profiling directed optimizations.
5034 * We may compile by default with few or no optimizations and instrument the code
5035 * or the user may indicate what methods to optimize the most either in a config file
5036 * or through repeated runs where the compiler applies offline the optimizations to
5037 * each method and then decides if it was worth it.
/*
 * Validation helpers used by mono_method_to_ir: each macro bails out via
 * UNVERIFIED (or LOAD_ERROR) when the IL being translated is malformed.
 * They rely on locals of mono_method_to_ir (sp, stack_start, header,
 * num_args, ip, end, cfg) being in scope at the expansion site.
 */
5040 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
5041 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
5042 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
5043 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
5044 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
5045 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
5046 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
5047 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; LOAD_ERROR;}
5049 /* offset from br.s -> br like opcodes */
5050 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 * Returns whether the IL address IP falls inside basic block BB, i.e. no
 * *different* bblock starts at that offset in cfg->cil_offset_to_bb.
 */
5053 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
5055 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
5057 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 * First pass over the IL in [start, end): decode each opcode's argument
 * kind and create basic blocks (via GET_BBLOCK) at every branch target and
 * at the instruction following a branch/switch. Also marks the bblock
 * containing a CEE_THROW as out_of_line so it can be moved out of the hot
 * path. NOTE(review): several lines (loop header, ip advancement, some
 * cases) are elided in this extract; structure inferred from what is
 * visible — verify against the full source.
 */
5061 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
5063 unsigned char *ip = start;
5064 unsigned char *target;
5067 MonoBasicBlock *bblock;
5068 const MonoOpcode *opcode;
5071 cli_addr = ip - start;
5072 i = mono_opcode_value ((const guint8 **)&ip, end);
5075 opcode = &mono_opcodes [i];
5076 switch (opcode->argument) {
5077 case MonoInlineNone:
5080 case MonoInlineString:
5081 case MonoInlineType:
5082 case MonoInlineField:
5083 case MonoInlineMethod:
5086 case MonoShortInlineR:
5093 case MonoShortInlineVar:
5094 case MonoShortInlineI:
5097 case MonoShortInlineBrTarget:
/* 1-byte signed branch displacement, relative to the next instruction */
5098 target = start + cli_addr + 2 + (signed char)ip [1];
5099 GET_BBLOCK (cfg, bblock, target);
5102 GET_BBLOCK (cfg, bblock, ip);
5104 case MonoInlineBrTarget:
/* 4-byte signed branch displacement, relative to the next instruction */
5105 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
5106 GET_BBLOCK (cfg, bblock, target);
5109 GET_BBLOCK (cfg, bblock, ip);
5111 case MonoInlineSwitch: {
5112 guint32 n = read32 (ip + 1);
/* switch targets are relative to the end of the whole switch instruction */
5115 cli_addr += 5 + 4 * n;
5116 target = start + cli_addr;
5117 GET_BBLOCK (cfg, bblock, target);
5119 for (j = 0; j < n; ++j) {
5120 target = start + cli_addr + (gint32)read32 (ip);
5121 GET_BBLOCK (cfg, bblock, target);
5131 g_assert_not_reached ();
5134 if (i == CEE_THROW) {
5135 unsigned char *bb_start = ip - 1;
5137 /* Find the start of the bblock containing the throw */
5139 while ((bb_start >= start) && !bblock) {
5140 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
5144 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 * Resolve TOKEN to a MonoMethod. Wrapper methods store their referenced
 * methods as wrapper data rather than metadata tokens, so that path is
 * taken first; otherwise the token is looked up in M's image. "Allow open"
 * because (unlike mini_get_method) no open-constructed-type check is done.
 */
5153 static inline MonoMethod *
5154 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
5158 if (m->wrapper_type != MONO_WRAPPER_NONE)
5159 return mono_method_get_wrapper_data (m, token);
5161 method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 * Like mini_get_method_allow_open, but when not compiling with generic
 * sharing, a method on an open constructed type is rejected (the visible
 * condition guards the rejection path; the action itself is elided here).
 */
5166 static inline MonoMethod *
5167 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
5169 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
5171 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 * Resolve TOKEN to a MonoClass: wrapper methods carry the class as wrapper
 * data, otherwise the token is resolved against the method's image with the
 * given generic context. The class is initialized before returning.
 */
5177 static inline MonoClass*
5178 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
5182 if (method->wrapper_type != MONO_WRAPPER_NONE)
5183 klass = mono_method_get_wrapper_data (method, token);
5185 klass = mono_class_get_full (method->klass->image, token, context);
5187 mono_class_init (klass);
/*
 * check_linkdemand:
 * CAS security: when inlining (cfg->method != caller) and the callee has
 * declarative security, evaluate the linkdemand. On an ECMA linkdemand,
 * emit a call to the security manager that throws a SecurityException at
 * the call site; for other failures record MONO_EXCEPTION_SECURITY_LINKDEMAND
 * on the cfg (without overwriting an earlier exception).
 * NOTE(review): success/return paths are partially elided in this extract.
 */
5192 * Returns TRUE if the JIT should abort inlining because "callee"
5193 * is influenced by security attributes.
5196 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
5200 if ((cfg->method != caller) && mono_method_has_declsec (callee)) {
5204 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
5205 if (result == MONO_JIT_SECURITY_OK)
5208 if (result == MONO_JIT_LINKDEMAND_ECMA) {
5209 /* Generate code to throw a SecurityException before the actual call/link */
5210 MonoSecurityManager *secman = mono_security_manager_get_methods ();
5213 NEW_ICONST (cfg, args [0], 4);
5214 NEW_METHODCONST (cfg, args [1], caller);
5215 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
5216 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
5217 /* don't hide previous results */
5218 mono_cfg_set_exception (cfg, MONO_EXCEPTION_SECURITY_LINKDEMAND);
5219 cfg->exception_data = result;
/*
 * throw_exception:
 * Return (lazily resolving and caching in a static) the security manager's
 * SecurityManager.ThrowException (1-arg) method, used by
 * emit_throw_exception below.
 */
5227 throw_exception (void)
5229 static MonoMethod *method = NULL;
5232 MonoSecurityManager *secman = mono_security_manager_get_methods ();
5233 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 * Emit IR that throws the pre-constructed exception object EX at runtime,
 * by calling SecurityManager.ThrowException with EX as a pointer constant.
 */
5240 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
5242 MonoMethod *thrower = throw_exception ();
5245 EMIT_NEW_PCONST (cfg, args [0], ex);
5246 mono_emit_method_call (cfg, thrower, args, NULL);
/*
 * get_original_method:
 * Used by the CoreCLR security checks below, which need custom attributes
 * that only exist on the original (non-wrapper) method.
 */
5250 * Return the original method if a wrapper is specified. We can only access
5251 * the custom attributes from the original method.
5254 get_original_method (MonoMethod *method)
5256 if (method->wrapper_type == MONO_WRAPPER_NONE)
5259 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
5260 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
5263 /* in other cases we need to find the original method */
5264 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 * CoreCLR security: if accessing FIELD from CALLER is not allowed, emit
 * code that throws the returned exception at the access site.
 */
5268 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
5269 MonoBasicBlock *bblock, unsigned char *ip)
5271 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
5272 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
5274 emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 * CoreCLR security: if calling CALLEE from CALLER is not allowed, emit
 * code that throws the returned exception at the call site.
 */
5278 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
5279 MonoBasicBlock *bblock, unsigned char *ip)
5281 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
5282 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
5284 emit_throw_exception (cfg, ex);
/*
 * initialize_array_data:
 * Recognize the dup/ldtoken/call RuntimeHelpers::InitializeArray IL pattern
 * following a newarr and, if the element type is blittable on this byte
 * order, return a pointer to the static field's RVA data (or, for AOT, the
 * RVA itself) so the array can be initialized with a memcpy instead of the
 * runtime call. NOTE(review): several validation/size-computation lines are
 * elided in this extract; verify details against the full source.
 */
5288 * Check that the IL instructions at ip are the array initialization
5289 * sequence and return the pointer to the data and the size.
5292 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
5295 * newarr[System.Int32]
5297 * ldtoken field valuetype ...
5298 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
5300 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
5301 guint32 token = read32 (ip + 7);
5302 guint32 field_token = read32 (ip + 2);
5303 guint32 field_index = field_token & 0xffffff;
5305 const char *data_ptr;
5307 MonoMethod *cmethod;
5308 MonoClass *dummy_class;
5309 MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
5315 *out_field_token = field_token;
5317 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* only optimize the real corlib RuntimeHelpers.InitializeArray */
5320 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
5322 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
5323 case MONO_TYPE_BOOLEAN:
5327 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
5328 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
5329 case MONO_TYPE_CHAR:
5339 return NULL; /* stupid ARM FP swapped format */
5349 if (size > mono_type_size (field->type, &dummy_align))
5352 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
5353 if (!method->klass->image->dynamic) {
5354 field_index = read32 (ip + 2) & 0xffffff;
5355 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
5356 data_ptr = mono_image_rva_map (method->klass->image, rva);
5357 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
5358 /* for aot code we do the lookup on load */
5359 if (aot && data_ptr)
5360 return GUINT_TO_POINTER (rva);
5362 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
5364 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 * Record MONO_EXCEPTION_INVALID_PROGRAM on CFG with a message naming the
 * method and disassembling the offending instruction at IP (or noting an
 * empty body). The header is queued on headers_to_free for later release.
 */
5372 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
5374 char *method_fname = mono_method_full_name (method, TRUE);
5376 MonoMethodHeader *header = mono_method_get_header (method);
5378 if (header->code_size == 0)
5379 method_code = g_strdup ("method body is empty.");
5381 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
5382 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
5383 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
5384 g_free (method_fname);
5385 g_free (method_code);
5386 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * set_exception_object:
 * Record a pre-built exception object on CFG (OBJECT_SUPPLIED), registering
 * cfg->exception_ptr as a GC root so the object survives until rethrow.
 */
5390 set_exception_object (MonoCompile *cfg, MonoException *exception)
5392 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OBJECT_SUPPLIED);
5393 MONO_GC_REGISTER_ROOT_SINGLE (cfg->exception_ptr);
5394 cfg->exception_ptr = exception;
/*
 * generic_class_is_reference_type:
 * Whether KLASS is a reference type, resolving through the generic sharing
 * context first so shared type variables are judged by their basic type.
 */
5398 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
5402 if (cfg->generic_sharing_context)
5403 type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, &klass->byval_arg);
5405 type = &klass->byval_arg;
5406 return MONO_TYPE_IS_REFERENCE (type);
/*
 * emit_stloc_ir:
 * Emit a store of *sp into local N. When the value on the stack is the
 * immediately preceding ICONST/I8CONST and a plain register move would be
 * emitted, the move is elided by retargeting that instruction's dreg to the
 * local's register instead.
 */
5410 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
5413 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
5414 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
5415 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
5416 /* Optimize reg-reg moves away */
5418 * Can't optimize other opcodes, since sp[0] might point to
5419 * the last ins of a decomposed opcode.
5421 sp [0]->dreg = (cfg)->locals [n]->dreg;
5423 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
/*
 * emit_optimized_ldloca_ir:
 * Peephole: when a ldloca is immediately consumed by an initobj in the same
 * bblock, skip taking the address and instead zero the local directly
 * (PCONST NULL for reference types, VZERO for value types).
 * NOTE(review): the return-value handling and the short-form decoding path
 * are elided in this extract; verify against the full source.
 */
5428 * ldloca inhibits many optimizations so try to get rid of it in common
5431 static inline unsigned char *
5432 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
5441 local = read16 (ip + 2);
5445 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
5446 gboolean skip = FALSE;
5448 /* From the INITOBJ case */
5449 token = read32 (ip + 2);
5450 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
5451 CHECK_TYPELOAD (klass);
5452 if (generic_class_is_reference_type (cfg, klass)) {
5453 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5454 } else if (MONO_TYPE_IS_REFERENCE (&klass->byval_arg)) {
5455 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5456 } else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
5457 MONO_EMIT_NEW_VZERO (cfg, cfg->locals [local]->dreg, klass);
/*
 * is_exception_class:
 * Walk the parent chain to determine whether CLASS derives from (or is)
 * System.Exception. (Loop body partially elided in this extract.)
 */
5470 is_exception_class (MonoClass *class)
5473 if (class == mono_defaults.exception_class)
5475 class = class->parent;
/*
 * NOTE(review): this extract elides some lines of this function (loop
 * increments, `continue`s, attribute-argument decoding); comments describe
 * only the visible structure.
 */
5481 * is_jit_optimizer_disabled:
5483 * Determine whether M's assembly has a DebuggableAttribute with the
5484 * IsJITOptimizerDisabled flag set.
5487 is_jit_optimizer_disabled (MonoMethod *m)
5489 MonoAssembly *ass = m->klass->image->assembly;
5490 MonoCustomAttrInfo* attrs;
5491 static MonoClass *klass;
5493 gboolean val = FALSE;
/* result is cached per assembly; the inited flag is published after the
 * value with a memory barrier so racing readers never see a stale value */
5496 if (ass->jit_optimizer_disabled_inited)
5497 return ass->jit_optimizer_disabled;
5500 klass = mono_class_from_name (mono_defaults.corlib, "System.Diagnostics", "DebuggableAttribute");
5503 ass->jit_optimizer_disabled = FALSE;
5504 mono_memory_barrier ();
5505 ass->jit_optimizer_disabled_inited = TRUE;
5509 attrs = mono_custom_attrs_from_assembly (ass);
5511 for (i = 0; i < attrs->num_attrs; ++i) {
5512 MonoCustomAttrEntry *attr = &attrs->attrs [i];
5515 MonoMethodSignature *sig;
5517 if (!attr->ctor || attr->ctor->klass != klass)
5519 /* Decode the attribute. See reflection.c */
5520 len = attr->data_size;
5521 p = (const char*)attr->data;
5522 g_assert (read16 (p) == 0x0001);
5525 // FIXME: Support named parameters
5526 sig = mono_method_signature (attr->ctor);
5527 if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
5529 /* Two boolean arguments */
5533 mono_custom_attrs_free (attrs);
5536 ass->jit_optimizer_disabled = val;
5537 mono_memory_barrier ();
5538 ass->jit_optimizer_disabled_inited = TRUE;
/*
 * is_supported_tail_call:
 * Decide whether a tail. call from METHOD to CMETHOD can actually be
 * compiled as a tail call. Starts from an arch-specific or
 * signature-equality baseline, then vetoes the cases where the callee
 * could observe the caller's (about-to-be-unwound) stack frame, plus
 * pinvokes, save_lmf callers, and most wrappers. The COUNT env var block
 * is a bisection aid for debugging tail-call failures.
 * NOTE(review): a few lines are elided in this extract.
 */
5544 is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig)
5546 gboolean supported_tail_call;
5549 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
5550 supported_tail_call = MONO_ARCH_USE_OP_TAIL_CALL (mono_method_signature (method), mono_method_signature (cmethod));
5552 supported_tail_call = mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
5555 for (i = 0; i < fsig->param_count; ++i) {
5556 if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
5557 /* These can point to the current method's stack */
5558 supported_tail_call = FALSE;
5560 if (fsig->hasthis && cmethod->klass->valuetype)
5561 /* this might point to the current method's stack */
5562 supported_tail_call = FALSE;
5563 if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
5564 supported_tail_call = FALSE;
5565 if (cfg->method->save_lmf)
5566 supported_tail_call = FALSE;
5567 if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
5568 supported_tail_call = FALSE;
5570 /* Debugging support */
5572 if (supported_tail_call) {
5573 static int count = 0;
5575 if (getenv ("COUNT")) {
5576 if (count == atoi (getenv ("COUNT")))
5577 printf ("LAST: %s\n", mono_method_full_name (cmethod, TRUE));
5578 if (count > atoi (getenv ("COUNT")))
5579 supported_tail_call = FALSE;
5584 return supported_tail_call;
5588 * mono_method_to_ir:
5590 * Translate the .net IL into linear IR.
5593 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
5594 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
5595 guint inline_offset, gboolean is_virtual_call)
5598 MonoInst *ins, **sp, **stack_start;
5599 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
5600 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
5601 MonoMethod *cmethod, *method_definition;
5602 MonoInst **arg_array;
5603 MonoMethodHeader *header;
5605 guint32 token, ins_flag;
5607 MonoClass *constrained_call = NULL;
5608 unsigned char *ip, *end, *target, *err_pos;
5609 static double r8_0 = 0.0;
5610 MonoMethodSignature *sig;
5611 MonoGenericContext *generic_context = NULL;
5612 MonoGenericContainer *generic_container = NULL;
5613 MonoType **param_types;
5614 int i, n, start_new_bblock, dreg;
5615 int num_calls = 0, inline_costs = 0;
5616 int breakpoint_id = 0;
5618 MonoBoolean security, pinvoke;
5619 MonoSecurityManager* secman = NULL;
5620 MonoDeclSecurityActions actions;
5621 GSList *class_inits = NULL;
5622 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
5624 gboolean init_locals, seq_points, skip_dead_blocks;
5625 gboolean disable_inline;
5627 disable_inline = is_jit_optimizer_disabled (method);
5629 /* serialization and xdomain stuff may need access to private fields and methods */
5630 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
5631 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
5632 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
5633 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
5634 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
5635 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
5637 dont_verify |= mono_security_get_mode () == MONO_SECURITY_MODE_SMCS_HACK;
5639 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
5640 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
5641 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
5642 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
5643 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
5645 image = method->klass->image;
5646 header = mono_method_get_header (method);
5648 MonoLoaderError *error;
5650 if ((error = mono_loader_get_last_error ())) {
5651 mono_cfg_set_exception (cfg, error->exception_type);
5653 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
5654 cfg->exception_message = g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name);
5656 goto exception_exit;
5658 generic_container = mono_method_get_generic_container (method);
5659 sig = mono_method_signature (method);
5660 num_args = sig->hasthis + sig->param_count;
5661 ip = (unsigned char*)header->code;
5662 cfg->cil_start = ip;
5663 end = ip + header->code_size;
5664 mono_jit_stats.cil_code_size += header->code_size;
5665 init_locals = header->init_locals;
5667 seq_points = cfg->gen_seq_points && cfg->method == method;
5670 * Methods without init_locals set could cause asserts in various passes
5675 method_definition = method;
5676 while (method_definition->is_inflated) {
5677 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
5678 method_definition = imethod->declaring;
5681 /* SkipVerification is not allowed if core-clr is enabled */
5682 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
5684 dont_verify_stloc = TRUE;
5687 if (mono_debug_using_mono_debugger ())
5688 cfg->keep_cil_nops = TRUE;
5690 if (sig->is_inflated)
5691 generic_context = mono_method_get_context (method);
5692 else if (generic_container)
5693 generic_context = &generic_container->context;
5694 cfg->generic_context = generic_context;
5696 if (!cfg->generic_sharing_context)
5697 g_assert (!sig->has_type_parameters);
5699 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
5700 g_assert (method->is_inflated);
5701 g_assert (mono_method_get_context (method)->method_inst);
5703 if (method->is_inflated && mono_method_get_context (method)->method_inst)
5704 g_assert (sig->generic_param_count);
5706 if (cfg->method == method) {
5707 cfg->real_offset = 0;
5709 cfg->real_offset = inline_offset;
5712 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
5713 cfg->cil_offset_to_bb_len = header->code_size;
5715 cfg->current_method = method;
5717 if (cfg->verbose_level > 2)
5718 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
5720 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
5722 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
5723 for (n = 0; n < sig->param_count; ++n)
5724 param_types [n + sig->hasthis] = sig->params [n];
5725 cfg->arg_types = param_types;
5727 dont_inline = g_list_prepend (dont_inline, method);
5728 if (cfg->method == method) {
5730 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
5731 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
5734 NEW_BBLOCK (cfg, start_bblock);
5735 cfg->bb_entry = start_bblock;
5736 start_bblock->cil_code = NULL;
5737 start_bblock->cil_length = 0;
5738 #if defined(__native_client_codegen__)
5739 MONO_INST_NEW (cfg, ins, OP_NACL_GC_SAFE_POINT);
5740 ins->dreg = alloc_dreg (cfg, STACK_I4);
5741 MONO_ADD_INS (start_bblock, ins);
5745 NEW_BBLOCK (cfg, end_bblock);
5746 cfg->bb_exit = end_bblock;
5747 end_bblock->cil_code = NULL;
5748 end_bblock->cil_length = 0;
5749 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
5750 g_assert (cfg->num_bblocks == 2);
5752 arg_array = cfg->args;
5754 if (header->num_clauses) {
5755 cfg->spvars = g_hash_table_new (NULL, NULL);
5756 cfg->exvars = g_hash_table_new (NULL, NULL);
5758 /* handle exception clauses */
5759 for (i = 0; i < header->num_clauses; ++i) {
5760 MonoBasicBlock *try_bb;
5761 MonoExceptionClause *clause = &header->clauses [i];
5762 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
5763 try_bb->real_offset = clause->try_offset;
5764 try_bb->try_start = TRUE;
5765 try_bb->region = ((i + 1) << 8) | clause->flags;
5766 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
5767 tblock->real_offset = clause->handler_offset;
5768 tblock->flags |= BB_EXCEPTION_HANDLER;
5770 link_bblock (cfg, try_bb, tblock);
5772 if (*(ip + clause->handler_offset) == CEE_POP)
5773 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
5775 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
5776 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
5777 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
5778 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5779 MONO_ADD_INS (tblock, ins);
5782 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
5783 MONO_ADD_INS (tblock, ins);
5786 /* todo: is a fault block unsafe to optimize? */
5787 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
5788 tblock->flags |= BB_EXCEPTION_UNSAFE;
5792 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
5794 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
5796 /* catch and filter blocks get the exception object on the stack */
5797 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
5798 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5799 MonoInst *dummy_use;
5801 /* mostly like handle_stack_args (), but just sets the input args */
5802 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
5803 tblock->in_scount = 1;
5804 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5805 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5808 * Add a dummy use for the exvar so its liveness info will be
5812 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
5814 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5815 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
5816 tblock->flags |= BB_EXCEPTION_HANDLER;
5817 tblock->real_offset = clause->data.filter_offset;
5818 tblock->in_scount = 1;
5819 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5820 /* The filter block shares the exvar with the handler block */
5821 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5822 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5823 MONO_ADD_INS (tblock, ins);
5827 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
5828 clause->data.catch_class &&
5829 cfg->generic_sharing_context &&
5830 mono_class_check_context_used (clause->data.catch_class)) {
5832 * In shared generic code with catch
5833 * clauses containing type variables
5834 * the exception handling code has to
5835 * be able to get to the rgctx.
5836 * Therefore we have to make sure that
5837 * the vtable/mrgctx argument (for
5838 * static or generic methods) or the
5839 * "this" argument (for non-static
5840 * methods) are live.
5842 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5843 mini_method_get_context (method)->method_inst ||
5844 method->klass->valuetype) {
5845 mono_get_vtable_var (cfg);
5847 MonoInst *dummy_use;
5849 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
5854 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
5855 cfg->cbb = start_bblock;
5856 cfg->args = arg_array;
5857 mono_save_args (cfg, sig, inline_args);
5860 /* FIRST CODE BLOCK */
5861 NEW_BBLOCK (cfg, bblock);
5862 bblock->cil_code = ip;
5866 ADD_BBLOCK (cfg, bblock);
5868 if (cfg->method == method) {
5869 breakpoint_id = mono_debugger_method_has_breakpoint (method);
5870 if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
5871 MONO_INST_NEW (cfg, ins, OP_BREAK);
5872 MONO_ADD_INS (bblock, ins);
5876 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5877 secman = mono_security_manager_get_methods ();
5879 security = (secman && mono_method_has_declsec (method));
5880 /* at this point having security doesn't mean we have any code to generate */
5881 if (security && (cfg->method == method)) {
5882 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
5883 * And we do not want to enter the next section (with allocation) if we
5884 * have nothing to generate */
5885 security = mono_declsec_get_demands (method, &actions);
5888 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
5889 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
5891 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5892 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5893 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
5895 /* unless the method or its class has the [SuppressUnmanagedCodeSecurity] attribute */
5896 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5900 mono_custom_attrs_free (custom);
5903 custom = mono_custom_attrs_from_class (wrapped->klass);
5904 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5908 mono_custom_attrs_free (custom);
5911 /* not a P/Invoke after all */
5916 if ((init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
5917 /* we use a separate basic block for the initialization code */
5918 NEW_BBLOCK (cfg, init_localsbb);
5919 cfg->bb_init = init_localsbb;
5920 init_localsbb->real_offset = cfg->real_offset;
5921 start_bblock->next_bb = init_localsbb;
5922 init_localsbb->next_bb = bblock;
5923 link_bblock (cfg, start_bblock, init_localsbb);
5924 link_bblock (cfg, init_localsbb, bblock);
5926 cfg->cbb = init_localsbb;
5928 start_bblock->next_bb = bblock;
5929 link_bblock (cfg, start_bblock, bblock);
5932 /* at this point we know, if security is TRUE, that some code needs to be generated */
5933 if (security && (cfg->method == method)) {
5936 mono_jit_stats.cas_demand_generation++;
5938 if (actions.demand.blob) {
5939 /* Add code for SecurityAction.Demand */
5940 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
5941 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
5942 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5943 mono_emit_method_call (cfg, secman->demand, args, NULL);
5945 if (actions.noncasdemand.blob) {
5946 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
5947 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
5948 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
5949 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
5950 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5951 mono_emit_method_call (cfg, secman->demand, args, NULL);
5953 if (actions.demandchoice.blob) {
5954 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
5955 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
5956 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
5957 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
5958 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
5962 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
5964 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
5967 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
5968 /* check if this is native code, e.g. an icall or a p/invoke */
5969 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
5970 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5972 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
5973 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
5975 /* if this is a native call then it can only be JITted from platform code */
5976 if ((icall || pinvk) && method->klass && method->klass->image) {
5977 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
5978 MonoException *ex = icall ? mono_get_exception_security () :
5979 mono_get_exception_method_access ();
5980 emit_throw_exception (cfg, ex);
5987 if (header->code_size == 0)
5990 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
5995 if (cfg->method == method)
5996 mono_debug_init_method (cfg, bblock, breakpoint_id);
5998 for (n = 0; n < header->num_locals; ++n) {
5999 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
6004 /* We force the vtable variable here for all shared methods
6005 for the possibility that they might show up in a stack
6006 trace where their exact instantiation is needed. */
6007 if (cfg->generic_sharing_context && method == cfg->method) {
6008 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
6009 mini_method_get_context (method)->method_inst ||
6010 method->klass->valuetype) {
6011 mono_get_vtable_var (cfg);
6013 /* FIXME: Is there a better way to do this?
6014 We need the variable live for the duration
6015 of the whole method. */
6016 cfg->args [0]->flags |= MONO_INST_INDIRECT;
6020 /* add a check for this != NULL to inlined methods */
6021 if (is_virtual_call) {
6024 NEW_ARGLOAD (cfg, arg_ins, 0);
6025 MONO_ADD_INS (cfg->cbb, arg_ins);
6026 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
6029 skip_dead_blocks = !dont_verify;
6030 if (skip_dead_blocks) {
6031 original_bb = bb = mono_basic_block_split (method, &error);
6032 if (!mono_error_ok (&error)) {
6033 mono_error_cleanup (&error);
6039 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
6040 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
6043 start_new_bblock = 0;
6046 if (cfg->method == method)
6047 cfg->real_offset = ip - header->code;
6049 cfg->real_offset = inline_offset;
6054 if (start_new_bblock) {
6055 bblock->cil_length = ip - bblock->cil_code;
6056 if (start_new_bblock == 2) {
6057 g_assert (ip == tblock->cil_code);
6059 GET_BBLOCK (cfg, tblock, ip);
6061 bblock->next_bb = tblock;
6064 start_new_bblock = 0;
6065 for (i = 0; i < bblock->in_scount; ++i) {
6066 if (cfg->verbose_level > 3)
6067 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
6068 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
6072 g_slist_free (class_inits);
6075 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
6076 link_bblock (cfg, bblock, tblock);
6077 if (sp != stack_start) {
6078 handle_stack_args (cfg, stack_start, sp - stack_start);
6080 CHECK_UNVERIFIABLE (cfg);
6082 bblock->next_bb = tblock;
6085 for (i = 0; i < bblock->in_scount; ++i) {
6086 if (cfg->verbose_level > 3)
6087 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
6088 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
6091 g_slist_free (class_inits);
6096 if (skip_dead_blocks) {
6097 int ip_offset = ip - header->code;
6099 if (ip_offset == bb->end)
6103 int op_size = mono_opcode_size (ip, end);
6104 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
6106 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
6108 if (ip_offset + op_size == bb->end) {
6109 MONO_INST_NEW (cfg, ins, OP_NOP);
6110 MONO_ADD_INS (bblock, ins);
6111 start_new_bblock = 1;
6119 * Sequence points are points where the debugger can place a breakpoint.
6120 * Currently, we generate these automatically at points where the IL
6123 if (seq_points && sp == stack_start) {
6124 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
6125 MONO_ADD_INS (cfg->cbb, ins);
6128 bblock->real_offset = cfg->real_offset;
6130 if ((cfg->method == method) && cfg->coverage_info) {
6131 guint32 cil_offset = ip - header->code;
6132 cfg->coverage_info->data [cil_offset].cil_code = ip;
6134 /* TODO: Use an increment here */
6135 #if defined(TARGET_X86)
6136 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
6137 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
6139 MONO_ADD_INS (cfg->cbb, ins);
6141 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
6142 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
6146 if (cfg->verbose_level > 3)
6147 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
6151 if (cfg->keep_cil_nops)
6152 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
6154 MONO_INST_NEW (cfg, ins, OP_NOP);
6156 MONO_ADD_INS (bblock, ins);
6159 if (should_insert_brekpoint (cfg->method))
6160 MONO_INST_NEW (cfg, ins, OP_BREAK);
6162 MONO_INST_NEW (cfg, ins, OP_NOP);
6164 MONO_ADD_INS (bblock, ins);
6170 CHECK_STACK_OVF (1);
6171 n = (*ip)-CEE_LDARG_0;
6173 EMIT_NEW_ARGLOAD (cfg, ins, n);
6181 CHECK_STACK_OVF (1);
6182 n = (*ip)-CEE_LDLOC_0;
6184 EMIT_NEW_LOCLOAD (cfg, ins, n);
6193 n = (*ip)-CEE_STLOC_0;
6196 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
6198 emit_stloc_ir (cfg, sp, header, n);
6205 CHECK_STACK_OVF (1);
6208 EMIT_NEW_ARGLOAD (cfg, ins, n);
6214 CHECK_STACK_OVF (1);
6217 NEW_ARGLOADA (cfg, ins, n);
6218 MONO_ADD_INS (cfg->cbb, ins);
6228 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
6230 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
6235 CHECK_STACK_OVF (1);
6238 EMIT_NEW_LOCLOAD (cfg, ins, n);
6242 case CEE_LDLOCA_S: {
6243 unsigned char *tmp_ip;
6245 CHECK_STACK_OVF (1);
6246 CHECK_LOCAL (ip [1]);
6248 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
6254 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
6263 CHECK_LOCAL (ip [1]);
6264 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
6266 emit_stloc_ir (cfg, sp, header, ip [1]);
6271 CHECK_STACK_OVF (1);
6272 EMIT_NEW_PCONST (cfg, ins, NULL);
6273 ins->type = STACK_OBJ;
6278 CHECK_STACK_OVF (1);
6279 EMIT_NEW_ICONST (cfg, ins, -1);
6292 CHECK_STACK_OVF (1);
6293 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
6299 CHECK_STACK_OVF (1);
6301 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
6307 CHECK_STACK_OVF (1);
6308 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
6314 CHECK_STACK_OVF (1);
6315 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6316 ins->type = STACK_I8;
6317 ins->dreg = alloc_dreg (cfg, STACK_I8);
6319 ins->inst_l = (gint64)read64 (ip);
6320 MONO_ADD_INS (bblock, ins);
6326 gboolean use_aotconst = FALSE;
6328 #ifdef TARGET_POWERPC
6329 /* FIXME: Clean this up */
6330 if (cfg->compile_aot)
6331 use_aotconst = TRUE;
6334 /* FIXME: we should really allocate this only late in the compilation process */
6335 f = mono_domain_alloc (cfg->domain, sizeof (float));
6337 CHECK_STACK_OVF (1);
6343 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
6345 dreg = alloc_freg (cfg);
6346 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
6347 ins->type = STACK_R8;
6349 MONO_INST_NEW (cfg, ins, OP_R4CONST);
6350 ins->type = STACK_R8;
6351 ins->dreg = alloc_dreg (cfg, STACK_R8);
6353 MONO_ADD_INS (bblock, ins);
6363 gboolean use_aotconst = FALSE;
6365 #ifdef TARGET_POWERPC
6366 /* FIXME: Clean this up */
6367 if (cfg->compile_aot)
6368 use_aotconst = TRUE;
6371 /* FIXME: we should really allocate this only late in the compilation process */
6372 d = mono_domain_alloc (cfg->domain, sizeof (double));
6374 CHECK_STACK_OVF (1);
6380 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
6382 dreg = alloc_freg (cfg);
6383 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
6384 ins->type = STACK_R8;
6386 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6387 ins->type = STACK_R8;
6388 ins->dreg = alloc_dreg (cfg, STACK_R8);
6390 MONO_ADD_INS (bblock, ins);
6399 MonoInst *temp, *store;
6401 CHECK_STACK_OVF (1);
6405 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
6406 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
6408 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6411 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6424 if (sp [0]->type == STACK_R8)
6425 /* we need to pop the value from the x86 FP stack */
6426 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
6435 if (stack_start != sp)
6437 token = read32 (ip + 1);
6438 /* FIXME: check the signature matches */
6439 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6441 if (!cmethod || mono_loader_get_last_error ())
6444 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
6445 GENERIC_SHARING_FAILURE (CEE_JMP);
6447 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
6448 CHECK_CFG_EXCEPTION;
6450 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6452 MonoMethodSignature *fsig = mono_method_signature (cmethod);
6455 /* Handle tail calls similarly to calls */
6456 n = fsig->param_count + fsig->hasthis;
6458 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
6459 call->method = cmethod;
6460 call->tail_call = TRUE;
6461 call->signature = mono_method_signature (cmethod);
6462 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
6463 call->inst.inst_p0 = cmethod;
6464 for (i = 0; i < n; ++i)
6465 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
6467 mono_arch_emit_call (cfg, call);
6468 MONO_ADD_INS (bblock, (MonoInst*)call);
6471 for (i = 0; i < num_args; ++i)
6472 /* Prevent arguments from being optimized away */
6473 arg_array [i]->flags |= MONO_INST_VOLATILE;
6475 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6476 ins = (MonoInst*)call;
6477 ins->inst_p0 = cmethod;
6478 MONO_ADD_INS (bblock, ins);
6482 start_new_bblock = 1;
6487 case CEE_CALLVIRT: {
6488 MonoInst *addr = NULL;
6489 MonoMethodSignature *fsig = NULL;
6491 int virtual = *ip == CEE_CALLVIRT;
6492 int calli = *ip == CEE_CALLI;
6493 gboolean pass_imt_from_rgctx = FALSE;
6494 MonoInst *imt_arg = NULL;
6495 gboolean pass_vtable = FALSE;
6496 gboolean pass_mrgctx = FALSE;
6497 MonoInst *vtable_arg = NULL;
6498 gboolean check_this = FALSE;
6499 gboolean supported_tail_call = FALSE;
6502 token = read32 (ip + 1);
6509 if (method->wrapper_type != MONO_WRAPPER_NONE)
6510 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
6512 fsig = mono_metadata_parse_signature (image, token);
6514 n = fsig->param_count + fsig->hasthis;
6516 if (method->dynamic && fsig->pinvoke) {
6520 * This is a call through a function pointer using a pinvoke
6521 * signature. Have to create a wrapper and call that instead.
6522 * FIXME: This is very slow, need to create a wrapper at JIT time
6523 * instead based on the signature.
6525 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
6526 EMIT_NEW_PCONST (cfg, args [1], fsig);
6528 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
6531 MonoMethod *cil_method;
6533 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6534 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
6535 cil_method = cmethod;
6536 } else if (constrained_call) {
6537 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
6539 * This is needed since get_method_constrained can't find
6540 * the method in klass representing a type var.
6541 * The type var is guaranteed to be a reference type in this
6544 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6545 cil_method = cmethod;
6546 g_assert (!cmethod->klass->valuetype);
6548 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
6551 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6552 cil_method = cmethod;
6555 if (!cmethod || mono_loader_get_last_error ())
6557 if (!dont_verify && !cfg->skip_visibility) {
6558 MonoMethod *target_method = cil_method;
6559 if (method->is_inflated) {
6560 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
6562 if (!mono_method_can_access_method (method_definition, target_method) &&
6563 !mono_method_can_access_method (method, cil_method))
6564 METHOD_ACCESS_FAILURE;
6567 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
6568 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
6570 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
6571 /* MS.NET seems to silently convert this to a callvirt */
6576 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
6577 * converts to a callvirt.
6579 * tests/bug-515884.il is an example of this behavior
6581 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
6582 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
6583 if (!virtual && cmethod->klass->marshalbyref && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
6587 if (!cmethod->klass->inited)
6588 if (!mono_class_init (cmethod->klass))
6591 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
6592 mini_class_is_system_array (cmethod->klass)) {
6593 array_rank = cmethod->klass->rank;
6594 fsig = mono_method_signature (cmethod);
6596 fsig = mono_method_signature (cmethod);
6601 if (fsig->pinvoke) {
6602 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
6603 check_for_pending_exc, FALSE);
6604 fsig = mono_method_signature (wrapper);
6605 } else if (constrained_call) {
6606 fsig = mono_method_signature (cmethod);
6608 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
6612 mono_save_token_info (cfg, image, token, cil_method);
6614 n = fsig->param_count + fsig->hasthis;
6616 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
6617 if (check_linkdemand (cfg, method, cmethod))
6619 CHECK_CFG_EXCEPTION;
6622 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
6623 g_assert_not_reached ();
6626 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
6629 if (!cfg->generic_sharing_context && cmethod)
6630 g_assert (!mono_method_check_context_used (cmethod));
6634 //g_assert (!virtual || fsig->hasthis);
6638 if (constrained_call) {
6640 * We have the `constrained.' prefix opcode.
6642 if (constrained_call->valuetype && !cmethod->klass->valuetype) {
6644 * The type parameter is instantiated as a valuetype,
6645 * but that type doesn't override the method we're
6646 * calling, so we need to box `this'.
6648 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
6649 ins->klass = constrained_call;
6650 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call));
6651 CHECK_CFG_EXCEPTION;
6652 } else if (!constrained_call->valuetype) {
6653 int dreg = alloc_ireg_ref (cfg);
6656 * The type parameter is instantiated as a reference
6657 * type. We have a managed pointer on the stack, so
6658 * we need to dereference it here.
6660 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
6661 ins->type = STACK_OBJ;
6663 } else if (cmethod->klass->valuetype)
6665 constrained_call = NULL;
6668 if (*ip != CEE_CALLI && check_call_signature (cfg, fsig, sp))
6672 * If the callee is a shared method, then its static cctor
6673 * might not get called after the call was patched.
6675 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
6676 emit_generic_class_init (cfg, cmethod->klass);
6677 CHECK_TYPELOAD (cmethod->klass);
6680 if (cmethod && ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
6681 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
6682 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6683 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
6684 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6687 * Pass vtable iff target method might
6688 * be shared, which means that sharing
6689 * is enabled for its class and its
6690 * context is sharable (and it's not a
6693 if (sharing_enabled && context_sharable &&
6694 !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
6698 if (cmethod && mini_method_get_context (cmethod) &&
6699 mini_method_get_context (cmethod)->method_inst) {
6700 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6701 MonoGenericContext *context = mini_method_get_context (cmethod);
6702 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6704 g_assert (!pass_vtable);
6706 if (sharing_enabled && context_sharable)
6710 if (cfg->generic_sharing_context && cmethod) {
6711 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
6713 context_used = mono_method_check_context_used (cmethod);
6715 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
6716 /* Generic method interface
6717 calls are resolved via a
6718 helper function and don't
6720 if (!cmethod_context || !cmethod_context->method_inst)
6721 pass_imt_from_rgctx = TRUE;
6725 * If a shared method calls another
6726 * shared method then the caller must
6727 * have a generic sharing context
6728 * because the magic trampoline
6729 * requires it. FIXME: We shouldn't
6730 * have to force the vtable/mrgctx
6731 * variable here. Instead there
6732 * should be a flag in the cfg to
6733 * request a generic sharing context.
6736 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
6737 mono_get_vtable_var (cfg);
6742 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
6744 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
6746 CHECK_TYPELOAD (cmethod->klass);
6747 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
6752 g_assert (!vtable_arg);
6754 if (!cfg->compile_aot) {
6756 * emit_get_rgctx_method () calls mono_class_vtable () so check
6757 * for type load errors before.
6759 mono_class_setup_vtable (cmethod->klass);
6760 CHECK_TYPELOAD (cmethod->klass);
6763 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
6765 /* !marshalbyref is needed to properly handle generic methods + remoting */
6766 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
6767 MONO_METHOD_IS_FINAL (cmethod)) &&
6768 !cmethod->klass->marshalbyref) {
6775 if (pass_imt_from_rgctx) {
6776 g_assert (!pass_vtable);
6779 imt_arg = emit_get_rgctx_method (cfg, context_used,
6780 cmethod, MONO_RGCTX_INFO_METHOD);
6784 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
6786 /* Calling virtual generic methods */
6787 if (cmethod && virtual &&
6788 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
6789 !(MONO_METHOD_IS_FINAL (cmethod) &&
6790 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
6791 mono_method_signature (cmethod)->generic_param_count) {
6792 MonoInst *this_temp, *this_arg_temp, *store;
6793 MonoInst *iargs [4];
6795 g_assert (mono_method_signature (cmethod)->is_inflated);
6797 /* Prevent inlining of methods that contain indirect calls */
6800 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
6801 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt) {
6802 g_assert (!imt_arg);
6804 g_assert (cmethod->is_inflated);
6805 imt_arg = emit_get_rgctx_method (cfg, context_used,
6806 cmethod, MONO_RGCTX_INFO_METHOD);
6807 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, sp [0], imt_arg, NULL);
6811 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
6812 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
6813 MONO_ADD_INS (bblock, store);
6815 /* FIXME: This should be a managed pointer */
6816 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
6818 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
6819 iargs [1] = emit_get_rgctx_method (cfg, context_used,
6820 cmethod, MONO_RGCTX_INFO_METHOD);
6821 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
6822 addr = mono_emit_jit_icall (cfg,
6823 mono_helper_compile_generic_method, iargs);
6825 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
6827 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL);
6830 if (!MONO_TYPE_IS_VOID (fsig->ret))
6831 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6833 CHECK_CFG_EXCEPTION;
6841 * Implement a workaround for the inherent races involved in locking:
6847 * If a thread abort happens between the call to Monitor.Enter () and the start of the
6848 * try block, the Exit () won't be executed, see:
6849 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
6850 * To work around this, we extend such try blocks to include the last x bytes
6851 * of the Monitor.Enter () call.
6853 if (cmethod && cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
6854 MonoBasicBlock *tbb;
6856 GET_BBLOCK (cfg, tbb, ip + 5);
6858 * Only extend try blocks with a finally, to avoid catching exceptions thrown
6859 * from Monitor.Enter like ArgumentNullException.
6861 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
6862 /* Mark this bblock as needing to be extended */
6863 tbb->extend_try_block = TRUE;
6867 /* Conversion to a JIT intrinsic */
6868 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
6870 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6871 type_to_eval_stack_type ((cfg), fsig->ret, ins);
6876 CHECK_CFG_EXCEPTION;
6884 if ((cfg->opt & MONO_OPT_INLINE) && cmethod &&
6885 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
6886 !disable_inline && mono_method_check_inlining (cfg, cmethod) &&
6887 !g_list_find (dont_inline, cmethod)) {
6889 gboolean always = FALSE;
6891 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
6892 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
6893 /* Prevent inlining of methods that call wrappers */
6895 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
6899 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, always))) {
6901 cfg->real_offset += 5;
6904 if (!MONO_TYPE_IS_VOID (fsig->ret))
6905 /* *sp is already set by inline_method */
6908 inline_costs += costs;
6914 inline_costs += 10 * num_calls++;
6916 /* Tail recursion elimination */
6917 if ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
6918 gboolean has_vtargs = FALSE;
6921 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6924 /* keep it simple */
6925 for (i = fsig->param_count - 1; i >= 0; i--) {
6926 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
6931 for (i = 0; i < n; ++i)
6932 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6933 MONO_INST_NEW (cfg, ins, OP_BR);
6934 MONO_ADD_INS (bblock, ins);
6935 tblock = start_bblock->out_bb [0];
6936 link_bblock (cfg, bblock, tblock);
6937 ins->inst_target_bb = tblock;
6938 start_new_bblock = 1;
6940 /* skip the CEE_RET, too */
6941 if (ip_in_bb (cfg, bblock, ip + 5))
6951 /* Generic sharing */
6952 /* FIXME: only do this for generic methods if
6953 they are not shared! */
6954 if (context_used && !imt_arg && !array_rank &&
6955 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
6956 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
6957 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
6958 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
6961 g_assert (cfg->generic_sharing_context && cmethod);
6965 * We are compiling a call to a
6966 * generic method from shared code,
6967 * which means that we have to look up
6968 * the method in the rgctx and do an
6971 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
6974 /* Indirect calls */
6976 g_assert (!imt_arg);
6978 if (*ip == CEE_CALL)
6979 g_assert (context_used);
6980 else if (*ip == CEE_CALLI)
6981 g_assert (!vtable_arg);
6983 /* FIXME: what the hell is this??? */
6984 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
6985 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
6987 /* Prevent inlining of methods with indirect calls */
6993 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, vtable_arg);
6994 call = (MonoCallInst*)ins;
6996 if (addr->opcode == OP_AOTCONST && addr->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6998 * Instead of emitting an indirect call, emit a direct call
6999 * with the contents of the aotconst as the patch info.
7001 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_p0, fsig, sp);
7003 } else if (addr->opcode == OP_GOT_ENTRY && addr->inst_right->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
7004 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_right->inst_left, fsig, sp);
7007 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL);
7010 if (!MONO_TYPE_IS_VOID (fsig->ret))
7011 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
7013 CHECK_CFG_EXCEPTION;
7024 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
7025 MonoInst *val = sp [fsig->param_count];
7027 if (val->type == STACK_OBJ) {
7028 MonoInst *iargs [2];
7033 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
7036 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
7037 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
7038 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !(val->opcode == OP_PCONST && val->inst_c0 == 0))
7039 emit_write_barrier (cfg, addr, val, 0);
7040 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
7041 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
7043 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
7046 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
7047 if (!cmethod->klass->element_class->valuetype && !readonly)
7048 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
7049 CHECK_TYPELOAD (cmethod->klass);
7052 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
7055 g_assert_not_reached ();
7058 CHECK_CFG_EXCEPTION;
7065 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
7067 if (!MONO_TYPE_IS_VOID (fsig->ret))
7068 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
7070 CHECK_CFG_EXCEPTION;
7077 /* Tail prefix / tail call optimization */
7079 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
7080 /* FIXME: runtime generic context pointer for jumps? */
7081 /* FIXME: handle this for generic sharing eventually */
7082 supported_tail_call = cmethod &&
7083 ((((ins_flag & MONO_INST_TAILCALL) && (*ip == CEE_CALL))
7084 ))//|| ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && ip [5] == CEE_RET))
7085 && !vtable_arg && !cfg->generic_sharing_context && is_supported_tail_call (cfg, method, cmethod, fsig);
7087 if (supported_tail_call) {
7090 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
7093 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
7095 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
7096 /* Handle tail calls similarly to calls */
7097 call = mono_emit_call_args (cfg, mono_method_signature (cmethod), sp, FALSE, FALSE, TRUE, FALSE);
7099 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
7100 call->tail_call = TRUE;
7101 call->method = cmethod;
7102 call->signature = mono_method_signature (cmethod);
7105 * We implement tail calls by storing the actual arguments into the
7106 * argument variables, then emitting a CEE_JMP.
7108 for (i = 0; i < n; ++i) {
7109 /* Prevent argument from being register allocated */
7110 arg_array [i]->flags |= MONO_INST_VOLATILE;
7111 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
7115 ins = (MonoInst*)call;
7116 ins->inst_p0 = cmethod;
7117 ins->inst_p1 = arg_array [0];
7118 MONO_ADD_INS (bblock, ins);
7119 link_bblock (cfg, bblock, end_bblock);
7120 start_new_bblock = 1;
7122 CHECK_CFG_EXCEPTION;
7127 // FIXME: Eliminate unreachable epilogs
7130 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
7131 * only reachable from this call.
7133 GET_BBLOCK (cfg, tblock, ip);
7134 if (tblock == bblock || tblock->in_count == 0)
7141 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL,
7142 imt_arg, vtable_arg);
7144 if (!MONO_TYPE_IS_VOID (fsig->ret))
7145 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
7147 CHECK_CFG_EXCEPTION;
7154 if (cfg->method != method) {
7155 /* return from inlined method */
7157 * If in_count == 0, that means the ret is unreachable due to
7158 * being preceded by a throw. In that case, inline_method () will
7159 * handle setting the return value
7160 * (test case: test_0_inline_throw ()).
7162 if (return_var && cfg->cbb->in_count) {
7166 //g_assert (returnvar != -1);
7167 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
7168 cfg->ret_var_set = TRUE;
7172 MonoType *ret_type = mono_method_signature (method)->ret;
7176 * Place a seq point here too even though the IL stack is not
7177 * empty, so a step over on
7180 * will work correctly.
7182 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
7183 MONO_ADD_INS (cfg->cbb, ins);
7186 g_assert (!return_var);
7190 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
7193 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
7196 if (!cfg->vret_addr) {
7199 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
7201 EMIT_NEW_RETLOADA (cfg, ret_addr);
7203 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
7204 ins->klass = mono_class_from_mono_type (ret_type);
7207 #ifdef MONO_ARCH_SOFT_FLOAT
7208 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
7209 MonoInst *iargs [1];
7213 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
7214 mono_arch_emit_setret (cfg, method, conv);
7216 mono_arch_emit_setret (cfg, method, *sp);
7219 mono_arch_emit_setret (cfg, method, *sp);
7224 if (sp != stack_start)
7226 MONO_INST_NEW (cfg, ins, OP_BR);
7228 ins->inst_target_bb = end_bblock;
7229 MONO_ADD_INS (bblock, ins);
7230 link_bblock (cfg, bblock, end_bblock);
7231 start_new_bblock = 1;
7235 MONO_INST_NEW (cfg, ins, OP_BR);
7237 target = ip + 1 + (signed char)(*ip);
7239 GET_BBLOCK (cfg, tblock, target);
7240 link_bblock (cfg, bblock, tblock);
7241 ins->inst_target_bb = tblock;
7242 if (sp != stack_start) {
7243 handle_stack_args (cfg, stack_start, sp - stack_start);
7245 CHECK_UNVERIFIABLE (cfg);
7247 MONO_ADD_INS (bblock, ins);
7248 start_new_bblock = 1;
7249 inline_costs += BRANCH_COST;
7263 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
7265 target = ip + 1 + *(signed char*)ip;
7271 inline_costs += BRANCH_COST;
7275 MONO_INST_NEW (cfg, ins, OP_BR);
7278 target = ip + 4 + (gint32)read32(ip);
7280 GET_BBLOCK (cfg, tblock, target);
7281 link_bblock (cfg, bblock, tblock);
7282 ins->inst_target_bb = tblock;
7283 if (sp != stack_start) {
7284 handle_stack_args (cfg, stack_start, sp - stack_start);
7286 CHECK_UNVERIFIABLE (cfg);
7289 MONO_ADD_INS (bblock, ins);
7291 start_new_bblock = 1;
7292 inline_costs += BRANCH_COST;
7299 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
7300 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
7301 guint32 opsize = is_short ? 1 : 4;
7303 CHECK_OPSIZE (opsize);
7305 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
7308 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
7313 GET_BBLOCK (cfg, tblock, target);
7314 link_bblock (cfg, bblock, tblock);
7315 GET_BBLOCK (cfg, tblock, ip);
7316 link_bblock (cfg, bblock, tblock);
7318 if (sp != stack_start) {
7319 handle_stack_args (cfg, stack_start, sp - stack_start);
7320 CHECK_UNVERIFIABLE (cfg);
7323 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
7324 cmp->sreg1 = sp [0]->dreg;
7325 type_from_op (cmp, sp [0], NULL);
7328 #if SIZEOF_REGISTER == 4
7329 if (cmp->opcode == OP_LCOMPARE_IMM) {
7330 /* Convert it to OP_LCOMPARE */
7331 MONO_INST_NEW (cfg, ins, OP_I8CONST);
7332 ins->type = STACK_I8;
7333 ins->dreg = alloc_dreg (cfg, STACK_I8);
7335 MONO_ADD_INS (bblock, ins);
7336 cmp->opcode = OP_LCOMPARE;
7337 cmp->sreg2 = ins->dreg;
7340 MONO_ADD_INS (bblock, cmp);
7342 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
7343 type_from_op (ins, sp [0], NULL);
7344 MONO_ADD_INS (bblock, ins);
7345 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
7346 GET_BBLOCK (cfg, tblock, target);
7347 ins->inst_true_bb = tblock;
7348 GET_BBLOCK (cfg, tblock, ip);
7349 ins->inst_false_bb = tblock;
7350 start_new_bblock = 2;
7353 inline_costs += BRANCH_COST;
7368 MONO_INST_NEW (cfg, ins, *ip);
7370 target = ip + 4 + (gint32)read32(ip);
7376 inline_costs += BRANCH_COST;
7380 MonoBasicBlock **targets;
7381 MonoBasicBlock *default_bblock;
7382 MonoJumpInfoBBTable *table;
7383 int offset_reg = alloc_preg (cfg);
7384 int target_reg = alloc_preg (cfg);
7385 int table_reg = alloc_preg (cfg);
7386 int sum_reg = alloc_preg (cfg);
7387 gboolean use_op_switch;
7391 n = read32 (ip + 1);
7394 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
7398 CHECK_OPSIZE (n * sizeof (guint32));
7399 target = ip + n * sizeof (guint32);
7401 GET_BBLOCK (cfg, default_bblock, target);
7402 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
7404 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
7405 for (i = 0; i < n; ++i) {
7406 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
7407 targets [i] = tblock;
7408 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
7412 if (sp != stack_start) {
7414 * Link the current bb with the targets as well, so handle_stack_args
7415 * will set their in_stack correctly.
7417 link_bblock (cfg, bblock, default_bblock);
7418 for (i = 0; i < n; ++i)
7419 link_bblock (cfg, bblock, targets [i]);
7421 handle_stack_args (cfg, stack_start, sp - stack_start);
7423 CHECK_UNVERIFIABLE (cfg);
7426 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
7427 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
7430 for (i = 0; i < n; ++i)
7431 link_bblock (cfg, bblock, targets [i]);
7433 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
7434 table->table = targets;
7435 table->table_size = n;
7437 use_op_switch = FALSE;
7439 /* ARM implements SWITCH statements differently */
7440 /* FIXME: Make it use the generic implementation */
7441 if (!cfg->compile_aot)
7442 use_op_switch = TRUE;
7445 if (COMPILE_LLVM (cfg))
7446 use_op_switch = TRUE;
7448 cfg->cbb->has_jump_table = 1;
7450 if (use_op_switch) {
7451 MONO_INST_NEW (cfg, ins, OP_SWITCH);
7452 ins->sreg1 = src1->dreg;
7453 ins->inst_p0 = table;
7454 ins->inst_many_bb = targets;
7455 ins->klass = GUINT_TO_POINTER (n);
7456 MONO_ADD_INS (cfg->cbb, ins);
7458 if (sizeof (gpointer) == 8)
7459 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
7461 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
7463 #if SIZEOF_REGISTER == 8
7464 /* The upper word might not be zero, and we add it to a 64 bit address later */
7465 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
7468 if (cfg->compile_aot) {
7469 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
7471 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
7472 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
7473 ins->inst_p0 = table;
7474 ins->dreg = table_reg;
7475 MONO_ADD_INS (cfg->cbb, ins);
7478 /* FIXME: Use load_memindex */
7479 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
7480 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
7481 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
7483 start_new_bblock = 1;
7484 inline_costs += (BRANCH_COST * 2);
7504 dreg = alloc_freg (cfg);
7507 dreg = alloc_lreg (cfg);
7510 dreg = alloc_ireg_ref (cfg);
7513 dreg = alloc_preg (cfg);
7516 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
7517 ins->type = ldind_type [*ip - CEE_LDIND_I1];
7518 ins->flags |= ins_flag;
7520 MONO_ADD_INS (bblock, ins);
7535 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
7536 ins->flags |= ins_flag;
7538 MONO_ADD_INS (bblock, ins);
7540 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
7541 emit_write_barrier (cfg, sp [0], sp [1], -1);
7550 MONO_INST_NEW (cfg, ins, (*ip));
7552 ins->sreg1 = sp [0]->dreg;
7553 ins->sreg2 = sp [1]->dreg;
7554 type_from_op (ins, sp [0], sp [1]);
7556 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7558 /* Use the immediate opcodes if possible */
7559 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
7560 int imm_opcode = mono_op_to_op_imm (ins->opcode);
7561 if (imm_opcode != -1) {
7562 ins->opcode = imm_opcode;
7563 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
7566 sp [1]->opcode = OP_NOP;
7570 MONO_ADD_INS ((cfg)->cbb, (ins));
7572 *sp++ = mono_decompose_opcode (cfg, ins);
7589 MONO_INST_NEW (cfg, ins, (*ip));
7591 ins->sreg1 = sp [0]->dreg;
7592 ins->sreg2 = sp [1]->dreg;
7593 type_from_op (ins, sp [0], sp [1]);
7595 ADD_WIDEN_OP (ins, sp [0], sp [1]);
7596 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7598 /* FIXME: Pass opcode to is_inst_imm */
7600 /* Use the immediate opcodes if possible */
7601 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
7604 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
7605 if (imm_opcode != -1) {
7606 ins->opcode = imm_opcode;
7607 if (sp [1]->opcode == OP_I8CONST) {
7608 #if SIZEOF_REGISTER == 8
7609 ins->inst_imm = sp [1]->inst_l;
7611 ins->inst_ls_word = sp [1]->inst_ls_word;
7612 ins->inst_ms_word = sp [1]->inst_ms_word;
7616 ins->inst_imm = (gssize)(sp [1]->inst_c0);
7619 /* Might be followed by an instruction added by ADD_WIDEN_OP */
7620 if (sp [1]->next == NULL)
7621 sp [1]->opcode = OP_NOP;
7624 MONO_ADD_INS ((cfg)->cbb, (ins));
7626 *sp++ = mono_decompose_opcode (cfg, ins);
7639 case CEE_CONV_OVF_I8:
7640 case CEE_CONV_OVF_U8:
7644 /* Special case this earlier so we have long constants in the IR */
7645 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
7646 int data = sp [-1]->inst_c0;
7647 sp [-1]->opcode = OP_I8CONST;
7648 sp [-1]->type = STACK_I8;
7649 #if SIZEOF_REGISTER == 8
7650 if ((*ip) == CEE_CONV_U8)
7651 sp [-1]->inst_c0 = (guint32)data;
7653 sp [-1]->inst_c0 = data;
7655 sp [-1]->inst_ls_word = data;
7656 if ((*ip) == CEE_CONV_U8)
7657 sp [-1]->inst_ms_word = 0;
7659 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
7661 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
7668 case CEE_CONV_OVF_I4:
7669 case CEE_CONV_OVF_I1:
7670 case CEE_CONV_OVF_I2:
7671 case CEE_CONV_OVF_I:
7672 case CEE_CONV_OVF_U:
7675 if (sp [-1]->type == STACK_R8) {
7676 ADD_UNOP (CEE_CONV_OVF_I8);
7683 case CEE_CONV_OVF_U1:
7684 case CEE_CONV_OVF_U2:
7685 case CEE_CONV_OVF_U4:
7688 if (sp [-1]->type == STACK_R8) {
7689 ADD_UNOP (CEE_CONV_OVF_U8);
7696 case CEE_CONV_OVF_I1_UN:
7697 case CEE_CONV_OVF_I2_UN:
7698 case CEE_CONV_OVF_I4_UN:
7699 case CEE_CONV_OVF_I8_UN:
7700 case CEE_CONV_OVF_U1_UN:
7701 case CEE_CONV_OVF_U2_UN:
7702 case CEE_CONV_OVF_U4_UN:
7703 case CEE_CONV_OVF_U8_UN:
7704 case CEE_CONV_OVF_I_UN:
7705 case CEE_CONV_OVF_U_UN:
7712 CHECK_CFG_EXCEPTION;
7716 case CEE_ADD_OVF_UN:
7718 case CEE_MUL_OVF_UN:
7720 case CEE_SUB_OVF_UN:
7728 token = read32 (ip + 1);
7729 klass = mini_get_class (method, token, generic_context);
7730 CHECK_TYPELOAD (klass);
7732 if (generic_class_is_reference_type (cfg, klass)) {
7733 MonoInst *store, *load;
7734 int dreg = alloc_ireg_ref (cfg);
7736 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
7737 load->flags |= ins_flag;
7738 MONO_ADD_INS (cfg->cbb, load);
7740 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
7741 store->flags |= ins_flag;
7742 MONO_ADD_INS (cfg->cbb, store);
7744 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
7745 emit_write_barrier (cfg, sp [0], sp [1], -1);
7747 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7759 token = read32 (ip + 1);
7760 klass = mini_get_class (method, token, generic_context);
7761 CHECK_TYPELOAD (klass);
7763 /* Optimize the common ldobj+stloc combination */
7773 loc_index = ip [5] - CEE_STLOC_0;
7780 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
7781 CHECK_LOCAL (loc_index);
7783 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7784 ins->dreg = cfg->locals [loc_index]->dreg;
7790 /* Optimize the ldobj+stobj combination */
7791 /* The reference case ends up being a load+store anyway */
7792 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
7797 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7804 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7813 CHECK_STACK_OVF (1);
7815 n = read32 (ip + 1);
7817 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
7818 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
7819 ins->type = STACK_OBJ;
7822 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
7823 MonoInst *iargs [1];
7825 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
7826 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
7828 if (cfg->opt & MONO_OPT_SHARED) {
7829 MonoInst *iargs [3];
7831 if (cfg->compile_aot) {
7832 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
7834 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7835 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
7836 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
7837 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
7838 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7840 if (bblock->out_of_line) {
7841 MonoInst *iargs [2];
7843 if (image == mono_defaults.corlib) {
7845 * Avoid relocations in AOT and save some space by using a
7846 * version of helper_ldstr specialized to mscorlib.
7848 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
7849 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
7851 /* Avoid creating the string object */
7852 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
7853 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
7854 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
7858 if (cfg->compile_aot) {
7859 NEW_LDSTRCONST (cfg, ins, image, n);
7861 MONO_ADD_INS (bblock, ins);
7864 NEW_PCONST (cfg, ins, NULL);
7865 ins->type = STACK_OBJ;
7866 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7868 OUT_OF_MEMORY_FAILURE;
7871 MONO_ADD_INS (bblock, ins);
7880 MonoInst *iargs [2];
7881 MonoMethodSignature *fsig;
7884 MonoInst *vtable_arg = NULL;
7887 token = read32 (ip + 1);
7888 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7889 if (!cmethod || mono_loader_get_last_error ())
7891 fsig = mono_method_get_signature (cmethod, image, token);
7895 mono_save_token_info (cfg, image, token, cmethod);
7897 if (!mono_class_init (cmethod->klass))
7900 if (cfg->generic_sharing_context)
7901 context_used = mono_method_check_context_used (cmethod);
7903 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
7904 if (check_linkdemand (cfg, method, cmethod))
7906 CHECK_CFG_EXCEPTION;
7907 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
7908 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
7911 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
7912 emit_generic_class_init (cfg, cmethod->klass);
7913 CHECK_TYPELOAD (cmethod->klass);
7916 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7917 mono_method_is_generic_sharable_impl (cmethod, TRUE)) {
7918 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7919 mono_class_vtable (cfg->domain, cmethod->klass);
7920 CHECK_TYPELOAD (cmethod->klass);
7922 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7923 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7926 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
7927 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7929 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7931 CHECK_TYPELOAD (cmethod->klass);
7932 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7937 n = fsig->param_count;
7941 * Generate smaller code for the common newobj <exception> instruction in
7942 * argument checking code.
7944 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
7945 is_exception_class (cmethod->klass) && n <= 2 &&
7946 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
7947 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
7948 MonoInst *iargs [3];
7950 g_assert (!vtable_arg);
7954 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
7957 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
7961 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
7966 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
7969 g_assert_not_reached ();
7977 /* move the args to allow room for 'this' in the first position */
7983 /* check_call_signature () requires sp[0] to be set */
7984 this_ins.type = STACK_OBJ;
7986 if (check_call_signature (cfg, fsig, sp))
7991 if (mini_class_is_system_array (cmethod->klass)) {
7992 g_assert (!vtable_arg);
7994 *sp = emit_get_rgctx_method (cfg, context_used,
7995 cmethod, MONO_RGCTX_INFO_METHOD);
7997 /* Avoid varargs in the common case */
7998 if (fsig->param_count == 1)
7999 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
8000 else if (fsig->param_count == 2)
8001 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
8002 else if (fsig->param_count == 3)
8003 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
8005 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
8006 } else if (cmethod->string_ctor) {
8007 g_assert (!context_used);
8008 g_assert (!vtable_arg);
8009 /* we simply pass a null pointer */
8010 EMIT_NEW_PCONST (cfg, *sp, NULL);
8011 /* now call the string ctor */
8012 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, sp, NULL, NULL, NULL);
8014 MonoInst* callvirt_this_arg = NULL;
8016 if (cmethod->klass->valuetype) {
8017 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
8018 MONO_EMIT_NEW_VZERO (cfg, iargs [0]->dreg, cmethod->klass);
8019 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
8024 * The code generated by mini_emit_virtual_call () expects
8025 * iargs [0] to be a boxed instance, but luckily the vcall
8026 * will be transformed into a normal call there.
8028 } else if (context_used) {
8029 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
8032 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
8034 CHECK_TYPELOAD (cmethod->klass);
8037 * TypeInitializationExceptions thrown from the mono_runtime_class_init
8038 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
8039 * As a workaround, we call class cctors before allocating objects.
8041 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
8042 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
8043 if (cfg->verbose_level > 2)
8044 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
8045 class_inits = g_slist_prepend (class_inits, vtable);
8048 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
8051 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
8054 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
8056 /* Now call the actual ctor */
8057 /* Avoid virtual calls to ctors if possible */
8058 if (cmethod->klass->marshalbyref)
8059 callvirt_this_arg = sp [0];
8062 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
8063 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8064 type_to_eval_stack_type ((cfg), fsig->ret, ins);
8069 CHECK_CFG_EXCEPTION;
8070 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
8071 !disable_inline && mono_method_check_inlining (cfg, cmethod) &&
8072 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
8073 !g_list_find (dont_inline, cmethod)) {
8076 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
8077 cfg->real_offset += 5;
8080 inline_costs += costs - 5;
8083 mono_emit_method_call_full (cfg, cmethod, fsig, sp, callvirt_this_arg, NULL, NULL);
8085 } else if (context_used &&
8086 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
8087 !mono_class_generic_sharing_enabled (cmethod->klass))) {
8088 MonoInst *cmethod_addr;
8090 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
8091 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
8093 mono_emit_calli (cfg, fsig, sp, cmethod_addr, vtable_arg);
8096 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp,
8097 callvirt_this_arg, NULL, vtable_arg);
8101 if (alloc == NULL) {
8103 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
8104 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
8118 token = read32 (ip + 1);
8119 klass = mini_get_class (method, token, generic_context);
8120 CHECK_TYPELOAD (klass);
8121 if (sp [0]->type != STACK_OBJ)
8124 if (cfg->generic_sharing_context)
8125 context_used = mono_class_check_context_used (klass);
8127 if (!context_used && mini_class_has_reference_variant_generic_argument (klass, context_used)) {
8128 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
8135 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
8138 /*FIXME AOT support*/
8139 if (cfg->compile_aot)
8140 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
8142 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
8144 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
8145 *sp++ = mono_emit_method_call (cfg, mono_castclass, args, NULL);
8148 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
8149 MonoMethod *mono_castclass;
8150 MonoInst *iargs [1];
8153 mono_castclass = mono_marshal_get_castclass (klass);
8156 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
8157 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8158 CHECK_CFG_EXCEPTION;
8159 g_assert (costs > 0);
8162 cfg->real_offset += 5;
8167 inline_costs += costs;
8170 ins = handle_castclass (cfg, klass, *sp, context_used);
8171 CHECK_CFG_EXCEPTION;
8181 token = read32 (ip + 1);
8182 klass = mini_get_class (method, token, generic_context);
8183 CHECK_TYPELOAD (klass);
8184 if (sp [0]->type != STACK_OBJ)
8187 if (cfg->generic_sharing_context)
8188 context_used = mono_class_check_context_used (klass);
8190 if (!context_used && mini_class_has_reference_variant_generic_argument (klass, context_used)) {
8191 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
8198 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
8201 /*FIXME AOT support*/
8202 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
8204 *sp++ = mono_emit_method_call (cfg, mono_isinst, args, NULL);
8207 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
8208 MonoMethod *mono_isinst;
8209 MonoInst *iargs [1];
8212 mono_isinst = mono_marshal_get_isinst (klass);
8215 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
8216 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8217 CHECK_CFG_EXCEPTION;
8218 g_assert (costs > 0);
8221 cfg->real_offset += 5;
8226 inline_costs += costs;
8229 ins = handle_isinst (cfg, klass, *sp, context_used);
8230 CHECK_CFG_EXCEPTION;
8237 case CEE_UNBOX_ANY: {
8241 token = read32 (ip + 1);
8242 klass = mini_get_class (method, token, generic_context);
8243 CHECK_TYPELOAD (klass);
8245 mono_save_token_info (cfg, image, token, klass);
8247 if (cfg->generic_sharing_context)
8248 context_used = mono_class_check_context_used (klass);
8250 if (generic_class_is_reference_type (cfg, klass)) {
8251 /* CASTCLASS FIXME kill this huge slice of duplicated code*/
8252 if (!context_used && mini_class_has_reference_variant_generic_argument (klass, context_used)) {
8253 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
8260 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
8263 /*FIXME AOT support*/
8264 if (cfg->compile_aot)
8265 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
8267 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
8269 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
8270 *sp++ = mono_emit_method_call (cfg, mono_castclass, args, NULL);
8273 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
8274 MonoMethod *mono_castclass;
8275 MonoInst *iargs [1];
8278 mono_castclass = mono_marshal_get_castclass (klass);
8281 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
8282 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8283 CHECK_CFG_EXCEPTION;
8284 g_assert (costs > 0);
8287 cfg->real_offset += 5;
8291 inline_costs += costs;
8293 ins = handle_castclass (cfg, klass, *sp, context_used);
8294 CHECK_CFG_EXCEPTION;
8302 if (mono_class_is_nullable (klass)) {
8303 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
8310 ins = handle_unbox (cfg, klass, sp, context_used);
8316 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
8329 token = read32 (ip + 1);
8330 klass = mini_get_class (method, token, generic_context);
8331 CHECK_TYPELOAD (klass);
8333 mono_save_token_info (cfg, image, token, klass);
8335 if (cfg->generic_sharing_context)
8336 context_used = mono_class_check_context_used (klass);
8338 if (generic_class_is_reference_type (cfg, klass)) {
8344 if (klass == mono_defaults.void_class)
8346 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
8348 /* frequent check in generic code: box (struct), brtrue */
8350 // FIXME: LLVM can't handle the inconsistent bb linking
8351 if (!mono_class_is_nullable (klass) &&
8352 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) &&
8353 (ip [5] == CEE_BRTRUE ||
8354 ip [5] == CEE_BRTRUE_S ||
8355 ip [5] == CEE_BRFALSE ||
8356 ip [5] == CEE_BRFALSE_S)) {
8357 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
8359 MonoBasicBlock *true_bb, *false_bb;
8363 if (cfg->verbose_level > 3) {
8364 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
8365 printf ("<box+brtrue opt>\n");
8373 target = ip + 1 + (signed char)(*ip);
8380 target = ip + 4 + (gint)(read32 (ip));
8384 g_assert_not_reached ();
8388 * We need to link both bblocks, since it is needed for handling stack
8389 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
8390 * Branching to only one of them would lead to inconsistencies, so
8391 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
8393 GET_BBLOCK (cfg, true_bb, target);
8394 GET_BBLOCK (cfg, false_bb, ip);
8396 mono_link_bblock (cfg, cfg->cbb, true_bb);
8397 mono_link_bblock (cfg, cfg->cbb, false_bb);
8399 if (sp != stack_start) {
8400 handle_stack_args (cfg, stack_start, sp - stack_start);
8402 CHECK_UNVERIFIABLE (cfg);
8405 if (COMPILE_LLVM (cfg)) {
8406 dreg = alloc_ireg (cfg);
8407 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
8408 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
8410 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
8412 /* The JIT can't eliminate the iconst+compare */
8413 MONO_INST_NEW (cfg, ins, OP_BR);
8414 ins->inst_target_bb = is_true ? true_bb : false_bb;
8415 MONO_ADD_INS (cfg->cbb, ins);
8418 start_new_bblock = 1;
8422 *sp++ = handle_box (cfg, val, klass, context_used);
8424 CHECK_CFG_EXCEPTION;
8433 token = read32 (ip + 1);
8434 klass = mini_get_class (method, token, generic_context);
8435 CHECK_TYPELOAD (klass);
8437 mono_save_token_info (cfg, image, token, klass);
8439 if (cfg->generic_sharing_context)
8440 context_used = mono_class_check_context_used (klass);
8442 if (mono_class_is_nullable (klass)) {
8445 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
8446 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
8450 ins = handle_unbox (cfg, klass, sp, context_used);
8460 MonoClassField *field;
8464 if (*ip == CEE_STFLD) {
8471 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
8473 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
8476 token = read32 (ip + 1);
8477 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8478 field = mono_method_get_wrapper_data (method, token);
8479 klass = field->parent;
8482 field = mono_field_from_token (image, token, &klass, generic_context);
8486 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
8487 FIELD_ACCESS_FAILURE;
8488 mono_class_init (klass);
8490 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exist) have
8491 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
8492 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8493 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8496 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
8497 if (*ip == CEE_STFLD) {
8498 if (target_type_is_incompatible (cfg, field->type, sp [1]))
8500 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
8501 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
8502 MonoInst *iargs [5];
8505 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8506 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
8507 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
8511 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
8512 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
8513 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8514 CHECK_CFG_EXCEPTION;
8515 g_assert (costs > 0);
8517 cfg->real_offset += 5;
8520 inline_costs += costs;
8522 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
8527 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
8529 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
8530 if (sp [0]->opcode != OP_LDADDR)
8531 store->flags |= MONO_INST_FAULT;
8533 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
8534 /* insert call to write barrier */
8538 dreg = alloc_ireg_mp (cfg);
8539 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
8540 emit_write_barrier (cfg, ptr, sp [1], -1);
8543 store->flags |= ins_flag;
8550 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
8551 MonoMethod *wrapper = (*ip == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
8552 MonoInst *iargs [4];
8555 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8556 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
8557 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
8558 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
8559 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
8560 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8561 CHECK_CFG_EXCEPTION;
8563 g_assert (costs > 0);
8565 cfg->real_offset += 5;
8569 inline_costs += costs;
8571 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
8575 if (sp [0]->type == STACK_VTYPE) {
8578 /* Have to compute the address of the variable */
8580 var = get_vreg_to_inst (cfg, sp [0]->dreg);
8582 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
8584 g_assert (var->klass == klass);
8586 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
8590 if (*ip == CEE_LDFLDA) {
8591 if (sp [0]->type == STACK_OBJ) {
8592 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
8593 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
8596 dreg = alloc_ireg_mp (cfg);
8598 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
8599 ins->klass = mono_class_from_mono_type (field->type);
8600 ins->type = STACK_MP;
8605 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
8607 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
8608 load->flags |= ins_flag;
8609 if (sp [0]->opcode != OP_LDADDR)
8610 load->flags |= MONO_INST_FAULT;
8621 MonoClassField *field;
8622 gpointer addr = NULL;
8623 gboolean is_special_static;
8627 token = read32 (ip + 1);
8629 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8630 field = mono_method_get_wrapper_data (method, token);
8631 klass = field->parent;
8634 field = mono_field_from_token (image, token, &klass, generic_context);
8637 mono_class_init (klass);
8638 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
8639 FIELD_ACCESS_FAILURE;
8641 /* if the class is Critical then transparent code cannot access its fields */
8642 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8643 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8646 * We can only support shared generic static
8647 * field access on architectures where the
8648 * trampoline code has been extended to handle
8649 * the generic class init.
8651 #ifndef MONO_ARCH_VTABLE_REG
8652 GENERIC_SHARING_FAILURE (*ip);
8655 if (cfg->generic_sharing_context)
8656 context_used = mono_class_check_context_used (klass);
8658 ftype = mono_field_get_type (field);
8660 g_assert (!(ftype->attrs & FIELD_ATTRIBUTE_LITERAL));
8662 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
8663 * to be called here.
8665 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
8666 mono_class_vtable (cfg->domain, klass);
8667 CHECK_TYPELOAD (klass);
8669 mono_domain_lock (cfg->domain);
8670 if (cfg->domain->special_static_fields)
8671 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
8672 mono_domain_unlock (cfg->domain);
8674 is_special_static = mono_class_field_is_special_static (field);
8676 /* Generate IR to compute the field address */
8677 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && mono_get_thread_intrinsic (cfg) && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
8679 * Fast access to TLS data
8680 * Inline version of get_thread_static_data () in
8684 int idx, static_data_reg, array_reg, dreg;
8685 MonoInst *thread_ins;
8687 // offset &= 0x7fffffff;
8688 // idx = (offset >> 24) - 1;
8689 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
8691 thread_ins = mono_get_thread_intrinsic (cfg);
8692 MONO_ADD_INS (cfg->cbb, thread_ins);
8693 static_data_reg = alloc_ireg (cfg);
8694 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
8696 if (cfg->compile_aot) {
8697 int offset_reg, offset2_reg, idx_reg;
8699 /* For TLS variables, this will return the TLS offset */
8700 EMIT_NEW_SFLDACONST (cfg, ins, field);
8701 offset_reg = ins->dreg;
8702 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
8703 idx_reg = alloc_ireg (cfg);
8704 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
8705 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
8706 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
8707 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
8708 array_reg = alloc_ireg (cfg);
8709 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
8710 offset2_reg = alloc_ireg (cfg);
8711 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
8712 dreg = alloc_ireg (cfg);
8713 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
8715 offset = (gsize)addr & 0x7fffffff;
8716 idx = (offset >> 24) - 1;
8718 array_reg = alloc_ireg (cfg);
8719 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
8720 dreg = alloc_ireg (cfg);
8721 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, (offset & 0xffffff));
8723 } else if ((cfg->opt & MONO_OPT_SHARED) ||
8724 (cfg->compile_aot && is_special_static) ||
8725 (context_used && is_special_static)) {
8726 MonoInst *iargs [2];
8728 g_assert (field->parent);
8729 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8731 iargs [1] = emit_get_rgctx_field (cfg, context_used,
8732 field, MONO_RGCTX_INFO_CLASS_FIELD);
8734 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8736 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8737 } else if (context_used) {
8738 MonoInst *static_data;
8741 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
8742 method->klass->name_space, method->klass->name, method->name,
8743 depth, field->offset);
8746 if (mono_class_needs_cctor_run (klass, method))
8747 emit_generic_class_init (cfg, klass);
8750 * The pointer we're computing here is
8752 * super_info.static_data + field->offset
8754 static_data = emit_get_rgctx_klass (cfg, context_used,
8755 klass, MONO_RGCTX_INFO_STATIC_DATA);
8757 if (field->offset == 0) {
8760 int addr_reg = mono_alloc_preg (cfg);
8761 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
8763 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
8764 MonoInst *iargs [2];
8766 g_assert (field->parent);
8767 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8768 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8769 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8771 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
8773 CHECK_TYPELOAD (klass);
8775 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
8776 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
8777 if (cfg->verbose_level > 2)
8778 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
8779 class_inits = g_slist_prepend (class_inits, vtable);
8781 if (cfg->run_cctors) {
8783 /* This makes so that inline cannot trigger */
8784 /* .cctors: too many apps depend on them */
8785 /* running with a specific order... */
8786 if (! vtable->initialized)
8788 ex = mono_runtime_class_init_full (vtable, FALSE);
8790 set_exception_object (cfg, ex);
8791 goto exception_exit;
8795 addr = (char*)vtable->data + field->offset;
8797 if (cfg->compile_aot)
8798 EMIT_NEW_SFLDACONST (cfg, ins, field);
8800 EMIT_NEW_PCONST (cfg, ins, addr);
8802 MonoInst *iargs [1];
8803 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
8804 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
8808 /* Generate IR to do the actual load/store operation */
8810 if (*ip == CEE_LDSFLDA) {
8811 ins->klass = mono_class_from_mono_type (ftype);
8812 ins->type = STACK_PTR;
8814 } else if (*ip == CEE_STSFLD) {
8819 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, sp [0]->dreg);
8820 store->flags |= ins_flag;
8822 gboolean is_const = FALSE;
8823 MonoVTable *vtable = NULL;
8825 if (!context_used) {
8826 vtable = mono_class_vtable (cfg->domain, klass);
8827 CHECK_TYPELOAD (klass);
8829 if (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) &&
8830 vtable->initialized && (ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY)) {
8831 gpointer addr = (char*)vtable->data + field->offset;
8832 int ro_type = ftype->type;
8833 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
8834 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
8836 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
8839 case MONO_TYPE_BOOLEAN:
8841 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
8845 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
8848 case MONO_TYPE_CHAR:
8850 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
8854 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
8859 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
8863 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
8869 case MONO_TYPE_FNPTR:
8870 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
8871 type_to_eval_stack_type ((cfg), field->type, *sp);
8874 case MONO_TYPE_STRING:
8875 case MONO_TYPE_OBJECT:
8876 case MONO_TYPE_CLASS:
8877 case MONO_TYPE_SZARRAY:
8878 case MONO_TYPE_ARRAY:
8879 if (!mono_gc_is_moving ()) {
8880 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
8881 type_to_eval_stack_type ((cfg), field->type, *sp);
8889 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
8894 case MONO_TYPE_VALUETYPE:
8904 CHECK_STACK_OVF (1);
8906 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
8907 load->flags |= ins_flag;
8920 token = read32 (ip + 1);
8921 klass = mini_get_class (method, token, generic_context);
8922 CHECK_TYPELOAD (klass);
8923 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
8924 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
8925 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
8926 generic_class_is_reference_type (cfg, klass)) {
8927 /* insert call to write barrier */
8928 emit_write_barrier (cfg, sp [0], sp [1], -1);
8940 const char *data_ptr;
8942 guint32 field_token;
8948 token = read32 (ip + 1);
8950 klass = mini_get_class (method, token, generic_context);
8951 CHECK_TYPELOAD (klass);
8953 if (cfg->generic_sharing_context)
8954 context_used = mono_class_check_context_used (klass);
8956 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
8957 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_I4);
8958 ins->sreg1 = sp [0]->dreg;
8959 ins->type = STACK_I4;
8960 ins->dreg = alloc_ireg (cfg);
8961 MONO_ADD_INS (cfg->cbb, ins);
8962 *sp = mono_decompose_opcode (cfg, ins);
8967 MonoClass *array_class = mono_array_class_get (klass, 1);
8968 /* FIXME: we cannot get a managed
8969 allocator because we can't get the
8970 open generic class's vtable. We
8971 have the same problem in
8972 handle_alloc(). This
8973 needs to be solved so that we can
8974 have managed allocs of shared
8977 MonoVTable *array_class_vtable = mono_class_vtable (cfg->domain, array_class);
8978 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class_vtable, 1);
8980 MonoMethod *managed_alloc = NULL;
8982 /* FIXME: Decompose later to help abcrem */
8985 args [0] = emit_get_rgctx_klass (cfg, context_used,
8986 array_class, MONO_RGCTX_INFO_VTABLE);
8991 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
8993 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
8995 if (cfg->opt & MONO_OPT_SHARED) {
8996 /* Decompose now to avoid problems with references to the domainvar */
8997 MonoInst *iargs [3];
8999 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
9000 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
9003 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
9005 /* Decompose later since it is needed by abcrem */
9006 MonoClass *array_type = mono_array_class_get (klass, 1);
9007 mono_class_vtable (cfg->domain, array_type);
9008 CHECK_TYPELOAD (array_type);
9010 MONO_INST_NEW (cfg, ins, OP_NEWARR);
9011 ins->dreg = alloc_ireg_ref (cfg);
9012 ins->sreg1 = sp [0]->dreg;
9013 ins->inst_newa_class = klass;
9014 ins->type = STACK_OBJ;
9016 MONO_ADD_INS (cfg->cbb, ins);
9017 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
9018 cfg->cbb->has_array_access = TRUE;
9020 /* Needed so mono_emit_load_get_addr () gets called */
9021 mono_get_got_var (cfg);
9031 * we inline/optimize the initialization sequence if possible.
9032 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
9033 * for small sizes open code the memcpy
9034 * ensure the rva field is big enough
9036 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
9037 MonoMethod *memcpy_method = get_memcpy_method ();
9038 MonoInst *iargs [3];
9039 int add_reg = alloc_ireg_mp (cfg);
9041 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
9042 if (cfg->compile_aot) {
9043 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
9045 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
9047 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
9048 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
9057 if (sp [0]->type != STACK_OBJ)
9060 MONO_INST_NEW (cfg, ins, OP_LDLEN);
9061 ins->dreg = alloc_preg (cfg);
9062 ins->sreg1 = sp [0]->dreg;
9063 ins->type = STACK_I4;
9064 /* This flag will be inherited by the decomposition */
9065 ins->flags |= MONO_INST_FAULT;
9066 MONO_ADD_INS (cfg->cbb, ins);
9067 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
9068 cfg->cbb->has_array_access = TRUE;
9076 if (sp [0]->type != STACK_OBJ)
9079 cfg->flags |= MONO_CFG_HAS_LDELEMA;
9081 klass = mini_get_class (method, read32 (ip + 1), generic_context);
9082 CHECK_TYPELOAD (klass);
9083 /* we need to make sure that this array is exactly the type it needs
9084 * to be for correctness. the wrappers are lax with their usage
9085 * so we need to ignore them here
9087 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
9088 MonoClass *array_class = mono_array_class_get (klass, 1);
9089 mini_emit_check_array_type (cfg, sp [0], array_class);
9090 CHECK_TYPELOAD (array_class);
9094 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
9109 case CEE_LDELEM_REF: {
9115 if (*ip == CEE_LDELEM) {
9117 token = read32 (ip + 1);
9118 klass = mini_get_class (method, token, generic_context);
9119 CHECK_TYPELOAD (klass);
9120 mono_class_init (klass);
9123 klass = array_access_to_klass (*ip);
9125 if (sp [0]->type != STACK_OBJ)
9128 cfg->flags |= MONO_CFG_HAS_LDELEMA;
9130 if (sp [1]->opcode == OP_ICONST) {
9131 int array_reg = sp [0]->dreg;
9132 int index_reg = sp [1]->dreg;
9133 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
9135 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
9136 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
9138 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
9139 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
9142 if (*ip == CEE_LDELEM)
9155 case CEE_STELEM_REF:
9162 cfg->flags |= MONO_CFG_HAS_LDELEMA;
9164 if (*ip == CEE_STELEM) {
9166 token = read32 (ip + 1);
9167 klass = mini_get_class (method, token, generic_context);
9168 CHECK_TYPELOAD (klass);
9169 mono_class_init (klass);
9172 klass = array_access_to_klass (*ip);
9174 if (sp [0]->type != STACK_OBJ)
9177 /* storing a NULL doesn't need any of the complex checks in stelemref */
9178 if (generic_class_is_reference_type (cfg, klass) &&
9179 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
9180 MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
9181 MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
9182 MonoInst *iargs [3];
9185 mono_class_setup_vtable (obj_array);
9186 g_assert (helper->slot);
9188 if (sp [0]->type != STACK_OBJ)
9190 if (sp [2]->type != STACK_OBJ)
9197 mono_emit_method_call (cfg, helper, iargs, sp [0]);
9199 if (sp [1]->opcode == OP_ICONST) {
9200 int array_reg = sp [0]->dreg;
9201 int index_reg = sp [1]->dreg;
9202 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
9204 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
9205 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
9207 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
9208 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
9212 if (*ip == CEE_STELEM)
9219 case CEE_CKFINITE: {
9223 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
9224 ins->sreg1 = sp [0]->dreg;
9225 ins->dreg = alloc_freg (cfg);
9226 ins->type = STACK_R8;
9227 MONO_ADD_INS (bblock, ins);
9229 *sp++ = mono_decompose_opcode (cfg, ins);
9234 case CEE_REFANYVAL: {
9235 MonoInst *src_var, *src;
9237 int klass_reg = alloc_preg (cfg);
9238 int dreg = alloc_preg (cfg);
9241 MONO_INST_NEW (cfg, ins, *ip);
9244 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
9245 CHECK_TYPELOAD (klass);
9246 mono_class_init (klass);
9248 if (cfg->generic_sharing_context)
9249 context_used = mono_class_check_context_used (klass);
9252 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9254 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
9255 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
9256 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
9259 MonoInst *klass_ins;
9261 klass_ins = emit_get_rgctx_klass (cfg, context_used,
9262 klass, MONO_RGCTX_INFO_KLASS);
9265 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
9266 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
9268 mini_emit_class_check (cfg, klass_reg, klass);
9270 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
9271 ins->type = STACK_MP;
9276 case CEE_MKREFANY: {
9277 MonoInst *loc, *addr;
9280 MONO_INST_NEW (cfg, ins, *ip);
9283 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
9284 CHECK_TYPELOAD (klass);
9285 mono_class_init (klass);
9287 if (cfg->generic_sharing_context)
9288 context_used = mono_class_check_context_used (klass);
9290 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
9291 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
9294 MonoInst *const_ins;
9295 int type_reg = alloc_preg (cfg);
9297 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
9298 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
9299 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
9300 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
9301 } else if (cfg->compile_aot) {
9302 int const_reg = alloc_preg (cfg);
9303 int type_reg = alloc_preg (cfg);
9305 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
9306 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
9307 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
9308 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
9310 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
9311 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
9313 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
9315 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
9316 ins->type = STACK_VTYPE;
9317 ins->klass = mono_defaults.typed_reference_class;
9324 MonoClass *handle_class;
9326 CHECK_STACK_OVF (1);
9329 n = read32 (ip + 1);
9331 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
9332 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
9333 handle = mono_method_get_wrapper_data (method, n);
9334 handle_class = mono_method_get_wrapper_data (method, n + 1);
9335 if (handle_class == mono_defaults.typehandle_class)
9336 handle = &((MonoClass*)handle)->byval_arg;
9339 handle = mono_ldtoken (image, n, &handle_class, generic_context);
9343 mono_class_init (handle_class);
9344 if (cfg->generic_sharing_context) {
9345 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
9346 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
9347 /* This case handles ldtoken
9348 of an open type, like for
9351 } else if (handle_class == mono_defaults.typehandle_class) {
9352 /* If we get a MONO_TYPE_CLASS
9353 then we need to provide the
9355 instantiation of it. */
9356 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
9359 context_used = mono_class_check_context_used (mono_class_from_mono_type (handle));
9360 } else if (handle_class == mono_defaults.fieldhandle_class)
9361 context_used = mono_class_check_context_used (((MonoClassField*)handle)->parent);
9362 else if (handle_class == mono_defaults.methodhandle_class)
9363 context_used = mono_method_check_context_used (handle);
9365 g_assert_not_reached ();
9368 if ((cfg->opt & MONO_OPT_SHARED) &&
9369 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
9370 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
9371 MonoInst *addr, *vtvar, *iargs [3];
9372 int method_context_used;
9374 if (cfg->generic_sharing_context)
9375 method_context_used = mono_method_check_context_used (method);
9377 method_context_used = 0;
9379 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
9381 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
9382 EMIT_NEW_ICONST (cfg, iargs [1], n);
9383 if (method_context_used) {
9384 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
9385 method, MONO_RGCTX_INFO_METHOD);
9386 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
9388 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
9389 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
9391 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9393 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
9395 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9397 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
9398 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
9399 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
9400 (cmethod->klass == mono_defaults.monotype_class->parent) &&
9401 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
9402 MonoClass *tclass = mono_class_from_mono_type (handle);
9404 mono_class_init (tclass);
9406 ins = emit_get_rgctx_klass (cfg, context_used,
9407 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
9408 } else if (cfg->compile_aot) {
9409 if (method->wrapper_type) {
9410 if (mono_class_get (tclass->image, tclass->type_token) == tclass && !generic_context) {
9411 /* Special case for static synchronized wrappers */
9412 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
9414 /* FIXME: n is not a normal token */
9415 cfg->disable_aot = TRUE;
9416 EMIT_NEW_PCONST (cfg, ins, NULL);
9419 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
9422 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
9424 ins->type = STACK_OBJ;
9425 ins->klass = cmethod->klass;
9428 MonoInst *addr, *vtvar;
9430 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
9433 if (handle_class == mono_defaults.typehandle_class) {
9434 ins = emit_get_rgctx_klass (cfg, context_used,
9435 mono_class_from_mono_type (handle),
9436 MONO_RGCTX_INFO_TYPE);
9437 } else if (handle_class == mono_defaults.methodhandle_class) {
9438 ins = emit_get_rgctx_method (cfg, context_used,
9439 handle, MONO_RGCTX_INFO_METHOD);
9440 } else if (handle_class == mono_defaults.fieldhandle_class) {
9441 ins = emit_get_rgctx_field (cfg, context_used,
9442 handle, MONO_RGCTX_INFO_CLASS_FIELD);
9444 g_assert_not_reached ();
9446 } else if (cfg->compile_aot) {
9447 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n);
9449 EMIT_NEW_PCONST (cfg, ins, handle);
9451 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9452 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
9453 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9463 MONO_INST_NEW (cfg, ins, OP_THROW);
9465 ins->sreg1 = sp [0]->dreg;
9467 bblock->out_of_line = TRUE;
9468 MONO_ADD_INS (bblock, ins);
9469 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
9470 MONO_ADD_INS (bblock, ins);
9473 link_bblock (cfg, bblock, end_bblock);
9474 start_new_bblock = 1;
9476 case CEE_ENDFINALLY:
9477 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
9478 MONO_ADD_INS (bblock, ins);
9480 start_new_bblock = 1;
9483 * Control will leave the method so empty the stack, otherwise
9484 * the next basic block will start with a nonempty stack.
9486 while (sp != stack_start) {
9494 if (*ip == CEE_LEAVE) {
9496 target = ip + 5 + (gint32)read32(ip + 1);
9499 target = ip + 2 + (signed char)(ip [1]);
9502 /* empty the stack */
9503 while (sp != stack_start) {
9508 * If this leave statement is in a catch block, check for a
9509 * pending exception, and rethrow it if necessary.
9510 * We avoid doing this in runtime invoke wrappers, since those are called
9511 * by native code which expects the wrapper to catch all exceptions.
9513 for (i = 0; i < header->num_clauses; ++i) {
9514 MonoExceptionClause *clause = &header->clauses [i];
9517 * Use <= in the final comparison to handle clauses with multiple
9518 * leave statements, like in bug #78024.
9519 * The ordering of the exception clauses guarantees that we find the
9522 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
9524 MonoBasicBlock *dont_throw;
9529 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
9532 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
9534 NEW_BBLOCK (cfg, dont_throw);
9537 * Currently, we always rethrow the abort exception, despite the
9538 * fact that this is not correct. See thread6.cs for an example.
9539 * But propagating the abort exception is more important than
9540 * getting the semantics right.
9542 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
9543 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
9544 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
9546 MONO_START_BB (cfg, dont_throw);
9551 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
9553 MonoExceptionClause *clause;
9555 for (tmp = handlers; tmp; tmp = tmp->next) {
9557 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
9559 link_bblock (cfg, bblock, tblock);
9560 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
9561 ins->inst_target_bb = tblock;
9562 ins->inst_eh_block = clause;
9563 MONO_ADD_INS (bblock, ins);
9564 bblock->has_call_handler = 1;
9565 if (COMPILE_LLVM (cfg)) {
9566 MonoBasicBlock *target_bb;
9569 * Link the finally bblock with the target, since it will
9570 * conceptually branch there.
9571 * FIXME: Have to link the bblock containing the endfinally.
9573 GET_BBLOCK (cfg, target_bb, target);
9574 link_bblock (cfg, tblock, target_bb);
9577 g_list_free (handlers);
9580 MONO_INST_NEW (cfg, ins, OP_BR);
9581 MONO_ADD_INS (bblock, ins);
9582 GET_BBLOCK (cfg, tblock, target);
9583 link_bblock (cfg, bblock, tblock);
9584 ins->inst_target_bb = tblock;
9585 start_new_bblock = 1;
9587 if (*ip == CEE_LEAVE)
9596 * Mono specific opcodes
9598 case MONO_CUSTOM_PREFIX: {
9600 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
9604 case CEE_MONO_ICALL: {
9606 MonoJitICallInfo *info;
9608 token = read32 (ip + 2);
9609 func = mono_method_get_wrapper_data (method, token);
9610 info = mono_find_jit_icall_by_addr (func);
9613 CHECK_STACK (info->sig->param_count);
9614 sp -= info->sig->param_count;
9616 ins = mono_emit_jit_icall (cfg, info->func, sp);
9617 if (!MONO_TYPE_IS_VOID (info->sig->ret))
9621 inline_costs += 10 * num_calls++;
9625 case CEE_MONO_LDPTR: {
9628 CHECK_STACK_OVF (1);
9630 token = read32 (ip + 2);
9632 ptr = mono_method_get_wrapper_data (method, token);
9633 if (cfg->compile_aot && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (strstr (method->name, "__icall_wrapper_") == method->name)) {
9634 MonoJitICallInfo *callinfo;
9635 const char *icall_name;
9637 icall_name = method->name + strlen ("__icall_wrapper_");
9638 g_assert (icall_name);
9639 callinfo = mono_find_jit_icall_by_name (icall_name);
9640 g_assert (callinfo);
9642 if (ptr == callinfo->func) {
9643 /* Will be transformed into an AOTCONST later */
9644 EMIT_NEW_PCONST (cfg, ins, ptr);
9650 /* FIXME: Generalize this */
9651 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
9652 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
9657 EMIT_NEW_PCONST (cfg, ins, ptr);
9660 inline_costs += 10 * num_calls++;
9661 /* Can't embed random pointers into AOT code */
9662 cfg->disable_aot = 1;
9665 case CEE_MONO_ICALL_ADDR: {
9666 MonoMethod *cmethod;
9669 CHECK_STACK_OVF (1);
9671 token = read32 (ip + 2);
9673 cmethod = mono_method_get_wrapper_data (method, token);
9675 if (cfg->compile_aot) {
9676 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
9678 ptr = mono_lookup_internal_call (cmethod);
9680 EMIT_NEW_PCONST (cfg, ins, ptr);
9686 case CEE_MONO_VTADDR: {
9687 MonoInst *src_var, *src;
9693 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9694 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
9699 case CEE_MONO_NEWOBJ: {
9700 MonoInst *iargs [2];
9702 CHECK_STACK_OVF (1);
9704 token = read32 (ip + 2);
9705 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9706 mono_class_init (klass);
9707 NEW_DOMAINCONST (cfg, iargs [0]);
9708 MONO_ADD_INS (cfg->cbb, iargs [0]);
9709 NEW_CLASSCONST (cfg, iargs [1], klass);
9710 MONO_ADD_INS (cfg->cbb, iargs [1]);
9711 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
9713 inline_costs += 10 * num_calls++;
9716 case CEE_MONO_OBJADDR:
9719 MONO_INST_NEW (cfg, ins, OP_MOVE);
9720 ins->dreg = alloc_ireg_mp (cfg);
9721 ins->sreg1 = sp [0]->dreg;
9722 ins->type = STACK_MP;
9723 MONO_ADD_INS (cfg->cbb, ins);
9727 case CEE_MONO_LDNATIVEOBJ:
9729 * Similar to LDOBJ, but instead load the unmanaged
9730 * representation of the vtype to the stack.
9735 token = read32 (ip + 2);
9736 klass = mono_method_get_wrapper_data (method, token);
9737 g_assert (klass->valuetype);
9738 mono_class_init (klass);
9741 MonoInst *src, *dest, *temp;
9744 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
9745 temp->backend.is_pinvoke = 1;
9746 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
9747 mini_emit_stobj (cfg, dest, src, klass, TRUE);
9749 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
9750 dest->type = STACK_VTYPE;
9751 dest->klass = klass;
9757 case CEE_MONO_RETOBJ: {
9759 * Same as RET, but return the native representation of a vtype
9762 g_assert (cfg->ret);
9763 g_assert (mono_method_signature (method)->pinvoke);
9768 token = read32 (ip + 2);
9769 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9771 if (!cfg->vret_addr) {
9772 g_assert (cfg->ret_var_is_local);
9774 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
9776 EMIT_NEW_RETLOADA (cfg, ins);
9778 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
9780 if (sp != stack_start)
9783 MONO_INST_NEW (cfg, ins, OP_BR);
9784 ins->inst_target_bb = end_bblock;
9785 MONO_ADD_INS (bblock, ins);
9786 link_bblock (cfg, bblock, end_bblock);
9787 start_new_bblock = 1;
9791 case CEE_MONO_CISINST:
9792 case CEE_MONO_CCASTCLASS: {
9797 token = read32 (ip + 2);
9798 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9799 if (ip [1] == CEE_MONO_CISINST)
9800 ins = handle_cisinst (cfg, klass, sp [0]);
9802 ins = handle_ccastclass (cfg, klass, sp [0]);
9808 case CEE_MONO_SAVE_LMF:
9809 case CEE_MONO_RESTORE_LMF:
9810 #ifdef MONO_ARCH_HAVE_LMF_OPS
9811 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
9812 MONO_ADD_INS (bblock, ins);
9813 cfg->need_lmf_area = TRUE;
9817 case CEE_MONO_CLASSCONST:
9818 CHECK_STACK_OVF (1);
9820 token = read32 (ip + 2);
9821 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
9824 inline_costs += 10 * num_calls++;
9826 case CEE_MONO_NOT_TAKEN:
9827 bblock->out_of_line = TRUE;
9831 CHECK_STACK_OVF (1);
9833 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
9834 ins->dreg = alloc_preg (cfg);
9835 ins->inst_offset = (gint32)read32 (ip + 2);
9836 ins->type = STACK_PTR;
9837 MONO_ADD_INS (bblock, ins);
9841 case CEE_MONO_DYN_CALL: {
9844 /* It would be easier to call a trampoline, but that would put an
9845 * extra frame on the stack, confusing exception handling. So
9846 * implement it inline using an opcode for now.
9849 if (!cfg->dyn_call_var) {
9850 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
9851 /* prevent it from being register allocated */
9852 cfg->dyn_call_var->flags |= MONO_INST_INDIRECT;
9855 /* Has to use a call inst since the local regalloc expects it */
9856 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
9857 ins = (MonoInst*)call;
9859 ins->sreg1 = sp [0]->dreg;
9860 ins->sreg2 = sp [1]->dreg;
9861 MONO_ADD_INS (bblock, ins);
9863 #ifdef MONO_ARCH_DYN_CALL_PARAM_AREA
9864 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
9868 inline_costs += 10 * num_calls++;
9873 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
9883 /* somewhat similar to LDTOKEN */
9884 MonoInst *addr, *vtvar;
9885 CHECK_STACK_OVF (1);
9886 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
9888 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9889 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
9891 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9892 ins->type = STACK_VTYPE;
9893 ins->klass = mono_defaults.argumenthandle_class;
9906 * The following transforms:
9907 * CEE_CEQ into OP_CEQ
9908 * CEE_CGT into OP_CGT
9909 * CEE_CGT_UN into OP_CGT_UN
9910 * CEE_CLT into OP_CLT
9911 * CEE_CLT_UN into OP_CLT_UN
9913 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
9915 MONO_INST_NEW (cfg, ins, cmp->opcode);
9917 cmp->sreg1 = sp [0]->dreg;
9918 cmp->sreg2 = sp [1]->dreg;
9919 type_from_op (cmp, sp [0], sp [1]);
9921 if ((sp [0]->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
9922 cmp->opcode = OP_LCOMPARE;
9923 else if (sp [0]->type == STACK_R8)
9924 cmp->opcode = OP_FCOMPARE;
9926 cmp->opcode = OP_ICOMPARE;
9927 MONO_ADD_INS (bblock, cmp);
9928 ins->type = STACK_I4;
9929 ins->dreg = alloc_dreg (cfg, ins->type);
9930 type_from_op (ins, sp [0], sp [1]);
9932 if (cmp->opcode == OP_FCOMPARE) {
9934 * The backends expect the fceq opcodes to do the
9937 cmp->opcode = OP_NOP;
9938 ins->sreg1 = cmp->sreg1;
9939 ins->sreg2 = cmp->sreg2;
9941 MONO_ADD_INS (bblock, ins);
9948 MonoMethod *cil_method;
9949 gboolean needs_static_rgctx_invoke;
9951 CHECK_STACK_OVF (1);
9953 n = read32 (ip + 2);
9954 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9955 if (!cmethod || mono_loader_get_last_error ())
9957 mono_class_init (cmethod->klass);
9959 mono_save_token_info (cfg, image, n, cmethod);
9961 if (cfg->generic_sharing_context)
9962 context_used = mono_method_check_context_used (cmethod);
9964 needs_static_rgctx_invoke = mono_method_needs_static_rgctx_invoke (cmethod, TRUE);
9966 cil_method = cmethod;
9967 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
9968 METHOD_ACCESS_FAILURE;
9970 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9971 if (check_linkdemand (cfg, method, cmethod))
9973 CHECK_CFG_EXCEPTION;
9974 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9975 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9979 * Optimize the common case of ldftn+delegate creation
9981 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
9982 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
9983 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
9984 MonoInst *target_ins;
9986 int invoke_context_used = 0;
9988 invoke = mono_get_delegate_invoke (ctor_method->klass);
9989 if (!invoke || !mono_method_signature (invoke))
9992 if (cfg->generic_sharing_context)
9993 invoke_context_used = mono_method_check_context_used (invoke);
9995 target_ins = sp [-1];
9997 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
9998 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
9999 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
10000 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
10001 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
10005 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
10006 /* FIXME: SGEN support */
10007 if (invoke_context_used == 0) {
10009 if (cfg->verbose_level > 3)
10010 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
10012 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used);
10013 CHECK_CFG_EXCEPTION;
10022 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
10023 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
10027 inline_costs += 10 * num_calls++;
10030 case CEE_LDVIRTFTN: {
10031 MonoInst *args [2];
10035 n = read32 (ip + 2);
10036 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
10037 if (!cmethod || mono_loader_get_last_error ())
10039 mono_class_init (cmethod->klass);
10041 if (cfg->generic_sharing_context)
10042 context_used = mono_method_check_context_used (cmethod);
10044 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
10045 if (check_linkdemand (cfg, method, cmethod))
10047 CHECK_CFG_EXCEPTION;
10048 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
10049 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
10055 args [1] = emit_get_rgctx_method (cfg, context_used,
10056 cmethod, MONO_RGCTX_INFO_METHOD);
10059 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
10061 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
10064 inline_costs += 10 * num_calls++;
10068 CHECK_STACK_OVF (1);
10070 n = read16 (ip + 2);
10072 EMIT_NEW_ARGLOAD (cfg, ins, n);
10077 CHECK_STACK_OVF (1);
10079 n = read16 (ip + 2);
10081 NEW_ARGLOADA (cfg, ins, n);
10082 MONO_ADD_INS (cfg->cbb, ins);
10090 n = read16 (ip + 2);
10092 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
10094 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
10098 CHECK_STACK_OVF (1);
10100 n = read16 (ip + 2);
10102 EMIT_NEW_LOCLOAD (cfg, ins, n);
10107 unsigned char *tmp_ip;
10108 CHECK_STACK_OVF (1);
10110 n = read16 (ip + 2);
10113 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
10119 EMIT_NEW_LOCLOADA (cfg, ins, n);
10128 n = read16 (ip + 2);
10130 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
10132 emit_stloc_ir (cfg, sp, header, n);
10139 if (sp != stack_start)
10141 if (cfg->method != method)
10143 * Inlining this into a loop in a parent could lead to
10144 * stack overflows which is different behavior than the
10145 * non-inlined case, thus disable inlining in this case.
10147 goto inline_failure;
10149 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
10150 ins->dreg = alloc_preg (cfg);
10151 ins->sreg1 = sp [0]->dreg;
10152 ins->type = STACK_PTR;
10153 MONO_ADD_INS (cfg->cbb, ins);
10155 cfg->flags |= MONO_CFG_HAS_ALLOCA;
10157 ins->flags |= MONO_INST_INIT;
10162 case CEE_ENDFILTER: {
10163 MonoExceptionClause *clause, *nearest;
10164 int cc, nearest_num;
10168 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
10170 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
10171 ins->sreg1 = (*sp)->dreg;
10172 MONO_ADD_INS (bblock, ins);
10173 start_new_bblock = 1;
10178 for (cc = 0; cc < header->num_clauses; ++cc) {
10179 clause = &header->clauses [cc];
10180 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
10181 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
10182 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
10187 g_assert (nearest);
10188 if ((ip - header->code) != nearest->handler_offset)
10193 case CEE_UNALIGNED_:
10194 ins_flag |= MONO_INST_UNALIGNED;
10195 /* FIXME: record alignment? we can assume 1 for now */
10199 case CEE_VOLATILE_:
10200 ins_flag |= MONO_INST_VOLATILE;
10204 ins_flag |= MONO_INST_TAILCALL;
10205 cfg->flags |= MONO_CFG_HAS_TAIL;
10206 /* Can't inline tail calls at this time */
10207 inline_costs += 100000;
10214 token = read32 (ip + 2);
10215 klass = mini_get_class (method, token, generic_context);
10216 CHECK_TYPELOAD (klass);
10217 if (generic_class_is_reference_type (cfg, klass))
10218 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
10220 mini_emit_initobj (cfg, *sp, NULL, klass);
10224 case CEE_CONSTRAINED_:
10226 token = read32 (ip + 2);
10227 if (method->wrapper_type != MONO_WRAPPER_NONE)
10228 constrained_call = (MonoClass *)mono_method_get_wrapper_data (method, token);
10230 constrained_call = mono_class_get_full (image, token, generic_context);
10231 CHECK_TYPELOAD (constrained_call);
10235 case CEE_INITBLK: {
10236 MonoInst *iargs [3];
10240 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
10241 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
10242 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
10243 /* emit_memset only works when val == 0 */
10244 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
10246 iargs [0] = sp [0];
10247 iargs [1] = sp [1];
10248 iargs [2] = sp [2];
10249 if (ip [1] == CEE_CPBLK) {
10250 MonoMethod *memcpy_method = get_memcpy_method ();
10251 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
10253 MonoMethod *memset_method = get_memset_method ();
10254 mono_emit_method_call (cfg, memset_method, iargs, NULL);
10264 ins_flag |= MONO_INST_NOTYPECHECK;
10266 ins_flag |= MONO_INST_NORANGECHECK;
10267 /* we ignore the no-nullcheck for now since we
10268 * really do it explicitly only when doing callvirt->call
10272 case CEE_RETHROW: {
10274 int handler_offset = -1;
10276 for (i = 0; i < header->num_clauses; ++i) {
10277 MonoExceptionClause *clause = &header->clauses [i];
10278 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
10279 handler_offset = clause->handler_offset;
10284 bblock->flags |= BB_EXCEPTION_UNSAFE;
10286 g_assert (handler_offset != -1);
10288 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
10289 MONO_INST_NEW (cfg, ins, OP_RETHROW);
10290 ins->sreg1 = load->dreg;
10291 MONO_ADD_INS (bblock, ins);
10293 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
10294 MONO_ADD_INS (bblock, ins);
10297 link_bblock (cfg, bblock, end_bblock);
10298 start_new_bblock = 1;
10306 CHECK_STACK_OVF (1);
10308 token = read32 (ip + 2);
10309 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !method->klass->image->dynamic) {
10310 MonoType *type = mono_type_create_from_typespec (image, token);
10311 token = mono_type_size (type, &ialign);
10313 MonoClass *klass = mono_class_get_full (image, token, generic_context);
10314 CHECK_TYPELOAD (klass);
10315 mono_class_init (klass);
10316 token = mono_class_value_size (klass, &align);
10318 EMIT_NEW_ICONST (cfg, ins, token);
10323 case CEE_REFANYTYPE: {
10324 MonoInst *src_var, *src;
10330 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
10332 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
10333 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
10334 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
10339 case CEE_READONLY_:
10352 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
10362 g_warning ("opcode 0x%02x not handled", *ip);
10366 if (start_new_bblock != 1)
10369 bblock->cil_length = ip - bblock->cil_code;
10370 bblock->next_bb = end_bblock;
10372 if (cfg->method == method && cfg->domainvar) {
10374 MonoInst *get_domain;
10376 cfg->cbb = init_localsbb;
10378 if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
10379 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
10382 get_domain->dreg = alloc_preg (cfg);
10383 MONO_ADD_INS (cfg->cbb, get_domain);
10385 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
10386 MONO_ADD_INS (cfg->cbb, store);
10389 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
10390 if (cfg->compile_aot)
10391 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
10392 mono_get_got_var (cfg);
10395 if (cfg->method == method && cfg->got_var)
10396 mono_emit_load_got_addr (cfg);
10401 cfg->cbb = init_localsbb;
10403 for (i = 0; i < header->num_locals; ++i) {
10404 MonoType *ptype = header->locals [i];
10405 int t = ptype->type;
10406 dreg = cfg->locals [i]->dreg;
10408 if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
10409 t = mono_class_enum_basetype (ptype->data.klass)->type;
10410 if (ptype->byref) {
10411 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
10412 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
10413 MONO_EMIT_NEW_ICONST (cfg, cfg->locals [i]->dreg, 0);
10414 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
10415 MONO_EMIT_NEW_I8CONST (cfg, cfg->locals [i]->dreg, 0);
10416 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
10417 MONO_INST_NEW (cfg, ins, OP_R8CONST);
10418 ins->type = STACK_R8;
10419 ins->inst_p0 = (void*)&r8_0;
10420 ins->dreg = alloc_dreg (cfg, STACK_R8);
10421 MONO_ADD_INS (init_localsbb, ins);
10422 EMIT_NEW_LOCSTORE (cfg, store, i, ins);
10423 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
10424 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
10425 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
10427 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
10432 if (cfg->init_ref_vars && cfg->method == method) {
10433 /* Emit initialization for ref vars */
10434 // FIXME: Avoid duplication initialization for IL locals.
10435 for (i = 0; i < cfg->num_varinfo; ++i) {
10436 MonoInst *ins = cfg->varinfo [i];
10438 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
10439 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
10443 /* Add a sequence point for method entry/exit events */
10445 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
10446 MONO_ADD_INS (init_localsbb, ins);
10447 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
10448 MONO_ADD_INS (cfg->bb_exit, ins);
10453 if (cfg->method == method) {
10454 MonoBasicBlock *bb;
10455 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10456 bb->region = mono_find_block_region (cfg, bb->real_offset);
10458 mono_create_spvar_for_region (cfg, bb->region);
10459 if (cfg->verbose_level > 2)
10460 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
10464 g_slist_free (class_inits);
10465 dont_inline = g_list_remove (dont_inline, method);
10467 if (inline_costs < 0) {
10470 /* Method is too large */
10471 mname = mono_method_full_name (method, TRUE);
10472 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
10473 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
10475 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
10476 mono_basic_block_free (original_bb);
10480 if ((cfg->verbose_level > 2) && (cfg->method == method))
10481 mono_print_code (cfg, "AFTER METHOD-TO-IR");
10483 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
10484 mono_basic_block_free (original_bb);
10485 return inline_costs;
10488 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
10495 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
10499 set_exception_type_from_invalid_il (cfg, method, ip);
10503 g_slist_free (class_inits);
10504 mono_basic_block_free (original_bb);
10505 dont_inline = g_list_remove (dont_inline, method);
10506 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Map a register-source store opcode (OP_STORE*_MEMBASE_REG) to the
 * corresponding immediate-source opcode (OP_STORE*_MEMBASE_IMM).
 * NOTE(review): presumably used when the value being stored is known to
 * be a constant — confirm at the call sites.
 * Asserts on any opcode that has no immediate store form.
 */
10511 store_membase_reg_to_store_membase_imm (int opcode)
10514 case OP_STORE_MEMBASE_REG:
10515 return OP_STORE_MEMBASE_IMM;
10516 case OP_STOREI1_MEMBASE_REG:
10517 return OP_STOREI1_MEMBASE_IMM;
10518 case OP_STOREI2_MEMBASE_REG:
10519 return OP_STOREI2_MEMBASE_IMM;
10520 case OP_STOREI4_MEMBASE_REG:
10521 return OP_STOREI4_MEMBASE_IMM;
10522 case OP_STOREI8_MEMBASE_REG:
10523 return OP_STOREI8_MEMBASE_IMM;
10525 g_assert_not_reached ();	/* no _IMM counterpart for other opcodes */
10531 #endif /* DISABLE_JIT */
/*
 * mono_op_to_op_imm:
 *
 *   Return the _IMM variant of OPCODE, i.e. the form taking its second
 * operand as an inline immediate instead of a register.  The mapping
 * covers the 32 bit (OP_I*) and 64 bit (OP_L*) ALU/shift opcodes, the
 * compare opcodes, the *_MEMBASE_REG stores, and a few x86/amd64
 * specific opcodes; OP_LOCALLOC also gets an immediate form.
 */
10534 mono_op_to_op_imm (int opcode)
10538 return OP_IADD_IMM;
10540 return OP_ISUB_IMM;
10542 return OP_IDIV_IMM;
10544 return OP_IDIV_UN_IMM;
10546 return OP_IREM_IMM;
10548 return OP_IREM_UN_IMM;
10550 return OP_IMUL_IMM;
10552 return OP_IAND_IMM;
10556 return OP_IXOR_IMM;
10558 return OP_ISHL_IMM;
10560 return OP_ISHR_IMM;
10562 return OP_ISHR_UN_IMM;
/* 64 bit variants */
10565 return OP_LADD_IMM;
10567 return OP_LSUB_IMM;
10569 return OP_LAND_IMM;
10573 return OP_LXOR_IMM;
10575 return OP_LSHL_IMM;
10577 return OP_LSHR_IMM;
10579 return OP_LSHR_UN_IMM;
/* compares */
10582 return OP_COMPARE_IMM;
10584 return OP_ICOMPARE_IMM;
10586 return OP_LCOMPARE_IMM;
/* stores */
10588 case OP_STORE_MEMBASE_REG:
10589 return OP_STORE_MEMBASE_IMM;
10590 case OP_STOREI1_MEMBASE_REG:
10591 return OP_STOREI1_MEMBASE_IMM;
10592 case OP_STOREI2_MEMBASE_REG:
10593 return OP_STOREI2_MEMBASE_IMM;
10594 case OP_STOREI4_MEMBASE_REG:
10595 return OP_STOREI4_MEMBASE_IMM;
/* target specific opcodes */
10597 #if defined(TARGET_X86) || defined (TARGET_AMD64)
10599 return OP_X86_PUSH_IMM;
10600 case OP_X86_COMPARE_MEMBASE_REG:
10601 return OP_X86_COMPARE_MEMBASE_IMM;
10603 #if defined(TARGET_AMD64)
10604 case OP_AMD64_ICOMPARE_MEMBASE_REG:
10605 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
10607 case OP_VOIDCALL_REG:
10608 return OP_VOIDCALL;
10616 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Convert a CIL indirect-load opcode (CEE_LDIND_*) to the typed JIT
 * base+offset load opcode (OP_LOAD*_MEMBASE).  Both CEE_LDIND_I and
 * CEE_LDIND_REF map to the pointer-sized OP_LOAD_MEMBASE.
 * Asserts on any opcode outside the CEE_LDIND_* range.
 */
10623 ldind_to_load_membase (int opcode)
10627 return OP_LOADI1_MEMBASE;
10629 return OP_LOADU1_MEMBASE;
10631 return OP_LOADI2_MEMBASE;
10633 return OP_LOADU2_MEMBASE;
10635 return OP_LOADI4_MEMBASE;
10637 return OP_LOADU4_MEMBASE;
10639 return OP_LOAD_MEMBASE;
10640 case CEE_LDIND_REF:
10641 return OP_LOAD_MEMBASE;
10643 return OP_LOADI8_MEMBASE;
10645 return OP_LOADR4_MEMBASE;
10647 return OP_LOADR8_MEMBASE;
10649 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Convert a CIL indirect-store opcode (CEE_STIND_*) to the typed JIT
 * base+offset store opcode (OP_STORE*_MEMBASE_REG).  CEE_STIND_REF maps
 * to the pointer-sized OP_STORE_MEMBASE_REG.
 * Asserts on any opcode outside the CEE_STIND_* range.
 */
10656 stind_to_store_membase (int opcode)
10660 return OP_STOREI1_MEMBASE_REG;
10662 return OP_STOREI2_MEMBASE_REG;
10664 return OP_STOREI4_MEMBASE_REG;
10666 case CEE_STIND_REF:
10667 return OP_STORE_MEMBASE_REG;
10669 return OP_STOREI8_MEMBASE_REG;
10671 return OP_STORER4_MEMBASE_REG;
10673 return OP_STORER8_MEMBASE_REG;
10675 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map a base+offset load (OP_LOAD*_MEMBASE) to the absolute-address
 * load form (OP_LOAD*_MEM).  Only enabled on x86/amd64 (see the FIXME
 * about a MONO_ARCH_HAVE_LOAD_MEM macro); OP_LOADI8_MEM additionally
 * requires a 64 bit register size.
 * NOTE(review): presumably used when the base register is known to hold
 * a constant address — confirm at the call sites.
 */
10682 mono_load_membase_to_load_mem (int opcode)
10684 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
10685 #if defined(TARGET_X86) || defined(TARGET_AMD64)
10687 case OP_LOAD_MEMBASE:
10688 return OP_LOAD_MEM;
10689 case OP_LOADU1_MEMBASE:
10690 return OP_LOADU1_MEM;
10691 case OP_LOADU2_MEMBASE:
10692 return OP_LOADU2_MEM;
10693 case OP_LOADI4_MEMBASE:
10694 return OP_LOADI4_MEM;
10695 case OP_LOADU4_MEMBASE:
10696 return OP_LOADU4_MEM;
10697 #if SIZEOF_REGISTER == 8
10698 case OP_LOADI8_MEMBASE:
10699 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   Given an ALU opcode OPCODE whose result is stored by STORE_OPCODE,
 * return a target-specific opcode that performs the operation directly
 * on the memory destination (x86/amd64 read-modify-write forms such as
 * OP_X86_ADD_MEMBASE_REG / OP_AMD64_ADD_MEMBASE_IMM).
 *   Only full-word stores qualify: OP_STORE/OP_STOREI4_MEMBASE_REG on
 * x86, plus OP_STOREI8_MEMBASE_REG on amd64; other store opcodes (and
 * other targets) fall through to the non-fused path.
 */
10708 op_to_op_dest_membase (int store_opcode, int opcode)
10710 #if defined(TARGET_X86)
10711 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
/* register-source ALU ops */
10716 return OP_X86_ADD_MEMBASE_REG;
10718 return OP_X86_SUB_MEMBASE_REG;
10720 return OP_X86_AND_MEMBASE_REG;
10722 return OP_X86_OR_MEMBASE_REG;
10724 return OP_X86_XOR_MEMBASE_REG;
/* immediate-source ALU ops */
10727 return OP_X86_ADD_MEMBASE_IMM;
10730 return OP_X86_SUB_MEMBASE_IMM;
10733 return OP_X86_AND_MEMBASE_IMM;
10736 return OP_X86_OR_MEMBASE_IMM;
10739 return OP_X86_XOR_MEMBASE_IMM;
10745 #if defined(TARGET_AMD64)
10746 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
/* 32 bit register-source ALU ops */
10751 return OP_X86_ADD_MEMBASE_REG;
10753 return OP_X86_SUB_MEMBASE_REG;
10755 return OP_X86_AND_MEMBASE_REG;
10757 return OP_X86_OR_MEMBASE_REG;
10759 return OP_X86_XOR_MEMBASE_REG;
/* 32 bit immediate-source ALU ops */
10761 return OP_X86_ADD_MEMBASE_IMM;
10763 return OP_X86_SUB_MEMBASE_IMM;
10765 return OP_X86_AND_MEMBASE_IMM;
10767 return OP_X86_OR_MEMBASE_IMM;
10769 return OP_X86_XOR_MEMBASE_IMM;
/* 64 bit register-source ALU ops */
10771 return OP_AMD64_ADD_MEMBASE_REG;
10773 return OP_AMD64_SUB_MEMBASE_REG;
10775 return OP_AMD64_AND_MEMBASE_REG;
10777 return OP_AMD64_OR_MEMBASE_REG;
10779 return OP_AMD64_XOR_MEMBASE_REG;
/* 64 bit immediate-source ALU ops */
10782 return OP_AMD64_ADD_MEMBASE_IMM;
10785 return OP_AMD64_SUB_MEMBASE_IMM;
10788 return OP_AMD64_AND_MEMBASE_IMM;
10791 return OP_AMD64_OR_MEMBASE_IMM;
10794 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Fuse a compare-result opcode with the byte store that consumes it
 * into a single x86/amd64 SETcc-to-memory opcode (OP_X86_SETEQ_MEMBASE /
 * OP_X86_SETNE_MEMBASE).  Only applies when the result is stored with
 * OP_STOREI1_MEMBASE_REG, since SETcc writes a single byte.
 */
10804 op_to_op_store_membase (int store_opcode, int opcode)
10806 #if defined(TARGET_X86) || defined(TARGET_AMD64)
10809 if (store_opcode == OP_STOREI1_MEMBASE_REG)
10810 return OP_X86_SETEQ_MEMBASE;
10812 if (store_opcode == OP_STOREI1_MEMBASE_REG)
10813 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Fold a memory load (LOAD_OPCODE) feeding the FIRST operand of
 * OPCODE into a target-specific opcode that reads that operand directly
 * from memory (x86/amd64 push/compare *_MEMBASE forms).
 *   Only pointer-sized/32 bit loads qualify on x86; on amd64 the 64 bit
 * forms are used for OP_LOAD/OP_LOADI8_MEMBASE and the 32 bit forms for
 * OP_LOADI4/OP_LOADU4_MEMBASE, with extra care under __mono_ilp32__
 * where pointers and registers differ in size.
 */
10821 op_to_op_src1_membase (int load_opcode, int opcode)
10824 /* FIXME: This has sign extension issues */
10826 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
10827 return OP_X86_COMPARE_MEMBASE8_IMM;
10830 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
10835 return OP_X86_PUSH_MEMBASE;
10836 case OP_COMPARE_IMM:
10837 case OP_ICOMPARE_IMM:
10838 return OP_X86_COMPARE_MEMBASE_IMM;
10841 return OP_X86_COMPARE_MEMBASE_REG;
10845 #ifdef TARGET_AMD64
10846 /* FIXME: This has sign extension issues */
10848 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
10849 return OP_X86_COMPARE_MEMBASE8_IMM;
10854 #ifdef __mono_ilp32__
10855 if (load_opcode == OP_LOADI8_MEMBASE)
10857 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10859 return OP_X86_PUSH_MEMBASE;
10861 /* FIXME: This only works for 32 bit immediates
10862 case OP_COMPARE_IMM:
10863 case OP_LCOMPARE_IMM:
10864 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10865 return OP_AMD64_COMPARE_MEMBASE_IMM;
10867 case OP_ICOMPARE_IMM:
10868 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10869 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
10873 #ifdef __mono_ilp32__
10874 if (load_opcode == OP_LOAD_MEMBASE)
10875 return OP_AMD64_ICOMPARE_MEMBASE_REG;
10876 if (load_opcode == OP_LOADI8_MEMBASE)
10878 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10880 return OP_AMD64_COMPARE_MEMBASE_REG;
10883 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10884 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Map OPCODE to a "reg, membase" variant which takes its second source
 * operand directly from memory (loaded with LOAD_OPCODE). Used by
 * mono_spill_global_vars () when srcindex == 1.
 * NOTE(review): elided listing — switch scaffolding, case labels and the
 * default return (-1, per the call sites) are not visible here.
 */
10893 op_to_op_src2_membase (int load_opcode, int opcode)
/* x86: only full-word loads can be fused. */
10896 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
10902 return OP_X86_COMPARE_REG_MEMBASE;
10904 return OP_X86_ADD_REG_MEMBASE;
10906 return OP_X86_SUB_REG_MEMBASE;
10908 return OP_X86_AND_REG_MEMBASE;
10910 return OP_X86_OR_REG_MEMBASE;
10912 return OP_X86_XOR_REG_MEMBASE;
10916 #ifdef TARGET_AMD64
/* Under ILP32, OP_LOAD_MEMBASE is 4 bytes wide and counts as a 32 bit load. */
10917 #ifdef __mono_ilp32__
10918 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE) ) {
10920 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)) {
/* 32 bit ops share the OP_X86_* opcodes... */
10924 return OP_AMD64_ICOMPARE_REG_MEMBASE;
10926 return OP_X86_ADD_REG_MEMBASE;
10928 return OP_X86_SUB_REG_MEMBASE;
10930 return OP_X86_AND_REG_MEMBASE;
10932 return OP_X86_OR_REG_MEMBASE;
10934 return OP_X86_XOR_REG_MEMBASE;
10936 #ifdef __mono_ilp32__
10937 } else if (load_opcode == OP_LOADI8_MEMBASE) {
10939 } else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE)) {
/* ...64 bit ops use the OP_AMD64_* opcodes. */
10944 return OP_AMD64_COMPARE_REG_MEMBASE;
10946 return OP_AMD64_ADD_REG_MEMBASE;
10948 return OP_AMD64_SUB_REG_MEMBASE;
10950 return OP_AMD64_AND_REG_MEMBASE;
10952 return OP_AMD64_OR_REG_MEMBASE;
10954 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but refuses to convert opcodes whose
 * immediate form would have to be software-emulated on this target
 * (long shifts on 32 bit registers, mul/div where emulated).
 * NOTE(review): elided listing — the case labels that bail out
 * (presumably returning -1) under each #if are not visible here.
 */
10963 mono_op_to_op_imm_noemul (int opcode)
10966 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
10972 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
/* Everything else can use the normal imm conversion. */
10980 return mono_op_to_op_imm (opcode);
10984 #ifndef DISABLE_JIT
10987 * mono_handle_global_vregs:
10989 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
/*
 * mono_handle_global_vregs:
 *
 *   Scan all bblocks and promote any vreg referenced from more than one
 * bblock to a 'global' vreg by allocating a MonoInst variable for it;
 * conversely, demote variables used in only a single bblock back to local
 * vregs, then compact cfg->varinfo/cfg->vars. NOTE(review): elided
 * listing — declarations (vreg, prev_bb, i, pos, ...), some braces and
 * several statements are not visible here.
 */
10993 mono_handle_global_vregs (MonoCompile *cfg)
10995 gint32 *vreg_to_bb;
10996 MonoBasicBlock *bb;
/* NOTE(review): sizeof (gint32*) looks like it should be sizeof (gint32) —
 * the array holds gint32 values; on LP64 this merely over-allocates. */
10999 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
11001 #ifdef MONO_ARCH_SIMD_INTRINSICS
11002 if (cfg->uses_simd_intrinsics)
11003 mono_simd_simplify_indirection (cfg);
11006 /* Find local vregs used in more than one bb */
11007 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
11008 MonoInst *ins = bb->code;
11009 int block_num = bb->block_num;
11011 if (cfg->verbose_level > 2)
11012 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
11015 for (; ins; ins = ins->next) {
11016 const char *spec = INS_INFO (ins->opcode);
11017 int regtype = 0, regindex;
11020 if (G_UNLIKELY (cfg->verbose_level > 2))
11021 mono_print_ins (ins);
/* The CIL-level opcodes must already have been lowered to machine IR. */
11023 g_assert (ins->opcode >= MONO_CEE_LAST);
/* Visit dreg, sreg1, sreg2, sreg3 in turn; blank spec slots are skipped. */
11025 for (regindex = 0; regindex < 4; regindex ++) {
11028 if (regindex == 0) {
11029 regtype = spec [MONO_INST_DEST];
11030 if (regtype == ' ')
11033 } else if (regindex == 1) {
11034 regtype = spec [MONO_INST_SRC1];
11035 if (regtype == ' ')
11038 } else if (regindex == 2) {
11039 regtype = spec [MONO_INST_SRC2];
11040 if (regtype == ' ')
11043 } else if (regindex == 3) {
11044 regtype = spec [MONO_INST_SRC3];
11045 if (regtype == ' ')
11050 #if SIZEOF_REGISTER == 4
11051 /* In the LLVM case, the long opcodes are not decomposed */
11052 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
11054 * Since some instructions reference the original long vreg,
11055 * and some reference the two component vregs, it is quite hard
11056 * to determine when it needs to be global. So be conservative.
11058 if (!get_vreg_to_inst (cfg, vreg)) {
11059 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
11061 if (cfg->verbose_level > 2)
11062 printf ("LONG VREG R%d made global.\n", vreg);
11066 * Make the component vregs volatile since the optimizations can
11067 * get confused otherwise.
11068 */
/* vreg + 1 / vreg + 2 are the low/high 32 bit halves of the long. */
11069 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
11070 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
11074 g_assert (vreg != -1);
11076 prev_bb = vreg_to_bb [vreg];
11077 if (prev_bb == 0) {
11078 /* 0 is a valid block num */
11079 vreg_to_bb [vreg] = block_num + 1;
11080 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* Hard registers are implicitly global; nothing to do. */
11081 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
11084 if (!get_vreg_to_inst (cfg, vreg)) {
11085 if (G_UNLIKELY (cfg->verbose_level > 2))
11086 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
/* Pick the variable type from the spec regtype ('i'/'l'/'f'/vtype). */
11090 if (vreg_is_ref (cfg, vreg))
11091 mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
11093 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
11096 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
11099 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
11102 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
11105 g_assert_not_reached ();
11109 /* Flag as having been used in more than one bb */
11110 vreg_to_bb [vreg] = -1;
11116 /* If a variable is used in only one bblock, convert it into a local vreg */
11117 for (i = 0; i < cfg->num_varinfo; i++) {
11118 MonoInst *var = cfg->varinfo [i];
11119 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
11121 switch (var->type) {
11127 #if SIZEOF_REGISTER == 8
11130 #if !defined(TARGET_X86) && !defined(MONO_ARCH_SOFT_FLOAT)
11131 /* Enabling this screws up the fp stack on x86 */
11134 /* Arguments are implicitly global */
11135 /* Putting R4 vars into registers doesn't work currently */
11136 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg) {
/*
11138 * Make sure that the variable's liveness interval doesn't contain a call, since
11139 * that would cause the lvreg to be spilled, making the whole optimization
 * useless.
 */
11142 /* This is too slow for JIT compilation */
11144 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
11146 int def_index, call_index, ins_index;
11147 gboolean spilled = FALSE;
11152 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
11153 const char *spec = INS_INFO (ins->opcode);
11155 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
11156 def_index = ins_index;
/* NOTE(review): the two disjuncts below are identical; the second one
 * almost certainly should test MONO_INST_SRC2/ins->sreg2 (copy-paste
 * bug) — sreg2 uses are currently not checked for intervening calls. */
11158 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
11159 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
11160 if (call_index > def_index) {
11166 if (MONO_IS_CALL (ins))
11167 call_index = ins_index;
11177 if (G_UNLIKELY (cfg->verbose_level > 2))
11178 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
/* Demote: kill the variable and drop the vreg -> var mapping. */
11179 var->flags |= MONO_INST_IS_DEAD;
11180 cfg->vreg_to_inst [var->dreg] = NULL;
/*
11187 * Compress the varinfo and vars tables so the liveness computation is faster and
11188 * takes up less space.
 */
11191 for (i = 0; i < cfg->num_varinfo; ++i) {
11192 MonoInst *var = cfg->varinfo [i];
11193 if (pos < i && cfg->locals_start == i)
11194 cfg->locals_start = pos;
11195 if (!(var->flags & MONO_INST_IS_DEAD)) {
11197 cfg->varinfo [pos] = cfg->varinfo [i];
11198 cfg->varinfo [pos]->inst_c0 = pos;
11199 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
11200 cfg->vars [pos].idx = pos;
11201 #if SIZEOF_REGISTER == 4
11202 if (cfg->varinfo [pos]->type == STACK_I8) {
11203 /* Modify the two component vars too */
11206 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
11207 var1->inst_c0 = pos;
11208 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
11209 var1->inst_c0 = pos;
11216 cfg->num_varinfo = pos;
11217 if (cfg->locals_start > cfg->num_varinfo)
11218 cfg->locals_start = cfg->num_varinfo;
11222 * mono_spill_global_vars:
11224 * Generate spill code for variables which are not allocated to registers,
11225 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
11226 * code is generated which could be optimized by the local optimization passes.
/*
 * mono_spill_global_vars:
 *
 *   Generate spill loads/stores for variables not allocated to hardware
 * registers, rewriting each def/use of a global vreg into a fresh local
 * lvreg plus a LOAD/STORE_MEMBASE, fusing membase forms where the target
 * supports them (op_to_op_*_membase). Also records instruction-precise
 * live ranges and GC liveness markers. Sets *need_local_opts when it
 * emits code the local optimizer could clean up.
 * NOTE(review): elided listing — several declarations (ins, lvregs,
 * lvreg, no_lvreg initialization, ...), braces and statements are not
 * visible here.
 */
11229 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
11231 MonoBasicBlock *bb;
11233 int orig_next_vreg;
11234 guint32 *vreg_to_lvreg;
11236 guint32 i, lvregs_len;
11237 gboolean dest_has_lvreg = FALSE;
11238 guint32 stacktypes [128];
11239 MonoInst **live_range_start, **live_range_end;
11240 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
11242 *need_local_opts = FALSE;
11244 memset (spec2, 0, sizeof (spec2));
11246 /* FIXME: Move this function to mini.c */
/* Map spec regtype chars to stack types for alloc_dreg (). */
11247 stacktypes ['i'] = STACK_PTR;
11248 stacktypes ['l'] = STACK_I8;
11249 stacktypes ['f'] = STACK_R8;
11250 #ifdef MONO_ARCH_SIMD_INTRINSICS
11251 stacktypes ['x'] = STACK_VTYPE;
11254 #if SIZEOF_REGISTER == 4
11255 /* Create MonoInsts for longs */
11256 for (i = 0; i < cfg->num_varinfo; i++) {
11257 MonoInst *ins = cfg->varinfo [i];
11259 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
11260 switch (ins->type) {
11265 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
11268 g_assert (ins->opcode == OP_REGOFFSET);
/* Give the low/high component vregs (dreg+1 / dreg+2) their own
 * REGOFFSET slots inside the parent variable's stack slot. */
11270 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
11272 tree->opcode = OP_REGOFFSET;
11273 tree->inst_basereg = ins->inst_basereg;
11274 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
11276 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
11278 tree->opcode = OP_REGOFFSET;
11279 tree->inst_basereg = ins->inst_basereg;
11280 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
11290 if (cfg->compute_gc_maps) {
11291 /* register-allocated vars need liveness info for GC maps even when
 * they are not reference-typed */
11292 for (i = 0; i < cfg->num_varinfo; i++) {
11293 MonoInst *ins = cfg->varinfo [i];
11295 if (ins->opcode == OP_REGVAR)
11296 ins->flags |= MONO_INST_GC_TRACK;
11300 /* FIXME: widening and truncation */
/*
11303 * As an optimization, when a variable allocated to the stack is first loaded into
11304 * an lvreg, we will remember the lvreg and use it the next time instead of loading
11305 * the variable again.
 */
11307 orig_next_vreg = cfg->next_vreg;
11308 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
/* Fixed capacity of 1024 cached lvregs, guarded by asserts below. */
11309 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
/*
11313 * These arrays contain the first and last instructions accessing a given
11315 * Since we emit bblocks in the same order we process them here, and we
11316 * don't split live ranges, these will precisely describe the live range of
11317 * the variable, i.e. the instruction range where a valid value can be found
11318 * in the variables location.
11319 * The live range is computed using the liveness info computed by the liveness pass.
11320 * We can't use vmv->range, since that is an abstract live range, and we need
11321 * one which is instruction precise.
11322 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
 */
11324 /* FIXME: Only do this if debugging info is requested */
11325 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
11326 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
11327 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
11328 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
11330 /* Add spill loads/stores */
11331 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
11334 if (cfg->verbose_level > 2)
11335 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
/* The lvreg cache is per-bblock: */
11337 /* Clear vreg_to_lvreg array */
11338 for (i = 0; i < lvregs_len; i++)
11339 vreg_to_lvreg [lvregs [i]] = 0;
11343 MONO_BB_FOR_EACH_INS (bb, ins) {
11344 const char *spec = INS_INFO (ins->opcode);
11345 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
11346 gboolean store, no_lvreg;
11347 int sregs [MONO_MAX_SRC_REGS];
11349 if (G_UNLIKELY (cfg->verbose_level > 2))
11350 mono_print_ins (ins);
11352 if (ins->opcode == OP_NOP)
/*
11356 * We handle LDADDR here as well, since it can only be decomposed
11357 * when variable addresses are known.
 */
11359 if (ins->opcode == OP_LDADDR) {
11360 MonoInst *var = ins->inst_p0;
11362 if (var->opcode == OP_VTARG_ADDR) {
11363 /* Happens on SPARC/S390 where vtypes are passed by reference */
11364 MonoInst *vtaddr = var->inst_left;
11365 if (vtaddr->opcode == OP_REGVAR) {
11366 ins->opcode = OP_MOVE;
11367 ins->sreg1 = vtaddr->dreg;
11369 else if (var->inst_left->opcode == OP_REGOFFSET) {
11370 ins->opcode = OP_LOAD_MEMBASE;
11371 ins->inst_basereg = vtaddr->inst_basereg;
11372 ins->inst_offset = vtaddr->inst_offset;
11376 g_assert (var->opcode == OP_REGOFFSET);
/* Address of a stack slot: basereg + offset. */
11378 ins->opcode = OP_ADD_IMM;
11379 ins->sreg1 = var->inst_basereg;
11380 ins->inst_imm = var->inst_offset;
11383 *need_local_opts = TRUE;
11384 spec = INS_INFO (ins->opcode);
/* Everything must have been lowered below the CIL opcodes by now. */
11387 if (ins->opcode < MONO_CEE_LAST) {
11388 mono_print_ins (ins);
11389 g_assert_not_reached ();
/*
11393 * Store opcodes have destbasereg in the dreg, but in reality, it is an
 * src register, so swap dreg/sreg2 and use the adjusted spec2 while
 * processing, then swap back afterwards.
 */
11397 if (MONO_IS_STORE_MEMBASE (ins)) {
11398 tmp_reg = ins->dreg;
11399 ins->dreg = ins->sreg2;
11400 ins->sreg2 = tmp_reg;
11403 spec2 [MONO_INST_DEST] = ' ';
11404 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
11405 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
11406 spec2 [MONO_INST_SRC3] = ' ';
11408 } else if (MONO_IS_STORE_MEMINDEX (ins))
11409 g_assert_not_reached ();
11414 if (G_UNLIKELY (cfg->verbose_level > 2)) {
11415 printf ("\t %.3s %d", spec, ins->dreg);
11416 num_sregs = mono_inst_get_src_registers (ins, sregs);
11417 for (srcindex = 0; srcindex < 3; ++srcindex)
11418 printf (" %d", sregs [srcindex]);
/***************/
/*    DREG     */
/***************/
11425 regtype = spec [MONO_INST_DEST];
11426 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
11429 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
11430 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
11431 MonoInst *store_ins;
11433 MonoInst *def_ins = ins;
11434 int dreg = ins->dreg; /* The original vreg */
11436 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
11438 if (var->opcode == OP_REGVAR) {
11439 ins->dreg = var->dreg;
11440 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
/*
11442 * Instead of emitting a load+store, use a _membase opcode.
 */
11444 g_assert (var->opcode == OP_REGOFFSET);
11445 if (ins->opcode == OP_MOVE) {
11449 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
11450 ins->inst_basereg = var->inst_basereg;
11451 ins->inst_offset = var->inst_offset;
11454 spec = INS_INFO (ins->opcode);
11458 g_assert (var->opcode == OP_REGOFFSET);
11460 prev_dreg = ins->dreg;
11462 /* Invalidate any previous lvreg for this vreg */
11463 vreg_to_lvreg [ins->dreg] = 0;
/* With soft float, doubles live in integer registers/slots. */
11467 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
11469 store_opcode = OP_STOREI8_MEMBASE_REG;
/* Redirect the def to a fresh lvreg; the real store comes below. */
11472 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
11474 if (regtype == 'l') {
11475 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
11476 mono_bblock_insert_after_ins (bb, ins, store_ins);
11477 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
11478 mono_bblock_insert_after_ins (bb, ins, store_ins);
11479 def_ins = store_ins;
11482 g_assert (store_opcode != OP_STOREV_MEMBASE);
11484 /* Try to fuse the store into the instruction itself */
11485 /* FIXME: Add more instructions */
11486 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
11487 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
11488 ins->inst_imm = ins->inst_c0;
11489 ins->inst_destbasereg = var->inst_basereg;
11490 ins->inst_offset = var->inst_offset;
11491 spec = INS_INFO (ins->opcode);
11492 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
11493 ins->opcode = store_opcode;
11494 ins->inst_destbasereg = var->inst_basereg;
11495 ins->inst_offset = var->inst_offset;
/* Redo the dreg/sreg2 swap for the converted store (see above). */
11499 tmp_reg = ins->dreg;
11500 ins->dreg = ins->sreg2;
11501 ins->sreg2 = tmp_reg;
11504 spec2 [MONO_INST_DEST] = ' ';
11505 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
11506 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
11507 spec2 [MONO_INST_SRC3] = ' ';
11509 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
11510 // FIXME: The backends expect the base reg to be in inst_basereg
11511 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
11513 ins->inst_basereg = var->inst_basereg;
11514 ins->inst_offset = var->inst_offset;
11515 spec = INS_INFO (ins->opcode);
11517 /* printf ("INS: "); mono_print_ins (ins); */
11518 /* Create a store instruction */
11519 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
11521 /* Insert it after the instruction */
11522 mono_bblock_insert_after_ins (bb, ins, store_ins);
11524 def_ins = store_ins;
/*
11527 * We can't assign ins->dreg to var->dreg here, since the
11528 * sregs could use it. So set a flag, and do it after
 * the sregs have been processed.
 */
11531 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
11532 dest_has_lvreg = TRUE;
11537 if (def_ins && !live_range_start [dreg]) {
11538 live_range_start [dreg] = def_ins;
11539 live_range_start_bb [dreg] = bb;
11542 if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
11545 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
11546 tmp->inst_c1 = dreg;
11547 mono_bblock_insert_after_ins (bb, def_ins, tmp);
/************/
/*  SREGS   */
/************/
11554 num_sregs = mono_inst_get_src_registers (ins, sregs);
11555 for (srcindex = 0; srcindex < 3; ++srcindex) {
11556 regtype = spec [MONO_INST_SRC1 + srcindex];
11557 sreg = sregs [srcindex];
11559 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
11560 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
11561 MonoInst *var = get_vreg_to_inst (cfg, sreg);
11562 MonoInst *use_ins = ins;
11563 MonoInst *load_ins;
11564 guint32 load_opcode;
11566 if (var->opcode == OP_REGVAR) {
11567 sregs [srcindex] = var->dreg;
11568 //mono_inst_set_src_registers (ins, sregs);
11569 live_range_end [sreg] = use_ins;
11570 live_range_end_bb [sreg] = bb;
11572 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
11575 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
11576 /* var->dreg is a hreg */
11577 tmp->inst_c1 = sreg;
11578 mono_bblock_insert_after_ins (bb, ins, tmp);
11584 g_assert (var->opcode == OP_REGOFFSET);
11586 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
11588 g_assert (load_opcode != OP_LOADV_MEMBASE);
11590 if (vreg_to_lvreg [sreg]) {
11591 g_assert (vreg_to_lvreg [sreg] != -1);
11593 /* The variable is already loaded to an lvreg */
11594 if (G_UNLIKELY (cfg->verbose_level > 2))
11595 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
11596 sregs [srcindex] = vreg_to_lvreg [sreg];
11597 //mono_inst_set_src_registers (ins, sregs);
11601 /* Try to fuse the load into the instruction */
11602 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
11603 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
11604 sregs [0] = var->inst_basereg;
11605 //mono_inst_set_src_registers (ins, sregs);
11606 ins->inst_offset = var->inst_offset;
11607 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
11608 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
11609 sregs [1] = var->inst_basereg;
11610 //mono_inst_set_src_registers (ins, sregs);
11611 ins->inst_offset = var->inst_offset;
11613 if (MONO_IS_REAL_MOVE (ins)) {
11614 ins->opcode = OP_NOP;
11617 //printf ("%d ", srcindex); mono_print_ins (ins);
11619 sreg = alloc_dreg (cfg, stacktypes [regtype]);
/* Cache the new lvreg unless it could be invalidated (fp stack,
 * volatile/indirect variables). */
11621 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
11622 if (var->dreg == prev_dreg) {
/*
11624 * sreg refers to the value loaded by the load
11625 * emitted below, but we need to use ins->dreg
11626 * since it refers to the store emitted earlier.
 */
11630 g_assert (sreg != -1);
11631 vreg_to_lvreg [var->dreg] = sreg;
11632 g_assert (lvregs_len < 1024);
11633 lvregs [lvregs_len ++] = var->dreg;
11637 sregs [srcindex] = sreg;
11638 //mono_inst_set_src_registers (ins, sregs);
11640 if (regtype == 'l') {
11641 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
11642 mono_bblock_insert_before_ins (bb, ins, load_ins);
11643 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
11644 mono_bblock_insert_before_ins (bb, ins, load_ins);
11645 use_ins = load_ins;
11648 #if SIZEOF_REGISTER == 4
11649 g_assert (load_opcode != OP_LOADI8_MEMBASE);
11651 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
11652 mono_bblock_insert_before_ins (bb, ins, load_ins);
11653 use_ins = load_ins;
11657 if (var->dreg < orig_next_vreg) {
11658 live_range_end [var->dreg] = use_ins;
11659 live_range_end_bb [var->dreg] = bb;
11662 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
11665 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
11666 tmp->inst_c1 = var->dreg;
11667 mono_bblock_insert_after_ins (bb, ins, tmp);
11671 mono_inst_set_src_registers (ins, sregs);
/* Deferred from the dreg processing above (see comment there). */
11673 if (dest_has_lvreg) {
11674 g_assert (ins->dreg != -1);
11675 vreg_to_lvreg [prev_dreg] = ins->dreg;
11676 g_assert (lvregs_len < 1024);
11677 lvregs [lvregs_len ++] = prev_dreg;
11678 dest_has_lvreg = FALSE;
/* Undo the dreg/sreg2 swap done for store opcodes above. */
11682 tmp_reg = ins->dreg;
11683 ins->dreg = ins->sreg2;
11684 ins->sreg2 = tmp_reg;
/* Calls clobber everything, so the lvreg cache must be flushed. */
11687 if (MONO_IS_CALL (ins)) {
11688 /* Clear vreg_to_lvreg array */
11689 for (i = 0; i < lvregs_len; i++)
11690 vreg_to_lvreg [lvregs [i]] = 0;
11692 } else if (ins->opcode == OP_NOP) {
11694 MONO_INST_NULLIFY_SREGS (ins);
11697 if (cfg->verbose_level > 2)
11698 mono_print_ins_index (1, ins);
11701 /* Extend the live range based on the liveness info */
11702 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
11703 for (i = 0; i < cfg->num_varinfo; i ++) {
11704 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
11706 if (vreg_is_volatile (cfg, vi->vreg))
11707 /* The liveness info is incomplete */
11710 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
11711 /* Live from at least the first ins of this bb */
11712 live_range_start [vi->vreg] = bb->code;
11713 live_range_start_bb [vi->vreg] = bb;
11716 if (mono_bitset_test_fast (bb->live_out_set, i)) {
11717 /* Live at least until the last ins of this bb */
11718 live_range_end [vi->vreg] = bb->last_ins;
11719 live_range_end_bb [vi->vreg] = bb;
11725 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
/*
11727 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
11728 * by storing the current native offset into MonoMethodVar->live_range_start/end.
 */
11730 if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
11731 for (i = 0; i < cfg->num_varinfo; ++i) {
11732 int vreg = MONO_VARINFO (cfg, i)->vreg;
11735 if (live_range_start [vreg]) {
11736 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
11738 ins->inst_c1 = vreg;
11739 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
11741 if (live_range_end [vreg]) {
11742 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
11744 ins->inst_c1 = vreg;
11745 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
11746 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
11748 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
11754 g_free (live_range_start);
11755 g_free (live_range_end);
11756 g_free (live_range_start_bb);
11757 g_free (live_range_end_bb);
11762 * - use 'iadd' instead of 'int_add'
11763 * - handling ovf opcodes: decompose in method_to_ir.
11764 * - unify iregs/fregs
11765 * -> partly done, the missing parts are:
11766 * - a more complete unification would involve unifying the hregs as well, so
11767 * code wouldn't need if (fp) all over the place. but that would mean the hregs
11768 * would no longer map to the machine hregs, so the code generators would need to
11769 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
11770 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
11771 * fp/non-fp branches speeds it up by about 15%.
11772 * - use sext/zext opcodes instead of shifts
11774 * - get rid of TEMPLOADs if possible and use vregs instead
11775 * - clean up usage of OP_P/OP_ opcodes
11776 * - cleanup usage of DUMMY_USE
11777 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
11779 * - set the stack type and allocate a dreg in the EMIT_NEW macros
11780 * - get rid of all the <foo>2 stuff when the new JIT is ready.
11781 * - make sure handle_stack_args () is called before the branch is emitted
11782 * - when the new IR is done, get rid of all unused stuff
11783 * - COMPARE/BEQ as separate instructions or unify them ?
11784 * - keeping them separate allows specialized compare instructions like
11785 * compare_imm, compare_membase
11786 * - most back ends unify fp compare+branch, fp compare+ceq
11787 * - integrate mono_save_args into inline_method
11788 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRACH_BLOCK2
11789 * - handle long shift opts on 32 bit platforms somehow: they require
11790 * 3 sregs (2 for arg1 and 1 for arg2)
11791 * - make byref a 'normal' type.
11792 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
11793 * variable if needed.
11794 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
11795 * like inline_method.
11796 * - remove inlining restrictions
11797 * - fix LNEG and enable cfold of INEG
11798 * - generalize x86 optimizations like ldelema as a peephole optimization
11799 * - add store_mem_imm for amd64
11800 * - optimize the loading of the interruption flag in the managed->native wrappers
11801 * - avoid special handling of OP_NOP in passes
11802 * - move code inserting instructions into one function/macro.
11803 * - try a coalescing phase after liveness analysis
11804 * - add float -> vreg conversion + local optimizations on !x86
11805 * - figure out how to handle decomposed branches during optimizations, ie.
11806 * compare+branch, op_jump_table+op_br etc.
11807 * - promote RuntimeXHandles to vregs
11808 * - vtype cleanups:
11809 * - add a NEW_VARLOADA_VREG macro
11810 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
11811 * accessing vtype fields.
11812 * - get rid of I8CONST on 64 bit platforms
11813 * - dealing with the increase in code size due to branches created during opcode
11815 * - use extended basic blocks
11816 * - all parts of the JIT
11817 * - handle_global_vregs () && local regalloc
11818 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
11819 * - sources of increase in code size:
11822 * - isinst and castclass
11823 * - lvregs not allocated to global registers even if used multiple times
11824 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
11826 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
11827 * - add all micro optimizations from the old JIT
11828 * - put tree optimizations into the deadce pass
11829 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
11830 * specific function.
11831 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
11832 * fcompare + branchCC.
11833 * - create a helper function for allocating a stack slot, taking into account
11834 * MONO_CFG_HAS_SPILLUP.
11836 * - merge the ia64 switch changes.
11837 * - optimize mono_regstate2_alloc_int/float.
11838 * - fix the pessimistic handling of variables accessed in exception handler blocks.
11839 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
11840 * parts of the tree could be separated by other instructions, killing the tree
11841 * arguments, or stores killing loads etc. Also, should we fold loads into other
11842 * instructions if the result of the load is used multiple times ?
11843 * - make the REM_IMM optimization in mini-x86.c arch-independent.
11844 * - LAST MERGE: 108395.
11845 * - when returning vtypes in registers, generate IR and append it to the end of the
11846 * last bb instead of doing it in the epilog.
11847 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
11855 - When to decompose opcodes:
11856 - earlier: this makes some optimizations hard to implement, since the low level IR
11857 no longer contains the necessary information. But it is easier to do.
11858 - later: harder to implement, enables more optimizations.
11859 - Branches inside bblocks:
11860 - created when decomposing complex opcodes.
11861 - branches to another bblock: harmless, but not tracked by the branch
11862 optimizations, so need to branch to a label at the start of the bblock.
11863 - branches to inside the same bblock: very problematic, trips up the local
11864 reg allocator. Can be fixed by splitting the current bblock, but that is a
11865 complex operation, since some local vregs can become global vregs etc.
11866 - Local/global vregs:
11867 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
11868 local register allocator.
11869 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
11870 structure, created by mono_create_var (). Assigned to hregs or the stack by
11871 the global register allocator.
11872 - When to do optimizations like alu->alu_imm:
11873 - earlier -> saves work later on since the IR will be smaller/simpler
11874 - later -> can work on more instructions
11875 - Handling of valuetypes:
11876 - When a vtype is pushed on the stack, a new temporary is created, an
11877 instruction computing its address (LDADDR) is emitted and pushed on
11878 the stack. Need to optimize cases when the vtype is used immediately as in
11879 argument passing, stloc etc.
11880 - Instead of the to_end stuff in the old JIT, simply call the function handling
11881 the values on the stack before emitting the last instruction of the bb.
11884 #endif /* DISABLE_JIT */