2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
22 #ifdef HAVE_SYS_TIME_H
30 #include <mono/utils/memcheck.h>
32 #include <mono/metadata/assembly.h>
33 #include <mono/metadata/attrdefs.h>
34 #include <mono/metadata/loader.h>
35 #include <mono/metadata/tabledefs.h>
36 #include <mono/metadata/class.h>
37 #include <mono/metadata/object.h>
38 #include <mono/metadata/exception.h>
39 #include <mono/metadata/opcodes.h>
40 #include <mono/metadata/mono-endian.h>
41 #include <mono/metadata/tokentype.h>
42 #include <mono/metadata/tabledefs.h>
43 #include <mono/metadata/marshal.h>
44 #include <mono/metadata/debug-helpers.h>
45 #include <mono/metadata/mono-debug.h>
46 #include <mono/metadata/gc-internal.h>
47 #include <mono/metadata/security-manager.h>
48 #include <mono/metadata/threads-types.h>
49 #include <mono/metadata/security-core-clr.h>
50 #include <mono/metadata/monitor.h>
51 #include <mono/metadata/profiler-private.h>
52 #include <mono/metadata/profiler.h>
53 #include <mono/utils/mono-compiler.h>
54 #include <mono/metadata/mono-basic-block.h>
61 #include "jit-icalls.h"
63 #include "debugger-agent.h"
/* NOTE(review): this excerpt is a sampled listing; macro bodies below are
 * missing lines (closers such as `} while (0)` are not visible). Do not
 * assume the macros are complete as shown. */
/* Tuning knobs for the IR builder: branch cost and inline size limit. */
65 #define BRANCH_COST 10
66 #define INLINE_LENGTH_LIMIT 20
/* Records that inlining failed; only meaningful while compiling an inlinee
 * (cfg->method != method) that is not a wrapper. Body truncated in excerpt. */
67 #define INLINE_FAILURE do {\
68 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE))\
/* Bail out of IR generation if an exception was already set on the cfg. */ \
71 #define CHECK_CFG_EXCEPTION do {\
72 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
/* Set a MethodAccessException on the cfg with a formatted message naming
 * both methods, free the temporary names, then jump to exception_exit. */ \
75 #define METHOD_ACCESS_FAILURE do { \
76 char *method_fname = mono_method_full_name (method, TRUE); \
77 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
78 mono_cfg_set_exception (cfg, MONO_EXCEPTION_METHOD_ACCESS); \
79 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
80 g_free (method_fname); \
81 g_free (cil_method_fname); \
82 goto exception_exit; \
/* Same pattern as METHOD_ACCESS_FAILURE, but for inaccessible fields. */ \
84 #define FIELD_ACCESS_FAILURE do { \
85 char *method_fname = mono_method_full_name (method, TRUE); \
86 char *field_fname = mono_field_full_name (field); \
87 mono_cfg_set_exception (cfg, MONO_EXCEPTION_FIELD_ACCESS); \
88 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
89 g_free (method_fname); \
90 g_free (field_fname); \
91 goto exception_exit; \
/* Under generic sharing: optionally log the failing opcode, mark the cfg
 * with GENERIC_SHARING_FAILED and jump to exception_exit. */ \
93 #define GENERIC_SHARING_FAILURE(opcode) do { \
94 if (cfg->generic_sharing_context) { \
95 if (cfg->verbose_level > 2) \
96 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
97 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
98 goto exception_exit; \
/* Out-of-memory during compilation: record it on the cfg and bail. */ \
101 #define OUT_OF_MEMORY_FAILURE do { \
102 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OUT_OF_MEMORY); \
103 goto exception_exit; \
105 /* Determine whenever 'ins' represents a load of the 'this' argument */
106 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
108 static int ldind_to_load_membase (int opcode);
109 static int stind_to_store_membase (int opcode);
111 int mono_op_to_op_imm (int opcode);
112 int mono_op_to_op_imm_noemul (int opcode);
114 MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
115 void mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native);
116 void mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass);
118 /* helper methods signatures */
119 static MonoMethodSignature *helper_sig_class_init_trampoline = NULL;
120 static MonoMethodSignature *helper_sig_domain_get = NULL;
121 static MonoMethodSignature *helper_sig_generic_class_init_trampoline = NULL;
122 static MonoMethodSignature *helper_sig_generic_class_init_trampoline_llvm = NULL;
123 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline = NULL;
124 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline = NULL;
125 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm = NULL;
128 * Instruction metadata
/* Instruction metadata: mini-ops.h is included repeatedly with different
 * MINI_OP/MINI_OP3 expansions to generate per-opcode tables.
 * First expansion: emit dest/src register-kind characters per opcode. */
136 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
137 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
143 #if SIZEOF_REGISTER == 8
148 /* keep in sync with the enum in mini.h */
151 #include "mini-ops.h"
/* Second expansion: count source registers per opcode (0..3). */
156 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
157 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
159 * This should contain the index of the last sreg + 1. This is not the same
160 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
162 const gint8 ins_sreg_counts[] = {
163 #include "mini-ops.h"
/* Initialize a variable-info record; 0xffff marks "no first use yet".
 * NOTE(review): macro body truncated in this excerpt. */
168 #define MONO_INIT_VARINFO(vi,id) do { \
169 (vi)->range.first_use.pos.bid = 0xffff; \
/* Copy the three source vregs from the REGS array into INS.
 * Assumes regs[] has at least 3 valid entries — callers presumably pass the
 * array filled by the matching getter; confirm against full source. */
175 mono_inst_set_src_registers (MonoInst *ins, int *regs)
177 ins->sreg1 = regs [0];
178 ins->sreg2 = regs [1];
179 ins->sreg3 = regs [2];
/* Public wrappers over the static alloc_* vreg allocators, so other
 * compilation units can allocate virtual registers on a MonoCompile. */
183 mono_alloc_ireg (MonoCompile *cfg)
185 return alloc_ireg (cfg);
/* Allocate a floating-point vreg. */
189 mono_alloc_freg (MonoCompile *cfg)
191 return alloc_freg (cfg);
/* Allocate a pointer-sized vreg. */
195 mono_alloc_preg (MonoCompile *cfg)
197 return alloc_preg (cfg);
/* Allocate a destination vreg appropriate for the given eval-stack type. */
201 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
203 return alloc_dreg (cfg, stack_type);
207 * mono_alloc_ireg_ref:
209 * Allocate an IREG, and mark it as holding a GC ref.
212 mono_alloc_ireg_ref (MonoCompile *cfg)
214 return alloc_ireg_ref (cfg);
218 * mono_alloc_ireg_mp:
220 * Allocate an IREG, and mark it as holding a managed pointer.
223 mono_alloc_ireg_mp (MonoCompile *cfg)
225 return alloc_ireg_mp (cfg);
229 * mono_alloc_ireg_copy:
231 * Allocate an IREG with the same GC type as VREG.
234 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
236 if (vreg_is_ref (cfg, vreg))
237 return alloc_ireg_ref (cfg);
238 else if (vreg_is_mp (cfg, vreg))
239 return alloc_ireg_mp (cfg);
/* Neither a GC ref nor a managed pointer: plain integer register. */
241 return alloc_ireg (cfg);
/* Map a MonoType to the move opcode used when copying it between registers.
 * NOTE(review): most case bodies (the actual OP_*MOVE returns) are missing
 * from this excerpt — only the case labels and type unwrapping survive. */
245 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
251 switch (type->type) {
254 case MONO_TYPE_BOOLEAN:
266 case MONO_TYPE_FNPTR:
268 case MONO_TYPE_CLASS:
269 case MONO_TYPE_STRING:
270 case MONO_TYPE_OBJECT:
271 case MONO_TYPE_SZARRAY:
272 case MONO_TYPE_ARRAY:
276 #if SIZEOF_REGISTER == 8
/* Enums are handled as their underlying base type. */
285 case MONO_TYPE_VALUETYPE:
286 if (type->data.klass->enumtype) {
287 type = mono_class_enum_basetype (type->data.klass);
290 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
293 case MONO_TYPE_TYPEDBYREF:
/* Generic instances are reduced to their container class's byval type. */
295 case MONO_TYPE_GENERICINST:
296 type = &type->data.generic_class->container_class->byval_arg;
/* VAR/MVAR only reach here under generic sharing. */
300 g_assert (cfg->generic_sharing_context);
303 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/* Debug dump of one basic block: its in-edges, out-edges (block number and
 * DFS number) and every instruction in its code list, prefixed by MSG. */
309 mono_print_bb (MonoBasicBlock *bb, const char *msg)
314 printf ("\n%s %d: [IN: ", msg, bb->block_num);
315 for (i = 0; i < bb->in_count; ++i)
316 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
318 for (i = 0; i < bb->out_count; ++i)
319 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
/* Dump each MonoInst in the block (index -1 = don't print a position). */
321 for (tree = bb->code; tree; tree = tree->next)
322 mono_print_ins_index (-1, tree);
/* Build the cached icall signatures used by the trampoline helpers declared
 * above. Signature strings are "<ret> <args...>" per mono_create_icall_signature. */
326 mono_create_helper_signatures (void)
328 helper_sig_domain_get = mono_create_icall_signature ("ptr");
329 helper_sig_class_init_trampoline = mono_create_icall_signature ("void");
330 helper_sig_generic_class_init_trampoline = mono_create_icall_signature ("void");
/* The LLVM variants take an explicit argument (ptr / object) instead of
 * relying on a register-based calling convention. */
331 helper_sig_generic_class_init_trampoline_llvm = mono_create_icall_signature ("void ptr");
332 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
333 helper_sig_monitor_enter_exit_trampoline = mono_create_icall_signature ("void");
334 helper_sig_monitor_enter_exit_trampoline_llvm = mono_create_icall_signature ("void object");
338 * Can't put this at the beginning, since other files reference stuff from this
/* Bail-out helpers: break into the debugger when requested, otherwise jump
 * to the corresponding label in the IR-building loop. */
343 #define UNVERIFIED do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto unverified; } while (0)
345 #define LOAD_ERROR do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto load_error; } while (0)
/* Look up (or lazily create and register) the basic block starting at IL
 * offset IP. Rejects IPs outside the method body as unverified.
 * NOTE(review): macro body truncated in this excerpt. */
347 #define GET_BBLOCK(cfg,tblock,ip) do { \
348 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
350 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
351 NEW_BBLOCK (cfg, (tblock)); \
352 (tblock)->cil_code = (ip); \
353 ADD_BBLOCK (cfg, (tblock)); \
/* x86/amd64 only: emit an OP_X86_LEA (base + index<<shift + imm) whose
 * result register is marked as a managed pointer. */ \
357 #if defined(TARGET_X86) || defined(TARGET_AMD64)
358 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
359 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
360 (dest)->dreg = alloc_ireg_mp ((cfg)); \
361 (dest)->sreg1 = (sr1); \
362 (dest)->sreg2 = (sr2); \
363 (dest)->inst_imm = (imm); \
364 (dest)->backend.shift_amount = (shift); \
365 MONO_ADD_INS ((cfg)->cbb, (dest)); \
/* On 64-bit targets, widen an I4 operand to pointer width (sign-extend)
 * before a binop that mixes STACK_PTR with STACK_I4; no-op on 32-bit. */ \
369 #if SIZEOF_REGISTER == 8
370 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
371 /* FIXME: Need to add many more cases */ \
372 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
374 int dr = alloc_preg (cfg); \
375 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
376 (ins)->sreg2 = widen->dreg; \
380 #define ADD_WIDEN_OP(ins, arg1, arg2)
/* Pop two values from the eval stack (sp), type-check via type_from_op,
 * widen if needed, emit the binop and push the (possibly decomposed) result.
 * NOTE(review): macro bodies in this excerpt are missing lines. */
383 #define ADD_BINOP(op) do { \
384 MONO_INST_NEW (cfg, ins, (op)); \
386 ins->sreg1 = sp [0]->dreg; \
387 ins->sreg2 = sp [1]->dreg; \
388 type_from_op (ins, sp [0], sp [1]); \
390 /* Have to insert a widening op */ \
391 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
392 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
393 MONO_ADD_INS ((cfg)->cbb, (ins)); \
394 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
/* Same as ADD_BINOP but for unary opcodes (one stack operand). */ \
397 #define ADD_UNOP(op) do { \
398 MONO_INST_NEW (cfg, ins, (op)); \
400 ins->sreg1 = sp [0]->dreg; \
401 type_from_op (ins, sp [0], NULL); \
403 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
404 MONO_ADD_INS ((cfg)->cbb, (ins)); \
405 *sp++ = mono_decompose_opcode (cfg, ins); \
/* Emit a two-way conditional branch: an OP_COMPARE followed by the branch,
 * wiring true/false successors into the CFG. If next_block is given the
 * false edge falls through to it; otherwise the block at the current ip is
 * looked up/created. Pending eval-stack items are spilled first. */ \
408 #define ADD_BINCOND(next_block) do { \
411 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
412 cmp->sreg1 = sp [0]->dreg; \
413 cmp->sreg2 = sp [1]->dreg; \
414 type_from_op (cmp, sp [0], sp [1]); \
416 type_from_op (ins, sp [0], sp [1]); \
417 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
418 GET_BBLOCK (cfg, tblock, target); \
419 link_bblock (cfg, bblock, tblock); \
420 ins->inst_true_bb = tblock; \
421 if ((next_block)) { \
422 link_bblock (cfg, bblock, (next_block)); \
423 ins->inst_false_bb = (next_block); \
424 start_new_bblock = 1; \
426 GET_BBLOCK (cfg, tblock, ip); \
427 link_bblock (cfg, bblock, tblock); \
428 ins->inst_false_bb = tblock; \
429 start_new_bblock = 2; \
431 if (sp != stack_start) { \
432 handle_stack_args (cfg, stack_start, sp - stack_start); \
433 CHECK_UNVERIFIABLE (cfg); \
435 MONO_ADD_INS (bblock, cmp); \
436 MONO_ADD_INS (bblock, ins); \
440 * link_bblock: Links two basic blocks
442 * links two basic blocks in the control flow graph, the 'from'
443 * argument is the starting block and the 'to' argument is the block
444 * the control flow ends to after 'from'.
/* Add a CFG edge from->to, keeping both the out_bb list of 'from' and the
 * in_bb list of 'to' consistent. Duplicate edges are detected and skipped;
 * the successor/predecessor arrays are grown by reallocating from the
 * compilation mempool and copying (mempool memory is never freed).
 * NOTE(review): this excerpt omits several lines of each copy loop. */
447 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
449 MonoBasicBlock **newa;
/* Debug tracing of the edge being added (entry/exit are printed specially). */
453 if (from->cil_code) {
455 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
457 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
460 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
462 printf ("edge from entry to exit\n");
/* Skip if 'to' is already a successor of 'from'. */
467 for (i = 0; i < from->out_count; ++i) {
468 if (to == from->out_bb [i]) {
/* Grow from->out_bb by one slot and copy the existing successors. */
474 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
475 for (i = 0; i < from->out_count; ++i) {
476 newa [i] = from->out_bb [i];
/* Mirror the same dedup-and-grow logic for to->in_bb. */
484 for (i = 0; i < to->in_count; ++i) {
485 if (from == to->in_bb [i]) {
491 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
492 for (i = 0; i < to->in_count; ++i) {
493 newa [i] = to->in_bb [i];
/* Public wrapper for the static edge-linking helper above. */
502 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
504 link_bblock (cfg, from, to);
508 * mono_find_block_region:
510 * We mark each basic block with a region ID. We use that to avoid BB
511 * optimizations when blocks are in different regions.
514 * A region token that encodes where this region is, and information
515 * about the clause owner for this block.
517 * The region encodes the try/catch/filter clause that owns this block
518 * as well as the type. -1 is a special value that represents a block
519 * that is in none of try/catch/filter.
/* Compute the region token for IL offset OFFSET: (clause_index+1) << 8
 * combined with a MONO_REGION_* kind and the clause flags. Filter ranges
 * are checked first, then handler bodies, then try bodies. The fall-through
 * "return -1" for offsets outside every clause is not visible in this excerpt. */
522 mono_find_block_region (MonoCompile *cfg, int offset)
524 MonoMethodHeader *header = cfg->header;
525 MonoExceptionClause *clause;
528 for (i = 0; i < header->num_clauses; ++i) {
529 clause = &header->clauses [i];
/* Inside a filter block: [filter_offset, handler_offset). */
530 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
531 (offset < (clause->handler_offset)))
532 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
/* Inside the handler: distinguish finally / fault / catch. */
534 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
535 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
536 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
537 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
538 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
540 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
/* Inside the protected (try) range itself. */
543 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
544 return ((i + 1) << 8) | clause->flags;
/* Collect (as a GList) the exception clauses of kind TYPE whose protected
 * range contains IP but not TARGET — i.e. the handlers a branch from IP to
 * TARGET leaves and must run (e.g. finally blocks for leave opcodes).
 * NOTE(review): initialization of 'res' and the return are not visible here. */
551 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
553 MonoMethodHeader *header = cfg->header;
554 MonoExceptionClause *clause;
558 for (i = 0; i < header->num_clauses; ++i) {
559 clause = &header->clauses [i];
560 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
561 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
562 if (clause->flags == type)
563 res = g_list_append (res, clause);
/* Get-or-create the stack-pointer save variable for an EH region, cached in
 * cfg->spvars keyed by region token. */
570 mono_create_spvar_for_region (MonoCompile *cfg, int region)
574 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
578 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
579 /* prevent it from being register allocated */
580 var->flags |= MONO_INST_INDIRECT;
582 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/* Look up the exception-object variable for a handler at OFFSET (may be NULL). */
586 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
588 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/* Get-or-create the exception-object variable for a handler at OFFSET,
 * cached in cfg->exvars; same register-allocation opt-out as spvars. */
592 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
596 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
600 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
601 /* prevent it from being register allocated */
602 var->flags |= MONO_INST_INDIRECT;
604 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
610 * Returns the type used in the eval stack when @type is loaded.
611 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
/* Set inst->type (and inst->klass) to the eval-stack category (STACK_*)
 * corresponding to TYPE when it is loaded. Byref types become STACK_MP;
 * enums and generic instances are unwrapped and re-dispatched.
 * NOTE(review): excerpt is missing the byref check, break statements and
 * the goto-based re-dispatch loop — consult the full source before editing. */
614 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
618 inst->klass = klass = mono_class_from_mono_type (type);
/* Byref (managed pointer) case. */
620 inst->type = STACK_MP;
625 switch (type->type) {
627 inst->type = STACK_INV;
631 case MONO_TYPE_BOOLEAN:
637 inst->type = STACK_I4;
642 case MONO_TYPE_FNPTR:
643 inst->type = STACK_PTR;
645 case MONO_TYPE_CLASS:
646 case MONO_TYPE_STRING:
647 case MONO_TYPE_OBJECT:
648 case MONO_TYPE_SZARRAY:
649 case MONO_TYPE_ARRAY:
650 inst->type = STACK_OBJ;
654 inst->type = STACK_I8;
658 inst->type = STACK_R8;
/* Enums use their underlying base type; other valuetypes are STACK_VTYPE. */
660 case MONO_TYPE_VALUETYPE:
661 if (type->data.klass->enumtype) {
662 type = mono_class_enum_basetype (type->data.klass);
666 inst->type = STACK_VTYPE;
669 case MONO_TYPE_TYPEDBYREF:
670 inst->klass = mono_defaults.typed_reference_class;
671 inst->type = STACK_VTYPE;
673 case MONO_TYPE_GENERICINST:
674 type = &type->data.generic_class->container_class->byval_arg;
677 case MONO_TYPE_MVAR :
678 /* FIXME: all the arguments must be references for now,
679 * later look inside cfg and see if the arg num is
/* VAR/MVAR only valid under generic sharing; treated as object refs. */
682 g_assert (cfg->generic_sharing_context);
683 inst->type = STACK_OBJ;
686 g_error ("unknown type 0x%02x in eval stack type", type->type);
691 * The following tables are used to quickly validate the IL code in type_from_op ().
/* Result-type table for numeric binops, indexed by the STACK_* categories of
 * the two operands; STACK_INV marks an invalid IL combination. */
694 bin_num_table [STACK_MAX] [STACK_MAX] = {
695 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
696 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
697 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
698 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
699 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
700 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
701 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
702 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Result type of unary negation per operand category (appears to be the
 * neg_table row — name not visible in this excerpt). */
707 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
710 /* reduce the size of this table */
/* Result-type table for integer-only binops (and/or/xor etc.). */
712 bin_int_table [STACK_MAX] [STACK_MAX] = {
713 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
714 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
715 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
716 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
717 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
718 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
719 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
720 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Comparison validity matrix: 0 = invalid, nonzero = allowed (values 2/3/4
 * presumably encode restricted/unverifiable combinations — confirm in full source). */
724 bin_comp_table [STACK_MAX] [STACK_MAX] = {
725 /* Inv i L p F & O vt */
727 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
728 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
729 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
730 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
731 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
732 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
733 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
736 /* reduce the size of this table */
/* Result type for shift ops; shift amount may be I4 or native int. */
738 shift_table [STACK_MAX] [STACK_MAX] = {
739 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
740 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
741 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
742 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
743 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
744 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
745 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
746 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
750 * Tables to map from the non-specific opcode to the matching
751 * type-specific opcode.
753 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
/* Each entry is an opcode *delta* added to the generic CIL opcode to select
 * the I/L/P/F-specific IR opcode, indexed by operand STACK_* category. */
755 binops_op_map [STACK_MAX] = {
756 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
759 /* handles from CEE_NEG to CEE_CONV_U8 */
761 unops_op_map [STACK_MAX] = {
762 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
765 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
767 ovfops_op_map [STACK_MAX] = {
768 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
771 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
773 ovf2ops_op_map [STACK_MAX] = {
774 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
777 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
779 ovf3ops_op_map [STACK_MAX] = {
780 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
783 /* handles from CEE_BEQ to CEE_BLT_UN */
785 beqops_op_map [STACK_MAX] = {
786 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
789 /* handles from CEE_CEQ to CEE_CLT_UN */
791 ceqops_op_map [STACK_MAX] = {
792 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
796 * Sets ins->type (the type on the eval stack) according to the
797 * type of the opcode and the arguments to it.
798 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
800 * FIXME: this function sets ins->type unconditionally in some cases, but
801 * it should set it to invalid for some types (a conv.x on an object)
/* Central type-checking/specialization step of the IR builder: given a
 * generic opcode in ins->opcode and its stack operands, set ins->type (the
 * result's eval-stack category, STACK_INV on invalid IL) and rewrite
 * ins->opcode to the type-specific variant via the *_op_map delta tables.
 * NOTE(review): this excerpt is heavily sampled — many case labels, breaks
 * and fallthroughs are missing; do not infer control flow from adjacency here. */
804 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
806 switch (ins->opcode) {
/* Arithmetic binops: result category from bin_num_table, opcode from map. */
813 /* FIXME: check unverifiable args for STACK_MP */
814 ins->type = bin_num_table [src1->type] [src2->type];
815 ins->opcode += binops_op_map [ins->type];
/* Integer-only binops (and/or/xor/...). */
822 ins->type = bin_int_table [src1->type] [src2->type];
823 ins->opcode += binops_op_map [ins->type];
/* Shifts: left operand type wins, per shift_table. */
828 ins->type = shift_table [src1->type] [src2->type];
829 ins->opcode += binops_op_map [ins->type];
/* Compares: choose long/float/int compare based on operand width. */
834 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
835 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
836 ins->opcode = OP_LCOMPARE;
837 else if (src1->type == STACK_R8)
838 ins->opcode = OP_FCOMPARE;
840 ins->opcode = OP_ICOMPARE;
842 case OP_ICOMPARE_IMM:
/* Note: indexes bin_comp_table with src1 twice (immediate operand). */
843 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
844 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
845 ins->opcode = OP_LCOMPARE_IMM;
/* Conditional branches (beq family). */
857 ins->opcode += beqops_op_map [src1->type];
860 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
861 ins->opcode += ceqops_op_map [src1->type];
/* Ordered-only compares (mask with 1 rejects the unverifiable entries). */
867 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
868 ins->opcode += ceqops_op_map [src1->type];
/* Unary ops (neg/not). */
872 ins->type = neg_table [src1->type];
873 ins->opcode += unops_op_map [ins->type];
876 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
877 ins->type = src1->type;
879 ins->type = STACK_INV;
880 ins->opcode += unops_op_map [ins->type];
/* Narrowing conversions to I1/U1/I2/U2/I4/U4 yield STACK_I4. */
886 ins->type = STACK_I4;
887 ins->opcode += unops_op_map [src1->type];
/* conv.r.un: unsigned int -> double. */
890 ins->type = STACK_R8;
891 switch (src1->type) {
894 ins->opcode = OP_ICONV_TO_R_UN;
897 ins->opcode = OP_LCONV_TO_R_UN;
901 case CEE_CONV_OVF_I1:
902 case CEE_CONV_OVF_U1:
903 case CEE_CONV_OVF_I2:
904 case CEE_CONV_OVF_U2:
905 case CEE_CONV_OVF_I4:
906 case CEE_CONV_OVF_U4:
907 ins->type = STACK_I4;
908 ins->opcode += ovf3ops_op_map [src1->type];
910 case CEE_CONV_OVF_I_UN:
911 case CEE_CONV_OVF_U_UN:
912 ins->type = STACK_PTR;
913 ins->opcode += ovf2ops_op_map [src1->type];
915 case CEE_CONV_OVF_I1_UN:
916 case CEE_CONV_OVF_I2_UN:
917 case CEE_CONV_OVF_I4_UN:
918 case CEE_CONV_OVF_U1_UN:
919 case CEE_CONV_OVF_U2_UN:
920 case CEE_CONV_OVF_U4_UN:
921 ins->type = STACK_I4;
922 ins->opcode += ovf2ops_op_map [src1->type];
/* conv.u: to native unsigned int; on 64-bit, ptr-width sources just move. */
925 ins->type = STACK_PTR;
926 switch (src1->type) {
928 ins->opcode = OP_ICONV_TO_U;
932 #if SIZEOF_VOID_P == 8
933 ins->opcode = OP_LCONV_TO_U;
935 ins->opcode = OP_MOVE;
939 ins->opcode = OP_LCONV_TO_U;
942 ins->opcode = OP_FCONV_TO_U;
/* Conversions to 64-bit. */
948 ins->type = STACK_I8;
949 ins->opcode += unops_op_map [src1->type];
951 case CEE_CONV_OVF_I8:
952 case CEE_CONV_OVF_U8:
953 ins->type = STACK_I8;
954 ins->opcode += ovf3ops_op_map [src1->type];
956 case CEE_CONV_OVF_U8_UN:
957 case CEE_CONV_OVF_I8_UN:
958 ins->type = STACK_I8;
959 ins->opcode += ovf2ops_op_map [src1->type];
/* Conversions to floating point. */
963 ins->type = STACK_R8;
964 ins->opcode += unops_op_map [src1->type];
967 ins->type = STACK_R8;
/* Checked arithmetic / native-int conversions via ovfops map. */
971 ins->type = STACK_I4;
972 ins->opcode += ovfops_op_map [src1->type];
977 ins->type = STACK_PTR;
978 ins->opcode += ovfops_op_map [src1->type];
/* Overflow-checked binops: R8 results are invalid IL here. */
986 ins->type = bin_num_table [src1->type] [src2->type];
987 ins->opcode += ovfops_op_map [src1->type];
988 if (ins->type == STACK_R8)
989 ins->type = STACK_INV;
/* Loads: result category follows the loaded width. */
991 case OP_LOAD_MEMBASE:
992 ins->type = STACK_PTR;
994 case OP_LOADI1_MEMBASE:
995 case OP_LOADU1_MEMBASE:
996 case OP_LOADI2_MEMBASE:
997 case OP_LOADU2_MEMBASE:
998 case OP_LOADI4_MEMBASE:
999 case OP_LOADU4_MEMBASE:
1000 ins->type = STACK_PTR;
1002 case OP_LOADI8_MEMBASE:
1003 ins->type = STACK_I8;
1005 case OP_LOADR4_MEMBASE:
1006 case OP_LOADR8_MEMBASE:
1007 ins->type = STACK_R8;
1010 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
/* Managed pointers default their klass to object when none is known. */
1014 if (ins->type == STACK_MP)
1015 ins->klass = mono_defaults.object_class;
/* Table mapping a MONO_TYPE_* load to its eval-stack category (row appears
 * to belong to an ldind/param table whose declaration is not visible here). */
1020 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
1026 param_table [STACK_MAX] [STACK_MAX] = {
/* Verify that the eval-stack values in ARGS match SIG (byref-ness, reference
 * vs value kinds, float widths). NOTE(review): most return statements and the
 * surrounding control flow are missing from this excerpt. */
1031 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
1035 switch (args->type) {
1045 for (i = 0; i < sig->param_count; ++i) {
1046 switch (args [i].type) {
/* Managed pointer on the stack requires a byref parameter, and vice versa. */
1050 if (!sig->params [i]->byref)
1054 if (sig->params [i]->byref)
1056 switch (sig->params [i]->type) {
1057 case MONO_TYPE_CLASS:
1058 case MONO_TYPE_STRING:
1059 case MONO_TYPE_OBJECT:
1060 case MONO_TYPE_SZARRAY:
1061 case MONO_TYPE_ARRAY:
/* Float stack values must map to R4/R8 parameters. */
1068 if (sig->params [i]->byref)
1070 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1079 /*if (!param_table [args [i].type] [sig->params [i]->type])
1087 * When we need a pointer to the current domain many times in a method, we
1088 * call mono_domain_get() once and we store the result in a local variable.
1089 * This function returns the variable that represents the MonoDomain*.
/* Lazily create and cache the local holding the MonoDomain* so repeated
 * mono_domain_get() calls in a method hit a variable instead. */
1091 inline static MonoInst *
1092 mono_get_domainvar (MonoCompile *cfg)
1094 if (!cfg->domainvar)
1095 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1096 return cfg->domainvar;
1100 * The got_var contains the address of the Global Offset Table when AOT
/* Lazily create the GOT-address variable; only relevant when the target
 * defines MONO_ARCH_NEED_GOT_VAR and we are compiling AOT. */
1104 mono_get_got_var (MonoCompile *cfg)
1106 #ifdef MONO_ARCH_NEED_GOT_VAR
1107 if (!cfg->compile_aot)
1109 if (!cfg->got_var) {
1110 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1112 return cfg->got_var;
/* Lazily create the runtime-generic-context (rgctx) variable; only valid
 * under generic sharing, and forced onto the stack so its address is stable. */
1119 mono_get_vtable_var (MonoCompile *cfg)
1121 g_assert (cfg->generic_sharing_context);
1123 if (!cfg->rgctx_var) {
1124 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1125 /* force the var to be stack allocated */
1126 cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
1129 return cfg->rgctx_var;
/* Inverse of type_to_eval_stack_type: map an instruction's eval-stack
 * category back to a MonoType* for creating a temp variable of that kind. */
1133 type_from_stack_type (MonoInst *ins) {
1134 switch (ins->type) {
1135 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1136 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1137 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1138 case STACK_R8: return &mono_defaults.double_class->byval_arg;
/* STACK_MP presumably (case label not visible in excerpt): byref this_arg. */
1140 return &ins->klass->this_arg;
1141 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1142 case STACK_VTYPE: return &ins->klass->byval_arg;
1144 g_error ("stack type %d to monotype not handled\n", ins->type);
/* Map a MonoType to its STACK_* category after unwrapping enums via
 * mono_type_get_underlying_type. NOTE(review): all return statements are
 * missing from this excerpt — only case labels remain. */
1149 static G_GNUC_UNUSED int
1150 type_to_stack_type (MonoType *t)
1152 t = mono_type_get_underlying_type (t);
1156 case MONO_TYPE_BOOLEAN:
1159 case MONO_TYPE_CHAR:
1166 case MONO_TYPE_FNPTR:
1168 case MONO_TYPE_CLASS:
1169 case MONO_TYPE_STRING:
1170 case MONO_TYPE_OBJECT:
1171 case MONO_TYPE_SZARRAY:
1172 case MONO_TYPE_ARRAY:
1180 case MONO_TYPE_VALUETYPE:
1181 case MONO_TYPE_TYPEDBYREF:
1183 case MONO_TYPE_GENERICINST:
1184 if (mono_type_generic_inst_is_valuetype (t))
1190 g_assert_not_reached ();
/* Map an ldelem/stelem CIL opcode to the element MonoClass it accesses.
 * NOTE(review): the case labels for most returns are missing in this excerpt;
 * the pairing of opcode to class below reflects only the visible lines. */
1197 array_access_to_klass (int opcode)
1201 return mono_defaults.byte_class;
1203 return mono_defaults.uint16_class;
1206 return mono_defaults.int_class;
1209 return mono_defaults.sbyte_class;
1212 return mono_defaults.int16_class;
1215 return mono_defaults.int32_class;
1217 return mono_defaults.uint32_class;
1220 return mono_defaults.int64_class;
1223 return mono_defaults.single_class;
1226 return mono_defaults.double_class;
1227 case CEE_LDELEM_REF:
1228 case CEE_STELEM_REF:
1229 return mono_defaults.object_class;
1231 g_assert_not_reached ();
1237 * We try to share variables when possible
/* Get a shared temp variable for eval-stack slot SLOT holding a value of
 * INS's stack type; temps are cached in cfg->intvars keyed by (type, slot)
 * so join points reuse the same local. Slots beyond max_stack (from
 * inlining) always get a fresh variable. */
1240 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1245 /* inlining can result in deeper stacks */
1246 if (slot >= cfg->header->max_stack)
1247 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* Cache key: stack type (1-based) plus slot stride. */
1249 pos = ins->type - 1 + slot * STACK_MAX;
1251 switch (ins->type) {
1258 if ((vnum = cfg->intvars [pos]))
1259 return cfg->varinfo [vnum];
1260 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1261 cfg->intvars [pos] = res->inst_c0;
/* Non-cacheable stack types fall through to a fresh variable. */
1264 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* Record image+token for KEY in cfg->token_info_hash so AOT can resolve the
 * reference later; skipped for generic contexts and wrapper references. */
1270 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1273 * Don't use this if a generic_context is set, since that means AOT can't
1274 * look up the method using just the image+token.
1275 * table == 0 means this is a reference made from a wrapper.
1277 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1278 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1279 jump_info_token->image = image;
1280 jump_info_token->token = token;
1281 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1286 * This function is called to handle items that are left on the evaluation stack
1287 * at basic block boundaries. What happens is that we save the values to local variables
1288 * and we reload them later when first entering the target basic block (with the
1289 * handle_loaded_temps () function).
1290 * A single joint point will use the same variables (stored in the array bb->out_stack or
1291 * bb->in_stack, if the basic block is before or after the joint point).
1293 * This function needs to be called _before_ emitting the last instruction of
1294 * the bb (i.e. before emitting a branch).
1295 * If the stack merge fails at a join point, cfg->unverifiable is set.
/* Spill the COUNT values left on the eval stack (SP) at a basic-block
 * boundary into shared temp locals, and propagate those locals as the
 * in_stack of each successor block; mismatched stack depths at a join point
 * mark the cfg unverifiable. See the comment block above for the contract.
 * NOTE(review): this excerpt omits several closing braces, 'continue's and
 * the bindex bookkeeping of the final loop. */
1298 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1301 MonoBasicBlock *bb = cfg->cbb;
1302 MonoBasicBlock *outb;
1303 MonoInst *inst, **locals;
1308 if (cfg->verbose_level > 3)
1309 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* First exit from this block: decide which temps make up out_stack. */
1310 if (!bb->out_scount) {
1311 bb->out_scount = count;
1312 //printf ("bblock %d has out:", bb->block_num);
/* If some successor already has an in_stack, adopt it as our out_stack. */
1314 for (i = 0; i < bb->out_count; ++i) {
1315 outb = bb->out_bb [i];
1316 /* exception handlers are linked, but they should not be considered for stack args */
1317 if (outb->flags & BB_EXCEPTION_HANDLER)
1319 //printf (" %d", outb->block_num);
1320 if (outb->in_stack) {
1322 bb->out_stack = outb->in_stack;
/* Otherwise allocate fresh out_stack slots (shared interface vars when not
 * inlining, always-fresh locals when inlining — see comment below). */
1328 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1329 for (i = 0; i < count; ++i) {
1331 * try to reuse temps already allocated for this purpouse, if they occupy the same
1332 * stack slot and if they are of the same type.
1333 * This won't cause conflicts since if 'local' is used to
1334 * store one of the values in the in_stack of a bblock, then
1335 * the same variable will be used for the same outgoing stack
1337 * This doesn't work when inlining methods, since the bblocks
1338 * in the inlined methods do not inherit their in_stack from
1339 * the bblock they are inlined to. See bug #58863 for an
1342 if (cfg->inlined_method)
1343 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1345 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate our out_stack to successors that don't have an in_stack yet;
 * depth mismatches at join points make the method unverifiable. */
1350 for (i = 0; i < bb->out_count; ++i) {
1351 outb = bb->out_bb [i];
1352 /* exception handlers are linked, but they should not be considered for stack args */
1353 if (outb->flags & BB_EXCEPTION_HANDLER)
1355 if (outb->in_scount) {
1356 if (outb->in_scount != bb->out_scount) {
1357 cfg->unverifiable = TRUE;
1360 continue; /* check they are the same locals */
1362 outb->in_scount = count;
1363 outb->in_stack = bb->out_stack;
/* Emit the stores into the chosen temps and replace sp[] with the temps. */
1366 locals = bb->out_stack;
1368 for (i = 0; i < count; ++i) {
1369 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1370 inst->cil_code = sp [i]->cil_code;
1371 sp [i] = locals [i];
1372 if (cfg->verbose_level > 3)
1373 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1377 * It is possible that the out bblocks already have in_stack assigned, and
1378 * the in_stacks differ. In this case, we will store to all the different
1385 /* Find a bblock which has a different in_stack */
1387 while (bindex < bb->out_count) {
1388 outb = bb->out_bb [bindex];
1389 /* exception handlers are linked, but they should not be considered for stack args */
1390 if (outb->flags & BB_EXCEPTION_HANDLER) {
/* Store into each distinct successor in_stack as well. */
1394 if (outb->in_stack != locals) {
1395 for (i = 0; i < count; ++i) {
1396 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1397 inst->cil_code = sp [i]->cil_code;
1398 sp [i] = locals [i];
1399 if (cfg->verbose_level > 3)
1400 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1402 locals = outb->in_stack;
1411 /* Emit code which loads interface_offsets [klass->interface_id]
1412 * The array is stored in memory before vtable.
1415 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1417 if (cfg->compile_aot) {
1418 int ioffset_reg = alloc_preg (cfg);
1419 int iid_reg = alloc_preg (cfg);
1421 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1422 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1423 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
1426 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
1431 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1433 int ibitmap_reg = alloc_preg (cfg);
1434 #ifdef COMPRESSED_INTERFACE_BITMAP
1436 MonoInst *res, *ins;
1437 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1438 MONO_ADD_INS (cfg->cbb, ins);
1440 if (cfg->compile_aot)
1441 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
1443 EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
1444 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1445 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
1447 int ibitmap_byte_reg = alloc_preg (cfg);
1449 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
1451 if (cfg->compile_aot) {
1452 int iid_reg = alloc_preg (cfg);
1453 int shifted_iid_reg = alloc_preg (cfg);
1454 int ibitmap_byte_address_reg = alloc_preg (cfg);
1455 int masked_iid_reg = alloc_preg (cfg);
1456 int iid_one_bit_reg = alloc_preg (cfg);
1457 int iid_bit_reg = alloc_preg (cfg);
1458 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1459 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1460 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1461 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1462 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1463 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1464 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1465 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
1467 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1468 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1474 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1475 * stored in "klass_reg" implements the interface "klass".
1478 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1480 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
1484 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1485 * stored in "vtable_reg" implements the interface "klass".
1488 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1490 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
1494 * Emit code which checks whenever the interface id of @klass is smaller than
1495 * than the value given by max_iid_reg.
1498 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1499 MonoBasicBlock *false_target)
1501 if (cfg->compile_aot) {
1502 int iid_reg = alloc_preg (cfg);
1503 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1504 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1507 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1509 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1511 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1514 /* Same as above, but obtains max_iid from a vtable */
1516 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1517 MonoBasicBlock *false_target)
1519 int max_iid_reg = alloc_preg (cfg);
1521 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1522 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1525 /* Same as above, but obtains max_iid from a klass */
1527 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1528 MonoBasicBlock *false_target)
1530 int max_iid_reg = alloc_preg (cfg);
1532 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1533 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1537 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1539 int idepth_reg = alloc_preg (cfg);
1540 int stypes_reg = alloc_preg (cfg);
1541 int stype = alloc_preg (cfg);
1543 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1544 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1545 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1546 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1548 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1549 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1551 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1552 } else if (cfg->compile_aot) {
1553 int const_reg = alloc_preg (cfg);
1554 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1555 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1557 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1559 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
1563 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1565 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
1569 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1571 int intf_reg = alloc_preg (cfg);
1573 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1574 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1575 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1577 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1579 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1583 * Variant of the above that takes a register to the class, not the vtable.
1586 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1588 int intf_bit_reg = alloc_preg (cfg);
1590 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1591 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1592 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1594 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1596 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1600 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1603 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1604 } else if (cfg->compile_aot) {
1605 int const_reg = alloc_preg (cfg);
1606 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1607 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1609 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1611 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1615 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1617 return mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
1621 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1623 if (cfg->compile_aot) {
1624 int const_reg = alloc_preg (cfg);
1625 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1626 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1628 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1630 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
1634 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
1637 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1640 int rank_reg = alloc_preg (cfg);
1641 int eclass_reg = alloc_preg (cfg);
1643 g_assert (!klass_inst);
1644 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
1645 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1646 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1647 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1648 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
1649 if (klass->cast_class == mono_defaults.object_class) {
1650 int parent_reg = alloc_preg (cfg);
1651 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
1652 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1653 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1654 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1655 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1656 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1657 } else if (klass->cast_class == mono_defaults.enum_class) {
1658 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1659 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1660 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1662 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1663 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1666 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1667 /* Check that the object is a vector too */
1668 int bounds_reg = alloc_preg (cfg);
1669 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
1670 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1671 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1674 int idepth_reg = alloc_preg (cfg);
1675 int stypes_reg = alloc_preg (cfg);
1676 int stype = alloc_preg (cfg);
1678 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1679 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1680 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1681 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1683 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1684 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1685 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
1690 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1692 return mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
1696 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1700 g_assert (val == 0);
1705 if ((size <= 4) && (size <= align)) {
1708 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1711 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1714 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1716 #if SIZEOF_REGISTER == 8
1718 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
1724 val_reg = alloc_preg (cfg);
1726 if (SIZEOF_REGISTER == 8)
1727 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1729 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
1732 /* This could be optimized further if neccesary */
1734 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1741 #if !NO_UNALIGNED_ACCESS
1742 if (SIZEOF_REGISTER == 8) {
1744 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1749 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
1757 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1762 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1767 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1774 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1781 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1782 g_assert (size < 10000);
1785 /* This could be optimized further if neccesary */
1787 cur_reg = alloc_preg (cfg);
1788 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1789 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1796 #if !NO_UNALIGNED_ACCESS
1797 if (SIZEOF_REGISTER == 8) {
1799 cur_reg = alloc_preg (cfg);
1800 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1801 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
1810 cur_reg = alloc_preg (cfg);
1811 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1812 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1818 cur_reg = alloc_preg (cfg);
1819 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1820 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1826 cur_reg = alloc_preg (cfg);
1827 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1828 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1836 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
1839 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1842 type = mini_get_basic_type_from_generic (gsctx, type);
1843 switch (type->type) {
1844 case MONO_TYPE_VOID:
1845 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
1848 case MONO_TYPE_BOOLEAN:
1851 case MONO_TYPE_CHAR:
1854 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1858 case MONO_TYPE_FNPTR:
1859 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1860 case MONO_TYPE_CLASS:
1861 case MONO_TYPE_STRING:
1862 case MONO_TYPE_OBJECT:
1863 case MONO_TYPE_SZARRAY:
1864 case MONO_TYPE_ARRAY:
1865 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1868 return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
1871 return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
1872 case MONO_TYPE_VALUETYPE:
1873 if (type->data.klass->enumtype) {
1874 type = mono_class_enum_basetype (type->data.klass);
1877 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1878 case MONO_TYPE_TYPEDBYREF:
1879 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1880 case MONO_TYPE_GENERICINST:
1881 type = &type->data.generic_class->container_class->byval_arg;
1884 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
1890 * target_type_is_incompatible:
1891 * @cfg: MonoCompile context
1893 * Check that the item @arg on the evaluation stack can be stored
1894 * in the target type (can be a local, or field, etc).
1895 * The cfg arg can be used to check if we need verification or just
1898 * Returns: non-0 value if arg can't be stored on a target.
1901 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1903 MonoType *simple_type;
1906 if (target->byref) {
1907 /* FIXME: check that the pointed to types match */
1908 if (arg->type == STACK_MP)
1909 return arg->klass != mono_class_from_mono_type (target);
1910 if (arg->type == STACK_PTR)
1915 simple_type = mono_type_get_underlying_type (target);
1916 switch (simple_type->type) {
1917 case MONO_TYPE_VOID:
1921 case MONO_TYPE_BOOLEAN:
1924 case MONO_TYPE_CHAR:
1927 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1931 /* STACK_MP is needed when setting pinned locals */
1932 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1937 case MONO_TYPE_FNPTR:
1938 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1941 case MONO_TYPE_CLASS:
1942 case MONO_TYPE_STRING:
1943 case MONO_TYPE_OBJECT:
1944 case MONO_TYPE_SZARRAY:
1945 case MONO_TYPE_ARRAY:
1946 if (arg->type != STACK_OBJ)
1948 /* FIXME: check type compatibility */
1952 if (arg->type != STACK_I8)
1957 if (arg->type != STACK_R8)
1960 case MONO_TYPE_VALUETYPE:
1961 if (arg->type != STACK_VTYPE)
1963 klass = mono_class_from_mono_type (simple_type);
1964 if (klass != arg->klass)
1967 case MONO_TYPE_TYPEDBYREF:
1968 if (arg->type != STACK_VTYPE)
1970 klass = mono_class_from_mono_type (simple_type);
1971 if (klass != arg->klass)
1974 case MONO_TYPE_GENERICINST:
1975 if (mono_type_generic_inst_is_valuetype (simple_type)) {
1976 if (arg->type != STACK_VTYPE)
1978 klass = mono_class_from_mono_type (simple_type);
1979 if (klass != arg->klass)
1983 if (arg->type != STACK_OBJ)
1985 /* FIXME: check type compatibility */
1989 case MONO_TYPE_MVAR:
1990 /* FIXME: all the arguments must be references for now,
1991 * later look inside cfg and see if the arg num is
1992 * really a reference
1994 g_assert (cfg->generic_sharing_context);
1995 if (arg->type != STACK_OBJ)
1999 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
2005 * Prepare arguments for passing to a function call.
2006 * Return a non-zero value if the arguments can't be passed to the given
2008 * The type checks are not yet complete and some conversions may need
2009 * casts on 32 or 64 bit architectures.
2011 * FIXME: implement this using target_type_is_incompatible ()
2014 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
2016 MonoType *simple_type;
2020 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
2024 for (i = 0; i < sig->param_count; ++i) {
2025 if (sig->params [i]->byref) {
2026 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2030 simple_type = sig->params [i];
2031 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
2033 switch (simple_type->type) {
2034 case MONO_TYPE_VOID:
2039 case MONO_TYPE_BOOLEAN:
2042 case MONO_TYPE_CHAR:
2045 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2051 case MONO_TYPE_FNPTR:
2052 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2055 case MONO_TYPE_CLASS:
2056 case MONO_TYPE_STRING:
2057 case MONO_TYPE_OBJECT:
2058 case MONO_TYPE_SZARRAY:
2059 case MONO_TYPE_ARRAY:
2060 if (args [i]->type != STACK_OBJ)
2065 if (args [i]->type != STACK_I8)
2070 if (args [i]->type != STACK_R8)
2073 case MONO_TYPE_VALUETYPE:
2074 if (simple_type->data.klass->enumtype) {
2075 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2078 if (args [i]->type != STACK_VTYPE)
2081 case MONO_TYPE_TYPEDBYREF:
2082 if (args [i]->type != STACK_VTYPE)
2085 case MONO_TYPE_GENERICINST:
2086 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2090 g_error ("unknown type 0x%02x in check_call_signature",
2098 callvirt_to_call (int opcode)
2103 case OP_VOIDCALLVIRT:
2112 g_assert_not_reached ();
2119 callvirt_to_call_membase (int opcode)
2123 return OP_CALL_MEMBASE;
2124 case OP_VOIDCALLVIRT:
2125 return OP_VOIDCALL_MEMBASE;
2127 return OP_FCALL_MEMBASE;
2129 return OP_LCALL_MEMBASE;
2131 return OP_VCALL_MEMBASE;
2133 g_assert_not_reached ();
2139 #ifdef MONO_ARCH_HAVE_IMT
2141 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
2145 if (COMPILE_LLVM (cfg)) {
2146 method_reg = alloc_preg (cfg);
2149 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2150 } else if (cfg->compile_aot) {
2151 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
2154 MONO_INST_NEW (cfg, ins, OP_PCONST);
2155 ins->inst_p0 = call->method;
2156 ins->dreg = method_reg;
2157 MONO_ADD_INS (cfg->cbb, ins);
2161 call->imt_arg_reg = method_reg;
2163 #ifdef MONO_ARCH_IMT_REG
2164 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2166 /* Need this to keep the IMT arg alive */
2167 mono_call_inst_add_outarg_reg (cfg, call, method_reg, 0, FALSE);
2172 #ifdef MONO_ARCH_IMT_REG
2173 method_reg = alloc_preg (cfg);
2176 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2177 } else if (cfg->compile_aot) {
2178 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
2181 MONO_INST_NEW (cfg, ins, OP_PCONST);
2182 ins->inst_p0 = call->method;
2183 ins->dreg = method_reg;
2184 MONO_ADD_INS (cfg->cbb, ins);
2187 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2189 mono_arch_emit_imt_argument (cfg, call, imt_arg);
2194 static MonoJumpInfo *
2195 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2197 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2201 ji->data.target = target;
2206 inline static MonoCallInst *
2207 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2208 MonoInst **args, int calli, int virtual, int tail, int rgctx)
2211 #ifdef MONO_ARCH_SOFT_FLOAT
2216 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2218 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2221 call->signature = sig;
2222 call->rgctx_reg = rgctx;
2224 type_to_eval_stack_type ((cfg), sig->ret, &call->inst);
2227 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2228 call->vret_var = cfg->vret_addr;
2229 //g_assert_not_reached ();
2231 } else if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2232 MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
2235 temp->backend.is_pinvoke = sig->pinvoke;
2238 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2239 * address of return value to increase optimization opportunities.
2240 * Before vtype decomposition, the dreg of the call ins itself represents the
2241 * fact the call modifies the return value. After decomposition, the call will
2242 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2243 * will be transformed into an LDADDR.
2245 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2246 loada->dreg = alloc_preg (cfg);
2247 loada->inst_p0 = temp;
2248 /* We reference the call too since call->dreg could change during optimization */
2249 loada->inst_p1 = call;
2250 MONO_ADD_INS (cfg->cbb, loada);
2252 call->inst.dreg = temp->dreg;
2254 call->vret_var = loada;
2255 } else if (!MONO_TYPE_IS_VOID (sig->ret))
2256 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2258 #ifdef MONO_ARCH_SOFT_FLOAT
2259 if (COMPILE_SOFT_FLOAT (cfg)) {
2261 * If the call has a float argument, we would need to do an r8->r4 conversion using
2262 * an icall, but that cannot be done during the call sequence since it would clobber
2263 * the call registers + the stack. So we do it before emitting the call.
2265 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2267 MonoInst *in = call->args [i];
2269 if (i >= sig->hasthis)
2270 t = sig->params [i - sig->hasthis];
2272 t = &mono_defaults.int_class->byval_arg;
2273 t = mono_type_get_underlying_type (t);
2275 if (!t->byref && t->type == MONO_TYPE_R4) {
2276 MonoInst *iargs [1];
2280 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2282 /* The result will be in an int vreg */
2283 call->args [i] = conv;
2290 if (COMPILE_LLVM (cfg))
2291 mono_llvm_emit_call (cfg, call);
2293 mono_arch_emit_call (cfg, call);
2295 mono_arch_emit_call (cfg, call);
2298 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2299 cfg->flags |= MONO_CFG_HAS_CALLS;
2305 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2307 #ifdef MONO_ARCH_RGCTX_REG
2308 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2309 cfg->uses_rgctx_reg = TRUE;
2310 call->rgctx_reg = TRUE;
2312 call->rgctx_arg_reg = rgctx_reg;
2319 inline static MonoInst*
2320 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *rgctx_arg)
2326 rgctx_reg = mono_alloc_preg (cfg);
2327 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2330 call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE);
2332 call->inst.sreg1 = addr->dreg;
2334 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2337 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2339 return (MonoInst*)call;
2343 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, int rgctx_type);
2345 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, int rgctx_type);
2348 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2349 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *rgctx_arg)
2351 gboolean might_be_remote;
2352 gboolean virtual = this != NULL;
2353 gboolean enable_for_aot = TRUE;
2359 rgctx_reg = mono_alloc_preg (cfg);
2360 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2363 if (method->string_ctor) {
2364 /* Create the real signature */
2365 /* FIXME: Cache these */
2366 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2367 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2372 context_used = mono_method_check_context_used (method);
2374 might_be_remote = this && sig->hasthis &&
2375 (method->klass->marshalbyref || method->klass == mono_defaults.object_class) &&
2376 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this) || context_used);
2378 if (might_be_remote && context_used) {
2381 g_assert (cfg->generic_sharing_context);
2383 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2385 return mono_emit_calli (cfg, sig, args, addr, NULL);
2388 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, FALSE, rgctx_arg ? TRUE : FALSE);
2390 if (might_be_remote)
2391 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2393 call->method = method;
2394 call->inst.flags |= MONO_INST_HAS_METHOD;
2395 call->inst.inst_left = this;
2398 int vtable_reg, slot_reg, this_reg;
2400 this_reg = this->dreg;
2402 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
2403 if ((method->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
2404 MonoInst *dummy_use;
2406 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2408 /* Make a call to delegate->invoke_impl */
2409 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2410 call->inst.inst_basereg = this_reg;
2411 call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2412 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2414 /* We must emit a dummy use here because the delegate trampoline will
2415 replace the 'this' argument with the delegate target making this activation
2416 no longer a root for the delegate.
2417 This is an issue for delegates that target collectible code such as dynamic
2418 methods of GC'able assemblies.
2420 For a test case look into #667921.
2422 FIXME: a dummy use is not the best way to do it as the local register allocator
2423 will put it on a caller save register and spil it around the call.
2424 Ideally, we would either put it on a callee save register or only do the store part.
2426 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
2428 return (MonoInst*)call;
2432 if ((!cfg->compile_aot || enable_for_aot) &&
2433 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2434 (MONO_METHOD_IS_FINAL (method) &&
2435 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2436 !(method->klass->marshalbyref && context_used)) {
2438 * the method is not virtual, we just need to ensure this is not null
2439 * and then we can call the method directly.
2441 if (method->klass->marshalbyref || method->klass == mono_defaults.object_class) {
2443 * The check above ensures method is not gshared, this is needed since
2444 * gshared methods can't have wrappers.
2446 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2449 if (!method->string_ctor)
2450 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2452 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2453 } else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2455 * the method is virtual, but we can statically dispatch since either
2456 * it's class or the method itself are sealed.
2457 * But first we need to ensure it's not a null reference.
2459 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2461 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2463 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2465 vtable_reg = alloc_preg (cfg);
2466 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2467 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2469 #ifdef MONO_ARCH_HAVE_IMT
2471 guint32 imt_slot = mono_method_get_imt_slot (method);
2472 emit_imt_argument (cfg, call, imt_arg);
2473 slot_reg = vtable_reg;
2474 call->inst.inst_offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2477 if (slot_reg == -1) {
2478 slot_reg = alloc_preg (cfg);
2479 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2480 call->inst.inst_offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
2483 slot_reg = vtable_reg;
2484 call->inst.inst_offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
2485 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2486 #ifdef MONO_ARCH_HAVE_IMT
2488 g_assert (mono_method_signature (method)->generic_param_count);
2489 emit_imt_argument (cfg, call, imt_arg);
2494 call->inst.sreg1 = slot_reg;
2495 call->virtual = TRUE;
2499 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2502 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2504 return (MonoInst*)call;
/*
 * Convenience wrapper: emit a call to METHOD using METHOD's own signature,
 * with no IMT or RGCTX argument.  Thin forwarder to mono_emit_method_call_full ().
 */
2508 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2510 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), args, this, NULL, NULL);
/*
 * Emit a call to native code at FUNC with signature SIG.  Builds the call
 * instruction via mono_emit_call_args () (all the virtual/tail/rgctx flags
 * FALSE) and appends it to the current basic block.
 */
2514 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2521 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE);
2524 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2526 return (MonoInst*)call;
/*
 * Emit a call to the JIT icall registered for FUNC.  The icall's wrapper
 * (mono_icall_get_wrapper ()) is called instead of FUNC directly, with the
 * signature recorded in the icall info.
 */
2530 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2532 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2536 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2540 * mono_emit_abs_call:
2542 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2544 inline static MonoInst*
2545 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2546 MonoMethodSignature *sig, MonoInst **args)
2548 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2552 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
/* Register JI in abs_patches so the backend can map the fake address back to
 * the patch info when emitting the final call target. */
2555 if (cfg->abs_patches == NULL)
2556 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2557 g_hash_table_insert (cfg->abs_patches, ji, ji);
2558 ins = mono_emit_native_call (cfg, ji, sig, args);
2559 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * Widen a small-integer return value of a call.  Only applies to pinvoke
 * (or LLVM-compiled) calls with a non-byref, non-void return: native code
 * might leave the upper bits of the return register uninitialized, so an
 * explicit ICONV is emitted according to the return type's load opcode.
 */
2564 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2566 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2567 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2571 * Native code might return non register sized integers
2572 * without initializing the upper bits.
2574 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2575 case OP_LOADI1_MEMBASE:
2576 widen_op = OP_ICONV_TO_I1;
2578 case OP_LOADU1_MEMBASE:
2579 widen_op = OP_ICONV_TO_U1;
2581 case OP_LOADI2_MEMBASE:
2582 widen_op = OP_ICONV_TO_I2;
2584 case OP_LOADU2_MEMBASE:
2585 widen_op = OP_ICONV_TO_U2;
/* Emit the widening conversion into a fresh register; the widened value
 * keeps the original stack type (presumably it replaces the call result —
 * the elided lines should confirm). */
2591 if (widen_op != -1) {
2592 int dreg = alloc_preg (cfg);
2595 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
2596 widen->type = ins->type;
/*
 * Return the corlib String.memcpy(dst, src, n) icall method, cached in a
 * static.  Aborts if the method is missing (old corlib).
 */
2606 get_memcpy_method (void)
2608 static MonoMethod *memcpy_method = NULL;
2609 if (!memcpy_method) {
2610 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2612 g_error ("Old corlib found. Install a new one");
2614 return memcpy_method;
/*
 * Recursively build WB_BITMAP: one bit per pointer-sized slot of KLASS
 * (relative to OFFSET) that holds a managed reference and therefore needs a
 * write barrier.  Static fields are skipped; for valuetypes the MonoObject
 * header is subtracted from field offsets; embedded structs with references
 * recurse with the accumulated offset.
 */
2618 create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
2620 MonoClassField *field;
2621 gpointer iter = NULL;
2623 while ((field = mono_class_get_fields (klass, &iter))) {
2626 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
2628 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
2629 if (mini_type_is_reference (cfg, mono_field_get_type (field))) {
/* Reference fields must be pointer-aligned for the per-slot bitmap to work. */
2630 g_assert ((foffset % SIZEOF_VOID_P) == 0);
2631 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
2633 MonoClass *field_class = mono_class_from_mono_type (field->type);
2634 if (field_class->has_references)
2635 create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
/*
 * Emit a GC write barrier for storing VALUE (or, when VALUE is NULL, the raw
 * register VALUE_REG) through PTR.  No-op unless cfg->gen_write_barriers.
 * Fast paths, in order of preference:
 *   1. OP_CARD_TABLE_WBARRIER when the arch supports it (not under AOT, and
 *      only with a card table and a nursery).
 *   2. Inline card marking: shift the address into a card offset, mask it,
 *      add the card-table base and store 1 into the card byte.
 *   3. Fallback: call the GC-provided write barrier method.
 * A dummy use keeps VALUE/VALUE_REG alive across the barrier.
 */
2641 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value, int value_reg)
2643 int card_table_shift_bits;
2644 gpointer card_table_mask;
2646 MonoInst *dummy_use;
2647 int nursery_shift_bits;
2648 size_t nursery_size;
2649 gboolean has_card_table_wb = FALSE;
2651 if (!cfg->gen_write_barriers)
2654 card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
2656 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
2658 #ifdef MONO_ARCH_HAVE_CARD_TABLE_WBARRIER
2659 has_card_table_wb = TRUE;
2662 if (has_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0) {
2665 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
2666 wbarrier->sreg1 = ptr->dreg;
2668 wbarrier->sreg2 = value->dreg;
2670 wbarrier->sreg2 = value_reg;
2671 MONO_ADD_INS (cfg->cbb, wbarrier);
2672 } else if (card_table) {
2673 int offset_reg = alloc_preg (cfg);
2674 int card_reg = alloc_preg (cfg);
2677 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
2678 if (card_table_mask)
2679 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
2681 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
2682 * IMM's larger than 32bits.
2684 if (cfg->compile_aot) {
2685 MONO_EMIT_NEW_AOTCONST (cfg, card_reg, NULL, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR);
2687 MONO_INST_NEW (cfg, ins, OP_PCONST);
2688 ins->inst_p0 = card_table;
2689 ins->dreg = card_reg;
2690 MONO_ADD_INS (cfg->cbb, ins);
/* Mark the card: *(card_table + (ptr >> shift)) = 1. */
2693 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
2694 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
/* Slow path: call the runtime's write barrier method with PTR. */
2696 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
2697 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
/* Keep the stored value live so the GC sees it until the barrier runs. */
2701 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
2703 MONO_INST_NEW (cfg, dummy_use, OP_DUMMY_USE);
2704 dummy_use->sreg1 = value_reg;
2705 MONO_ADD_INS (cfg->cbb, dummy_use);
/*
 * Write-barrier-aware copy of a valuetype of type KLASS.  iargs[0]/iargs[1]
 * hold the dest/src addresses.  Bails out (returns to the caller's generic
 * path) if alignment is below pointer size or SIZE exceeds the 32-slot bitmap
 * limit.  Medium sizes (> 5 pointers) call the
 * mono_gc_wbarrier_value_copy_bitmap icall with a reference bitmap; small
 * sizes are unrolled inline, emitting a write barrier per pointer-sized store
 * and plain stores for the sub-pointer tail.
 */
2710 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
2712 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
2713 unsigned need_wb = 0;
2718 /*types with references can't have alignment smaller than sizeof(void*) */
2719 if (align < SIZEOF_VOID_P)
2722 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
2723 if (size > 32 * SIZEOF_VOID_P)
2726 create_write_barrier_bitmap (cfg, klass, &need_wb, 0);
2728 /* We don't unroll more than 5 stores to avoid code bloat. */
2729 if (size > 5 * SIZEOF_VOID_P) {
2730 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
2731 size += (SIZEOF_VOID_P - 1);
2732 size &= ~(SIZEOF_VOID_P - 1);
2734 EMIT_NEW_ICONST (cfg, iargs [2], size);
2735 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
2736 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
2740 destreg = iargs [0]->dreg;
2741 srcreg = iargs [1]->dreg;
2744 dest_ptr_reg = alloc_preg (cfg);
2745 tmp_reg = alloc_preg (cfg);
/* Walking copy: dest_ptr_reg advances pointer by pointer over the dest. */
2748 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
2750 while (size >= SIZEOF_VOID_P) {
2751 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, tmp_reg, srcreg, offset);
2752 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
2755 emit_write_barrier (cfg, iargs [0], NULL, tmp_reg);
2757 offset += SIZEOF_VOID_P;
2758 size -= SIZEOF_VOID_P;
2761 /*tmp += sizeof (void*)*/
2762 if (size >= SIZEOF_VOID_P) {
2763 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
2764 MONO_ADD_INS (cfg->cbb, iargs [0]);
2768 /* Those cannot be references since size < sizeof (void*) */
2770 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
2771 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
2777 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
2778 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
2784 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
2785 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
2794 * Emit code to copy a valuetype of type @klass whose address is stored in
2795 * @src->dreg to memory whose address is stored at @dest->dreg.
2798 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
2800 MonoInst *iargs [4];
2803 MonoMethod *memcpy_method;
2807 * This check breaks with spilled vars... need to handle it during verification anyway.
2808 * g_assert (klass && klass == src->klass && klass == dest->klass);
/* Size/alignment come from the native layout for pinvoke copies. */
2812 n = mono_class_native_size (klass, &align);
2814 n = mono_class_value_size (klass, &align);
2816 /* if native is true there should be no references in the struct */
2817 if (cfg->gen_write_barriers && klass->has_references && !native) {
2818 /* Avoid barriers when storing to the stack */
2819 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
2820 (dest->opcode == OP_LDADDR))) {
2821 int context_used = 0;
2826 if (cfg->generic_sharing_context)
2827 context_used = mono_class_check_context_used (klass);
2829 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
2830 if ((cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
2832 } else if (context_used) {
2833 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
2835 if (cfg->compile_aot) {
2836 EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
2838 EMIT_NEW_PCONST (cfg, iargs [2], klass);
/* mono_value_copy needs the GC descriptor ready at runtime. */
2839 mono_class_compute_gc_descriptor (klass);
2843 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* No references (or storing to the stack): plain memcpy is safe.
 * Small copies are inlined, larger ones call the corlib memcpy helper. */
2848 if ((cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
2849 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
2850 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
2854 EMIT_NEW_ICONST (cfg, iargs [2], n);
2856 memcpy_method = get_memcpy_method ();
2857 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * Return the corlib String.memset(ptr, value, n) icall method, cached in a
 * static.  Aborts if the method is missing (old corlib).
 */
2862 get_memset_method (void)
2864 static MonoMethod *memset_method = NULL;
2865 if (!memset_method) {
2866 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
2868 g_error ("Old corlib found. Install a new one");
2870 return memset_method;
/*
 * Emit code to zero-initialize a valuetype of type KLASS at DEST->dreg
 * (CIL initobj).  Small sizes (<= 5 pointers) use an inline memset; larger
 * ones call the corlib memset helper with (dest, 0, n).
 */
2874 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
2876 MonoInst *iargs [3];
2879 MonoMethod *memset_method;
2881 /* FIXME: Optimize this for the case when dest is an LDADDR */
2883 mono_class_init (klass);
2884 n = mono_class_value_size (klass, &align);
2886 if (n <= sizeof (gpointer) * 5) {
2887 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
2890 memset_method = get_memset_method ();
2892 EMIT_NEW_ICONST (cfg, iargs [1], 0);
2893 EMIT_NEW_ICONST (cfg, iargs [2], n);
2894 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * Emit IR that loads the runtime generic context for METHOD:
 *   - generic methods (MONO_GENERIC_CONTEXT_USED_METHOD): load the MRGCTX
 *     from the vtable variable;
 *   - static methods / valuetype methods: load the vtable variable (and for
 *     inflated generic methods, dereference the MRGCTX's class_vtable);
 *   - otherwise: load 'this' (arg 0) and read its MonoObject vtable.
 * Only valid when compiling with a generic sharing context.
 */
2899 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
2901 MonoInst *this = NULL;
2903 g_assert (cfg->generic_sharing_context);
2905 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
2906 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
2907 !method->klass->valuetype)
2908 EMIT_NEW_ARGLOAD (cfg, this, 0);
2910 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
2911 MonoInst *mrgctx_loc, *mrgctx_var;
2914 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
2916 mrgctx_loc = mono_get_vtable_var (cfg);
2917 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
2920 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
2921 MonoInst *vtable_loc, *vtable_var;
2925 vtable_loc = mono_get_vtable_var (cfg);
2926 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
2928 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
/* The var actually holds an MRGCTX here; fetch its class vtable. */
2929 MonoInst *mrgctx_var = vtable_var;
2932 vtable_reg = alloc_preg (cfg);
2933 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
2934 vtable_var->type = STACK_PTR;
/* Instance case: read the vtable out of 'this'. */
2942 vtable_reg = alloc_preg (cfg);
2943 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/*
 * Allocate a MonoJumpInfoRgctxEntry (plus its embedded MonoJumpInfo) from
 * the mempool MP and fill in the method, mrgctx flag, patch type/data and
 * requested info type.
 */
2948 static MonoJumpInfoRgctxEntry *
2949 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, int info_type)
2951 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
2952 res->method = method;
2953 res->in_mrgctx = in_mrgctx;
2954 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
2955 res->data->type = patch_type;
2956 res->data->data.target = patch_data;
2957 res->info_type = info_type;
/*
 * Emit a call to the RGCTX lazy-fetch trampoline that resolves ENTRY against
 * the runtime generic context in RGCTX.
 */
2962 static inline MonoInst*
2963 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
2965 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * Emit IR that loads the RGCTX_TYPE property (e.g. KLASS, VTABLE, CAST_CACHE)
 * of KLASS from the runtime generic context of the current method.
 */
2969 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
2970 MonoClass *klass, int rgctx_type)
2972 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
2973 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2975 return emit_rgctx_fetch (cfg, rgctx, entry);
2979 * emit_get_rgctx_method:
2981 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
2982 * normal constants, else emit a load from the rgctx.
2985 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
2986 MonoMethod *cmethod, int rgctx_type)
/* Non-shared code: the method (or its RGCTX) can be baked in as a constant. */
2988 if (!context_used) {
2991 switch (rgctx_type) {
2992 case MONO_RGCTX_INFO_METHOD:
2993 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
2995 case MONO_RGCTX_INFO_METHOD_RGCTX:
2996 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
2999 g_assert_not_reached ();
/* Shared code: fetch the value lazily through the RGCTX trampoline. */
3002 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
3003 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3005 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * Emit IR that loads the RGCTX_TYPE property of FIELD from the runtime
 * generic context of the current method.
 */
3010 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3011 MonoClassField *field, int rgctx_type)
3013 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3014 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3016 return emit_rgctx_fetch (cfg, rgctx, entry);
3020 * On return the caller must check @klass for load errors.
/* Emit a call to the generic-class-init trampoline for KLASS's vtable.
 * Under generic sharing the vtable is fetched from the RGCTX, otherwise a
 * vtable constant is emitted.  When the arch defines MONO_ARCH_VTABLE_REG the
 * vtable is passed in that fixed register. */
3023 emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
3025 MonoInst *vtable_arg;
3027 int context_used = 0;
3029 if (cfg->generic_sharing_context)
3030 context_used = mono_class_check_context_used (klass);
3033 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
3034 klass, MONO_RGCTX_INFO_VTABLE);
3036 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3040 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
/* LLVM needs a different trampoline signature. */
3043 if (COMPILE_LLVM (cfg))
3044 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline_llvm, &vtable_arg);
3046 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
3047 #ifdef MONO_ARCH_VTABLE_REG
3048 mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
3049 cfg->uses_vtable_reg = TRUE;
/*
 * When --debug=casts is enabled, record the source class (read from OBJ_REG's
 * vtable) and the target KLASS in the JIT TLS (class_cast_from/class_cast_to)
 * so a failing cast can produce a detailed exception message.  Prints an
 * error if the TLS intrinsic is unavailable on this platform.
 */
3056 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg)
3058 if (mini_get_debug_options ()->better_cast_details) {
3059 int to_klass_reg = alloc_preg (cfg);
3060 int vtable_reg = alloc_preg (cfg);
3061 int klass_reg = alloc_preg (cfg);
3062 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3065 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
3069 MONO_ADD_INS (cfg->cbb, tls_get);
3070 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3071 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3073 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
3074 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
3075 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
/*
 * Clear the cast details stored by save_cast_details () once the cast has
 * succeeded, so stale data does not leak into a later failure report.
 */
3080 reset_cast_details (MonoCompile *cfg)
3082 /* Reset the variables holding the cast details */
3083 if (mini_get_debug_options ()->better_cast_details) {
3084 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3086 MONO_ADD_INS (cfg->cbb, tls_get);
3087 /* It is enough to reset the from field */
3088 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3093 * On return the caller must check @array_class for load errors
/* Emit a type check for a store into an array of type ARRAY_CLASS (stelem):
 * compare OBJ's vtable (or, under MONO_OPT_SHARED, its class) against
 * ARRAY_CLASS and throw ArrayTypeMismatchException on mismatch.  The compare
 * source differs by mode: shared code compares classes, shared-generics
 * fetches the vtable from the RGCTX, AOT uses a vtable constant, and plain
 * JIT compares against the vtable pointer immediate. */
3096 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3098 int vtable_reg = alloc_preg (cfg);
3099 int context_used = 0;
3101 if (cfg->generic_sharing_context)
3102 context_used = mono_class_check_context_used (array_class);
3104 save_cast_details (cfg, array_class, obj->dreg);
3106 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3108 if (cfg->opt & MONO_OPT_SHARED) {
3109 int class_reg = alloc_preg (cfg);
3110 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3111 if (cfg->compile_aot) {
3112 int klass_reg = alloc_preg (cfg);
3113 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
3114 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
3116 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
3118 } else if (context_used) {
3119 MonoInst *vtable_ins;
3121 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3122 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
3124 if (cfg->compile_aot) {
3128 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3130 vt_reg = alloc_preg (cfg);
3131 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3132 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
3135 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3137 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
3141 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
3143 reset_cast_details (cfg);
3147 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3148 * generic code is generated.
/* Calls Nullable<T>.Unbox on VAL: through a calli with an RGCTX-resolved
 * code address in shared code, or a direct method call otherwise. */
3151 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
3153 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3156 MonoInst *rgctx, *addr;
3158 /* FIXME: What if the class is shared? We might not
3159 have to get the address of the method from the
3161 addr = emit_get_rgctx_method (cfg, context_used, method,
3162 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3164 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3166 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
3168 return mono_emit_method_call (cfg, method, &val, NULL);
/*
 * Emit an unbox of sp[0] to valuetype KLASS: null-check the object, verify
 * its vtable rank is 0 and its element class matches KLASS (via an RGCTX
 * lookup in shared code, or an inline class check otherwise; throws
 * InvalidCastException on mismatch), then return the address of the unboxed
 * data, i.e. obj + sizeof (MonoObject).
 */
3173 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3177 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3178 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3179 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3180 int rank_reg = alloc_dreg (cfg ,STACK_I4);
3182 obj_reg = sp [0]->dreg;
3183 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3184 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3186 /* FIXME: generics */
3187 g_assert (klass->rank == 0);
/* An unboxable object must not be an array. */
3190 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3191 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3193 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3194 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
3197 MonoInst *element_class;
3199 /* This assertion is from the unboxcast insn */
3200 g_assert (klass->rank == 0);
3202 element_class = emit_get_rgctx_klass (cfg, context_used,
3203 klass->element_class, MONO_RGCTX_INFO_KLASS);
3205 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3206 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3208 save_cast_details (cfg, klass->element_class, obj_reg);
3209 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
3210 reset_cast_details (cfg);
/* Result: managed pointer to the value, just past the object header. */
3213 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
3214 MONO_ADD_INS (cfg->cbb, add);
3215 add->type = STACK_MP;
3222 * Returns NULL and set the cfg exception on error.
/* Emit IR allocating an object of type KLASS (FOR_BOX distinguishes box
 * allocations for the managed allocator).  Paths, in order: shared-generics
 * (RGCTX-resolved klass/vtable + mono_object_new[_specific] icall), shared
 * code (domain + class consts, mono_object_new), AOT out-of-line corlib
 * fast path (mono_helper_newobj_mscorlib), the GC's managed allocator when
 * available, and finally the allocation function chosen by
 * mono_class_get_allocation_ftn (optionally passing the instance size in
 * pointer words). */
3225 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
3227 MonoInst *iargs [2];
3233 MonoInst *iargs [2];
3236 FIXME: we cannot get managed_alloc here because we can't get
3237 the class's vtable (because it's not a closed class)
3239 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3240 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
3243 if (cfg->opt & MONO_OPT_SHARED)
3244 rgctx_info = MONO_RGCTX_INFO_KLASS;
3246 rgctx_info = MONO_RGCTX_INFO_VTABLE;
3247 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
3249 if (cfg->opt & MONO_OPT_SHARED) {
3250 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3252 alloc_ftn = mono_object_new;
3255 alloc_ftn = mono_object_new_specific;
3258 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3261 if (cfg->opt & MONO_OPT_SHARED) {
3262 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3263 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
3265 alloc_ftn = mono_object_new;
3266 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
3267 /* This happens often in argument checking code, eg. throw new FooException... */
3268 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
3269 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
3270 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
3272 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3273 MonoMethod *managed_alloc = NULL;
/* Vtable creation failed: report a TypeLoadException via the cfg. */
3277 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3278 cfg->exception_ptr = klass;
3282 #ifndef MONO_CROSS_COMPILE
3283 managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
3286 if (managed_alloc) {
3287 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3288 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3290 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* pass_lw case: the allocator also wants the size in pointer words. */
3292 guint32 lw = vtable->klass->instance_size;
3293 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
3294 EMIT_NEW_ICONST (cfg, iargs [0], lw);
3295 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
3298 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3302 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3306 * Returns NULL and set the cfg exception on error.
/* Emit a box of VAL to KLASS.  Nullable<T> boxes by calling the type's Box
 * method (via calli + RGCTX in shared code); other valuetypes allocate with
 * handle_alloc () and store VAL just past the object header. */
3309 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
3311 MonoInst *alloc, *ins;
3313 if (mono_class_is_nullable (klass)) {
3314 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3317 /* FIXME: What if the class is shared? We might not
3318 have to get the method address from the RGCTX. */
3319 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3320 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3321 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3323 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
3325 return mono_emit_method_call (cfg, method, &val, NULL);
3329 alloc = handle_alloc (cfg, klass, TRUE, context_used);
/* Copy the value into the freshly allocated box, after the header. */
3333 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * Return whether KLASS has at least one (co- or contra-)variant generic
 * parameter whose instantiation is a reference type.  Such casts need the
 * variance-aware slow path.  Looks at the generic instantiation of a closed
 * generic class, or the open container when compiling shared code.
 */
3340 mini_class_has_reference_variant_generic_argument (MonoCompile *cfg, MonoClass *klass, int context_used)
3343 MonoGenericContainer *container;
3344 MonoGenericInst *ginst;
3346 if (klass->generic_class) {
3347 container = klass->generic_class->container_class->generic_container;
3348 ginst = klass->generic_class->context.class_inst;
3349 } else if (klass->generic_container && context_used) {
3350 container = klass->generic_container;
3351 ginst = container->context.class_inst;
3356 for (i = 0; i < container->type_argc; ++i) {
3358 if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))
3360 type = ginst->type_argv [i];
3361 if (mini_type_is_reference (cfg, type))
3367 // FIXME: This doesn't work yet (class libs tests fail?)
/* NOTE: the leading TRUE short-circuits the whole expression, so every
 * isinst/castclass is currently treated as "complex" and takes the icall
 * path; the remaining conditions are dead until the FIXME is resolved. */
3368 #define is_complex_isinst(klass) (TRUE || (klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || klass->marshalbyref || (klass->flags & TYPE_ATTRIBUTE_SEALED) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
3371 * Returns NULL and set the cfg exception on error.
/* Emit a castclass of SRC to KLASS.  Variant generic interfaces/delegates go
 * through the cached castclass wrapper; "complex" types call the
 * mono_object_castclass icall (currently always, see is_complex_isinst); the
 * remaining inline path null-checks, then does an interface cast, a sealed
 * vtable/class compare, or the general castclass_inst check, throwing
 * InvalidCastException on failure. */
3374 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3376 MonoBasicBlock *is_null_bb;
3377 int obj_reg = src->dreg;
3378 int vtable_reg = alloc_preg (cfg);
3379 MonoInst *klass_inst = NULL;
3384 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
3385 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
3386 MonoInst *cache_ins;
3388 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
3393 /* klass - it's the second element of the cache entry*/
3394 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
3397 args [2] = cache_ins;
3399 return mono_emit_method_call (cfg, mono_castclass, args, NULL);
3402 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3404 if (is_complex_isinst (klass)) {
3405 /* Complex case, handle by an icall */
3411 args [1] = klass_inst;
3413 return mono_emit_jit_icall (cfg, mono_object_castclass, args);
3415 /* Simple case, handled by the code below */
/* null always passes a castclass. */
3419 NEW_BBLOCK (cfg, is_null_bb);
3421 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3422 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3424 save_cast_details (cfg, klass, obj_reg);
3426 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3427 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3428 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
3430 int klass_reg = alloc_preg (cfg);
3432 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3434 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3435 /* the remoting code is broken, access the class for now */
3436 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3437 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3439 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3440 cfg->exception_ptr = klass;
3443 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3445 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3446 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3448 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3450 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3451 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
3455 MONO_START_BB (cfg, is_null_bb);
3457 reset_cast_details (cfg);
3463 * Returns NULL and set the cfg exception on error.
3466 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3469 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
3470 int obj_reg = src->dreg;
3471 int vtable_reg = alloc_preg (cfg);
3472 int res_reg = alloc_ireg_ref (cfg);
3473 MonoInst *klass_inst = NULL;
3478 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
3479 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
3480 MonoInst *cache_ins;
3482 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
3487 /* klass - it's the second element of the cache entry*/
3488 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
3491 args [2] = cache_ins;
3493 return mono_emit_method_call (cfg, mono_isinst, args, NULL);
3496 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3498 if (is_complex_isinst (klass)) {
3499 /* Complex case, handle by an icall */
3505 args [1] = klass_inst;
3507 return mono_emit_jit_icall (cfg, mono_object_isinst, args);
3509 /* Simple case, the code below can handle it */
3513 NEW_BBLOCK (cfg, is_null_bb);
3514 NEW_BBLOCK (cfg, false_bb);
3515 NEW_BBLOCK (cfg, end_bb);
3517 /* Do the assignment at the beginning, so the other assignment can be if converted */
3518 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
3519 ins->type = STACK_OBJ;
3522 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3523 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
3525 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3527 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3528 g_assert (!context_used);
3529 /* the is_null_bb target simply copies the input register to the output */
3530 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
3532 int klass_reg = alloc_preg (cfg);
3535 int rank_reg = alloc_preg (cfg);
3536 int eclass_reg = alloc_preg (cfg);
3538 g_assert (!context_used);
3539 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3540 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
3541 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3542 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3543 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
3544 if (klass->cast_class == mono_defaults.object_class) {
3545 int parent_reg = alloc_preg (cfg);
3546 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
3547 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
3548 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3549 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3550 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
3551 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
3552 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3553 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3554 } else if (klass->cast_class == mono_defaults.enum_class) {
3555 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3556 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3557 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
3558 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3560 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
3561 /* Check that the object is a vector too */
3562 int bounds_reg = alloc_preg (cfg);
3563 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
3564 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
3565 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3568 /* the is_null_bb target simply copies the input register to the output */
3569 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3571 } else if (mono_class_is_nullable (klass)) {
3572 g_assert (!context_used);
3573 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3574 /* the is_null_bb target simply copies the input register to the output */
3575 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
3577 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3578 g_assert (!context_used);
3579 /* the remoting code is broken, access the class for now */
3580 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3581 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3583 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3584 cfg->exception_ptr = klass;
3587 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3589 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3590 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3592 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3593 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
3595 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3596 /* the is_null_bb target simply copies the input register to the output */
3597 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
3602 MONO_START_BB (cfg, false_bb);
3604 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
3605 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3607 MONO_START_BB (cfg, is_null_bb);
3609 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *
 *   Emit IR for the CEE_MONO_CISINST opcode: test whether the object in SRC
 * is an instance of KLASS, with special handling for remoting transparent
 * proxies whose type cannot be decided at JIT time (see the result-code
 * comment below). The result is produced as an OP_ICONST of STACK_I4 type.
 */
3615 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3617 /* This opcode takes as input an object reference and a class, and returns:
3618 0) if the object is an instance of the class,
3619 1) if the object is not instance of the class,
3620 2) if the object is a proxy whose type cannot be determined */
3623 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
3624 int obj_reg = src->dreg;
3625 int dreg = alloc_ireg (cfg);
3627 int klass_reg = alloc_preg (cfg);
3629 NEW_BBLOCK (cfg, true_bb);
3630 NEW_BBLOCK (cfg, false_bb);
3631 NEW_BBLOCK (cfg, false2_bb);
3632 NEW_BBLOCK (cfg, end_bb);
3633 NEW_BBLOCK (cfg, no_proxy_bb);
/* A null reference is not an instance: go straight to the "1" result (false_bb). */
3635 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3636 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
/* Interface case: try a direct interface check on the vtable first. */
3638 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3639 NEW_BBLOCK (cfg, interface_fail_bb);
3641 tmp_reg = alloc_preg (cfg);
3642 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3643 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
/* Interface check failed: the object may still be a transparent proxy. */
3644 MONO_START_BB (cfg, interface_fail_bb);
3645 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Not a proxy either -> definite "not an instance" (result 1). */
3647 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
/* Proxy with custom type info set -> type undecidable at JIT time (result 2). */
3649 tmp_reg = alloc_preg (cfg);
3650 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3651 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3652 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
/* Non-interface case: load the object's class from its vtable. */
3654 tmp_reg = alloc_preg (cfg);
3655 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3656 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3658 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* Transparent proxy: test against the remoted class instead of the proxy class. */
3659 tmp_reg = alloc_preg (cfg);
3660 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3661 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3663 tmp_reg = alloc_preg (cfg);
3664 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3665 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3666 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
/* Custom type info present: a failed check is "undecidable" (2), not "false". */
3668 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
3669 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
3671 MONO_START_BB (cfg, no_proxy_bb);
3673 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
/* Result 1: not an instance. */
3676 MONO_START_BB (cfg, false_bb);
3678 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3679 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Result 2: proxy whose type cannot be determined. */
3681 MONO_START_BB (cfg, false2_bb);
3683 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
3684 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Result 0: the object is an instance of KLASS. */
3686 MONO_START_BB (cfg, true_bb);
3688 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3690 MONO_START_BB (cfg, end_bb);
3693 MONO_INST_NEW (cfg, ins, OP_ICONST);
3695 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *
 *   Emit IR for the CEE_MONO_CCASTCLASS opcode: a castclass variant that is
 * aware of remoting transparent proxies. Instead of always throwing on
 * failure, a proxy whose type cannot be decided at JIT time yields result 1
 * so the caller can fall back to a runtime check (see comment below).
 */
3701 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3703 /* This opcode takes as input an object reference and a class, and returns:
3704 0) if the object is an instance of the class,
3705 1) if the object is a proxy whose type cannot be determined
3706 an InvalidCastException exception is thrown otherwise*/
3709 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
3710 int obj_reg = src->dreg;
3711 int dreg = alloc_ireg (cfg);
3712 int tmp_reg = alloc_preg (cfg);
3713 int klass_reg = alloc_preg (cfg);
3715 NEW_BBLOCK (cfg, end_bb);
3716 NEW_BBLOCK (cfg, ok_result_bb);
/* Casting a null reference always succeeds (result 0). */
3718 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3719 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
/* Record klass/obj so a failing cast can produce a detailed exception message. */
3721 save_cast_details (cfg, klass, obj_reg);
/* Interface case: direct vtable interface check first. */
3723 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3724 NEW_BBLOCK (cfg, interface_fail_bb);
3726 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3727 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
3728 MONO_START_BB (cfg, interface_fail_bb);
3729 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Not a transparent proxy -> this check throws InvalidCastException. */
3731 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
/* Proxy without custom type info -> throw; with it -> undecidable (result 1). */
3733 tmp_reg = alloc_preg (cfg);
3734 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3735 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3736 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
3738 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3739 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3742 NEW_BBLOCK (cfg, no_proxy_bb);
/* Non-interface case: load the object's class and detect transparent proxies. */
3744 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3745 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3746 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* Transparent proxy: test against the remoted class. */
3748 tmp_reg = alloc_preg (cfg);
3749 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3750 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3752 tmp_reg = alloc_preg (cfg);
3753 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3754 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3755 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3757 NEW_BBLOCK (cfg, fail_1_bb);
/* Proxy with custom type info: a failed subtype check yields result 1, no throw. */
3759 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
3761 MONO_START_BB (cfg, fail_1_bb);
3763 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3764 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Plain object: a failing castclass here throws InvalidCastException. */
3766 MONO_START_BB (cfg, no_proxy_bb);
3768 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
/* Result 0: cast succeeded. */
3771 MONO_START_BB (cfg, ok_result_bb);
3773 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3775 MONO_START_BB (cfg, end_bb);
3778 MONO_INST_NEW (cfg, ins, OP_ICONST);
3780 ins->type = STACK_I4;
3786 * Returns NULL and sets the cfg exception on error.
/*
 * handle_delegate_ctor:
 *
 *   Emit an inlined version of mono_delegate_ctor (): allocate a delegate of
 * class KLASS, fill in its target/method/method_code/invoke_impl fields, and
 * return the new object. CONTEXT_USED is forwarded to the rgctx helpers for
 * generic sharing.
 */
3788 static G_GNUC_UNUSED MonoInst*
3789 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used)
3793 gpointer *trampoline;
3794 MonoInst *obj, *method_ins, *tramp_ins;
3798 obj = handle_alloc (cfg, klass, FALSE, 0);
3802 /* Inline the contents of mono_delegate_ctor */
3804 /* Set target field */
3805 /* Optimize away setting of NULL target */
3806 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0)) {
3807 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
/* A reference store into a heap object needs a write barrier under moving/generational GC. */
3808 if (cfg->gen_write_barriers) {
3809 dreg = alloc_preg (cfg);
3810 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target));
3811 emit_write_barrier (cfg, ptr, target, 0);
3815 /* Set method field */
3816 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
3817 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
3818 if (cfg->gen_write_barriers) {
3819 dreg = alloc_preg (cfg);
3820 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method));
3821 emit_write_barrier (cfg, ptr, method_ins, 0);
3824 * To avoid looking up the compiled code belonging to the target method
3825 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
3826 * store it, and we fill it after the method has been compiled.
3828 if (!cfg->compile_aot && !method->dynamic) {
3829 MonoInst *code_slot_ins;
3832 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* The method -> code-slot mapping lives in a lazily created per-domain hash,
 * guarded by the domain lock. */
3834 domain = mono_domain_get ();
3835 mono_domain_lock (domain);
3836 if (!domain_jit_info (domain)->method_code_hash)
3837 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
3838 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
3840 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
3841 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
3843 mono_domain_unlock (domain);
3845 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
3847 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
3850 /* Set invoke_impl field */
/* Under AOT the trampoline address is patched at load time; otherwise it is
 * created now and embedded as a constant. */
3851 if (cfg->compile_aot) {
3852 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, klass);
3854 trampoline = mono_create_delegate_trampoline (klass);
3855 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
3857 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
3859 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit a call to the mono_array_new_va () icall to allocate a
 * multi-dimensional array of rank RANK, taking the dimension arguments
 * from SP. Returns the call instruction.
 */
3865 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
3867 MonoJitICallInfo *info;
3869 /* Need to register the icall so it gets an icall wrapper */
3870 info = mono_get_array_new_va_icall (rank);
3872 cfg->flags |= MONO_CFG_HAS_VARARGS;
/* LLVM cannot compile methods containing this vararg call; force the old JIT. */
3874 /* mono_array_new_va () needs a vararg calling convention */
3875 cfg->disable_llvm = TRUE;
3877 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
3878 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * mono_emit_load_got_addr:
 *
 *   Emit the instruction loading the GOT address into cfg->got_var, placed at
 * the very start of the entry basic block so the register is valid before any
 * use. No-op when there is no got_var or it was already allocated.
 */
3882 mono_emit_load_got_addr (MonoCompile *cfg)
3884 MonoInst *getaddr, *dummy_use;
3886 if (!cfg->got_var || cfg->got_var_allocated)
3889 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
3890 getaddr->dreg = cfg->got_var->dreg;
3892 /* Add it to the start of the first bblock */
/* Prepend manually if the block already has code, so the load comes first. */
3893 if (cfg->bb_entry->code) {
3894 getaddr->next = cfg->bb_entry->code;
3895 cfg->bb_entry->code = getaddr;
3898 MONO_ADD_INS (cfg->bb_entry, getaddr);
3900 cfg->got_var_allocated = TRUE;
3903 * Add a dummy use to keep the got_var alive, since real uses might
3904 * only be generated by the back ends.
3905 * Add it to end_bblock, so the variable's lifetime covers the whole
3907 * It would be better to make the usage of the got var explicit in all
3908 * cases when the backend needs it (i.e. calls, throw etc.), so this
3909 * wouldn't be needed.
3911 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
3912 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Lazily initialized inline size threshold; overridable via MONO_INLINELIMIT. */
3915 static int inline_limit;
3916 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 *   Decide whether METHOD is eligible for inlining into the method being
 * compiled in CFG. Rejects methods that are too large, synchronized, marked
 * noinline, marshal-by-ref, have declarative security, or whose class would
 * need a cctor run inside the inlined code.
 */
3919 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
3921 MonoMethodHeaderSummary header;
3923 #ifdef MONO_ARCH_SOFT_FLOAT
3924 MonoMethodSignature *sig = mono_method_signature (method);
/* Inlining is disabled entirely under generic sharing and past 10 levels deep. */
3928 if (cfg->generic_sharing_context)
3931 if (cfg->inline_depth > 10)
3934 #ifdef MONO_ARCH_HAVE_LMF_OPS
3935 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3936 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
3937 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
/* Header summary is a cheap way to get code size/attributes without a full decode. */
3942 if (!mono_method_get_header_summary (method, &header))
3945 /*runtime, icall and pinvoke are checked by summary call*/
3946 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
3947 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
3948 (method->klass->marshalbyref) ||
3952 /* also consider num_locals? */
3953 /* Do the size check early to avoid creating vtables */
3954 if (!inline_limit_inited) {
3955 if (getenv ("MONO_INLINELIMIT"))
3956 inline_limit = atoi (getenv ("MONO_INLINELIMIT"))
3958 inline_limit = INLINE_LENGTH_LIMIT;
3959 inline_limit_inited = TRUE;
3961 if (header.code_size >= inline_limit)
3965 * if we can initialize the class of the method right away, we do,
3966 * otherwise we don't allow inlining if the class needs initialization,
3967 * since it would mean inserting a call to mono_runtime_class_init()
3968 * inside the inlined code
3970 if (!(cfg->opt & MONO_OPT_SHARED)) {
3971 if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
3972 if (cfg->run_cctors && method->klass->has_cctor) {
3973 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
3974 if (!method->klass->runtime_info)
3975 /* No vtable created yet */
3977 vtable = mono_class_vtable (cfg->domain, method->klass);
3980 /* This makes so that inline cannot trigger */
3981 /* .cctors: too many apps depend on them */
3982 /* running with a specific order... */
3983 if (! vtable->initialized)
3985 mono_runtime_class_init (vtable);
3987 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
3988 if (!method->klass->runtime_info)
3989 /* No vtable created yet */
3991 vtable = mono_class_vtable (cfg->domain, method->klass);
3994 if (!vtable->initialized)
3999 * If we're compiling for shared code
4000 * the cctor will need to be run at aot method load time, for example,
4001 * or at the end of the compilation of the inlining method.
4003 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
4008 * CAS - do not inline methods with declarative security
4009 * Note: this has to be before any possible return TRUE;
4011 if (mono_method_has_declsec (method))
/* Soft float: R4 values need the soft-float decomposition pass; do not inline. */
4014 #ifdef MONO_ARCH_SOFT_FLOAT
4016 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
4018 for (i = 0; i < sig->param_count; ++i)
4019 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Return whether a static field access from METHOD requires emitting a
 * class-initialization check for VTABLE's class. Already-initialized classes
 * (outside AOT), beforefieldinit classes, and classes with no cctor to run
 * are exempt, as is a class accessing its own statics from an instance method.
 */
4027 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoVTable *vtable)
4029 if (vtable->initialized && !cfg->compile_aot)
4032 if (vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
4035 if (!mono_class_needs_cctor_run (vtable->klass, method))
4038 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (vtable->klass == method->klass))
4039 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit IR computing the address of element INDEX of the one-dimensional
 * array ARR whose element class is KLASS. When BCHECK is set, an array
 * bounds check against max_length is emitted first. Returns an instruction
 * of type STACK_MP pointing into the array's vector.
 */
4046 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
4050 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
4052 mono_class_init (klass);
4053 size = mono_class_array_element_size (klass);
4055 mult_reg = alloc_preg (cfg);
4056 array_reg = arr->dreg;
4057 index_reg = index->dreg;
4059 #if SIZEOF_REGISTER == 8
4060 /* The array reg is 64 bits but the index reg is only 32 */
/* LLVM keeps 32-bit values in properly typed registers, so no widening is needed. */
4061 if (COMPILE_LLVM (cfg)) {
4063 index2_reg = index_reg;
4065 index2_reg = alloc_preg (cfg);
4066 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
/* 32-bit target: narrow a 64-bit index to 32 bits. */
4069 if (index->type == STACK_I8) {
4070 index2_reg = alloc_preg (cfg);
4071 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
4073 index2_reg = index_reg;
4078 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
/* Fast path: a single LEA covers base + index*scale + offsetof(vector)
 * for power-of-two element sizes up to 8. */
4080 #if defined(TARGET_X86) || defined(TARGET_AMD64)
4081 if (size == 1 || size == 2 || size == 4 || size == 8) {
4082 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
4084 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
4085 ins->klass = mono_class_get_element_class (klass);
4086 ins->type = STACK_MP;
/* Generic path: addr = arr + index * size + offsetof(MonoArray, vector). */
4092 add_reg = alloc_ireg_mp (cfg);
4094 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
4095 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
4096 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
4097 ins->klass = mono_class_get_element_class (klass);
4098 ins->type = STACK_MP;
4099 MONO_ADD_INS (cfg->cbb, ins);
4104 #ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit IR computing the address of element [index1, index2] of the
 * two-dimensional array ARR of element class KLASS, including per-dimension
 * lower-bound adjustment and range checks against the bounds descriptors.
 * Only available when the architecture has native multiply support.
 */
4106 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
4108 int bounds_reg = alloc_preg (cfg);
4109 int add_reg = alloc_ireg_mp (cfg);
4110 int mult_reg = alloc_preg (cfg);
4111 int mult2_reg = alloc_preg (cfg);
4112 int low1_reg = alloc_preg (cfg);
4113 int low2_reg = alloc_preg (cfg);
4114 int high1_reg = alloc_preg (cfg);
4115 int high2_reg = alloc_preg (cfg);
4116 int realidx1_reg = alloc_preg (cfg);
4117 int realidx2_reg = alloc_preg (cfg);
4118 int sum_reg = alloc_preg (cfg);
4123 mono_class_init (klass);
4124 size = mono_class_array_element_size (klass);
4126 index1 = index_ins1->dreg;
4127 index2 = index_ins2->dreg;
4129 /* range checking */
4130 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
4131 arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
/* Dimension 1: realidx = index - lower_bound; unsigned compare against length
 * catches both negative and too-large indices in one check. */
4133 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
4134 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4135 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
4136 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
4137 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
4138 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
4139 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Dimension 2: second MonoArrayBounds entry sits sizeof(MonoArrayBounds) further on. */
4141 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
4142 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4143 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
4144 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
4145 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
4146 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
4147 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* addr = arr + (realidx1 * len2 + realidx2) * size + offsetof(MonoArray, vector). */
4149 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
4150 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
4151 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
4152 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
4153 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
4155 ins->type = STACK_MP;
4157 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 *   Emit the address computation for an array Get/Set/Address accessor
 * CMETHOD. Rank 1 and (when intrinsics are enabled) rank 2 are emitted
 * inline; higher ranks fall back to a call to the managed array-address
 * wrapper. The rank is derived from the accessor's parameter count, minus
 * the trailing value parameter for setters (IS_SET).
 */
4164 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
4168 MonoMethod *addr_method;
4171 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
4174 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], TRUE);
4176 #ifndef MONO_ARCH_EMULATE_MUL_DIV
4177 /* emit_ldelema_2 depends on OP_LMUL */
4178 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
4179 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
/* General case: call the per-rank/per-size marshalling helper. */
4183 element_size = mono_class_array_element_size (cmethod->klass->element_class);
4184 addr_method = mono_marshal_get_array_address (rank, element_size);
4185 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/* Default break policy: honor every breakpoint (see mono_set_break_policy). */
4190 static MonoBreakPolicy
4191 always_insert_breakpoint (MonoMethod *method)
4193 return MONO_BREAK_POLICY_ALWAYS;
/* Current policy callback; replaced by embedders via mono_set_break_policy (). */
4196 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
4199 * mono_set_break_policy:
4200 * policy_callback: the new callback function
4202 * Allow embedders to decide whether to actually obey breakpoint instructions
4203 * (both break IL instructions and Debugger.Break () method calls), for example
4204 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
4205 * untrusted or semi-trusted code.
4207 * @policy_callback will be called every time a break point instruction needs to
4208 * be inserted with the method argument being the method that calls Debugger.Break()
4209 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
4210 * if it wants the breakpoint to not be effective in the given method.
4211 * #MONO_BREAK_POLICY_ALWAYS is the default.
4214 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
/* A NULL callback restores the default always-break policy. */
4216 if (policy_callback)
4217 break_policy_func = policy_callback;
4219 break_policy_func = always_insert_breakpoint;
/*
 * should_insert_brekpoint:
 *
 *   Consult the registered break policy to decide whether a breakpoint for
 * METHOD should actually be materialized. (The misspelled name is the
 * established internal identifier; renaming would break its callers.)
 */
4223 should_insert_brekpoint (MonoMethod *method) {
4224 switch (break_policy_func (method)) {
4225 case MONO_BREAK_POLICY_ALWAYS:
4227 case MONO_BREAK_POLICY_NEVER:
4229 case MONO_BREAK_POLICY_ON_DBG:
4230 return mono_debug_using_mono_debugger ();
4232 g_warning ("Incorrect value returned from break policy callback");
4237 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 *
 *   Inline the array element copy done by the (Get|Set)GenericValueImpl
 * icalls: compute the element address of args[0][args[1]] and copy one
 * element of the type in fsig->params [2] between it and the location
 * args [2] points at. IS_SET selects the copy direction (into the array
 * when set, out of it otherwise).
 */
4239 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
4241 MonoInst *addr, *store, *load;
4242 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
4244 /* the bounds check is already done by the callers */
4245 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
4247 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
4248 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
4250 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
4251 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/*
 * mini_emit_inst_for_ctor:
 *
 *   Try to replace a call to constructor CMETHOD with an intrinsic
 * instruction sequence. Currently only SIMD constructors are handled,
 * and only when the SIMD optimization is enabled.
 */
4257 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4259 MonoInst *ins = NULL;
4260 #ifdef MONO_ARCH_SIMD_INTRINSICS
4261 if (cfg->opt & MONO_OPT_SIMD) {
4262 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
4272 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4274 MonoInst *ins = NULL;
4276 static MonoClass *runtime_helpers_class = NULL;
4277 if (! runtime_helpers_class)
4278 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
4279 "System.Runtime.CompilerServices", "RuntimeHelpers");
4281 if (cmethod->klass == mono_defaults.string_class) {
4282 if (strcmp (cmethod->name, "get_Chars") == 0) {
4283 int dreg = alloc_ireg (cfg);
4284 int index_reg = alloc_preg (cfg);
4285 int mult_reg = alloc_preg (cfg);
4286 int add_reg = alloc_preg (cfg);
4288 #if SIZEOF_REGISTER == 8
4289 /* The array reg is 64 bits but the index reg is only 32 */
4290 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
4292 index_reg = args [1]->dreg;
4294 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
4296 #if defined(TARGET_X86) || defined(TARGET_AMD64)
4297 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
4298 add_reg = ins->dreg;
4299 /* Avoid a warning */
4301 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
4304 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
4305 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
4306 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
4307 add_reg, G_STRUCT_OFFSET (MonoString, chars));
4309 type_from_op (ins, NULL, NULL);
4311 } else if (strcmp (cmethod->name, "get_Length") == 0) {
4312 int dreg = alloc_ireg (cfg);
4313 /* Decompose later to allow more optimizations */
4314 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
4315 ins->type = STACK_I4;
4316 ins->flags |= MONO_INST_FAULT;
4317 cfg->cbb->has_array_access = TRUE;
4318 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
4321 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
4322 int mult_reg = alloc_preg (cfg);
4323 int add_reg = alloc_preg (cfg);
4325 /* The corlib functions check for oob already. */
4326 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
4327 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
4328 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
4329 return cfg->cbb->last_ins;
4332 } else if (cmethod->klass == mono_defaults.object_class) {
4334 if (strcmp (cmethod->name, "GetType") == 0) {
4335 int dreg = alloc_ireg_ref (cfg);
4336 int vt_reg = alloc_preg (cfg);
4337 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
4338 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
4339 type_from_op (ins, NULL, NULL);
4342 #if !defined(MONO_ARCH_EMULATE_MUL_DIV)
4343 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0 && !mono_gc_is_moving ()) {
4344 int dreg = alloc_ireg (cfg);
4345 int t1 = alloc_ireg (cfg);
4347 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
4348 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
4349 ins->type = STACK_I4;
4353 } else if (strcmp (cmethod->name, ".ctor") == 0) {
4354 MONO_INST_NEW (cfg, ins, OP_NOP);
4355 MONO_ADD_INS (cfg->cbb, ins);
4359 } else if (cmethod->klass == mono_defaults.array_class) {
4360 if (strcmp (cmethod->name + 1, "etGenericValueImpl") == 0)
4361 return emit_array_generic_access (cfg, fsig, args, *cmethod->name == 'S');
4363 #ifndef MONO_BIG_ARRAYS
4365 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
4368 if ((strcmp (cmethod->name, "GetLength") == 0 || strcmp (cmethod->name, "GetLowerBound") == 0) && args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
4369 int dreg = alloc_ireg (cfg);
4370 int bounds_reg = alloc_ireg_mp (cfg);
4371 MonoBasicBlock *end_bb, *szarray_bb;
4372 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
4374 NEW_BBLOCK (cfg, end_bb);
4375 NEW_BBLOCK (cfg, szarray_bb);
4377 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
4378 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
4379 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
4380 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
4381 /* Non-szarray case */
4383 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4384 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
4386 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4387 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4388 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4389 MONO_START_BB (cfg, szarray_bb);
4392 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4393 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
4395 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4396 MONO_START_BB (cfg, end_bb);
4398 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
4399 ins->type = STACK_I4;
4405 if (cmethod->name [0] != 'g')
4408 if (strcmp (cmethod->name, "get_Rank") == 0) {
4409 int dreg = alloc_ireg (cfg);
4410 int vtable_reg = alloc_preg (cfg);
4411 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
4412 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
4413 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
4414 vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
4415 type_from_op (ins, NULL, NULL);
4418 } else if (strcmp (cmethod->name, "get_Length") == 0) {
4419 int dreg = alloc_ireg (cfg);
4421 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4422 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
4423 type_from_op (ins, NULL, NULL);
4428 } else if (cmethod->klass == runtime_helpers_class) {
4430 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
4431 EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
4435 } else if (cmethod->klass == mono_defaults.thread_class) {
4436 if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
4437 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
4438 MONO_ADD_INS (cfg->cbb, ins);
4440 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
4441 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
4442 MONO_ADD_INS (cfg->cbb, ins);
4445 } else if (cmethod->klass == mono_defaults.monitor_class) {
4446 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
4447 if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) {
4450 if (COMPILE_LLVM (cfg)) {
4452 * Pass the argument normally, the LLVM backend will handle the
4453 * calling convention problems.
4455 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
4457 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
4458 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
4459 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
4460 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
4463 return (MonoInst*)call;
4464 } else if (strcmp (cmethod->name, "Exit") == 0) {
4467 if (COMPILE_LLVM (cfg)) {
4468 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
4470 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
4471 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
4472 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
4473 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
4476 return (MonoInst*)call;
4478 #elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
4479 MonoMethod *fast_method = NULL;
4481 /* Avoid infinite recursion */
4482 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
4483 (strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
4484 strcmp (cfg->method->name, "FastMonitorExit") == 0))
4487 if ((strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) ||
4488 strcmp (cmethod->name, "Exit") == 0)
4489 fast_method = mono_monitor_get_fast_path (cmethod);
4493 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
4495 } else if (cmethod->klass->image == mono_defaults.corlib &&
4496 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
4497 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
4500 #if SIZEOF_REGISTER == 8
4501 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
4502 /* 64 bit reads are already atomic */
4503 MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
4504 ins->dreg = mono_alloc_preg (cfg);
4505 ins->inst_basereg = args [0]->dreg;
4506 ins->inst_offset = 0;
4507 MONO_ADD_INS (cfg->cbb, ins);
4511 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
4512 if (strcmp (cmethod->name, "Increment") == 0) {
4513 MonoInst *ins_iconst;
4516 if (fsig->params [0]->type == MONO_TYPE_I4)
4517 opcode = OP_ATOMIC_ADD_NEW_I4;
4518 #if SIZEOF_REGISTER == 8
4519 else if (fsig->params [0]->type == MONO_TYPE_I8)
4520 opcode = OP_ATOMIC_ADD_NEW_I8;
4523 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4524 ins_iconst->inst_c0 = 1;
4525 ins_iconst->dreg = mono_alloc_ireg (cfg);
4526 MONO_ADD_INS (cfg->cbb, ins_iconst);
4528 MONO_INST_NEW (cfg, ins, opcode);
4529 ins->dreg = mono_alloc_ireg (cfg);
4530 ins->inst_basereg = args [0]->dreg;
4531 ins->inst_offset = 0;
4532 ins->sreg2 = ins_iconst->dreg;
4533 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4534 MONO_ADD_INS (cfg->cbb, ins);
4536 } else if (strcmp (cmethod->name, "Decrement") == 0) {
4537 MonoInst *ins_iconst;
4540 if (fsig->params [0]->type == MONO_TYPE_I4)
4541 opcode = OP_ATOMIC_ADD_NEW_I4;
4542 #if SIZEOF_REGISTER == 8
4543 else if (fsig->params [0]->type == MONO_TYPE_I8)
4544 opcode = OP_ATOMIC_ADD_NEW_I8;
4547 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4548 ins_iconst->inst_c0 = -1;
4549 ins_iconst->dreg = mono_alloc_ireg (cfg);
4550 MONO_ADD_INS (cfg->cbb, ins_iconst);
4552 MONO_INST_NEW (cfg, ins, opcode);
4553 ins->dreg = mono_alloc_ireg (cfg);
4554 ins->inst_basereg = args [0]->dreg;
4555 ins->inst_offset = 0;
4556 ins->sreg2 = ins_iconst->dreg;
4557 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4558 MONO_ADD_INS (cfg->cbb, ins);
4560 } else if (strcmp (cmethod->name, "Add") == 0) {
4563 if (fsig->params [0]->type == MONO_TYPE_I4)
4564 opcode = OP_ATOMIC_ADD_NEW_I4;
4565 #if SIZEOF_REGISTER == 8
4566 else if (fsig->params [0]->type == MONO_TYPE_I8)
4567 opcode = OP_ATOMIC_ADD_NEW_I8;
4571 MONO_INST_NEW (cfg, ins, opcode);
4572 ins->dreg = mono_alloc_ireg (cfg);
4573 ins->inst_basereg = args [0]->dreg;
4574 ins->inst_offset = 0;
4575 ins->sreg2 = args [1]->dreg;
4576 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4577 MONO_ADD_INS (cfg->cbb, ins);
4580 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
4582 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
4583 if (strcmp (cmethod->name, "Exchange") == 0) {
4585 gboolean is_ref = fsig->params [0]->type == MONO_TYPE_OBJECT;
4587 if (fsig->params [0]->type == MONO_TYPE_I4)
4588 opcode = OP_ATOMIC_EXCHANGE_I4;
4589 #if SIZEOF_REGISTER == 8
4590 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I8) ||
4591 (fsig->params [0]->type == MONO_TYPE_I))
4592 opcode = OP_ATOMIC_EXCHANGE_I8;
4594 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I))
4595 opcode = OP_ATOMIC_EXCHANGE_I4;
4600 MONO_INST_NEW (cfg, ins, opcode);
4601 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
4602 ins->inst_basereg = args [0]->dreg;
4603 ins->inst_offset = 0;
4604 ins->sreg2 = args [1]->dreg;
4605 MONO_ADD_INS (cfg->cbb, ins);
4607 switch (fsig->params [0]->type) {
4609 ins->type = STACK_I4;
4613 ins->type = STACK_I8;
4615 case MONO_TYPE_OBJECT:
4616 ins->type = STACK_OBJ;
4619 g_assert_not_reached ();
4622 if (cfg->gen_write_barriers && is_ref)
4623 emit_write_barrier (cfg, args [0], args [1], -1);
4625 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
4627 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS
4628 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
4630 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [1]);
4631 if (fsig->params [1]->type == MONO_TYPE_I4)
4633 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I)
4634 size = sizeof (gpointer);
4635 else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I8)
4638 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
4639 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
4640 ins->sreg1 = args [0]->dreg;
4641 ins->sreg2 = args [1]->dreg;
4642 ins->sreg3 = args [2]->dreg;
4643 ins->type = STACK_I4;
4644 MONO_ADD_INS (cfg->cbb, ins);
4645 } else if (size == 8) {
4646 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
4647 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
4648 ins->sreg1 = args [0]->dreg;
4649 ins->sreg2 = args [1]->dreg;
4650 ins->sreg3 = args [2]->dreg;
4651 ins->type = STACK_I8;
4652 MONO_ADD_INS (cfg->cbb, ins);
4654 /* g_assert_not_reached (); */
4656 if (cfg->gen_write_barriers && is_ref)
4657 emit_write_barrier (cfg, args [0], args [1], -1);
4659 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS */
4663 } else if (cmethod->klass->image == mono_defaults.corlib) {
4664 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
4665 && strcmp (cmethod->klass->name, "Debugger") == 0) {
4666 if (should_insert_brekpoint (cfg->method))
4667 MONO_INST_NEW (cfg, ins, OP_BREAK);
4669 MONO_INST_NEW (cfg, ins, OP_NOP);
4670 MONO_ADD_INS (cfg->cbb, ins);
4673 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
4674 && strcmp (cmethod->klass->name, "Environment") == 0) {
4676 EMIT_NEW_ICONST (cfg, ins, 1);
4678 EMIT_NEW_ICONST (cfg, ins, 0);
4682 } else if (cmethod->klass == mono_defaults.math_class) {
4684 * There is general branches code for Min/Max, but it does not work for
4686 * http://everything2.com/?node_id=1051618
4690 #ifdef MONO_ARCH_SIMD_INTRINSICS
4691 if (cfg->opt & MONO_OPT_SIMD) {
4692 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
4698 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
/*
 * mini_redirect_call:
 *
 *   Redirect certain well-known calls to a JIT-internal replacement.
 * Currently only String.InternalAllocateStr is redirected, to the managed
 * GC allocator, and only when allocation profiling and shared (AOT-style)
 * code are both off. Returns the replacement call instruction, or
 * (presumably, from the elided tail) NULL when no redirection applies.
 */
4702 * This entry point could be used later for arbitrary method
4705 inline static MonoInst*
4706 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
4707 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
4709 if (method->klass == mono_defaults.string_class) {
4710 /* managed string allocation support */
4711 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
4712 MonoInst *iargs [2];
4713 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4714 MonoMethod *managed_alloc = NULL;
4716 g_assert (vtable); /* Should not fail since it is System.String */
4717 #ifndef MONO_CROSS_COMPILE
4718 managed_alloc = mono_gc_get_managed_allocator (vtable, FALSE);
/* call the managed allocator with (vtable, length) instead of the icall */
4722 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4723 iargs [1] = args [0];
4724 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
/*
 * mono_save_args:
 *
 *   Store the values on the stack SP into fresh local variables which become
 * the argument slots (cfg->args) of a method being inlined. The implicit
 * 'this' argument (i == 0 when sig->hasthis) gets its type from the stack
 * entry itself; the rest use the signature's declared parameter types.
 */
4731 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
4733 MonoInst *store, *temp;
4736 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
4737 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
4740 * FIXME: We should use *args++ = sp [0], but that would mean the arg
4741 * would be different than the MonoInst's used to represent arguments, and
4742 * the ldelema implementation can't deal with that.
4743 * Solution: When ldelema is used on an inline argument, create a var for
4744 * it, emit ldelema on that var, and emit the saving code below in
4745 * inline_method () if needed.
4747 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
4748 cfg->args [i] = temp;
4749 /* This uses cfg->args [i] which is set by the preceding line */
4750 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
4751 store->cil_code = sp [0]->cil_code;
/*
 * Debug switches: when non-zero, inlining can be restricted to methods whose
 * full names match the MONO_INLINE_{CALLED,CALLER}_METHOD_NAME_LIMIT
 * environment variables (see the check_inline_*_name_limit helpers below).
 */
4756 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
4757 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
4759 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *
 *   Debug helper: returns whether CALLED_METHOD's full name starts with the
 * prefix given by the MONO_INLINE_CALLED_METHOD_NAME_LIMIT env var. The env
 * var is read once and cached in a static; an unset var ("" sentinel) means
 * no restriction (the elided branch presumably returns TRUE then).
 */
4761 check_inline_called_method_name_limit (MonoMethod *called_method)
4764 static char *limit = NULL;
4766 if (limit == NULL) {
4767 char *limit_string = getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
4769 if (limit_string != NULL)
4770 limit = limit_string;
/* no env var set: use "" so the prefix check below is skipped */
4772 limit = (char *) "";
4775 if (limit [0] != '\0') {
4776 char *called_method_name = mono_method_full_name (called_method, TRUE);
4778 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
4779 g_free (called_method_name);
4781 //return (strncmp_result <= 0);
4782 return (strncmp_result == 0);
4789 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *
 *   Debug helper: same as check_inline_called_method_name_limit () above,
 * but matches the CALLER's full name against the
 * MONO_INLINE_CALLER_METHOD_NAME_LIMIT env var prefix.
 */
4791 check_inline_caller_method_name_limit (MonoMethod *caller_method)
4794 static char *limit = NULL;
4796 if (limit == NULL) {
4797 char *limit_string = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
4798 if (limit_string != NULL) {
4799 limit = limit_string;
4801 limit = (char *) "";
4805 if (limit [0] != '\0') {
4806 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
4808 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
4809 g_free (caller_method_name);
4811 //return (strncmp_result <= 0);
4812 return (strncmp_result == 0);
/*
 * inline_method:
 *
 *   Inline CMETHOD into the current compilation at IP. Saves the parts of
 * cfg that mono_method_to_ir () will overwrite (args, locals, bblock maps,
 * current method, generic context, ...), recursively translates the callee's
 * IL between a fresh start/end bblock pair, then restores the saved state.
 * If the callee's cost is acceptable (or INLINE_ALWAYS is set) the new
 * bblocks are linked/merged into the caller's graph; otherwise the attempt
 * is abandoned and cfg->cbb is reset so the speculative bblocks are dropped.
 * Returns (from elided lines, presumably) the inline cost on success and a
 * failure indicator otherwise -- TODO confirm against full source.
 */
4820 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
4821 guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_always)
4823 MonoInst *ins, *rvar = NULL;
4824 MonoMethodHeader *cheader;
4825 MonoBasicBlock *ebblock, *sbblock;
4827 MonoMethod *prev_inlined_method;
4828 MonoInst **prev_locals, **prev_args;
4829 MonoType **prev_arg_types;
4830 guint prev_real_offset;
4831 GHashTable *prev_cbb_hash;
4832 MonoBasicBlock **prev_cil_offset_to_bb;
4833 MonoBasicBlock *prev_cbb;
4834 unsigned char* prev_cil_start;
4835 guint32 prev_cil_offset_to_bb_len;
4836 MonoMethod *prev_current_method;
4837 MonoGenericContext *prev_generic_context;
4838 gboolean ret_var_set, prev_ret_var_set, virtual = FALSE;
4840 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
4842 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
4843 if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
4846 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
4847 if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
4851 if (cfg->verbose_level > 2)
4852 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4854 if (!cmethod->inline_info) {
4855 mono_jit_stats.inlineable_methods++;
4856 cmethod->inline_info = 1;
4859 /* allocate local variables */
4860 cheader = mono_method_get_header (cmethod);
4862 if (cheader == NULL || mono_loader_get_last_error ()) {
4863 MonoLoaderError *error = mono_loader_get_last_error ();
4866 mono_metadata_free_mh (cheader);
4867 if (inline_always && error)
4868 mono_cfg_set_exception (cfg, error->exception_type);
4870 mono_loader_clear_error ();
4874 /*Must verify before creating locals as it can cause the JIT to assert.*/
4875 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
4876 mono_metadata_free_mh (cheader);
4880 /* allocate space to store the return value */
4881 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
4882 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
/* replace the caller's locals array with one for the callee's locals */
4885 prev_locals = cfg->locals;
4886 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
4887 for (i = 0; i < cheader->num_locals; ++i)
4888 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
4890 /* allocate start and end blocks */
4891 /* This is needed so if the inline is aborted, we can clean up */
4892 NEW_BBLOCK (cfg, sbblock);
4893 sbblock->real_offset = real_offset;
4895 NEW_BBLOCK (cfg, ebblock);
4896 ebblock->block_num = cfg->num_bblocks++;
4897 ebblock->real_offset = real_offset;
/* save all cfg state that mono_method_to_ir () below will clobber */
4899 prev_args = cfg->args;
4900 prev_arg_types = cfg->arg_types;
4901 prev_inlined_method = cfg->inlined_method;
4902 cfg->inlined_method = cmethod;
4903 cfg->ret_var_set = FALSE;
4904 cfg->inline_depth ++;
4905 prev_real_offset = cfg->real_offset;
4906 prev_cbb_hash = cfg->cbb_hash;
4907 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
4908 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
4909 prev_cil_start = cfg->cil_start;
4910 prev_cbb = cfg->cbb;
4911 prev_current_method = cfg->current_method;
4912 prev_generic_context = cfg->generic_context;
4913 prev_ret_var_set = cfg->ret_var_set;
4915 if (*ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
/* translate the callee's IL into IR between sbblock and ebblock */
4918 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, virtual);
4920 ret_var_set = cfg->ret_var_set;
/* restore the caller's cfg state regardless of inline success */
4922 cfg->inlined_method = prev_inlined_method;
4923 cfg->real_offset = prev_real_offset;
4924 cfg->cbb_hash = prev_cbb_hash;
4925 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
4926 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
4927 cfg->cil_start = prev_cil_start;
4928 cfg->locals = prev_locals;
4929 cfg->args = prev_args;
4930 cfg->arg_types = prev_arg_types;
4931 cfg->current_method = prev_current_method;
4932 cfg->generic_context = prev_generic_context;
4933 cfg->ret_var_set = prev_ret_var_set;
4934 cfg->inline_depth --;
/* accept the inline if it was cheap enough (60 is the cost cutoff here) */
4936 if ((costs >= 0 && costs < 60) || inline_always) {
4937 if (cfg->verbose_level > 2)
4938 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4940 mono_jit_stats.inlined_methods++;
4942 /* always add some code to avoid block split failures */
4943 MONO_INST_NEW (cfg, ins, OP_NOP);
4944 MONO_ADD_INS (prev_cbb, ins);
4946 prev_cbb->next_bb = sbblock;
4947 link_bblock (cfg, prev_cbb, sbblock);
4950 * Get rid of the begin and end bblocks if possible to aid local
4953 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
4955 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
4956 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
4958 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
4959 MonoBasicBlock *prev = ebblock->in_bb [0];
4960 mono_merge_basic_blocks (cfg, prev, ebblock);
4962 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
4963 mono_merge_basic_blocks (cfg, prev_cbb, prev);
4964 cfg->cbb = prev_cbb;
4972 * If the inlined method contains only a throw, then the ret var is not
4973 * set, so set it to a dummy value.
4976 static double r8_0 = 0.0;
4978 switch (rvar->type) {
4980 MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
4983 MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
4988 MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
4991 MONO_INST_NEW (cfg, ins, OP_R8CONST);
4992 ins->type = STACK_R8;
4993 ins->inst_p0 = (void*)&r8_0;
4994 ins->dreg = rvar->dreg;
4995 MONO_ADD_INS (cfg->cbb, ins);
4998 MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (fsig->ret));
5001 g_assert_not_reached ();
/* push the return value (if any) back onto the evaluation stack */
5005 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
5008 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
/* inline rejected: clear any exception state and drop the new bblocks */
5011 if (cfg->verbose_level > 2)
5012 printf ("INLINE ABORTED %s\n", mono_method_full_name (cmethod, TRUE));
5013 cfg->exception_type = MONO_EXCEPTION_NONE;
5014 mono_loader_clear_error ();
5016 /* This gets rid of the newly added bblocks */
5017 cfg->cbb = prev_cbb;
5019 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
5024 * Some of these comments may well be out-of-date.
5025 * Design decisions: we do a single pass over the IL code (and we do bblock
5026 * splitting/merging in the few cases when it's required: a back jump to an IL
5027 * address that was not already seen as bblock starting point).
5028 * Code is validated as we go (full verification is still better left to metadata/verify.c).
5029 * Complex operations are decomposed in simpler ones right away. We need to let the
5030 * arch-specific code peek and poke inside this process somehow (except when the
5031 * optimizations can take advantage of the full semantic info of coarse opcodes).
5032 * All the opcodes of the form opcode.s are 'normalized' to opcode.
5033 * MonoInst->opcode initially is the IL opcode or some simplification of that
5034 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
5035 * opcode with value bigger than OP_LAST.
5036 * At this point the IR can be handed over to an interpreter, a dumb code generator
5037 * or to the optimizing code generator that will translate it to SSA form.
5039 * Profiling directed optimizations.
5040 * We may compile by default with few or no optimizations and instrument the code
5041 * or the user may indicate what methods to optimize the most either in a config file
5042 * or through repeated runs where the compiler applies offline the optimizations to
5043 * each method and then decides if it was worth it.
/*
 * Lightweight verification helpers used by mono_method_to_ir (): each checks
 * one structural property of the IL being translated and bails out through
 * UNVERIFIED / LOAD_ERROR on failure. They rely on locals (sp, stack_start,
 * header, num_args, ip, end, cfg) being in scope at the expansion site.
 */
5046 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
5047 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
5048 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
5049 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
5050 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
5051 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
5052 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
5053 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; LOAD_ERROR;}
5055 /* offset from br.s -> br like opcodes */
5056 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *
 *   Return whether the IL address IP belongs to basic block BB, i.e. no
 * other bblock starts at that offset in cfg's cil_offset_to_bb map.
 */
5059 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
5061 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
5063 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   First pass over the IL in [start, end): decode each opcode, and for
 * every branch/switch target (and the instruction following a branch)
 * create a basic block via GET_BBLOCK. Also marks the bblock containing a
 * CEE_THROW as out-of-line so it can be moved off the hot path.
 */
5067 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
5069 unsigned char *ip = start;
5070 unsigned char *target;
5073 MonoBasicBlock *bblock;
5074 const MonoOpcode *opcode;
5077 cli_addr = ip - start;
5078 i = mono_opcode_value ((const guint8 **)&ip, end);
5081 opcode = &mono_opcodes [i];
/* advance ip by the operand size; branches also register their targets */
5082 switch (opcode->argument) {
5083 case MonoInlineNone:
5086 case MonoInlineString:
5087 case MonoInlineType:
5088 case MonoInlineField:
5089 case MonoInlineMethod:
5092 case MonoShortInlineR:
5099 case MonoShortInlineVar:
5100 case MonoShortInlineI:
5103 case MonoShortInlineBrTarget:
5104 target = start + cli_addr + 2 + (signed char)ip [1];
5105 GET_BBLOCK (cfg, bblock, target);
/* the fall-through successor of a branch starts a bblock too */
5108 GET_BBLOCK (cfg, bblock, ip);
5110 case MonoInlineBrTarget:
5111 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
5112 GET_BBLOCK (cfg, bblock, target);
5115 GET_BBLOCK (cfg, bblock, ip);
5117 case MonoInlineSwitch: {
5118 guint32 n = read32 (ip + 1);
5121 cli_addr += 5 + 4 * n;
5122 target = start + cli_addr;
5123 GET_BBLOCK (cfg, bblock, target);
5125 for (j = 0; j < n; ++j) {
5126 target = start + cli_addr + (gint32)read32 (ip);
5127 GET_BBLOCK (cfg, bblock, target);
5137 g_assert_not_reached ();
5140 if (i == CEE_THROW) {
5141 unsigned char *bb_start = ip - 1;
5143 /* Find the start of the bblock containing the throw */
5145 while ((bb_start >= start) && !bblock) {
5146 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
5150 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *
 *   Resolve a method TOKEN in the context of M. For wrapper methods the
 * token indexes the wrapper's own data table; otherwise it is resolved
 * through the metadata of M's image. May return methods with open
 * (uninstantiated) generic types -- see mini_get_method () below.
 */
5159 static inline MonoMethod *
5160 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
5164 if (m->wrapper_type != MONO_WRAPPER_NONE)
5165 return mono_method_get_wrapper_data (m, token);
5167 method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 *
 *   Like mini_get_method_allow_open (), but when not compiling with generic
 * sharing, rejects methods whose class is still an open constructed type
 * (the elided branch presumably returns NULL/closes the type -- confirm
 * against full source).
 */
5172 static inline MonoMethod *
5173 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
5175 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
5177 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 *
 *   Resolve a type TOKEN relative to METHOD: from the wrapper data table
 * for wrapper methods, otherwise from the image metadata with the given
 * generic CONTEXT. Initializes the class before returning it.
 */
5183 static inline MonoClass*
5184 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
5188 if (method->wrapper_type != MONO_WRAPPER_NONE)
5189 klass = mono_method_get_wrapper_data (method, token);
5191 klass = mono_class_get_full (method->klass->image, token, context);
5193 mono_class_init (klass);
/*
 * check_linkdemand:
 *
 *   Evaluate CAS link-demand declarative security between CALLER and CALLEE.
 * For an ECMA link demand, code throwing a SecurityException is emitted
 * in-line before the call; other failures record a linkdemand exception on
 * cfg (unless an earlier exception is already pending).
 */
5198 * Returns TRUE if the JIT should abort inlining because "callee"
5199 * is influenced by security attributes.
5202 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
/* only re-evaluate when inlining: the top-level method was checked already */
5206 if ((cfg->method != caller) && mono_method_has_declsec (callee)) {
5210 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
5211 if (result == MONO_JIT_SECURITY_OK)
5214 if (result == MONO_JIT_LINKDEMAND_ECMA) {
5215 /* Generate code to throw a SecurityException before the actual call/link */
5216 MonoSecurityManager *secman = mono_security_manager_get_methods ();
5219 NEW_ICONST (cfg, args [0], 4);
5220 NEW_METHODCONST (cfg, args [1], caller);
5221 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
5222 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
5223 /* don't hide previous results */
5224 mono_cfg_set_exception (cfg, MONO_EXCEPTION_SECURITY_LINKDEMAND);
5225 cfg->exception_data = result;
/*
 * throw_exception:
 *
 *   Return the managed SecurityManager.ThrowException(Exception) method,
 * lazily looked up and cached in a function-local static.
 */
5233 throw_exception (void)
5235 static MonoMethod *method = NULL;
5238 MonoSecurityManager *secman = mono_security_manager_get_methods ();
5239 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *
 *   Emit IR that throws the pre-built exception object EX at runtime by
 * calling SecurityManager.ThrowException with it.
 */
5246 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
5248 MonoMethod *thrower = throw_exception ();
5251 EMIT_NEW_PCONST (cfg, args [0], ex);
5252 mono_emit_method_call (cfg, thrower, args, NULL);
5256 * Return the original method if a wrapper is specified. We can only access
5257 * the custom attributes from the original method.
5260 get_original_method (MonoMethod *method)
/* not a wrapper: the method itself carries the attributes */
5262 if (method->wrapper_type == MONO_WRAPPER_NONE)
5265 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
5266 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
5269 /* in other cases we need to find the original method */
5270 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *
 *   CoreCLR security check: if CALLER (unwrapped via get_original_method)
 * may not access FIELD, emit code throwing the returned exception.
 */
5274 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
5275 MonoBasicBlock *bblock, unsigned char *ip)
5277 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
5278 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
5280 emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *
 *   CoreCLR security check: if CALLER (unwrapped via get_original_method)
 * may not call CALLEE, emit code throwing the returned exception.
 */
5284 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
5285 MonoBasicBlock *bblock, unsigned char *ip)
5287 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
5288 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
5290 emit_throw_exception (cfg, ex);
5294 * Check that the IL instructions at ip are the array initialization
5295 * sequence and return the pointer to the data and the size.
/*
 * Recognizes the dup/ldtoken/call RuntimeHelpers::InitializeArray pattern
 * emitted by compilers for array initializers, validates the referenced
 * field and element type, and returns the static initializer data (or its
 * RVA when AOT compiling). Returns NULL when the pattern does not match or
 * the element type needs byte swapping on this target.
 */
5298 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
5301 * newarr[System.Int32]
5303 * ldtoken field valuetype ...
5304 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* ip[5] == 0x4 checks the ldtoken operand's token-table byte (field) */
5306 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
5307 guint32 token = read32 (ip + 7);
5308 guint32 field_token = read32 (ip + 2);
5309 guint32 field_index = field_token & 0xffffff;
5311 const char *data_ptr;
5313 MonoMethod *cmethod;
5314 MonoClass *dummy_class;
5315 MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
5321 *out_field_token = field_token;
5323 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* only the corlib RuntimeHelpers.InitializeArray qualifies */
5326 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
5328 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
5329 case MONO_TYPE_BOOLEAN:
5333 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
5334 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
5335 case MONO_TYPE_CHAR:
5345 return NULL; /* stupid ARM FP swapped format */
/* initializer blob must be at least as large as the array data */
5355 if (size > mono_type_size (field->type, &dummy_align))
5358 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
5359 if (!method->klass->image->dynamic) {
5360 field_index = read32 (ip + 2) & 0xffffff;
5361 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
5362 data_ptr = mono_image_rva_map (method->klass->image, rva);
5363 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
5364 /* for aot code we do the lookup on load */
5365 if (aot && data_ptr)
5366 return GUINT_TO_POINTER (rva);
5368 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
/* dynamic (SRE) image: field data lives in memory, not at an RVA */
5370 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *
 *   Record an InvalidProgramException on CFG with a message naming METHOD
 * and disassembling the offending instruction at IP (or noting an empty
 * body). The header is queued on headers_to_free for later release.
 */
5378 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
5380 char *method_fname = mono_method_full_name (method, TRUE);
5382 MonoMethodHeader *header = mono_method_get_header (method);
5384 if (header->code_size == 0)
5385 method_code = g_strdup ("method body is empty.");
5387 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
5388 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
5389 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
5390 g_free (method_fname);
5391 g_free (method_code);
5392 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * set_exception_object:
 *
 *   Record a pre-built managed exception object on CFG, registering
 * exception_ptr as a GC root so the object survives until thrown.
 */
5396 set_exception_object (MonoCompile *cfg, MonoException *exception)
5398 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OBJECT_SUPPLIED);
5399 MONO_GC_REGISTER_ROOT_SINGLE (cfg->exception_ptr);
5400 cfg->exception_ptr = exception;
/* Whether KLASS is a reference type, taking generic sharing into account. */
5404 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
5406 return mini_type_is_reference (cfg, &klass->byval_arg)
/*
 * emit_stloc_ir:
 *
 *   Emit a store of *SP into local N. When the value on top of the stack is
 * a just-emitted constant and the store would be a plain reg-reg move,
 * retarget the constant's dreg directly at the local instead of emitting a
 * separate store.
 */
5410 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
5413 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
5414 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
5415 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
5416 /* Optimize reg-reg moves away */
5418 * Can't optimize other opcodes, since sp[0] might point to
5419 * the last ins of a decomposed opcode.
5421 sp [0]->dreg = (cfg)->locals [n]->dreg;
5423 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
5428 * ldloca inhibits many optimizations so try to get rid of it in common
/*
 * Peephole for the ldloca + initobj idiom: instead of taking the local's
 * address and clearing through it, emit a direct NULL store (reference
 * types) or VZERO (structs) on the local, which keeps it eligible for
 * register allocation. Returns (from elided lines, presumably) the new ip
 * past the consumed sequence, or the original ip when not applicable.
 */
5431 static inline unsigned char *
5432 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
5441 local = read16 (ip + 2);
/* the following initobj must be in the same bblock for this to be safe */
5445 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
5446 gboolean skip = FALSE;
5448 /* From the INITOBJ case */
5449 token = read32 (ip + 2);
5450 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
5451 CHECK_TYPELOAD (klass);
5452 if (mini_type_is_reference (cfg, &klass->byval_arg)) {
5453 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5454 } else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
5455 MONO_EMIT_NEW_VZERO (cfg, cfg->locals [local]->dreg, klass);
/*
 * is_exception_class:
 *
 *   Walk CLASS's inheritance chain and report whether it derives from
 * (or is) System.Exception.
 */
5468 is_exception_class (MonoClass *class)
5471 if (class == mono_defaults.exception_class)
5473 class = class->parent;
5479 * is_jit_optimizer_disabled:
5481 * Determine whether M's assembly has a DebuggableAttribute with the
5482 * IsJITOptimizerDisabled flag set.
5485 is_jit_optimizer_disabled (MonoMethod *m)
5487 MonoAssembly *ass = m->klass->image->assembly;
5488 MonoCustomAttrInfo* attrs;
5489 static MonoClass *klass;
5491 gboolean val = FALSE;
/* per-assembly cache, published with memory barriers (value before flag) */
5494 if (ass->jit_optimizer_disabled_inited)
5495 return ass->jit_optimizer_disabled;
5498 klass = mono_class_from_name (mono_defaults.corlib, "System.Diagnostics", "DebuggableAttribute");
/* attribute class not found: optimizer stays enabled */
5501 ass->jit_optimizer_disabled = FALSE;
5502 mono_memory_barrier ();
5503 ass->jit_optimizer_disabled_inited = TRUE;
5507 attrs = mono_custom_attrs_from_assembly (ass);
5509 for (i = 0; i < attrs->num_attrs; ++i) {
5510 MonoCustomAttrEntry *attr = &attrs->attrs [i];
5513 MonoMethodSignature *sig;
5515 if (!attr->ctor || attr->ctor->klass != klass)
5517 /* Decode the attribute. See reflection.c */
5518 len = attr->data_size;
5519 p = (const char*)attr->data;
/* custom attribute blobs start with the 0x0001 prolog */
5520 g_assert (read16 (p) == 0x0001);
5523 // FIXME: Support named parameters
5524 sig = mono_method_signature (attr->ctor);
/* only the DebuggableAttribute(bool, bool) ctor is handled */
5525 if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
5527 /* Two boolean arguments */
5531 mono_custom_attrs_free (attrs);
5534 ass->jit_optimizer_disabled = val;
5535 mono_memory_barrier ();
5536 ass->jit_optimizer_disabled_inited = TRUE;
/*
 * is_supported_tail_call:
 *
 *   Decide whether a tail. call from METHOD to CMETHOD can actually be
 * compiled as a tail call: signatures must be compatible for the target
 * arch, and nothing passed may alias the caller's stack frame (byref/
 * pointer args, valuetype 'this'); pinvokes, LMF-saving callers and most
 * wrappers are also excluded.
 */
5542 is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig)
5544 gboolean supported_tail_call;
5547 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
5548 supported_tail_call = MONO_ARCH_USE_OP_TAIL_CALL (mono_method_signature (method), mono_method_signature (cmethod));
5550 supported_tail_call = mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
5553 for (i = 0; i < fsig->param_count; ++i) {
5554 if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
5555 /* These can point to the current method's stack */
5556 supported_tail_call = FALSE;
5558 if (fsig->hasthis && cmethod->klass->valuetype)
5559 /* this might point to the current method's stack */
5560 supported_tail_call = FALSE;
5561 if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
5562 supported_tail_call = FALSE;
5563 if (cfg->method->save_lmf)
5564 supported_tail_call = FALSE;
5565 if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
5566 supported_tail_call = FALSE;
5568 /* Debugging support */
/* COUNT env var bisects tail-call sites when hunting miscompilations */
5570 if (supported_tail_call) {
5571 static int count = 0;
5573 if (getenv ("COUNT")) {
5574 if (count == atoi (getenv ("COUNT")))
5575 printf ("LAST: %s\n", mono_method_full_name (cmethod, TRUE));
5576 if (count > atoi (getenv ("COUNT")))
5577 supported_tail_call = FALSE;
5582 return supported_tail_call;
5585 /* the JIT intercepts ldflda instructions to the tlsdata field in ThreadLocal<T> and redirects
5586 * it to the thread local value based on the tls_offset field. Every other kind of access to
5587 * the field causes an assert.
/*
 * Returns whether FIELD is exactly corlib's ThreadLocal`1.tlsdata,
 * the only field eligible for the redirection described above.
 */
5590 is_magic_tls_access (MonoClassField *field)
5592 if (strcmp (field->name, "tlsdata"))
5594 if (strcmp (field->parent->name, "ThreadLocal`1"))
5596 return field->parent->image == mono_defaults.corlib;
5599 /* emits the code needed to access a managed tls var (like ThreadStatic)
5600 * with the value of the tls offset in offset_reg. thread_ins represents the MonoInternalThread
5601 * pointer for the current thread.
5602 * Returns the MonoInst* representing the address of the tls var.
5605 emit_managed_static_data_access (MonoCompile *cfg, MonoInst *thread_ins, int offset_reg)
5608 int static_data_reg, array_reg, dreg;
5609 int offset2_reg, idx_reg;
5610 // inlined access to the tls data
5611 // idx = (offset >> 24) - 1;
5612 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
5613 static_data_reg = alloc_ireg (cfg);
5614 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
/* idx = (offset >> 24) - 1, scaled to a pointer-sized array index */
5615 idx_reg = alloc_ireg (cfg);
5616 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
5617 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
5618 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
5619 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
/* array = thread->static_data [idx] */
5620 array_reg = alloc_ireg (cfg);
5621 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
/* low 24 bits of the offset index into that chunk */
5622 offset2_reg = alloc_ireg (cfg);
5623 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
5624 dreg = alloc_ireg (cfg);
5625 EMIT_NEW_BIALU (cfg, addr, OP_PADD, dreg, array_reg, offset2_reg);
5630  * Redirect access to the tlsdata field to the tls var given by the tls_offset field.
5631  * The computed address is cached per-method in *cached_tls_addr so repeated
 * accesses in the same method reuse one temporary.
 * NOTE(review): this excerpt elides several lines (the function header, the
 * if/else around the thread_ins fallback, returns and braces) — confirm the
 * full control flow against the complete file before editing.
5634 create_magic_tls_access (MonoCompile *cfg, MonoClassField *tls_field, MonoInst **cached_tls_addr, MonoInst *thread_local)
/* load  = the tls_offset value read out of the ThreadLocal`1 instance,
 * addr  = the resulting address of the tls var,
 * temp  = the per-method cache variable, store = the store that fills it. */
5636 MonoInst *load, *addr, *temp, *store, *thread_ins;
5637 MonoClassField *offset_field;
/* Fast path: the address was already computed in this method — reload it. */
5639 if (*cached_tls_addr) {
5640 EMIT_NEW_TEMPLOAD (cfg, addr, (*cached_tls_addr)->inst_c0);
/* Try the arch-specific intrinsic for the current MonoInternalThread first. */
5643 thread_ins = mono_get_thread_intrinsic (cfg);
5644 offset_field = mono_class_get_field_from_name (tls_field->parent, "tls_offset");
/* Read the tls offset stored in the ThreadLocal`1 object itself. */
5646 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, offset_field->type, thread_local->dreg, offset_field->offset);
5648 MONO_ADD_INS (cfg->cbb, thread_ins);
/* Fallback path (presumably taken when the intrinsic is unavailable — TODO
 * confirm in the unelided source): obtain the thread via an icall. */
5650 MonoMethod *thread_method;
5651 thread_method = mono_class_get_method_from_name (mono_get_thread_class(), "CurrentInternalThread_internal", 0);
5652 thread_ins = mono_emit_method_call (cfg, thread_method, NULL, NULL);
/* Compute the tls var address and give it the managed-pointer stack type. */
5654 addr = emit_managed_static_data_access (cfg, thread_ins, load->dreg);
5655 addr->klass = mono_class_from_mono_type (tls_field->type);
5656 addr->type = STACK_MP;
/* Cache the address in a method-local temporary for later accesses. */
5657 *cached_tls_addr = temp = mono_compile_create_var (cfg, type_from_stack_type (addr), OP_LOCAL);
5658 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, addr);
5660 EMIT_NEW_TEMPLOAD (cfg, addr, temp->inst_c0);
5665 * mono_method_to_ir:
5667 * Translate the .net IL into linear IR.
5670 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
5671 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
5672 guint inline_offset, gboolean is_virtual_call)
5675 MonoInst *ins, **sp, **stack_start;
5676 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
5677 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
5678 MonoMethod *cmethod, *method_definition;
5679 MonoInst **arg_array;
5680 MonoMethodHeader *header;
5682 guint32 token, ins_flag;
5684 MonoClass *constrained_call = NULL;
5685 unsigned char *ip, *end, *target, *err_pos;
5686 static double r8_0 = 0.0;
5687 MonoMethodSignature *sig;
5688 MonoGenericContext *generic_context = NULL;
5689 MonoGenericContainer *generic_container = NULL;
5690 MonoType **param_types;
5691 int i, n, start_new_bblock, dreg;
5692 int num_calls = 0, inline_costs = 0;
5693 int breakpoint_id = 0;
5695 MonoBoolean security, pinvoke;
5696 MonoSecurityManager* secman = NULL;
5697 MonoDeclSecurityActions actions;
5698 GSList *class_inits = NULL;
5699 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
5701 gboolean init_locals, seq_points, skip_dead_blocks;
5702 gboolean disable_inline;
5703 MonoInst *cached_tls_addr = NULL;
5705 disable_inline = is_jit_optimizer_disabled (method);
5707 /* serialization and xdomain stuff may need access to private fields and methods */
5708 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
5709 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
5710 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
5711 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
5712 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
5713 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
5715 dont_verify |= mono_security_get_mode () == MONO_SECURITY_MODE_SMCS_HACK;
5717 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
5718 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
5719 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
5720 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
5721 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
5723 image = method->klass->image;
5724 header = mono_method_get_header (method);
5726 MonoLoaderError *error;
5728 if ((error = mono_loader_get_last_error ())) {
5729 mono_cfg_set_exception (cfg, error->exception_type);
5731 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
5732 cfg->exception_message = g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name);
5734 goto exception_exit;
5736 generic_container = mono_method_get_generic_container (method);
5737 sig = mono_method_signature (method);
5738 num_args = sig->hasthis + sig->param_count;
5739 ip = (unsigned char*)header->code;
5740 cfg->cil_start = ip;
5741 end = ip + header->code_size;
5742 mono_jit_stats.cil_code_size += header->code_size;
5743 init_locals = header->init_locals;
5745 seq_points = cfg->gen_seq_points && cfg->method == method;
5748 * Methods without init_locals set could cause asserts in various passes
5753 method_definition = method;
5754 while (method_definition->is_inflated) {
5755 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
5756 method_definition = imethod->declaring;
5759 /* SkipVerification is not allowed if core-clr is enabled */
5760 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
5762 dont_verify_stloc = TRUE;
5765 if (mono_debug_using_mono_debugger ())
5766 cfg->keep_cil_nops = TRUE;
5768 if (sig->is_inflated)
5769 generic_context = mono_method_get_context (method);
5770 else if (generic_container)
5771 generic_context = &generic_container->context;
5772 cfg->generic_context = generic_context;
5774 if (!cfg->generic_sharing_context)
5775 g_assert (!sig->has_type_parameters);
5777 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
5778 g_assert (method->is_inflated);
5779 g_assert (mono_method_get_context (method)->method_inst);
5781 if (method->is_inflated && mono_method_get_context (method)->method_inst)
5782 g_assert (sig->generic_param_count);
5784 if (cfg->method == method) {
5785 cfg->real_offset = 0;
5787 cfg->real_offset = inline_offset;
5790 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
5791 cfg->cil_offset_to_bb_len = header->code_size;
5793 cfg->current_method = method;
5795 if (cfg->verbose_level > 2)
5796 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
5798 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
5800 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
5801 for (n = 0; n < sig->param_count; ++n)
5802 param_types [n + sig->hasthis] = sig->params [n];
5803 cfg->arg_types = param_types;
5805 dont_inline = g_list_prepend (dont_inline, method);
5806 if (cfg->method == method) {
5808 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
5809 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
5812 NEW_BBLOCK (cfg, start_bblock);
5813 cfg->bb_entry = start_bblock;
5814 start_bblock->cil_code = NULL;
5815 start_bblock->cil_length = 0;
5816 #if defined(__native_client_codegen__)
5817 MONO_INST_NEW (cfg, ins, OP_NACL_GC_SAFE_POINT);
5818 ins->dreg = alloc_dreg (cfg, STACK_I4);
5819 MONO_ADD_INS (start_bblock, ins);
5823 NEW_BBLOCK (cfg, end_bblock);
5824 cfg->bb_exit = end_bblock;
5825 end_bblock->cil_code = NULL;
5826 end_bblock->cil_length = 0;
5827 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
5828 g_assert (cfg->num_bblocks == 2);
5830 arg_array = cfg->args;
5832 if (header->num_clauses) {
5833 cfg->spvars = g_hash_table_new (NULL, NULL);
5834 cfg->exvars = g_hash_table_new (NULL, NULL);
5836 /* handle exception clauses */
5837 for (i = 0; i < header->num_clauses; ++i) {
5838 MonoBasicBlock *try_bb;
5839 MonoExceptionClause *clause = &header->clauses [i];
5840 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
5841 try_bb->real_offset = clause->try_offset;
5842 try_bb->try_start = TRUE;
5843 try_bb->region = ((i + 1) << 8) | clause->flags;
5844 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
5845 tblock->real_offset = clause->handler_offset;
5846 tblock->flags |= BB_EXCEPTION_HANDLER;
5848 link_bblock (cfg, try_bb, tblock);
5850 if (*(ip + clause->handler_offset) == CEE_POP)
5851 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
5853 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
5854 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
5855 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
5856 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5857 MONO_ADD_INS (tblock, ins);
5860 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
5861 MONO_ADD_INS (tblock, ins);
5864 /* todo: is a fault block unsafe to optimize? */
5865 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
5866 tblock->flags |= BB_EXCEPTION_UNSAFE;
5870 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
5872 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
5874 /* catch and filter blocks get the exception object on the stack */
5875 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
5876 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5877 MonoInst *dummy_use;
5879 /* mostly like handle_stack_args (), but just sets the input args */
5880 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
5881 tblock->in_scount = 1;
5882 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5883 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5886 * Add a dummy use for the exvar so its liveness info will be
5890 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
5892 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5893 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
5894 tblock->flags |= BB_EXCEPTION_HANDLER;
5895 tblock->real_offset = clause->data.filter_offset;
5896 tblock->in_scount = 1;
5897 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5898 /* The filter block shares the exvar with the handler block */
5899 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5900 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5901 MONO_ADD_INS (tblock, ins);
5905 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
5906 clause->data.catch_class &&
5907 cfg->generic_sharing_context &&
5908 mono_class_check_context_used (clause->data.catch_class)) {
5910 * In shared generic code with catch
5911 * clauses containing type variables
5912 * the exception handling code has to
5913 * be able to get to the rgctx.
5914 * Therefore we have to make sure that
5915 * the vtable/mrgctx argument (for
5916 * static or generic methods) or the
5917 * "this" argument (for non-static
5918 * methods) are live.
5920 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5921 mini_method_get_context (method)->method_inst ||
5922 method->klass->valuetype) {
5923 mono_get_vtable_var (cfg);
5925 MonoInst *dummy_use;
5927 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
5932 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
5933 cfg->cbb = start_bblock;
5934 cfg->args = arg_array;
5935 mono_save_args (cfg, sig, inline_args);
5938 /* FIRST CODE BLOCK */
5939 NEW_BBLOCK (cfg, bblock);
5940 bblock->cil_code = ip;
5944 ADD_BBLOCK (cfg, bblock);
5946 if (cfg->method == method) {
5947 breakpoint_id = mono_debugger_method_has_breakpoint (method);
5948 if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
5949 MONO_INST_NEW (cfg, ins, OP_BREAK);
5950 MONO_ADD_INS (bblock, ins);
5954 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5955 secman = mono_security_manager_get_methods ();
5957 security = (secman && mono_method_has_declsec (method));
5958 /* at this point having security doesn't mean we have any code to generate */
5959 if (security && (cfg->method == method)) {
5960 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
5961 * And we do not want to enter the next section (with allocation) if we
5962 * have nothing to generate */
5963 security = mono_declsec_get_demands (method, &actions);
5966 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
5967 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
5969 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5970 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5971 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
5973 /* unless the method or it's class has the [SuppressUnmanagedCodeSecurity] attribute */
5974 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5978 mono_custom_attrs_free (custom);
5981 custom = mono_custom_attrs_from_class (wrapped->klass);
5982 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5986 mono_custom_attrs_free (custom);
5989 /* not a P/Invoke after all */
5994 if ((init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
5995 /* we use a separate basic block for the initialization code */
5996 NEW_BBLOCK (cfg, init_localsbb);
5997 cfg->bb_init = init_localsbb;
5998 init_localsbb->real_offset = cfg->real_offset;
5999 start_bblock->next_bb = init_localsbb;
6000 init_localsbb->next_bb = bblock;
6001 link_bblock (cfg, start_bblock, init_localsbb);
6002 link_bblock (cfg, init_localsbb, bblock);
6004 cfg->cbb = init_localsbb;
6006 start_bblock->next_bb = bblock;
6007 link_bblock (cfg, start_bblock, bblock);
6010 /* at this point we know, if security is TRUE, that some code needs to be generated */
6011 if (security && (cfg->method == method)) {
6014 mono_jit_stats.cas_demand_generation++;
6016 if (actions.demand.blob) {
6017 /* Add code for SecurityAction.Demand */
6018 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
6019 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
6020 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
6021 mono_emit_method_call (cfg, secman->demand, args, NULL);
6023 if (actions.noncasdemand.blob) {
6024 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
6025 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
6026 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
6027 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
6028 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
6029 mono_emit_method_call (cfg, secman->demand, args, NULL);
6031 if (actions.demandchoice.blob) {
6032 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
6033 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
6034 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
6035 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
6036 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
6040 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
6042 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
6045 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
6046 /* check if this is native code, e.g. an icall or a p/invoke */
6047 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
6048 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
6050 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
6051 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
6053 /* if this ia a native call then it can only be JITted from platform code */
6054 if ((icall || pinvk) && method->klass && method->klass->image) {
6055 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
6056 MonoException *ex = icall ? mono_get_exception_security () :
6057 mono_get_exception_method_access ();
6058 emit_throw_exception (cfg, ex);
6065 if (header->code_size == 0)
6068 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
6073 if (cfg->method == method)
6074 mono_debug_init_method (cfg, bblock, breakpoint_id);
6076 for (n = 0; n < header->num_locals; ++n) {
6077 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
6082 /* We force the vtable variable here for all shared methods
6083 for the possibility that they might show up in a stack
6084 trace where their exact instantiation is needed. */
6085 if (cfg->generic_sharing_context && method == cfg->method) {
6086 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
6087 mini_method_get_context (method)->method_inst ||
6088 method->klass->valuetype) {
6089 mono_get_vtable_var (cfg);
6091 /* FIXME: Is there a better way to do this?
6092 We need the variable live for the duration
6093 of the whole method. */
6094 cfg->args [0]->flags |= MONO_INST_INDIRECT;
6098 /* add a check for this != NULL to inlined methods */
6099 if (is_virtual_call) {
6102 NEW_ARGLOAD (cfg, arg_ins, 0);
6103 MONO_ADD_INS (cfg->cbb, arg_ins);
6104 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
6107 skip_dead_blocks = !dont_verify;
6108 if (skip_dead_blocks) {
6109 original_bb = bb = mono_basic_block_split (method, &error);
6110 if (!mono_error_ok (&error)) {
6111 mono_error_cleanup (&error);
6117 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
6118 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
6121 start_new_bblock = 0;
6124 if (cfg->method == method)
6125 cfg->real_offset = ip - header->code;
6127 cfg->real_offset = inline_offset;
6132 if (start_new_bblock) {
6133 bblock->cil_length = ip - bblock->cil_code;
6134 if (start_new_bblock == 2) {
6135 g_assert (ip == tblock->cil_code);
6137 GET_BBLOCK (cfg, tblock, ip);
6139 bblock->next_bb = tblock;
6142 start_new_bblock = 0;
6143 for (i = 0; i < bblock->in_scount; ++i) {
6144 if (cfg->verbose_level > 3)
6145 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
6146 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
6150 g_slist_free (class_inits);
6153 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
6154 link_bblock (cfg, bblock, tblock);
6155 if (sp != stack_start) {
6156 handle_stack_args (cfg, stack_start, sp - stack_start);
6158 CHECK_UNVERIFIABLE (cfg);
6160 bblock->next_bb = tblock;
6163 for (i = 0; i < bblock->in_scount; ++i) {
6164 if (cfg->verbose_level > 3)
6165 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
6166 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
6169 g_slist_free (class_inits);
6174 if (skip_dead_blocks) {
6175 int ip_offset = ip - header->code;
6177 if (ip_offset == bb->end)
6181 int op_size = mono_opcode_size (ip, end);
6182 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
6184 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
6186 if (ip_offset + op_size == bb->end) {
6187 MONO_INST_NEW (cfg, ins, OP_NOP);
6188 MONO_ADD_INS (bblock, ins);
6189 start_new_bblock = 1;
6197 * Sequence points are points where the debugger can place a breakpoint.
6198 * Currently, we generate these automatically at points where the IL
6201 if (seq_points && sp == stack_start) {
6202 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
6203 MONO_ADD_INS (cfg->cbb, ins);
6206 bblock->real_offset = cfg->real_offset;
6208 if ((cfg->method == method) && cfg->coverage_info) {
6209 guint32 cil_offset = ip - header->code;
6210 cfg->coverage_info->data [cil_offset].cil_code = ip;
6212 /* TODO: Use an increment here */
6213 #if defined(TARGET_X86)
6214 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
6215 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
6217 MONO_ADD_INS (cfg->cbb, ins);
6219 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
6220 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
6224 if (cfg->verbose_level > 3)
6225 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
6229 if (cfg->keep_cil_nops)
6230 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
6232 MONO_INST_NEW (cfg, ins, OP_NOP);
6234 MONO_ADD_INS (bblock, ins);
6237 if (should_insert_brekpoint (cfg->method))
6238 MONO_INST_NEW (cfg, ins, OP_BREAK);
6240 MONO_INST_NEW (cfg, ins, OP_NOP);
6242 MONO_ADD_INS (bblock, ins);
6248 CHECK_STACK_OVF (1);
6249 n = (*ip)-CEE_LDARG_0;
6251 EMIT_NEW_ARGLOAD (cfg, ins, n);
6259 CHECK_STACK_OVF (1);
6260 n = (*ip)-CEE_LDLOC_0;
6262 EMIT_NEW_LOCLOAD (cfg, ins, n);
6271 n = (*ip)-CEE_STLOC_0;
6274 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
6276 emit_stloc_ir (cfg, sp, header, n);
6283 CHECK_STACK_OVF (1);
6286 EMIT_NEW_ARGLOAD (cfg, ins, n);
6292 CHECK_STACK_OVF (1);
6295 NEW_ARGLOADA (cfg, ins, n);
6296 MONO_ADD_INS (cfg->cbb, ins);
6306 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
6308 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
6313 CHECK_STACK_OVF (1);
6316 EMIT_NEW_LOCLOAD (cfg, ins, n);
6320 case CEE_LDLOCA_S: {
6321 unsigned char *tmp_ip;
6323 CHECK_STACK_OVF (1);
6324 CHECK_LOCAL (ip [1]);
6326 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
6332 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
6341 CHECK_LOCAL (ip [1]);
6342 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
6344 emit_stloc_ir (cfg, sp, header, ip [1]);
6349 CHECK_STACK_OVF (1);
6350 EMIT_NEW_PCONST (cfg, ins, NULL);
6351 ins->type = STACK_OBJ;
6356 CHECK_STACK_OVF (1);
6357 EMIT_NEW_ICONST (cfg, ins, -1);
6370 CHECK_STACK_OVF (1);
6371 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
6377 CHECK_STACK_OVF (1);
6379 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
6385 CHECK_STACK_OVF (1);
6386 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
6392 CHECK_STACK_OVF (1);
6393 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6394 ins->type = STACK_I8;
6395 ins->dreg = alloc_dreg (cfg, STACK_I8);
6397 ins->inst_l = (gint64)read64 (ip);
6398 MONO_ADD_INS (bblock, ins);
6404 gboolean use_aotconst = FALSE;
6406 #ifdef TARGET_POWERPC
6407 /* FIXME: Clean this up */
6408 if (cfg->compile_aot)
6409 use_aotconst = TRUE;
6412 /* FIXME: we should really allocate this only late in the compilation process */
6413 f = mono_domain_alloc (cfg->domain, sizeof (float));
6415 CHECK_STACK_OVF (1);
6421 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
6423 dreg = alloc_freg (cfg);
6424 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
6425 ins->type = STACK_R8;
6427 MONO_INST_NEW (cfg, ins, OP_R4CONST);
6428 ins->type = STACK_R8;
6429 ins->dreg = alloc_dreg (cfg, STACK_R8);
6431 MONO_ADD_INS (bblock, ins);
6441 gboolean use_aotconst = FALSE;
6443 #ifdef TARGET_POWERPC
6444 /* FIXME: Clean this up */
6445 if (cfg->compile_aot)
6446 use_aotconst = TRUE;
6449 /* FIXME: we should really allocate this only late in the compilation process */
6450 d = mono_domain_alloc (cfg->domain, sizeof (double));
6452 CHECK_STACK_OVF (1);
6458 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
6460 dreg = alloc_freg (cfg);
6461 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
6462 ins->type = STACK_R8;
6464 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6465 ins->type = STACK_R8;
6466 ins->dreg = alloc_dreg (cfg, STACK_R8);
6468 MONO_ADD_INS (bblock, ins);
6477 MonoInst *temp, *store;
6479 CHECK_STACK_OVF (1);
6483 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
6484 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
6486 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6489 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6502 if (sp [0]->type == STACK_R8)
6503 /* we need to pop the value from the x86 FP stack */
6504 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
6513 if (stack_start != sp)
6515 token = read32 (ip + 1);
6516 /* FIXME: check the signature matches */
6517 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6519 if (!cmethod || mono_loader_get_last_error ())
6522 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
6523 GENERIC_SHARING_FAILURE (CEE_JMP);
6525 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
6526 CHECK_CFG_EXCEPTION;
6528 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6530 MonoMethodSignature *fsig = mono_method_signature (cmethod);
6533 /* Handle tail calls similarly to calls */
6534 n = fsig->param_count + fsig->hasthis;
6536 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
6537 call->method = cmethod;
6538 call->tail_call = TRUE;
6539 call->signature = mono_method_signature (cmethod);
6540 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
6541 call->inst.inst_p0 = cmethod;
6542 for (i = 0; i < n; ++i)
6543 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
6545 mono_arch_emit_call (cfg, call);
6546 MONO_ADD_INS (bblock, (MonoInst*)call);
6549 for (i = 0; i < num_args; ++i)
6550 /* Prevent arguments from being optimized away */
6551 arg_array [i]->flags |= MONO_INST_VOLATILE;
6553 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6554 ins = (MonoInst*)call;
6555 ins->inst_p0 = cmethod;
6556 MONO_ADD_INS (bblock, ins);
6560 start_new_bblock = 1;
6565 case CEE_CALLVIRT: {
6566 MonoInst *addr = NULL;
6567 MonoMethodSignature *fsig = NULL;
6569 int virtual = *ip == CEE_CALLVIRT;
6570 int calli = *ip == CEE_CALLI;
6571 gboolean pass_imt_from_rgctx = FALSE;
6572 MonoInst *imt_arg = NULL;
6573 gboolean pass_vtable = FALSE;
6574 gboolean pass_mrgctx = FALSE;
6575 MonoInst *vtable_arg = NULL;
6576 gboolean check_this = FALSE;
6577 gboolean supported_tail_call = FALSE;
6580 token = read32 (ip + 1);
6587 if (method->wrapper_type != MONO_WRAPPER_NONE)
6588 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
6590 fsig = mono_metadata_parse_signature (image, token);
6592 n = fsig->param_count + fsig->hasthis;
6594 if (method->dynamic && fsig->pinvoke) {
6598 * This is a call through a function pointer using a pinvoke
6599 * signature. Have to create a wrapper and call that instead.
6600 * FIXME: This is very slow, need to create a wrapper at JIT time
6601 * instead based on the signature.
6603 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
6604 EMIT_NEW_PCONST (cfg, args [1], fsig);
6606 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
6609 MonoMethod *cil_method;
6611 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6612 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
6613 cil_method = cmethod;
6614 } else if (constrained_call) {
6615 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
6617 * This is needed since get_method_constrained can't find
6618 * the method in klass representing a type var.
6619 * The type var is guaranteed to be a reference type in this
6622 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6623 cil_method = cmethod;
6624 g_assert (!cmethod->klass->valuetype);
6626 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
6629 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6630 cil_method = cmethod;
6633 if (!cmethod || mono_loader_get_last_error ())
6635 if (!dont_verify && !cfg->skip_visibility) {
6636 MonoMethod *target_method = cil_method;
6637 if (method->is_inflated) {
6638 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
6640 if (!mono_method_can_access_method (method_definition, target_method) &&
6641 !mono_method_can_access_method (method, cil_method))
6642 METHOD_ACCESS_FAILURE;
6645 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
6646 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
6648 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
6649 /* MS.NET seems to silently convert this to a callvirt */
6654 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
6655 * converts to a callvirt.
6657 * tests/bug-515884.il is an example of this behavior
6659 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
6660 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
6661 if (!virtual && cmethod->klass->marshalbyref && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
6665 if (!cmethod->klass->inited)
6666 if (!mono_class_init (cmethod->klass))
6669 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
6670 mini_class_is_system_array (cmethod->klass)) {
6671 array_rank = cmethod->klass->rank;
6672 fsig = mono_method_signature (cmethod);
6674 fsig = mono_method_signature (cmethod);
6679 if (fsig->pinvoke) {
6680 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
6681 check_for_pending_exc, FALSE);
6682 fsig = mono_method_signature (wrapper);
6683 } else if (constrained_call) {
6684 fsig = mono_method_signature (cmethod);
6686 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
6690 mono_save_token_info (cfg, image, token, cil_method);
6692 n = fsig->param_count + fsig->hasthis;
6694 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
6695 if (check_linkdemand (cfg, method, cmethod))
6697 CHECK_CFG_EXCEPTION;
6700 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
6701 g_assert_not_reached ();
6704 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
6707 if (!cfg->generic_sharing_context && cmethod)
6708 g_assert (!mono_method_check_context_used (cmethod));
6712 //g_assert (!virtual || fsig->hasthis);
6716 if (constrained_call) {
6718 * We have the `constrained.' prefix opcode.
6720 if (constrained_call->valuetype && !cmethod->klass->valuetype) {
6722 * The type parameter is instantiated as a valuetype,
6723 * but that type doesn't override the method we're
6724 * calling, so we need to box `this'.
6726 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
6727 ins->klass = constrained_call;
6728 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call));
6729 CHECK_CFG_EXCEPTION;
6730 } else if (!constrained_call->valuetype) {
6731 int dreg = alloc_ireg_ref (cfg);
6734 * The type parameter is instantiated as a reference
6735 * type. We have a managed pointer on the stack, so
6736 * we need to dereference it here.
6738 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
6739 ins->type = STACK_OBJ;
6741 } else if (cmethod->klass->valuetype)
6743 constrained_call = NULL;
6746 if (*ip != CEE_CALLI && check_call_signature (cfg, fsig, sp))
6750 * If the callee is a shared method, then its static cctor
6751 * might not get called after the call was patched.
6753 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
6754 emit_generic_class_init (cfg, cmethod->klass);
6755 CHECK_TYPELOAD (cmethod->klass);
6758 if (cmethod && ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
6759 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
6760 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6761 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
6762 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6765 * Pass vtable iff target method might
6766 * be shared, which means that sharing
6767 * is enabled for its class and its
6768 * context is sharable (and it's not a
6771 if (sharing_enabled && context_sharable &&
6772 !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
6776 if (cmethod && mini_method_get_context (cmethod) &&
6777 mini_method_get_context (cmethod)->method_inst) {
6778 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6779 MonoGenericContext *context = mini_method_get_context (cmethod);
6780 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6782 g_assert (!pass_vtable);
6784 if (sharing_enabled && context_sharable)
6788 if (cfg->generic_sharing_context && cmethod) {
6789 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
6791 context_used = mono_method_check_context_used (cmethod);
6793 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
6794 /* Generic method interface
6795 calls are resolved via a
6796 helper function and don't
6798 if (!cmethod_context || !cmethod_context->method_inst)
6799 pass_imt_from_rgctx = TRUE;
6803 * If a shared method calls another
6804 * shared method then the caller must
6805 * have a generic sharing context
6806 * because the magic trampoline
6807 * requires it. FIXME: We shouldn't
6808 * have to force the vtable/mrgctx
6809 * variable here. Instead there
6810 * should be a flag in the cfg to
6811 * request a generic sharing context.
6814 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
6815 mono_get_vtable_var (cfg);
6820 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
6822 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
6824 CHECK_TYPELOAD (cmethod->klass);
6825 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
6830 g_assert (!vtable_arg);
6832 if (!cfg->compile_aot) {
6834 * emit_get_rgctx_method () calls mono_class_vtable () so check
6835 * for type load errors before.
6837 mono_class_setup_vtable (cmethod->klass);
6838 CHECK_TYPELOAD (cmethod->klass);
6841 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
6843 /* !marshalbyref is needed to properly handle generic methods + remoting */
6844 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
6845 MONO_METHOD_IS_FINAL (cmethod)) &&
6846 !cmethod->klass->marshalbyref) {
6853 if (pass_imt_from_rgctx) {
6854 g_assert (!pass_vtable);
6857 imt_arg = emit_get_rgctx_method (cfg, context_used,
6858 cmethod, MONO_RGCTX_INFO_METHOD);
6862 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
6864 /* Calling virtual generic methods */
6865 if (cmethod && virtual &&
6866 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
6867 !(MONO_METHOD_IS_FINAL (cmethod) &&
6868 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
6869 mono_method_signature (cmethod)->generic_param_count) {
6870 MonoInst *this_temp, *this_arg_temp, *store;
6871 MonoInst *iargs [4];
6873 g_assert (mono_method_signature (cmethod)->is_inflated);
6875 /* Prevent inlining of methods that contain indirect calls */
6878 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
6879 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt) {
6880 g_assert (!imt_arg);
6882 g_assert (cmethod->is_inflated);
6883 imt_arg = emit_get_rgctx_method (cfg, context_used,
6884 cmethod, MONO_RGCTX_INFO_METHOD);
6885 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, sp [0], imt_arg, NULL);
6889 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
6890 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
6891 MONO_ADD_INS (bblock, store);
6893 /* FIXME: This should be a managed pointer */
6894 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
6896 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
6897 iargs [1] = emit_get_rgctx_method (cfg, context_used,
6898 cmethod, MONO_RGCTX_INFO_METHOD);
6899 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
6900 addr = mono_emit_jit_icall (cfg,
6901 mono_helper_compile_generic_method, iargs);
6903 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
6905 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL);
6908 if (!MONO_TYPE_IS_VOID (fsig->ret))
6909 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6911 CHECK_CFG_EXCEPTION;
6919 * Implement a workaround for the inherent races involved in locking:
6925 * If a thread abort happens between the call to Monitor.Enter () and the start of the
6926 * try block, the Exit () won't be executed, see:
6927 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
6928 * To work around this, we extend such try blocks to include the last x bytes
6929 * of the Monitor.Enter () call.
6931 if (cmethod && cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
6932 MonoBasicBlock *tbb;
6934 GET_BBLOCK (cfg, tbb, ip + 5);
6936 * Only extend try blocks with a finally, to avoid catching exceptions thrown
6937 * from Monitor.Enter like ArgumentNullException.
6939 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
6940 /* Mark this bblock as needing to be extended */
6941 tbb->extend_try_block = TRUE;
6945 /* Conversion to a JIT intrinsic */
6946 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
6948 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6949 type_to_eval_stack_type ((cfg), fsig->ret, ins);
6954 CHECK_CFG_EXCEPTION;
6962 if ((cfg->opt & MONO_OPT_INLINE) && cmethod &&
6963 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
6964 !disable_inline && mono_method_check_inlining (cfg, cmethod) &&
6965 !g_list_find (dont_inline, cmethod)) {
6967 gboolean always = FALSE;
6969 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
6970 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
6971 /* Prevent inlining of methods that call wrappers */
6973 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
6977 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, always))) {
6979 cfg->real_offset += 5;
6982 if (!MONO_TYPE_IS_VOID (fsig->ret))
6983 /* *sp is already set by inline_method */
6986 inline_costs += costs;
6992 inline_costs += 10 * num_calls++;
6994 /* Tail recursion elimination */
6995 if ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
6996 gboolean has_vtargs = FALSE;
6999 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
7002 /* keep it simple */
7003 for (i = fsig->param_count - 1; i >= 0; i--) {
7004 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
7009 for (i = 0; i < n; ++i)
7010 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
7011 MONO_INST_NEW (cfg, ins, OP_BR);
7012 MONO_ADD_INS (bblock, ins);
7013 tblock = start_bblock->out_bb [0];
7014 link_bblock (cfg, bblock, tblock);
7015 ins->inst_target_bb = tblock;
7016 start_new_bblock = 1;
7018 /* skip the CEE_RET, too */
7019 if (ip_in_bb (cfg, bblock, ip + 5))
7029 /* Generic sharing */
7030 /* FIXME: only do this for generic methods if
7031 they are not shared! */
7032 if (context_used && !imt_arg && !array_rank &&
7033 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
7034 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
7035 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
7036 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
7039 g_assert (cfg->generic_sharing_context && cmethod);
7043 * We are compiling a call to a
7044 * generic method from shared code,
7045 * which means that we have to look up
7046 * the method in the rgctx and do an
7049 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7052 /* Indirect calls */
7054 g_assert (!imt_arg);
7056 if (*ip == CEE_CALL)
7057 g_assert (context_used);
7058 else if (*ip == CEE_CALLI)
7059 g_assert (!vtable_arg);
7061 /* FIXME: what the hell is this??? */
7062 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
7063 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
7065 /* Prevent inlining of methods with indirect calls */
7071 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, vtable_arg);
7072 call = (MonoCallInst*)ins;
7074 if (addr->opcode == OP_AOTCONST && addr->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
7076 * Instead of emitting an indirect call, emit a direct call
7077 * with the contents of the aotconst as the patch info.
7079 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_p0, fsig, sp);
7081 } else if (addr->opcode == OP_GOT_ENTRY && addr->inst_right->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
7082 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_right->inst_left, fsig, sp);
7085 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL);
7088 if (!MONO_TYPE_IS_VOID (fsig->ret))
7089 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
7091 CHECK_CFG_EXCEPTION;
7102 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
7103 MonoInst *val = sp [fsig->param_count];
7105 if (val->type == STACK_OBJ) {
7106 MonoInst *iargs [2];
7111 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
7114 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
7115 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
7116 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !(val->opcode == OP_PCONST && val->inst_c0 == 0))
7117 emit_write_barrier (cfg, addr, val, 0);
7118 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
7119 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
7121 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
7124 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
7125 if (!cmethod->klass->element_class->valuetype && !readonly)
7126 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
7127 CHECK_TYPELOAD (cmethod->klass);
7130 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
7133 g_assert_not_reached ();
7136 CHECK_CFG_EXCEPTION;
7143 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
7145 if (!MONO_TYPE_IS_VOID (fsig->ret))
7146 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
7148 CHECK_CFG_EXCEPTION;
7155 /* Tail prefix / tail call optimization */
7157 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
7158 /* FIXME: runtime generic context pointer for jumps? */
7159 /* FIXME: handle this for generic sharing eventually */
7160 supported_tail_call = cmethod &&
7161 ((((ins_flag & MONO_INST_TAILCALL) && (*ip == CEE_CALL))
7162 ))//|| ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && ip [5] == CEE_RET))
7163 && !vtable_arg && !cfg->generic_sharing_context && is_supported_tail_call (cfg, method, cmethod, fsig);
7165 if (supported_tail_call) {
7168 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
7171 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
7173 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
7174 /* Handle tail calls similarly to calls */
7175 call = mono_emit_call_args (cfg, mono_method_signature (cmethod), sp, FALSE, FALSE, TRUE, FALSE);
7177 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
7178 call->tail_call = TRUE;
7179 call->method = cmethod;
7180 call->signature = mono_method_signature (cmethod);
7183 * We implement tail calls by storing the actual arguments into the
7184 * argument variables, then emitting a CEE_JMP.
7186 for (i = 0; i < n; ++i) {
7187 /* Prevent argument from being register allocated */
7188 arg_array [i]->flags |= MONO_INST_VOLATILE;
7189 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
7193 ins = (MonoInst*)call;
7194 ins->inst_p0 = cmethod;
7195 ins->inst_p1 = arg_array [0];
7196 MONO_ADD_INS (bblock, ins);
7197 link_bblock (cfg, bblock, end_bblock);
7198 start_new_bblock = 1;
7200 CHECK_CFG_EXCEPTION;
7205 // FIXME: Eliminate unreachable epilogs
7208 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
7209 * only reachable from this call.
7211 GET_BBLOCK (cfg, tblock, ip);
7212 if (tblock == bblock || tblock->in_count == 0)
7219 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL,
7220 imt_arg, vtable_arg);
7222 if (!MONO_TYPE_IS_VOID (fsig->ret))
7223 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
7225 CHECK_CFG_EXCEPTION;
7232 if (cfg->method != method) {
7233 /* return from inlined method */
7235 * If in_count == 0, that means the ret is unreachable due to
7236 * being preceded by a throw. In that case, inline_method () will
7237 * handle setting the return value
7238 * (test case: test_0_inline_throw ()).
7240 if (return_var && cfg->cbb->in_count) {
7244 //g_assert (returnvar != -1);
7245 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
7246 cfg->ret_var_set = TRUE;
7250 MonoType *ret_type = mono_method_signature (method)->ret;
7254 * Place a seq point here too even though the IL stack is not
7255 * empty, so a step over on
7258 * will work correctly.
7260 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
7261 MONO_ADD_INS (cfg->cbb, ins);
7264 g_assert (!return_var);
7268 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
7271 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
7274 if (!cfg->vret_addr) {
7277 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
7279 EMIT_NEW_RETLOADA (cfg, ret_addr);
7281 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
7282 ins->klass = mono_class_from_mono_type (ret_type);
7285 #ifdef MONO_ARCH_SOFT_FLOAT
7286 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
7287 MonoInst *iargs [1];
7291 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
7292 mono_arch_emit_setret (cfg, method, conv);
7294 mono_arch_emit_setret (cfg, method, *sp);
7297 mono_arch_emit_setret (cfg, method, *sp);
7302 if (sp != stack_start)
7304 MONO_INST_NEW (cfg, ins, OP_BR);
7306 ins->inst_target_bb = end_bblock;
7307 MONO_ADD_INS (bblock, ins);
7308 link_bblock (cfg, bblock, end_bblock);
7309 start_new_bblock = 1;
7313 MONO_INST_NEW (cfg, ins, OP_BR);
7315 target = ip + 1 + (signed char)(*ip);
7317 GET_BBLOCK (cfg, tblock, target);
7318 link_bblock (cfg, bblock, tblock);
7319 ins->inst_target_bb = tblock;
7320 if (sp != stack_start) {
7321 handle_stack_args (cfg, stack_start, sp - stack_start);
7323 CHECK_UNVERIFIABLE (cfg);
7325 MONO_ADD_INS (bblock, ins);
7326 start_new_bblock = 1;
7327 inline_costs += BRANCH_COST;
7341 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
7343 target = ip + 1 + *(signed char*)ip;
7349 inline_costs += BRANCH_COST;
7353 MONO_INST_NEW (cfg, ins, OP_BR);
7356 target = ip + 4 + (gint32)read32(ip);
7358 GET_BBLOCK (cfg, tblock, target);
7359 link_bblock (cfg, bblock, tblock);
7360 ins->inst_target_bb = tblock;
7361 if (sp != stack_start) {
7362 handle_stack_args (cfg, stack_start, sp - stack_start);
7364 CHECK_UNVERIFIABLE (cfg);
7367 MONO_ADD_INS (bblock, ins);
7369 start_new_bblock = 1;
7370 inline_costs += BRANCH_COST;
7377 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
7378 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
7379 guint32 opsize = is_short ? 1 : 4;
7381 CHECK_OPSIZE (opsize);
7383 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
7386 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
7391 GET_BBLOCK (cfg, tblock, target);
7392 link_bblock (cfg, bblock, tblock);
7393 GET_BBLOCK (cfg, tblock, ip);
7394 link_bblock (cfg, bblock, tblock);
7396 if (sp != stack_start) {
7397 handle_stack_args (cfg, stack_start, sp - stack_start);
7398 CHECK_UNVERIFIABLE (cfg);
7401 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
7402 cmp->sreg1 = sp [0]->dreg;
7403 type_from_op (cmp, sp [0], NULL);
7406 #if SIZEOF_REGISTER == 4
7407 if (cmp->opcode == OP_LCOMPARE_IMM) {
7408 /* Convert it to OP_LCOMPARE */
7409 MONO_INST_NEW (cfg, ins, OP_I8CONST);
7410 ins->type = STACK_I8;
7411 ins->dreg = alloc_dreg (cfg, STACK_I8);
7413 MONO_ADD_INS (bblock, ins);
7414 cmp->opcode = OP_LCOMPARE;
7415 cmp->sreg2 = ins->dreg;
7418 MONO_ADD_INS (bblock, cmp);
7420 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
7421 type_from_op (ins, sp [0], NULL);
7422 MONO_ADD_INS (bblock, ins);
7423 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
7424 GET_BBLOCK (cfg, tblock, target);
7425 ins->inst_true_bb = tblock;
7426 GET_BBLOCK (cfg, tblock, ip);
7427 ins->inst_false_bb = tblock;
7428 start_new_bblock = 2;
7431 inline_costs += BRANCH_COST;
7446 MONO_INST_NEW (cfg, ins, *ip);
7448 target = ip + 4 + (gint32)read32(ip);
7454 inline_costs += BRANCH_COST;
7458 MonoBasicBlock **targets;
7459 MonoBasicBlock *default_bblock;
7460 MonoJumpInfoBBTable *table;
7461 int offset_reg = alloc_preg (cfg);
7462 int target_reg = alloc_preg (cfg);
7463 int table_reg = alloc_preg (cfg);
7464 int sum_reg = alloc_preg (cfg);
7465 gboolean use_op_switch;
7469 n = read32 (ip + 1);
7472 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
7476 CHECK_OPSIZE (n * sizeof (guint32));
7477 target = ip + n * sizeof (guint32);
7479 GET_BBLOCK (cfg, default_bblock, target);
7480 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
7482 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
7483 for (i = 0; i < n; ++i) {
7484 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
7485 targets [i] = tblock;
7486 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
7490 if (sp != stack_start) {
7492 * Link the current bb with the targets as well, so handle_stack_args
7493 * will set their in_stack correctly.
7495 link_bblock (cfg, bblock, default_bblock);
7496 for (i = 0; i < n; ++i)
7497 link_bblock (cfg, bblock, targets [i]);
7499 handle_stack_args (cfg, stack_start, sp - stack_start);
7501 CHECK_UNVERIFIABLE (cfg);
7504 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
7505 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
7508 for (i = 0; i < n; ++i)
7509 link_bblock (cfg, bblock, targets [i]);
7511 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
7512 table->table = targets;
7513 table->table_size = n;
7515 use_op_switch = FALSE;
7517 /* ARM implements SWITCH statements differently */
7518 /* FIXME: Make it use the generic implementation */
7519 if (!cfg->compile_aot)
7520 use_op_switch = TRUE;
7523 if (COMPILE_LLVM (cfg))
7524 use_op_switch = TRUE;
7526 cfg->cbb->has_jump_table = 1;
7528 if (use_op_switch) {
7529 MONO_INST_NEW (cfg, ins, OP_SWITCH);
7530 ins->sreg1 = src1->dreg;
7531 ins->inst_p0 = table;
7532 ins->inst_many_bb = targets;
7533 ins->klass = GUINT_TO_POINTER (n);
7534 MONO_ADD_INS (cfg->cbb, ins);
7536 if (sizeof (gpointer) == 8)
7537 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
7539 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
7541 #if SIZEOF_REGISTER == 8
7542 /* The upper word might not be zero, and we add it to a 64 bit address later */
7543 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
7546 if (cfg->compile_aot) {
7547 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
7549 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
7550 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
7551 ins->inst_p0 = table;
7552 ins->dreg = table_reg;
7553 MONO_ADD_INS (cfg->cbb, ins);
7556 /* FIXME: Use load_memindex */
7557 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
7558 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
7559 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
7561 start_new_bblock = 1;
7562 inline_costs += (BRANCH_COST * 2);
7582 dreg = alloc_freg (cfg);
7585 dreg = alloc_lreg (cfg);
7588 dreg = alloc_ireg_ref (cfg);
7591 dreg = alloc_preg (cfg);
7594 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
7595 ins->type = ldind_type [*ip - CEE_LDIND_I1];
7596 ins->flags |= ins_flag;
7598 MONO_ADD_INS (bblock, ins);
7613 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
7614 ins->flags |= ins_flag;
7616 MONO_ADD_INS (bblock, ins);
7618 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
7619 emit_write_barrier (cfg, sp [0], sp [1], -1);
7628 MONO_INST_NEW (cfg, ins, (*ip));
7630 ins->sreg1 = sp [0]->dreg;
7631 ins->sreg2 = sp [1]->dreg;
7632 type_from_op (ins, sp [0], sp [1]);
7634 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7636 /* Use the immediate opcodes if possible */
7637 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
7638 int imm_opcode = mono_op_to_op_imm (ins->opcode);
7639 if (imm_opcode != -1) {
7640 ins->opcode = imm_opcode;
7641 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
7644 sp [1]->opcode = OP_NOP;
7648 MONO_ADD_INS ((cfg)->cbb, (ins));
7650 *sp++ = mono_decompose_opcode (cfg, ins);
7667 MONO_INST_NEW (cfg, ins, (*ip));
7669 ins->sreg1 = sp [0]->dreg;
7670 ins->sreg2 = sp [1]->dreg;
7671 type_from_op (ins, sp [0], sp [1]);
7673 ADD_WIDEN_OP (ins, sp [0], sp [1]);
7674 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7676 /* FIXME: Pass opcode to is_inst_imm */
7678 /* Use the immediate opcodes if possible */
7679 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
7682 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
7683 if (imm_opcode != -1) {
7684 ins->opcode = imm_opcode;
7685 if (sp [1]->opcode == OP_I8CONST) {
7686 #if SIZEOF_REGISTER == 8
7687 ins->inst_imm = sp [1]->inst_l;
7689 ins->inst_ls_word = sp [1]->inst_ls_word;
7690 ins->inst_ms_word = sp [1]->inst_ms_word;
7694 ins->inst_imm = (gssize)(sp [1]->inst_c0);
7697 /* Might be followed by an instruction added by ADD_WIDEN_OP */
7698 if (sp [1]->next == NULL)
7699 sp [1]->opcode = OP_NOP;
7702 MONO_ADD_INS ((cfg)->cbb, (ins));
7704 *sp++ = mono_decompose_opcode (cfg, ins);
7717 case CEE_CONV_OVF_I8:
7718 case CEE_CONV_OVF_U8:
7722 /* Special case this earlier so we have long constants in the IR */
7723 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
7724 int data = sp [-1]->inst_c0;
7725 sp [-1]->opcode = OP_I8CONST;
7726 sp [-1]->type = STACK_I8;
7727 #if SIZEOF_REGISTER == 8
7728 if ((*ip) == CEE_CONV_U8)
7729 sp [-1]->inst_c0 = (guint32)data;
7731 sp [-1]->inst_c0 = data;
7733 sp [-1]->inst_ls_word = data;
7734 if ((*ip) == CEE_CONV_U8)
7735 sp [-1]->inst_ms_word = 0;
7737 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
7739 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
7746 case CEE_CONV_OVF_I4:
7747 case CEE_CONV_OVF_I1:
7748 case CEE_CONV_OVF_I2:
7749 case CEE_CONV_OVF_I:
7750 case CEE_CONV_OVF_U:
7753 if (sp [-1]->type == STACK_R8) {
7754 ADD_UNOP (CEE_CONV_OVF_I8);
7761 case CEE_CONV_OVF_U1:
7762 case CEE_CONV_OVF_U2:
7763 case CEE_CONV_OVF_U4:
7766 if (sp [-1]->type == STACK_R8) {
7767 ADD_UNOP (CEE_CONV_OVF_U8);
7774 case CEE_CONV_OVF_I1_UN:
7775 case CEE_CONV_OVF_I2_UN:
7776 case CEE_CONV_OVF_I4_UN:
7777 case CEE_CONV_OVF_I8_UN:
7778 case CEE_CONV_OVF_U1_UN:
7779 case CEE_CONV_OVF_U2_UN:
7780 case CEE_CONV_OVF_U4_UN:
7781 case CEE_CONV_OVF_U8_UN:
7782 case CEE_CONV_OVF_I_UN:
7783 case CEE_CONV_OVF_U_UN:
7790 CHECK_CFG_EXCEPTION;
7794 case CEE_ADD_OVF_UN:
7796 case CEE_MUL_OVF_UN:
7798 case CEE_SUB_OVF_UN:
7806 token = read32 (ip + 1);
7807 klass = mini_get_class (method, token, generic_context);
7808 CHECK_TYPELOAD (klass);
7810 if (generic_class_is_reference_type (cfg, klass)) {
7811 MonoInst *store, *load;
7812 int dreg = alloc_ireg_ref (cfg);
7814 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
7815 load->flags |= ins_flag;
7816 MONO_ADD_INS (cfg->cbb, load);
7818 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
7819 store->flags |= ins_flag;
7820 MONO_ADD_INS (cfg->cbb, store);
7822 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
7823 emit_write_barrier (cfg, sp [0], sp [1], -1);
7825 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7837 token = read32 (ip + 1);
7838 klass = mini_get_class (method, token, generic_context);
7839 CHECK_TYPELOAD (klass);
7841 /* Optimize the common ldobj+stloc combination */
7851 loc_index = ip [5] - CEE_STLOC_0;
7858 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
7859 CHECK_LOCAL (loc_index);
7861 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7862 ins->dreg = cfg->locals [loc_index]->dreg;
7868 /* Optimize the ldobj+stobj combination */
7869 /* The reference case ends up being a load+store anyway */
7870 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
7875 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7882 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7891 CHECK_STACK_OVF (1);
7893 n = read32 (ip + 1);
7895 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
7896 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
7897 ins->type = STACK_OBJ;
7900 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
7901 MonoInst *iargs [1];
7903 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
7904 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
7906 if (cfg->opt & MONO_OPT_SHARED) {
7907 MonoInst *iargs [3];
7909 if (cfg->compile_aot) {
7910 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
7912 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7913 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
7914 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
7915 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
7916 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7918 if (bblock->out_of_line) {
7919 MonoInst *iargs [2];
7921 if (image == mono_defaults.corlib) {
7923 * Avoid relocations in AOT and save some space by using a
7924 * version of helper_ldstr specialized to mscorlib.
7926 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
7927 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
7929 /* Avoid creating the string object */
7930 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
7931 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
7932 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
7936 if (cfg->compile_aot) {
7937 NEW_LDSTRCONST (cfg, ins, image, n);
7939 MONO_ADD_INS (bblock, ins);
7942 NEW_PCONST (cfg, ins, NULL);
7943 ins->type = STACK_OBJ;
7944 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7946 OUT_OF_MEMORY_FAILURE;
7949 MONO_ADD_INS (bblock, ins);
7958 MonoInst *iargs [2];
7959 MonoMethodSignature *fsig;
7962 MonoInst *vtable_arg = NULL;
7965 token = read32 (ip + 1);
7966 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7967 if (!cmethod || mono_loader_get_last_error ())
7969 fsig = mono_method_get_signature (cmethod, image, token);
7973 mono_save_token_info (cfg, image, token, cmethod);
7975 if (!mono_class_init (cmethod->klass))
7978 if (cfg->generic_sharing_context)
7979 context_used = mono_method_check_context_used (cmethod);
7981 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
7982 if (check_linkdemand (cfg, method, cmethod))
7984 CHECK_CFG_EXCEPTION;
7985 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
7986 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
7989 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
7990 emit_generic_class_init (cfg, cmethod->klass);
7991 CHECK_TYPELOAD (cmethod->klass);
7994 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7995 mono_method_is_generic_sharable_impl (cmethod, TRUE)) {
7996 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7997 mono_class_vtable (cfg->domain, cmethod->klass);
7998 CHECK_TYPELOAD (cmethod->klass);
8000 vtable_arg = emit_get_rgctx_method (cfg, context_used,
8001 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
8004 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
8005 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
8007 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
8009 CHECK_TYPELOAD (cmethod->klass);
8010 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
8015 n = fsig->param_count;
8019 * Generate smaller code for the common newobj <exception> instruction in
8020 * argument checking code.
8022 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
8023 is_exception_class (cmethod->klass) && n <= 2 &&
8024 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
8025 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
8026 MonoInst *iargs [3];
8028 g_assert (!vtable_arg);
8032 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
8035 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
8039 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
8044 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
/*
 * Tail of the CEE_NEWOBJ handler: choose an allocation strategy
 * (System.Array icall fast paths, string ctors, valuetype locals,
 * generic-sharing aware heap alloc), then emit the ctor call itself.
 * NOTE(review): excerpt — some surrounding statements are not shown here.
 */
8047 g_assert_not_reached ();
8055 /* move the args to allow room for 'this' in the first position */
8061 /* check_call_signature () requires sp[0] to be set */
8062 this_ins.type = STACK_OBJ;
8064 if (check_call_signature (cfg, fsig, sp))
/* System.Array ctors become mono_array_new_{1,2,3} icalls when possible. */
8069 if (mini_class_is_system_array (cmethod->klass)) {
8070 g_assert (!vtable_arg);
8072 *sp = emit_get_rgctx_method (cfg, context_used,
8073 cmethod, MONO_RGCTX_INFO_METHOD);
8075 /* Avoid varargs in the common case */
8076 if (fsig->param_count == 1)
8077 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
8078 else if (fsig->param_count == 2)
8079 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
8080 else if (fsig->param_count == 3)
8081 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
8083 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
8084 } else if (cmethod->string_ctor) {
8085 g_assert (!context_used);
8086 g_assert (!vtable_arg);
8087 /* we simply pass a null pointer */
8088 EMIT_NEW_PCONST (cfg, *sp, NULL);
8089 /* now call the string ctor */
8090 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, sp, NULL, NULL, NULL);
8092 MonoInst* callvirt_this_arg = NULL;
/* Valuetype "new": zero a stack local and pass its address as 'this'. */
8094 if (cmethod->klass->valuetype) {
8095 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
8096 MONO_EMIT_NEW_VZERO (cfg, iargs [0]->dreg, cmethod->klass);
8097 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
8102 * The code generated by mini_emit_virtual_call () expects
8103 * iargs [0] to be a boxed instance, but luckily the vcall
8104 * will be transformed into a normal call there.
8106 } else if (context_used) {
8107 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
8110 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
8112 CHECK_TYPELOAD (cmethod->klass);
8115 * TypeInitializationExceptions thrown from the mono_runtime_class_init
8116 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
8117 * As a workaround, we call class cctors before allocating objects.
8119 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
8120 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
8121 if (cfg->verbose_level > 2)
8122 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
/* Remember this vtable so the cctor trampoline is emitted only once. */
8123 class_inits = g_slist_prepend (class_inits, vtable);
8126 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
8129 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
8132 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
8134 /* Now call the actual ctor */
8135 /* Avoid virtual calls to ctors if possible */
8136 if (cmethod->klass->marshalbyref)
8137 callvirt_this_arg = sp [0];
/* Intrinsic ctors (mini_emit_inst_for_ctor) short-circuit the call. */
8140 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
8141 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8142 type_to_eval_stack_type ((cfg), fsig->ret, ins);
8147 CHECK_CFG_EXCEPTION;
/* Try inlining the ctor body; Exception subclasses are excluded. */
8148 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
8149 !disable_inline && mono_method_check_inlining (cfg, cmethod) &&
8150 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
8151 !g_list_find (dont_inline, cmethod)) {
8154 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
8155 cfg->real_offset += 5;
8158 inline_costs += costs - 5;
8161 mono_emit_method_call_full (cfg, cmethod, fsig, sp, callvirt_this_arg, NULL, NULL);
/* Shared generic ctor that cannot be shared: call through an rgctx-fetched address. */
8163 } else if (context_used &&
8164 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
8165 !mono_class_generic_sharing_enabled (cmethod->klass))) {
8166 MonoInst *cmethod_addr;
8168 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
8169 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
8171 mono_emit_calli (cfg, fsig, sp, cmethod_addr, vtable_arg);
8174 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp,
8175 callvirt_this_arg, NULL, vtable_arg);
/* Valuetype path had no heap alloc: reload the inited local for the stack. */
8179 if (alloc == NULL) {
8181 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
8182 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
/*
 * CEE_CASTCLASS: cast the object on top of the stack to 'klass',
 * throwing InvalidCastException at runtime on failure.  Three paths:
 * a caching wrapper for reference-variant generics, an inlined
 * marshalling wrapper for MBR/interface types, and handle_castclass ().
 */
8196 token = read32 (ip + 1);
8197 klass = mini_get_class (method, token, generic_context);
8198 CHECK_TYPELOAD (klass);
8199 if (sp [0]->type != STACK_OBJ)
8202 if (cfg->generic_sharing_context)
8203 context_used = mono_class_check_context_used (klass);
/* Variant generic interfaces need the slow cast-with-cache wrapper. */
8205 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
8206 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
8213 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
8216 /*FIXME AOT support*/
8217 if (cfg->compile_aot)
8218 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
/* args [2] is a domain-allocated cache slot the wrapper updates. */
8220 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
8222 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
8223 *sp++ = mono_emit_method_call (cfg, mono_castclass, args, NULL);
8226 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
8227 MonoMethod *mono_castclass;
8228 MonoInst *iargs [1];
8231 mono_castclass = mono_marshal_get_castclass (klass);
/* Force-inline the wrapper (last arg TRUE) so the cast stays cheap. */
8234 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
8235 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8236 CHECK_CFG_EXCEPTION;
8237 g_assert (costs > 0);
8240 cfg->real_offset += 5;
8245 inline_costs += costs;
8248 ins = handle_castclass (cfg, klass, *sp, context_used);
8249 CHECK_CFG_EXCEPTION;
8259 token = read32 (ip + 1);
8260 klass = mini_get_class (method, token, generic_context);
8261 CHECK_TYPELOAD (klass);
8262 if (sp [0]->type != STACK_OBJ)
8265 if (cfg->generic_sharing_context)
8266 context_used = mono_class_check_context_used (klass);
8268 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
8269 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
8276 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
8279 /*FIXME AOT support*/
8280 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
8282 *sp++ = mono_emit_method_call (cfg, mono_isinst, args, NULL);
8285 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
8286 MonoMethod *mono_isinst;
8287 MonoInst *iargs [1];
8290 mono_isinst = mono_marshal_get_isinst (klass);
8293 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
8294 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8295 CHECK_CFG_EXCEPTION;
8296 g_assert (costs > 0);
8299 cfg->real_offset += 5;
8304 inline_costs += costs;
8307 ins = handle_isinst (cfg, klass, *sp, context_used);
8308 CHECK_CFG_EXCEPTION;
/*
 * CEE_UNBOX_ANY: for reference types this degenerates to castclass
 * (hence the duplicated code flagged below); for valuetypes it unboxes
 * and loads the value, with a special path for Nullable<T>.
 */
8315 case CEE_UNBOX_ANY: {
8319 token = read32 (ip + 1);
8320 klass = mini_get_class (method, token, generic_context);
8321 CHECK_TYPELOAD (klass);
8323 mono_save_token_info (cfg, image, token, klass);
8325 if (cfg->generic_sharing_context)
8326 context_used = mono_class_check_context_used (klass);
8328 if (generic_class_is_reference_type (cfg, klass)) {
8329 /* CASTCLASS FIXME kill this huge slice of duplicated code*/
8330 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
8331 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
8338 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
8341 /*FIXME AOT support*/
8342 if (cfg->compile_aot)
8343 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
8345 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
8347 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
8348 *sp++ = mono_emit_method_call (cfg, mono_castclass, args, NULL);
8351 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
8352 MonoMethod *mono_castclass;
8353 MonoInst *iargs [1];
8356 mono_castclass = mono_marshal_get_castclass (klass);
8359 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
8360 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8361 CHECK_CFG_EXCEPTION;
8362 g_assert (costs > 0);
8365 cfg->real_offset += 5;
8369 inline_costs += costs;
8371 ins = handle_castclass (cfg, klass, *sp, context_used);
8372 CHECK_CFG_EXCEPTION;
/* Valuetype path: unbox (nullable-aware) then load the value itself. */
8380 if (mono_class_is_nullable (klass)) {
8381 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
8388 ins = handle_unbox (cfg, klass, sp, context_used);
8394 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
/*
 * CEE_BOX: box a valuetype.  Includes the box+brtrue/brfalse peephole:
 * boxing a non-nullable valuetype always yields a non-null reference,
 * so the box can be skipped and the branch resolved at compile time.
 */
8407 token = read32 (ip + 1);
8408 klass = mini_get_class (method, token, generic_context);
8409 CHECK_TYPELOAD (klass);
8411 mono_save_token_info (cfg, image, token, klass);
8413 if (cfg->generic_sharing_context)
8414 context_used = mono_class_check_context_used (klass);
8416 if (generic_class_is_reference_type (cfg, klass)) {
8422 if (klass == mono_defaults.void_class)
8424 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
8426 /* frequent check in generic code: box (struct), brtrue */
8428 // FIXME: LLVM can't handle the inconsistent bb linking
8429 if (!mono_class_is_nullable (klass) &&
8430 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) &&
8431 (ip [5] == CEE_BRTRUE ||
8432 ip [5] == CEE_BRTRUE_S ||
8433 ip [5] == CEE_BRFALSE ||
8434 ip [5] == CEE_BRFALSE_S)) {
8435 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
8437 MonoBasicBlock *true_bb, *false_bb;
8441 if (cfg->verbose_level > 3) {
8442 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
8443 printf ("<box+brtrue opt>\n");
/* Decode the branch target: short (signed char) or long (32-bit) form. */
8451 target = ip + 1 + (signed char)(*ip);
8458 target = ip + 4 + (gint)(read32 (ip));
8462 g_assert_not_reached ();
8466 * We need to link both bblocks, since it is needed for handling stack
8467 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
8468 * Branching to only one of them would lead to inconsistencies, so
8469 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
8471 GET_BBLOCK (cfg, true_bb, target);
8472 GET_BBLOCK (cfg, false_bb, ip);
8474 mono_link_bblock (cfg, cfg->cbb, true_bb);
8475 mono_link_bblock (cfg, cfg->cbb, false_bb);
8477 if (sp != stack_start) {
8478 handle_stack_args (cfg, stack_start, sp - stack_start);
8480 CHECK_UNVERIFIABLE (cfg);
8483 if (COMPILE_LLVM (cfg)) {
8484 dreg = alloc_ireg (cfg);
8485 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
8486 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
8488 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
8490 /* The JIT can't eliminate the iconst+compare */
8491 MONO_INST_NEW (cfg, ins, OP_BR);
/* Boxed value is never null: the branch outcome is known statically. */
8492 ins->inst_target_bb = is_true ? true_bb : false_bb;
8493 MONO_ADD_INS (cfg->cbb, ins);
8496 start_new_bblock = 1;
8500 *sp++ = handle_box (cfg, val, klass, context_used);
8502 CHECK_CFG_EXCEPTION;
/*
 * CEE_UNBOX: produce a managed pointer to the value inside a boxed
 * object.  Nullable<T> needs a temporary since it has no boxed layout.
 */
8511 token = read32 (ip + 1);
8512 klass = mini_get_class (method, token, generic_context);
8513 CHECK_TYPELOAD (klass);
8515 mono_save_token_info (cfg, image, token, klass);
8517 if (cfg->generic_sharing_context)
8518 context_used = mono_class_check_context_used (klass);
8520 if (mono_class_is_nullable (klass)) {
8523 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
/* Push the address of the temporary holding the unboxed nullable. */
8524 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
8528 ins = handle_unbox (cfg, klass, sp, context_used);
/*
 * CEE_LDFLD / CEE_LDFLDA / CEE_STFLD: instance field access.
 * Resolves the field token, performs access checks, then emits either
 * a remoting wrapper call (MarshalByRef/ContextBound classes) or a
 * direct membase load/store at the field offset, with null checks and
 * GC write barriers where required.
 */
8538 MonoClassField *field;
8542 if (*ip == CEE_STFLD) {
8549 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
8551 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
8554 token = read32 (ip + 1);
/* In wrappers the token is a direct pointer, not a metadata token. */
8555 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8556 field = mono_method_get_wrapper_data (method, token);
8557 klass = field->parent;
8560 field = mono_field_from_token (image, token, &klass, generic_context);
8564 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
8565 FIELD_ACCESS_FAILURE;
8566 mono_class_init (klass);
8568 if (*ip != CEE_LDFLDA && is_magic_tls_access (field))
8570 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
8571 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
8572 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8573 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
/* Valuetype field offsets include the object header; strip it. */
8576 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
8577 if (*ip == CEE_STFLD) {
8578 if (target_type_is_incompatible (cfg, field->type, sp [1]))
/* Remoting: go through the stfld wrapper for possibly-proxy objects. */
8580 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
8581 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
8582 MonoInst *iargs [5];
8585 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8586 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
8587 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
8591 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
8592 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
8593 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8594 CHECK_CFG_EXCEPTION;
8595 g_assert (costs > 0);
8597 cfg->real_offset += 5;
8600 inline_costs += costs;
8602 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
8607 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
8609 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
/* LDADDR bases cannot fault; everything else may trap on null. */
8610 if (sp [0]->opcode != OP_LDADDR)
8611 store->flags |= MONO_INST_FAULT;
/* Reference store into the heap: emit a GC write barrier, except for
 * stores of a constant null. */
8613 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
8614 /* insert call to write barrier */
8618 dreg = alloc_ireg_mp (cfg);
8619 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
8620 emit_write_barrier (cfg, ptr, sp [1], -1);
8623 store->flags |= ins_flag;
/* Remoting: ldfld/ldflda wrapper for possibly-proxy objects. */
8630 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
8631 MonoMethod *wrapper = (*ip == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
8632 MonoInst *iargs [4];
8635 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8636 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
8637 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
8638 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
8639 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
8640 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8641 CHECK_CFG_EXCEPTION;
8643 g_assert (costs > 0);
8645 cfg->real_offset += 5;
8649 inline_costs += costs;
8651 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
/* ldfld on a vtype on the stack: materialize its address first. */
8655 if (sp [0]->type == STACK_VTYPE) {
8658 /* Have to compute the address of the variable */
8660 var = get_vreg_to_inst (cfg, sp [0]->dreg);
8662 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
8664 g_assert (var->klass == klass);
8666 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
8670 if (*ip == CEE_LDFLDA) {
8671 if (is_magic_tls_access (field)) {
8673 *sp++ = create_magic_tls_access (cfg, field, &cached_tls_addr, ins);
/* ldflda must null-check eagerly since the result is just a pointer. */
8675 if (sp [0]->type == STACK_OBJ) {
8676 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
8677 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
8680 dreg = alloc_ireg_mp (cfg);
8682 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
8683 ins->klass = mono_class_from_mono_type (field->type);
8684 ins->type = STACK_MP;
8690 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
8692 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
8693 load->flags |= ins_flag;
8694 if (sp [0]->opcode != OP_LDADDR)
8695 load->flags |= MONO_INST_FAULT;
/*
 * CEE_LDSFLD / CEE_LDSFLDA / CEE_STSFLD: static field access.
 * First computes an IR expression for the field address (TLS fast path
 * for special-static fields, icall paths for shared/AOT code, rgctx
 * lookup for generic sharing, or a direct vtable->data pointer), then
 * emits the load/store — with a constant-folding path for initialized
 * readonly fields.
 */
8706 MonoClassField *field;
8707 gpointer addr = NULL;
8708 gboolean is_special_static;
8712 token = read32 (ip + 1);
8714 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8715 field = mono_method_get_wrapper_data (method, token);
8716 klass = field->parent;
8719 field = mono_field_from_token (image, token, &klass, generic_context);
8722 mono_class_init (klass);
8723 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
8724 FIELD_ACCESS_FAILURE;
8726 /* if the class is Critical then transparent code cannot access it's fields */
8727 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8728 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8731 * We can only support shared generic static
8732 * field access on architectures where the
8733 * trampoline code has been extended to handle
8734 * the generic class init.
8736 #ifndef MONO_ARCH_VTABLE_REG
8737 GENERIC_SHARING_FAILURE (*ip);
8740 if (cfg->generic_sharing_context)
8741 context_used = mono_class_check_context_used (klass);
8743 ftype = mono_field_get_type (field);
8745 g_assert (!(ftype->attrs & FIELD_ATTRIBUTE_LITERAL));
8747 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
8748 * to be called here.
8750 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
8751 mono_class_vtable (cfg->domain, klass);
8752 CHECK_TYPELOAD (klass);
/* Look up a thread/context-static slot registered for this field. */
8754 mono_domain_lock (cfg->domain);
8755 if (cfg->domain->special_static_fields)
8756 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
8757 mono_domain_unlock (cfg->domain);
8759 is_special_static = mono_class_field_is_special_static (field);
8761 /* Generate IR to compute the field address */
/* Fast TLS path: the encoded offset's high bit clear means a plain
 * thread-static slot reachable through MonoInternalThread. */
8762 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && mono_get_thread_intrinsic (cfg) && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
8764 * Fast access to TLS data
8765 * Inline version of get_thread_static_data () in
8769 int idx, static_data_reg, array_reg, dreg;
8770 MonoInst *thread_ins;
8772 // offset &= 0x7fffffff;
8773 // idx = (offset >> 24) - 1;
8774 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
8776 thread_ins = mono_get_thread_intrinsic (cfg);
8777 MONO_ADD_INS (cfg->cbb, thread_ins);
8778 static_data_reg = alloc_ireg (cfg);
8779 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
/* AOT cannot bake the offset in: compute idx/offset in IR at runtime. */
8781 if (cfg->compile_aot) {
8782 int offset_reg, offset2_reg, idx_reg;
8784 /* For TLS variables, this will return the TLS offset */
8785 EMIT_NEW_SFLDACONST (cfg, ins, field);
8786 offset_reg = ins->dreg;
8787 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
8788 idx_reg = alloc_ireg (cfg);
8789 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
8790 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
8791 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
8792 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
8793 array_reg = alloc_ireg (cfg);
8794 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
8795 offset2_reg = alloc_ireg (cfg);
8796 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
8797 dreg = alloc_ireg (cfg);
8798 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
/* JIT: the slot index and offset are compile-time constants. */
8800 offset = (gsize)addr & 0x7fffffff;
8801 idx = (offset >> 24) - 1;
8803 array_reg = alloc_ireg (cfg);
8804 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
8805 dreg = alloc_ireg (cfg);
8806 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, (offset & 0xffffff));
/* Shared/AOT special-static: resolve the address via a runtime icall. */
8808 } else if ((cfg->opt & MONO_OPT_SHARED) ||
8809 (cfg->compile_aot && is_special_static) ||
8810 (context_used && is_special_static)) {
8811 MonoInst *iargs [2];
8813 g_assert (field->parent);
8814 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8816 iargs [1] = emit_get_rgctx_field (cfg, context_used,
8817 field, MONO_RGCTX_INFO_CLASS_FIELD);
8819 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8821 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
/* Generic sharing: base address comes from the rgctx static-data slot. */
8822 } else if (context_used) {
8823 MonoInst *static_data;
8826 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
8827 method->klass->name_space, method->klass->name, method->name,
8828 depth, field->offset);
8831 if (mono_class_needs_cctor_run (klass, method))
8832 emit_generic_class_init (cfg, klass);
8835 * The pointer we're computing here is
8837 * super_info.static_data + field->offset
8839 static_data = emit_get_rgctx_klass (cfg, context_used,
8840 klass, MONO_RGCTX_INFO_STATIC_DATA);
8842 if (field->offset == 0) {
8845 int addr_reg = mono_alloc_preg (cfg);
8846 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
8848 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
8849 MonoInst *iargs [2];
8851 g_assert (field->parent);
8852 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8853 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8854 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
/* Plain JIT path: the address is vtable->data + offset, computed now. */
8856 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
8858 CHECK_TYPELOAD (klass);
8860 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
8861 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
8862 if (cfg->verbose_level > 2)
8863 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
8864 class_inits = g_slist_prepend (class_inits, vtable);
8866 if (cfg->run_cctors) {
8868 /* This makes so that inline cannot trigger */
8869 /* .cctors: too many apps depend on them */
8870 /* running with a specific order... */
8871 if (! vtable->initialized)
8873 ex = mono_runtime_class_init_full (vtable, FALSE);
8875 set_exception_object (cfg, ex);
8876 goto exception_exit;
8880 addr = (char*)vtable->data + field->offset;
8882 if (cfg->compile_aot)
8883 EMIT_NEW_SFLDACONST (cfg, ins, field);
8885 EMIT_NEW_PCONST (cfg, ins, addr);
/* Special-static in non-shared code: decode through the icall. */
8887 MonoInst *iargs [1];
8888 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
8889 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
8893 /* Generate IR to do the actual load/store operation */
8895 if (*ip == CEE_LDSFLDA) {
8896 ins->klass = mono_class_from_mono_type (ftype);
8897 ins->type = STACK_PTR;
8899 } else if (*ip == CEE_STSFLD) {
8904 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, sp [0]->dreg);
8905 store->flags |= ins_flag;
8907 gboolean is_const = FALSE;
8908 MonoVTable *vtable = NULL;
8910 if (!context_used) {
8911 vtable = mono_class_vtable (cfg->domain, klass);
8912 CHECK_TYPELOAD (klass);
/* Initialized readonly statics can be folded to constants by type. */
8914 if (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) &&
8915 vtable->initialized && (ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY)) {
8916 gpointer addr = (char*)vtable->data + field->offset;
8917 int ro_type = ftype->type;
8918 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
8919 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
8921 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
8924 case MONO_TYPE_BOOLEAN:
8926 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
8930 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
8933 case MONO_TYPE_CHAR:
8935 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
8939 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
8944 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
8948 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
8954 case MONO_TYPE_FNPTR:
8955 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
8956 type_to_eval_stack_type ((cfg), field->type, *sp);
8959 case MONO_TYPE_STRING:
8960 case MONO_TYPE_OBJECT:
8961 case MONO_TYPE_CLASS:
8962 case MONO_TYPE_SZARRAY:
8963 case MONO_TYPE_ARRAY:
/* A moving GC may relocate the object; only fold with a non-moving GC. */
8964 if (!mono_gc_is_moving ()) {
8965 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
8966 type_to_eval_stack_type ((cfg), field->type, *sp);
8974 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
8979 case MONO_TYPE_VALUETYPE:
8989 CHECK_STACK_OVF (1);
8991 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
8992 load->flags |= ins_flag;
/*
 * CEE_STOBJ: store the value sp [1] through the address sp [0],
 * typed by the token's class; reference stores get a write barrier.
 */
9005 token = read32 (ip + 1);
9006 klass = mini_get_class (method, token, generic_context);
9007 CHECK_TYPELOAD (klass);
9008 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
9009 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
/* The write-barrier wrapper itself must not recurse into a barrier. */
9010 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
9011 generic_class_is_reference_type (cfg, klass)) {
9012 /* insert call to write barrier */
9013 emit_write_barrier (cfg, sp [0], sp [1], -1);
/*
 * CEE_NEWARR: allocate a one-dimensional array of 'klass' with the
 * length on top of the stack.  Also contains the InitializeArray
 * optimization: a following ldtoken+InitializeArray sequence on a
 * constant-length array is replaced by a direct memcpy from the RVA
 * field data.
 */
9025 const char *data_ptr;
9027 guint32 field_token;
9033 token = read32 (ip + 1);
9035 klass = mini_get_class (method, token, generic_context);
9036 CHECK_TYPELOAD (klass);
9038 if (cfg->generic_sharing_context)
9039 context_used = mono_class_check_context_used (klass);
/* Narrow a 64-bit/native length to the 32-bit length the alloc expects. */
9041 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
9042 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_I4);
9043 ins->sreg1 = sp [0]->dreg;
9044 ins->type = STACK_I4;
9045 ins->dreg = alloc_ireg (cfg);
9046 MONO_ADD_INS (cfg->cbb, ins);
9047 *sp = mono_decompose_opcode (cfg, ins);
9052 MonoClass *array_class = mono_array_class_get (klass, 1);
9053 /* FIXME: we cannot get a managed
9054 allocator because we can't get the
9055 open generic class's vtable. We
9056 have the same problem in
9057 handle_alloc(). This
9058 needs to be solved so that we can
9059 have managed allocs of shared
9062 MonoVTable *array_class_vtable = mono_class_vtable (cfg->domain, array_class);
9063 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class_vtable, 1);
9065 MonoMethod *managed_alloc = NULL;
9067 /* FIXME: Decompose later to help abcrem */
/* Generic sharing: fetch the array vtable through the rgctx. */
9070 args [0] = emit_get_rgctx_klass (cfg, context_used,
9071 array_class, MONO_RGCTX_INFO_VTABLE);
9076 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
9078 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
9080 if (cfg->opt & MONO_OPT_SHARED) {
9081 /* Decompose now to avoid problems with references to the domainvar */
9082 MonoInst *iargs [3];
9084 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
9085 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
9088 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
9090 /* Decompose later since it is needed by abcrem */
9091 MonoClass *array_type = mono_array_class_get (klass, 1);
9092 mono_class_vtable (cfg->domain, array_type);
9093 CHECK_TYPELOAD (array_type);
9095 MONO_INST_NEW (cfg, ins, OP_NEWARR);
9096 ins->dreg = alloc_ireg_ref (cfg);
9097 ins->sreg1 = sp [0]->dreg;
9098 ins->inst_newa_class = klass;
9099 ins->type = STACK_OBJ;
9101 MONO_ADD_INS (cfg->cbb, ins);
9102 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
9103 cfg->cbb->has_array_access = TRUE;
9105 /* Needed so mono_emit_load_get_addr () gets called */
9106 mono_get_got_var (cfg);
9116 * we inline/optimize the initialization sequence if possible.
9117 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
9118 * for small sizes open code the memcpy
9119 * ensure the rva field is big enough
/* Constant-length array followed by InitializeArray: memcpy the RVA
 * blob straight into the array's vector instead of calling the BCL. */
9121 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
9122 MonoMethod *memcpy_method = get_memcpy_method ();
9123 MonoInst *iargs [3];
9124 int add_reg = alloc_ireg_mp (cfg);
9126 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
9127 if (cfg->compile_aot) {
9128 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
9130 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
9132 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
9133 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * CEE_LDLEN: push the length of the array on top of the stack.
 * Emitted as OP_LDLEN and decomposed later; faults on a null array.
 */
9142 if (sp [0]->type != STACK_OBJ)
9145 MONO_INST_NEW (cfg, ins, OP_LDLEN);
9146 ins->dreg = alloc_preg (cfg);
9147 ins->sreg1 = sp [0]->dreg;
9148 ins->type = STACK_I4;
9149 /* This flag will be inherited by the decomposition */
9150 ins->flags |= MONO_INST_FAULT;
9151 MONO_ADD_INS (cfg->cbb, ins);
9152 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
9153 cfg->cbb->has_array_access = TRUE;
/*
 * CEE_LDELEMA: push the address of an array element.  For reference
 * element types an exact array-type check is required (unless the
 * method is a wrapper or the 'readonly.' prefix was seen).
 */
9161 if (sp [0]->type != STACK_OBJ)
9164 cfg->flags |= MONO_CFG_HAS_LDELEMA;
9166 klass = mini_get_class (method, read32 (ip + 1), generic_context);
9167 CHECK_TYPELOAD (klass);
9168 /* we need to make sure that this array is exactly the type it needs
9169 * to be for correctness. the wrappers are lax with their usage
9170 * so we need to ignore them here
9172 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
9173 MonoClass *array_class = mono_array_class_get (klass, 1);
9174 mini_emit_check_array_type (cfg, sp [0], array_class);
9175 CHECK_TYPELOAD (array_class);
/* TRUE: emit the bounds check as part of the address computation. */
9179 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
/*
 * CEE_LDELEM / CEE_LDELEM_*: load an array element.  A constant index
 * gets a direct bounds-checked membase load; otherwise the element
 * address is computed first via mini_emit_ldelema_1_ins ().
 */
9194 case CEE_LDELEM_REF: {
9200 if (*ip == CEE_LDELEM) {
9202 token = read32 (ip + 1);
9203 klass = mini_get_class (method, token, generic_context);
9204 CHECK_TYPELOAD (klass);
9205 mono_class_init (klass);
/* Typed ldelem.* variants encode the element class in the opcode. */
9208 klass = array_access_to_klass (*ip);
9210 if (sp [0]->type != STACK_OBJ)
9213 cfg->flags |= MONO_CFG_HAS_LDELEMA;
9215 if (sp [1]->opcode == OP_ICONST) {
9216 int array_reg = sp [0]->dreg;
9217 int index_reg = sp [1]->dreg;
9218 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
9220 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
9221 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
9223 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
9224 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
9227 if (*ip == CEE_LDELEM)
/*
 * CEE_STELEM / CEE_STELEM_*: store into an array element.  Reference
 * stores of non-null values go through the virtual stelemref helper
 * (which performs the array covariance check); everything else is a
 * bounds-checked membase store.
 */
9240 case CEE_STELEM_REF:
9247 cfg->flags |= MONO_CFG_HAS_LDELEMA;
9249 if (*ip == CEE_STELEM) {
9251 token = read32 (ip + 1);
9252 klass = mini_get_class (method, token, generic_context);
9253 CHECK_TYPELOAD (klass);
9254 mono_class_init (klass);
9257 klass = array_access_to_klass (*ip);
9259 if (sp [0]->type != STACK_OBJ)
9262 /* storing a NULL doesn't need any of the complex checks in stelemref */
9263 if (generic_class_is_reference_type (cfg, klass) &&
9264 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
9265 MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
9266 MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
9267 MonoInst *iargs [3];
/* The helper is dispatched virtually; its vtable slot must exist. */
9270 mono_class_setup_vtable (obj_array);
9271 g_assert (helper->slot);
9273 if (sp [0]->type != STACK_OBJ)
9275 if (sp [2]->type != STACK_OBJ)
9282 mono_emit_method_call (cfg, helper, iargs, sp [0]);
9284 if (sp [1]->opcode == OP_ICONST) {
9285 int array_reg = sp [0]->dreg;
9286 int index_reg = sp [1]->dreg;
9287 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
9289 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
9290 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
9292 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
9293 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
9297 if (*ip == CEE_STELEM)
/*
 * CEE_CKFINITE: check that the float on the stack is finite
 * (throws ArithmeticException for NaN/infinity); value passes through.
 */
9304 case CEE_CKFINITE: {
9308 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
9309 ins->sreg1 = sp [0]->dreg;
9310 ins->dreg = alloc_freg (cfg);
9311 ins->type = STACK_R8;
9312 MONO_ADD_INS (bblock, ins);
/* Lower OP_CKFINITE into backend-supported instructions now. */
9314 *sp++ = mono_decompose_opcode (cfg, ins);
/*
 * CEE_REFANYVAL: extract the value address from a TypedReference,
 * verifying its klass field matches the expected class (throwing
 * InvalidCastException otherwise).
 */
9319 case CEE_REFANYVAL: {
9320 MonoInst *src_var, *src;
9322 int klass_reg = alloc_preg (cfg);
9323 int dreg = alloc_preg (cfg);
9326 MONO_INST_NEW (cfg, ins, *ip);
9329 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
9330 CHECK_TYPELOAD (klass);
9331 mono_class_init (klass);
9333 if (cfg->generic_sharing_context)
9334 context_used = mono_class_check_context_used (klass);
/* Take the address of the TypedReference vtype on the stack. */
9337 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9339 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
9340 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
9341 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
9344 MonoInst *klass_ins;
/* Shared generics: the expected class is only known via the rgctx. */
9346 klass_ins = emit_get_rgctx_klass (cfg, context_used,
9347 klass, MONO_RGCTX_INFO_KLASS);
9350 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
9351 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
9353 mini_emit_class_check (cfg, klass_reg, klass);
9355 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
9356 ins->type = STACK_MP;
/*
 * CEE_MKREFANY: build a TypedReference in a local from the pointer on
 * the stack: fills the klass, type and value fields, choosing between
 * rgctx lookup (shared generics), class-const registers (AOT) and
 * immediate stores (plain JIT).
 */
9361 case CEE_MKREFANY: {
9362 MonoInst *loc, *addr;
9365 MONO_INST_NEW (cfg, ins, *ip);
9368 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
9369 CHECK_TYPELOAD (klass);
9370 mono_class_init (klass);
9372 if (cfg->generic_sharing_context)
9373 context_used = mono_class_check_context_used (klass);
9375 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
9376 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
9379 MonoInst *const_ins;
9380 int type_reg = alloc_preg (cfg);
9382 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
9383 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
/* type = &klass->byval_arg, computed from the class pointer. */
9384 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
9385 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
9386 } else if (cfg->compile_aot) {
9387 int const_reg = alloc_preg (cfg);
9388 int type_reg = alloc_preg (cfg);
9390 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
9391 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
9392 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
9393 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
9395 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
9396 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
9398 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
/* Push the filled TypedReference vtype onto the eval stack. */
9400 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
9401 ins->type = STACK_VTYPE;
9402 ins->klass = mono_defaults.typed_reference_class;
9409 MonoClass *handle_class;
9411 CHECK_STACK_OVF (1);
9414 n = read32 (ip + 1);
9416 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
9417 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
9418 handle = mono_method_get_wrapper_data (method, n);
9419 handle_class = mono_method_get_wrapper_data (method, n + 1);
9420 if (handle_class == mono_defaults.typehandle_class)
9421 handle = &((MonoClass*)handle)->byval_arg;
9424 handle = mono_ldtoken (image, n, &handle_class, generic_context);
9428 mono_class_init (handle_class);
9429 if (cfg->generic_sharing_context) {
9430 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
9431 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
9432 /* This case handles ldtoken
9433 of an open type, like for
9436 } else if (handle_class == mono_defaults.typehandle_class) {
9437 /* If we get a MONO_TYPE_CLASS
9438 then we need to provide the
9440 instantiation of it. */
9441 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
9444 context_used = mono_class_check_context_used (mono_class_from_mono_type (handle));
9445 } else if (handle_class == mono_defaults.fieldhandle_class)
9446 context_used = mono_class_check_context_used (((MonoClassField*)handle)->parent);
9447 else if (handle_class == mono_defaults.methodhandle_class)
9448 context_used = mono_method_check_context_used (handle);
9450 g_assert_not_reached ();
9453 if ((cfg->opt & MONO_OPT_SHARED) &&
9454 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
9455 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
9456 MonoInst *addr, *vtvar, *iargs [3];
9457 int method_context_used;
9459 if (cfg->generic_sharing_context)
9460 method_context_used = mono_method_check_context_used (method);
9462 method_context_used = 0;
9464 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
9466 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
9467 EMIT_NEW_ICONST (cfg, iargs [1], n);
9468 if (method_context_used) {
9469 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
9470 method, MONO_RGCTX_INFO_METHOD);
9471 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
9473 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
9474 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
9476 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9478 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
9480 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9482 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
9483 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
9484 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
9485 (cmethod->klass == mono_defaults.monotype_class->parent) &&
9486 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
9487 MonoClass *tclass = mono_class_from_mono_type (handle);
9489 mono_class_init (tclass);
9491 ins = emit_get_rgctx_klass (cfg, context_used,
9492 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
9493 } else if (cfg->compile_aot) {
9494 if (method->wrapper_type) {
9495 if (mono_class_get (tclass->image, tclass->type_token) == tclass && !generic_context) {
9496 /* Special case for static synchronized wrappers */
9497 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
9499 /* FIXME: n is not a normal token */
9500 cfg->disable_aot = TRUE;
9501 EMIT_NEW_PCONST (cfg, ins, NULL);
9504 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
9507 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
9509 ins->type = STACK_OBJ;
9510 ins->klass = cmethod->klass;
9513 MonoInst *addr, *vtvar;
9515 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
9518 if (handle_class == mono_defaults.typehandle_class) {
9519 ins = emit_get_rgctx_klass (cfg, context_used,
9520 mono_class_from_mono_type (handle),
9521 MONO_RGCTX_INFO_TYPE);
9522 } else if (handle_class == mono_defaults.methodhandle_class) {
9523 ins = emit_get_rgctx_method (cfg, context_used,
9524 handle, MONO_RGCTX_INFO_METHOD);
9525 } else if (handle_class == mono_defaults.fieldhandle_class) {
9526 ins = emit_get_rgctx_field (cfg, context_used,
9527 handle, MONO_RGCTX_INFO_CLASS_FIELD);
9529 g_assert_not_reached ();
9531 } else if (cfg->compile_aot) {
9532 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n);
9534 EMIT_NEW_PCONST (cfg, ins, handle);
9536 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9537 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
9538 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9548 MONO_INST_NEW (cfg, ins, OP_THROW);
9550 ins->sreg1 = sp [0]->dreg;
9552 bblock->out_of_line = TRUE;
9553 MONO_ADD_INS (bblock, ins);
9554 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
9555 MONO_ADD_INS (bblock, ins);
9558 link_bblock (cfg, bblock, end_bblock);
9559 start_new_bblock = 1;
9561 case CEE_ENDFINALLY:
9562 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
9563 MONO_ADD_INS (bblock, ins);
9565 start_new_bblock = 1;
9568 * Control will leave the method so empty the stack, otherwise
9569 * the next basic block will start with a nonempty stack.
9571 while (sp != stack_start) {
9579 if (*ip == CEE_LEAVE) {
9581 target = ip + 5 + (gint32)read32(ip + 1);
9584 target = ip + 2 + (signed char)(ip [1]);
9587 /* empty the stack */
9588 while (sp != stack_start) {
9593 * If this leave statement is in a catch block, check for a
9594 * pending exception, and rethrow it if necessary.
9595 * We avoid doing this in runtime invoke wrappers, since those are called
9596 * by native code which expects the wrapper to catch all exceptions.
9598 for (i = 0; i < header->num_clauses; ++i) {
9599 MonoExceptionClause *clause = &header->clauses [i];
9602 * Use <= in the final comparison to handle clauses with multiple
9603 * leave statements, like in bug #78024.
9604 * The ordering of the exception clauses guarantees that we find the
9607 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
9609 MonoBasicBlock *dont_throw;
9614 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
9617 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
9619 NEW_BBLOCK (cfg, dont_throw);
9622 * Currently, we always rethrow the abort exception, despite the
9623 * fact that this is not correct. See thread6.cs for an example.
9624 * But propagating the abort exception is more important than
9625 * getting the semantics right.
9627 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
9628 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
9629 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
9631 MONO_START_BB (cfg, dont_throw);
9636 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
9638 MonoExceptionClause *clause;
9640 for (tmp = handlers; tmp; tmp = tmp->next) {
9642 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
9644 link_bblock (cfg, bblock, tblock);
9645 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
9646 ins->inst_target_bb = tblock;
9647 ins->inst_eh_block = clause;
9648 MONO_ADD_INS (bblock, ins);
9649 bblock->has_call_handler = 1;
9650 if (COMPILE_LLVM (cfg)) {
9651 MonoBasicBlock *target_bb;
9654 * Link the finally bblock with the target, since it will
9655 * conceptually branch there.
9656 * FIXME: Have to link the bblock containing the endfinally.
9658 GET_BBLOCK (cfg, target_bb, target);
9659 link_bblock (cfg, tblock, target_bb);
9662 g_list_free (handlers);
9665 MONO_INST_NEW (cfg, ins, OP_BR);
9666 MONO_ADD_INS (bblock, ins);
9667 GET_BBLOCK (cfg, tblock, target);
9668 link_bblock (cfg, bblock, tblock);
9669 ins->inst_target_bb = tblock;
9670 start_new_bblock = 1;
9672 if (*ip == CEE_LEAVE)
9681 * Mono specific opcodes
9683 case MONO_CUSTOM_PREFIX: {
9685 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
9689 case CEE_MONO_ICALL: {
9691 MonoJitICallInfo *info;
9693 token = read32 (ip + 2);
9694 func = mono_method_get_wrapper_data (method, token);
9695 info = mono_find_jit_icall_by_addr (func);
9698 CHECK_STACK (info->sig->param_count);
9699 sp -= info->sig->param_count;
9701 ins = mono_emit_jit_icall (cfg, info->func, sp);
9702 if (!MONO_TYPE_IS_VOID (info->sig->ret))
9706 inline_costs += 10 * num_calls++;
9710 case CEE_MONO_LDPTR: {
9713 CHECK_STACK_OVF (1);
9715 token = read32 (ip + 2);
9717 ptr = mono_method_get_wrapper_data (method, token);
9718 if (cfg->compile_aot && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (strstr (method->name, "__icall_wrapper_") == method->name)) {
9719 MonoJitICallInfo *callinfo;
9720 const char *icall_name;
9722 icall_name = method->name + strlen ("__icall_wrapper_");
9723 g_assert (icall_name);
9724 callinfo = mono_find_jit_icall_by_name (icall_name);
9725 g_assert (callinfo);
9727 if (ptr == callinfo->func) {
9728 /* Will be transformed into an AOTCONST later */
9729 EMIT_NEW_PCONST (cfg, ins, ptr);
9735 /* FIXME: Generalize this */
9736 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
9737 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
9742 EMIT_NEW_PCONST (cfg, ins, ptr);
9745 inline_costs += 10 * num_calls++;
9746 /* Can't embed random pointers into AOT code */
9747 cfg->disable_aot = 1;
9750 case CEE_MONO_ICALL_ADDR: {
9751 MonoMethod *cmethod;
9754 CHECK_STACK_OVF (1);
9756 token = read32 (ip + 2);
9758 cmethod = mono_method_get_wrapper_data (method, token);
9760 if (cfg->compile_aot) {
9761 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
9763 ptr = mono_lookup_internal_call (cmethod);
9765 EMIT_NEW_PCONST (cfg, ins, ptr);
9771 case CEE_MONO_VTADDR: {
9772 MonoInst *src_var, *src;
9778 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9779 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
9784 case CEE_MONO_NEWOBJ: {
9785 MonoInst *iargs [2];
9787 CHECK_STACK_OVF (1);
9789 token = read32 (ip + 2);
9790 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9791 mono_class_init (klass);
9792 NEW_DOMAINCONST (cfg, iargs [0]);
9793 MONO_ADD_INS (cfg->cbb, iargs [0]);
9794 NEW_CLASSCONST (cfg, iargs [1], klass);
9795 MONO_ADD_INS (cfg->cbb, iargs [1]);
9796 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
9798 inline_costs += 10 * num_calls++;
9801 case CEE_MONO_OBJADDR:
9804 MONO_INST_NEW (cfg, ins, OP_MOVE);
9805 ins->dreg = alloc_ireg_mp (cfg);
9806 ins->sreg1 = sp [0]->dreg;
9807 ins->type = STACK_MP;
9808 MONO_ADD_INS (cfg->cbb, ins);
9812 case CEE_MONO_LDNATIVEOBJ:
9814 * Similar to LDOBJ, but instead load the unmanaged
9815 * representation of the vtype to the stack.
9820 token = read32 (ip + 2);
9821 klass = mono_method_get_wrapper_data (method, token);
9822 g_assert (klass->valuetype);
9823 mono_class_init (klass);
9826 MonoInst *src, *dest, *temp;
9829 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
9830 temp->backend.is_pinvoke = 1;
9831 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
9832 mini_emit_stobj (cfg, dest, src, klass, TRUE);
9834 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
9835 dest->type = STACK_VTYPE;
9836 dest->klass = klass;
9842 case CEE_MONO_RETOBJ: {
9844 * Same as RET, but return the native representation of a vtype
9847 g_assert (cfg->ret);
9848 g_assert (mono_method_signature (method)->pinvoke);
9853 token = read32 (ip + 2);
9854 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9856 if (!cfg->vret_addr) {
9857 g_assert (cfg->ret_var_is_local);
9859 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
9861 EMIT_NEW_RETLOADA (cfg, ins);
9863 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
9865 if (sp != stack_start)
9868 MONO_INST_NEW (cfg, ins, OP_BR);
9869 ins->inst_target_bb = end_bblock;
9870 MONO_ADD_INS (bblock, ins);
9871 link_bblock (cfg, bblock, end_bblock);
9872 start_new_bblock = 1;
9876 case CEE_MONO_CISINST:
9877 case CEE_MONO_CCASTCLASS: {
9882 token = read32 (ip + 2);
9883 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9884 if (ip [1] == CEE_MONO_CISINST)
9885 ins = handle_cisinst (cfg, klass, sp [0]);
9887 ins = handle_ccastclass (cfg, klass, sp [0]);
9893 case CEE_MONO_SAVE_LMF:
9894 case CEE_MONO_RESTORE_LMF:
9895 #ifdef MONO_ARCH_HAVE_LMF_OPS
9896 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
9897 MONO_ADD_INS (bblock, ins);
9898 cfg->need_lmf_area = TRUE;
9902 case CEE_MONO_CLASSCONST:
9903 CHECK_STACK_OVF (1);
9905 token = read32 (ip + 2);
9906 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
9909 inline_costs += 10 * num_calls++;
9911 case CEE_MONO_NOT_TAKEN:
9912 bblock->out_of_line = TRUE;
9916 CHECK_STACK_OVF (1);
9918 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
9919 ins->dreg = alloc_preg (cfg);
9920 ins->inst_offset = (gint32)read32 (ip + 2);
9921 ins->type = STACK_PTR;
9922 MONO_ADD_INS (bblock, ins);
9926 case CEE_MONO_DYN_CALL: {
9929 /* It would be easier to call a trampoline, but that would put an
9930 * extra frame on the stack, confusing exception handling. So
9931 * implement it inline using an opcode for now.
9934 if (!cfg->dyn_call_var) {
9935 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
9936 /* prevent it from being register allocated */
9937 cfg->dyn_call_var->flags |= MONO_INST_INDIRECT;
9940 /* Has to use a call inst since it local regalloc expects it */
9941 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
9942 ins = (MonoInst*)call;
9944 ins->sreg1 = sp [0]->dreg;
9945 ins->sreg2 = sp [1]->dreg;
9946 MONO_ADD_INS (bblock, ins);
9948 #ifdef MONO_ARCH_DYN_CALL_PARAM_AREA
9949 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
9953 inline_costs += 10 * num_calls++;
9958 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
9968 /* somewhat similar to LDTOKEN */
9969 MonoInst *addr, *vtvar;
9970 CHECK_STACK_OVF (1);
9971 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
9973 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9974 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
9976 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9977 ins->type = STACK_VTYPE;
9978 ins->klass = mono_defaults.argumenthandle_class;
9991 * The following transforms:
9992 * CEE_CEQ into OP_CEQ
9993 * CEE_CGT into OP_CGT
9994 * CEE_CGT_UN into OP_CGT_UN
9995 * CEE_CLT into OP_CLT
9996 * CEE_CLT_UN into OP_CLT_UN
9998 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
10000 MONO_INST_NEW (cfg, ins, cmp->opcode);
10002 cmp->sreg1 = sp [0]->dreg;
10003 cmp->sreg2 = sp [1]->dreg;
10004 type_from_op (cmp, sp [0], sp [1]);
10006 if ((sp [0]->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
10007 cmp->opcode = OP_LCOMPARE;
10008 else if (sp [0]->type == STACK_R8)
10009 cmp->opcode = OP_FCOMPARE;
10011 cmp->opcode = OP_ICOMPARE;
10012 MONO_ADD_INS (bblock, cmp);
10013 ins->type = STACK_I4;
10014 ins->dreg = alloc_dreg (cfg, ins->type);
10015 type_from_op (ins, sp [0], sp [1]);
10017 if (cmp->opcode == OP_FCOMPARE) {
10019 * The backends expect the fceq opcodes to do the
10022 cmp->opcode = OP_NOP;
10023 ins->sreg1 = cmp->sreg1;
10024 ins->sreg2 = cmp->sreg2;
10026 MONO_ADD_INS (bblock, ins);
10032 MonoInst *argconst;
10033 MonoMethod *cil_method;
10034 gboolean needs_static_rgctx_invoke;
10036 CHECK_STACK_OVF (1);
10038 n = read32 (ip + 2);
10039 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
10040 if (!cmethod || mono_loader_get_last_error ())
10042 mono_class_init (cmethod->klass);
10044 mono_save_token_info (cfg, image, n, cmethod);
10046 if (cfg->generic_sharing_context)
10047 context_used = mono_method_check_context_used (cmethod);
10049 needs_static_rgctx_invoke = mono_method_needs_static_rgctx_invoke (cmethod, TRUE);
10051 cil_method = cmethod;
10052 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
10053 METHOD_ACCESS_FAILURE;
10055 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
10056 if (check_linkdemand (cfg, method, cmethod))
10058 CHECK_CFG_EXCEPTION;
10059 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
10060 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
10064 * Optimize the common case of ldftn+delegate creation
10066 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
10067 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
10068 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
10069 MonoInst *target_ins;
10070 MonoMethod *invoke;
10071 int invoke_context_used = 0;
10073 invoke = mono_get_delegate_invoke (ctor_method->klass);
10074 if (!invoke || !mono_method_signature (invoke))
10077 if (cfg->generic_sharing_context)
10078 invoke_context_used = mono_method_check_context_used (invoke);
10080 target_ins = sp [-1];
10082 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
10083 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
10084 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
10085 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
10086 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
10090 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
10091 /* FIXME: SGEN support */
10092 if (invoke_context_used == 0) {
10094 if (cfg->verbose_level > 3)
10095 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
10097 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used);
10098 CHECK_CFG_EXCEPTION;
10107 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
10108 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
10112 inline_costs += 10 * num_calls++;
10115 case CEE_LDVIRTFTN: {
10116 MonoInst *args [2];
10120 n = read32 (ip + 2);
10121 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
10122 if (!cmethod || mono_loader_get_last_error ())
10124 mono_class_init (cmethod->klass);
10126 if (cfg->generic_sharing_context)
10127 context_used = mono_method_check_context_used (cmethod);
10129 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
10130 if (check_linkdemand (cfg, method, cmethod))
10132 CHECK_CFG_EXCEPTION;
10133 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
10134 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
10140 args [1] = emit_get_rgctx_method (cfg, context_used,
10141 cmethod, MONO_RGCTX_INFO_METHOD);
10144 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
10146 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
10149 inline_costs += 10 * num_calls++;
10153 CHECK_STACK_OVF (1);
10155 n = read16 (ip + 2);
10157 EMIT_NEW_ARGLOAD (cfg, ins, n);
10162 CHECK_STACK_OVF (1);
10164 n = read16 (ip + 2);
10166 NEW_ARGLOADA (cfg, ins, n);
10167 MONO_ADD_INS (cfg->cbb, ins);
10175 n = read16 (ip + 2);
10177 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
10179 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
10183 CHECK_STACK_OVF (1);
10185 n = read16 (ip + 2);
10187 EMIT_NEW_LOCLOAD (cfg, ins, n);
10192 unsigned char *tmp_ip;
10193 CHECK_STACK_OVF (1);
10195 n = read16 (ip + 2);
10198 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
10204 EMIT_NEW_LOCLOADA (cfg, ins, n);
10213 n = read16 (ip + 2);
10215 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
10217 emit_stloc_ir (cfg, sp, header, n);
10224 if (sp != stack_start)
10226 if (cfg->method != method)
10228 * Inlining this into a loop in a parent could lead to
10229 * stack overflows which is different behavior than the
10230 * non-inlined case, thus disable inlining in this case.
10232 goto inline_failure;
10234 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
10235 ins->dreg = alloc_preg (cfg);
10236 ins->sreg1 = sp [0]->dreg;
10237 ins->type = STACK_PTR;
10238 MONO_ADD_INS (cfg->cbb, ins);
10240 cfg->flags |= MONO_CFG_HAS_ALLOCA;
10242 ins->flags |= MONO_INST_INIT;
10247 case CEE_ENDFILTER: {
10248 MonoExceptionClause *clause, *nearest;
10249 int cc, nearest_num;
10253 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
10255 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
10256 ins->sreg1 = (*sp)->dreg;
10257 MONO_ADD_INS (bblock, ins);
10258 start_new_bblock = 1;
10263 for (cc = 0; cc < header->num_clauses; ++cc) {
10264 clause = &header->clauses [cc];
10265 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
10266 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
10267 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
10272 g_assert (nearest);
10273 if ((ip - header->code) != nearest->handler_offset)
10278 case CEE_UNALIGNED_:
10279 ins_flag |= MONO_INST_UNALIGNED;
10280 /* FIXME: record alignment? we can assume 1 for now */
10284 case CEE_VOLATILE_:
10285 ins_flag |= MONO_INST_VOLATILE;
10289 ins_flag |= MONO_INST_TAILCALL;
10290 cfg->flags |= MONO_CFG_HAS_TAIL;
10291 /* Can't inline tail calls at this time */
10292 inline_costs += 100000;
10299 token = read32 (ip + 2);
10300 klass = mini_get_class (method, token, generic_context);
10301 CHECK_TYPELOAD (klass);
10302 if (generic_class_is_reference_type (cfg, klass))
10303 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
10305 mini_emit_initobj (cfg, *sp, NULL, klass);
10309 case CEE_CONSTRAINED_:
10311 token = read32 (ip + 2);
10312 if (method->wrapper_type != MONO_WRAPPER_NONE)
10313 constrained_call = (MonoClass *)mono_method_get_wrapper_data (method, token);
10315 constrained_call = mono_class_get_full (image, token, generic_context);
10316 CHECK_TYPELOAD (constrained_call);
10320 case CEE_INITBLK: {
10321 MonoInst *iargs [3];
10325 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
10326 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
10327 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
10328 /* emit_memset only works when val == 0 */
10329 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
10331 iargs [0] = sp [0];
10332 iargs [1] = sp [1];
10333 iargs [2] = sp [2];
10334 if (ip [1] == CEE_CPBLK) {
10335 MonoMethod *memcpy_method = get_memcpy_method ();
10336 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
10338 MonoMethod *memset_method = get_memset_method ();
10339 mono_emit_method_call (cfg, memset_method, iargs, NULL);
10349 ins_flag |= MONO_INST_NOTYPECHECK;
10351 ins_flag |= MONO_INST_NORANGECHECK;
10352 /* we ignore the no-nullcheck for now since we
10353 * really do it explicitly only when doing callvirt->call
10357 case CEE_RETHROW: {
10359 int handler_offset = -1;
10361 for (i = 0; i < header->num_clauses; ++i) {
10362 MonoExceptionClause *clause = &header->clauses [i];
10363 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
10364 handler_offset = clause->handler_offset;
10369 bblock->flags |= BB_EXCEPTION_UNSAFE;
10371 g_assert (handler_offset != -1);
10373 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
10374 MONO_INST_NEW (cfg, ins, OP_RETHROW);
10375 ins->sreg1 = load->dreg;
10376 MONO_ADD_INS (bblock, ins);
10378 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
10379 MONO_ADD_INS (bblock, ins);
10382 link_bblock (cfg, bblock, end_bblock);
10383 start_new_bblock = 1;
10391 CHECK_STACK_OVF (1);
10393 token = read32 (ip + 2);
10394 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !method->klass->image->dynamic && !generic_context) {
10395 MonoType *type = mono_type_create_from_typespec (image, token);
10396 token = mono_type_size (type, &ialign);
10398 MonoClass *klass = mono_class_get_full (image, token, generic_context);
10399 CHECK_TYPELOAD (klass);
10400 mono_class_init (klass);
10401 token = mono_class_value_size (klass, &align);
10403 EMIT_NEW_ICONST (cfg, ins, token);
10408 case CEE_REFANYTYPE: {
10409 MonoInst *src_var, *src;
10415 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
10417 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
10418 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
10419 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
10424 case CEE_READONLY_:
10437 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
10447 g_warning ("opcode 0x%02x not handled", *ip);
10451 if (start_new_bblock != 1)
10454 bblock->cil_length = ip - bblock->cil_code;
10455 bblock->next_bb = end_bblock;
10457 if (cfg->method == method && cfg->domainvar) {
10459 MonoInst *get_domain;
10461 cfg->cbb = init_localsbb;
10463 if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
10464 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
10467 get_domain->dreg = alloc_preg (cfg);
10468 MONO_ADD_INS (cfg->cbb, get_domain);
10470 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
10471 MONO_ADD_INS (cfg->cbb, store);
10474 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
10475 if (cfg->compile_aot)
10476 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
10477 mono_get_got_var (cfg);
10480 if (cfg->method == method && cfg->got_var)
10481 mono_emit_load_got_addr (cfg);
10486 cfg->cbb = init_localsbb;
10488 for (i = 0; i < header->num_locals; ++i) {
10489 MonoType *ptype = header->locals [i];
10490 int t = ptype->type;
10491 dreg = cfg->locals [i]->dreg;
10493 if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
10494 t = mono_class_enum_basetype (ptype->data.klass)->type;
10495 if (ptype->byref) {
10496 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
10497 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
10498 MONO_EMIT_NEW_ICONST (cfg, cfg->locals [i]->dreg, 0);
10499 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
10500 MONO_EMIT_NEW_I8CONST (cfg, cfg->locals [i]->dreg, 0);
10501 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
10502 MONO_INST_NEW (cfg, ins, OP_R8CONST);
10503 ins->type = STACK_R8;
10504 ins->inst_p0 = (void*)&r8_0;
10505 ins->dreg = alloc_dreg (cfg, STACK_R8);
10506 MONO_ADD_INS (init_localsbb, ins);
10507 EMIT_NEW_LOCSTORE (cfg, store, i, ins);
10508 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
10509 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
10510 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
10512 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
10517 if (cfg->init_ref_vars && cfg->method == method) {
10518 /* Emit initialization for ref vars */
10519 // FIXME: Avoid duplication initialization for IL locals.
10520 for (i = 0; i < cfg->num_varinfo; ++i) {
10521 MonoInst *ins = cfg->varinfo [i];
10523 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
10524 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
10528 /* Add a sequence point for method entry/exit events */
10530 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
10531 MONO_ADD_INS (init_localsbb, ins);
10532 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
10533 MONO_ADD_INS (cfg->bb_exit, ins);
10538 if (cfg->method == method) {
10539 MonoBasicBlock *bb;
10540 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10541 bb->region = mono_find_block_region (cfg, bb->real_offset);
10543 mono_create_spvar_for_region (cfg, bb->region);
10544 if (cfg->verbose_level > 2)
10545 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
10549 g_slist_free (class_inits);
10550 dont_inline = g_list_remove (dont_inline, method);
10552 if (inline_costs < 0) {
10555 /* Method is too large */
10556 mname = mono_method_full_name (method, TRUE);
10557 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
10558 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
10560 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
10561 mono_basic_block_free (original_bb);
10565 if ((cfg->verbose_level > 2) && (cfg->method == method))
10566 mono_print_code (cfg, "AFTER METHOD-TO-IR");
10568 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
10569 mono_basic_block_free (original_bb);
10570 return inline_costs;
10573 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
10580 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
10584 set_exception_type_from_invalid_il (cfg, method, ip);
10588 g_slist_free (class_inits);
10589 mono_basic_block_free (original_bb);
10590 dont_inline = g_list_remove (dont_inline, method);
10591 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Map an OP_STORE*_MEMBASE_REG opcode (store a register to [basereg+offset])
 * to the corresponding OP_STORE*_MEMBASE_IMM variant, which stores an
 * immediate instead.  Used when the source register is known to hold a
 * constant, so the register operand can be folded away.
 * Asserts if the opcode has no immediate form.
 * NOTE(review): the switch scaffolding is elided in this excerpt; the mapping
 * below pairs each _REG opcode with its _IMM twin.
 */
10596 store_membase_reg_to_store_membase_imm (int opcode)
10599 case OP_STORE_MEMBASE_REG:
10600 return OP_STORE_MEMBASE_IMM;
10601 case OP_STOREI1_MEMBASE_REG:
10602 return OP_STOREI1_MEMBASE_IMM;
10603 case OP_STOREI2_MEMBASE_REG:
10604 return OP_STOREI2_MEMBASE_IMM;
10605 case OP_STOREI4_MEMBASE_REG:
10606 return OP_STOREI4_MEMBASE_IMM;
10607 case OP_STOREI8_MEMBASE_REG:
10608 return OP_STOREI8_MEMBASE_IMM;
/* No immediate form exists for any other store opcode */
10610 g_assert_not_reached ();
10616 #endif /* DISABLE_JIT */
/*
 * mono_op_to_op_imm:
 *
 *   Map an ALU/compare/store opcode taking a register operand to the variant
 * taking an immediate operand, enabling constant folding of the second
 * operand into the instruction itself.
 * NOTE(review): the `case` labels for most entries are elided in this
 * excerpt; each return below presumably corresponds to the opcode named in
 * the returned _IMM form (e.g. OP_IADD -> OP_IADD_IMM) — verify against the
 * full source.
 */
10619 mono_op_to_op_imm (int opcode)
/* 32-bit integer ALU ops */
10623 return OP_IADD_IMM;
10625 return OP_ISUB_IMM;
10627 return OP_IDIV_IMM;
10629 return OP_IDIV_UN_IMM;
10631 return OP_IREM_IMM;
10633 return OP_IREM_UN_IMM;
10635 return OP_IMUL_IMM;
10637 return OP_IAND_IMM;
10641 return OP_IXOR_IMM;
10643 return OP_ISHL_IMM;
10645 return OP_ISHR_IMM;
10647 return OP_ISHR_UN_IMM;
/* 64-bit integer ALU ops */
10650 return OP_LADD_IMM;
10652 return OP_LSUB_IMM;
10654 return OP_LAND_IMM;
10658 return OP_LXOR_IMM;
10660 return OP_LSHL_IMM;
10662 return OP_LSHR_IMM;
10664 return OP_LSHR_UN_IMM;
/* Compares */
10667 return OP_COMPARE_IMM;
10669 return OP_ICOMPARE_IMM;
10671 return OP_LCOMPARE_IMM;
/* Stores: fold a constant source register into a _MEMBASE_IMM store */
10673 case OP_STORE_MEMBASE_REG:
10674 return OP_STORE_MEMBASE_IMM;
10675 case OP_STOREI1_MEMBASE_REG:
10676 return OP_STOREI1_MEMBASE_IMM;
10677 case OP_STOREI2_MEMBASE_REG:
10678 return OP_STOREI2_MEMBASE_IMM;
10679 case OP_STOREI4_MEMBASE_REG:
10680 return OP_STOREI4_MEMBASE_IMM;
/* x86/amd64-specific opcodes with immediate forms */
10682 #if defined(TARGET_X86) || defined (TARGET_AMD64)
10684 return OP_X86_PUSH_IMM;
10685 case OP_X86_COMPARE_MEMBASE_REG:
10686 return OP_X86_COMPARE_MEMBASE_IMM;
10688 #if defined(TARGET_AMD64)
10689 case OP_AMD64_ICOMPARE_MEMBASE_REG:
10690 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
/* An indirect call through a constant address becomes a direct call */
10692 case OP_VOIDCALL_REG:
10693 return OP_VOIDCALL;
10701 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Map a CIL CEE_LDIND_* opcode to the corresponding low level
 * OP_LOAD*_MEMBASE opcode.  CEE_LDIND_REF maps to the pointer sized
 * OP_LOAD_MEMBASE.  Aborts on anything else.
 */
10708 ldind_to_load_membase (int opcode)
10712 return OP_LOADI1_MEMBASE;
10714 return OP_LOADU1_MEMBASE;
10716 return OP_LOADI2_MEMBASE;
10718 return OP_LOADU2_MEMBASE;
10720 return OP_LOADI4_MEMBASE;
10722 return OP_LOADU4_MEMBASE;
10724 return OP_LOAD_MEMBASE;
10725 case CEE_LDIND_REF:
10726 return OP_LOAD_MEMBASE;
10728 return OP_LOADI8_MEMBASE;
10730 return OP_LOADR4_MEMBASE;
10732 return OP_LOADR8_MEMBASE;
10734 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Map a CIL CEE_STIND_* opcode to the corresponding low level
 * OP_STORE*_MEMBASE_REG opcode.  CEE_STIND_REF maps to the pointer
 * sized OP_STORE_MEMBASE_REG.  Aborts on anything else.
 */
10741 stind_to_store_membase (int opcode)
10745 return OP_STOREI1_MEMBASE_REG;
10747 return OP_STOREI2_MEMBASE_REG;
10749 return OP_STOREI4_MEMBASE_REG;
10751 case CEE_STIND_REF:
10752 return OP_STORE_MEMBASE_REG;
10754 return OP_STOREI8_MEMBASE_REG;
10756 return OP_STORER4_MEMBASE_REG;
10758 return OP_STORER8_MEMBASE_REG;
10760 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map an OP_LOAD*_MEMBASE opcode to the OP_LOAD*_MEM form which loads
 * from an absolute address, used when the base register holds a constant.
 * Only implemented for x86/amd64 (see the FIXME), and 8 byte loads only
 * when registers are 64 bit wide.
 * NOTE(review): the fallback return for other opcodes/targets is elided
 * in this excerpt -- presumably -1, matching the other op_to_op helpers.
 */
10767 mono_load_membase_to_load_mem (int opcode)
10769 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
10770 #if defined(TARGET_X86) || defined(TARGET_AMD64)
10772 case OP_LOAD_MEMBASE:
10773 return OP_LOAD_MEM;
10774 case OP_LOADU1_MEMBASE:
10775 return OP_LOADU1_MEM;
10776 case OP_LOADU2_MEMBASE:
10777 return OP_LOADU2_MEM;
10778 case OP_LOADI4_MEMBASE:
10779 return OP_LOADI4_MEM;
10780 case OP_LOADU4_MEMBASE:
10781 return OP_LOADU4_MEM;
10782 #if SIZEOF_REGISTER == 8
10783 case OP_LOADI8_MEMBASE:
10784 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   Return a read-modify-write <op>_MEMBASE opcode which can replace
 * OPCODE when its destination is a stack slot stored through
 * STORE_OPCODE, or -1 when no such fusion exists (callers in
 * mono_spill_global_vars () compare the result against -1).
 * x86 only fuses pointer sized/32 bit stores; amd64 additionally
 * handles 64 bit stores via the OP_AMD64_* forms.
 * NOTE(review): the case labels are elided in this excerpt; only the
 * returned opcodes are visible below.
 */
10793 op_to_op_dest_membase (int store_opcode, int opcode)
10795 #if defined(TARGET_X86)
10796 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
10801 return OP_X86_ADD_MEMBASE_REG;
10803 return OP_X86_SUB_MEMBASE_REG;
10805 return OP_X86_AND_MEMBASE_REG;
10807 return OP_X86_OR_MEMBASE_REG;
10809 return OP_X86_XOR_MEMBASE_REG;
10812 return OP_X86_ADD_MEMBASE_IMM;
10815 return OP_X86_SUB_MEMBASE_IMM;
10818 return OP_X86_AND_MEMBASE_IMM;
10821 return OP_X86_OR_MEMBASE_IMM;
10824 return OP_X86_XOR_MEMBASE_IMM;
10830 #if defined(TARGET_AMD64)
10831 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
10836 return OP_X86_ADD_MEMBASE_REG;
10838 return OP_X86_SUB_MEMBASE_REG;
10840 return OP_X86_AND_MEMBASE_REG;
10842 return OP_X86_OR_MEMBASE_REG;
10844 return OP_X86_XOR_MEMBASE_REG;
10846 return OP_X86_ADD_MEMBASE_IMM;
10848 return OP_X86_SUB_MEMBASE_IMM;
10850 return OP_X86_AND_MEMBASE_IMM;
10852 return OP_X86_OR_MEMBASE_IMM;
10854 return OP_X86_XOR_MEMBASE_IMM;
/* 64 bit forms */
10856 return OP_AMD64_ADD_MEMBASE_REG;
10858 return OP_AMD64_SUB_MEMBASE_REG;
10860 return OP_AMD64_AND_MEMBASE_REG;
10862 return OP_AMD64_OR_MEMBASE_REG;
10864 return OP_AMD64_XOR_MEMBASE_REG;
10867 return OP_AMD64_ADD_MEMBASE_IMM;
10870 return OP_AMD64_SUB_MEMBASE_IMM;
10873 return OP_AMD64_AND_MEMBASE_IMM;
10876 return OP_AMD64_OR_MEMBASE_IMM;
10879 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Return an opcode which stores the result of OPCODE directly into a
 * stack slot accessed through STORE_OPCODE, avoiding a separate store.
 * On x86/amd64 this fuses SETcc results into byte sized stores
 * (OP_X86_SETEQ/SETNE_MEMBASE).  Callers compare the result against -1
 * for "no fusion available".
 */
10889 op_to_op_store_membase (int store_opcode, int opcode)
10891 #if defined(TARGET_X86) || defined(TARGET_AMD64)
10894 if (store_opcode == OP_STOREI1_MEMBASE_REG)
10895 return OP_X86_SETEQ_MEMBASE;
10897 if (store_opcode == OP_STOREI1_MEMBASE_REG)
10898 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Return an opcode which can replace OPCODE when its first source
 * operand is loaded from a stack slot through LOAD_OPCODE, i.e. fold
 * the load into the instruction itself.  Returns -1 when no fusion is
 * possible (callers compare against -1).  Only pointer sized/32 bit
 * loads are handled on x86; amd64 distinguishes 32 and 64 bit compares,
 * with extra care under ILP32 (__mono_ilp32__) where OP_LOAD_MEMBASE
 * is 32 bit wide.
 */
10906 op_to_op_src1_membase (int load_opcode, int opcode)
10909 /* FIXME: This has sign extension issues */
10911 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
10912 return OP_X86_COMPARE_MEMBASE8_IMM;
10915 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
10920 return OP_X86_PUSH_MEMBASE;
10921 case OP_COMPARE_IMM:
10922 case OP_ICOMPARE_IMM:
10923 return OP_X86_COMPARE_MEMBASE_IMM;
10926 return OP_X86_COMPARE_MEMBASE_REG;
10930 #ifdef TARGET_AMD64
10931 /* FIXME: This has sign extension issues */
10933 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
10934 return OP_X86_COMPARE_MEMBASE8_IMM;
10939 #ifdef __mono_ilp32__
10940 if (load_opcode == OP_LOADI8_MEMBASE)
10942 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10944 return OP_X86_PUSH_MEMBASE;
10946 /* FIXME: This only works for 32 bit immediates
10947 case OP_COMPARE_IMM:
10948 case OP_LCOMPARE_IMM:
10949 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10950 return OP_AMD64_COMPARE_MEMBASE_IMM;
10952 case OP_ICOMPARE_IMM:
10953 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10954 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
10958 #ifdef __mono_ilp32__
10959 if (load_opcode == OP_LOAD_MEMBASE)
10960 return OP_AMD64_ICOMPARE_MEMBASE_REG;
10961 if (load_opcode == OP_LOADI8_MEMBASE)
10963 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10965 return OP_AMD64_COMPARE_MEMBASE_REG;
10968 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10969 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Same as op_to_op_src1_membase () but for the second source operand:
 * return a <op>_REG_MEMBASE opcode which folds the load described by
 * LOAD_OPCODE into OPCODE, or -1 when no fusion is possible.
 * On amd64 the 32 bit loads map to the OP_X86_*_REG_MEMBASE forms and
 * the 64 bit loads to OP_AMD64_*_REG_MEMBASE; under ILP32
 * (__mono_ilp32__) OP_LOAD_MEMBASE counts as 32 bit.
 */
10978 op_to_op_src2_membase (int load_opcode, int opcode)
10981 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
10987 return OP_X86_COMPARE_REG_MEMBASE;
10989 return OP_X86_ADD_REG_MEMBASE;
10991 return OP_X86_SUB_REG_MEMBASE;
10993 return OP_X86_AND_REG_MEMBASE;
10995 return OP_X86_OR_REG_MEMBASE;
10997 return OP_X86_XOR_REG_MEMBASE;
11001 #ifdef TARGET_AMD64
11002 #ifdef __mono_ilp32__
11003 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE) ) {
11005 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)) {
11009 return OP_AMD64_ICOMPARE_REG_MEMBASE;
11011 return OP_X86_ADD_REG_MEMBASE;
11013 return OP_X86_SUB_REG_MEMBASE;
11015 return OP_X86_AND_REG_MEMBASE;
11017 return OP_X86_OR_REG_MEMBASE;
11019 return OP_X86_XOR_REG_MEMBASE;
11021 #ifdef __mono_ilp32__
11022 } else if (load_opcode == OP_LOADI8_MEMBASE) {
11024 } else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE)) {
11029 return OP_AMD64_COMPARE_REG_MEMBASE;
11031 return OP_AMD64_ADD_REG_MEMBASE;
11033 return OP_AMD64_SUB_REG_MEMBASE;
11035 return OP_AMD64_AND_REG_MEMBASE;
11037 return OP_AMD64_OR_REG_MEMBASE;
11039 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but refuse the conversion for opcodes
 * which are emulated in software on this target (long shifts on 32 bit
 * registers, mul/div when MONO_ARCH_EMULATE_MUL_DIV/DIV is defined),
 * since the emulation helpers need register operands.
 * NOTE(review): the excluded case labels and their return are elided in
 * this excerpt.
 */
11048 mono_op_to_op_imm_noemul (int opcode)
11051 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
11057 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
11065 return mono_op_to_op_imm (opcode);
11069 #ifndef DISABLE_JIT
11072 * mono_handle_global_vregs:
11074 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
/*
 * mono_handle_global_vregs:
 *
 *   Make vregs used in more than one bblock 'global', i.e. allocate a
 * MonoInst variable for them, then convert variables used in only one
 * bblock back into local vregs, and finally compress cfg->varinfo/vars
 * by dropping variables marked MONO_INST_IS_DEAD.
 */
11078 mono_handle_global_vregs (MonoCompile *cfg)
11080 gint32 *vreg_to_bb;
11081 MonoBasicBlock *bb;
/*
 * One gint32 slot per vreg: 0 = unseen, block_num + 1 = seen in exactly one
 * bblock, -1 = seen in more than one bblock.
 * FIX(review): the element type is gint32, not gint32* (the old form
 * over-allocated on 64 bit), and the "+ 1" must apply to the element count,
 * not to the byte count.
 */
11084 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32) * (cfg->next_vreg + 1));
11086 #ifdef MONO_ARCH_SIMD_INTRINSICS
11087 if (cfg->uses_simd_intrinsics)
11088 mono_simd_simplify_indirection (cfg);
11091 /* Find local vregs used in more than one bb */
11092 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
11093 MonoInst *ins = bb->code;
11094 int block_num = bb->block_num;
11096 if (cfg->verbose_level > 2)
11097 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
11100 for (; ins; ins = ins->next) {
11101 const char *spec = INS_INFO (ins->opcode);
11102 int regtype = 0, regindex;
11105 if (G_UNLIKELY (cfg->verbose_level > 2))
11106 mono_print_ins (ins);
11108 g_assert (ins->opcode >= MONO_CEE_LAST);
/* Visit dreg, sreg1, sreg2, sreg3 of each instruction in turn */
11110 for (regindex = 0; regindex < 4; regindex ++) {
11113 if (regindex == 0) {
11114 regtype = spec [MONO_INST_DEST];
11115 if (regtype == ' ')
11118 } else if (regindex == 1) {
11119 regtype = spec [MONO_INST_SRC1];
11120 if (regtype == ' ')
11123 } else if (regindex == 2) {
11124 regtype = spec [MONO_INST_SRC2];
11125 if (regtype == ' ')
11128 } else if (regindex == 3) {
11129 regtype = spec [MONO_INST_SRC3];
11130 if (regtype == ' ')
11135 #if SIZEOF_REGISTER == 4
11136 /* In the LLVM case, the long opcodes are not decomposed */
11137 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
11139 * Since some instructions reference the original long vreg,
11140 * and some reference the two component vregs, it is quite hard
11141 * to determine when it needs to be global. So be conservative.
11143 if (!get_vreg_to_inst (cfg, vreg)) {
11144 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
11146 if (cfg->verbose_level > 2)
11147 printf ("LONG VREG R%d made global.\n", vreg);
11151 * Make the component vregs volatile since the optimizations can
11152 * get confused otherwise.
11154 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
11155 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
11159 g_assert (vreg != -1);
11161 prev_bb = vreg_to_bb [vreg];
11162 if (prev_bb == 0) {
11163 /* 0 is a valid block num */
11164 vreg_to_bb [vreg] = block_num + 1;
11165 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
11166 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
/* Used in more than one bblock: promote to a global variable of the right type */
11169 if (!get_vreg_to_inst (cfg, vreg)) {
11170 if (G_UNLIKELY (cfg->verbose_level > 2))
11171 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
11175 if (vreg_is_ref (cfg, vreg))
11176 mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
11178 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
11181 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
11184 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
11187 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
11190 g_assert_not_reached ();
11194 /* Flag as having been used in more than one bb */
11195 vreg_to_bb [vreg] = -1;
11201 /* If a variable is used in only one bblock, convert it into a local vreg */
11202 for (i = 0; i < cfg->num_varinfo; i++) {
11203 MonoInst *var = cfg->varinfo [i];
11204 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
11206 switch (var->type) {
11212 #if SIZEOF_REGISTER == 8
11215 #if !defined(TARGET_X86) && !defined(MONO_ARCH_SOFT_FLOAT)
11216 /* Enabling this screws up the fp stack on x86 */
11219 /* Arguments are implicitly global */
11220 /* Putting R4 vars into registers doesn't work currently */
11221 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg) {
/*
11223 * Make sure that the variable's liveness interval doesn't contain a call, since
11224 * that would cause the lvreg to be spilled, making the whole optimization
 * useless.
 */
11227 /* This is too slow for JIT compilation */
11229 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
11231 int def_index, call_index, ins_index;
11232 gboolean spilled = FALSE;
11237 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
11238 const char *spec = INS_INFO (ins->opcode);
11240 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
11241 def_index = ins_index;
/* FIX(review): the second clause duplicated the SRC1/sreg1 test; it clearly
 * has to check the second source register, mirroring the regindex loop above. */
11243 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
11244 ((spec [MONO_INST_SRC2] != ' ') && (ins->sreg2 == var->dreg))) {
11245 if (call_index > def_index) {
11251 if (MONO_IS_CALL (ins))
11252 call_index = ins_index;
11262 if (G_UNLIKELY (cfg->verbose_level > 2))
11263 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
11264 var->flags |= MONO_INST_IS_DEAD;
11265 cfg->vreg_to_inst [var->dreg] = NULL;
/*
11272 * Compress the varinfo and vars tables so the liveness computation is faster and
11273 * takes up less space.
 */
11276 for (i = 0; i < cfg->num_varinfo; ++i) {
11277 MonoInst *var = cfg->varinfo [i];
11278 if (pos < i && cfg->locals_start == i)
11279 cfg->locals_start = pos;
11280 if (!(var->flags & MONO_INST_IS_DEAD)) {
11282 cfg->varinfo [pos] = cfg->varinfo [i];
11283 cfg->varinfo [pos]->inst_c0 = pos;
11284 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
11285 cfg->vars [pos].idx = pos;
11286 #if SIZEOF_REGISTER == 4
11287 if (cfg->varinfo [pos]->type == STACK_I8) {
11288 /* Modify the two component vars too */
11291 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
11292 var1->inst_c0 = pos;
11293 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
11294 var1->inst_c0 = pos;
11301 cfg->num_varinfo = pos;
11302 if (cfg->locals_start > cfg->num_varinfo)
11303 cfg->locals_start = cfg->num_varinfo;
11307 * mono_spill_global_vars:
11309 * Generate spill code for variables which are not allocated to registers,
11310 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
11311 * code is generated which could be optimized by the local optimization passes.
/*
 * mono_spill_global_vars:
 *
 *   Generate spill code for variables which are not allocated to
 * registers, and replace vregs with their allocated hregs.
 * *need_local_opts is set to TRUE if code is generated which could be
 * optimized by the local optimization passes.  Also records instruction
 * precise live ranges and, when GC maps are requested, inserts
 * OP_GC_LIVENESS_DEF/USE markers.
 */
11314 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
11316 MonoBasicBlock *bb;
11318 int orig_next_vreg;
11319 guint32 *vreg_to_lvreg;
11321 guint32 i, lvregs_len;
11322 gboolean dest_has_lvreg = FALSE;
11323 guint32 stacktypes [128];
11324 MonoInst **live_range_start, **live_range_end;
11325 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
11327 *need_local_opts = FALSE;
11329 memset (spec2, 0, sizeof (spec2));
/* Map ins-spec register-type characters to stack types for alloc_dreg () */
11331 /* FIXME: Move this function to mini.c */
11332 stacktypes ['i'] = STACK_PTR;
11333 stacktypes ['l'] = STACK_I8;
11334 stacktypes ['f'] = STACK_R8;
11335 #ifdef MONO_ARCH_SIMD_INTRINSICS
11336 stacktypes ['x'] = STACK_VTYPE;
11339 #if SIZEOF_REGISTER == 4
11340 /* Create MonoInsts for longs */
11341 for (i = 0; i < cfg->num_varinfo; i++) {
11342 MonoInst *ins = cfg->varinfo [i];
11344 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
11345 switch (ins->type) {
11350 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
11353 g_assert (ins->opcode == OP_REGOFFSET);
/* Give the two 32 bit component vregs (dreg + 1/+ 2) their own stack slots */
11355 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
11357 tree->opcode = OP_REGOFFSET;
11358 tree->inst_basereg = ins->inst_basereg;
11359 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
11361 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
11363 tree->opcode = OP_REGOFFSET;
11364 tree->inst_basereg = ins->inst_basereg;
11365 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
11375 if (cfg->compute_gc_maps) {
11376 /* registers need liveness info even for !non refs */
11377 for (i = 0; i < cfg->num_varinfo; i++) {
11378 MonoInst *ins = cfg->varinfo [i];
11380 if (ins->opcode == OP_REGVAR)
11381 ins->flags |= MONO_INST_GC_TRACK;
11385 /* FIXME: widening and truncation */
/*
11388 * As an optimization, when a variable allocated to the stack is first loaded into
11389 * an lvreg, we will remember the lvreg and use it the next time instead of loading
11390 * the variable again.
 */
11392 orig_next_vreg = cfg->next_vreg;
11393 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
11394 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
/*
11398 * These arrays contain the first and last instructions accessing a given
 * variable.
11400 * Since we emit bblocks in the same order we process them here, and we
11401 * don't split live ranges, these will precisely describe the live range of
11402 * the variable, i.e. the instruction range where a valid value can be found
11403 * in the variables location.
11404 * The live range is computed using the liveness info computed by the liveness pass.
11405 * We can't use vmv->range, since that is an abstract live range, and we need
11406 * one which is instruction precise.
11407 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
 */
11409 /* FIXME: Only do this if debugging info is requested */
11410 live_range_start = g_new0 (MonoInst*, cfg->next_vreg)
11411 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
11412 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
11413 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
11415 /* Add spill loads/stores */
11416 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
11419 if (cfg->verbose_level > 2)
11420 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
11422 /* Clear vreg_to_lvreg array */
11423 for (i = 0; i < lvregs_len; i++)
11424 vreg_to_lvreg [lvregs [i]] = 0;
11428 MONO_BB_FOR_EACH_INS (bb, ins) {
11429 const char *spec = INS_INFO (ins->opcode);
11430 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
11431 gboolean store, no_lvreg;
11432 int sregs [MONO_MAX_SRC_REGS];
11434 if (G_UNLIKELY (cfg->verbose_level > 2))
11435 mono_print_ins (ins);
11437 if (ins->opcode == OP_NOP)
/*
11441 * We handle LDADDR here as well, since it can only be decomposed
11442 * when variable addresses are known.
 */
11444 if (ins->opcode == OP_LDADDR) {
11445 MonoInst *var = ins->inst_p0;
11447 if (var->opcode == OP_VTARG_ADDR) {
11448 /* Happens on SPARC/S390 where vtypes are passed by reference */
11449 MonoInst *vtaddr = var->inst_left;
11450 if (vtaddr->opcode == OP_REGVAR) {
11451 ins->opcode = OP_MOVE;
11452 ins->sreg1 = vtaddr->dreg;
11454 else if (var->inst_left->opcode == OP_REGOFFSET) {
11455 ins->opcode = OP_LOAD_MEMBASE;
11456 ins->inst_basereg = vtaddr->inst_basereg;
11457 ins->inst_offset = vtaddr->inst_offset;
11461 g_assert (var->opcode == OP_REGOFFSET);
/* Decompose LDADDR into base register + offset */
11463 ins->opcode = OP_ADD_IMM;
11464 ins->sreg1 = var->inst_basereg;
11465 ins->inst_imm = var->inst_offset;
11468 *need_local_opts = TRUE;
11469 spec = INS_INFO (ins->opcode);
/* All CIL level opcodes should have been lowered by now */
11472 if (ins->opcode < MONO_CEE_LAST) {
11473 mono_print_ins (ins);
11474 g_assert_not_reached ();
/*
11478 * Store opcodes have destbasereg in the dreg, but in reality, it is an
 * src register; temporarily swap dreg/sreg2 and use the spec2 view below.
 */
11482 if (MONO_IS_STORE_MEMBASE (ins)) {
11483 tmp_reg = ins->dreg;
11484 ins->dreg = ins->sreg2;
11485 ins->sreg2 = tmp_reg;
11488 spec2 [MONO_INST_DEST] = ' ';
11489 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
11490 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
11491 spec2 [MONO_INST_SRC3] = ' ';
11493 } else if (MONO_IS_STORE_MEMINDEX (ins))
11494 g_assert_not_reached ();
11499 if (G_UNLIKELY (cfg->verbose_level > 2)) {
11500 printf ("\t %.3s %d", spec, ins->dreg);
11501 num_sregs = mono_inst_get_src_registers (ins, sregs);
11502 for (srcindex = 0; srcindex < 3; ++srcindex)
11503 printf (" %d", sregs [srcindex]);
/***************/
/*    DREG     */
/***************/
11510 regtype = spec [MONO_INST_DEST];
11511 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
11514 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
11515 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
11516 MonoInst *store_ins;
11518 MonoInst *def_ins = ins;
11519 int dreg = ins->dreg; /* The original vreg */
11521 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
11523 if (var->opcode == OP_REGVAR) {
11524 ins->dreg = var->dreg;
11525 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
/*
11527 * Instead of emitting a load+store, use a _membase opcode.
 */
11529 g_assert (var->opcode == OP_REGOFFSET);
11530 if (ins->opcode == OP_MOVE) {
11534 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
11535 ins->inst_basereg = var->inst_basereg;
11536 ins->inst_offset = var->inst_offset;
11539 spec = INS_INFO (ins->opcode);
11543 g_assert (var->opcode == OP_REGOFFSET);
11545 prev_dreg = ins->dreg;
11547 /* Invalidate any previous lvreg for this vreg */
11548 vreg_to_lvreg [ins->dreg] = 0;
11552 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
11554 store_opcode = OP_STOREI8_MEMBASE_REG;
/* Redirect the definition into a fresh lvreg, spilled to the stack below */
11557 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
11559 if (regtype == 'l') {
11560 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
11561 mono_bblock_insert_after_ins (bb, ins, store_ins);
11562 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
11563 mono_bblock_insert_after_ins (bb, ins, store_ins);
11564 def_ins = store_ins;
11567 g_assert (store_opcode != OP_STOREV_MEMBASE);
11569 /* Try to fuse the store into the instruction itself */
11570 /* FIXME: Add more instructions */
11571 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
11572 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
11573 ins->inst_imm = ins->inst_c0;
11574 ins->inst_destbasereg = var->inst_basereg;
11575 ins->inst_offset = var->inst_offset;
11576 spec = INS_INFO (ins->opcode);
11577 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
11578 ins->opcode = store_opcode;
11579 ins->inst_destbasereg = var->inst_basereg;
11580 ins->inst_offset = var->inst_offset;
/* The instruction became a store: apply the dreg/sreg2 swap + spec2 view */
11584 tmp_reg = ins->dreg;
11585 ins->dreg = ins->sreg2;
11586 ins->sreg2 = tmp_reg;
11589 spec2 [MONO_INST_DEST] = ' ';
11590 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
11591 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
11592 spec2 [MONO_INST_SRC3] = ' ';
11594 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
11595 // FIXME: The backends expect the base reg to be in inst_basereg
11596 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
11598 ins->inst_basereg = var->inst_basereg;
11599 ins->inst_offset = var->inst_offset;
11600 spec = INS_INFO (ins->opcode);
11602 /* printf ("INS: "); mono_print_ins (ins); */
11603 /* Create a store instruction */
11604 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
11606 /* Insert it after the instruction */
11607 mono_bblock_insert_after_ins (bb, ins, store_ins);
11609 def_ins = store_ins;
/*
11612 * We can't assign ins->dreg to var->dreg here, since the
11613 * sregs could use it. So set a flag, and do it after
 * the sregs have been processed.
 */
11616 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
11617 dest_has_lvreg = TRUE;
11622 if (def_ins && !live_range_start [dreg]) {
11623 live_range_start [dreg] = def_ins;
11624 live_range_start_bb [dreg] = bb;
11627 if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
11630 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
11631 tmp->inst_c1 = dreg;
11632 mono_bblock_insert_after_ins (bb, def_ins, tmp);
/***************/
/*    SREGS    */
/***************/
11639 num_sregs = mono_inst_get_src_registers (ins, sregs);
11640 for (srcindex = 0; srcindex < 3; ++srcindex) {
11641 regtype = spec [MONO_INST_SRC1 + srcindex];
11642 sreg = sregs [srcindex];
11644 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
11645 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
11646 MonoInst *var = get_vreg_to_inst (cfg, sreg);
11647 MonoInst *use_ins = ins;
11648 MonoInst *load_ins;
11649 guint32 load_opcode;
11651 if (var->opcode == OP_REGVAR) {
11652 sregs [srcindex] = var->dreg;
11653 //mono_inst_set_src_registers (ins, sregs);
11654 live_range_end [sreg] = use_ins;
11655 live_range_end_bb [sreg] = bb;
11657 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
11660 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
11661 /* var->dreg is a hreg */
11662 tmp->inst_c1 = sreg;
11663 mono_bblock_insert_after_ins (bb, ins, tmp);
11669 g_assert (var->opcode == OP_REGOFFSET);
11671 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
11673 g_assert (load_opcode != OP_LOADV_MEMBASE);
11675 if (vreg_to_lvreg [sreg]) {
11676 g_assert (vreg_to_lvreg [sreg] != -1);
11678 /* The variable is already loaded to an lvreg */
11679 if (G_UNLIKELY (cfg->verbose_level > 2))
11680 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
11681 sregs [srcindex] = vreg_to_lvreg [sreg];
11682 //mono_inst_set_src_registers (ins, sregs);
11686 /* Try to fuse the load into the instruction */
11687 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
11688 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
11689 sregs [0] = var->inst_basereg;
11690 //mono_inst_set_src_registers (ins, sregs);
11691 ins->inst_offset = var->inst_offset;
11692 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
11693 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
11694 sregs [1] = var->inst_basereg;
11695 //mono_inst_set_src_registers (ins, sregs);
11696 ins->inst_offset = var->inst_offset;
11698 if (MONO_IS_REAL_MOVE (ins)) {
11699 ins->opcode = OP_NOP;
11702 //printf ("%d ", srcindex); mono_print_ins (ins);
11704 sreg = alloc_dreg (cfg, stacktypes [regtype]);
/* Remember the new lvreg so later uses of this vreg can reuse it */
11706 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
11707 if (var->dreg == prev_dreg) {
/*
11709 * sreg refers to the value loaded by the load
11710 * emitted below, but we need to use ins->dreg
11711 * since it refers to the store emitted earlier.
 */
11715 g_assert (sreg != -1);
11716 vreg_to_lvreg [var->dreg] = sreg;
11717 g_assert (lvregs_len < 1024);
11718 lvregs [lvregs_len ++] = var->dreg;
11722 sregs [srcindex] = sreg;
11723 //mono_inst_set_src_registers (ins, sregs);
11725 if (regtype == 'l') {
11726 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
11727 mono_bblock_insert_before_ins (bb, ins, load_ins);
11728 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
11729 mono_bblock_insert_before_ins (bb, ins, load_ins);
11730 use_ins = load_ins;
11733 #if SIZEOF_REGISTER == 4
11734 g_assert (load_opcode != OP_LOADI8_MEMBASE);
11736 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
11737 mono_bblock_insert_before_ins (bb, ins, load_ins);
11738 use_ins = load_ins;
11742 if (var->dreg < orig_next_vreg) {
11743 live_range_end [var->dreg] = use_ins;
11744 live_range_end_bb [var->dreg] = bb;
11747 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
11750 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
11751 tmp->inst_c1 = var->dreg;
11752 mono_bblock_insert_after_ins (bb, ins, tmp);
11756 mono_inst_set_src_registers (ins, sregs);
/* Deferred from the DREG pass: now that sregs are done, publish the lvreg */
11758 if (dest_has_lvreg) {
11759 g_assert (ins->dreg != -1);
11760 vreg_to_lvreg [prev_dreg] = ins->dreg;
11761 g_assert (lvregs_len < 1024);
11762 lvregs [lvregs_len ++] = prev_dreg;
11763 dest_has_lvreg = FALSE;
/* Undo the temporary dreg/sreg2 swap done for store opcodes */
11767 tmp_reg = ins->dreg;
11768 ins->dreg = ins->sreg2;
11769 ins->sreg2 = tmp_reg;
11772 if (MONO_IS_CALL (ins)) {
11773 /* Clear vreg_to_lvreg array */
11774 for (i = 0; i < lvregs_len; i++)
11775 vreg_to_lvreg [lvregs [i]] = 0;
11777 } else if (ins->opcode == OP_NOP) {
11779 MONO_INST_NULLIFY_SREGS (ins);
11782 if (cfg->verbose_level > 2)
11783 mono_print_ins_index (1, ins);
11786 /* Extend the live range based on the liveness info */
11787 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
11788 for (i = 0; i < cfg->num_varinfo; i ++) {
11789 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
11791 if (vreg_is_volatile (cfg, vi->vreg))
11792 /* The liveness info is incomplete */
11795 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
11796 /* Live from at least the first ins of this bb */
11797 live_range_start [vi->vreg] = bb->code;
11798 live_range_start_bb [vi->vreg] = bb;
11801 if (mono_bitset_test_fast (bb->live_out_set, i)) {
11802 /* Live at least until the last ins of this bb */
11803 live_range_end [vi->vreg] = bb->last_ins;
11804 live_range_end_bb [vi->vreg] = bb;
11810 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
/*
11812 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
11813 * by storing the current native offset into MonoMethodVar->live_range_start/end.
 */
11815 if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
11816 for (i = 0; i < cfg->num_varinfo; ++i) {
11817 int vreg = MONO_VARINFO (cfg, i)->vreg;
11820 if (live_range_start [vreg]) {
11821 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
11823 ins->inst_c1 = vreg;
11824 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
11826 if (live_range_end [vreg]) {
11827 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
11829 ins->inst_c1 = vreg;
11830 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
11831 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
11833 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
11839 g_free (live_range_start);
11840 g_free (live_range_end);
11841 g_free (live_range_start_bb);
11842 g_free (live_range_end_bb);
11847 * - use 'iadd' instead of 'int_add'
11848 * - handling ovf opcodes: decompose in method_to_ir.
11849 * - unify iregs/fregs
11850 * -> partly done, the missing parts are:
11851 * - a more complete unification would involve unifying the hregs as well, so
11852 * code wouldn't need if (fp) all over the place. but that would mean the hregs
11853 * would no longer map to the machine hregs, so the code generators would need to
11854 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
11855 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
11856 * fp/non-fp branches speeds it up by about 15%.
11857 * - use sext/zext opcodes instead of shifts
11859 * - get rid of TEMPLOADs if possible and use vregs instead
11860 * - clean up usage of OP_P/OP_ opcodes
11861 * - cleanup usage of DUMMY_USE
11862 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
11864 * - set the stack type and allocate a dreg in the EMIT_NEW macros
11865 * - get rid of all the <foo>2 stuff when the new JIT is ready.
11866 * - make sure handle_stack_args () is called before the branch is emitted
11867 * - when the new IR is done, get rid of all unused stuff
11868 * - COMPARE/BEQ as separate instructions or unify them ?
11869 * - keeping them separate allows specialized compare instructions like
11870 * compare_imm, compare_membase
11871 * - most back ends unify fp compare+branch, fp compare+ceq
11872 * - integrate mono_save_args into inline_method
11873 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
11874 * - handle long shift opts on 32 bit platforms somehow: they require
11875 * 3 sregs (2 for arg1 and 1 for arg2)
11876 * - make byref a 'normal' type.
11877 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
11878 * variable if needed.
11879 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
11880 * like inline_method.
11881 * - remove inlining restrictions
11882 * - fix LNEG and enable cfold of INEG
11883 * - generalize x86 optimizations like ldelema as a peephole optimization
11884 * - add store_mem_imm for amd64
11885 * - optimize the loading of the interruption flag in the managed->native wrappers
11886 * - avoid special handling of OP_NOP in passes
11887 * - move code inserting instructions into one function/macro.
11888 * - try a coalescing phase after liveness analysis
11889 * - add float -> vreg conversion + local optimizations on !x86
11890 * - figure out how to handle decomposed branches during optimizations, ie.
11891 * compare+branch, op_jump_table+op_br etc.
11892 * - promote RuntimeXHandles to vregs
11893 * - vtype cleanups:
11894 * - add a NEW_VARLOADA_VREG macro
11895 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
11896 * accessing vtype fields.
11897 * - get rid of I8CONST on 64 bit platforms
11898 * - dealing with the increase in code size due to branches created during opcode
11900 * - use extended basic blocks
11901 * - all parts of the JIT
11902 * - handle_global_vregs () && local regalloc
11903 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
11904 * - sources of increase in code size:
11907 * - isinst and castclass
11908 * - lvregs not allocated to global registers even if used multiple times
11909 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
11911 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
11912 * - add all micro optimizations from the old JIT
11913 * - put tree optimizations into the deadce pass
11914 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
11915 * specific function.
11916 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
11917 * fcompare + branchCC.
11918 * - create a helper function for allocating a stack slot, taking into account
11919 * MONO_CFG_HAS_SPILLUP.
11921 * - merge the ia64 switch changes.
11922 * - optimize mono_regstate2_alloc_int/float.
11923 * - fix the pessimistic handling of variables accessed in exception handler blocks.
11924 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
11925 * parts of the tree could be separated by other instructions, killing the tree
11926 * arguments, or stores killing loads etc. Also, should we fold loads into other
11927 * instructions if the result of the load is used multiple times ?
11928 * - make the REM_IMM optimization in mini-x86.c arch-independent.
11929 * - LAST MERGE: 108395.
11930 * - when returning vtypes in registers, generate IR and append it to the end of the
11931 * last bb instead of doing it in the epilog.
11932 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
11940 - When to decompose opcodes:
11941 - earlier: this makes some optimizations hard to implement, since the low level IR
11942 no longer contains the necessary information. But it is easier to do.
11943 - later: harder to implement, enables more optimizations.
11944 - Branches inside bblocks:
11945 - created when decomposing complex opcodes.
11946 - branches to another bblock: harmless, but not tracked by the branch
11947 optimizations, so need to branch to a label at the start of the bblock.
11948 - branches to inside the same bblock: very problematic, trips up the local
11949 reg allocator. Can be fixed by splitting the current bblock, but that is a
11950 complex operation, since some local vregs can become global vregs etc.
11951 - Local/global vregs:
11952 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
11953 local register allocator.
11954 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
11955 structure, created by mono_create_var (). Assigned to hregs or the stack by
11956 the global register allocator.
11957 - When to do optimizations like alu->alu_imm:
11958 - earlier -> saves work later on since the IR will be smaller/simpler
11959 - later -> can work on more instructions
11960 - Handling of valuetypes:
11961 - When a vtype is pushed on the stack, a new temporary is created, an
11962 instruction computing its address (LDADDR) is emitted and pushed on
11963 the stack. Need to optimize cases when the vtype is used immediately as in
11964 argument passing, stloc etc.
11965 - Instead of the to_end stuff in the old JIT, simply call the function handling
11966 the values on the stack before emitting the last instruction of the bb.
11969 #endif /* DISABLE_JIT */