2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
22 #ifdef HAVE_SYS_TIME_H
30 #include <mono/utils/memcheck.h>
32 #include <mono/metadata/assembly.h>
33 #include <mono/metadata/attrdefs.h>
34 #include <mono/metadata/loader.h>
35 #include <mono/metadata/tabledefs.h>
36 #include <mono/metadata/class.h>
37 #include <mono/metadata/object.h>
38 #include <mono/metadata/exception.h>
39 #include <mono/metadata/opcodes.h>
40 #include <mono/metadata/mono-endian.h>
41 #include <mono/metadata/tokentype.h>
42 #include <mono/metadata/tabledefs.h>
43 #include <mono/metadata/marshal.h>
44 #include <mono/metadata/debug-helpers.h>
45 #include <mono/metadata/mono-debug.h>
46 #include <mono/metadata/gc-internal.h>
47 #include <mono/metadata/security-manager.h>
48 #include <mono/metadata/threads-types.h>
49 #include <mono/metadata/security-core-clr.h>
50 #include <mono/metadata/monitor.h>
51 #include <mono/metadata/profiler-private.h>
52 #include <mono/metadata/profiler.h>
53 #include <mono/utils/mono-compiler.h>
54 #include <mono/metadata/mono-basic-block.h>
61 #include "jit-icalls.h"
63 #include "debugger-agent.h"
65 #define BRANCH_COST 10
66 #define INLINE_LENGTH_LIMIT 20
67 #define INLINE_FAILURE do {\
68 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE))\
71 #define CHECK_CFG_EXCEPTION do {\
72 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
75 #define METHOD_ACCESS_FAILURE do { \
76 char *method_fname = mono_method_full_name (method, TRUE); \
77 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
78 mono_cfg_set_exception (cfg, MONO_EXCEPTION_METHOD_ACCESS); \
79 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
80 g_free (method_fname); \
81 g_free (cil_method_fname); \
82 goto exception_exit; \
84 #define FIELD_ACCESS_FAILURE do { \
85 char *method_fname = mono_method_full_name (method, TRUE); \
86 char *field_fname = mono_field_full_name (field); \
87 mono_cfg_set_exception (cfg, MONO_EXCEPTION_FIELD_ACCESS); \
88 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
89 g_free (method_fname); \
90 g_free (field_fname); \
91 goto exception_exit; \
93 #define GENERIC_SHARING_FAILURE(opcode) do { \
94 if (cfg->generic_sharing_context) { \
95 if (cfg->verbose_level > 2) \
96 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
97 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
98 goto exception_exit; \
101 #define OUT_OF_MEMORY_FAILURE do { \
102 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OUT_OF_MEMORY); \
103 goto exception_exit; \
105 /* Determine whether 'ins' represents a load of the 'this' argument */
106 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
108 static int ldind_to_load_membase (int opcode);
109 static int stind_to_store_membase (int opcode);
111 int mono_op_to_op_imm (int opcode);
112 int mono_op_to_op_imm_noemul (int opcode);
114 MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
115 void mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native);
116 void mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass);
118 /* helper methods signatures */
119 static MonoMethodSignature *helper_sig_class_init_trampoline = NULL;
120 static MonoMethodSignature *helper_sig_domain_get = NULL;
121 static MonoMethodSignature *helper_sig_generic_class_init_trampoline = NULL;
122 static MonoMethodSignature *helper_sig_generic_class_init_trampoline_llvm = NULL;
123 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline = NULL;
124 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline = NULL;
125 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm = NULL;
128 * Instruction metadata
136 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
137 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
143 #if SIZEOF_REGISTER == 8
148 /* keep in sync with the enum in mini.h */
151 #include "mini-ops.h"
156 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
157 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
159 * This should contain the index of the last sreg + 1. This is not the same
160 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
162 const gint8 ins_sreg_counts[] = {
163 #include "mini-ops.h"
168 #define MONO_INIT_VARINFO(vi,id) do { \
169 (vi)->range.first_use.pos.bid = 0xffff; \
175 mono_inst_set_src_registers (MonoInst *ins, int *regs)
177 ins->sreg1 = regs [0];
178 ins->sreg2 = regs [1];
179 ins->sreg3 = regs [2];
183 mono_alloc_ireg (MonoCompile *cfg)
185 return alloc_ireg (cfg);
189 mono_alloc_freg (MonoCompile *cfg)
191 return alloc_freg (cfg);
195 mono_alloc_preg (MonoCompile *cfg)
197 return alloc_preg (cfg);
201 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
203 return alloc_dreg (cfg, stack_type);
207 * mono_alloc_ireg_ref:
209 * Allocate an IREG, and mark it as holding a GC ref.
212 mono_alloc_ireg_ref (MonoCompile *cfg)
214 return alloc_ireg_ref (cfg);
218 * mono_alloc_ireg_mp:
220 * Allocate an IREG, and mark it as holding a managed pointer.
223 mono_alloc_ireg_mp (MonoCompile *cfg)
225 return alloc_ireg_mp (cfg);
229 * mono_alloc_ireg_copy:
231 * Allocate an IREG with the same GC type as VREG.
234 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
236 if (vreg_is_ref (cfg, vreg))
237 return alloc_ireg_ref (cfg);
238 else if (vreg_is_mp (cfg, vreg))
239 return alloc_ireg_mp (cfg);
241 return alloc_ireg (cfg);
245 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
251 switch (type->type) {
254 case MONO_TYPE_BOOLEAN:
266 case MONO_TYPE_FNPTR:
268 case MONO_TYPE_CLASS:
269 case MONO_TYPE_STRING:
270 case MONO_TYPE_OBJECT:
271 case MONO_TYPE_SZARRAY:
272 case MONO_TYPE_ARRAY:
276 #if SIZEOF_REGISTER == 8
285 case MONO_TYPE_VALUETYPE:
286 if (type->data.klass->enumtype) {
287 type = mono_class_enum_basetype (type->data.klass);
290 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
293 case MONO_TYPE_TYPEDBYREF:
295 case MONO_TYPE_GENERICINST:
296 type = &type->data.generic_class->container_class->byval_arg;
300 g_assert (cfg->generic_sharing_context);
303 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
309 mono_print_bb (MonoBasicBlock *bb, const char *msg)
314 printf ("\n%s %d: [IN: ", msg, bb->block_num);
315 for (i = 0; i < bb->in_count; ++i)
316 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
318 for (i = 0; i < bb->out_count; ++i)
319 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
321 for (tree = bb->code; tree; tree = tree->next)
322 mono_print_ins_index (-1, tree);
326 mono_create_helper_signatures (void)
328 helper_sig_domain_get = mono_create_icall_signature ("ptr");
329 helper_sig_class_init_trampoline = mono_create_icall_signature ("void");
330 helper_sig_generic_class_init_trampoline = mono_create_icall_signature ("void");
331 helper_sig_generic_class_init_trampoline_llvm = mono_create_icall_signature ("void ptr");
332 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
333 helper_sig_monitor_enter_exit_trampoline = mono_create_icall_signature ("void");
334 helper_sig_monitor_enter_exit_trampoline_llvm = mono_create_icall_signature ("void object");
338 * Can't put this at the beginning, since other files reference stuff from this
343 #define UNVERIFIED do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto unverified; } while (0)
345 #define LOAD_ERROR do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto load_error; } while (0)
347 #define GET_BBLOCK(cfg,tblock,ip) do { \
348 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
350 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
351 NEW_BBLOCK (cfg, (tblock)); \
352 (tblock)->cil_code = (ip); \
353 ADD_BBLOCK (cfg, (tblock)); \
357 #if defined(TARGET_X86) || defined(TARGET_AMD64)
358 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
359 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
360 (dest)->dreg = alloc_ireg_mp ((cfg)); \
361 (dest)->sreg1 = (sr1); \
362 (dest)->sreg2 = (sr2); \
363 (dest)->inst_imm = (imm); \
364 (dest)->backend.shift_amount = (shift); \
365 MONO_ADD_INS ((cfg)->cbb, (dest)); \
369 #if SIZEOF_REGISTER == 8
370 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
371 /* FIXME: Need to add many more cases */ \
372 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
374 int dr = alloc_preg (cfg); \
375 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
376 (ins)->sreg2 = widen->dreg; \
380 #define ADD_WIDEN_OP(ins, arg1, arg2)
383 #define ADD_BINOP(op) do { \
384 MONO_INST_NEW (cfg, ins, (op)); \
386 ins->sreg1 = sp [0]->dreg; \
387 ins->sreg2 = sp [1]->dreg; \
388 type_from_op (ins, sp [0], sp [1]); \
390 /* Have to insert a widening op */ \
391 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
392 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
393 MONO_ADD_INS ((cfg)->cbb, (ins)); \
394 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
397 #define ADD_UNOP(op) do { \
398 MONO_INST_NEW (cfg, ins, (op)); \
400 ins->sreg1 = sp [0]->dreg; \
401 type_from_op (ins, sp [0], NULL); \
403 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
404 MONO_ADD_INS ((cfg)->cbb, (ins)); \
405 *sp++ = mono_decompose_opcode (cfg, ins); \
408 #define ADD_BINCOND(next_block) do { \
411 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
412 cmp->sreg1 = sp [0]->dreg; \
413 cmp->sreg2 = sp [1]->dreg; \
414 type_from_op (cmp, sp [0], sp [1]); \
416 type_from_op (ins, sp [0], sp [1]); \
417 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
418 GET_BBLOCK (cfg, tblock, target); \
419 link_bblock (cfg, bblock, tblock); \
420 ins->inst_true_bb = tblock; \
421 if ((next_block)) { \
422 link_bblock (cfg, bblock, (next_block)); \
423 ins->inst_false_bb = (next_block); \
424 start_new_bblock = 1; \
426 GET_BBLOCK (cfg, tblock, ip); \
427 link_bblock (cfg, bblock, tblock); \
428 ins->inst_false_bb = tblock; \
429 start_new_bblock = 2; \
431 if (sp != stack_start) { \
432 handle_stack_args (cfg, stack_start, sp - stack_start); \
433 CHECK_UNVERIFIABLE (cfg); \
435 MONO_ADD_INS (bblock, cmp); \
436 MONO_ADD_INS (bblock, ins); \
440 * link_bblock: Links two basic blocks
442 * Links two basic blocks in the control flow graph: the 'from'
443 * argument is the starting block and the 'to' argument is the block
444 * that control flow reaches after leaving 'from'.
447 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
449 MonoBasicBlock **newa;
453 if (from->cil_code) {
455 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
457 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
460 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
462 printf ("edge from entry to exit\n");
467 for (i = 0; i < from->out_count; ++i) {
468 if (to == from->out_bb [i]) {
474 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
475 for (i = 0; i < from->out_count; ++i) {
476 newa [i] = from->out_bb [i];
484 for (i = 0; i < to->in_count; ++i) {
485 if (from == to->in_bb [i]) {
491 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
492 for (i = 0; i < to->in_count; ++i) {
493 newa [i] = to->in_bb [i];
502 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
504 link_bblock (cfg, from, to);
508 * mono_find_block_region:
510 * We mark each basic block with a region ID. We use that to avoid BB
511 * optimizations when blocks are in different regions.
514 * A region token that encodes where this region is, and information
515 * about the clause owner for this block.
517 * The region encodes the try/catch/filter clause that owns this block
518 * as well as the type. -1 is a special value that represents a block
519 * that is in none of try/catch/filter.
522 mono_find_block_region (MonoCompile *cfg, int offset)
524 MonoMethodHeader *header = cfg->header;
525 MonoExceptionClause *clause;
528 for (i = 0; i < header->num_clauses; ++i) {
529 clause = &header->clauses [i];
530 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
531 (offset < (clause->handler_offset)))
532 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
534 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
535 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
536 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
537 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
538 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
540 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
543 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
544 return ((i + 1) << 8) | clause->flags;
551 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
553 MonoMethodHeader *header = cfg->header;
554 MonoExceptionClause *clause;
558 for (i = 0; i < header->num_clauses; ++i) {
559 clause = &header->clauses [i];
560 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
561 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
562 if (clause->flags == type)
563 res = g_list_append (res, clause);
570 mono_create_spvar_for_region (MonoCompile *cfg, int region)
574 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
578 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
579 /* prevent it from being register allocated */
580 var->flags |= MONO_INST_INDIRECT;
582 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
586 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
588 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
592 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
596 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
600 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
601 /* prevent it from being register allocated */
602 var->flags |= MONO_INST_INDIRECT;
604 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
610 * Returns the type used in the eval stack when @type is loaded.
611 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
614 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
618 inst->klass = klass = mono_class_from_mono_type (type);
620 inst->type = STACK_MP;
625 switch (type->type) {
627 inst->type = STACK_INV;
631 case MONO_TYPE_BOOLEAN:
637 inst->type = STACK_I4;
642 case MONO_TYPE_FNPTR:
643 inst->type = STACK_PTR;
645 case MONO_TYPE_CLASS:
646 case MONO_TYPE_STRING:
647 case MONO_TYPE_OBJECT:
648 case MONO_TYPE_SZARRAY:
649 case MONO_TYPE_ARRAY:
650 inst->type = STACK_OBJ;
654 inst->type = STACK_I8;
658 inst->type = STACK_R8;
660 case MONO_TYPE_VALUETYPE:
661 if (type->data.klass->enumtype) {
662 type = mono_class_enum_basetype (type->data.klass);
666 inst->type = STACK_VTYPE;
669 case MONO_TYPE_TYPEDBYREF:
670 inst->klass = mono_defaults.typed_reference_class;
671 inst->type = STACK_VTYPE;
673 case MONO_TYPE_GENERICINST:
674 type = &type->data.generic_class->container_class->byval_arg;
677 case MONO_TYPE_MVAR :
678 /* FIXME: all the arguments must be references for now,
679 * later look inside cfg and see if the arg num is
682 g_assert (cfg->generic_sharing_context);
683 inst->type = STACK_OBJ;
686 g_error ("unknown type 0x%02x in eval stack type", type->type);
691 * The following tables are used to quickly validate the IL code in type_from_op ().
694 bin_num_table [STACK_MAX] [STACK_MAX] = {
695 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
696 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
697 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
698 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
699 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
700 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
701 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
702 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
707 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
710 /* reduce the size of this table */
712 bin_int_table [STACK_MAX] [STACK_MAX] = {
713 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
714 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
715 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
716 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
717 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
718 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
719 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
720 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
724 bin_comp_table [STACK_MAX] [STACK_MAX] = {
725 /* Inv i L p F & O vt */
727 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
728 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
729 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
730 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
731 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
732 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
733 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
736 /* reduce the size of this table */
738 shift_table [STACK_MAX] [STACK_MAX] = {
739 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
740 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
741 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
742 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
743 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
744 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
745 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
746 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
750 * Tables to map from the non-specific opcode to the matching
751 * type-specific opcode.
753 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
755 binops_op_map [STACK_MAX] = {
756 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
759 /* handles from CEE_NEG to CEE_CONV_U8 */
761 unops_op_map [STACK_MAX] = {
762 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
765 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
767 ovfops_op_map [STACK_MAX] = {
768 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
771 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
773 ovf2ops_op_map [STACK_MAX] = {
774 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
777 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
779 ovf3ops_op_map [STACK_MAX] = {
780 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
783 /* handles from CEE_BEQ to CEE_BLT_UN */
785 beqops_op_map [STACK_MAX] = {
786 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
789 /* handles from CEE_CEQ to CEE_CLT_UN */
791 ceqops_op_map [STACK_MAX] = {
792 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
796 * Sets ins->type (the type on the eval stack) according to the
797 * type of the opcode and the arguments to it.
798 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
800 * FIXME: this function sets ins->type unconditionally in some cases, but
801 * it should set it to invalid for some types (a conv.x on an object)
804 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
806 switch (ins->opcode) {
813 /* FIXME: check unverifiable args for STACK_MP */
814 ins->type = bin_num_table [src1->type] [src2->type];
815 ins->opcode += binops_op_map [ins->type];
822 ins->type = bin_int_table [src1->type] [src2->type];
823 ins->opcode += binops_op_map [ins->type];
828 ins->type = shift_table [src1->type] [src2->type];
829 ins->opcode += binops_op_map [ins->type];
834 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
835 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
836 ins->opcode = OP_LCOMPARE;
837 else if (src1->type == STACK_R8)
838 ins->opcode = OP_FCOMPARE;
840 ins->opcode = OP_ICOMPARE;
842 case OP_ICOMPARE_IMM:
843 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
844 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
845 ins->opcode = OP_LCOMPARE_IMM;
857 ins->opcode += beqops_op_map [src1->type];
860 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
861 ins->opcode += ceqops_op_map [src1->type];
867 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
868 ins->opcode += ceqops_op_map [src1->type];
872 ins->type = neg_table [src1->type];
873 ins->opcode += unops_op_map [ins->type];
876 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
877 ins->type = src1->type;
879 ins->type = STACK_INV;
880 ins->opcode += unops_op_map [ins->type];
886 ins->type = STACK_I4;
887 ins->opcode += unops_op_map [src1->type];
890 ins->type = STACK_R8;
891 switch (src1->type) {
894 ins->opcode = OP_ICONV_TO_R_UN;
897 ins->opcode = OP_LCONV_TO_R_UN;
901 case CEE_CONV_OVF_I1:
902 case CEE_CONV_OVF_U1:
903 case CEE_CONV_OVF_I2:
904 case CEE_CONV_OVF_U2:
905 case CEE_CONV_OVF_I4:
906 case CEE_CONV_OVF_U4:
907 ins->type = STACK_I4;
908 ins->opcode += ovf3ops_op_map [src1->type];
910 case CEE_CONV_OVF_I_UN:
911 case CEE_CONV_OVF_U_UN:
912 ins->type = STACK_PTR;
913 ins->opcode += ovf2ops_op_map [src1->type];
915 case CEE_CONV_OVF_I1_UN:
916 case CEE_CONV_OVF_I2_UN:
917 case CEE_CONV_OVF_I4_UN:
918 case CEE_CONV_OVF_U1_UN:
919 case CEE_CONV_OVF_U2_UN:
920 case CEE_CONV_OVF_U4_UN:
921 ins->type = STACK_I4;
922 ins->opcode += ovf2ops_op_map [src1->type];
925 ins->type = STACK_PTR;
926 switch (src1->type) {
928 ins->opcode = OP_ICONV_TO_U;
932 #if SIZEOF_VOID_P == 8
933 ins->opcode = OP_LCONV_TO_U;
935 ins->opcode = OP_MOVE;
939 ins->opcode = OP_LCONV_TO_U;
942 ins->opcode = OP_FCONV_TO_U;
948 ins->type = STACK_I8;
949 ins->opcode += unops_op_map [src1->type];
951 case CEE_CONV_OVF_I8:
952 case CEE_CONV_OVF_U8:
953 ins->type = STACK_I8;
954 ins->opcode += ovf3ops_op_map [src1->type];
956 case CEE_CONV_OVF_U8_UN:
957 case CEE_CONV_OVF_I8_UN:
958 ins->type = STACK_I8;
959 ins->opcode += ovf2ops_op_map [src1->type];
963 ins->type = STACK_R8;
964 ins->opcode += unops_op_map [src1->type];
967 ins->type = STACK_R8;
971 ins->type = STACK_I4;
972 ins->opcode += ovfops_op_map [src1->type];
977 ins->type = STACK_PTR;
978 ins->opcode += ovfops_op_map [src1->type];
986 ins->type = bin_num_table [src1->type] [src2->type];
987 ins->opcode += ovfops_op_map [src1->type];
988 if (ins->type == STACK_R8)
989 ins->type = STACK_INV;
991 case OP_LOAD_MEMBASE:
992 ins->type = STACK_PTR;
994 case OP_LOADI1_MEMBASE:
995 case OP_LOADU1_MEMBASE:
996 case OP_LOADI2_MEMBASE:
997 case OP_LOADU2_MEMBASE:
998 case OP_LOADI4_MEMBASE:
999 case OP_LOADU4_MEMBASE:
1000 ins->type = STACK_PTR;
1002 case OP_LOADI8_MEMBASE:
1003 ins->type = STACK_I8;
1005 case OP_LOADR4_MEMBASE:
1006 case OP_LOADR8_MEMBASE:
1007 ins->type = STACK_R8;
1010 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
1014 if (ins->type == STACK_MP)
1015 ins->klass = mono_defaults.object_class;
1020 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
1026 param_table [STACK_MAX] [STACK_MAX] = {
1031 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
1035 switch (args->type) {
1045 for (i = 0; i < sig->param_count; ++i) {
1046 switch (args [i].type) {
1050 if (!sig->params [i]->byref)
1054 if (sig->params [i]->byref)
1056 switch (sig->params [i]->type) {
1057 case MONO_TYPE_CLASS:
1058 case MONO_TYPE_STRING:
1059 case MONO_TYPE_OBJECT:
1060 case MONO_TYPE_SZARRAY:
1061 case MONO_TYPE_ARRAY:
1068 if (sig->params [i]->byref)
1070 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1079 /*if (!param_table [args [i].type] [sig->params [i]->type])
1087 * When we need a pointer to the current domain many times in a method, we
1088 * call mono_domain_get() once and we store the result in a local variable.
1089 * This function returns the variable that represents the MonoDomain*.
1091 inline static MonoInst *
1092 mono_get_domainvar (MonoCompile *cfg)
1094 if (!cfg->domainvar)
1095 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1096 return cfg->domainvar;
1100 * The got_var contains the address of the Global Offset Table when AOT
1104 mono_get_got_var (MonoCompile *cfg)
1106 #ifdef MONO_ARCH_NEED_GOT_VAR
1107 if (!cfg->compile_aot)
1109 if (!cfg->got_var) {
1110 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1112 return cfg->got_var;
1119 mono_get_vtable_var (MonoCompile *cfg)
1121 g_assert (cfg->generic_sharing_context);
1123 if (!cfg->rgctx_var) {
1124 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1125 /* force the var to be stack allocated */
1126 cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
1129 return cfg->rgctx_var;
1133 type_from_stack_type (MonoInst *ins) {
1134 switch (ins->type) {
1135 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1136 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1137 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1138 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1140 return &ins->klass->this_arg;
1141 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1142 case STACK_VTYPE: return &ins->klass->byval_arg;
1144 g_error ("stack type %d to monotype not handled\n", ins->type);
1149 static G_GNUC_UNUSED int
1150 type_to_stack_type (MonoType *t)
1152 t = mono_type_get_underlying_type (t);
1156 case MONO_TYPE_BOOLEAN:
1159 case MONO_TYPE_CHAR:
1166 case MONO_TYPE_FNPTR:
1168 case MONO_TYPE_CLASS:
1169 case MONO_TYPE_STRING:
1170 case MONO_TYPE_OBJECT:
1171 case MONO_TYPE_SZARRAY:
1172 case MONO_TYPE_ARRAY:
1180 case MONO_TYPE_VALUETYPE:
1181 case MONO_TYPE_TYPEDBYREF:
1183 case MONO_TYPE_GENERICINST:
1184 if (mono_type_generic_inst_is_valuetype (t))
1190 g_assert_not_reached ();
1197 array_access_to_klass (int opcode)
1201 return mono_defaults.byte_class;
1203 return mono_defaults.uint16_class;
1206 return mono_defaults.int_class;
1209 return mono_defaults.sbyte_class;
1212 return mono_defaults.int16_class;
1215 return mono_defaults.int32_class;
1217 return mono_defaults.uint32_class;
1220 return mono_defaults.int64_class;
1223 return mono_defaults.single_class;
1226 return mono_defaults.double_class;
1227 case CEE_LDELEM_REF:
1228 case CEE_STELEM_REF:
1229 return mono_defaults.object_class;
1231 g_assert_not_reached ();
1237 * We try to share variables when possible
1240 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1245 /* inlining can result in deeper stacks */
1246 if (slot >= cfg->header->max_stack)
1247 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1249 pos = ins->type - 1 + slot * STACK_MAX;
1251 switch (ins->type) {
1258 if ((vnum = cfg->intvars [pos]))
1259 return cfg->varinfo [vnum];
1260 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1261 cfg->intvars [pos] = res->inst_c0;
1264 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1270 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1273 * Don't use this if a generic_context is set, since that means AOT can't
1274 * look up the method using just the image+token.
1275 * table == 0 means this is a reference made from a wrapper.
1277 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1278 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1279 jump_info_token->image = image;
1280 jump_info_token->token = token;
1281 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1286 * This function is called to handle items that are left on the evaluation stack
1287 * at basic block boundaries. What happens is that we save the values to local variables
1288 * and we reload them later when first entering the target basic block (with the
1289 * handle_loaded_temps () function).
1290 * A single join point will use the same variables (stored in the array bb->out_stack or
1291 * bb->in_stack, if the basic block is before or after the join point).
1293 * This function needs to be called _before_ emitting the last instruction of
1294 * the bb (i.e. before emitting a branch).
1295 * If the stack merge fails at a join point, cfg->unverifiable is set.
1298 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1301 MonoBasicBlock *bb = cfg->cbb;
1302 MonoBasicBlock *outb;
1303 MonoInst *inst, **locals;
1308 if (cfg->verbose_level > 3)
1309 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
1310 if (!bb->out_scount) {
1311 bb->out_scount = count;
1312 //printf ("bblock %d has out:", bb->block_num);
1314 for (i = 0; i < bb->out_count; ++i) {
1315 outb = bb->out_bb [i];
1316 /* exception handlers are linked, but they should not be considered for stack args */
1317 if (outb->flags & BB_EXCEPTION_HANDLER)
1319 //printf (" %d", outb->block_num);
1320 if (outb->in_stack) {
1322 bb->out_stack = outb->in_stack;
1328 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1329 for (i = 0; i < count; ++i) {
1331 * try to reuse temps already allocated for this purpose, if they occupy the same
1332 * stack slot and if they are of the same type.
1333 * This won't cause conflicts since if 'local' is used to
1334 * store one of the values in the in_stack of a bblock, then
1335 * the same variable will be used for the same outgoing stack
1337 * This doesn't work when inlining methods, since the bblocks
1338 * in the inlined methods do not inherit their in_stack from
1339 * the bblock they are inlined to. See bug #58863 for an
1342 if (cfg->inlined_method)
1343 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1345 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
1350 for (i = 0; i < bb->out_count; ++i) {
1351 outb = bb->out_bb [i];
1352 /* exception handlers are linked, but they should not be considered for stack args */
1353 if (outb->flags & BB_EXCEPTION_HANDLER)
1355 if (outb->in_scount) {
1356 if (outb->in_scount != bb->out_scount) {
1357 cfg->unverifiable = TRUE;
1360 continue; /* check they are the same locals */
1362 outb->in_scount = count;
1363 outb->in_stack = bb->out_stack;
1366 locals = bb->out_stack;
1368 for (i = 0; i < count; ++i) {
1369 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1370 inst->cil_code = sp [i]->cil_code;
1371 sp [i] = locals [i];
1372 if (cfg->verbose_level > 3)
1373 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1377 * It is possible that the out bblocks already have in_stack assigned, and
1378 * the in_stacks differ. In this case, we will store to all the different
1385 /* Find a bblock which has a different in_stack */
1387 while (bindex < bb->out_count) {
1388 outb = bb->out_bb [bindex];
1389 /* exception handlers are linked, but they should not be considered for stack args */
1390 if (outb->flags & BB_EXCEPTION_HANDLER) {
1394 if (outb->in_stack != locals) {
1395 for (i = 0; i < count; ++i) {
1396 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1397 inst->cil_code = sp [i]->cil_code;
1398 sp [i] = locals [i];
1399 if (cfg->verbose_level > 3)
1400 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1402 locals = outb->in_stack;
/* Emit code which loads interface_offsets [klass->interface_id]
 * The array is stored in memory before vtable.
mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
	if (cfg->compile_aot) {
		/* AOT: the interface id is only known at load time, so it is emitted
		 * as a patched constant (already adjusted for the negative indexing). */
		int ioffset_reg = alloc_preg (cfg);
		int iid_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
		MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
		/* JIT: the interface id is a compile-time constant, fold the offset */
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
/* Sets 'intf_bit_reg' to a nonzero value iff the interface bitmap located at
 * base_reg+offset has the bit corresponding to klass's interface id set. */
mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
	int ibitmap_reg = alloc_preg (cfg);
#ifdef COMPRESSED_INTERFACE_BITMAP
	MonoInst *res, *ins;
	NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
	MONO_ADD_INS (cfg->cbb, ins);
	/* the compressed bitmap is decoded by a runtime helper call */
	if (cfg->compile_aot)
		EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
		EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
	res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
	MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
	int ibitmap_byte_reg = alloc_preg (cfg);
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
	if (cfg->compile_aot) {
		/* iid unknown until load time: compute byte index (iid >> 3) and
		 * bit mask (1 << (iid & 7)) with emitted code */
		int iid_reg = alloc_preg (cfg);
		int shifted_iid_reg = alloc_preg (cfg);
		int ibitmap_byte_address_reg = alloc_preg (cfg);
		int masked_iid_reg = alloc_preg (cfg);
		int iid_one_bit_reg = alloc_preg (cfg);
		int iid_bit_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
		MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
		MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
		MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
		MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
		/* iid is a JIT-time constant: byte index and mask folded to immediates */
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
 * stored in "klass_reg" implements the interface "klass".
mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
	/* thin wrapper: the interface bitmap lives inside MonoClass */
	mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
 * stored in "vtable_reg" implements the interface "klass".
mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
	/* thin wrapper: the interface bitmap lives inside MonoVTable */
	mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
 * Emit code which checks whether the interface id of @klass is smaller
 * than the value given by max_iid_reg.
mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
						 MonoBasicBlock *false_target)
	if (cfg->compile_aot) {
		/* iid only known at load time: compare against a patched constant */
		int iid_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
		/* with a false_target this is an isinst-style branch... */
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
		/* ...without one it is a castclass-style throwing check */
		MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
/* Same as above, but obtains max_iid from a vtable */
mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
								MonoBasicBlock *false_target)
	int max_iid_reg = alloc_preg (cfg);
	/* max_interface_id is a 16-bit field, hence the LOADU2 */
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
	mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/* Same as above, but obtains max_iid from a klass */
mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
							   MonoBasicBlock *false_target)
	int max_iid_reg = alloc_preg (cfg);
	/* max_interface_id is a 16-bit field, hence the LOADU2 */
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
	mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/* Emit an isinst-style subtype check of the class in 'klass_reg' against 'klass'
 * using the supertypes table; branches to true_target on success. When 'klass_ins'
 * is non-NULL it supplies the class pointer to compare against at runtime. */
mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
	int idepth_reg = alloc_preg (cfg);
	int stypes_reg = alloc_preg (cfg);
	int stype = alloc_preg (cfg);
	if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
		/* deep hierarchies: must first check the candidate's idepth is large enough */
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
	} else if (cfg->compile_aot) {
		int const_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/* Convenience wrapper: isinst check with the class known at compile time only */
mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
	mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
/* Emit an interface check against the vtable in 'vtable_reg'. With branch targets
 * this behaves like isinst; with NULL targets it throws InvalidCastException. */
mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
	int intf_reg = alloc_preg (cfg);
	mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
	mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
		MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
 * Variant of the above that takes a register to the class, not the vtable.
mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
	int intf_bit_reg = alloc_preg (cfg);
	mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
	mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
		/* branch form (isinst) when a target was supplied... */
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
		/* ...throwing form (castclass) otherwise */
		MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/* Emit an exact class-equality check of 'klass_reg' against 'klass', throwing
 * InvalidCastException on mismatch. 'klass_inst', when non-NULL, supplies the
 * class pointer as a runtime value instead of a constant. */
mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
	} else if (cfg->compile_aot) {
		int const_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
	MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Convenience wrapper: exact class check with a compile-time class only */
mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
	return mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
/* Compare 'klass_reg' with the constant class 'klass' and branch to 'target'
 * using 'branch_op' (e.g. OP_PBEQ / OP_PBNE_UN). */
mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
	if (cfg->compile_aot) {
		/* class pointer not known until load time: materialize via patch */
		int const_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
1634 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/* Emit a throwing castclass check of the class in 'klass_reg' against 'klass'.
 * obj_reg (-1 to skip object checks) is used to validate array shape; jumps to
 * object_is_null where a null/compatible-array shortcut applies. */
mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
		/* array case: check rank, then the element class */
		int rank_reg = alloc_preg (cfg);
		int eclass_reg = alloc_preg (cfg);
		g_assert (!klass_inst);
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
		MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
		//		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
		if (klass->cast_class == mono_defaults.object_class) {
			/* object[] also accepts arrays of enums whose base is object's child; see checks below */
			int parent_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
			mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
			mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
		} else if (klass->cast_class == mono_defaults.enum_class->parent) {
			mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
			mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
		} else if (klass->cast_class == mono_defaults.enum_class) {
			mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
		} else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
			/* element type is an interface: NULL targets make this a throwing check */
			mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
			// Pass -1 as obj_reg to skip the check below for arrays of arrays
			mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
		if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
			/* Check that the object is a vector too */
			int bounds_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
			MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
		/* non-array case: supertypes-table subtype check, throwing on failure */
		int idepth_reg = alloc_preg (cfg);
		int stypes_reg = alloc_preg (cfg);
		int stype = alloc_preg (cfg);
		if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
			MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
			MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
		mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
/* Convenience wrapper: castclass with a compile-time class only */
mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
	return mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/* Emit an inline, unrolled memset of 'size' bytes at destreg+offset.
 * Only val == 0 is supported (asserted below). */
mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
	g_assert (val == 0);
	/* small, suitably aligned sizes collapse to a single store-immediate */
	if ((size <= 4) && (size <= align)) {
			MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
			MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
			MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
#if SIZEOF_REGISTER == 8
			MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
	val_reg = alloc_preg (cfg);
	if (SIZEOF_REGISTER == 8)
		MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
		MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
		/* This could be optimized further if necessary */
		/* unaligned fallback: byte-by-byte stores */
			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
#if !NO_UNALIGNED_ACCESS
	if (SIZEOF_REGISTER == 8) {
		/* align to 8, then store 8 bytes at a time */
			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
		/* remaining tail: 4-, 2-, then 1-byte stores */
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/* Emit an inline, unrolled copy of 'size' bytes from srcreg+soffset to
 * destreg+doffset, honouring 'align'. */
mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
	/*FIXME arbitrary hack to avoid unbound code expansion.*/
	g_assert (size < 10000);
		/* This could be optimized further if necessary */
		/* unaligned fallback: byte-by-byte load/store pairs */
		cur_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
#if !NO_UNALIGNED_ACCESS
	if (SIZEOF_REGISTER == 8) {
		/* copy 8 bytes at a time while possible */
		cur_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
		/* remaining tail: 4-, 2-, then 1-byte copies */
		cur_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
		cur_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
		cur_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/* Map a CIL return type to the matching call opcode variant:
 * calli selects the *_REG form, virt the *VIRT form, otherwise the plain call. */
ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
		/* byref returns are pointer-sized */
		return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
	type = mini_get_basic_type_from_generic (gsctx, type);
	switch (type->type) {
	case MONO_TYPE_VOID:
		return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
	case MONO_TYPE_BOOLEAN:
	case MONO_TYPE_CHAR:
		return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
	case MONO_TYPE_FNPTR:
		return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
	case MONO_TYPE_CLASS:
	case MONO_TYPE_STRING:
	case MONO_TYPE_OBJECT:
	case MONO_TYPE_SZARRAY:
	case MONO_TYPE_ARRAY:
		return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
		return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
		return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
	case MONO_TYPE_VALUETYPE:
		if (type->data.klass->enumtype) {
			/* enums are handled as their underlying integral type */
			type = mono_class_enum_basetype (type->data.klass);
			return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
	case MONO_TYPE_TYPEDBYREF:
		return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
	case MONO_TYPE_GENERICINST:
		type = &type->data.generic_class->container_class->byval_arg;
	g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
 * target_type_is_incompatible:
 * @cfg: MonoCompile context
 * Check that the item @arg on the evaluation stack can be stored
 * in the target type (can be a local, or field, etc).
 * The cfg arg can be used to check if we need verification or just
 * Returns: non-0 value if arg can't be stored on a target.
target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
	MonoType *simple_type;
	if (target->byref) {
		/* FIXME: check that the pointed to types match */
		if (arg->type == STACK_MP)
			return arg->klass != mono_class_from_mono_type (target);
		if (arg->type == STACK_PTR)
	simple_type = mono_type_get_underlying_type (target);
	switch (simple_type->type) {
	case MONO_TYPE_VOID:
	case MONO_TYPE_BOOLEAN:
	case MONO_TYPE_CHAR:
		if (arg->type != STACK_I4 && arg->type != STACK_PTR)
		/* STACK_MP is needed when setting pinned locals */
		if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
	case MONO_TYPE_FNPTR:
		if (arg->type != STACK_I4 && arg->type != STACK_PTR)
	case MONO_TYPE_CLASS:
	case MONO_TYPE_STRING:
	case MONO_TYPE_OBJECT:
	case MONO_TYPE_SZARRAY:
	case MONO_TYPE_ARRAY:
		if (arg->type != STACK_OBJ)
		/* FIXME: check type compatibility */
		if (arg->type != STACK_I8)
		if (arg->type != STACK_R8)
	case MONO_TYPE_VALUETYPE:
		if (arg->type != STACK_VTYPE)
		klass = mono_class_from_mono_type (simple_type);
		if (klass != arg->klass)
	case MONO_TYPE_TYPEDBYREF:
		if (arg->type != STACK_VTYPE)
		klass = mono_class_from_mono_type (simple_type);
		if (klass != arg->klass)
	case MONO_TYPE_GENERICINST:
		if (mono_type_generic_inst_is_valuetype (simple_type)) {
			/* generic valuetypes must match the exact instantiated class */
			if (arg->type != STACK_VTYPE)
			klass = mono_class_from_mono_type (simple_type);
			if (klass != arg->klass)
			if (arg->type != STACK_OBJ)
			/* FIXME: check type compatibility */
	case MONO_TYPE_MVAR:
		/* FIXME: all the arguments must be references for now,
		 * later look inside cfg and see if the arg num is
		 * really a reference
		g_assert (cfg->generic_sharing_context);
		if (arg->type != STACK_OBJ)
	g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
 * Prepare arguments for passing to a function call.
 * Return a non-zero value if the arguments can't be passed to the given
 * The type checks are not yet complete and some conversions may need
 * casts on 32 or 64 bit architectures.
 * FIXME: implement this using target_type_is_incompatible ()
check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
	MonoType *simple_type;
		/* the 'this' argument must be a managed pointer of some kind */
		if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
	for (i = 0; i < sig->param_count; ++i) {
		if (sig->params [i]->byref) {
			if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
		simple_type = sig->params [i];
		simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
		switch (simple_type->type) {
		case MONO_TYPE_VOID:
		case MONO_TYPE_BOOLEAN:
		case MONO_TYPE_CHAR:
			if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
		case MONO_TYPE_FNPTR:
			if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
		case MONO_TYPE_CLASS:
		case MONO_TYPE_STRING:
		case MONO_TYPE_OBJECT:
		case MONO_TYPE_SZARRAY:
		case MONO_TYPE_ARRAY:
			if (args [i]->type != STACK_OBJ)
			if (args [i]->type != STACK_I8)
			if (args [i]->type != STACK_R8)
		case MONO_TYPE_VALUETYPE:
			if (simple_type->data.klass->enumtype) {
				/* enums are re-checked as their underlying type */
				simple_type = mono_class_enum_basetype (simple_type->data.klass);
			if (args [i]->type != STACK_VTYPE)
		case MONO_TYPE_TYPEDBYREF:
			if (args [i]->type != STACK_VTYPE)
		case MONO_TYPE_GENERICINST:
			simple_type = &simple_type->data.generic_class->container_class->byval_arg;
			g_error ("unknown type 0x%02x in check_call_signature",
/* Map a *CALLVIRT opcode to the corresponding direct-call opcode */
callvirt_to_call (int opcode)
	case OP_VOIDCALLVIRT:
		/* unknown opcode: caller passed a non-virtual-call opcode */
		g_assert_not_reached ();
/* Map a *CALLVIRT opcode to the corresponding *CALL_MEMBASE opcode
 * (call through a vtable/memory slot). */
callvirt_to_call_membase (int opcode)
		return OP_CALL_MEMBASE;
	case OP_VOIDCALLVIRT:
		return OP_VOIDCALL_MEMBASE;
		return OP_FCALL_MEMBASE;
		return OP_LCALL_MEMBASE;
		return OP_VCALL_MEMBASE;
		/* unknown opcode: caller passed a non-virtual-call opcode */
		g_assert_not_reached ();
#ifdef MONO_ARCH_HAVE_IMT
/* Load the IMT argument (the target MonoMethod, or 'imt_arg' when supplied)
 * into the architecture's IMT register for an interface/generic-virtual call. */
emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
	if (COMPILE_LLVM (cfg)) {
		/* LLVM backend: pass the method as an ordinary out-arg register */
		method_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
		} else if (cfg->compile_aot) {
			MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
			MONO_INST_NEW (cfg, ins, OP_PCONST);
			ins->inst_p0 = call->method;
			ins->dreg = method_reg;
			MONO_ADD_INS (cfg->cbb, ins);
		call->imt_arg_reg = method_reg;
#ifdef MONO_ARCH_IMT_REG
	mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
	/* Need this to keep the IMT arg alive */
	mono_call_inst_add_outarg_reg (cfg, call, method_reg, 0, FALSE);
#ifdef MONO_ARCH_IMT_REG
	method_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
	} else if (cfg->compile_aot) {
		MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
		MONO_INST_NEW (cfg, ins, OP_PCONST);
		ins->inst_p0 = call->method;
		ins->dreg = method_reg;
		MONO_ADD_INS (cfg->cbb, ins);
	mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
	/* no dedicated IMT register: let the backend decide how to pass it */
	mono_arch_emit_imt_argument (cfg, call, imt_arg);
/* Allocate a MonoJumpInfo patch record from 'mp' describing a patch of
 * 'type' at IL offset 'ip' with the given target. */
static MonoJumpInfo *
mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
	MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
	ji->data.target = target;
/* Build a MonoCallInst for a call with signature 'sig' and arguments 'args'.
 * calli/virtual/tail select the opcode family; rgctx flags an rgctx-carrying call.
 * Handles vtype returns (hidden return-address argument) and soft-float
 * argument conversion, then lets the backend emit the out-args. */
inline static MonoCallInst *
mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
					 MonoInst **args, int calli, int virtual, int tail, int rgctx)
#ifdef MONO_ARCH_SOFT_FLOAT
		MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
		MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
	call->signature = sig;
	call->rgctx_reg = rgctx;
	type_to_eval_stack_type ((cfg), sig->ret, &call->inst);
		/* tail-call path: reuse the caller's vret_addr for a vtype return
		 * (NOTE(review): the enclosing tail-call conditional is elided in this view) */
		if (MONO_TYPE_ISSTRUCT (sig->ret)) {
			call->vret_var = cfg->vret_addr;
			//g_assert_not_reached ();
	} else if (MONO_TYPE_ISSTRUCT (sig->ret)) {
		/* normal vtype return: allocate a temp and pass its address */
		MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
		temp->backend.is_pinvoke = sig->pinvoke;
		 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
		 * address of return value to increase optimization opportunities.
		 * Before vtype decomposition, the dreg of the call ins itself represents the
		 * fact the call modifies the return value. After decomposition, the call will
		 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
		 * will be transformed into an LDADDR.
		MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
		loada->dreg = alloc_preg (cfg);
		loada->inst_p0 = temp;
		/* We reference the call too since call->dreg could change during optimization */
		loada->inst_p1 = call;
		MONO_ADD_INS (cfg->cbb, loada);
		call->inst.dreg = temp->dreg;
		call->vret_var = loada;
	} else if (!MONO_TYPE_IS_VOID (sig->ret))
		call->inst.dreg = alloc_dreg (cfg, call->inst.type);
#ifdef MONO_ARCH_SOFT_FLOAT
	if (COMPILE_SOFT_FLOAT (cfg)) {
		 * If the call has a float argument, we would need to do an r8->r4 conversion using
		 * an icall, but that cannot be done during the call sequence since it would clobber
		 * the call registers + the stack. So we do it before emitting the call.
		for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
			MonoInst *in = call->args [i];
			if (i >= sig->hasthis)
				t = sig->params [i - sig->hasthis];
				t = &mono_defaults.int_class->byval_arg;
			t = mono_type_get_underlying_type (t);
			if (!t->byref && t->type == MONO_TYPE_R4) {
				MonoInst *iargs [1];
				conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
				/* The result will be in an int vreg */
				call->args [i] = conv;
	if (COMPILE_LLVM (cfg))
		mono_llvm_emit_call (cfg, call);
		mono_arch_emit_call (cfg, call);
	mono_arch_emit_call (cfg, call);
	/* track the outgoing parameter area needed by this method's calls */
	cfg->param_area = MAX (cfg->param_area, call->stack_usage);
	cfg->flags |= MONO_CFG_HAS_CALLS;
/* Attach the rgctx (runtime generic context) argument to 'call', using the
 * dedicated MONO_ARCH_RGCTX_REG when the architecture provides one. */
set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
#ifdef MONO_ARCH_RGCTX_REG
	mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
	cfg->uses_rgctx_reg = TRUE;
	call->rgctx_reg = TRUE;
	call->rgctx_arg_reg = rgctx_reg;
/* Emit an indirect call through the address in 'addr', optionally passing
 * an rgctx argument. Returns the call instruction. */
inline static MonoInst*
mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *rgctx_arg)
		/* copy the rgctx value early so it survives until the call */
		rgctx_reg = mono_alloc_preg (cfg);
		MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
	call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE);
	call->inst.sreg1 = addr->dreg;
	MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
		set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
	return (MonoInst*)call;
2346 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, int rgctx_type);
2348 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, int rgctx_type);
/* Emit a (possibly virtual) call to 'method'. Picks the cheapest dispatch:
 * direct call, remoting wrapper, delegate trampoline, IMT slot for interface
 * methods, or a vtable-slot indirect call. Returns the call instruction. */
mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
							MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *rgctx_arg)
	gboolean might_be_remote;
	gboolean virtual = this != NULL;
	gboolean enable_for_aot = TRUE;
		/* copy the rgctx value early so it survives until the call */
		rgctx_reg = mono_alloc_preg (cfg);
		MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
	if (method->string_ctor) {
		/* Create the real signature */
		/* FIXME: Cache these */
		MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
		ctor_sig->ret = &mono_defaults.string_class->byval_arg;
	might_be_remote = this && sig->hasthis &&
		(method->klass->marshalbyref || method->klass == mono_defaults.object_class) &&
		!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && !MONO_CHECK_THIS (this);
	context_used = mono_method_check_context_used (method);
	if (might_be_remote && context_used) {
		/* gshared + remoting: dispatch through an rgctx-provided address */
		g_assert (cfg->generic_sharing_context);
		addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
		return mono_emit_calli (cfg, sig, args, addr, NULL);
	call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, FALSE, rgctx_arg ? TRUE : FALSE);
	if (might_be_remote)
		call->method = mono_marshal_get_remoting_invoke_with_check (method);
		call->method = method;
	call->inst.flags |= MONO_INST_HAS_METHOD;
	call->inst.inst_left = this;
		int vtable_reg, slot_reg, this_reg;
		this_reg = this->dreg;
#ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
		if ((method->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
			MONO_EMIT_NULL_CHECK (cfg, this_reg);
			/* Make a call to delegate->invoke_impl */
			call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
			call->inst.inst_basereg = this_reg;
			call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
			MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
			return (MonoInst*)call;
		if ((!cfg->compile_aot || enable_for_aot) &&
			(!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
			 (MONO_METHOD_IS_FINAL (method) &&
			  method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
			!(method->klass->marshalbyref && context_used)) {
			 * the method is not virtual, we just need to ensure this is not null
			 * and then we can call the method directly.
			if (method->klass->marshalbyref || method->klass == mono_defaults.object_class) {
				 * The check above ensures method is not gshared, this is needed since
				 * gshared methods can't have wrappers.
				method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
			if (!method->string_ctor)
				MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
			call->inst.opcode = callvirt_to_call (call->inst.opcode);
			MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
			return (MonoInst*)call;
		if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
			 * the method is virtual, but we can statically dispatch since either
			 * it's class or the method itself are sealed.
			 * But first we need to ensure it's not a null reference.
			MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
			call->inst.opcode = callvirt_to_call (call->inst.opcode);
			MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
			return (MonoInst*)call;
		/* true virtual dispatch: call through the vtable (faulting load
		 * doubles as the null check on 'this') */
		call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
		vtable_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
		if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
#ifdef MONO_ARCH_HAVE_IMT
				/* interface call through the IMT table (negative vtable offsets) */
				guint32 imt_slot = mono_method_get_imt_slot (method);
				emit_imt_argument (cfg, call, imt_arg);
				slot_reg = vtable_reg;
				call->inst.inst_offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
			if (slot_reg == -1) {
				/* no IMT: go through the per-interface offsets table */
				slot_reg = alloc_preg (cfg);
				mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
				call->inst.inst_offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
			/* plain virtual method: fixed slot in the vtable */
			slot_reg = vtable_reg;
			call->inst.inst_offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
				((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
#ifdef MONO_ARCH_HAVE_IMT
				/* generic virtual method: the imt_arg identifies the instantiation */
				g_assert (mono_method_signature (method)->generic_param_count);
				emit_imt_argument (cfg, call, imt_arg);
		call->inst.sreg1 = slot_reg;
		call->virtual = TRUE;
	MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
		set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
	return (MonoInst*)call;
/*
 * mono_emit_method_call:
 *
 *   Convenience wrapper: emit a call to METHOD using the method's own
 * signature, with no IMT argument and no rgctx argument (the two NULLs).
 */
2503 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2505 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), args, this, NULL, NULL);
/*
 * mono_emit_native_call:
 *
 *   Emit a call to the native function FUNC with signature SIG and append the
 * call instruction to the current basic block (cfg->cbb).
 */
2509 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
/* All four flags (virtual/calli/rgctx/tail — order per mono_emit_call_args) are FALSE here. */
2516 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE);
2519 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2521 return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 *
 *   Emit a call to the registered JIT icall identified by its address FUNC.
 * The call is routed through the icall's wrapper and uses the icall's
 * registered signature.
 */
2525 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2527 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2531 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2535 * mono_emit_abs_call:
2537 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2539 inline static MonoInst*
2540 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2541 MonoMethodSignature *sig, MonoInst **args)
2543 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2547 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
/* The table maps the patch-info pointer to itself: it is an identity set used
 * later to recognize call addresses that are really MonoJumpInfo*. */
2550 if (cfg->abs_patches == NULL)
2551 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2552 g_hash_table_insert (cfg->abs_patches, ji, ji);
2553 ins = mono_emit_native_call (cfg, ji, sig, args);
/* Mark the call so backends know fptr is a patch descriptor, not a real address. */
2554 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * mono_emit_widen_call_res:
 *
 *   Widen the result of a call whose return type is a sub-register-sized
 * integer.  Native (pinvoke) code — and code compiled through LLVM — might
 * return such values without initializing the upper bits of the register, so
 * an explicit sign/zero extension conversion is emitted after the call.
 */
2559 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2561 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2562 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2566 * Native code might return non register sized integers
2567 * without initializing the upper bits.
/* Pick the widening opcode matching the load opcode of the return type:
 * signed loads get sign-extension, unsigned loads get zero-extension. */
2569 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2570 case OP_LOADI1_MEMBASE:
2571 widen_op = OP_ICONV_TO_I1;
2573 case OP_LOADU1_MEMBASE:
2574 widen_op = OP_ICONV_TO_U1;
2576 case OP_LOADI2_MEMBASE:
2577 widen_op = OP_ICONV_TO_I2;
2579 case OP_LOADU2_MEMBASE:
2580 widen_op = OP_ICONV_TO_U2;
2586 if (widen_op != -1) {
2587 int dreg = alloc_preg (cfg);
2590 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
/* The widened value keeps the stack type of the original call result. */
2591 widen->type = ins->type;
/*
 * get_memcpy_method:
 *
 *   Return the managed String.memcpy(,,) helper from corlib, caching it in a
 * static on first use.  Aborts if the helper is missing (corlib too old).
 */
2601 get_memcpy_method (void)
2603 static MonoMethod *memcpy_method = NULL;
2604 if (!memcpy_method) {
2605 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2607 g_error ("Old corlib found. Install a new one");
2609 return memcpy_method;
/*
 * create_write_barrier_bitmap:
 *
 *   OR into *WB_BITMAP one bit per pointer-sized slot of KLASS (starting at
 * byte OFFSET from the destination) that holds an object reference, recursing
 * into value-type fields that themselves contain references.  The caller uses
 * the bitmap to decide which stores need a GC write barrier.
 */
2613 create_write_barrier_bitmap (MonoClass *klass, unsigned *wb_bitmap, int offset)
2615 MonoClassField *field;
2616 gpointer iter = NULL;
2618 while ((field = mono_class_get_fields (klass, &iter))) {
/* Static fields don't live in the instance copy being analyzed. */
2621 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
/* For valuetypes field->offset includes the (absent) MonoObject header; strip it. */
2623 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
2624 if (mono_type_is_reference (field->type)) {
2625 g_assert ((foffset % SIZEOF_VOID_P) == 0);
2626 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
2628 /*FIXME support nested value types so this works for: struct X { Y y; int z;} struct Y { object a,b; }*/
2629 MonoClass *field_class = mono_class_from_mono_type (field->type);
2630 if (field_class->has_references)
2631 create_write_barrier_bitmap (field_class, wb_bitmap, offset + foffset);
/*
 * emit_write_barrier:
 *
 *   Emit a GC write barrier for a reference store through PTR.  The stored
 * value is given either as VALUE (an instruction) or, when VALUE is NULL, as
 * the raw register VALUE_REG.  No-op unless cfg->gen_write_barriers is set.
 * Strategy, in decreasing order of preference:
 *   1. arch-specific OP_CARD_TABLE_WBARRIER instruction (JIT only, card table
 *      and nursery info available);
 *   2. inline card-table marking (shift address, optional mask, store 1 byte);
 *   3. call the generic managed write-barrier method.
 */
2637 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value, int value_reg)
2639 int card_table_shift_bits;
2640 gpointer card_table_mask;
2642 MonoInst *dummy_use;
2643 int nursery_shift_bits;
2644 size_t nursery_size;
2645 gboolean has_card_table_wb = FALSE;
2647 if (!cfg->gen_write_barriers)
2650 card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
2652 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
2654 #ifdef MONO_ARCH_HAVE_CARD_TABLE_WBARRIER
2655 has_card_table_wb = TRUE;
2658 if (has_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0) {
2661 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
2662 wbarrier->sreg1 = ptr->dreg;
2664 wbarrier->sreg2 = value->dreg;
2666 wbarrier->sreg2 = value_reg;
2667 MONO_ADD_INS (cfg->cbb, wbarrier);
2668 } else if (card_table) {
2669 int offset_reg = alloc_preg (cfg);
2670 int card_reg = alloc_preg (cfg);
/* card index = address >> shift, optionally masked to the table size. */
2673 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
2674 if (card_table_mask)
2675 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
2677 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
2678 * IMM's larger than 32bits.
2680 if (cfg->compile_aot) {
2681 MONO_EMIT_NEW_AOTCONST (cfg, card_reg, NULL, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR);
2683 MONO_INST_NEW (cfg, ins, OP_PCONST);
2684 ins->inst_p0 = card_table;
2685 ins->dreg = card_reg;
2686 MONO_ADD_INS (cfg->cbb, ins);
/* Mark the card: table [index] = 1 (byte store). */
2689 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
2690 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
2692 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
2693 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
/* Keep the stored value alive across the barrier so the register allocator
 * doesn't reuse its register too early. */
2697 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
2699 MONO_INST_NEW (cfg, dummy_use, OP_DUMMY_USE);
2700 dummy_use->sreg1 = value_reg;
2701 MONO_ADD_INS (cfg->cbb, dummy_use);
/*
 * mono_emit_wb_aware_memcpy:
 *
 *   Try to emit an inline, write-barrier-aware copy of a SIZE-byte valuetype
 * of type KLASS from iargs[1] (source address) to iargs[0] (destination
 * address).  A bitmap of reference-holding slots drives per-slot barriers.
 * Bails out (presumably returning FALSE to the caller — elided here) when
 * alignment is below pointer size or SIZE exceeds the bitmap's 32-slot limit.
 * Copies larger than 5 pointers are delegated to the
 * mono_gc_wbarrier_value_copy_bitmap icall instead of being unrolled.
 */
2706 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
2708 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
2709 unsigned need_wb = 0;
2714 /*types with references can't have alignment smaller than sizeof(void*) */
2715 if (align < SIZEOF_VOID_P)
2718 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
2719 if (size > 32 * SIZEOF_VOID_P)
2722 create_write_barrier_bitmap (klass, &need_wb, 0);
2724 /* We don't unroll more than 5 stores to avoid code bloat. */
2725 if (size > 5 * SIZEOF_VOID_P) {
2726 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
/* Round SIZE up to a multiple of the pointer size. */
2727 size += (SIZEOF_VOID_P - 1);
2728 size &= ~(SIZEOF_VOID_P - 1);
2730 EMIT_NEW_ICONST (cfg, iargs [2], size);
2731 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
2732 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
2736 destreg = iargs [0]->dreg;
2737 srcreg = iargs [1]->dreg;
2740 dest_ptr_reg = alloc_preg (cfg);
2741 tmp_reg = alloc_preg (cfg);
/* dest_ptr_reg walks the destination one pointer slot at a time. */
2744 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
2746 while (size >= SIZEOF_VOID_P) {
2747 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, tmp_reg, srcreg, offset);
2748 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
/* Barrier only for slots the bitmap flagged as references — condition elided
 * from this view; presumably tests the bit for this slot. */
2751 emit_write_barrier (cfg, iargs [0], NULL, tmp_reg);
2753 offset += SIZEOF_VOID_P;
2754 size -= SIZEOF_VOID_P;
2757 /*tmp += sizeof (void*)*/
2758 if (size >= SIZEOF_VOID_P) {
2759 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
2760 MONO_ADD_INS (cfg->cbb, iargs [0]);
2764 /* Those cannot be references since size < sizeof (void*) */
/* Tail copy of the remaining <pointer-size bytes in 4/2/1-byte chunks. */
2766 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
2767 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
2773 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
2774 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
2780 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
2781 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
2790 * Emit code to copy a valuetype of type @klass whose address is stored in
2791 * @src->dreg to memory whose address is stored at @dest->dreg.
/* If NATIVE is true the native (marshalled) size is used and the struct is
 * assumed to contain no references, so no write barriers are emitted. */
2794 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
2796 MonoInst *iargs [4];
2799 MonoMethod *memcpy_method;
2803 * This check breaks with spilled vars... need to handle it during verification anyway.
2804 * g_assert (klass && klass == src->klass && klass == dest->klass);
2808 n = mono_class_native_size (klass, &align);
2810 n = mono_class_value_size (klass, &align);
2812 /* if native is true there should be no references in the struct */
2813 if (cfg->gen_write_barriers && klass->has_references && !native) {
2814 /* Avoid barriers when storing to the stack */
2815 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
2816 (dest->opcode == OP_LDADDR))) {
2817 int context_used = 0;
2822 if (cfg->generic_sharing_context)
2823 context_used = mono_class_check_context_used (klass);
2825 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
2826 if ((cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
2828 } else if (context_used) {
/* Shared generic code: load the klass from the rgctx at runtime. */
2829 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
2831 if (cfg->compile_aot) {
2832 EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
2834 EMIT_NEW_PCONST (cfg, iargs [2], klass);
/* JIT case: precompute the GC descriptor so mono_value_copy need not. */
2835 mono_class_compute_gc_descriptor (klass);
2839 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* No barriers needed: plain inline copy for small sizes, managed memcpy otherwise. */
2844 if ((cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
2845 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
2846 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
2850 EMIT_NEW_ICONST (cfg, iargs [2], n);
2852 memcpy_method = get_memcpy_method ();
2853 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *
 *   Return the managed String.memset(,,) helper from corlib, caching it in a
 * static on first use.  Aborts if the helper is missing (corlib too old).
 */
2858 get_memset_method (void)
2860 static MonoMethod *memset_method = NULL;
2861 if (!memset_method) {
2862 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
2864 g_error ("Old corlib found. Install a new one");
2866 return memset_method;
/*
 * mini_emit_initobj:
 *
 *   Emit code to zero-initialize a valuetype of type KLASS at the address in
 * DEST->dreg.  Small sizes (<= 5 pointers) are zeroed inline; larger ones
 * call the managed memset helper with (dest, 0, n).
 */
2870 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
2872 MonoInst *iargs [3];
2875 MonoMethod *memset_method;
2877 /* FIXME: Optimize this for the case when dest is an LDADDR */
2879 mono_class_init (klass);
2880 n = mono_class_value_size (klass, &align);
2882 if (n <= sizeof (gpointer) * 5) {
2883 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
2886 memset_method = get_memset_method ();
2888 EMIT_NEW_ICONST (cfg, iargs [1], 0);
2889 EMIT_NEW_ICONST (cfg, iargs [2], n);
2890 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 *
 *   Emit IR to load the runtime generic context for METHOD.  The source of
 * the rgctx depends on how the method is shared:
 *   - method-inst sharing: the MRGCTX is in a dedicated variable;
 *   - static methods / valuetype methods: the vtable variable (possibly
 *     loaded through the MRGCTX's class_vtable field);
 *   - otherwise: the vtable is loaded from the `this' argument.
 * Only valid when compiling with a generic sharing context.
 */
2895 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
2897 MonoInst *this = NULL;
2899 g_assert (cfg->generic_sharing_context);
/* Load `this' only when it exists and the context does not come from the
 * method instantiation. */
2901 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
2902 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
2903 !method->klass->valuetype)
2904 EMIT_NEW_ARGLOAD (cfg, this, 0);
2906 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
2907 MonoInst *mrgctx_loc, *mrgctx_var;
2910 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
2912 mrgctx_loc = mono_get_vtable_var (cfg);
2913 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
2916 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
2917 MonoInst *vtable_loc, *vtable_var;
2921 vtable_loc = mono_get_vtable_var (cfg);
2922 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
2924 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
/* The variable actually holds an MRGCTX; fetch the vtable out of it. */
2925 MonoInst *mrgctx_var = vtable_var;
2928 vtable_reg = alloc_preg (cfg);
2929 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
2930 vtable_var->type = STACK_PTR;
/* Instance method on a reference type: vtable comes from `this'. */
2938 vtable_reg = alloc_preg (cfg);
2939 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 *
 *   Allocate (from MP) and fill a MonoJumpInfoRgctxEntry describing an rgctx
 * fetch: the requesting METHOD, whether the lookup goes through an MRGCTX,
 * the embedded patch (PATCH_TYPE/PATCH_DATA) identifying the looked-up item,
 * and the kind of information requested (INFO_TYPE).
 */
2944 static MonoJumpInfoRgctxEntry *
2945 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, int info_type)
2947 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
2948 res->method = method;
2949 res->in_mrgctx = in_mrgctx;
2950 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
2951 res->data->type = patch_type;
2952 res->data->data.target = patch_data;
2953 res->info_type = info_type;
/*
 * emit_rgctx_fetch:
 *
 *   Emit a call to the lazy rgctx-fetch trampoline, passing RGCTX as the
 * argument; ENTRY describes which slot to fetch and is resolved via the
 * MONO_PATCH_INFO_RGCTX_FETCH patch.
 */
2958 static inline MonoInst*
2959 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
2961 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 *
 *   Emit IR to fetch the RGCTX_TYPE information (e.g. KLASS, VTABLE) for
 * KLASS from the runtime generic context of the current method.
 */
2965 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
2966 MonoClass *klass, int rgctx_type)
2968 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
2969 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2971 return emit_rgctx_fetch (cfg, rgctx, entry);
2975 * emit_get_rgctx_method:
2977 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
2978 * normal constants, else emit a load from the rgctx.
2981 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
2982 MonoMethod *cmethod, int rgctx_type)
/* Non-shared code: the method (or its rgctx) is a compile-time constant. */
2984 if (!context_used) {
2987 switch (rgctx_type) {
2988 case MONO_RGCTX_INFO_METHOD:
2989 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
2991 case MONO_RGCTX_INFO_METHOD_RGCTX:
2992 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
2995 g_assert_not_reached ();
/* Shared code: fetch the information from the rgctx at runtime. */
2998 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
2999 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3001 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *
 *   Emit IR to fetch the RGCTX_TYPE information for FIELD from the runtime
 * generic context of the current method.
 */
3006 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3007 MonoClassField *field, int rgctx_type)
3009 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3010 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3012 return emit_rgctx_fetch (cfg, rgctx, entry);
3016 * On return the caller must check @klass for load errors.
/*
 * emit_generic_class_init:
 *
 *   Emit a call to the generic class-init trampoline for KLASS.  The vtable
 * argument is either fetched from the rgctx (shared code) or emitted as a
 * vtable constant; on architectures with MONO_ARCH_VTABLE_REG it is passed in
 * that fixed register.
 */
3019 emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
3021 MonoInst *vtable_arg;
3023 int context_used = 0;
3025 if (cfg->generic_sharing_context)
3026 context_used = mono_class_check_context_used (klass);
3029 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
3030 klass, MONO_RGCTX_INFO_VTABLE);
3032 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3036 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
/* LLVM uses a variant trampoline signature. */
3039 if (COMPILE_LLVM (cfg))
3040 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline_llvm, &vtable_arg);
3042 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
3043 #ifdef MONO_ARCH_VTABLE_REG
3044 mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
3045 cfg->uses_vtable_reg = TRUE;
/*
 * save_cast_details:
 *
 *   When --debug=casts is enabled, record the source class (loaded from the
 * object in OBJ_REG) and the target KLASS into the thread's MonoJitTlsData,
 * so a failing cast can produce a detailed error message.  No-op otherwise.
 */
3052 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg)
3054 if (mini_get_debug_options ()->better_cast_details) {
3055 int to_klass_reg = alloc_preg (cfg);
3056 int vtable_reg = alloc_preg (cfg);
3057 int klass_reg = alloc_preg (cfg);
3058 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
/* NOTE(review): message ends with "\n." — the trailing '.' after the newline
 * looks like a typo in the string literal. */
3061 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
3065 MONO_ADD_INS (cfg->cbb, tls_get);
3066 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3067 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3069 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
3070 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
3071 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
/*
 * reset_cast_details:
 *
 *   Clear the per-thread cast-details state written by save_cast_details ()
 * once the cast has succeeded.  No-op unless --debug=casts is enabled.
 */
3076 reset_cast_details (MonoCompile *cfg)
3078 /* Reset the variables holding the cast details */
3079 if (mini_get_debug_options ()->better_cast_details) {
3080 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3082 MONO_ADD_INS (cfg->cbb, tls_get);
3083 /* It is enough to reset the from field */
3084 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3089 * On return the caller must check @array_class for load errors
/*
 * mini_emit_check_array_type:
 *
 *   Emit a runtime check that OBJ is exactly an instance of ARRAY_CLASS,
 * throwing ArrayTypeMismatchException otherwise (used for array covariance
 * checks on stelem).  The comparison strategy depends on compilation mode:
 * class pointers under MONO_OPT_SHARED, rgctx-loaded vtable under generic
 * sharing, vtable constant/immediate otherwise.
 */
3092 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3094 int vtable_reg = alloc_preg (cfg);
3095 int context_used = 0;
3097 if (cfg->generic_sharing_context)
3098 context_used = mono_class_check_context_used (array_class);
3100 save_cast_details (cfg, array_class, obj->dreg);
/* Faulting load: also performs the implicit null check on OBJ. */
3102 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3104 if (cfg->opt & MONO_OPT_SHARED) {
3105 int class_reg = alloc_preg (cfg);
3106 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3107 if (cfg->compile_aot) {
3108 int klass_reg = alloc_preg (cfg);
3109 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
3110 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
3112 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
3114 } else if (context_used) {
3115 MonoInst *vtable_ins;
3117 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3118 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
3120 if (cfg->compile_aot) {
/* mono_class_vtable can fail on load errors; bail and let the caller check. */
3124 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3126 vt_reg = alloc_preg (cfg);
3127 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3128 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
3131 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3133 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
3137 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
3139 reset_cast_details (cfg);
3143 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3144 * generic code is generated.
/* Emits a call to the Nullable<T>.Unbox helper; under generic sharing the
 * helper's address and the rgctx are obtained at runtime and an indirect
 * call (calli) is used instead of a direct call. */
3147 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
3149 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3152 MonoInst *rgctx, *addr;
3154 /* FIXME: What if the class is shared? We might not
3155 have to get the address of the method from the
3157 addr = emit_get_rgctx_method (cfg, context_used, method,
3158 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3160 rgctx = emit_get_rgctx (cfg, method, context_used);
3162 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
3164 return mono_emit_method_call (cfg, method, &val, NULL);
/*
 * handle_unbox:
 *
 *   Emit code to unbox the object in sp[0] as valuetype KLASS: check that the
 * object is not an array (rank 0) and that its element class matches KLASS's
 * element class (InvalidCastException otherwise), then compute the address of
 * the payload (object + sizeof (MonoObject)) as a managed pointer.
 */
3169 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3173 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3174 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3175 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3176 int rank_reg = alloc_dreg (cfg ,STACK_I4);
3178 obj_reg = sp [0]->dreg;
/* Faulting load doubles as the null check. */
3179 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3180 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3182 /* FIXME: generics */
3183 g_assert (klass->rank == 0);
/* The object must not be an array. */
3186 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3187 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3189 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3190 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
/* Shared generic code: compare against the element class from the rgctx. */
3193 MonoInst *element_class;
3195 /* This assertion is from the unboxcast insn */
3196 g_assert (klass->rank == 0);
3198 element_class = emit_get_rgctx_klass (cfg, context_used,
3199 klass->element_class, MONO_RGCTX_INFO_KLASS);
3201 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3202 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3204 save_cast_details (cfg, klass->element_class, obj_reg);
3205 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
3206 reset_cast_details (cfg);
/* Result: pointer to the boxed payload, just past the object header. */
3209 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
3210 MONO_ADD_INS (cfg->cbb, add);
3211 add->type = STACK_MP;
3218 * Returns NULL and set the cfg exception on error.
/*
 * handle_alloc:
 *
 *   Emit code to allocate an object of type KLASS.  FOR_BOX selects the
 * boxing variant of the managed allocator.  Allocation strategy depends on
 * the compilation mode: rgctx-loaded klass/vtable under generic sharing,
 * mono_object_new under MONO_OPT_SHARED, a token-based mscorlib helper for
 * out-of-line AOT paths, the GC's managed allocator when available, or the
 * allocation function chosen by mono_class_get_allocation_ftn ().
 */
3221 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
3223 MonoInst *iargs [2];
3229 MonoInst *iargs [2];
3232 FIXME: we cannot get managed_alloc here because we can't get
3233 the class's vtable (because it's not a closed class)
3235 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3236 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
3239 if (cfg->opt & MONO_OPT_SHARED)
3240 rgctx_info = MONO_RGCTX_INFO_KLASS;
3242 rgctx_info = MONO_RGCTX_INFO_VTABLE;
3243 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
3245 if (cfg->opt & MONO_OPT_SHARED) {
3246 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3248 alloc_ftn = mono_object_new;
3251 alloc_ftn = mono_object_new_specific;
3254 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3257 if (cfg->opt & MONO_OPT_SHARED) {
3258 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3259 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
3261 alloc_ftn = mono_object_new;
3262 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
3263 /* This happens often in argument checking code, eg. throw new FooException... */
3264 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
3265 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
3266 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
3268 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3269 MonoMethod *managed_alloc = NULL;
/* Type load failure: report it through the cfg and return NULL (per contract above). */
3273 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3274 cfg->exception_ptr = klass;
3278 #ifndef MONO_CROSS_COMPILE
3279 managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
3282 if (managed_alloc) {
3283 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3284 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3286 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* pass_lw case: the allocator wants the instance size in pointer-sized words. */
3288 guint32 lw = vtable->klass->instance_size;
3289 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
3290 EMIT_NEW_ICONST (cfg, iargs [0], lw);
3291 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
3294 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3298 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3302 * Returns NULL and set the cfg exception on error.
/*
 * handle_box:
 *
 *   Emit code to box VAL as an object of type KLASS.  Nullable<T> goes
 * through the managed Box helper (indirect call under generic sharing);
 * everything else allocates via handle_alloc () and stores VAL into the
 * payload right after the object header.
 */
3305 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
3307 MonoInst *alloc, *ins;
3309 if (mono_class_is_nullable (klass)) {
3310 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3313 /* FIXME: What if the class is shared? We might not
3314 have to get the method address from the RGCTX. */
3315 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3316 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3317 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3319 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
3321 return mono_emit_method_call (cfg, method, &val, NULL);
3325 alloc = handle_alloc (cfg, klass, TRUE, context_used);
/* Copy the value into the freshly allocated box, past the MonoObject header. */
3329 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * mini_class_has_reference_variant_generic_argument:
 *
 *   Return whether KLASS is a generic instance (or, in shared code, an open
 * generic) with at least one variant (co- or contra-variant) type parameter
 * instantiated with a reference type — or with a type variable when
 * CONTEXT_USED is set.  Such casts need the caching cast helpers.
 */
3336 mini_class_has_reference_variant_generic_argument (MonoClass *klass, int context_used)
3339 MonoGenericContainer *container;
3340 MonoGenericInst *ginst;
3342 if (klass->generic_class) {
3343 container = klass->generic_class->container_class->generic_container;
3344 ginst = klass->generic_class->context.class_inst;
3345 } else if (klass->generic_container && context_used) {
3346 container = klass->generic_container;
3347 ginst = container->context.class_inst;
3352 for (i = 0; i < container->type_argc; ++i) {
/* Skip invariant parameters. */
3354 if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))
3356 type = ginst->type_argv [i];
3357 if (MONO_TYPE_IS_REFERENCE (type))
/* Under sharing, a type variable might be instantiated with a reference type. */
3360 if (context_used && (type->type == MONO_TYPE_VAR || type->type == MONO_TYPE_MVAR))
/* Whether an isinst/castclass on KLASS needs the icall slow path instead of
 * the inline check.  Currently forced TRUE for every class (see FIXME); the
 * intended condition follows the `||`. */
3366 // FIXME: This doesn't work yet (class libs tests fail?)
3367 #define is_complex_isinst(klass) (TRUE || (klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || klass->marshalbyref || (klass->flags & TYPE_ATTRIBUTE_SEALED) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
3370 * Returns NULL and set the cfg exception on error.
/*
 * handle_castclass:
 *
 *   Emit code for the castclass opcode: cast SRC to KLASS or throw
 * InvalidCastException.  Reference-variant generic targets use the caching
 * castclass helper; under generic sharing, "complex" classes go through the
 * mono_object_castclass icall; otherwise an inline null-check + vtable/klass
 * comparison sequence is emitted.
 */
3373 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3375 MonoBasicBlock *is_null_bb;
3376 int obj_reg = src->dreg;
3377 int vtable_reg = alloc_preg (cfg);
3378 MonoInst *klass_inst = NULL;
3383 if(mini_class_has_reference_variant_generic_argument (klass, context_used)) {
3384 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
3385 MonoInst *cache_ins;
3387 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
3392 /* klass - it's the second element of the cache entry*/
3393 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
3396 args [2] = cache_ins;
3398 return mono_emit_method_call (cfg, mono_castclass, args, NULL);
3401 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3403 if (is_complex_isinst (klass)) {
3404 /* Complex case, handle by an icall */
3410 args [1] = klass_inst;
3412 return mono_emit_jit_icall (cfg, mono_object_castclass, args);
3414 /* Simple case, handled by the code below */
/* Null references always pass castclass: branch straight to the join block. */
3418 NEW_BBLOCK (cfg, is_null_bb);
3420 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3421 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3423 save_cast_details (cfg, klass, obj_reg);
3425 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3426 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3427 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
3429 int klass_reg = alloc_preg (cfg);
3431 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed non-array class: a single equality check suffices. */
3433 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3434 /* the remoting code is broken, access the class for now */
3435 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3436 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3438 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3439 cfg->exception_ptr = klass;
3442 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3444 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3445 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3447 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3449 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3450 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
3454 MONO_START_BB (cfg, is_null_bb);
3456 reset_cast_details (cfg);
3462 * Returns NULL and set the cfg exception on error.
/*
 * handle_isinst:
 *
 *   Emit code for the isinst opcode: leave SRC on success, NULL on failure.
 * Reference-variant generic targets use the caching isinst helper; under
 * generic sharing, "complex" classes go through the mono_object_isinst
 * icall; otherwise an inline three-block sequence is emitted:
 *   is_null_bb — success path, result register already holds the object;
 *   false_bb   — failure path, result register set to 0;
 *   end_bb     — join block.
 */
3465 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3468 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
3469 int obj_reg = src->dreg;
3470 int vtable_reg = alloc_preg (cfg);
3471 int res_reg = alloc_ireg_ref (cfg);
3472 MonoInst *klass_inst = NULL;
3477 if(mini_class_has_reference_variant_generic_argument (klass, context_used)) {
3478 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
3479 MonoInst *cache_ins;
3481 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
3486 /* klass - it's the second element of the cache entry*/
3487 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
3490 args [2] = cache_ins;
3492 return mono_emit_method_call (cfg, mono_isinst, args, NULL);
3495 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3497 if (is_complex_isinst (klass)) {
3498 /* Complex case, handle by an icall */
3504 args [1] = klass_inst;
3506 return mono_emit_jit_icall (cfg, mono_object_isinst, args);
3508 /* Simple case, the code below can handle it */
3512 NEW_BBLOCK (cfg, is_null_bb);
3513 NEW_BBLOCK (cfg, false_bb);
3514 NEW_BBLOCK (cfg, end_bb);
3516 /* Do the assignment at the beginning, so the other assignment can be if converted */
3517 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
3518 ins->type = STACK_OBJ;
/* isinst on a null reference yields null — which res_reg already holds. */
3521 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3522 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
3524 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3526 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3527 g_assert (!context_used);
3528 /* the is_null_bb target simply copies the input register to the output */
3529 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
3531 int klass_reg = alloc_preg (cfg);
/* Array target: check rank first, then compare element classes, with
 * special handling for object/enum element types and vectors. */
3534 int rank_reg = alloc_preg (cfg);
3535 int eclass_reg = alloc_preg (cfg);
3537 g_assert (!context_used);
3538 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3539 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
3540 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3541 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3542 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
3543 if (klass->cast_class == mono_defaults.object_class) {
3544 int parent_reg = alloc_preg (cfg);
3545 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
3546 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
3547 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3548 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3549 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
3550 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
3551 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3552 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3553 } else if (klass->cast_class == mono_defaults.enum_class) {
3554 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3555 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3556 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
3557 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3559 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
3560 /* Check that the object is a vector too */
3561 int bounds_reg = alloc_preg (cfg);
3562 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
3563 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
3564 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3567 /* the is_null_bb target simply copies the input register to the output */
3568 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3570 } else if (mono_class_is_nullable (klass)) {
3571 g_assert (!context_used);
3572 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3573 /* the is_null_bb target simply copies the input register to the output */
3574 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
3576 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3577 g_assert (!context_used);
3578 /* the remoting code is broken, access the class for now */
3579 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3580 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3582 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3583 cfg->exception_ptr = klass;
3586 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3588 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3589 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3591 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3592 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
3594 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3595 /* the is_null_bb target simply copies the input register to the output */
3596 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
/* Failure path: result is NULL. */
3601 MONO_START_BB (cfg, false_bb);
3603 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
3604 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3606 MONO_START_BB (cfg, is_null_bb);
3608 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *
 *   Emit IR for the remoting-aware CISINST opcode. The result instruction's
 * dreg receives 0 if SRC is an instance of KLASS, 1 if it is not, and 2 if
 * SRC is a transparent proxy whose type cannot be determined locally.
 * NOTE(review): some lines of this function are not visible in this view;
 * comments describe only the visible emission sequence.
 */
3614 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3616 /* This opcode takes as input an object reference and a class, and returns:
3617 0) if the object is an instance of the class,
3618 1) if the object is not instance of the class,
3619 2) if the object is a proxy whose type cannot be determined */
3622 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
3623 int obj_reg = src->dreg;
3624 int dreg = alloc_ireg (cfg);
3626 int klass_reg = alloc_preg (cfg);
3628 NEW_BBLOCK (cfg, true_bb);
3629 NEW_BBLOCK (cfg, false_bb);
3630 NEW_BBLOCK (cfg, false2_bb);
3631 NEW_BBLOCK (cfg, end_bb);
3632 NEW_BBLOCK (cfg, no_proxy_bb);
/* A null reference is never an instance: jump straight to the "1" result. */
3634 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3635 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
3637 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3638 NEW_BBLOCK (cfg, interface_fail_bb);
3640 tmp_reg = alloc_preg (cfg);
3641 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Fast interface check against the vtable; success goes to true_bb. */
3642 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
3643 MONO_START_BB (cfg, interface_fail_bb);
3644 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* The iface check failed: only a transparent proxy can still match. */
3646 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
3648 tmp_reg = alloc_preg (cfg);
3649 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3650 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
/* Proxy with custom type info: type cannot be decided here -> result "2". */
3651 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
3653 tmp_reg = alloc_preg (cfg);
3654 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3655 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3657 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3658 tmp_reg = alloc_preg (cfg);
3659 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
/* For a transparent proxy, test against the remote class' proxy_class. */
3660 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3662 tmp_reg = alloc_preg (cfg);
3663 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3664 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3665 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3667 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
3668 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
3670 MONO_START_BB (cfg, no_proxy_bb);
3672 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
/* Result blocks: load the constant answer into dreg, then join at end_bb. */
3675 MONO_START_BB (cfg, false_bb);
3677 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3678 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3680 MONO_START_BB (cfg, false2_bb);
3682 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
3683 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3685 MONO_START_BB (cfg, true_bb);
3687 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3689 MONO_START_BB (cfg, end_bb);
/* Wrap the result register in an ICONST-typed instruction for the eval stack. */
3692 MONO_INST_NEW (cfg, ins, OP_ICONST);
3694 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *
 *   Emit IR for the remoting-aware CCASTCLASS opcode. The result dreg
 * receives 0 if SRC is an instance of KLASS (or null), 1 if SRC is a
 * transparent proxy whose type cannot be determined locally; any other
 * case throws InvalidCastException at runtime.
 * NOTE(review): some lines of this function are not visible in this view.
 */
3700 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3702 /* This opcode takes as input an object reference and a class, and returns:
3703 0) if the object is an instance of the class,
3704 1) if the object is a proxy whose type cannot be determined
3705 an InvalidCastException exception is thrown otherwise */
3708 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
3709 int obj_reg = src->dreg;
3710 int dreg = alloc_ireg (cfg);
3711 int tmp_reg = alloc_preg (cfg);
3712 int klass_reg = alloc_preg (cfg);
3714 NEW_BBLOCK (cfg, end_bb);
3715 NEW_BBLOCK (cfg, ok_result_bb);
/* Casting null always succeeds: branch straight to the "0" result. */
3717 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3718 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
/* Record source/target class info so a failing cast can build a message. */
3720 save_cast_details (cfg, klass, obj_reg);
3722 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3723 NEW_BBLOCK (cfg, interface_fail_bb);
3725 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3726 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
3727 MONO_START_BB (cfg, interface_fail_bb);
3728 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Non-proxy that failed the iface check: this throws InvalidCastException. */
3730 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
3732 tmp_reg = alloc_preg (cfg);
3733 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3734 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
/* Proxy without custom type info cannot possibly satisfy the cast. */
3735 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/* Proxy with custom type info: defer the decision -> result "1". */
3737 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3738 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3741 NEW_BBLOCK (cfg, no_proxy_bb);
3743 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3744 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3745 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3747 tmp_reg = alloc_preg (cfg);
3748 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
/* For a transparent proxy, test against the remote class' proxy_class. */
3749 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3751 tmp_reg = alloc_preg (cfg);
3752 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3753 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3754 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3756 NEW_BBLOCK (cfg, fail_1_bb);
3758 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
3760 MONO_START_BB (cfg, fail_1_bb);
3762 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3763 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3765 MONO_START_BB (cfg, no_proxy_bb);
/* Plain object: ordinary castclass check, throws on mismatch. */
3767 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
3770 MONO_START_BB (cfg, ok_result_bb);
3772 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3774 MONO_START_BB (cfg, end_bb);
/* Wrap the result register in an ICONST-typed instruction for the eval stack. */
3777 MONO_INST_NEW (cfg, ins, OP_ICONST);
3779 ins->type = STACK_I4;
/*
 * handle_delegate_ctor:
 *
 *   Inline the work of mono_delegate_ctor (): allocate the delegate object,
 * fill in its target/method/method_code/invoke_impl fields, and return the
 * new object instruction.
 * Returns NULL and set the cfg exception on error.
 * NOTE(review): some lines of this function are not visible in this view.
 */
3787 static G_GNUC_UNUSED MonoInst*
3788 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used)
3792 gpointer *trampoline;
3793 MonoInst *obj, *method_ins, *tramp_ins;
3797 obj = handle_alloc (cfg, klass, FALSE, 0);
3801 /* Inline the contents of mono_delegate_ctor */
3803 /* Set target field */
3804 /* Optimize away setting of NULL target */
3805 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0)) {
3806 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
/* Storing a reference into a heap object: notify the GC if required. */
3807 if (cfg->gen_write_barriers) {
3808 dreg = alloc_preg (cfg);
3809 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target));
3810 emit_write_barrier (cfg, ptr, target, 0);
3814 /* Set method field */
3815 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
3816 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
3817 if (cfg->gen_write_barriers) {
3818 dreg = alloc_preg (cfg);
3819 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method));
3820 emit_write_barrier (cfg, ptr, method_ins, 0);
3823 * To avoid looking up the compiled code belonging to the target method
3824 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
3825 * store it, and we fill it after the method has been compiled.
3827 if (!cfg->compile_aot && !method->dynamic) {
3828 MonoInst *code_slot_ins;
3831 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* Lazily create/look up the per-domain method -> code-slot hash under the domain lock. */
3833 domain = mono_domain_get ();
3834 mono_domain_lock (domain);
3835 if (!domain_jit_info (domain)->method_code_hash)
3836 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
3837 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
3839 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
3840 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
3842 mono_domain_unlock (domain);
3844 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
3846 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
3849 /* Set invoke_impl field */
3850 if (cfg->compile_aot) {
/* Under AOT the trampoline address is patched in at load time. */
3851 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, klass);
3853 trampoline = mono_create_delegate_trampoline (klass);
3854 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
3856 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
3858 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit a call to the mono_array_new_va icall wrapper for a RANK-dimensional
 * newarr/newobj, with the dimension arguments in SP. Marks the method as
 * containing varargs and disables LLVM (which cannot compile vararg calls).
 */
3864 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
3866 MonoJitICallInfo *info;
3868 /* Need to register the icall so it gets an icall wrapper */
3869 info = mono_get_array_new_va_icall (rank);
3871 cfg->flags |= MONO_CFG_HAS_VARARGS;
3873 /* mono_array_new_va () needs a vararg calling convention */
3874 cfg->disable_llvm = TRUE;
3876 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
3877 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * mono_emit_load_got_addr:
 *
 *   Materialize the GOT address into cfg->got_var at the very start of the
 * entry basic block, once per method. A no-op if there is no got_var or it
 * was already allocated.
 */
3881 mono_emit_load_got_addr (MonoCompile *cfg)
3883 MonoInst *getaddr, *dummy_use;
3885 if (!cfg->got_var || cfg->got_var_allocated)
3888 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
3889 getaddr->dreg = cfg->got_var->dreg;
3891 /* Add it to the start of the first bblock */
3892 if (cfg->bb_entry->code) {
/* Prepend: the GOT address must be set before any other instruction runs. */
3893 getaddr->next = cfg->bb_entry->code;
3894 cfg->bb_entry->code = getaddr;
3897 MONO_ADD_INS (cfg->bb_entry, getaddr);
3899 cfg->got_var_allocated = TRUE;
3902 * Add a dummy use to keep the got_var alive, since real uses might
3903 * only be generated by the back ends.
3904 * Add it to end_bblock, so the variable's lifetime covers the whole
3906 * It would be better to make the usage of the got var explicit in all
3907 * cases when the backend needs it (i.e. calls, throw etc.), so this
3908 * wouldn't be needed.
3910 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
3911 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Cached IL-size threshold for inlining; initialized lazily from the
 * MONO_INLINELIMIT environment variable (default INLINE_LENGTH_LIMIT). */
3914 static int inline_limit;
3915 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 *   Decide whether METHOD may be inlined into the method being compiled in
 * CFG. Checks sharing context, inline depth, noinline/synchronized flags,
 * MarshalByRef classes, IL size, static-constructor constraints, declarative
 * security (CAS), and — under soft float — R4 parameters/returns.
 * NOTE(review): some lines (returns, closing braces) are not visible in this view.
 */
3918 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
3920 MonoMethodHeaderSummary header;
3922 #ifdef MONO_ARCH_SOFT_FLOAT
3923 MonoMethodSignature *sig = mono_method_signature (method);
/* Generic sharing and deep inline chains are excluded outright. */
3927 if (cfg->generic_sharing_context)
3930 if (cfg->inline_depth > 10)
3933 #ifdef MONO_ARCH_HAVE_LMF_OPS
3934 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3935 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
3936 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
3941 if (!mono_method_get_header_summary (method, &header))
3944 /*runtime, icall and pinvoke are checked by summary call*/
3945 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
3946 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
3947 (method->klass->marshalbyref) ||
3951 /* also consider num_locals? */
3952 /* Do the size check early to avoid creating vtables */
3953 if (!inline_limit_inited) {
3954 if (getenv ("MONO_INLINELIMIT"))
3955 inline_limit = atoi (getenv ("MONO_INLINELIMIT"))
;
3957 inline_limit = INLINE_LENGTH_LIMIT;
3958 inline_limit_inited = TRUE;
3960 if (header.code_size >= inline_limit)
3964 * if we can initialize the class of the method right away, we do,
3965 * otherwise we don't allow inlining if the class needs initialization,
3966 * since it would mean inserting a call to mono_runtime_class_init()
3967 * inside the inlined code
3969 if (!(cfg->opt & MONO_OPT_SHARED)) {
3970 if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
3971 if (cfg->run_cctors && method->klass->has_cctor) {
3972 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
3973 if (!method->klass->runtime_info)
3974 /* No vtable created yet */
3976 vtable = mono_class_vtable (cfg->domain, method->klass);
3979 /* This makes so that inline cannot trigger */
3980 /* .cctors: too many apps depend on them */
3981 /* running with a specific order... */
3982 if (! vtable->initialized)
3984 mono_runtime_class_init (vtable);
3986 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
3987 if (!method->klass->runtime_info)
3988 /* No vtable created yet */
3990 vtable = mono_class_vtable (cfg->domain, method->klass);
3993 if (!vtable->initialized)
3998 * If we're compiling for shared code
3999 * the cctor will need to be run at aot method load time, for example,
4000 * or at the end of the compilation of the inlining method.
4002 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
4007 * CAS - do not inline methods with declarative security
4008 * Note: this has to be before any possible return TRUE;
4010 if (mono_method_has_declsec (method))
4013 #ifdef MONO_ARCH_SOFT_FLOAT
/* Soft float cannot inline methods passing or returning R4 values. */
4015 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
4017 for (i = 0; i < sig->param_count; ++i)
4018 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Decide whether a static field access from METHOD requires emitting a
 * class-initialization check for VTABLE's class. Already-initialized
 * classes (outside AOT), BeforeFieldInit classes, classes with no cctor to
 * run, and instance methods of the same class are exempt.
 */
4026 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoVTable *vtable)
4028 if (vtable->initialized && !cfg->compile_aot)
4031 if (vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
4034 if (!mono_class_needs_cctor_run (vtable->klass, method))
4037 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (vtable->klass == method->klass))
4038 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit IR computing the address of element INDEX of the one-dimensional
 * array ARR whose element class is KLASS. If BCHECK, a bounds check against
 * MonoArray.max_length is emitted first. On x86/amd64 a single LEA is used
 * for power-of-two element sizes; otherwise a multiply + add sequence.
 * Returns the address instruction (type STACK_MP).
 */
4045 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
4049 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
4051 mono_class_init (klass);
4052 size = mono_class_array_element_size (klass);
4054 mult_reg = alloc_preg (cfg);
4055 array_reg = arr->dreg;
4056 index_reg = index->dreg;
4058 #if SIZEOF_REGISTER == 8
4059 /* The array reg is 64 bits but the index reg is only 32 */
4060 if (COMPILE_LLVM (cfg)) {
/* LLVM handles the widening itself; keep the 32-bit index register. */
4062 index2_reg = index_reg;
4064 index2_reg = alloc_preg (cfg);
4065 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
4068 if (index->type == STACK_I8) {
4069 index2_reg = alloc_preg (cfg);
4070 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
4072 index2_reg = index_reg;
4077 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
4079 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Fast path: one LEA computes base + index << log2(size) + vector offset. */
4080 if (size == 1 || size == 2 || size == 4 || size == 8) {
4081 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
4083 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
4084 ins->klass = mono_class_get_element_class (klass);
4085 ins->type = STACK_MP;
/* Generic path: addr = arr + index * size + offsetof (MonoArray, vector). */
4091 add_reg = alloc_ireg_mp (cfg);
4093 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
4094 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
4095 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
4096 ins->klass = mono_class_get_element_class (klass);
4097 ins->type = STACK_MP;
4098 MONO_ADD_INS (cfg->cbb, ins);
4103 #ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit IR computing the address of element [i,j] of a rank-2 array with
 * element class KLASS, including per-dimension lower-bound adjustment and
 * range checks against the MonoArrayBounds pairs. Only compiled on
 * architectures with native multiply (depends on OP_PMUL).
 * Returns the address instruction (type STACK_MP).
 */
4105 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
4107 int bounds_reg = alloc_preg (cfg);
4108 int add_reg = alloc_ireg_mp (cfg);
4109 int mult_reg = alloc_preg (cfg);
4110 int mult2_reg = alloc_preg (cfg);
4111 int low1_reg = alloc_preg (cfg);
4112 int low2_reg = alloc_preg (cfg);
4113 int high1_reg = alloc_preg (cfg);
4114 int high2_reg = alloc_preg (cfg);
4115 int realidx1_reg = alloc_preg (cfg);
4116 int realidx2_reg = alloc_preg (cfg);
4117 int sum_reg = alloc_preg (cfg);
4122 mono_class_init (klass);
4123 size = mono_class_array_element_size (klass);
4125 index1 = index_ins1->dreg;
4126 index2 = index_ins2->dreg;
4128 /* range checking */
4129 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
4130 arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
/* Dimension 0: realidx = index - lower_bound; unsigned compare vs length. */
4132 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
4133 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4134 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
4135 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
4136 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
4137 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
4138 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Dimension 1: the second MonoArrayBounds entry follows the first in memory. */
4140 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
4141 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4142 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
4143 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
4144 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
4145 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
4146 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* addr = arr + ((realidx1 * len2 + realidx2) * size) + vector offset. */
4148 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
4149 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
4150 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
4151 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
4152 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
4154 ins->type = STACK_MP;
4156 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 *   Emit the element-address computation for an array Get/Set/Address
 * helper call CMETHOD. Rank 1 and (with MONO_OPT_INTRINS, on archs with
 * native multiply) rank 2 are inlined; higher ranks call a marshalling
 * helper obtained from mono_marshal_get_array_address ().
 */
4163 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
4167 MonoMethod *addr_method;
/* A setter's last parameter is the value, not an index. */
4170 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
4173 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], TRUE);
4175 #ifndef MONO_ARCH_EMULATE_MUL_DIV
4176 /* emit_ldelema_2 depends on OP_LMUL */
4177 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
4178 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
4182 element_size = mono_class_array_element_size (cmethod->klass->element_class);
4183 addr_method = mono_marshal_get_array_address (rank, element_size);
4184 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/* Default break policy: honor every breakpoint instruction. */
4189 static MonoBreakPolicy
4190 always_insert_breakpoint (MonoMethod *method)
4192 return MONO_BREAK_POLICY_ALWAYS;
/* Current policy callback; replaced via mono_set_break_policy (). */
4195 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
4198 * mono_set_break_policy:
4199 * policy_callback: the new callback function
4201 * Allow embedders to decide whether to actually obey breakpoint instructions
4202 * (both break IL instructions and Debugger.Break () method calls), for example
4203 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
4204 * untrusted or semi-trusted code.
4206 * @policy_callback will be called every time a break point instruction needs to
4207 * be inserted with the method argument being the method that calls Debugger.Break()
4208 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
4209 * if it wants the breakpoint to not be effective in the given method.
4210 * #MONO_BREAK_POLICY_ALWAYS is the default.
4213 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
4215 if (policy_callback)
4216 break_policy_func = policy_callback;
/* NULL resets to the default always-break policy. */
4218 break_policy_func = always_insert_breakpoint;
/*
 * should_insert_brekpoint:
 *
 *   Consult the embedder-installed break policy to decide whether a
 * breakpoint for METHOD should actually be emitted.
 * NOTE(review): the name misspells "breakpoint"; renaming would require
 * updating callers outside this view, so it is left as-is.
 */
4222 should_insert_brekpoint (MonoMethod *method) {
4223 switch (break_policy_func (method)) {
4224 case MONO_BREAK_POLICY_ALWAYS:
4226 case MONO_BREAK_POLICY_NEVER:
4228 case MONO_BREAK_POLICY_ON_DBG:
4229 return mono_debug_using_mono_debugger ();
/* Unknown enum value from the callback: warn and fall through to default. */
4231 g_warning ("Incorrect value returned from break policy callback");
/* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 *
 *   Inline Array.GetGenericValueImpl (IS_SET == 0) or SetGenericValueImpl
 * (IS_SET != 0): compute the element address (args [0] = array,
 * args [1] = index) and copy the value to/from args [2], typed by the
 * element class taken from fsig->params [2].
 */
4238 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
4240 MonoInst *addr, *store, *load;
4241 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
4243 /* the bounds check is already done by the callers */
4244 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
/* Set: load the value from args [2] and store it into the element. */
4246 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
4247 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
/* Get: load the element and store it through the args [2] pointer. */
4249 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
4250 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/*
 * mini_emit_inst_for_ctor:
 *
 *   Try to replace a constructor call with an intrinsic IR sequence.
 * Currently only SIMD constructors are handled (when MONO_OPT_SIMD is on);
 * otherwise the result stays NULL and the caller emits a normal call.
 */
4256 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4258 MonoInst *ins = NULL;
4259 #ifdef MONO_ARCH_SIMD_INTRINSICS
4260 if (cfg->opt & MONO_OPT_SIMD) {
4261 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
4271 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4273 MonoInst *ins = NULL;
4275 static MonoClass *runtime_helpers_class = NULL;
4276 if (! runtime_helpers_class)
4277 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
4278 "System.Runtime.CompilerServices", "RuntimeHelpers");
4280 if (cmethod->klass == mono_defaults.string_class) {
4281 if (strcmp (cmethod->name, "get_Chars") == 0) {
4282 int dreg = alloc_ireg (cfg);
4283 int index_reg = alloc_preg (cfg);
4284 int mult_reg = alloc_preg (cfg);
4285 int add_reg = alloc_preg (cfg);
4287 #if SIZEOF_REGISTER == 8
4288 /* The array reg is 64 bits but the index reg is only 32 */
4289 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
4291 index_reg = args [1]->dreg;
4293 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
4295 #if defined(TARGET_X86) || defined(TARGET_AMD64)
4296 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
4297 add_reg = ins->dreg;
4298 /* Avoid a warning */
4300 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
4303 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
4304 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
4305 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
4306 add_reg, G_STRUCT_OFFSET (MonoString, chars));
4308 type_from_op (ins, NULL, NULL);
4310 } else if (strcmp (cmethod->name, "get_Length") == 0) {
4311 int dreg = alloc_ireg (cfg);
4312 /* Decompose later to allow more optimizations */
4313 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
4314 ins->type = STACK_I4;
4315 ins->flags |= MONO_INST_FAULT;
4316 cfg->cbb->has_array_access = TRUE;
4317 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
4320 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
4321 int mult_reg = alloc_preg (cfg);
4322 int add_reg = alloc_preg (cfg);
4324 /* The corlib functions check for oob already. */
4325 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
4326 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
4327 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
4328 return cfg->cbb->last_ins;
4331 } else if (cmethod->klass == mono_defaults.object_class) {
4333 if (strcmp (cmethod->name, "GetType") == 0) {
4334 int dreg = alloc_ireg_ref (cfg);
4335 int vt_reg = alloc_preg (cfg);
4336 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
4337 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
4338 type_from_op (ins, NULL, NULL);
4341 #if !defined(MONO_ARCH_EMULATE_MUL_DIV)
4342 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0 && !mono_gc_is_moving ()) {
4343 int dreg = alloc_ireg (cfg);
4344 int t1 = alloc_ireg (cfg);
4346 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
4347 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
4348 ins->type = STACK_I4;
4352 } else if (strcmp (cmethod->name, ".ctor") == 0) {
4353 MONO_INST_NEW (cfg, ins, OP_NOP);
4354 MONO_ADD_INS (cfg->cbb, ins);
4358 } else if (cmethod->klass == mono_defaults.array_class) {
4359 if (strcmp (cmethod->name + 1, "etGenericValueImpl") == 0)
4360 return emit_array_generic_access (cfg, fsig, args, *cmethod->name == 'S');
4362 #ifndef MONO_BIG_ARRAYS
4364 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
4367 if ((strcmp (cmethod->name, "GetLength") == 0 || strcmp (cmethod->name, "GetLowerBound") == 0) && args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
4368 int dreg = alloc_ireg (cfg);
4369 int bounds_reg = alloc_ireg_mp (cfg);
4370 MonoBasicBlock *end_bb, *szarray_bb;
4371 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
4373 NEW_BBLOCK (cfg, end_bb);
4374 NEW_BBLOCK (cfg, szarray_bb);
4376 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
4377 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
4378 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
4379 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
4380 /* Non-szarray case */
4382 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4383 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
4385 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4386 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4387 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4388 MONO_START_BB (cfg, szarray_bb);
4391 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4392 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
4394 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4395 MONO_START_BB (cfg, end_bb);
4397 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
4398 ins->type = STACK_I4;
4404 if (cmethod->name [0] != 'g')
4407 if (strcmp (cmethod->name, "get_Rank") == 0) {
4408 int dreg = alloc_ireg (cfg);
4409 int vtable_reg = alloc_preg (cfg);
4410 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
4411 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
4412 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
4413 vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
4414 type_from_op (ins, NULL, NULL);
4417 } else if (strcmp (cmethod->name, "get_Length") == 0) {
4418 int dreg = alloc_ireg (cfg);
4420 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4421 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
4422 type_from_op (ins, NULL, NULL);
4427 } else if (cmethod->klass == runtime_helpers_class) {
4429 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
4430 EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
4434 } else if (cmethod->klass == mono_defaults.thread_class) {
4435 if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
4436 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
4437 MONO_ADD_INS (cfg->cbb, ins);
4439 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
4440 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
4441 MONO_ADD_INS (cfg->cbb, ins);
4444 } else if (cmethod->klass == mono_defaults.monitor_class) {
4445 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
4446 if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) {
4449 if (COMPILE_LLVM (cfg)) {
4451 * Pass the argument normally, the LLVM backend will handle the
4452 * calling convention problems.
4454 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
4456 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
4457 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
4458 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
4459 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
4462 return (MonoInst*)call;
4463 } else if (strcmp (cmethod->name, "Exit") == 0) {
4466 if (COMPILE_LLVM (cfg)) {
4467 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
4469 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
4470 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
4471 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
4472 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
4475 return (MonoInst*)call;
4477 #elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
4478 MonoMethod *fast_method = NULL;
4480 /* Avoid infinite recursion */
4481 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
4482 (strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
4483 strcmp (cfg->method->name, "FastMonitorExit") == 0))
4486 if ((strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) ||
4487 strcmp (cmethod->name, "Exit") == 0)
4488 fast_method = mono_monitor_get_fast_path (cmethod);
4492 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
4494 } else if (cmethod->klass->image == mono_defaults.corlib &&
4495 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
4496 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
4499 #if SIZEOF_REGISTER == 8
4500 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
4501 /* 64 bit reads are already atomic */
4502 MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
4503 ins->dreg = mono_alloc_preg (cfg);
4504 ins->inst_basereg = args [0]->dreg;
4505 ins->inst_offset = 0;
4506 MONO_ADD_INS (cfg->cbb, ins);
4510 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
4511 if (strcmp (cmethod->name, "Increment") == 0) {
4512 MonoInst *ins_iconst;
4515 if (fsig->params [0]->type == MONO_TYPE_I4)
4516 opcode = OP_ATOMIC_ADD_NEW_I4;
4517 #if SIZEOF_REGISTER == 8
4518 else if (fsig->params [0]->type == MONO_TYPE_I8)
4519 opcode = OP_ATOMIC_ADD_NEW_I8;
4522 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4523 ins_iconst->inst_c0 = 1;
4524 ins_iconst->dreg = mono_alloc_ireg (cfg);
4525 MONO_ADD_INS (cfg->cbb, ins_iconst);
4527 MONO_INST_NEW (cfg, ins, opcode);
4528 ins->dreg = mono_alloc_ireg (cfg);
4529 ins->inst_basereg = args [0]->dreg;
4530 ins->inst_offset = 0;
4531 ins->sreg2 = ins_iconst->dreg;
4532 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4533 MONO_ADD_INS (cfg->cbb, ins);
4535 } else if (strcmp (cmethod->name, "Decrement") == 0) {
4536 MonoInst *ins_iconst;
4539 if (fsig->params [0]->type == MONO_TYPE_I4)
4540 opcode = OP_ATOMIC_ADD_NEW_I4;
4541 #if SIZEOF_REGISTER == 8
4542 else if (fsig->params [0]->type == MONO_TYPE_I8)
4543 opcode = OP_ATOMIC_ADD_NEW_I8;
4546 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4547 ins_iconst->inst_c0 = -1;
4548 ins_iconst->dreg = mono_alloc_ireg (cfg);
4549 MONO_ADD_INS (cfg->cbb, ins_iconst);
4551 MONO_INST_NEW (cfg, ins, opcode);
4552 ins->dreg = mono_alloc_ireg (cfg);
4553 ins->inst_basereg = args [0]->dreg;
4554 ins->inst_offset = 0;
4555 ins->sreg2 = ins_iconst->dreg;
4556 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4557 MONO_ADD_INS (cfg->cbb, ins);
4559 } else if (strcmp (cmethod->name, "Add") == 0) {
4562 if (fsig->params [0]->type == MONO_TYPE_I4)
4563 opcode = OP_ATOMIC_ADD_NEW_I4;
4564 #if SIZEOF_REGISTER == 8
4565 else if (fsig->params [0]->type == MONO_TYPE_I8)
4566 opcode = OP_ATOMIC_ADD_NEW_I8;
4570 MONO_INST_NEW (cfg, ins, opcode);
4571 ins->dreg = mono_alloc_ireg (cfg);
4572 ins->inst_basereg = args [0]->dreg;
4573 ins->inst_offset = 0;
4574 ins->sreg2 = args [1]->dreg;
4575 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4576 MONO_ADD_INS (cfg->cbb, ins);
4579 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
4581 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
4582 if (strcmp (cmethod->name, "Exchange") == 0) {
4584 gboolean is_ref = fsig->params [0]->type == MONO_TYPE_OBJECT;
4586 if (fsig->params [0]->type == MONO_TYPE_I4)
4587 opcode = OP_ATOMIC_EXCHANGE_I4;
4588 #if SIZEOF_REGISTER == 8
4589 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I8) ||
4590 (fsig->params [0]->type == MONO_TYPE_I))
4591 opcode = OP_ATOMIC_EXCHANGE_I8;
4593 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I))
4594 opcode = OP_ATOMIC_EXCHANGE_I4;
4599 MONO_INST_NEW (cfg, ins, opcode);
4600 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
4601 ins->inst_basereg = args [0]->dreg;
4602 ins->inst_offset = 0;
4603 ins->sreg2 = args [1]->dreg;
4604 MONO_ADD_INS (cfg->cbb, ins);
4606 switch (fsig->params [0]->type) {
4608 ins->type = STACK_I4;
4612 ins->type = STACK_I8;
4614 case MONO_TYPE_OBJECT:
4615 ins->type = STACK_OBJ;
4618 g_assert_not_reached ();
4621 if (cfg->gen_write_barriers && is_ref)
4622 emit_write_barrier (cfg, args [0], args [1], -1);
4624 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
4626 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS
4627 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
4629 gboolean is_ref = MONO_TYPE_IS_REFERENCE (fsig->params [1]);
4630 if (fsig->params [1]->type == MONO_TYPE_I4)
4632 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I)
4633 size = sizeof (gpointer);
4634 else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I8)
4637 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
4638 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
4639 ins->sreg1 = args [0]->dreg;
4640 ins->sreg2 = args [1]->dreg;
4641 ins->sreg3 = args [2]->dreg;
4642 ins->type = STACK_I4;
4643 MONO_ADD_INS (cfg->cbb, ins);
4644 } else if (size == 8) {
4645 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
4646 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
4647 ins->sreg1 = args [0]->dreg;
4648 ins->sreg2 = args [1]->dreg;
4649 ins->sreg3 = args [2]->dreg;
4650 ins->type = STACK_I8;
4651 MONO_ADD_INS (cfg->cbb, ins);
4653 /* g_assert_not_reached (); */
4655 if (cfg->gen_write_barriers && is_ref)
4656 emit_write_barrier (cfg, args [0], args [1], -1);
4658 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS */
4662 } else if (cmethod->klass->image == mono_defaults.corlib) {
4663 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
4664 && strcmp (cmethod->klass->name, "Debugger") == 0) {
4665 if (should_insert_brekpoint (cfg->method))
4666 MONO_INST_NEW (cfg, ins, OP_BREAK);
4668 MONO_INST_NEW (cfg, ins, OP_NOP);
4669 MONO_ADD_INS (cfg->cbb, ins);
4672 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
4673 && strcmp (cmethod->klass->name, "Environment") == 0) {
4675 EMIT_NEW_ICONST (cfg, ins, 1);
4677 EMIT_NEW_ICONST (cfg, ins, 0);
4681 } else if (cmethod->klass == mono_defaults.math_class) {
4683 * There is general branches code for Min/Max, but it does not work for
4685 * http://everything2.com/?node_id=1051618
4689 #ifdef MONO_ARCH_SIMD_INTRINSICS
4690 if (cfg->opt & MONO_OPT_SIMD) {
4691 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
4697 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
4701 * This entry point could be used later for arbitrary method
4704 inline static MonoInst*
4705 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
4706 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
4708 if (method->klass == mono_defaults.string_class) {
4709 /* managed string allocation support */
4710 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
4711 MonoInst *iargs [2];
4712 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4713 MonoMethod *managed_alloc = NULL;
4715 g_assert (vtable); /*Should not fail since it System.String*/
4716 #ifndef MONO_CROSS_COMPILE
4717 managed_alloc = mono_gc_get_managed_allocator (vtable, FALSE);
4721 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4722 iargs [1] = args [0];
4723 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
/*
 * mono_save_args:
 *
 *   Spill the argument values on the evaluation stack SP into newly
 * created argument variables, used when setting up an inlined call:
 * one OP_LOCAL var is created per argument (including the implicit
 * 'this', whose type is derived from the stack slot) and an ARGSTORE
 * is emitted to initialize it from the corresponding stack value.
 */
4730 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
4732 MonoInst *store, *temp;
4735 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
4736 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
4739 * FIXME: We should use *args++ = sp [0], but that would mean the arg
4740 * would be different than the MonoInst's used to represent arguments, and
4741 * the ldelema implementation can't deal with that.
4742 * Solution: When ldelema is used on an inline argument, create a var for
4743 * it, emit ldelema on that var, and emit the saving code below in
4744 * inline_method () if needed.
4746 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
4747 cfg->args [i] = temp;
4748 /* This uses cfg->args [i] which is set by the preceding line */
4749 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
4750 store->cil_code = sp [0]->cil_code;
4755 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
4756 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
4758 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *
 *   Debugging knob: only allow inlining of callees whose full name starts
 * with the prefix given in the MONO_INLINE_CALLED_METHOD_NAME_LIMIT
 * environment variable.  The env var is read once and cached in a static;
 * an empty/unset limit allows everything.
 * NOTE(review): the static cache is initialized without synchronization —
 * benign for a debug-only switch, but racy in principle.
 */
4760 check_inline_called_method_name_limit (MonoMethod *called_method)
4763 static char *limit = NULL;
4765 if (limit == NULL) {
4766 char *limit_string = getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
4768 if (limit_string != NULL)
4769 limit = limit_string;
/* No limit configured: cache "" so everything is inlineable. */
4771 limit = (char *) "";
4774 if (limit [0] != '\0') {
4775 char *called_method_name = mono_method_full_name (called_method, TRUE);
4777 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
4778 g_free (called_method_name);
4780 //return (strncmp_result <= 0);
4781 return (strncmp_result == 0);
4788 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *
 *   Companion of check_inline_called_method_name_limit (): only allow
 * inlining when the CALLER's full name starts with the prefix in the
 * MONO_INLINE_CALLER_METHOD_NAME_LIMIT environment variable.  Same
 * caching behaviour and the same unsynchronized-static caveat.
 */
4790 check_inline_caller_method_name_limit (MonoMethod *caller_method)
4793 static char *limit = NULL;
4795 if (limit == NULL) {
4796 char *limit_string = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
4797 if (limit_string != NULL) {
4798 limit = limit_string;
/* No limit configured: cache "" so everything is inlineable. */
4800 limit = (char *) "";
4804 if (limit [0] != '\0') {
4805 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
4807 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
4808 g_free (caller_method_name);
4810 //return (strncmp_result <= 0);
4811 return (strncmp_result == 0);
/*
 * inline_method:
 *
 *   Try to inline CMETHOD at the current emission point.  Saves the parts
 * of CFG that mono_method_to_ir () overwrites (locals, args, cil offsets,
 * current bblock, generic context, ...), generates the callee's IR into a
 * fresh start/end bblock pair, then restores the saved state.  On success
 * (cost below the threshold, or INLINE_ALWAYS) the new blocks are linked
 * into the caller's CFG and merged where possible; on failure the newly
 * added bblocks are abandoned by resetting cfg->cbb.
 * NOTE(review): several lines of this function are elided in this chunk
 * (returns, else-arms, closing braces) — the surviving lines are kept
 * byte-identical below.
 */
4819 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
4820 guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_always)
4822 MonoInst *ins, *rvar = NULL;
4823 MonoMethodHeader *cheader;
4824 MonoBasicBlock *ebblock, *sbblock;
4826 MonoMethod *prev_inlined_method;
4827 MonoInst **prev_locals, **prev_args;
4828 MonoType **prev_arg_types;
4829 guint prev_real_offset;
4830 GHashTable *prev_cbb_hash;
4831 MonoBasicBlock **prev_cil_offset_to_bb;
4832 MonoBasicBlock *prev_cbb;
4833 unsigned char* prev_cil_start;
4834 guint32 prev_cil_offset_to_bb_len;
4835 MonoMethod *prev_current_method;
4836 MonoGenericContext *prev_generic_context;
4837 gboolean ret_var_set, prev_ret_var_set;
4839 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
4841 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
4842 if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
4845 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
4846 if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
4850 if (cfg->verbose_level > 2)
4851 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4853 if (!cmethod->inline_info) {
4854 mono_jit_stats.inlineable_methods++;
4855 cmethod->inline_info = 1;
4858 /* allocate local variables */
4859 cheader = mono_method_get_header (cmethod);
4861 if (cheader == NULL || mono_loader_get_last_error ()) {
4862 MonoLoaderError *error = mono_loader_get_last_error ();
4865 mono_metadata_free_mh (cheader);
4866 if (inline_always && error)
4867 mono_cfg_set_exception (cfg, error->exception_type);
4869 mono_loader_clear_error ();
4873 /*Must verify before creating locals as it can cause the JIT to assert.*/
4874 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
4875 mono_metadata_free_mh (cheader);
4879 /* allocate space to store the return value */
4880 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
4881 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
4885 prev_locals = cfg->locals;
4886 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
4887 for (i = 0; i < cheader->num_locals; ++i)
4888 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
4890 /* allocate start and end blocks */
4891 /* This is needed so if the inline is aborted, we can clean up */
4892 NEW_BBLOCK (cfg, sbblock);
4893 sbblock->real_offset = real_offset;
4895 NEW_BBLOCK (cfg, ebblock);
4896 ebblock->block_num = cfg->num_bblocks++;
4897 ebblock->real_offset = real_offset;
/* Save every piece of per-method CFG state that the recursive
 * mono_method_to_ir () call below will clobber. */
4899 prev_args = cfg->args;
4900 prev_arg_types = cfg->arg_types;
4901 prev_inlined_method = cfg->inlined_method;
4902 cfg->inlined_method = cmethod;
4903 cfg->ret_var_set = FALSE;
4904 cfg->inline_depth ++;
4905 prev_real_offset = cfg->real_offset;
4906 prev_cbb_hash = cfg->cbb_hash;
4907 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
4908 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
4909 prev_cil_start = cfg->cil_start;
4910 prev_cbb = cfg->cbb;
4911 prev_current_method = cfg->current_method;
4912 prev_generic_context = cfg->generic_context;
4913 prev_ret_var_set = cfg->ret_var_set;
/* Generate the callee's IR between sbblock and ebblock; RVAR receives
 * the return value.  A negative cost means the inline was aborted. */
4915 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, *ip == CEE_CALLVIRT);
4917 ret_var_set = cfg->ret_var_set;
/* Restore the caller's CFG state saved above. */
4919 cfg->inlined_method = prev_inlined_method;
4920 cfg->real_offset = prev_real_offset;
4921 cfg->cbb_hash = prev_cbb_hash;
4922 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
4923 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
4924 cfg->cil_start = prev_cil_start;
4925 cfg->locals = prev_locals;
4926 cfg->args = prev_args;
4927 cfg->arg_types = prev_arg_types;
4928 cfg->current_method = prev_current_method;
4929 cfg->generic_context = prev_generic_context;
4930 cfg->ret_var_set = prev_ret_var_set;
4931 cfg->inline_depth --;
/* Accept the inline when the cost is under the threshold (60) or the
 * caller demands it unconditionally. */
4933 if ((costs >= 0 && costs < 60) || inline_always) {
4934 if (cfg->verbose_level > 2)
4935 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4937 mono_jit_stats.inlined_methods++;
4939 /* always add some code to avoid block split failures */
4940 MONO_INST_NEW (cfg, ins, OP_NOP);
4941 MONO_ADD_INS (prev_cbb, ins);
4943 prev_cbb->next_bb = sbblock;
4944 link_bblock (cfg, prev_cbb, sbblock);
4947 * Get rid of the begin and end bblocks if possible to aid local
4950 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
4952 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
4953 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
4955 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
4956 MonoBasicBlock *prev = ebblock->in_bb [0];
4957 mono_merge_basic_blocks (cfg, prev, ebblock);
4959 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
4960 mono_merge_basic_blocks (cfg, prev_cbb, prev);
4961 cfg->cbb = prev_cbb;
4969 * If the inlined method contains only a throw, then the ret var is not
4970 * set, so set it to a dummy value.
4973 static double r8_0 = 0.0;
4975 switch (rvar->type) {
4977 MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
4980 MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
4985 MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
4988 MONO_INST_NEW (cfg, ins, OP_R8CONST);
4989 ins->type = STACK_R8;
4990 ins->inst_p0 = (void*)&r8_0;
4991 ins->dreg = rvar->dreg;
4992 MONO_ADD_INS (cfg->cbb, ins);
4995 MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (fsig->ret));
4998 g_assert_not_reached ();
5002 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
5005 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
/* Inline aborted: clear any pending exception state and discard the
 * blocks that were generated for the callee. */
5008 if (cfg->verbose_level > 2)
5009 printf ("INLINE ABORTED %s\n", mono_method_full_name (cmethod, TRUE));
5010 cfg->exception_type = MONO_EXCEPTION_NONE;
5011 mono_loader_clear_error ();
5013 /* This gets rid of the newly added bblocks */
5014 cfg->cbb = prev_cbb;
5016 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
5021 * Some of these comments may well be out-of-date.
5022 * Design decisions: we do a single pass over the IL code (and we do bblock
5023 * splitting/merging in the few cases when it's required: a back jump to an IL
5024 * address that was not already seen as bblock starting point).
5025 * Code is validated as we go (full verification is still better left to metadata/verify.c).
5026 * Complex operations are decomposed in simpler ones right away. We need to let the
5027 * arch-specific code peek and poke inside this process somehow (except when the
5028 * optimizations can take advantage of the full semantic info of coarse opcodes).
5029 * All the opcodes of the form opcode.s are 'normalized' to opcode.
5030 * MonoInst->opcode initially is the IL opcode or some simplification of that
5031 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
5032 * opcode with value bigger than OP_LAST.
5033 * At this point the IR can be handed over to an interpreter, a dumb code generator
5034 * or to the optimizing code generator that will translate it to SSA form.
5036 * Profiling directed optimizations.
5037 * We may compile by default with few or no optimizations and instrument the code
5038 * or the user may indicate what methods to optimize the most either in a config file
5039 * or through repeated runs where the compiler applies offline the optimizations to
5040 * each method and then decides if it was worth it.
5043 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
5044 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
5045 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
5046 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
5047 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
5048 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
5049 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
5050 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; LOAD_ERROR;}
5052 /* offset from br.s -> br like opcodes */
5053 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *
 *   Return whether the IL address IP still belongs to basic block BB,
 * i.e. no OTHER basic block starts at that IL offset.
 */
5056 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
5058 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
5060 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   First pass over the IL stream [START, END): decode each opcode and
 * create basic blocks (via GET_BBLOCK) at every branch target and at the
 * instruction following a branch/switch, so the main IR-generation pass
 * can rely on the cil_offset_to_bb map.  Blocks containing a 'throw' are
 * marked out_of_line so they can be placed cold.
 * NOTE(review): fall-through cases, ip advancement lines and the error
 * path are elided in this chunk; lines below are kept byte-identical.
 */
5064 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
5066 unsigned char *ip = start;
5067 unsigned char *target;
5070 MonoBasicBlock *bblock;
5071 const MonoOpcode *opcode;
5074 cli_addr = ip - start;
5075 i = mono_opcode_value ((const guint8 **)&ip, end);
5078 opcode = &mono_opcodes [i];
/* Dispatch on the operand kind to find the instruction length and any
 * embedded branch targets. */
5079 switch (opcode->argument) {
5080 case MonoInlineNone:
5083 case MonoInlineString:
5084 case MonoInlineType:
5085 case MonoInlineField:
5086 case MonoInlineMethod:
5089 case MonoShortInlineR:
5096 case MonoShortInlineVar:
5097 case MonoShortInlineI:
5100 case MonoShortInlineBrTarget:
/* 1-byte signed displacement, relative to the next instruction. */
5101 target = start + cli_addr + 2 + (signed char)ip [1];
5102 GET_BBLOCK (cfg, bblock, target);
5105 GET_BBLOCK (cfg, bblock, ip);
5107 case MonoInlineBrTarget:
/* 4-byte signed displacement, relative to the next instruction. */
5108 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
5109 GET_BBLOCK (cfg, bblock, target);
5112 GET_BBLOCK (cfg, bblock, ip);
5114 case MonoInlineSwitch: {
5115 guint32 n = read32 (ip + 1);
/* Switch targets are relative to the end of the whole switch
 * instruction (opcode + count + n 4-byte targets). */
5118 cli_addr += 5 + 4 * n;
5119 target = start + cli_addr;
5120 GET_BBLOCK (cfg, bblock, target);
5122 for (j = 0; j < n; ++j) {
5123 target = start + cli_addr + (gint32)read32 (ip);
5124 GET_BBLOCK (cfg, bblock, target);
5134 g_assert_not_reached ();
5137 if (i == CEE_THROW) {
5138 unsigned char *bb_start = ip - 1;
5140 /* Find the start of the bblock containing the throw */
5142 while ((bb_start >= start) && !bblock) {
5143 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
/* Throwing blocks are cold: keep them out of the hot code path. */
5147 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *
 *   Resolve the method referenced by TOKEN in the context of M.  For
 * wrapper methods the token indexes the wrapper's own data table; for
 * normal methods it is resolved through the image's metadata, allowing
 * open (uninstantiated) generic methods.
 */
5156 static inline MonoMethod *
5157 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
5161 if (m->wrapper_type != MONO_WRAPPER_NONE)
5162 return mono_method_get_wrapper_data (m, token);
5164 method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 *
 *   Like mini_get_method_allow_open (), but when generic sharing is not
 * in effect an open constructed declaring type is rejected (the elided
 * branch presumably NULLs the result — confirm against the full source).
 */
5169 static inline MonoMethod *
5170 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
5172 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
5174 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 *
 *   Resolve the class referenced by TOKEN in METHOD's context: wrapper
 * methods look the class up in their wrapper data, everything else goes
 * through the image metadata with CONTEXT applied.  The resolved class
 * is initialized before being returned.
 */
5180 static inline MonoClass*
5181 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
5185 if (method->wrapper_type != MONO_WRAPPER_NONE)
5186 klass = mono_method_get_wrapper_data (method, token);
5188 klass = mono_class_get_full (method->klass->image, token, context);
5190 mono_class_init (klass);
5195 * Returns TRUE if the JIT should abort inlining because "callee"
5196 * is influenced by security attributes.
/*
 * check_linkdemand:
 *
 *   Evaluate CAS link demands for a call from CALLER to CALLEE.  Only
 * runs when CALLER is an inlined method (cfg->method != caller) and
 * CALLEE carries declarative security.  An ECMA link demand gets code
 * emitted to raise a SecurityException at the call site; other failures
 * record a SECURITY_LINKDEMAND exception on the compilation (unless an
 * earlier exception is already pending).
 */
5199 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
5203 if ((cfg->method != caller) && mono_method_has_declsec (callee)) {
5207 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
5208 if (result == MONO_JIT_SECURITY_OK)
5211 if (result == MONO_JIT_LINKDEMAND_ECMA) {
5212 /* Generate code to throw a SecurityException before the actual call/link */
5213 MonoSecurityManager *secman = mono_security_manager_get_methods ();
5216 NEW_ICONST (cfg, args [0], 4);
5217 NEW_METHODCONST (cfg, args [1], caller);
5218 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
5219 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
5220 /* don't hide previous results */
5221 mono_cfg_set_exception (cfg, MONO_EXCEPTION_SECURITY_LINKDEMAND);
5222 cfg->exception_data = result;
/*
 * throw_exception:
 *
 *   Return (lazily resolving and caching in a static) the
 * SecurityManager.ThrowException helper method.
 * NOTE(review): the static cache is written without synchronization;
 * confirm this is only reached under the JIT's own locking.
 */
5230 throw_exception (void)
5232 static MonoMethod *method = NULL;
5235 MonoSecurityManager *secman = mono_security_manager_get_methods ();
5236 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *
 *   Emit a call to SecurityManager.ThrowException (EX) at the current
 * point, so EX is raised at runtime when this code path executes.
 */
5243 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
5245 MonoMethod *thrower = throw_exception ();
5248 EMIT_NEW_PCONST (cfg, args [0], ex);
5249 mono_emit_method_call (cfg, thrower, args, NULL);
5253 * Return the original method if a wrapper is specified. We can only access
5254 * the custom attributes from the original method.
5257 get_original_method (MonoMethod *method)
5259 if (method->wrapper_type == MONO_WRAPPER_NONE)
5262 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
5263 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
5266 /* in other cases we need to find the original method */
5267 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *
 *   CoreCLR security check for field access: if the (unwrapped) caller
 * may not touch FIELD, emit code that throws the returned exception at
 * runtime instead of failing the compilation.
 */
5271 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
5272 MonoBasicBlock *bblock, unsigned char *ip)
5274 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
5275 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
5277 emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *
 *   CoreCLR security check for method calls: if the (unwrapped) caller
 * may not call CALLEE, emit code that throws the returned exception at
 * runtime instead of failing the compilation.
 */
5281 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
5282 MonoBasicBlock *bblock, unsigned char *ip)
5284 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
5285 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
5287 emit_throw_exception (cfg, ex);
5291 * Check that the IL instructions at ip are the array initialization
5292 * sequence and return the pointer to the data and the size.
/*
 * initialize_array_data:
 *
 *   Pattern-match the IL sequence 'dup; ldtoken <field>; call
 * RuntimeHelpers::InitializeArray' that the C# compiler emits for array
 * initializers, and return a pointer to the static initializer data
 * (plus its size and field token via the out parameters) so the JIT can
 * replace the call with a direct memory copy.  For AOT, the field RVA is
 * returned instead so the lookup happens at load time.  Returns NULL when
 * the pattern or the element type does not qualify (e.g. types needing a
 * byte swap on big-endian targets).
 */
5295 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
5298 * newarr[System.Int32]
5300 * ldtoken field valuetype ...
5301 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
5303 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
5304 guint32 token = read32 (ip + 7);
5305 guint32 field_token = read32 (ip + 2);
5306 guint32 field_index = field_token & 0xffffff;
5308 const char *data_ptr;
5310 MonoMethod *cmethod;
5311 MonoClass *dummy_class;
5312 MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
5318 *out_field_token = field_token;
5320 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* Make sure the callee really is RuntimeHelpers.InitializeArray. */
5323 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
5325 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
5326 case MONO_TYPE_BOOLEAN:
5330 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
5331 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
5332 case MONO_TYPE_CHAR:
5342 return NULL; /* stupid ARM FP swapped format */
/* The initializer blob must be large enough for the whole array. */
5352 if (size > mono_type_size (field->type, &dummy_align))
5355 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
5356 if (!method->klass->image->dynamic) {
5357 field_index = read32 (ip + 2) & 0xffffff;
5358 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
5359 data_ptr = mono_image_rva_map (method->klass->image, rva);
5360 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
5361 /* for aot code we do the lookup on load */
5362 if (aot && data_ptr)
5363 return GUINT_TO_POINTER (rva);
5365 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
5367 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *
 *   Record an InvalidProgramException on CFG, with a message naming the
 * method and disassembling the offending instruction at IP (or noting an
 * empty body).  The header is queued on headers_to_free for later release.
 */
5375 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
5377 char *method_fname = mono_method_full_name (method, TRUE);
5379 MonoMethodHeader *header = mono_method_get_header (method);
5381 if (header->code_size == 0)
5382 method_code = g_strdup ("method body is empty.");
5384 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
5385 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
5386 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
5387 g_free (method_fname);
5388 g_free (method_code);
5389 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * set_exception_object:
 *
 *   Record a pre-built exception object on CFG (OBJECT_SUPPLIED mode),
 * registering exception_ptr as a GC root so the object stays alive.
 */
5393 set_exception_object (MonoCompile *cfg, MonoException *exception)
5395 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OBJECT_SUPPLIED);
5396 MONO_GC_REGISTER_ROOT_SINGLE (cfg->exception_ptr);
5397 cfg->exception_ptr = exception;
/*
 * generic_class_is_reference_type:
 *
 *   Return whether KLASS is a reference type, resolving through the
 * generic-sharing context first so a shared type variable is judged by
 * its basic (constraint) type rather than the open parameter.
 */
5401 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
5405 if (cfg->generic_sharing_context)
5406 type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, &klass->byval_arg);
5408 type = &klass->byval_arg;
5409 return MONO_TYPE_IS_REFERENCE (type);
/*
 * emit_stloc_ir:
 *
 *   Emit the store of stack value *SP into local N.  When the store is a
 * plain register move and *SP is the constant instruction just emitted,
 * skip the move by retargeting the constant's destination register
 * directly at the local's register.
 */
5413 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
5416 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
5417 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
5418 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
5419 /* Optimize reg-reg moves away */
5421 * Can't optimize other opcodes, since sp[0] might point to
5422 * the last ins of a decomposed opcode.
5424 sp [0]->dreg = (cfg)->locals [n]->dreg;
5426 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
5431 * ldloca inhibits many optimizations so try to get rid of it in common
/*
 * emit_optimized_ldloca_ir:
 *
 *   Recognize 'ldloca <n>; initobj <type>' within one basic block and
 * emit the initialization directly on the local (NULL store for
 * reference types, VZERO for value types), avoiding the address-taken
 * local that would otherwise inhibit later optimizations.  Returns the
 * new ip past the consumed sequence (elided here), or presumably NULL
 * when the pattern does not match — confirm against the full source.
 * SIZE selects the short (ldloca.s) or long operand encoding.
 */
5434 static inline unsigned char *
5435 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
5444 local = read16 (ip + 2);
5448 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
5449 gboolean skip = FALSE;
5451 /* From the INITOBJ case */
5452 token = read32 (ip + 2);
5453 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
5454 CHECK_TYPELOAD (klass);
5455 if (generic_class_is_reference_type (cfg, klass)) {
5456 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5457 } else if (MONO_TYPE_IS_REFERENCE (&klass->byval_arg)) {
5458 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5459 } else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
5460 MONO_EMIT_NEW_VZERO (cfg, cfg->locals [local]->dreg, klass);
/*
 * is_exception_class:
 *
 *   Walk the inheritance chain of CLASS and return whether it derives
 * from (or is) System.Exception.
 */
5473 is_exception_class (MonoClass *class)
5476 if (class == mono_defaults.exception_class)
5478 class = class->parent;
5484 * is_jit_optimizer_disabled:
5486 * Determine whenever M's assembly has a DebuggableAttribute with the
5487 * IsJITOptimizerDisabled flag set.
/*
 * is_jit_optimizer_disabled:
 *
 *   Determine whether M's assembly carries a
 * [Debuggable (..., isJITOptimizerDisabled: true)] attribute.  The answer
 * is computed once per assembly and cached on it; the memory barrier
 * orders the cached value before the 'inited' flag for lock-free readers.
 */
5490 is_jit_optimizer_disabled (MonoMethod *m)
5492 MonoAssembly *ass = m->klass->image->assembly;
5493 MonoCustomAttrInfo* attrs;
5494 static MonoClass *klass;
5496 gboolean val = FALSE;
5499 if (ass->jit_optimizer_disabled_inited)
5500 return ass->jit_optimizer_disabled;
5502 klass = mono_class_from_name_cached (mono_defaults.corlib, "System.Diagnostics", "DebuggableAttribute");
5504 attrs = mono_custom_attrs_from_assembly (ass);
5506 for (i = 0; i < attrs->num_attrs; ++i) {
5507 MonoCustomAttrEntry *attr = &attrs->attrs [i];
5510 MonoMethodSignature *sig;
5512 if (!attr->ctor || attr->ctor->klass != klass)
5514 /* Decode the attribute. See reflection.c */
5515 len = attr->data_size;
5516 p = (const char*)attr->data;
/* Custom attribute blobs start with the 0x0001 prolog. */
5517 g_assert (read16 (p) == 0x0001);
5520 // FIXME: Support named parameters
5521 sig = mono_method_signature (attr->ctor);
/* Only the DebuggableAttribute(bool, bool) constructor is decoded. */
5522 if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
5524 /* Two boolean arguments */
5528 mono_custom_attrs_free (attrs);
5531 ass->jit_optimizer_disabled = val;
5532 mono_memory_barrier ();
5533 ass->jit_optimizer_disabled_inited = TRUE;
/*
 * is_supported_tail_call:
 *
 *   Decide whether the call from METHOD to CMETHOD (signature FSIG) can
 * be compiled as a real tail call.  Starts from an arch-specific or
 * signature-equality test, then vetoes the optimization whenever an
 * argument could point into the current frame (byref/pointer/fnptr
 * params, valuetype 'this'), for pinvokes, when the caller saves an LMF,
 * or for most wrapper callees.  The COUNT env-var block is a bisection
 * aid for debugging tail-call miscompilations.
 */
5539 is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig)
5541 gboolean supported_tail_call;
5544 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
5545 supported_tail_call = MONO_ARCH_USE_OP_TAIL_CALL (mono_method_signature (method), mono_method_signature (cmethod));
5547 supported_tail_call = mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
5550 for (i = 0; i < fsig->param_count; ++i) {
5551 if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
5552 /* These can point to the current method's stack */
5553 supported_tail_call = FALSE;
5555 if (fsig->hasthis && cmethod->klass->valuetype)
5556 /* this might point to the current method's stack */
5557 supported_tail_call = FALSE;
5558 if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
5559 supported_tail_call = FALSE;
5560 if (cfg->method->save_lmf)
5561 supported_tail_call = FALSE;
5562 if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
5563 supported_tail_call = FALSE;
5565 /* Debugging support */
5567 if (supported_tail_call) {
5568 static int count = 0;
5570 if (getenv ("COUNT")) {
5571 if (count == atoi (getenv ("COUNT")))
5572 printf ("LAST: %s\n", mono_method_full_name (cmethod, TRUE));
5573 if (count > atoi (getenv ("COUNT")))
5574 supported_tail_call = FALSE;
5579 return supported_tail_call;
5583 * mono_method_to_ir:
5585 * Translate the .net IL into linear IR.
5588 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
5589 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
5590 guint inline_offset, gboolean is_virtual_call)
5593 MonoInst *ins, **sp, **stack_start;
5594 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
5595 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
5596 MonoMethod *cmethod, *method_definition;
5597 MonoInst **arg_array;
5598 MonoMethodHeader *header;
5600 guint32 token, ins_flag;
5602 MonoClass *constrained_call = NULL;
5603 unsigned char *ip, *end, *target, *err_pos;
5604 static double r8_0 = 0.0;
5605 MonoMethodSignature *sig;
5606 MonoGenericContext *generic_context = NULL;
5607 MonoGenericContainer *generic_container = NULL;
5608 MonoType **param_types;
5609 int i, n, start_new_bblock, dreg;
5610 int num_calls = 0, inline_costs = 0;
5611 int breakpoint_id = 0;
5613 MonoBoolean security, pinvoke;
5614 MonoSecurityManager* secman = NULL;
5615 MonoDeclSecurityActions actions;
5616 GSList *class_inits = NULL;
5617 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
5619 gboolean init_locals, seq_points, skip_dead_blocks;
5620 gboolean disable_inline;
5622 disable_inline = is_jit_optimizer_disabled (method);
5624 /* serialization and xdomain stuff may need access to private fields and methods */
5625 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
5626 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
5627 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
5628 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
5629 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
5630 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
5632 dont_verify |= mono_security_get_mode () == MONO_SECURITY_MODE_SMCS_HACK;
5634 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
5635 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
5636 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
5637 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
5638 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
5640 image = method->klass->image;
5641 header = mono_method_get_header (method);
5643 MonoLoaderError *error;
5645 if ((error = mono_loader_get_last_error ())) {
5646 mono_cfg_set_exception (cfg, error->exception_type);
5648 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
5649 cfg->exception_message = g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name);
5651 goto exception_exit;
5653 generic_container = mono_method_get_generic_container (method);
5654 sig = mono_method_signature (method);
5655 num_args = sig->hasthis + sig->param_count;
5656 ip = (unsigned char*)header->code;
5657 cfg->cil_start = ip;
5658 end = ip + header->code_size;
5659 mono_jit_stats.cil_code_size += header->code_size;
5660 init_locals = header->init_locals;
5662 seq_points = cfg->gen_seq_points && cfg->method == method;
5665 * Methods without init_locals set could cause asserts in various passes
5670 method_definition = method;
5671 while (method_definition->is_inflated) {
5672 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
5673 method_definition = imethod->declaring;
5676 /* SkipVerification is not allowed if core-clr is enabled */
5677 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
5679 dont_verify_stloc = TRUE;
5682 if (mono_debug_using_mono_debugger ())
5683 cfg->keep_cil_nops = TRUE;
5685 if (sig->is_inflated)
5686 generic_context = mono_method_get_context (method);
5687 else if (generic_container)
5688 generic_context = &generic_container->context;
5689 cfg->generic_context = generic_context;
5691 if (!cfg->generic_sharing_context)
5692 g_assert (!sig->has_type_parameters);
5694 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
5695 g_assert (method->is_inflated);
5696 g_assert (mono_method_get_context (method)->method_inst);
5698 if (method->is_inflated && mono_method_get_context (method)->method_inst)
5699 g_assert (sig->generic_param_count);
5701 if (cfg->method == method) {
5702 cfg->real_offset = 0;
5704 cfg->real_offset = inline_offset;
5707 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
5708 cfg->cil_offset_to_bb_len = header->code_size;
5710 cfg->current_method = method;
5712 if (cfg->verbose_level > 2)
5713 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
5715 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
5717 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
5718 for (n = 0; n < sig->param_count; ++n)
5719 param_types [n + sig->hasthis] = sig->params [n];
5720 cfg->arg_types = param_types;
5722 dont_inline = g_list_prepend (dont_inline, method);
5723 if (cfg->method == method) {
5725 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
5726 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
5729 NEW_BBLOCK (cfg, start_bblock);
5730 cfg->bb_entry = start_bblock;
5731 start_bblock->cil_code = NULL;
5732 start_bblock->cil_length = 0;
5733 #if defined(__native_client_codegen__)
5734 MONO_INST_NEW (cfg, ins, OP_NACL_GC_SAFE_POINT);
5735 ins->dreg = alloc_dreg (cfg, STACK_I4);
5736 MONO_ADD_INS (start_bblock, ins);
5740 NEW_BBLOCK (cfg, end_bblock);
5741 cfg->bb_exit = end_bblock;
5742 end_bblock->cil_code = NULL;
5743 end_bblock->cil_length = 0;
5744 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
5745 g_assert (cfg->num_bblocks == 2);
5747 arg_array = cfg->args;
5749 if (header->num_clauses) {
5750 cfg->spvars = g_hash_table_new (NULL, NULL);
5751 cfg->exvars = g_hash_table_new (NULL, NULL);
5753 /* handle exception clauses */
5754 for (i = 0; i < header->num_clauses; ++i) {
5755 MonoBasicBlock *try_bb;
5756 MonoExceptionClause *clause = &header->clauses [i];
5757 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
5758 try_bb->real_offset = clause->try_offset;
5759 try_bb->try_start = TRUE;
5760 try_bb->region = ((i + 1) << 8) | clause->flags;
5761 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
5762 tblock->real_offset = clause->handler_offset;
5763 tblock->flags |= BB_EXCEPTION_HANDLER;
5765 link_bblock (cfg, try_bb, tblock);
5767 if (*(ip + clause->handler_offset) == CEE_POP)
5768 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
5770 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
5771 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
5772 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
5773 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5774 MONO_ADD_INS (tblock, ins);
5777 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
5778 MONO_ADD_INS (tblock, ins);
5781 /* todo: is a fault block unsafe to optimize? */
5782 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
5783 tblock->flags |= BB_EXCEPTION_UNSAFE;
5787 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
5789 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
5791 /* catch and filter blocks get the exception object on the stack */
5792 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
5793 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5794 MonoInst *dummy_use;
5796 /* mostly like handle_stack_args (), but just sets the input args */
5797 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
5798 tblock->in_scount = 1;
5799 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5800 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5803 * Add a dummy use for the exvar so its liveness info will be
5807 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
5809 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5810 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
5811 tblock->flags |= BB_EXCEPTION_HANDLER;
5812 tblock->real_offset = clause->data.filter_offset;
5813 tblock->in_scount = 1;
5814 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5815 /* The filter block shares the exvar with the handler block */
5816 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5817 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5818 MONO_ADD_INS (tblock, ins);
5822 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
5823 clause->data.catch_class &&
5824 cfg->generic_sharing_context &&
5825 mono_class_check_context_used (clause->data.catch_class)) {
5827 * In shared generic code with catch
5828 * clauses containing type variables
5829 * the exception handling code has to
5830 * be able to get to the rgctx.
5831 * Therefore we have to make sure that
5832 * the vtable/mrgctx argument (for
5833 * static or generic methods) or the
5834 * "this" argument (for non-static
5835 * methods) are live.
5837 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5838 mini_method_get_context (method)->method_inst ||
5839 method->klass->valuetype) {
5840 mono_get_vtable_var (cfg);
5842 MonoInst *dummy_use;
5844 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
5849 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
5850 cfg->cbb = start_bblock;
5851 cfg->args = arg_array;
5852 mono_save_args (cfg, sig, inline_args);
5855 /* FIRST CODE BLOCK */
5856 NEW_BBLOCK (cfg, bblock);
5857 bblock->cil_code = ip;
5861 ADD_BBLOCK (cfg, bblock);
5863 if (cfg->method == method) {
5864 breakpoint_id = mono_debugger_method_has_breakpoint (method);
5865 if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
5866 MONO_INST_NEW (cfg, ins, OP_BREAK);
5867 MONO_ADD_INS (bblock, ins);
5871 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5872 secman = mono_security_manager_get_methods ();
5874 security = (secman && mono_method_has_declsec (method));
5875 /* at this point having security doesn't mean we have any code to generate */
5876 if (security && (cfg->method == method)) {
5877 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
5878 * And we do not want to enter the next section (with allocation) if we
5879 * have nothing to generate */
5880 security = mono_declsec_get_demands (method, &actions);
5883 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
5884 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
5886 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5887 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5888 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
5890 /* unless the method or it's class has the [SuppressUnmanagedCodeSecurity] attribute */
5891 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5895 mono_custom_attrs_free (custom);
5898 custom = mono_custom_attrs_from_class (wrapped->klass);
5899 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5903 mono_custom_attrs_free (custom);
5906 /* not a P/Invoke after all */
5911 if ((init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
5912 /* we use a separate basic block for the initialization code */
5913 NEW_BBLOCK (cfg, init_localsbb);
5914 cfg->bb_init = init_localsbb;
5915 init_localsbb->real_offset = cfg->real_offset;
5916 start_bblock->next_bb = init_localsbb;
5917 init_localsbb->next_bb = bblock;
5918 link_bblock (cfg, start_bblock, init_localsbb);
5919 link_bblock (cfg, init_localsbb, bblock);
5921 cfg->cbb = init_localsbb;
5923 start_bblock->next_bb = bblock;
5924 link_bblock (cfg, start_bblock, bblock);
5927 /* at this point we know, if security is TRUE, that some code needs to be generated */
5928 if (security && (cfg->method == method)) {
5931 mono_jit_stats.cas_demand_generation++;
5933 if (actions.demand.blob) {
5934 /* Add code for SecurityAction.Demand */
5935 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
5936 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
5937 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5938 mono_emit_method_call (cfg, secman->demand, args, NULL);
5940 if (actions.noncasdemand.blob) {
5941 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
5942 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
5943 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
5944 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
5945 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5946 mono_emit_method_call (cfg, secman->demand, args, NULL);
5948 if (actions.demandchoice.blob) {
5949 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
5950 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
5951 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
5952 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
5953 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
5957 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
5959 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
5962 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
5963 /* check if this is native code, e.g. an icall or a p/invoke */
5964 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
5965 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5967 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
5968 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
5970 /* if this ia a native call then it can only be JITted from platform code */
5971 if ((icall || pinvk) && method->klass && method->klass->image) {
5972 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
5973 MonoException *ex = icall ? mono_get_exception_security () :
5974 mono_get_exception_method_access ();
5975 emit_throw_exception (cfg, ex);
5982 if (header->code_size == 0)
5985 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
5990 if (cfg->method == method)
5991 mono_debug_init_method (cfg, bblock, breakpoint_id);
5993 for (n = 0; n < header->num_locals; ++n) {
5994 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
5999 /* We force the vtable variable here for all shared methods
6000 for the possibility that they might show up in a stack
6001 trace where their exact instantiation is needed. */
6002 if (cfg->generic_sharing_context && method == cfg->method) {
6003 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
6004 mini_method_get_context (method)->method_inst ||
6005 method->klass->valuetype) {
6006 mono_get_vtable_var (cfg);
6008 /* FIXME: Is there a better way to do this?
6009 We need the variable live for the duration
6010 of the whole method. */
6011 cfg->args [0]->flags |= MONO_INST_INDIRECT;
6015 /* add a check for this != NULL to inlined methods */
6016 if (is_virtual_call) {
6019 NEW_ARGLOAD (cfg, arg_ins, 0);
6020 MONO_ADD_INS (cfg->cbb, arg_ins);
6021 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
6024 skip_dead_blocks = !dont_verify;
6025 if (skip_dead_blocks) {
6026 original_bb = bb = mono_basic_block_split (method, &error);
6027 if (!mono_error_ok (&error)) {
6028 mono_error_cleanup (&error);
6034 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
6035 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
6038 start_new_bblock = 0;
6041 if (cfg->method == method)
6042 cfg->real_offset = ip - header->code;
6044 cfg->real_offset = inline_offset;
6049 if (start_new_bblock) {
6050 bblock->cil_length = ip - bblock->cil_code;
6051 if (start_new_bblock == 2) {
6052 g_assert (ip == tblock->cil_code);
6054 GET_BBLOCK (cfg, tblock, ip);
6056 bblock->next_bb = tblock;
6059 start_new_bblock = 0;
6060 for (i = 0; i < bblock->in_scount; ++i) {
6061 if (cfg->verbose_level > 3)
6062 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
6063 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
6067 g_slist_free (class_inits);
6070 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
6071 link_bblock (cfg, bblock, tblock);
6072 if (sp != stack_start) {
6073 handle_stack_args (cfg, stack_start, sp - stack_start);
6075 CHECK_UNVERIFIABLE (cfg);
6077 bblock->next_bb = tblock;
6080 for (i = 0; i < bblock->in_scount; ++i) {
6081 if (cfg->verbose_level > 3)
6082 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
6083 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
6086 g_slist_free (class_inits);
6091 if (skip_dead_blocks) {
6092 int ip_offset = ip - header->code;
6094 if (ip_offset == bb->end)
6098 int op_size = mono_opcode_size (ip, end);
6099 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
6101 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
6103 if (ip_offset + op_size == bb->end) {
6104 MONO_INST_NEW (cfg, ins, OP_NOP);
6105 MONO_ADD_INS (bblock, ins);
6106 start_new_bblock = 1;
6114 * Sequence points are points where the debugger can place a breakpoint.
6115 * Currently, we generate these automatically at points where the IL
6118 if (seq_points && sp == stack_start) {
6119 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
6120 MONO_ADD_INS (cfg->cbb, ins);
6123 bblock->real_offset = cfg->real_offset;
6125 if ((cfg->method == method) && cfg->coverage_info) {
6126 guint32 cil_offset = ip - header->code;
6127 cfg->coverage_info->data [cil_offset].cil_code = ip;
6129 /* TODO: Use an increment here */
6130 #if defined(TARGET_X86)
6131 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
6132 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
6134 MONO_ADD_INS (cfg->cbb, ins);
6136 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
6137 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
6141 if (cfg->verbose_level > 3)
6142 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
6146 if (cfg->keep_cil_nops)
6147 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
6149 MONO_INST_NEW (cfg, ins, OP_NOP);
6151 MONO_ADD_INS (bblock, ins);
6154 if (should_insert_brekpoint (cfg->method))
6155 MONO_INST_NEW (cfg, ins, OP_BREAK);
6157 MONO_INST_NEW (cfg, ins, OP_NOP);
6159 MONO_ADD_INS (bblock, ins);
6165 CHECK_STACK_OVF (1);
6166 n = (*ip)-CEE_LDARG_0;
6168 EMIT_NEW_ARGLOAD (cfg, ins, n);
6176 CHECK_STACK_OVF (1);
6177 n = (*ip)-CEE_LDLOC_0;
6179 EMIT_NEW_LOCLOAD (cfg, ins, n);
6188 n = (*ip)-CEE_STLOC_0;
6191 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
6193 emit_stloc_ir (cfg, sp, header, n);
6200 CHECK_STACK_OVF (1);
6203 EMIT_NEW_ARGLOAD (cfg, ins, n);
6209 CHECK_STACK_OVF (1);
6212 NEW_ARGLOADA (cfg, ins, n);
6213 MONO_ADD_INS (cfg->cbb, ins);
6223 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
6225 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
6230 CHECK_STACK_OVF (1);
6233 EMIT_NEW_LOCLOAD (cfg, ins, n);
6237 case CEE_LDLOCA_S: {
6238 unsigned char *tmp_ip;
6240 CHECK_STACK_OVF (1);
6241 CHECK_LOCAL (ip [1]);
6243 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
6249 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
6258 CHECK_LOCAL (ip [1]);
6259 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
6261 emit_stloc_ir (cfg, sp, header, ip [1]);
6266 CHECK_STACK_OVF (1);
6267 EMIT_NEW_PCONST (cfg, ins, NULL);
6268 ins->type = STACK_OBJ;
6273 CHECK_STACK_OVF (1);
6274 EMIT_NEW_ICONST (cfg, ins, -1);
6287 CHECK_STACK_OVF (1);
6288 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
6294 CHECK_STACK_OVF (1);
6296 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
6302 CHECK_STACK_OVF (1);
6303 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
6309 CHECK_STACK_OVF (1);
6310 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6311 ins->type = STACK_I8;
6312 ins->dreg = alloc_dreg (cfg, STACK_I8);
6314 ins->inst_l = (gint64)read64 (ip);
6315 MONO_ADD_INS (bblock, ins);
6321 gboolean use_aotconst = FALSE;
6323 #ifdef TARGET_POWERPC
6324 /* FIXME: Clean this up */
6325 if (cfg->compile_aot)
6326 use_aotconst = TRUE;
6329 /* FIXME: we should really allocate this only late in the compilation process */
6330 f = mono_domain_alloc (cfg->domain, sizeof (float));
6332 CHECK_STACK_OVF (1);
6338 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
6340 dreg = alloc_freg (cfg);
6341 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
6342 ins->type = STACK_R8;
6344 MONO_INST_NEW (cfg, ins, OP_R4CONST);
6345 ins->type = STACK_R8;
6346 ins->dreg = alloc_dreg (cfg, STACK_R8);
6348 MONO_ADD_INS (bblock, ins);
6358 gboolean use_aotconst = FALSE;
6360 #ifdef TARGET_POWERPC
6361 /* FIXME: Clean this up */
6362 if (cfg->compile_aot)
6363 use_aotconst = TRUE;
6366 /* FIXME: we should really allocate this only late in the compilation process */
6367 d = mono_domain_alloc (cfg->domain, sizeof (double));
6369 CHECK_STACK_OVF (1);
6375 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
6377 dreg = alloc_freg (cfg);
6378 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
6379 ins->type = STACK_R8;
6381 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6382 ins->type = STACK_R8;
6383 ins->dreg = alloc_dreg (cfg, STACK_R8);
6385 MONO_ADD_INS (bblock, ins);
6394 MonoInst *temp, *store;
6396 CHECK_STACK_OVF (1);
6400 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
6401 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
6403 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6406 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6419 if (sp [0]->type == STACK_R8)
6420 /* we need to pop the value from the x86 FP stack */
6421 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
6430 if (stack_start != sp)
6432 token = read32 (ip + 1);
6433 /* FIXME: check the signature matches */
6434 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6436 if (!cmethod || mono_loader_get_last_error ())
6439 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
6440 GENERIC_SHARING_FAILURE (CEE_JMP);
6442 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
6443 CHECK_CFG_EXCEPTION;
6445 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6447 MonoMethodSignature *fsig = mono_method_signature (cmethod);
6450 /* Handle tail calls similarly to calls */
6451 n = fsig->param_count + fsig->hasthis;
6453 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
6454 call->method = cmethod;
6455 call->tail_call = TRUE;
6456 call->signature = mono_method_signature (cmethod);
6457 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
6458 call->inst.inst_p0 = cmethod;
6459 for (i = 0; i < n; ++i)
6460 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
6462 mono_arch_emit_call (cfg, call);
6463 MONO_ADD_INS (bblock, (MonoInst*)call);
6466 for (i = 0; i < num_args; ++i)
6467 /* Prevent arguments from being optimized away */
6468 arg_array [i]->flags |= MONO_INST_VOLATILE;
6470 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6471 ins = (MonoInst*)call;
6472 ins->inst_p0 = cmethod;
6473 MONO_ADD_INS (bblock, ins);
6477 start_new_bblock = 1;
6482 case CEE_CALLVIRT: {
6483 MonoInst *addr = NULL;
6484 MonoMethodSignature *fsig = NULL;
6486 int virtual = *ip == CEE_CALLVIRT;
6487 int calli = *ip == CEE_CALLI;
6488 gboolean pass_imt_from_rgctx = FALSE;
6489 MonoInst *imt_arg = NULL;
6490 gboolean pass_vtable = FALSE;
6491 gboolean pass_mrgctx = FALSE;
6492 MonoInst *vtable_arg = NULL;
6493 gboolean check_this = FALSE;
6494 gboolean supported_tail_call = FALSE;
6497 token = read32 (ip + 1);
6504 if (method->wrapper_type != MONO_WRAPPER_NONE)
6505 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
6507 fsig = mono_metadata_parse_signature (image, token);
6509 n = fsig->param_count + fsig->hasthis;
6511 if (method->dynamic && fsig->pinvoke) {
6515 * This is a call through a function pointer using a pinvoke
6516 * signature. Have to create a wrapper and call that instead.
6517 * FIXME: This is very slow, need to create a wrapper at JIT time
6518 * instead based on the signature.
6520 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
6521 EMIT_NEW_PCONST (cfg, args [1], fsig);
6523 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
6526 MonoMethod *cil_method;
6528 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6529 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
6530 cil_method = cmethod;
6531 } else if (constrained_call) {
6532 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
6534 * This is needed since get_method_constrained can't find
6535 * the method in klass representing a type var.
6536 * The type var is guaranteed to be a reference type in this
6539 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6540 cil_method = cmethod;
6541 g_assert (!cmethod->klass->valuetype);
6543 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
6546 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6547 cil_method = cmethod;
6550 if (!cmethod || mono_loader_get_last_error ())
6552 if (!dont_verify && !cfg->skip_visibility) {
6553 MonoMethod *target_method = cil_method;
6554 if (method->is_inflated) {
6555 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
6557 if (!mono_method_can_access_method (method_definition, target_method) &&
6558 !mono_method_can_access_method (method, cil_method))
6559 METHOD_ACCESS_FAILURE;
6562 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
6563 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
6565 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
6566 /* MS.NET seems to silently convert this to a callvirt */
6571 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
6572 * converts to a callvirt.
6574 * tests/bug-515884.il is an example of this behavior
6576 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
6577 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
6578 if (!virtual && cmethod->klass->marshalbyref && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
6582 if (!cmethod->klass->inited)
6583 if (!mono_class_init (cmethod->klass))
6586 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
6587 mini_class_is_system_array (cmethod->klass)) {
6588 array_rank = cmethod->klass->rank;
6589 fsig = mono_method_signature (cmethod);
6591 fsig = mono_method_signature (cmethod);
6596 if (fsig->pinvoke) {
6597 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
6598 check_for_pending_exc, FALSE);
6599 fsig = mono_method_signature (wrapper);
6600 } else if (constrained_call) {
6601 fsig = mono_method_signature (cmethod);
6603 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
6607 mono_save_token_info (cfg, image, token, cil_method);
6609 n = fsig->param_count + fsig->hasthis;
6611 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
6612 if (check_linkdemand (cfg, method, cmethod))
6614 CHECK_CFG_EXCEPTION;
6617 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
6618 g_assert_not_reached ();
6621 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
6624 if (!cfg->generic_sharing_context && cmethod)
6625 g_assert (!mono_method_check_context_used (cmethod));
6629 //g_assert (!virtual || fsig->hasthis);
6633 if (constrained_call) {
6635 * We have the `constrained.' prefix opcode.
6637 if (constrained_call->valuetype && !cmethod->klass->valuetype) {
6639 * The type parameter is instantiated as a valuetype,
6640 * but that type doesn't override the method we're
6641 * calling, so we need to box `this'.
6643 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
6644 ins->klass = constrained_call;
6645 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call));
6646 CHECK_CFG_EXCEPTION;
6647 } else if (!constrained_call->valuetype) {
6648 int dreg = alloc_ireg_ref (cfg);
6651 * The type parameter is instantiated as a reference
6652 * type. We have a managed pointer on the stack, so
6653 * we need to dereference it here.
6655 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
6656 ins->type = STACK_OBJ;
6658 } else if (cmethod->klass->valuetype)
6660 constrained_call = NULL;
6663 if (*ip != CEE_CALLI && check_call_signature (cfg, fsig, sp))
6667 * If the callee is a shared method, then its static cctor
6668 * might not get called after the call was patched.
6670 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
6671 emit_generic_class_init (cfg, cmethod->klass);
6672 CHECK_TYPELOAD (cmethod->klass);
6675 if (cmethod && ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
6676 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
6677 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6678 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
6679 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6682 * Pass vtable iff target method might
6683 * be shared, which means that sharing
6684 * is enabled for its class and its
6685 * context is sharable (and it's not a
6688 if (sharing_enabled && context_sharable &&
6689 !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
6693 if (cmethod && mini_method_get_context (cmethod) &&
6694 mini_method_get_context (cmethod)->method_inst) {
6695 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6696 MonoGenericContext *context = mini_method_get_context (cmethod);
6697 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6699 g_assert (!pass_vtable);
6701 if (sharing_enabled && context_sharable)
6705 if (cfg->generic_sharing_context && cmethod) {
6706 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
6708 context_used = mono_method_check_context_used (cmethod);
6710 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
6711 /* Generic method interface
6712 calls are resolved via a
6713 helper function and don't
6715 if (!cmethod_context || !cmethod_context->method_inst)
6716 pass_imt_from_rgctx = TRUE;
6720 * If a shared method calls another
6721 * shared method then the caller must
6722 * have a generic sharing context
6723 * because the magic trampoline
6724 * requires it. FIXME: We shouldn't
6725 * have to force the vtable/mrgctx
6726 * variable here. Instead there
6727 * should be a flag in the cfg to
6728 * request a generic sharing context.
6731 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
6732 mono_get_vtable_var (cfg);
6737 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
6739 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
6741 CHECK_TYPELOAD (cmethod->klass);
6742 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
6747 g_assert (!vtable_arg);
6749 if (!cfg->compile_aot) {
6751 * emit_get_rgctx_method () calls mono_class_vtable () so check
6752 * for type load errors before.
6754 mono_class_setup_vtable (cmethod->klass);
6755 CHECK_TYPELOAD (cmethod->klass);
6758 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
6760 /* !marshalbyref is needed to properly handle generic methods + remoting */
6761 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
6762 MONO_METHOD_IS_FINAL (cmethod)) &&
6763 !cmethod->klass->marshalbyref) {
6770 if (pass_imt_from_rgctx) {
6771 g_assert (!pass_vtable);
6774 imt_arg = emit_get_rgctx_method (cfg, context_used,
6775 cmethod, MONO_RGCTX_INFO_METHOD);
6779 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
6781 /* Calling virtual generic methods */
6782 if (cmethod && virtual &&
6783 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
6784 !(MONO_METHOD_IS_FINAL (cmethod) &&
6785 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
6786 mono_method_signature (cmethod)->generic_param_count) {
6787 MonoInst *this_temp, *this_arg_temp, *store;
6788 MonoInst *iargs [4];
6790 g_assert (mono_method_signature (cmethod)->is_inflated);
6792 /* Prevent inlining of methods that contain indirect calls */
6795 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
6796 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt) {
6797 g_assert (!imt_arg);
6799 g_assert (cmethod->is_inflated);
6800 imt_arg = emit_get_rgctx_method (cfg, context_used,
6801 cmethod, MONO_RGCTX_INFO_METHOD);
6802 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, sp [0], imt_arg, NULL);
6806 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
6807 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
6808 MONO_ADD_INS (bblock, store);
6810 /* FIXME: This should be a managed pointer */
6811 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
6813 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
6814 iargs [1] = emit_get_rgctx_method (cfg, context_used,
6815 cmethod, MONO_RGCTX_INFO_METHOD);
6816 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
6817 addr = mono_emit_jit_icall (cfg,
6818 mono_helper_compile_generic_method, iargs);
6820 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
6822 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL);
6825 if (!MONO_TYPE_IS_VOID (fsig->ret))
6826 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6828 CHECK_CFG_EXCEPTION;
6836 * Implement a workaround for the inherent races involved in locking:
6842 * If a thread abort happens between the call to Monitor.Enter () and the start of the
6843 * try block, the Exit () won't be executed, see:
6844 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
6845 * To work around this, we extend such try blocks to include the last x bytes
6846 * of the Monitor.Enter () call.
6848 if (cmethod && cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
6849 MonoBasicBlock *tbb;
6851 GET_BBLOCK (cfg, tbb, ip + 5);
6853 * Only extend try blocks with a finally, to avoid catching exceptions thrown
6854 * from Monitor.Enter like ArgumentNullException.
6856 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
6857 /* Mark this bblock as needing to be extended */
6858 tbb->extend_try_block = TRUE;
6862 /* Conversion to a JIT intrinsic */
6863 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
6865 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6866 type_to_eval_stack_type ((cfg), fsig->ret, ins);
6871 CHECK_CFG_EXCEPTION;
6879 if ((cfg->opt & MONO_OPT_INLINE) && cmethod &&
6880 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
6881 !disable_inline && mono_method_check_inlining (cfg, cmethod) &&
6882 !g_list_find (dont_inline, cmethod)) {
6884 gboolean always = FALSE;
6886 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
6887 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
6888 /* Prevent inlining of methods that call wrappers */
6890 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
6894 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, always))) {
6896 cfg->real_offset += 5;
6899 if (!MONO_TYPE_IS_VOID (fsig->ret))
6900 /* *sp is already set by inline_method */
6903 inline_costs += costs;
6909 inline_costs += 10 * num_calls++;
6911 /* Tail recursion elimination */
6912 if ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
6913 gboolean has_vtargs = FALSE;
6916 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6919 /* keep it simple */
6920 for (i = fsig->param_count - 1; i >= 0; i--) {
6921 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
6926 for (i = 0; i < n; ++i)
6927 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6928 MONO_INST_NEW (cfg, ins, OP_BR);
6929 MONO_ADD_INS (bblock, ins);
6930 tblock = start_bblock->out_bb [0];
6931 link_bblock (cfg, bblock, tblock);
6932 ins->inst_target_bb = tblock;
6933 start_new_bblock = 1;
6935 /* skip the CEE_RET, too */
6936 if (ip_in_bb (cfg, bblock, ip + 5))
6946 /* Generic sharing */
6947 /* FIXME: only do this for generic methods if
6948 they are not shared! */
6949 if (context_used && !imt_arg && !array_rank &&
6950 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
6951 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
6952 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
6953 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
6956 g_assert (cfg->generic_sharing_context && cmethod);
6960 * We are compiling a call to a
6961 * generic method from shared code,
6962 * which means that we have to look up
6963 * the method in the rgctx and do an
6966 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
6969 /* Indirect calls */
6971 g_assert (!imt_arg);
6973 if (*ip == CEE_CALL)
6974 g_assert (context_used);
6975 else if (*ip == CEE_CALLI)
6976 g_assert (!vtable_arg);
6978 /* FIXME: what the hell is this??? */
6979 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
6980 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
6982 /* Prevent inlining of methods with indirect calls */
6988 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, vtable_arg);
6989 call = (MonoCallInst*)ins;
6991 if (addr->opcode == OP_AOTCONST && addr->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6993 * Instead of emitting an indirect call, emit a direct call
6994 * with the contents of the aotconst as the patch info.
6996 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_p0, fsig, sp);
6998 } else if (addr->opcode == OP_GOT_ENTRY && addr->inst_right->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6999 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_right->inst_left, fsig, sp);
7002 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL);
7005 if (!MONO_TYPE_IS_VOID (fsig->ret))
7006 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
7008 CHECK_CFG_EXCEPTION;
7019 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
7020 MonoInst *val = sp [fsig->param_count];
7022 if (val->type == STACK_OBJ) {
7023 MonoInst *iargs [2];
7028 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
7031 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
7032 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
7033 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !(val->opcode == OP_PCONST && val->inst_c0 == 0))
7034 emit_write_barrier (cfg, addr, val, 0);
7035 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
7036 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
7038 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
7041 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
7042 if (!cmethod->klass->element_class->valuetype && !readonly)
7043 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
7044 CHECK_TYPELOAD (cmethod->klass);
7047 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
7050 g_assert_not_reached ();
7053 CHECK_CFG_EXCEPTION;
7060 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
7062 if (!MONO_TYPE_IS_VOID (fsig->ret))
7063 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
7065 CHECK_CFG_EXCEPTION;
7072 /* Tail prefix / tail call optimization */
7074 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
7075 /* FIXME: runtime generic context pointer for jumps? */
7076 /* FIXME: handle this for generic sharing eventually */
7077 supported_tail_call = cmethod &&
7078 ((((ins_flag & MONO_INST_TAILCALL) && (*ip == CEE_CALL))
7079 ))//|| ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && ip [5] == CEE_RET))
7080 && !vtable_arg && !cfg->generic_sharing_context && is_supported_tail_call (cfg, method, cmethod, fsig);
7082 if (supported_tail_call) {
7085 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
7088 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
7090 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
7091 /* Handle tail calls similarly to calls */
7092 call = mono_emit_call_args (cfg, mono_method_signature (cmethod), sp, FALSE, FALSE, TRUE, FALSE);
7094 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
7095 call->tail_call = TRUE;
7096 call->method = cmethod;
7097 call->signature = mono_method_signature (cmethod);
7100 * We implement tail calls by storing the actual arguments into the
7101 * argument variables, then emitting a CEE_JMP.
7103 for (i = 0; i < n; ++i) {
7104 /* Prevent argument from being register allocated */
7105 arg_array [i]->flags |= MONO_INST_VOLATILE;
7106 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
7110 ins = (MonoInst*)call;
7111 ins->inst_p0 = cmethod;
7112 ins->inst_p1 = arg_array [0];
7113 MONO_ADD_INS (bblock, ins);
7114 link_bblock (cfg, bblock, end_bblock);
7115 start_new_bblock = 1;
7117 CHECK_CFG_EXCEPTION;
7122 // FIXME: Eliminate unreachable epilogs
7125 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
7126 * only reachable from this call.
7128 GET_BBLOCK (cfg, tblock, ip);
7129 if (tblock == bblock || tblock->in_count == 0)
7136 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL,
7137 imt_arg, vtable_arg);
7139 if (!MONO_TYPE_IS_VOID (fsig->ret))
7140 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
7142 CHECK_CFG_EXCEPTION;
7149 if (cfg->method != method) {
7150 /* return from inlined method */
7152 * If in_count == 0, that means the ret is unreachable due to
7153 * being preceeded by a throw. In that case, inline_method () will
7154 * handle setting the return value
7155 * (test case: test_0_inline_throw ()).
7157 if (return_var && cfg->cbb->in_count) {
7161 //g_assert (returnvar != -1);
7162 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
7163 cfg->ret_var_set = TRUE;
7167 MonoType *ret_type = mono_method_signature (method)->ret;
7171 * Place a seq point here too even through the IL stack is not
7172 * empty, so a step over on
7175 * will work correctly.
7177 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
7178 MONO_ADD_INS (cfg->cbb, ins);
7181 g_assert (!return_var);
7185 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
7188 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
7191 if (!cfg->vret_addr) {
7194 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
7196 EMIT_NEW_RETLOADA (cfg, ret_addr);
7198 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
7199 ins->klass = mono_class_from_mono_type (ret_type);
7202 #ifdef MONO_ARCH_SOFT_FLOAT
7203 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
7204 MonoInst *iargs [1];
7208 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
7209 mono_arch_emit_setret (cfg, method, conv);
7211 mono_arch_emit_setret (cfg, method, *sp);
7214 mono_arch_emit_setret (cfg, method, *sp);
7219 if (sp != stack_start)
7221 MONO_INST_NEW (cfg, ins, OP_BR);
7223 ins->inst_target_bb = end_bblock;
7224 MONO_ADD_INS (bblock, ins);
7225 link_bblock (cfg, bblock, end_bblock);
7226 start_new_bblock = 1;
7230 MONO_INST_NEW (cfg, ins, OP_BR);
7232 target = ip + 1 + (signed char)(*ip);
7234 GET_BBLOCK (cfg, tblock, target);
7235 link_bblock (cfg, bblock, tblock);
7236 ins->inst_target_bb = tblock;
7237 if (sp != stack_start) {
7238 handle_stack_args (cfg, stack_start, sp - stack_start);
7240 CHECK_UNVERIFIABLE (cfg);
7242 MONO_ADD_INS (bblock, ins);
7243 start_new_bblock = 1;
7244 inline_costs += BRANCH_COST;
7258 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
7260 target = ip + 1 + *(signed char*)ip;
7266 inline_costs += BRANCH_COST;
7270 MONO_INST_NEW (cfg, ins, OP_BR);
7273 target = ip + 4 + (gint32)read32(ip);
7275 GET_BBLOCK (cfg, tblock, target);
7276 link_bblock (cfg, bblock, tblock);
7277 ins->inst_target_bb = tblock;
7278 if (sp != stack_start) {
7279 handle_stack_args (cfg, stack_start, sp - stack_start);
7281 CHECK_UNVERIFIABLE (cfg);
7284 MONO_ADD_INS (bblock, ins);
7286 start_new_bblock = 1;
7287 inline_costs += BRANCH_COST;
7294 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
7295 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
7296 guint32 opsize = is_short ? 1 : 4;
7298 CHECK_OPSIZE (opsize);
7300 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
7303 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
7308 GET_BBLOCK (cfg, tblock, target);
7309 link_bblock (cfg, bblock, tblock);
7310 GET_BBLOCK (cfg, tblock, ip);
7311 link_bblock (cfg, bblock, tblock);
7313 if (sp != stack_start) {
7314 handle_stack_args (cfg, stack_start, sp - stack_start);
7315 CHECK_UNVERIFIABLE (cfg);
7318 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
7319 cmp->sreg1 = sp [0]->dreg;
7320 type_from_op (cmp, sp [0], NULL);
7323 #if SIZEOF_REGISTER == 4
7324 if (cmp->opcode == OP_LCOMPARE_IMM) {
7325 /* Convert it to OP_LCOMPARE */
7326 MONO_INST_NEW (cfg, ins, OP_I8CONST);
7327 ins->type = STACK_I8;
7328 ins->dreg = alloc_dreg (cfg, STACK_I8);
7330 MONO_ADD_INS (bblock, ins);
7331 cmp->opcode = OP_LCOMPARE;
7332 cmp->sreg2 = ins->dreg;
7335 MONO_ADD_INS (bblock, cmp);
7337 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
7338 type_from_op (ins, sp [0], NULL);
7339 MONO_ADD_INS (bblock, ins);
7340 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
7341 GET_BBLOCK (cfg, tblock, target);
7342 ins->inst_true_bb = tblock;
7343 GET_BBLOCK (cfg, tblock, ip);
7344 ins->inst_false_bb = tblock;
7345 start_new_bblock = 2;
7348 inline_costs += BRANCH_COST;
7363 MONO_INST_NEW (cfg, ins, *ip);
7365 target = ip + 4 + (gint32)read32(ip);
7371 inline_costs += BRANCH_COST;
7375 MonoBasicBlock **targets;
7376 MonoBasicBlock *default_bblock;
7377 MonoJumpInfoBBTable *table;
7378 int offset_reg = alloc_preg (cfg);
7379 int target_reg = alloc_preg (cfg);
7380 int table_reg = alloc_preg (cfg);
7381 int sum_reg = alloc_preg (cfg);
7382 gboolean use_op_switch;
7386 n = read32 (ip + 1);
7389 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
7393 CHECK_OPSIZE (n * sizeof (guint32));
7394 target = ip + n * sizeof (guint32);
7396 GET_BBLOCK (cfg, default_bblock, target);
7397 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
7399 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
7400 for (i = 0; i < n; ++i) {
7401 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
7402 targets [i] = tblock;
7403 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
7407 if (sp != stack_start) {
7409 * Link the current bb with the targets as well, so handle_stack_args
7410 * will set their in_stack correctly.
7412 link_bblock (cfg, bblock, default_bblock);
7413 for (i = 0; i < n; ++i)
7414 link_bblock (cfg, bblock, targets [i]);
7416 handle_stack_args (cfg, stack_start, sp - stack_start);
7418 CHECK_UNVERIFIABLE (cfg);
7421 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
7422 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
7425 for (i = 0; i < n; ++i)
7426 link_bblock (cfg, bblock, targets [i]);
7428 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
7429 table->table = targets;
7430 table->table_size = n;
7432 use_op_switch = FALSE;
7434 /* ARM implements SWITCH statements differently */
7435 /* FIXME: Make it use the generic implementation */
7436 if (!cfg->compile_aot)
7437 use_op_switch = TRUE;
7440 if (COMPILE_LLVM (cfg))
7441 use_op_switch = TRUE;
7443 cfg->cbb->has_jump_table = 1;
7445 if (use_op_switch) {
7446 MONO_INST_NEW (cfg, ins, OP_SWITCH);
7447 ins->sreg1 = src1->dreg;
7448 ins->inst_p0 = table;
7449 ins->inst_many_bb = targets;
7450 ins->klass = GUINT_TO_POINTER (n);
7451 MONO_ADD_INS (cfg->cbb, ins);
7453 if (sizeof (gpointer) == 8)
7454 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
7456 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
7458 #if SIZEOF_REGISTER == 8
7459 /* The upper word might not be zero, and we add it to a 64 bit address later */
7460 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
7463 if (cfg->compile_aot) {
7464 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
7466 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
7467 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
7468 ins->inst_p0 = table;
7469 ins->dreg = table_reg;
7470 MONO_ADD_INS (cfg->cbb, ins);
7473 /* FIXME: Use load_memindex */
7474 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
7475 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
7476 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
7478 start_new_bblock = 1;
7479 inline_costs += (BRANCH_COST * 2);
7499 dreg = alloc_freg (cfg);
7502 dreg = alloc_lreg (cfg);
7505 dreg = alloc_ireg_ref (cfg);
7508 dreg = alloc_preg (cfg);
7511 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
7512 ins->type = ldind_type [*ip - CEE_LDIND_I1];
7513 ins->flags |= ins_flag;
7515 MONO_ADD_INS (bblock, ins);
7530 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
7531 ins->flags |= ins_flag;
7533 MONO_ADD_INS (bblock, ins);
7535 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
7536 emit_write_barrier (cfg, sp [0], sp [1], -1);
7545 MONO_INST_NEW (cfg, ins, (*ip));
7547 ins->sreg1 = sp [0]->dreg;
7548 ins->sreg2 = sp [1]->dreg;
7549 type_from_op (ins, sp [0], sp [1]);
7551 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7553 /* Use the immediate opcodes if possible */
7554 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
7555 int imm_opcode = mono_op_to_op_imm (ins->opcode);
7556 if (imm_opcode != -1) {
7557 ins->opcode = imm_opcode;
7558 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
7561 sp [1]->opcode = OP_NOP;
7565 MONO_ADD_INS ((cfg)->cbb, (ins));
7567 *sp++ = mono_decompose_opcode (cfg, ins);
7584 MONO_INST_NEW (cfg, ins, (*ip));
7586 ins->sreg1 = sp [0]->dreg;
7587 ins->sreg2 = sp [1]->dreg;
7588 type_from_op (ins, sp [0], sp [1]);
7590 ADD_WIDEN_OP (ins, sp [0], sp [1]);
7591 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7593 /* FIXME: Pass opcode to is_inst_imm */
7595 /* Use the immediate opcodes if possible */
7596 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
7599 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
7600 if (imm_opcode != -1) {
7601 ins->opcode = imm_opcode;
7602 if (sp [1]->opcode == OP_I8CONST) {
7603 #if SIZEOF_REGISTER == 8
7604 ins->inst_imm = sp [1]->inst_l;
7606 ins->inst_ls_word = sp [1]->inst_ls_word;
7607 ins->inst_ms_word = sp [1]->inst_ms_word;
7611 ins->inst_imm = (gssize)(sp [1]->inst_c0);
7614 /* Might be followed by an instruction added by ADD_WIDEN_OP */
7615 if (sp [1]->next == NULL)
7616 sp [1]->opcode = OP_NOP;
7619 MONO_ADD_INS ((cfg)->cbb, (ins));
7621 *sp++ = mono_decompose_opcode (cfg, ins);
7634 case CEE_CONV_OVF_I8:
7635 case CEE_CONV_OVF_U8:
7639 /* Special case this earlier so we have long constants in the IR */
7640 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
7641 int data = sp [-1]->inst_c0;
7642 sp [-1]->opcode = OP_I8CONST;
7643 sp [-1]->type = STACK_I8;
7644 #if SIZEOF_REGISTER == 8
7645 if ((*ip) == CEE_CONV_U8)
7646 sp [-1]->inst_c0 = (guint32)data;
7648 sp [-1]->inst_c0 = data;
7650 sp [-1]->inst_ls_word = data;
7651 if ((*ip) == CEE_CONV_U8)
7652 sp [-1]->inst_ms_word = 0;
7654 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
7656 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
7663 case CEE_CONV_OVF_I4:
7664 case CEE_CONV_OVF_I1:
7665 case CEE_CONV_OVF_I2:
7666 case CEE_CONV_OVF_I:
7667 case CEE_CONV_OVF_U:
7670 if (sp [-1]->type == STACK_R8) {
7671 ADD_UNOP (CEE_CONV_OVF_I8);
7678 case CEE_CONV_OVF_U1:
7679 case CEE_CONV_OVF_U2:
7680 case CEE_CONV_OVF_U4:
7683 if (sp [-1]->type == STACK_R8) {
7684 ADD_UNOP (CEE_CONV_OVF_U8);
7691 case CEE_CONV_OVF_I1_UN:
7692 case CEE_CONV_OVF_I2_UN:
7693 case CEE_CONV_OVF_I4_UN:
7694 case CEE_CONV_OVF_I8_UN:
7695 case CEE_CONV_OVF_U1_UN:
7696 case CEE_CONV_OVF_U2_UN:
7697 case CEE_CONV_OVF_U4_UN:
7698 case CEE_CONV_OVF_U8_UN:
7699 case CEE_CONV_OVF_I_UN:
7700 case CEE_CONV_OVF_U_UN:
7707 CHECK_CFG_EXCEPTION;
7711 case CEE_ADD_OVF_UN:
7713 case CEE_MUL_OVF_UN:
7715 case CEE_SUB_OVF_UN:
7723 token = read32 (ip + 1);
7724 klass = mini_get_class (method, token, generic_context);
7725 CHECK_TYPELOAD (klass);
7727 if (generic_class_is_reference_type (cfg, klass)) {
7728 MonoInst *store, *load;
7729 int dreg = alloc_ireg_ref (cfg);
7731 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
7732 load->flags |= ins_flag;
7733 MONO_ADD_INS (cfg->cbb, load);
7735 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
7736 store->flags |= ins_flag;
7737 MONO_ADD_INS (cfg->cbb, store);
7739 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
7740 emit_write_barrier (cfg, sp [0], sp [1], -1);
7742 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7754 token = read32 (ip + 1);
7755 klass = mini_get_class (method, token, generic_context);
7756 CHECK_TYPELOAD (klass);
7758 /* Optimize the common ldobj+stloc combination */
7768 loc_index = ip [5] - CEE_STLOC_0;
7775 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
7776 CHECK_LOCAL (loc_index);
7778 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7779 ins->dreg = cfg->locals [loc_index]->dreg;
7785 /* Optimize the ldobj+stobj combination */
7786 /* The reference case ends up being a load+store anyway */
7787 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
7792 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7799 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7808 CHECK_STACK_OVF (1);
7810 n = read32 (ip + 1);
7812 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
7813 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
7814 ins->type = STACK_OBJ;
7817 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
7818 MonoInst *iargs [1];
7820 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
7821 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
7823 if (cfg->opt & MONO_OPT_SHARED) {
7824 MonoInst *iargs [3];
7826 if (cfg->compile_aot) {
7827 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
7829 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7830 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
7831 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
7832 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
7833 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7835 if (bblock->out_of_line) {
7836 MonoInst *iargs [2];
7838 if (image == mono_defaults.corlib) {
7840 * Avoid relocations in AOT and save some space by using a
7841 * version of helper_ldstr specialized to mscorlib.
7843 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
7844 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
7846 /* Avoid creating the string object */
7847 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
7848 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
7849 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
7853 if (cfg->compile_aot) {
7854 NEW_LDSTRCONST (cfg, ins, image, n);
7856 MONO_ADD_INS (bblock, ins);
7859 NEW_PCONST (cfg, ins, NULL);
7860 ins->type = STACK_OBJ;
7861 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7863 OUT_OF_MEMORY_FAILURE;
7866 MONO_ADD_INS (bblock, ins);
7875 MonoInst *iargs [2];
7876 MonoMethodSignature *fsig;
7879 MonoInst *vtable_arg = NULL;
7882 token = read32 (ip + 1);
7883 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7884 if (!cmethod || mono_loader_get_last_error ())
7886 fsig = mono_method_get_signature (cmethod, image, token);
7890 mono_save_token_info (cfg, image, token, cmethod);
7892 if (!mono_class_init (cmethod->klass))
7895 if (cfg->generic_sharing_context)
7896 context_used = mono_method_check_context_used (cmethod);
7898 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
7899 if (check_linkdemand (cfg, method, cmethod))
7901 CHECK_CFG_EXCEPTION;
7902 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
7903 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
7906 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
7907 emit_generic_class_init (cfg, cmethod->klass);
7908 CHECK_TYPELOAD (cmethod->klass);
7911 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7912 mono_method_is_generic_sharable_impl (cmethod, TRUE)) {
7913 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7914 mono_class_vtable (cfg->domain, cmethod->klass);
7915 CHECK_TYPELOAD (cmethod->klass);
7917 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7918 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7921 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
7922 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7924 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7926 CHECK_TYPELOAD (cmethod->klass);
7927 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7932 n = fsig->param_count;
7936 * Generate smaller code for the common newobj <exception> instruction in
7937 * argument checking code.
7939 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
7940 is_exception_class (cmethod->klass) && n <= 2 &&
7941 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
7942 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
7943 MonoInst *iargs [3];
7945 g_assert (!vtable_arg);
7949 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
7952 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
7956 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
7961 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
7964 g_assert_not_reached ();
7972 /* move the args to allow room for 'this' in the first position */
7978 /* check_call_signature () requires sp[0] to be set */
7979 this_ins.type = STACK_OBJ;
7981 if (check_call_signature (cfg, fsig, sp))
7986 if (mini_class_is_system_array (cmethod->klass)) {
7987 g_assert (!vtable_arg);
7989 *sp = emit_get_rgctx_method (cfg, context_used,
7990 cmethod, MONO_RGCTX_INFO_METHOD);
7992 /* Avoid varargs in the common case */
7993 if (fsig->param_count == 1)
7994 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
7995 else if (fsig->param_count == 2)
7996 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
7997 else if (fsig->param_count == 3)
7998 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
8000 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
8001 } else if (cmethod->string_ctor) {
8002 g_assert (!context_used);
8003 g_assert (!vtable_arg);
8004 /* we simply pass a null pointer */
8005 EMIT_NEW_PCONST (cfg, *sp, NULL);
8006 /* now call the string ctor */
8007 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, sp, NULL, NULL, NULL);
8009 MonoInst* callvirt_this_arg = NULL;
8011 if (cmethod->klass->valuetype) {
8012 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
8013 MONO_EMIT_NEW_VZERO (cfg, iargs [0]->dreg, cmethod->klass);
8014 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
8019 * The code generated by mini_emit_virtual_call () expects
8020 * iargs [0] to be a boxed instance, but luckily the vcall
8021 * will be transformed into a normal call there.
8023 } else if (context_used) {
8024 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
8027 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
8029 CHECK_TYPELOAD (cmethod->klass);
8032 * TypeInitializationExceptions thrown from the mono_runtime_class_init
8033 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
8034 * As a workaround, we call class cctors before allocating objects.
8036 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
8037 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
8038 if (cfg->verbose_level > 2)
8039 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
8040 class_inits = g_slist_prepend (class_inits, vtable);
8043 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
8046 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
8049 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
8051 /* Now call the actual ctor */
8052 /* Avoid virtual calls to ctors if possible */
8053 if (cmethod->klass->marshalbyref)
8054 callvirt_this_arg = sp [0];
8057 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
8058 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8059 type_to_eval_stack_type ((cfg), fsig->ret, ins);
8064 CHECK_CFG_EXCEPTION;
8065 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
8066 !disable_inline && mono_method_check_inlining (cfg, cmethod) &&
8067 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
8068 !g_list_find (dont_inline, cmethod)) {
8071 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
8072 cfg->real_offset += 5;
8075 inline_costs += costs - 5;
8078 mono_emit_method_call_full (cfg, cmethod, fsig, sp, callvirt_this_arg, NULL, NULL);
8080 } else if (context_used &&
8081 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
8082 !mono_class_generic_sharing_enabled (cmethod->klass))) {
8083 MonoInst *cmethod_addr;
8085 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
8086 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
8088 mono_emit_calli (cfg, fsig, sp, cmethod_addr, vtable_arg);
8091 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp,
8092 callvirt_this_arg, NULL, vtable_arg);
8096 if (alloc == NULL) {
8098 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
8099 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
8113 token = read32 (ip + 1);
8114 klass = mini_get_class (method, token, generic_context);
8115 CHECK_TYPELOAD (klass);
8116 if (sp [0]->type != STACK_OBJ)
8119 if (cfg->generic_sharing_context)
8120 context_used = mono_class_check_context_used (klass);
8122 if (!context_used && mini_class_has_reference_variant_generic_argument (klass, context_used)) {
8123 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
8130 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
8133 /*FIXME AOT support*/
8134 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
8136 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
8137 *sp++ = mono_emit_method_call (cfg, mono_castclass, args, NULL);
8140 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
8141 MonoMethod *mono_castclass;
8142 MonoInst *iargs [1];
8145 mono_castclass = mono_marshal_get_castclass (klass);
8148 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
8149 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8150 CHECK_CFG_EXCEPTION;
8151 g_assert (costs > 0);
8154 cfg->real_offset += 5;
8159 inline_costs += costs;
8162 ins = handle_castclass (cfg, klass, *sp, context_used);
8163 CHECK_CFG_EXCEPTION;
8173 token = read32 (ip + 1);
8174 klass = mini_get_class (method, token, generic_context);
8175 CHECK_TYPELOAD (klass);
8176 if (sp [0]->type != STACK_OBJ)
8179 if (cfg->generic_sharing_context)
8180 context_used = mono_class_check_context_used (klass);
8182 if (!context_used && mini_class_has_reference_variant_generic_argument (klass, context_used)) {
8183 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
8190 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
8193 /*FIXME AOT support*/
8194 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
8196 *sp++ = mono_emit_method_call (cfg, mono_isinst, args, NULL);
8199 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
8200 MonoMethod *mono_isinst;
8201 MonoInst *iargs [1];
8204 mono_isinst = mono_marshal_get_isinst (klass);
8207 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
8208 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8209 CHECK_CFG_EXCEPTION;
8210 g_assert (costs > 0);
8213 cfg->real_offset += 5;
8218 inline_costs += costs;
8221 ins = handle_isinst (cfg, klass, *sp, context_used);
8222 CHECK_CFG_EXCEPTION;
8229 case CEE_UNBOX_ANY: {
8233 token = read32 (ip + 1);
8234 klass = mini_get_class (method, token, generic_context);
8235 CHECK_TYPELOAD (klass);
8237 mono_save_token_info (cfg, image, token, klass);
8239 if (cfg->generic_sharing_context)
8240 context_used = mono_class_check_context_used (klass);
8242 if (generic_class_is_reference_type (cfg, klass)) {
8243 /* CASTCLASS FIXME kill this huge slice of duplicated code*/
8244 if (!context_used && mini_class_has_reference_variant_generic_argument (klass, context_used)) {
8245 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
8252 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
8255 /*FIXME AOT support*/
8256 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
8258 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
8259 *sp++ = mono_emit_method_call (cfg, mono_castclass, args, NULL);
8262 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
8263 MonoMethod *mono_castclass;
8264 MonoInst *iargs [1];
8267 mono_castclass = mono_marshal_get_castclass (klass);
8270 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
8271 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8272 CHECK_CFG_EXCEPTION;
8273 g_assert (costs > 0);
8276 cfg->real_offset += 5;
8280 inline_costs += costs;
8282 ins = handle_castclass (cfg, klass, *sp, context_used);
8283 CHECK_CFG_EXCEPTION;
8291 if (mono_class_is_nullable (klass)) {
8292 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
8299 ins = handle_unbox (cfg, klass, sp, context_used);
8305 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
8318 token = read32 (ip + 1);
8319 klass = mini_get_class (method, token, generic_context);
8320 CHECK_TYPELOAD (klass);
8322 mono_save_token_info (cfg, image, token, klass);
8324 if (cfg->generic_sharing_context)
8325 context_used = mono_class_check_context_used (klass);
8327 if (generic_class_is_reference_type (cfg, klass)) {
8333 if (klass == mono_defaults.void_class)
8335 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
8337 /* frequent check in generic code: box (struct), brtrue */
8339 // FIXME: LLVM can't handle the inconsistent bb linking
8340 if (!mono_class_is_nullable (klass) &&
8341 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) &&
8342 (ip [5] == CEE_BRTRUE ||
8343 ip [5] == CEE_BRTRUE_S ||
8344 ip [5] == CEE_BRFALSE ||
8345 ip [5] == CEE_BRFALSE_S)) {
8346 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
8348 MonoBasicBlock *true_bb, *false_bb;
8352 if (cfg->verbose_level > 3) {
8353 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
8354 printf ("<box+brtrue opt>\n");
8362 target = ip + 1 + (signed char)(*ip);
8369 target = ip + 4 + (gint)(read32 (ip));
8373 g_assert_not_reached ();
8377 * We need to link both bblocks, since it is needed for handling stack
8378 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
8379 * Branching to only one of them would lead to inconsistencies, so
8380 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
8382 GET_BBLOCK (cfg, true_bb, target);
8383 GET_BBLOCK (cfg, false_bb, ip);
8385 mono_link_bblock (cfg, cfg->cbb, true_bb);
8386 mono_link_bblock (cfg, cfg->cbb, false_bb);
8388 if (sp != stack_start) {
8389 handle_stack_args (cfg, stack_start, sp - stack_start);
8391 CHECK_UNVERIFIABLE (cfg);
8394 if (COMPILE_LLVM (cfg)) {
8395 dreg = alloc_ireg (cfg);
8396 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
8397 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
8399 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
8401 /* The JIT can't eliminate the iconst+compare */
8402 MONO_INST_NEW (cfg, ins, OP_BR);
8403 ins->inst_target_bb = is_true ? true_bb : false_bb;
8404 MONO_ADD_INS (cfg->cbb, ins);
8407 start_new_bblock = 1;
8411 *sp++ = handle_box (cfg, val, klass, context_used);
8413 CHECK_CFG_EXCEPTION;
8422 token = read32 (ip + 1);
8423 klass = mini_get_class (method, token, generic_context);
8424 CHECK_TYPELOAD (klass);
8426 mono_save_token_info (cfg, image, token, klass);
8428 if (cfg->generic_sharing_context)
8429 context_used = mono_class_check_context_used (klass);
8431 if (mono_class_is_nullable (klass)) {
8434 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
8435 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
8439 ins = handle_unbox (cfg, klass, sp, context_used);
8449 MonoClassField *field;
8453 if (*ip == CEE_STFLD) {
8460 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
8462 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
8465 token = read32 (ip + 1);
8466 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8467 field = mono_method_get_wrapper_data (method, token);
8468 klass = field->parent;
8471 field = mono_field_from_token (image, token, &klass, generic_context);
8475 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
8476 FIELD_ACCESS_FAILURE;
8477 mono_class_init (klass);
8479 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
8480 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
8481 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8482 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8485 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
8486 if (*ip == CEE_STFLD) {
8487 if (target_type_is_incompatible (cfg, field->type, sp [1]))
8489 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
8490 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
8491 MonoInst *iargs [5];
8494 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8495 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
8496 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
8500 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
8501 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
8502 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8503 CHECK_CFG_EXCEPTION;
8504 g_assert (costs > 0);
8506 cfg->real_offset += 5;
8509 inline_costs += costs;
8511 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
8516 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
8518 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
8519 if (sp [0]->opcode != OP_LDADDR)
8520 store->flags |= MONO_INST_FAULT;
8522 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
8523 /* insert call to write barrier */
8527 dreg = alloc_ireg_mp (cfg);
8528 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
8529 emit_write_barrier (cfg, ptr, sp [1], -1);
8532 store->flags |= ins_flag;
8539 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
8540 MonoMethod *wrapper = (*ip == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
8541 MonoInst *iargs [4];
8544 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8545 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
8546 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
8547 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
8548 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
8549 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8550 CHECK_CFG_EXCEPTION;
8552 g_assert (costs > 0);
8554 cfg->real_offset += 5;
8558 inline_costs += costs;
8560 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
8564 if (sp [0]->type == STACK_VTYPE) {
8567 /* Have to compute the address of the variable */
8569 var = get_vreg_to_inst (cfg, sp [0]->dreg);
8571 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
8573 g_assert (var->klass == klass);
8575 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
8579 if (*ip == CEE_LDFLDA) {
8580 if (sp [0]->type == STACK_OBJ) {
8581 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
8582 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
8585 dreg = alloc_ireg_mp (cfg);
8587 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
8588 ins->klass = mono_class_from_mono_type (field->type);
8589 ins->type = STACK_MP;
8594 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
8596 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
8597 load->flags |= ins_flag;
8598 if (sp [0]->opcode != OP_LDADDR)
8599 load->flags |= MONO_INST_FAULT;
8610 MonoClassField *field;
8611 gpointer addr = NULL;
8612 gboolean is_special_static;
8616 token = read32 (ip + 1);
8618 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8619 field = mono_method_get_wrapper_data (method, token);
8620 klass = field->parent;
8623 field = mono_field_from_token (image, token, &klass, generic_context);
8626 mono_class_init (klass);
8627 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
8628 FIELD_ACCESS_FAILURE;
8630 /* if the class is Critical then transparent code cannot access it's fields */
8631 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8632 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8635 * We can only support shared generic static
8636 * field access on architectures where the
8637 * trampoline code has been extended to handle
8638 * the generic class init.
8640 #ifndef MONO_ARCH_VTABLE_REG
8641 GENERIC_SHARING_FAILURE (*ip);
8644 if (cfg->generic_sharing_context)
8645 context_used = mono_class_check_context_used (klass);
8647 ftype = mono_field_get_type (field);
8649 g_assert (!(ftype->attrs & FIELD_ATTRIBUTE_LITERAL));
8651 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
8652 * to be called here.
8654 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
8655 mono_class_vtable (cfg->domain, klass);
8656 CHECK_TYPELOAD (klass);
8658 mono_domain_lock (cfg->domain);
8659 if (cfg->domain->special_static_fields)
8660 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
8661 mono_domain_unlock (cfg->domain);
8663 is_special_static = mono_class_field_is_special_static (field);
8665 /* Generate IR to compute the field address */
8666 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && mono_get_thread_intrinsic (cfg) && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
8668 * Fast access to TLS data
8669 * Inline version of get_thread_static_data () in
8673 int idx, static_data_reg, array_reg, dreg;
8674 MonoInst *thread_ins;
8676 // offset &= 0x7fffffff;
8677 // idx = (offset >> 24) - 1;
8678 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
8680 thread_ins = mono_get_thread_intrinsic (cfg);
8681 MONO_ADD_INS (cfg->cbb, thread_ins);
8682 static_data_reg = alloc_ireg (cfg);
8683 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
8685 if (cfg->compile_aot) {
8686 int offset_reg, offset2_reg, idx_reg;
8688 /* For TLS variables, this will return the TLS offset */
8689 EMIT_NEW_SFLDACONST (cfg, ins, field);
8690 offset_reg = ins->dreg;
8691 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
8692 idx_reg = alloc_ireg (cfg);
8693 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
8694 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
8695 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
8696 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
8697 array_reg = alloc_ireg (cfg);
8698 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
8699 offset2_reg = alloc_ireg (cfg);
8700 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
8701 dreg = alloc_ireg (cfg);
8702 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
8704 offset = (gsize)addr & 0x7fffffff;
8705 idx = (offset >> 24) - 1;
8707 array_reg = alloc_ireg (cfg);
8708 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
8709 dreg = alloc_ireg (cfg);
8710 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, (offset & 0xffffff));
8712 } else if ((cfg->opt & MONO_OPT_SHARED) ||
8713 (cfg->compile_aot && is_special_static) ||
8714 (context_used && is_special_static)) {
8715 MonoInst *iargs [2];
8717 g_assert (field->parent);
8718 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8720 iargs [1] = emit_get_rgctx_field (cfg, context_used,
8721 field, MONO_RGCTX_INFO_CLASS_FIELD);
8723 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8725 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8726 } else if (context_used) {
8727 MonoInst *static_data;
8730 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
8731 method->klass->name_space, method->klass->name, method->name,
8732 depth, field->offset);
8735 if (mono_class_needs_cctor_run (klass, method))
8736 emit_generic_class_init (cfg, klass);
8739 * The pointer we're computing here is
8741 * super_info.static_data + field->offset
8743 static_data = emit_get_rgctx_klass (cfg, context_used,
8744 klass, MONO_RGCTX_INFO_STATIC_DATA);
8746 if (field->offset == 0) {
8749 int addr_reg = mono_alloc_preg (cfg);
8750 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
8752 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
8753 MonoInst *iargs [2];
8755 g_assert (field->parent);
8756 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8757 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8758 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8760 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
8762 CHECK_TYPELOAD (klass);
8764 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
8765 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
8766 if (cfg->verbose_level > 2)
8767 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
8768 class_inits = g_slist_prepend (class_inits, vtable);
8770 if (cfg->run_cctors) {
8772 /* This makes so that inline cannot trigger */
8773 /* .cctors: too many apps depend on them */
8774 /* running with a specific order... */
8775 if (! vtable->initialized)
8777 ex = mono_runtime_class_init_full (vtable, FALSE);
8779 set_exception_object (cfg, ex);
8780 goto exception_exit;
8784 addr = (char*)vtable->data + field->offset;
8786 if (cfg->compile_aot)
8787 EMIT_NEW_SFLDACONST (cfg, ins, field);
8789 EMIT_NEW_PCONST (cfg, ins, addr);
8791 MonoInst *iargs [1];
8792 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
8793 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
8797 /* Generate IR to do the actual load/store operation */
8799 if (*ip == CEE_LDSFLDA) {
8800 ins->klass = mono_class_from_mono_type (ftype);
8801 ins->type = STACK_PTR;
8803 } else if (*ip == CEE_STSFLD) {
8808 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, sp [0]->dreg);
8809 store->flags |= ins_flag;
8811 gboolean is_const = FALSE;
8812 MonoVTable *vtable = NULL;
8814 if (!context_used) {
8815 vtable = mono_class_vtable (cfg->domain, klass);
8816 CHECK_TYPELOAD (klass);
8818 if (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) &&
8819 vtable->initialized && (ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY)) {
8820 gpointer addr = (char*)vtable->data + field->offset;
8821 int ro_type = ftype->type;
8822 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
8823 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
8825 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
8828 case MONO_TYPE_BOOLEAN:
8830 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
8834 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
8837 case MONO_TYPE_CHAR:
8839 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
8843 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
8848 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
8852 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
8858 case MONO_TYPE_FNPTR:
8859 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
8860 type_to_eval_stack_type ((cfg), field->type, *sp);
8863 case MONO_TYPE_STRING:
8864 case MONO_TYPE_OBJECT:
8865 case MONO_TYPE_CLASS:
8866 case MONO_TYPE_SZARRAY:
8867 case MONO_TYPE_ARRAY:
8868 if (!mono_gc_is_moving ()) {
8869 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
8870 type_to_eval_stack_type ((cfg), field->type, *sp);
8878 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
8883 case MONO_TYPE_VALUETYPE:
8893 CHECK_STACK_OVF (1);
8895 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
8896 load->flags |= ins_flag;
8909 token = read32 (ip + 1);
8910 klass = mini_get_class (method, token, generic_context);
8911 CHECK_TYPELOAD (klass);
8912 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
8913 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
8914 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
8915 generic_class_is_reference_type (cfg, klass)) {
8916 /* insert call to write barrier */
8917 emit_write_barrier (cfg, sp [0], sp [1], -1);
8929 const char *data_ptr;
8931 guint32 field_token;
8937 token = read32 (ip + 1);
8939 klass = mini_get_class (method, token, generic_context);
8940 CHECK_TYPELOAD (klass);
8942 if (cfg->generic_sharing_context)
8943 context_used = mono_class_check_context_used (klass);
8945 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
8946 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_I4);
8947 ins->sreg1 = sp [0]->dreg;
8948 ins->type = STACK_I4;
8949 ins->dreg = alloc_ireg (cfg);
8950 MONO_ADD_INS (cfg->cbb, ins);
8951 *sp = mono_decompose_opcode (cfg, ins);
8956 MonoClass *array_class = mono_array_class_get (klass, 1);
8957 /* FIXME: we cannot get a managed
8958 allocator because we can't get the
8959 open generic class's vtable. We
8960 have the same problem in
8961 handle_alloc(). This
8962 needs to be solved so that we can
8963 have managed allocs of shared
8966 MonoVTable *array_class_vtable = mono_class_vtable (cfg->domain, array_class);
8967 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class_vtable, 1);
8969 MonoMethod *managed_alloc = NULL;
8971 /* FIXME: Decompose later to help abcrem */
8974 args [0] = emit_get_rgctx_klass (cfg, context_used,
8975 array_class, MONO_RGCTX_INFO_VTABLE);
8980 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
8982 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
8984 if (cfg->opt & MONO_OPT_SHARED) {
8985 /* Decompose now to avoid problems with references to the domainvar */
8986 MonoInst *iargs [3];
8988 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8989 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8992 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
8994 /* Decompose later since it is needed by abcrem */
8995 MonoClass *array_type = mono_array_class_get (klass, 1);
8996 mono_class_vtable (cfg->domain, array_type);
8997 CHECK_TYPELOAD (array_type);
8999 MONO_INST_NEW (cfg, ins, OP_NEWARR);
9000 ins->dreg = alloc_ireg_ref (cfg);
9001 ins->sreg1 = sp [0]->dreg;
9002 ins->inst_newa_class = klass;
9003 ins->type = STACK_OBJ;
9005 MONO_ADD_INS (cfg->cbb, ins);
9006 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
9007 cfg->cbb->has_array_access = TRUE;
9009 /* Needed so mono_emit_load_get_addr () gets called */
9010 mono_get_got_var (cfg);
9020 * we inline/optimize the initialization sequence if possible.
9021 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
9022 * for small sizes open code the memcpy
9023 * ensure the rva field is big enough
9025 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
9026 MonoMethod *memcpy_method = get_memcpy_method ();
9027 MonoInst *iargs [3];
9028 int add_reg = alloc_ireg_mp (cfg);
9030 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
9031 if (cfg->compile_aot) {
9032 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
9034 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
9036 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
9037 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
9046 if (sp [0]->type != STACK_OBJ)
9049 MONO_INST_NEW (cfg, ins, OP_LDLEN);
9050 ins->dreg = alloc_preg (cfg);
9051 ins->sreg1 = sp [0]->dreg;
9052 ins->type = STACK_I4;
9053 /* This flag will be inherited by the decomposition */
9054 ins->flags |= MONO_INST_FAULT;
9055 MONO_ADD_INS (cfg->cbb, ins);
9056 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
9057 cfg->cbb->has_array_access = TRUE;
9065 if (sp [0]->type != STACK_OBJ)
9068 cfg->flags |= MONO_CFG_HAS_LDELEMA;
9070 klass = mini_get_class (method, read32 (ip + 1), generic_context);
9071 CHECK_TYPELOAD (klass);
9072 /* we need to make sure that this array is exactly the type it needs
9073 * to be for correctness. the wrappers are lax with their usage
9074 * so we need to ignore them here
9076 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
9077 MonoClass *array_class = mono_array_class_get (klass, 1);
9078 mini_emit_check_array_type (cfg, sp [0], array_class);
9079 CHECK_TYPELOAD (array_class);
9083 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
9098 case CEE_LDELEM_REF: {
9104 if (*ip == CEE_LDELEM) {
9106 token = read32 (ip + 1);
9107 klass = mini_get_class (method, token, generic_context);
9108 CHECK_TYPELOAD (klass);
9109 mono_class_init (klass);
9112 klass = array_access_to_klass (*ip);
9114 if (sp [0]->type != STACK_OBJ)
9117 cfg->flags |= MONO_CFG_HAS_LDELEMA;
9119 if (sp [1]->opcode == OP_ICONST) {
9120 int array_reg = sp [0]->dreg;
9121 int index_reg = sp [1]->dreg;
9122 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
9124 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
9125 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
9127 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
9128 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
9131 if (*ip == CEE_LDELEM)
9144 case CEE_STELEM_REF:
9151 cfg->flags |= MONO_CFG_HAS_LDELEMA;
9153 if (*ip == CEE_STELEM) {
9155 token = read32 (ip + 1);
9156 klass = mini_get_class (method, token, generic_context);
9157 CHECK_TYPELOAD (klass);
9158 mono_class_init (klass);
9161 klass = array_access_to_klass (*ip);
9163 if (sp [0]->type != STACK_OBJ)
9166 /* storing a NULL doesn't need any of the complex checks in stelemref */
9167 if (generic_class_is_reference_type (cfg, klass) &&
9168 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
9169 MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
9170 MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
9171 MonoInst *iargs [3];
9174 mono_class_setup_vtable (obj_array);
9175 g_assert (helper->slot);
9177 if (sp [0]->type != STACK_OBJ)
9179 if (sp [2]->type != STACK_OBJ)
9186 mono_emit_method_call (cfg, helper, iargs, sp [0]);
9188 if (sp [1]->opcode == OP_ICONST) {
9189 int array_reg = sp [0]->dreg;
9190 int index_reg = sp [1]->dreg;
9191 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
9193 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
9194 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
9196 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
9197 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
9201 if (*ip == CEE_STELEM)
9208 case CEE_CKFINITE: {
9212 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
9213 ins->sreg1 = sp [0]->dreg;
9214 ins->dreg = alloc_freg (cfg);
9215 ins->type = STACK_R8;
9216 MONO_ADD_INS (bblock, ins);
9218 *sp++ = mono_decompose_opcode (cfg, ins);
9223 case CEE_REFANYVAL: {
9224 MonoInst *src_var, *src;
9226 int klass_reg = alloc_preg (cfg);
9227 int dreg = alloc_preg (cfg);
9230 MONO_INST_NEW (cfg, ins, *ip);
9233 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
9234 CHECK_TYPELOAD (klass);
9235 mono_class_init (klass);
9237 if (cfg->generic_sharing_context)
9238 context_used = mono_class_check_context_used (klass);
9241 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9243 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
9244 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
9245 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
9248 MonoInst *klass_ins;
9250 klass_ins = emit_get_rgctx_klass (cfg, context_used,
9251 klass, MONO_RGCTX_INFO_KLASS);
9254 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
9255 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
9257 mini_emit_class_check (cfg, klass_reg, klass);
9259 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
9260 ins->type = STACK_MP;
9265 case CEE_MKREFANY: {
9266 MonoInst *loc, *addr;
9269 MONO_INST_NEW (cfg, ins, *ip);
9272 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
9273 CHECK_TYPELOAD (klass);
9274 mono_class_init (klass);
9276 if (cfg->generic_sharing_context)
9277 context_used = mono_class_check_context_used (klass);
9279 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
9280 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
9283 MonoInst *const_ins;
9284 int type_reg = alloc_preg (cfg);
9286 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
9287 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
9288 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
9289 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
9290 } else if (cfg->compile_aot) {
9291 int const_reg = alloc_preg (cfg);
9292 int type_reg = alloc_preg (cfg);
9294 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
9295 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
9296 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
9297 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
9299 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
9300 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
9302 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
9304 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
9305 ins->type = STACK_VTYPE;
9306 ins->klass = mono_defaults.typed_reference_class;
9313 MonoClass *handle_class;
9315 CHECK_STACK_OVF (1);
9318 n = read32 (ip + 1);
9320 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
9321 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
9322 handle = mono_method_get_wrapper_data (method, n);
9323 handle_class = mono_method_get_wrapper_data (method, n + 1);
9324 if (handle_class == mono_defaults.typehandle_class)
9325 handle = &((MonoClass*)handle)->byval_arg;
9328 handle = mono_ldtoken (image, n, &handle_class, generic_context);
9332 mono_class_init (handle_class);
9333 if (cfg->generic_sharing_context) {
9334 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
9335 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
9336 /* This case handles ldtoken
9337 of an open type, like for
9340 } else if (handle_class == mono_defaults.typehandle_class) {
9341 /* If we get a MONO_TYPE_CLASS
9342 then we need to provide the
9344 instantiation of it. */
9345 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
9348 context_used = mono_class_check_context_used (mono_class_from_mono_type (handle));
9349 } else if (handle_class == mono_defaults.fieldhandle_class)
9350 context_used = mono_class_check_context_used (((MonoClassField*)handle)->parent);
9351 else if (handle_class == mono_defaults.methodhandle_class)
9352 context_used = mono_method_check_context_used (handle);
9354 g_assert_not_reached ();
9357 if ((cfg->opt & MONO_OPT_SHARED) &&
9358 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
9359 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
9360 MonoInst *addr, *vtvar, *iargs [3];
9361 int method_context_used;
9363 if (cfg->generic_sharing_context)
9364 method_context_used = mono_method_check_context_used (method);
9366 method_context_used = 0;
9368 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
9370 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
9371 EMIT_NEW_ICONST (cfg, iargs [1], n);
9372 if (method_context_used) {
9373 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
9374 method, MONO_RGCTX_INFO_METHOD);
9375 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
9377 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
9378 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
9380 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9382 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
9384 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9386 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
9387 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
9388 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
9389 (cmethod->klass == mono_defaults.monotype_class->parent) &&
9390 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
9391 MonoClass *tclass = mono_class_from_mono_type (handle);
9393 mono_class_init (tclass);
9395 ins = emit_get_rgctx_klass (cfg, context_used,
9396 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
9397 } else if (cfg->compile_aot) {
9398 if (method->wrapper_type) {
9399 if (mono_class_get (tclass->image, tclass->type_token) == tclass && !generic_context) {
9400 /* Special case for static synchronized wrappers */
9401 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
9403 /* FIXME: n is not a normal token */
9404 cfg->disable_aot = TRUE;
9405 EMIT_NEW_PCONST (cfg, ins, NULL);
9408 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
9411 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
9413 ins->type = STACK_OBJ;
9414 ins->klass = cmethod->klass;
9417 MonoInst *addr, *vtvar;
9419 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
9422 if (handle_class == mono_defaults.typehandle_class) {
9423 ins = emit_get_rgctx_klass (cfg, context_used,
9424 mono_class_from_mono_type (handle),
9425 MONO_RGCTX_INFO_TYPE);
9426 } else if (handle_class == mono_defaults.methodhandle_class) {
9427 ins = emit_get_rgctx_method (cfg, context_used,
9428 handle, MONO_RGCTX_INFO_METHOD);
9429 } else if (handle_class == mono_defaults.fieldhandle_class) {
9430 ins = emit_get_rgctx_field (cfg, context_used,
9431 handle, MONO_RGCTX_INFO_CLASS_FIELD);
9433 g_assert_not_reached ();
9435 } else if (cfg->compile_aot) {
9436 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n);
9438 EMIT_NEW_PCONST (cfg, ins, handle);
9440 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9441 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
9442 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9452 MONO_INST_NEW (cfg, ins, OP_THROW);
9454 ins->sreg1 = sp [0]->dreg;
9456 bblock->out_of_line = TRUE;
9457 MONO_ADD_INS (bblock, ins);
9458 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
9459 MONO_ADD_INS (bblock, ins);
9462 link_bblock (cfg, bblock, end_bblock);
9463 start_new_bblock = 1;
9465 case CEE_ENDFINALLY:
9466 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
9467 MONO_ADD_INS (bblock, ins);
9469 start_new_bblock = 1;
9472 * Control will leave the method so empty the stack, otherwise
9473 * the next basic block will start with a nonempty stack.
9475 while (sp != stack_start) {
9483 if (*ip == CEE_LEAVE) {
9485 target = ip + 5 + (gint32)read32(ip + 1);
9488 target = ip + 2 + (signed char)(ip [1]);
9491 /* empty the stack */
9492 while (sp != stack_start) {
9497 * If this leave statement is in a catch block, check for a
9498 * pending exception, and rethrow it if necessary.
9499 * We avoid doing this in runtime invoke wrappers, since those are called
9500 * by native code which expects the wrapper to catch all exceptions.
9502 for (i = 0; i < header->num_clauses; ++i) {
9503 MonoExceptionClause *clause = &header->clauses [i];
9506 * Use <= in the final comparison to handle clauses with multiple
9507 * leave statements, like in bug #78024.
9508 * The ordering of the exception clauses guarantees that we find the
9511 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
9513 MonoBasicBlock *dont_throw;
9518 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
9521 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
9523 NEW_BBLOCK (cfg, dont_throw);
9526 * Currently, we always rethrow the abort exception, despite the
9527 * fact that this is not correct. See thread6.cs for an example.
9528 * But propagating the abort exception is more important than
9529 * getting the semantics right.
9531 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
9532 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
9533 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
9535 MONO_START_BB (cfg, dont_throw);
9540 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
9542 MonoExceptionClause *clause;
9544 for (tmp = handlers; tmp; tmp = tmp->next) {
9546 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
9548 link_bblock (cfg, bblock, tblock);
9549 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
9550 ins->inst_target_bb = tblock;
9551 ins->inst_eh_block = clause;
9552 MONO_ADD_INS (bblock, ins);
9553 bblock->has_call_handler = 1;
9554 if (COMPILE_LLVM (cfg)) {
9555 MonoBasicBlock *target_bb;
9558 * Link the finally bblock with the target, since it will
9559 * conceptually branch there.
9560 * FIXME: Have to link the bblock containing the endfinally.
9562 GET_BBLOCK (cfg, target_bb, target);
9563 link_bblock (cfg, tblock, target_bb);
9566 g_list_free (handlers);
9569 MONO_INST_NEW (cfg, ins, OP_BR);
9570 MONO_ADD_INS (bblock, ins);
9571 GET_BBLOCK (cfg, tblock, target);
9572 link_bblock (cfg, bblock, tblock);
9573 ins->inst_target_bb = tblock;
9574 start_new_bblock = 1;
9576 if (*ip == CEE_LEAVE)
9585 * Mono specific opcodes
9587 case MONO_CUSTOM_PREFIX: {
9589 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
9593 case CEE_MONO_ICALL: {
9595 MonoJitICallInfo *info;
9597 token = read32 (ip + 2);
9598 func = mono_method_get_wrapper_data (method, token);
9599 info = mono_find_jit_icall_by_addr (func);
9602 CHECK_STACK (info->sig->param_count);
9603 sp -= info->sig->param_count;
9605 ins = mono_emit_jit_icall (cfg, info->func, sp);
9606 if (!MONO_TYPE_IS_VOID (info->sig->ret))
9610 inline_costs += 10 * num_calls++;
9614 case CEE_MONO_LDPTR: {
9617 CHECK_STACK_OVF (1);
9619 token = read32 (ip + 2);
9621 ptr = mono_method_get_wrapper_data (method, token);
9622 if (cfg->compile_aot && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (strstr (method->name, "__icall_wrapper_") == method->name)) {
9623 MonoJitICallInfo *callinfo;
9624 const char *icall_name;
9626 icall_name = method->name + strlen ("__icall_wrapper_");
9627 g_assert (icall_name);
9628 callinfo = mono_find_jit_icall_by_name (icall_name);
9629 g_assert (callinfo);
9631 if (ptr == callinfo->func) {
9632 /* Will be transformed into an AOTCONST later */
9633 EMIT_NEW_PCONST (cfg, ins, ptr);
9639 /* FIXME: Generalize this */
9640 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
9641 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
9646 EMIT_NEW_PCONST (cfg, ins, ptr);
9649 inline_costs += 10 * num_calls++;
9650 /* Can't embed random pointers into AOT code */
9651 cfg->disable_aot = 1;
9654 case CEE_MONO_ICALL_ADDR: {
9655 MonoMethod *cmethod;
9658 CHECK_STACK_OVF (1);
9660 token = read32 (ip + 2);
9662 cmethod = mono_method_get_wrapper_data (method, token);
9664 if (cfg->compile_aot) {
9665 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
9667 ptr = mono_lookup_internal_call (cmethod);
9669 EMIT_NEW_PCONST (cfg, ins, ptr);
9675 case CEE_MONO_VTADDR: {
9676 MonoInst *src_var, *src;
9682 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9683 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
9688 case CEE_MONO_NEWOBJ: {
9689 MonoInst *iargs [2];
9691 CHECK_STACK_OVF (1);
9693 token = read32 (ip + 2);
9694 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9695 mono_class_init (klass);
9696 NEW_DOMAINCONST (cfg, iargs [0]);
9697 MONO_ADD_INS (cfg->cbb, iargs [0]);
9698 NEW_CLASSCONST (cfg, iargs [1], klass);
9699 MONO_ADD_INS (cfg->cbb, iargs [1]);
9700 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
9702 inline_costs += 10 * num_calls++;
9705 case CEE_MONO_OBJADDR:
9708 MONO_INST_NEW (cfg, ins, OP_MOVE);
9709 ins->dreg = alloc_ireg_mp (cfg);
9710 ins->sreg1 = sp [0]->dreg;
9711 ins->type = STACK_MP;
9712 MONO_ADD_INS (cfg->cbb, ins);
9716 case CEE_MONO_LDNATIVEOBJ:
9718 * Similar to LDOBJ, but instead load the unmanaged
9719 * representation of the vtype to the stack.
9724 token = read32 (ip + 2);
9725 klass = mono_method_get_wrapper_data (method, token);
9726 g_assert (klass->valuetype);
9727 mono_class_init (klass);
9730 MonoInst *src, *dest, *temp;
9733 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
9734 temp->backend.is_pinvoke = 1;
9735 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
9736 mini_emit_stobj (cfg, dest, src, klass, TRUE);
9738 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
9739 dest->type = STACK_VTYPE;
9740 dest->klass = klass;
9746 case CEE_MONO_RETOBJ: {
9748 * Same as RET, but return the native representation of a vtype
9751 g_assert (cfg->ret);
9752 g_assert (mono_method_signature (method)->pinvoke);
9757 token = read32 (ip + 2);
9758 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9760 if (!cfg->vret_addr) {
9761 g_assert (cfg->ret_var_is_local);
9763 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
9765 EMIT_NEW_RETLOADA (cfg, ins);
9767 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
9769 if (sp != stack_start)
9772 MONO_INST_NEW (cfg, ins, OP_BR);
9773 ins->inst_target_bb = end_bblock;
9774 MONO_ADD_INS (bblock, ins);
9775 link_bblock (cfg, bblock, end_bblock);
9776 start_new_bblock = 1;
9780 case CEE_MONO_CISINST:
9781 case CEE_MONO_CCASTCLASS: {
9786 token = read32 (ip + 2);
9787 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9788 if (ip [1] == CEE_MONO_CISINST)
9789 ins = handle_cisinst (cfg, klass, sp [0]);
9791 ins = handle_ccastclass (cfg, klass, sp [0]);
9797 case CEE_MONO_SAVE_LMF:
9798 case CEE_MONO_RESTORE_LMF:
9799 #ifdef MONO_ARCH_HAVE_LMF_OPS
9800 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
9801 MONO_ADD_INS (bblock, ins);
9802 cfg->need_lmf_area = TRUE;
9806 case CEE_MONO_CLASSCONST:
9807 CHECK_STACK_OVF (1);
9809 token = read32 (ip + 2);
9810 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
9813 inline_costs += 10 * num_calls++;
9815 case CEE_MONO_NOT_TAKEN:
9816 bblock->out_of_line = TRUE;
9820 CHECK_STACK_OVF (1);
9822 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
9823 ins->dreg = alloc_preg (cfg);
9824 ins->inst_offset = (gint32)read32 (ip + 2);
9825 ins->type = STACK_PTR;
9826 MONO_ADD_INS (bblock, ins);
9830 case CEE_MONO_DYN_CALL: {
9833 /* It would be easier to call a trampoline, but that would put an
9834 * extra frame on the stack, confusing exception handling. So
9835 * implement it inline using an opcode for now.
9838 if (!cfg->dyn_call_var) {
9839 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
9840 /* prevent it from being register allocated */
9841 cfg->dyn_call_var->flags |= MONO_INST_INDIRECT;
9844 /* Has to use a call inst since the local regalloc expects it */
9845 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
9846 ins = (MonoInst*)call;
9848 ins->sreg1 = sp [0]->dreg;
9849 ins->sreg2 = sp [1]->dreg;
9850 MONO_ADD_INS (bblock, ins);
9852 #ifdef MONO_ARCH_DYN_CALL_PARAM_AREA
9853 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
9857 inline_costs += 10 * num_calls++;
9862 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
9872 /* somewhat similar to LDTOKEN */
9873 MonoInst *addr, *vtvar;
9874 CHECK_STACK_OVF (1);
9875 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
9877 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9878 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
9880 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9881 ins->type = STACK_VTYPE;
9882 ins->klass = mono_defaults.argumenthandle_class;
9895 * The following transforms:
9896 * CEE_CEQ into OP_CEQ
9897 * CEE_CGT into OP_CGT
9898 * CEE_CGT_UN into OP_CGT_UN
9899 * CEE_CLT into OP_CLT
9900 * CEE_CLT_UN into OP_CLT_UN
9902 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
9904 MONO_INST_NEW (cfg, ins, cmp->opcode);
9906 cmp->sreg1 = sp [0]->dreg;
9907 cmp->sreg2 = sp [1]->dreg;
9908 type_from_op (cmp, sp [0], sp [1]);
9910 if ((sp [0]->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
9911 cmp->opcode = OP_LCOMPARE;
9912 else if (sp [0]->type == STACK_R8)
9913 cmp->opcode = OP_FCOMPARE;
9915 cmp->opcode = OP_ICOMPARE;
9916 MONO_ADD_INS (bblock, cmp);
9917 ins->type = STACK_I4;
9918 ins->dreg = alloc_dreg (cfg, ins->type);
9919 type_from_op (ins, sp [0], sp [1]);
9921 if (cmp->opcode == OP_FCOMPARE) {
9923 * The backends expect the fceq opcodes to do the
9926 cmp->opcode = OP_NOP;
9927 ins->sreg1 = cmp->sreg1;
9928 ins->sreg2 = cmp->sreg2;
9930 MONO_ADD_INS (bblock, ins);
9937 MonoMethod *cil_method;
9938 gboolean needs_static_rgctx_invoke;
9940 CHECK_STACK_OVF (1);
9942 n = read32 (ip + 2);
9943 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9944 if (!cmethod || mono_loader_get_last_error ())
9946 mono_class_init (cmethod->klass);
9948 mono_save_token_info (cfg, image, n, cmethod);
9950 if (cfg->generic_sharing_context)
9951 context_used = mono_method_check_context_used (cmethod);
9953 needs_static_rgctx_invoke = mono_method_needs_static_rgctx_invoke (cmethod, TRUE);
9955 cil_method = cmethod;
9956 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
9957 METHOD_ACCESS_FAILURE;
9959 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9960 if (check_linkdemand (cfg, method, cmethod))
9962 CHECK_CFG_EXCEPTION;
9963 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9964 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9968 * Optimize the common case of ldftn+delegate creation
9970 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
9971 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
9972 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
9973 MonoInst *target_ins;
9975 int invoke_context_used = 0;
9977 invoke = mono_get_delegate_invoke (ctor_method->klass);
9978 if (!invoke || !mono_method_signature (invoke))
9981 if (cfg->generic_sharing_context)
9982 invoke_context_used = mono_method_check_context_used (invoke);
9984 target_ins = sp [-1];
9986 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
9987 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
9988 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
9989 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
9990 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
9994 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
9995 /* FIXME: SGEN support */
9996 if (invoke_context_used == 0) {
9998 if (cfg->verbose_level > 3)
9999 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
10001 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used);
10002 CHECK_CFG_EXCEPTION;
10011 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
10012 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
10016 inline_costs += 10 * num_calls++;
10019 case CEE_LDVIRTFTN: {
10020 MonoInst *args [2];
10024 n = read32 (ip + 2);
10025 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
10026 if (!cmethod || mono_loader_get_last_error ())
10028 mono_class_init (cmethod->klass);
10030 if (cfg->generic_sharing_context)
10031 context_used = mono_method_check_context_used (cmethod);
10033 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
10034 if (check_linkdemand (cfg, method, cmethod))
10036 CHECK_CFG_EXCEPTION;
10037 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
10038 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
10044 args [1] = emit_get_rgctx_method (cfg, context_used,
10045 cmethod, MONO_RGCTX_INFO_METHOD);
10048 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
10050 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
10053 inline_costs += 10 * num_calls++;
10057 CHECK_STACK_OVF (1);
10059 n = read16 (ip + 2);
10061 EMIT_NEW_ARGLOAD (cfg, ins, n);
10066 CHECK_STACK_OVF (1);
10068 n = read16 (ip + 2);
10070 NEW_ARGLOADA (cfg, ins, n);
10071 MONO_ADD_INS (cfg->cbb, ins);
10079 n = read16 (ip + 2);
10081 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
10083 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
10087 CHECK_STACK_OVF (1);
10089 n = read16 (ip + 2);
10091 EMIT_NEW_LOCLOAD (cfg, ins, n);
10096 unsigned char *tmp_ip;
10097 CHECK_STACK_OVF (1);
10099 n = read16 (ip + 2);
10102 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
10108 EMIT_NEW_LOCLOADA (cfg, ins, n);
10117 n = read16 (ip + 2);
10119 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
10121 emit_stloc_ir (cfg, sp, header, n);
10128 if (sp != stack_start)
10130 if (cfg->method != method)
10132 * Inlining this into a loop in a parent could lead to
10133 * stack overflows which is different behavior than the
10134 * non-inlined case, thus disable inlining in this case.
10136 goto inline_failure;
10138 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
10139 ins->dreg = alloc_preg (cfg);
10140 ins->sreg1 = sp [0]->dreg;
10141 ins->type = STACK_PTR;
10142 MONO_ADD_INS (cfg->cbb, ins);
10144 cfg->flags |= MONO_CFG_HAS_ALLOCA;
10146 ins->flags |= MONO_INST_INIT;
10151 case CEE_ENDFILTER: {
10152 MonoExceptionClause *clause, *nearest;
10153 int cc, nearest_num;
10157 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
10159 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
10160 ins->sreg1 = (*sp)->dreg;
10161 MONO_ADD_INS (bblock, ins);
10162 start_new_bblock = 1;
10167 for (cc = 0; cc < header->num_clauses; ++cc) {
10168 clause = &header->clauses [cc];
10169 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
10170 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
10171 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
10176 g_assert (nearest);
10177 if ((ip - header->code) != nearest->handler_offset)
10182 case CEE_UNALIGNED_:
10183 ins_flag |= MONO_INST_UNALIGNED;
10184 /* FIXME: record alignment? we can assume 1 for now */
10188 case CEE_VOLATILE_:
10189 ins_flag |= MONO_INST_VOLATILE;
10193 ins_flag |= MONO_INST_TAILCALL;
10194 cfg->flags |= MONO_CFG_HAS_TAIL;
10195 /* Can't inline tail calls at this time */
10196 inline_costs += 100000;
10203 token = read32 (ip + 2);
10204 klass = mini_get_class (method, token, generic_context);
10205 CHECK_TYPELOAD (klass);
10206 if (generic_class_is_reference_type (cfg, klass))
10207 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
10209 mini_emit_initobj (cfg, *sp, NULL, klass);
10213 case CEE_CONSTRAINED_:
10215 token = read32 (ip + 2);
10216 if (method->wrapper_type != MONO_WRAPPER_NONE)
10217 constrained_call = (MonoClass *)mono_method_get_wrapper_data (method, token);
10219 constrained_call = mono_class_get_full (image, token, generic_context);
10220 CHECK_TYPELOAD (constrained_call);
10224 case CEE_INITBLK: {
10225 MonoInst *iargs [3];
10229 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
10230 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
10231 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
10232 /* emit_memset only works when val == 0 */
10233 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
10235 iargs [0] = sp [0];
10236 iargs [1] = sp [1];
10237 iargs [2] = sp [2];
10238 if (ip [1] == CEE_CPBLK) {
10239 MonoMethod *memcpy_method = get_memcpy_method ();
10240 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
10242 MonoMethod *memset_method = get_memset_method ();
10243 mono_emit_method_call (cfg, memset_method, iargs, NULL);
10253 ins_flag |= MONO_INST_NOTYPECHECK;
10255 ins_flag |= MONO_INST_NORANGECHECK;
10256 /* we ignore the no-nullcheck for now since we
10257 * really do it explicitly only when doing callvirt->call
10261 case CEE_RETHROW: {
10263 int handler_offset = -1;
10265 for (i = 0; i < header->num_clauses; ++i) {
10266 MonoExceptionClause *clause = &header->clauses [i];
10267 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
10268 handler_offset = clause->handler_offset;
10273 bblock->flags |= BB_EXCEPTION_UNSAFE;
10275 g_assert (handler_offset != -1);
10277 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
10278 MONO_INST_NEW (cfg, ins, OP_RETHROW);
10279 ins->sreg1 = load->dreg;
10280 MONO_ADD_INS (bblock, ins);
10282 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
10283 MONO_ADD_INS (bblock, ins);
10286 link_bblock (cfg, bblock, end_bblock);
10287 start_new_bblock = 1;
10295 CHECK_STACK_OVF (1);
10297 token = read32 (ip + 2);
10298 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !method->klass->image->dynamic) {
10299 MonoType *type = mono_type_create_from_typespec (image, token);
10300 token = mono_type_size (type, &ialign);
10302 MonoClass *klass = mono_class_get_full (image, token, generic_context);
10303 CHECK_TYPELOAD (klass);
10304 mono_class_init (klass);
10305 token = mono_class_value_size (klass, &align);
10307 EMIT_NEW_ICONST (cfg, ins, token);
10312 case CEE_REFANYTYPE: {
10313 MonoInst *src_var, *src;
10319 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
10321 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
10322 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
10323 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
10328 case CEE_READONLY_:
10341 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
10351 g_warning ("opcode 0x%02x not handled", *ip);
10355 if (start_new_bblock != 1)
10358 bblock->cil_length = ip - bblock->cil_code;
10359 bblock->next_bb = end_bblock;
10361 if (cfg->method == method && cfg->domainvar) {
10363 MonoInst *get_domain;
10365 cfg->cbb = init_localsbb;
10367 if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
10368 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
10371 get_domain->dreg = alloc_preg (cfg);
10372 MONO_ADD_INS (cfg->cbb, get_domain);
10374 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
10375 MONO_ADD_INS (cfg->cbb, store);
10378 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
10379 if (cfg->compile_aot)
10380 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
10381 mono_get_got_var (cfg);
10384 if (cfg->method == method && cfg->got_var)
10385 mono_emit_load_got_addr (cfg);
10390 cfg->cbb = init_localsbb;
10392 for (i = 0; i < header->num_locals; ++i) {
10393 MonoType *ptype = header->locals [i];
10394 int t = ptype->type;
10395 dreg = cfg->locals [i]->dreg;
10397 if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
10398 t = mono_class_enum_basetype (ptype->data.klass)->type;
10399 if (ptype->byref) {
10400 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
10401 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
10402 MONO_EMIT_NEW_ICONST (cfg, cfg->locals [i]->dreg, 0);
10403 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
10404 MONO_EMIT_NEW_I8CONST (cfg, cfg->locals [i]->dreg, 0);
10405 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
10406 MONO_INST_NEW (cfg, ins, OP_R8CONST);
10407 ins->type = STACK_R8;
10408 ins->inst_p0 = (void*)&r8_0;
10409 ins->dreg = alloc_dreg (cfg, STACK_R8);
10410 MONO_ADD_INS (init_localsbb, ins);
10411 EMIT_NEW_LOCSTORE (cfg, store, i, ins);
10412 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
10413 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
10414 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
10416 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
10421 if (cfg->init_ref_vars && cfg->method == method) {
10422 /* Emit initialization for ref vars */
10423 // FIXME: Avoid duplication initialization for IL locals.
10424 for (i = 0; i < cfg->num_varinfo; ++i) {
10425 MonoInst *ins = cfg->varinfo [i];
10427 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
10428 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
10432 /* Add a sequence point for method entry/exit events */
10434 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
10435 MONO_ADD_INS (init_localsbb, ins);
10436 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
10437 MONO_ADD_INS (cfg->bb_exit, ins);
10442 if (cfg->method == method) {
10443 MonoBasicBlock *bb;
10444 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10445 bb->region = mono_find_block_region (cfg, bb->real_offset);
10447 mono_create_spvar_for_region (cfg, bb->region);
10448 if (cfg->verbose_level > 2)
10449 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
10453 g_slist_free (class_inits);
10454 dont_inline = g_list_remove (dont_inline, method);
10456 if (inline_costs < 0) {
10459 /* Method is too large */
10460 mname = mono_method_full_name (method, TRUE);
10461 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
10462 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
10464 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
10465 mono_basic_block_free (original_bb);
10469 if ((cfg->verbose_level > 2) && (cfg->method == method))
10470 mono_print_code (cfg, "AFTER METHOD-TO-IR");
10472 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
10473 mono_basic_block_free (original_bb);
10474 return inline_costs;
10477 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
10484 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
10488 set_exception_type_from_invalid_il (cfg, method, ip);
10492 g_slist_free (class_inits);
10493 mono_basic_block_free (original_bb);
10494 dont_inline = g_list_remove (dont_inline, method);
10495 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Return the immediate-source variant of a register-source
 * store-to-membase opcode (e.g. OP_STOREI4_MEMBASE_REG ->
 * OP_STOREI4_MEMBASE_IMM).  Asserts if OPCODE has no such variant.
 * NOTE(review): some lines of this function are elided in this extract.
 */
10500 store_membase_reg_to_store_membase_imm (int opcode)
10503 case OP_STORE_MEMBASE_REG:
10504 return OP_STORE_MEMBASE_IMM;
10505 case OP_STOREI1_MEMBASE_REG:
10506 return OP_STOREI1_MEMBASE_IMM;
10507 case OP_STOREI2_MEMBASE_REG:
10508 return OP_STOREI2_MEMBASE_IMM;
10509 case OP_STOREI4_MEMBASE_REG:
10510 return OP_STOREI4_MEMBASE_IMM;
10511 case OP_STOREI8_MEMBASE_REG:
10512 return OP_STOREI8_MEMBASE_IMM;
/* No immediate form exists for any other store opcode */
10514 g_assert_not_reached ();
10520 #endif /* DISABLE_JIT */
/*
 * mono_op_to_op_imm:
 *
 *   Map a two-register ALU/compare/store opcode to the equivalent opcode
 * taking an immediate second operand (e.g. OP_IADD -> OP_IADD_IMM,
 * OP_STOREI4_MEMBASE_REG -> OP_STOREI4_MEMBASE_IMM).  Also covers some
 * x86/amd64-specific opcodes under the #if blocks below.
 * NOTE(review): the case labels for most returns are elided in this
 * extract; the behavior for unmatched opcodes is not visible here.
 */
10523 mono_op_to_op_imm (int opcode)
10527 return OP_IADD_IMM;
10529 return OP_ISUB_IMM;
10531 return OP_IDIV_IMM;
10533 return OP_IDIV_UN_IMM;
10535 return OP_IREM_IMM;
10537 return OP_IREM_UN_IMM;
10539 return OP_IMUL_IMM;
10541 return OP_IAND_IMM;
10545 return OP_IXOR_IMM;
10547 return OP_ISHL_IMM;
10549 return OP_ISHR_IMM;
10551 return OP_ISHR_UN_IMM;
/* 64 bit (long) variants */
10554 return OP_LADD_IMM;
10556 return OP_LSUB_IMM;
10558 return OP_LAND_IMM;
10562 return OP_LXOR_IMM;
10564 return OP_LSHL_IMM;
10566 return OP_LSHR_IMM;
10568 return OP_LSHR_UN_IMM;
/* Compares */
10571 return OP_COMPARE_IMM;
10573 return OP_ICOMPARE_IMM;
10575 return OP_LCOMPARE_IMM;
/* Stores: same mapping as store_membase_reg_to_store_membase_imm () */
10577 case OP_STORE_MEMBASE_REG:
10578 return OP_STORE_MEMBASE_IMM;
10579 case OP_STOREI1_MEMBASE_REG:
10580 return OP_STOREI1_MEMBASE_IMM;
10581 case OP_STOREI2_MEMBASE_REG:
10582 return OP_STOREI2_MEMBASE_IMM;
10583 case OP_STOREI4_MEMBASE_REG:
10584 return OP_STOREI4_MEMBASE_IMM;
/* Architecture-specific immediate forms */
10586 #if defined(TARGET_X86) || defined (TARGET_AMD64)
10588 return OP_X86_PUSH_IMM;
10589 case OP_X86_COMPARE_MEMBASE_REG:
10590 return OP_X86_COMPARE_MEMBASE_IMM;
10592 #if defined(TARGET_AMD64)
10593 case OP_AMD64_ICOMPARE_MEMBASE_REG:
10594 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
10596 case OP_VOIDCALL_REG:
10597 return OP_VOIDCALL;
10605 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Convert a CEE_LDIND_* CIL opcode to the corresponding
 * OP_LOAD*_MEMBASE IR opcode.  Asserts on any opcode that is not a
 * ldind variant.
 * NOTE(review): most case labels are elided in this extract; only the
 * CEE_LDIND_REF label is visible.
 */
10612 ldind_to_load_membase (int opcode)
10616 return OP_LOADI1_MEMBASE;
10618 return OP_LOADU1_MEMBASE;
10620 return OP_LOADI2_MEMBASE;
10622 return OP_LOADU2_MEMBASE;
10624 return OP_LOADI4_MEMBASE;
10626 return OP_LOADU4_MEMBASE;
10628 return OP_LOAD_MEMBASE;
/* Object references use the pointer-sized load */
10629 case CEE_LDIND_REF:
10630 return OP_LOAD_MEMBASE;
10632 return OP_LOADI8_MEMBASE;
10634 return OP_LOADR4_MEMBASE;
10636 return OP_LOADR8_MEMBASE;
10638 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Convert a CEE_STIND_* CIL opcode to the corresponding
 * OP_STORE*_MEMBASE_REG IR opcode.  Asserts on any opcode that is not a
 * stind variant.
 * NOTE(review): most case labels are elided in this extract; only the
 * CEE_STIND_REF label is visible.
 */
10645 stind_to_store_membase (int opcode)
10649 return OP_STOREI1_MEMBASE_REG;
10651 return OP_STOREI2_MEMBASE_REG;
10653 return OP_STOREI4_MEMBASE_REG;
/* Object references use the pointer-sized store */
10655 case CEE_STIND_REF:
10656 return OP_STORE_MEMBASE_REG;
10658 return OP_STOREI8_MEMBASE_REG;
10660 return OP_STORER4_MEMBASE_REG;
10662 return OP_STORER8_MEMBASE_REG;
10664 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map an OP_LOAD*_MEMBASE (base register + displacement) opcode to
 * the matching absolute-address OP_LOAD*_MEM opcode.  Only implemented
 * for x86/amd64; 8 byte loads are only mapped when registers are 64 bit
 * (SIZEOF_REGISTER == 8).
 * NOTE(review): the function's tail (default/return and #endif) is
 * elided in this extract.
 */
10671 mono_load_membase_to_load_mem (int opcode)
10673 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
10674 #if defined(TARGET_X86) || defined(TARGET_AMD64)
10676 case OP_LOAD_MEMBASE:
10677 return OP_LOAD_MEM;
10678 case OP_LOADU1_MEMBASE:
10679 return OP_LOADU1_MEM;
10680 case OP_LOADU2_MEMBASE:
10681 return OP_LOADU2_MEM;
10682 case OP_LOADI4_MEMBASE:
10683 return OP_LOADI4_MEM;
10684 case OP_LOADU4_MEMBASE:
10685 return OP_LOADU4_MEM;
10686 #if SIZEOF_REGISTER == 8
10687 case OP_LOADI8_MEMBASE:
10688 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   Given a store opcode STORE_OPCODE whose destination is a memory
 * location, and an ALU opcode OPCODE producing the stored value, return
 * an x86/amd64 read-modify-write opcode that applies the operation
 * directly to memory (e.g. OP_X86_ADD_MEMBASE_REG), avoiding the
 * separate load/op/store sequence.  The leading guards reject store
 * widths the target cannot combine (x86: only pointer/4-byte stores;
 * amd64: also 8-byte stores).
 * NOTE(review): the fallback return for unmatched opcodes is elided in
 * this extract, as are most case labels.
 */
10697 op_to_op_dest_membase (int store_opcode, int opcode)
10699 #if defined(TARGET_X86)
10700 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
/* Register-source RMW forms */
10705 return OP_X86_ADD_MEMBASE_REG;
10707 return OP_X86_SUB_MEMBASE_REG;
10709 return OP_X86_AND_MEMBASE_REG;
10711 return OP_X86_OR_MEMBASE_REG;
10713 return OP_X86_XOR_MEMBASE_REG;
/* Immediate-source RMW forms */
10716 return OP_X86_ADD_MEMBASE_IMM;
10719 return OP_X86_SUB_MEMBASE_IMM;
10722 return OP_X86_AND_MEMBASE_IMM;
10725 return OP_X86_OR_MEMBASE_IMM;
10728 return OP_X86_XOR_MEMBASE_IMM;
10734 #if defined(TARGET_AMD64)
10735 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
/* 32 bit register-source RMW forms */
10740 return OP_X86_ADD_MEMBASE_REG;
10742 return OP_X86_SUB_MEMBASE_REG;
10744 return OP_X86_AND_MEMBASE_REG;
10746 return OP_X86_OR_MEMBASE_REG;
10748 return OP_X86_XOR_MEMBASE_REG;
/* 32 bit immediate-source RMW forms */
10750 return OP_X86_ADD_MEMBASE_IMM;
10752 return OP_X86_SUB_MEMBASE_IMM;
10754 return OP_X86_AND_MEMBASE_IMM;
10756 return OP_X86_OR_MEMBASE_IMM;
10758 return OP_X86_XOR_MEMBASE_IMM;
/* 64 bit register-source RMW forms */
10760 return OP_AMD64_ADD_MEMBASE_REG;
10762 return OP_AMD64_SUB_MEMBASE_REG;
10764 return OP_AMD64_AND_MEMBASE_REG;
10766 return OP_AMD64_OR_MEMBASE_REG;
10768 return OP_AMD64_XOR_MEMBASE_REG;
/* 64 bit immediate-source RMW forms */
10771 return OP_AMD64_ADD_MEMBASE_IMM;
10774 return OP_AMD64_SUB_MEMBASE_IMM;
10777 return OP_AMD64_AND_MEMBASE_IMM;
10780 return OP_AMD64_OR_MEMBASE_IMM;
10783 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Fold a condition-producing opcode followed by a byte store into an
 * x86 SET<cond>-to-memory opcode (OP_X86_SETEQ_MEMBASE /
 * OP_X86_SETNE_MEMBASE), so the flag result is written straight to
 * memory.  Only applies when the store is a 1-byte register store.
 * NOTE(review): the case labels selecting between the EQ and NE branch
 * (presumably OP_CEQ-style opcodes) are elided in this extract — verify
 * against the full source.
 */
10793 op_to_op_store_membase (int store_opcode, int opcode)
10795 #if defined(TARGET_X86) || defined(TARGET_AMD64)
10798 if (store_opcode == OP_STOREI1_MEMBASE_REG)
10799 return OP_X86_SETEQ_MEMBASE;
10801 if (store_opcode == OP_STOREI1_MEMBASE_REG)
10802 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Fold a memory load (LOAD_OPCODE) feeding the first source operand of
 * OPCODE into an x86/amd64 opcode that reads the operand directly from
 * memory (compare-with-membase, push-membase), eliminating the separate
 * load.  Falls through when the load width does not match what the
 * target opcode can consume; under __mono_ilp32__ 8-byte loads are
 * excluded from the pointer-sized cases.
 * NOTE(review): several case labels and the default return are elided
 * in this extract.
 */
10810 op_to_op_src1_membase (int load_opcode, int opcode)
10813 /* FIXME: This has sign extension issues */
10815 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
10816 return OP_X86_COMPARE_MEMBASE8_IMM;
10819 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
10824 return OP_X86_PUSH_MEMBASE;
10825 case OP_COMPARE_IMM:
10826 case OP_ICOMPARE_IMM:
10827 return OP_X86_COMPARE_MEMBASE_IMM;
10830 return OP_X86_COMPARE_MEMBASE_REG;
10834 #ifdef TARGET_AMD64
10835 /* FIXME: This has sign extension issues */
10837 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
10838 return OP_X86_COMPARE_MEMBASE8_IMM;
10843 #ifdef __mono_ilp32__
10844 if (load_opcode == OP_LOADI8_MEMBASE)
10846 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10848 return OP_X86_PUSH_MEMBASE;
10850 /* FIXME: This only works for 32 bit immediates
10851 case OP_COMPARE_IMM:
10852 case OP_LCOMPARE_IMM:
10853 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10854 return OP_AMD64_COMPARE_MEMBASE_IMM;
10856 case OP_ICOMPARE_IMM:
10857 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10858 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
10862 #ifdef __mono_ilp32__
10863 if (load_opcode == OP_LOAD_MEMBASE)
10864 return OP_AMD64_ICOMPARE_MEMBASE_REG;
10865 if (load_opcode == OP_LOADI8_MEMBASE)
10867 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10869 return OP_AMD64_COMPARE_MEMBASE_REG;
10872 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10873 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Map (load_opcode, opcode) to an opcode which reads its second source
 * operand directly from memory. Callers treat -1 as "no fused form" (see the
 * op_to_op_src2_membase () != -1 check in mono_spill_global_vars ()).
 */
op_to_op_src2_membase (int load_opcode, int opcode)
	/* Only register-width loads can be fused on x86 */
	if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))

		return OP_X86_COMPARE_REG_MEMBASE;
		return OP_X86_ADD_REG_MEMBASE;
		return OP_X86_SUB_REG_MEMBASE;
		return OP_X86_AND_REG_MEMBASE;
		return OP_X86_OR_REG_MEMBASE;
		return OP_X86_XOR_REG_MEMBASE;

#ifdef TARGET_AMD64
#ifdef __mono_ilp32__
	/* Under ILP32, OP_LOAD_MEMBASE belongs with the 32 bit loads */
	if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE) ) {
	if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)) {
		return OP_AMD64_ICOMPARE_REG_MEMBASE;
		return OP_X86_ADD_REG_MEMBASE;
		return OP_X86_SUB_REG_MEMBASE;
		return OP_X86_AND_REG_MEMBASE;
		return OP_X86_OR_REG_MEMBASE;
		return OP_X86_XOR_REG_MEMBASE;
#ifdef __mono_ilp32__
	} else if (load_opcode == OP_LOADI8_MEMBASE) {
	} else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE)) {
		return OP_AMD64_COMPARE_REG_MEMBASE;
		return OP_AMD64_ADD_REG_MEMBASE;
		return OP_AMD64_SUB_REG_MEMBASE;
		return OP_AMD64_AND_REG_MEMBASE;
		return OP_AMD64_OR_REG_MEMBASE;
		return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but never convert to an _IMM form for opcodes
 * that this architecture emulates in software (long shifts on 32 bit
 * registers, mul/div where MONO_ARCH_EMULATE_MUL_DIV/DIV is set).
 * NOTE(review): the case labels filtering out the emulated opcodes are elided
 * in this excerpt — confirm against the full source.
 */
mono_op_to_op_imm_noemul (int opcode)
#if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
#if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
		/* Not emulated: the generic mapping is safe */
		return mono_op_to_op_imm (opcode);
10973 #ifndef DISABLE_JIT
10976 * mono_handle_global_vregs:
10978 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
10982 mono_handle_global_vregs (MonoCompile *cfg)
10984 gint32 *vreg_to_bb;
10985 MonoBasicBlock *bb;
10988 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
10990 #ifdef MONO_ARCH_SIMD_INTRINSICS
10991 if (cfg->uses_simd_intrinsics)
10992 mono_simd_simplify_indirection (cfg);
10995 /* Find local vregs used in more than one bb */
10996 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10997 MonoInst *ins = bb->code;
10998 int block_num = bb->block_num;
11000 if (cfg->verbose_level > 2)
11001 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
11004 for (; ins; ins = ins->next) {
11005 const char *spec = INS_INFO (ins->opcode);
11006 int regtype = 0, regindex;
11009 if (G_UNLIKELY (cfg->verbose_level > 2))
11010 mono_print_ins (ins);
11012 g_assert (ins->opcode >= MONO_CEE_LAST);
11014 for (regindex = 0; regindex < 4; regindex ++) {
11017 if (regindex == 0) {
11018 regtype = spec [MONO_INST_DEST];
11019 if (regtype == ' ')
11022 } else if (regindex == 1) {
11023 regtype = spec [MONO_INST_SRC1];
11024 if (regtype == ' ')
11027 } else if (regindex == 2) {
11028 regtype = spec [MONO_INST_SRC2];
11029 if (regtype == ' ')
11032 } else if (regindex == 3) {
11033 regtype = spec [MONO_INST_SRC3];
11034 if (regtype == ' ')
11039 #if SIZEOF_REGISTER == 4
11040 /* In the LLVM case, the long opcodes are not decomposed */
11041 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
11043 * Since some instructions reference the original long vreg,
11044 * and some reference the two component vregs, it is quite hard
11045 * to determine when it needs to be global. So be conservative.
11047 if (!get_vreg_to_inst (cfg, vreg)) {
11048 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
11050 if (cfg->verbose_level > 2)
11051 printf ("LONG VREG R%d made global.\n", vreg);
11055 * Make the component vregs volatile since the optimizations can
11056 * get confused otherwise.
11058 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
11059 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
11063 g_assert (vreg != -1);
11065 prev_bb = vreg_to_bb [vreg];
11066 if (prev_bb == 0) {
11067 /* 0 is a valid block num */
11068 vreg_to_bb [vreg] = block_num + 1;
11069 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
11070 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
11073 if (!get_vreg_to_inst (cfg, vreg)) {
11074 if (G_UNLIKELY (cfg->verbose_level > 2))
11075 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
11079 if (vreg_is_ref (cfg, vreg))
11080 mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
11082 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
11085 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
11088 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
11091 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
11094 g_assert_not_reached ();
11098 /* Flag as having been used in more than one bb */
11099 vreg_to_bb [vreg] = -1;
11105 /* If a variable is used in only one bblock, convert it into a local vreg */
11106 for (i = 0; i < cfg->num_varinfo; i++) {
11107 MonoInst *var = cfg->varinfo [i];
11108 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
11110 switch (var->type) {
11116 #if SIZEOF_REGISTER == 8
11119 #if !defined(TARGET_X86) && !defined(MONO_ARCH_SOFT_FLOAT)
11120 /* Enabling this screws up the fp stack on x86 */
11123 /* Arguments are implicitly global */
11124 /* Putting R4 vars into registers doesn't work currently */
11125 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg) {
11127 * Make that the variable's liveness interval doesn't contain a call, since
11128 * that would cause the lvreg to be spilled, making the whole optimization
11131 /* This is too slow for JIT compilation */
11133 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
11135 int def_index, call_index, ins_index;
11136 gboolean spilled = FALSE;
11141 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
11142 const char *spec = INS_INFO (ins->opcode);
11144 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
11145 def_index = ins_index;
11147 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
11148 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
11149 if (call_index > def_index) {
11155 if (MONO_IS_CALL (ins))
11156 call_index = ins_index;
11166 if (G_UNLIKELY (cfg->verbose_level > 2))
11167 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
11168 var->flags |= MONO_INST_IS_DEAD;
11169 cfg->vreg_to_inst [var->dreg] = NULL;
11176 * Compress the varinfo and vars tables so the liveness computation is faster and
11177 * takes up less space.
11180 for (i = 0; i < cfg->num_varinfo; ++i) {
11181 MonoInst *var = cfg->varinfo [i];
11182 if (pos < i && cfg->locals_start == i)
11183 cfg->locals_start = pos;
11184 if (!(var->flags & MONO_INST_IS_DEAD)) {
11186 cfg->varinfo [pos] = cfg->varinfo [i];
11187 cfg->varinfo [pos]->inst_c0 = pos;
11188 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
11189 cfg->vars [pos].idx = pos;
11190 #if SIZEOF_REGISTER == 4
11191 if (cfg->varinfo [pos]->type == STACK_I8) {
11192 /* Modify the two component vars too */
11195 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
11196 var1->inst_c0 = pos;
11197 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
11198 var1->inst_c0 = pos;
11205 cfg->num_varinfo = pos;
11206 if (cfg->locals_start > cfg->num_varinfo)
11207 cfg->locals_start = cfg->num_varinfo;
11211 * mono_spill_global_vars:
11213 * Generate spill code for variables which are not allocated to registers,
11214 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
11215 * code is generated which could be optimized by the local optimization passes.
/* See the header comment above: emit spill loads/stores for stack-allocated
 * variables, replace vregs with hregs, and record instruction-precise live
 * ranges while walking every instruction of every bblock. */
mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
	MonoBasicBlock *bb;
	int orig_next_vreg;
	guint32 *vreg_to_lvreg;
	guint32 i, lvregs_len;
	gboolean dest_has_lvreg = FALSE;
	guint32 stacktypes [128];
	MonoInst **live_range_start, **live_range_end;
	MonoBasicBlock **live_range_start_bb, **live_range_end_bb;

	*need_local_opts = FALSE;

	memset (spec2, 0, sizeof (spec2));

	/* FIXME: Move this function to mini.c */

	/* Map ins-spec regtype characters to stack types for alloc_dreg () */
	stacktypes ['i'] = STACK_PTR;
	stacktypes ['l'] = STACK_I8;
	stacktypes ['f'] = STACK_R8;
#ifdef MONO_ARCH_SIMD_INTRINSICS
	stacktypes ['x'] = STACK_VTYPE;

#if SIZEOF_REGISTER == 4
	/* Create MonoInsts for longs */
	for (i = 0; i < cfg->num_varinfo; i++) {
		MonoInst *ins = cfg->varinfo [i];

		if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
			switch (ins->type) {
			if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
			g_assert (ins->opcode == OP_REGOFFSET);
			/* Give the two component vregs REGOFFSETs into the same slot */
			tree = get_vreg_to_inst (cfg, ins->dreg + 1);
			tree->opcode = OP_REGOFFSET;
			tree->inst_basereg = ins->inst_basereg;
			tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
			tree = get_vreg_to_inst (cfg, ins->dreg + 2);
			tree->opcode = OP_REGOFFSET;
			tree->inst_basereg = ins->inst_basereg;
			tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;

	if (cfg->compute_gc_maps) {
		/* registers need liveness info even for !non refs */
		for (i = 0; i < cfg->num_varinfo; i++) {
			MonoInst *ins = cfg->varinfo [i];

			if (ins->opcode == OP_REGVAR)
				ins->flags |= MONO_INST_GC_TRACK;

	/* FIXME: widening and truncation */

	/*
	 * As an optimization, when a variable allocated to the stack is first loaded into
	 * an lvreg, we will remember the lvreg and use it the next time instead of loading
	 * the variable again.
	 */
	orig_next_vreg = cfg->next_vreg;
	vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
	lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);

	/*
	 * These arrays contain the first and last instructions accessing a given
	 * variable.
	 * Since we emit bblocks in the same order we process them here, and we
	 * don't split live ranges, these will precisely describe the live range of
	 * the variable, i.e. the instruction range where a valid value can be found
	 * in the variables location.
	 * The live range is computed using the liveness info computed by the liveness pass.
	 * We can't use vmv->range, since that is an abstract live range, and we need
	 * one which is instruction precise.
	 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
	 */
	/* FIXME: Only do this if debugging info is requested */
	live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
	live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
	live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
	live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);

	/* Add spill loads/stores */
	for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {

		if (cfg->verbose_level > 2)
			printf ("\nSPILL BLOCK %d:\n", bb->block_num);

		/* Clear vreg_to_lvreg array */
		for (i = 0; i < lvregs_len; i++)
			vreg_to_lvreg [lvregs [i]] = 0;

		MONO_BB_FOR_EACH_INS (bb, ins) {
			const char *spec = INS_INFO (ins->opcode);
			int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
			gboolean store, no_lvreg;
			int sregs [MONO_MAX_SRC_REGS];

			if (G_UNLIKELY (cfg->verbose_level > 2))
				mono_print_ins (ins);

			if (ins->opcode == OP_NOP)

			/*
			 * We handle LDADDR here as well, since it can only be decomposed
			 * when variable addresses are known.
			 */
			if (ins->opcode == OP_LDADDR) {
				MonoInst *var = ins->inst_p0;

				if (var->opcode == OP_VTARG_ADDR) {
					/* Happens on SPARC/S390 where vtypes are passed by reference */
					MonoInst *vtaddr = var->inst_left;
					if (vtaddr->opcode == OP_REGVAR) {
						ins->opcode = OP_MOVE;
						ins->sreg1 = vtaddr->dreg;
					/* NOTE: var->inst_left is vtaddr, so this is the same object */
					else if (var->inst_left->opcode == OP_REGOFFSET) {
						ins->opcode = OP_LOAD_MEMBASE;
						ins->inst_basereg = vtaddr->inst_basereg;
						ins->inst_offset = vtaddr->inst_offset;
					g_assert (var->opcode == OP_REGOFFSET);

					/* LDADDR becomes base + offset arithmetic */
					ins->opcode = OP_ADD_IMM;
					ins->sreg1 = var->inst_basereg;
					ins->inst_imm = var->inst_offset;

				*need_local_opts = TRUE;
				spec = INS_INFO (ins->opcode);

			/* IR-level opcodes must have been decomposed by now */
			if (ins->opcode < MONO_CEE_LAST) {
				mono_print_ins (ins);
				g_assert_not_reached ();

			/*
			 * Store opcodes have destbasereg in the dreg, but in reality, it is an
			 * sreg, so swap it into sreg2 (and build a matching spec) while the
			 * registers are processed, and swap it back afterwards.
			 */
			if (MONO_IS_STORE_MEMBASE (ins)) {
				tmp_reg = ins->dreg;
				ins->dreg = ins->sreg2;
				ins->sreg2 = tmp_reg;

				spec2 [MONO_INST_DEST] = ' ';
				spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
				spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
				spec2 [MONO_INST_SRC3] = ' ';
			} else if (MONO_IS_STORE_MEMINDEX (ins))
				g_assert_not_reached ();

			if (G_UNLIKELY (cfg->verbose_level > 2)) {
				printf ("\t %.3s %d", spec, ins->dreg);
				num_sregs = mono_inst_get_src_registers (ins, sregs);
				for (srcindex = 0; srcindex < 3; ++srcindex)
					printf (" %d", sregs [srcindex]);

			/* DREG: spill the definition if the dreg is a global variable */
			regtype = spec [MONO_INST_DEST];
			g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));

			if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
				MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
				MonoInst *store_ins;
				MonoInst *def_ins = ins;
				int dreg = ins->dreg; /* The original vreg */

				store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);

				if (var->opcode == OP_REGVAR) {
					/* Variable lives in a hardware register: just rename */
					ins->dreg = var->dreg;
				} else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
					/*
					 * Instead of emitting a load+store, use a _membase opcode.
					 */
					g_assert (var->opcode == OP_REGOFFSET);
					if (ins->opcode == OP_MOVE) {
					ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
					ins->inst_basereg = var->inst_basereg;
					ins->inst_offset = var->inst_offset;
					spec = INS_INFO (ins->opcode);
					g_assert (var->opcode == OP_REGOFFSET);

					prev_dreg = ins->dreg;

					/* Invalidate any previous lvreg for this vreg */
					vreg_to_lvreg [ins->dreg] = 0;

					if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
						/* Soft-float keeps R8 values in integer registers */
						store_opcode = OP_STOREI8_MEMBASE_REG;

					ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);

					if (regtype == 'l') {
						/* Long values are stored as two 32 bit words */
						NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
						mono_bblock_insert_after_ins (bb, ins, store_ins);
						NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
						mono_bblock_insert_after_ins (bb, ins, store_ins);
						def_ins = store_ins;
						g_assert (store_opcode != OP_STOREV_MEMBASE);

						/* Try to fuse the store into the instruction itself */
						/* FIXME: Add more instructions */
						if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
							ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
							ins->inst_imm = ins->inst_c0;
							ins->inst_destbasereg = var->inst_basereg;
							ins->inst_offset = var->inst_offset;
							spec = INS_INFO (ins->opcode);
						} else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
							/* A move becomes a plain store into the variable's slot */
							ins->opcode = store_opcode;
							ins->inst_destbasereg = var->inst_basereg;
							ins->inst_offset = var->inst_offset;

							/* The instruction is now a store: swap dreg/sreg2 (see above) */
							tmp_reg = ins->dreg;
							ins->dreg = ins->sreg2;
							ins->sreg2 = tmp_reg;

							spec2 [MONO_INST_DEST] = ' ';
							spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
							spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
							spec2 [MONO_INST_SRC3] = ' ';
						} else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
							// FIXME: The backends expect the base reg to be in inst_basereg
							ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
							ins->inst_basereg = var->inst_basereg;
							ins->inst_offset = var->inst_offset;
							spec = INS_INFO (ins->opcode);
							/* printf ("INS: "); mono_print_ins (ins); */
							/* Create a store instruction */
							NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);

							/* Insert it after the instruction */
							mono_bblock_insert_after_ins (bb, ins, store_ins);

							def_ins = store_ins;

							/*
							 * We can't assign ins->dreg to var->dreg here, since the
							 * sregs could use it. So set a flag, and do it after
							 * the sregs have been processed.
							 */
							if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
								dest_has_lvreg = TRUE;

				if (def_ins && !live_range_start [dreg]) {
					live_range_start [dreg] = def_ins;
					live_range_start_bb [dreg] = bb;

				if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
					MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
					tmp->inst_c1 = dreg;
					mono_bblock_insert_after_ins (bb, def_ins, tmp);

			/* SREGS: load each global-variable source before the instruction */
			num_sregs = mono_inst_get_src_registers (ins, sregs);
			for (srcindex = 0; srcindex < 3; ++srcindex) {
				regtype = spec [MONO_INST_SRC1 + srcindex];
				sreg = sregs [srcindex];

				g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
				if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
					MonoInst *var = get_vreg_to_inst (cfg, sreg);
					MonoInst *use_ins = ins;
					MonoInst *load_ins;
					guint32 load_opcode;

					if (var->opcode == OP_REGVAR) {
						sregs [srcindex] = var->dreg;
						//mono_inst_set_src_registers (ins, sregs);
						live_range_end [sreg] = use_ins;
						live_range_end_bb [sreg] = bb;

						if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
							MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
							/* var->dreg is a hreg */
							tmp->inst_c1 = sreg;
							mono_bblock_insert_after_ins (bb, ins, tmp);

					g_assert (var->opcode == OP_REGOFFSET);

					load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);

					g_assert (load_opcode != OP_LOADV_MEMBASE);

					if (vreg_to_lvreg [sreg]) {
						g_assert (vreg_to_lvreg [sreg] != -1);

						/* The variable is already loaded to an lvreg */
						if (G_UNLIKELY (cfg->verbose_level > 2))
							printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
						sregs [srcindex] = vreg_to_lvreg [sreg];
						//mono_inst_set_src_registers (ins, sregs);

					/* Try to fuse the load into the instruction */
					if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
						ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
						sregs [0] = var->inst_basereg;
						//mono_inst_set_src_registers (ins, sregs);
						ins->inst_offset = var->inst_offset;
					} else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
						ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
						sregs [1] = var->inst_basereg;
						//mono_inst_set_src_registers (ins, sregs);
						ins->inst_offset = var->inst_offset;

						if (MONO_IS_REAL_MOVE (ins)) {
							ins->opcode = OP_NOP;

						//printf ("%d ", srcindex); mono_print_ins (ins);

						sreg = alloc_dreg (cfg, stacktypes [regtype]);

						if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
							if (var->dreg == prev_dreg) {
								/*
								 * sreg refers to the value loaded by the load
								 * emitted below, but we need to use ins->dreg
								 * since it refers to the store emitted earlier.
								 */
							g_assert (sreg != -1);
							vreg_to_lvreg [var->dreg] = sreg;
							g_assert (lvregs_len < 1024);
							lvregs [lvregs_len ++] = var->dreg;

						sregs [srcindex] = sreg;
						//mono_inst_set_src_registers (ins, sregs);

						if (regtype == 'l') {
							/* Longs are loaded as two 32 bit words, MS word first */
							NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
							mono_bblock_insert_before_ins (bb, ins, load_ins);
							NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
							mono_bblock_insert_before_ins (bb, ins, load_ins);
							use_ins = load_ins;
#if SIZEOF_REGISTER == 4
							g_assert (load_opcode != OP_LOADI8_MEMBASE);
							NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
							mono_bblock_insert_before_ins (bb, ins, load_ins);
							use_ins = load_ins;

					if (var->dreg < orig_next_vreg) {
						live_range_end [var->dreg] = use_ins;
						live_range_end_bb [var->dreg] = bb;

					if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
						MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
						tmp->inst_c1 = var->dreg;
						mono_bblock_insert_after_ins (bb, ins, tmp);
			mono_inst_set_src_registers (ins, sregs);

			/* Now that the sregs are done, the dreg's lvreg can be recorded */
			if (dest_has_lvreg) {
				g_assert (ins->dreg != -1);
				vreg_to_lvreg [prev_dreg] = ins->dreg;
				g_assert (lvregs_len < 1024);
				lvregs [lvregs_len ++] = prev_dreg;
				dest_has_lvreg = FALSE;

				/* Undo the dreg/sreg2 swap done for store opcodes above */
				tmp_reg = ins->dreg;
				ins->dreg = ins->sreg2;
				ins->sreg2 = tmp_reg;

			if (MONO_IS_CALL (ins)) {
				/* Clear vreg_to_lvreg array: lvregs don't survive a call */
				for (i = 0; i < lvregs_len; i++)
					vreg_to_lvreg [lvregs [i]] = 0;
			} else if (ins->opcode == OP_NOP) {
				MONO_INST_NULLIFY_SREGS (ins);

			if (cfg->verbose_level > 2)
				mono_print_ins_index (1, ins);

		/* Extend the live range based on the liveness info */
		if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
			for (i = 0; i < cfg->num_varinfo; i ++) {
				MonoMethodVar *vi = MONO_VARINFO (cfg, i);

				if (vreg_is_volatile (cfg, vi->vreg))
					/* The liveness info is incomplete */

				if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
					/* Live from at least the first ins of this bb */
					live_range_start [vi->vreg] = bb->code;
					live_range_start_bb [vi->vreg] = bb;

				if (mono_bitset_test_fast (bb->live_out_set, i)) {
					/* Live at least until the last ins of this bb */
					live_range_end [vi->vreg] = bb->last_ins;
					live_range_end_bb [vi->vreg] = bb;

#ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
	/*
	 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
	 * by storing the current native offset into MonoMethodVar->live_range_start/end.
	 */
	if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
		for (i = 0; i < cfg->num_varinfo; ++i) {
			int vreg = MONO_VARINFO (cfg, i)->vreg;

			if (live_range_start [vreg]) {
				MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
				ins->inst_c1 = vreg;
				mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
			if (live_range_end [vreg]) {
				MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
				ins->inst_c1 = vreg;
				if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
					mono_add_ins_to_end (live_range_end_bb [vreg], ins);
					mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);

	g_free (live_range_start);
	g_free (live_range_end);
	g_free (live_range_start_bb);
	g_free (live_range_end_bb);
11751 * - use 'iadd' instead of 'int_add'
11752 * - handling ovf opcodes: decompose in method_to_ir.
11753 * - unify iregs/fregs
11754 * -> partly done, the missing parts are:
11755 * - a more complete unification would involve unifying the hregs as well, so
11756 * code wouldn't need if (fp) all over the place. but that would mean the hregs
11757 * would no longer map to the machine hregs, so the code generators would need to
11758 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
11759 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
11760 * fp/non-fp branches speeds it up by about 15%.
11761 * - use sext/zext opcodes instead of shifts
11763 * - get rid of TEMPLOADs if possible and use vregs instead
11764 * - clean up usage of OP_P/OP_ opcodes
11765 * - cleanup usage of DUMMY_USE
11766 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
11768 * - set the stack type and allocate a dreg in the EMIT_NEW macros
11769 * - get rid of all the <foo>2 stuff when the new JIT is ready.
11770 * - make sure handle_stack_args () is called before the branch is emitted
11771 * - when the new IR is done, get rid of all unused stuff
11772 * - COMPARE/BEQ as separate instructions or unify them ?
11773 * - keeping them separate allows specialized compare instructions like
11774 * compare_imm, compare_membase
11775 * - most back ends unify fp compare+branch, fp compare+ceq
11776 * - integrate mono_save_args into inline_method
 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
11778 * - handle long shift opts on 32 bit platforms somehow: they require
11779 * 3 sregs (2 for arg1 and 1 for arg2)
11780 * - make byref a 'normal' type.
11781 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
11782 * variable if needed.
11783 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
11784 * like inline_method.
11785 * - remove inlining restrictions
11786 * - fix LNEG and enable cfold of INEG
11787 * - generalize x86 optimizations like ldelema as a peephole optimization
11788 * - add store_mem_imm for amd64
11789 * - optimize the loading of the interruption flag in the managed->native wrappers
11790 * - avoid special handling of OP_NOP in passes
11791 * - move code inserting instructions into one function/macro.
11792 * - try a coalescing phase after liveness analysis
11793 * - add float -> vreg conversion + local optimizations on !x86
11794 * - figure out how to handle decomposed branches during optimizations, ie.
11795 * compare+branch, op_jump_table+op_br etc.
11796 * - promote RuntimeXHandles to vregs
11797 * - vtype cleanups:
11798 * - add a NEW_VARLOADA_VREG macro
11799 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
11800 * accessing vtype fields.
11801 * - get rid of I8CONST on 64 bit platforms
11802 * - dealing with the increase in code size due to branches created during opcode
11804 * - use extended basic blocks
11805 * - all parts of the JIT
11806 * - handle_global_vregs () && local regalloc
11807 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
11808 * - sources of increase in code size:
11811 * - isinst and castclass
11812 * - lvregs not allocated to global registers even if used multiple times
11813 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
11815 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
11816 * - add all micro optimizations from the old JIT
11817 * - put tree optimizations into the deadce pass
11818 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
11819 * specific function.
11820 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
11821 * fcompare + branchCC.
11822 * - create a helper function for allocating a stack slot, taking into account
11823 * MONO_CFG_HAS_SPILLUP.
11825 * - merge the ia64 switch changes.
11826 * - optimize mono_regstate2_alloc_int/float.
11827 * - fix the pessimistic handling of variables accessed in exception handler blocks.
11828 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
11829 * parts of the tree could be separated by other instructions, killing the tree
11830 * arguments, or stores killing loads etc. Also, should we fold loads into other
11831 * instructions if the result of the load is used multiple times ?
11832 * - make the REM_IMM optimization in mini-x86.c arch-independent.
11833 * - LAST MERGE: 108395.
11834 * - when returning vtypes in registers, generate IR and append it to the end of the
11835 * last bb instead of doing it in the epilog.
11836 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
11844 - When to decompose opcodes:
11845 - earlier: this makes some optimizations hard to implement, since the low level IR
    no longer contains the necessary information. But it is easier to do.
11847 - later: harder to implement, enables more optimizations.
11848 - Branches inside bblocks:
11849 - created when decomposing complex opcodes.
11850 - branches to another bblock: harmless, but not tracked by the branch
11851 optimizations, so need to branch to a label at the start of the bblock.
11852 - branches to inside the same bblock: very problematic, trips up the local
    reg allocator. Can be fixed by splitting the current bblock, but that is a
11854 complex operation, since some local vregs can become global vregs etc.
11855 - Local/global vregs:
11856 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
11857 local register allocator.
11858 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
11859 structure, created by mono_create_var (). Assigned to hregs or the stack by
11860 the global register allocator.
11861 - When to do optimizations like alu->alu_imm:
11862 - earlier -> saves work later on since the IR will be smaller/simpler
11863 - later -> can work on more instructions
11864 - Handling of valuetypes:
11865 - When a vtype is pushed on the stack, a new temporary is created, an
11866 instruction computing its address (LDADDR) is emitted and pushed on
11867 the stack. Need to optimize cases when the vtype is used immediately as in
11868 argument passing, stloc etc.
11869 - Instead of the to_end stuff in the old JIT, simply call the function handling
11870 the values on the stack before emitting the last instruction of the bb.
11873 #endif /* DISABLE_JIT */