2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
20 #ifdef HAVE_SYS_TIME_H
24 #ifdef HAVE_VALGRIND_MEMCHECK_H
25 #include <valgrind/memcheck.h>
28 #include <mono/metadata/assembly.h>
29 #include <mono/metadata/loader.h>
30 #include <mono/metadata/tabledefs.h>
31 #include <mono/metadata/class.h>
32 #include <mono/metadata/object.h>
33 #include <mono/metadata/exception.h>
34 #include <mono/metadata/opcodes.h>
35 #include <mono/metadata/mono-endian.h>
36 #include <mono/metadata/tokentype.h>
37 #include <mono/metadata/tabledefs.h>
38 #include <mono/metadata/threads.h>
39 #include <mono/metadata/marshal.h>
40 #include <mono/metadata/socket-io.h>
41 #include <mono/metadata/appdomain.h>
42 #include <mono/metadata/debug-helpers.h>
43 #include <mono/io-layer/io-layer.h>
44 #include "mono/metadata/profiler.h"
45 #include <mono/metadata/profiler-private.h>
46 #include <mono/metadata/mono-config.h>
47 #include <mono/metadata/environment.h>
48 #include <mono/metadata/mono-debug.h>
49 #include <mono/metadata/mono-debug-debugger.h>
50 #include <mono/metadata/monitor.h>
51 #include <mono/metadata/gc-internal.h>
52 #include <mono/metadata/security-manager.h>
53 #include <mono/metadata/threads-types.h>
54 #include <mono/metadata/rawbuffer.h>
55 #include <mono/metadata/security-core-clr.h>
56 #include <mono/utils/mono-math.h>
57 #include <mono/utils/mono-compiler.h>
58 #include <mono/os/gc_wrapper.h>
68 #include "jit-icalls.h"
/*
 * Inliner tuning knobs and error-reporting macros used throughout
 * mono_method_to_ir2 (). NOTE(review): this is a sampled view; several
 * macro bodies are truncated, so comments describe only visible lines.
 */
72 #define BRANCH_COST 100
73 #define INLINE_LENGTH_LIMIT 20
/* Record an inline failure; only meaningful when compiling an inlined
 * callee (cfg->method != method) that is not a wrapper. */
74 #define INLINE_FAILURE do {\
75 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE))\
/* Abort IR generation if an exception was already flagged on the cfg. */ \
78 #define CHECK_CFG_EXCEPTION do {\
79 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
/* Flag a MethodAccessException on the cfg with a readable message, free the temp names, and bail. */ \
82 #define METHOD_ACCESS_FAILURE do { \
83 char *method_fname = mono_method_full_name (method, TRUE); \
84 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
85 cfg->exception_type = MONO_EXCEPTION_METHOD_ACCESS; \
86 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
87 g_free (method_fname); \
88 g_free (cil_method_fname); \
89 goto exception_exit; \
/* Same shape as METHOD_ACCESS_FAILURE, for an inaccessible field. */ \
91 #define FIELD_ACCESS_FAILURE do { \
92 char *method_fname = mono_method_full_name (method, TRUE); \
93 char *field_fname = mono_field_full_name (field); \
94 cfg->exception_type = MONO_EXCEPTION_FIELD_ACCESS; \
95 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
96 g_free (method_fname); \
97 g_free (field_fname); \
98 goto exception_exit; \
/* Give up on generic code sharing for this method/opcode and bail out of the compile. */ \
100 #define GENERIC_SHARING_FAILURE(opcode) do { \
101 if (cfg->generic_sharing_context) { \
102 if (cfg->verbose_level > 1) \
103 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
104 cfg->exception_type = MONO_EXCEPTION_GENERIC_SHARING_FAILED; \
105 goto exception_exit; \
/* Valuetype methods are not shareable; fail sharing for them up front. */ \
108 #define GENERIC_SHARING_FAILURE_IF_VALUETYPE_METHOD(opcode) do { \
109 if (method->klass->valuetype) \
110 GENERIC_SHARING_FAILURE ((opcode)); \
113 /* Determine whenever 'ins' represents a load of the 'this' argument */
114 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
116 static int ldind_to_load_membase (int opcode);
117 static int stind_to_store_membase (int opcode);
119 int mono_op_to_op_imm (int opcode);
120 int mono_op_to_op_imm_noemul (int opcode);
122 MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
123 void mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native);
124 void mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass);
126 int mono_method_to_ir2 (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
127 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
128 guint inline_offset, gboolean is_virtual_call);
130 /* helper methods signature */
131 extern MonoMethodSignature *helper_sig_class_init_trampoline;
132 extern MonoMethodSignature *helper_sig_domain_get;
133 extern MonoMethodSignature *helper_sig_generic_class_init_trampoline;
134 extern MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
137 * Instruction metadata
142 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2,
147 #if SIZEOF_VOID_P == 8
152 /* keep in sync with the enum in mini.h */
155 #include "mini-ops.h"
159 extern GHashTable *jit_icall_name_hash;
161 #define MONO_INIT_VARINFO(vi,id) do { \
162 (vi)->range.first_use.pos.bid = 0xffff; \
/* Public wrappers over the static alloc_* helpers (defined elsewhere in
 * the JIT -- presumably mini.h; verify): allocate a fresh integer vreg. */
168 mono_alloc_ireg (MonoCompile *cfg)
170 return alloc_ireg (cfg);
/* Allocate a fresh floating point vreg. */
174 mono_alloc_freg (MonoCompile *cfg)
176 return alloc_freg (cfg);
/* Allocate a fresh pointer-sized vreg. */
180 mono_alloc_preg (MonoCompile *cfg)
182 return alloc_preg (cfg);
/* Allocate a destination vreg suited to the given eval-stack type. */
186 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
188 return alloc_dreg (cfg, stack_type);
/*
 * mono_type_to_regmove:
 * Map a MonoType to the opcode used to move a value of that type between
 * registers. Enums and generic instances are unwrapped to their
 * underlying/container type first (see the VALUETYPE/GENERICINST cases).
 * NOTE(review): many case labels and return statements fall outside this
 * sampled view.
 */
192 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
198 switch (type->type) {
201 case MONO_TYPE_BOOLEAN:
213 case MONO_TYPE_FNPTR:
215 case MONO_TYPE_CLASS:
216 case MONO_TYPE_STRING:
217 case MONO_TYPE_OBJECT:
218 case MONO_TYPE_SZARRAY:
219 case MONO_TYPE_ARRAY:
223 #if SIZEOF_VOID_P == 8
232 case MONO_TYPE_VALUETYPE:
/* An enum moves like its underlying integral type. */
233 if (type->data.klass->enumtype) {
234 type = type->data.klass->enum_basetype;
238 case MONO_TYPE_TYPEDBYREF:
240 case MONO_TYPE_GENERICINST:
/* Re-dispatch on the generic container's open type. */
241 type = &type->data.generic_class->container_class->byval_arg;
/* Type variables are only legal here under generic sharing. */
245 g_assert (cfg->generic_sharing_context);
248 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/*
 * mono_print_bb:
 * Debug helper: print a basic block's number, its in/out edges (block
 * number plus depth-first number) and every instruction in its body.
 */
254 mono_print_bb (MonoBasicBlock *bb, const char *msg)
259 printf ("\n%s %d: [IN: ", msg, bb->block_num);
260 for (i = 0; i < bb->in_count; ++i)
261 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
/* Outgoing edges. */
263 for (i = 0; i < bb->out_count; ++i)
264 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
/* Instruction listing (-1 presumably suppresses the index -- verify
 * against mono_print_ins_index). */
266 for (tree = bb->code; tree; tree = tree->next)
267 mono_print_ins_index (-1, tree);
/* On unverified IL: break into the debugger when requested, otherwise
 * jump to the shared 'unverified' exit label. */
270 #define UNVERIFIED do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto unverified; } while (0)
/* Look up (or lazily create and register) the basic block starting at IL
 * offset 'ip'; an ip outside the method body is unverifiable. */
272 #define GET_BBLOCK(cfg,tblock,ip) do { \
273 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
275 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
276 NEW_BBLOCK (cfg, (tblock)); \
277 (tblock)->cil_code = (ip); \
278 ADD_BBLOCK (cfg, (tblock)); \
/* Default (arch-independent) bounds check: load the array length and \
 * raise IndexOutOfRangeException when length <= index (unsigned). */ \
282 #ifndef MONO_ARCH_EMIT_BOUNDS_CHECK
283 #define MONO_ARCH_EMIT_BOUNDS_CHECK(cfg, array_reg, offset, index_reg) do { \
284 int _length_reg = alloc_ireg (cfg); \
285 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, _length_reg, array_reg, offset); \
286 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, _length_reg, index_reg); \
287 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException"); \
/* With MONO_OPT_ABCREM enabled, emit an abstract OP_BOUNDS_CHECK instead \
 * so the array-bounds-check-removal pass can eliminate redundant checks. */ \
291 #define MONO_EMIT_BOUNDS_CHECK(cfg, array_reg, array_type, array_length_field, index_reg) do { \
292 if (!(cfg->opt & MONO_OPT_ABCREM)) { \
293 MONO_ARCH_EMIT_BOUNDS_CHECK ((cfg), (array_reg), G_STRUCT_OFFSET (array_type, array_length_field), (index_reg)); \
296 MONO_INST_NEW ((cfg), ins, OP_BOUNDS_CHECK); \
297 ins->sreg1 = array_reg; \
298 ins->sreg2 = index_reg; \
299 ins->inst_imm = G_STRUCT_OFFSET (array_type, array_length_field); \
300 MONO_ADD_INS ((cfg)->cbb, ins); \
301 (cfg)->flags |= MONO_CFG_HAS_ARRAY_ACCESS; \
302 (cfg)->cbb->has_array_access = TRUE; \
/* x86/amd64 only: emit an OP_X86_LEA computing sr1 + (sr2 << shift) + imm \
 * into a freshly allocated pointer register. */ \
306 #if defined(__i386__) || defined(__x86_64__)
307 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
308 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
309 (dest)->dreg = alloc_preg ((cfg)); \
310 (dest)->sreg1 = (sr1); \
311 (dest)->sreg2 = (sr2); \
312 (dest)->inst_imm = (imm); \
313 (dest)->backend.shift_amount = (shift); \
314 MONO_ADD_INS ((cfg)->cbb, (dest)); \
/* On 64 bit targets: sign-extend a 32 bit operand (OP_SEXT_I4) when it is \
 * mixed with a native-int operand; expands to nothing on 32 bit targets. */ \
318 #if SIZEOF_VOID_P == 8
319 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
320 /* FIXME: Need to add many more cases */ \
321 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
323 int dr = alloc_preg (cfg); \
324 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
325 (ins)->sreg2 = widen->dreg; \
329 #define ADD_WIDEN_OP(ins, arg1, arg2)
/* Pop two stack values, emit the type-specialized binary op and push the
 * result; type_from_op () validates the operand types and rewrites the
 * generic opcode into its I/L/P/F variant. */
332 #define ADD_BINOP(op) do { \
333 MONO_INST_NEW (cfg, ins, (op)); \
335 ins->sreg1 = sp [0]->dreg; \
336 ins->sreg2 = sp [1]->dreg; \
337 type_from_op (ins, sp [0], sp [1]); \
339 /* Have to insert a widening op */ \
340 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
341 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
342 MONO_ADD_INS ((cfg)->cbb, (ins)); \
344 mono_decompose_opcode ((cfg), (ins)); \
/* Unary counterpart of ADD_BINOP: one operand popped, result pushed. */ \
347 #define ADD_UNOP(op) do { \
348 MONO_INST_NEW (cfg, ins, (op)); \
350 ins->sreg1 = sp [0]->dreg; \
351 type_from_op (ins, sp [0], NULL); \
353 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
354 MONO_ADD_INS ((cfg)->cbb, (ins)); \
356 mono_decompose_opcode (cfg, ins); \
/* Emit compare + conditional branch: the true edge goes to the IL branch \
 * target, the false edge to next_block (or the fall-through bblock at \
 * 'ip'). Spills live eval-stack entries first via handle_stack_args (). */ \
359 #define ADD_BINCOND(next_block) do { \
362 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
363 cmp->sreg1 = sp [0]->dreg; \
364 cmp->sreg2 = sp [1]->dreg; \
365 type_from_op (cmp, sp [0], sp [1]); \
367 type_from_op (ins, sp [0], sp [1]); \
368 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
369 GET_BBLOCK (cfg, tblock, target); \
370 link_bblock (cfg, bblock, tblock); \
371 ins->inst_true_bb = tblock; \
372 if ((next_block)) { \
373 link_bblock (cfg, bblock, (next_block)); \
374 ins->inst_false_bb = (next_block); \
375 start_new_bblock = 1; \
377 GET_BBLOCK (cfg, tblock, ip); \
378 link_bblock (cfg, bblock, tblock); \
379 ins->inst_false_bb = tblock; \
380 start_new_bblock = 2; \
/* Save live stack slots to carrier locals at this block boundary. */ \
382 if (sp != stack_start) { \
383 handle_stack_args (cfg, stack_start, sp - stack_start); \
384 CHECK_UNVERIFIABLE (cfg); \
386 MONO_ADD_INS (bblock, cmp); \
387 MONO_ADD_INS (bblock, ins); \
391 * link_bblock: Links two basic blocks
393 * links two basic blocks in the control flow graph, the 'from'
394 * argument is the starting block and the 'to' argument is the block
395 * the control flow ends to after 'from'.
398 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
400 MonoBasicBlock **newa;
/* Optional edge tracing; a NULL cil_code apparently marks the synthetic
 * entry/exit blocks (see the "entry"/"exit" messages below). */
404 if (from->cil_code) {
406 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
408 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
411 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
413 printf ("edge from entry to exit\n");
/* Already linked? Then the out-edge list needs no update. */
418 for (i = 0; i < from->out_count; ++i) {
419 if (to == from->out_bb [i]) {
/* Grow from->out_bb by one entry; mempool arrays cannot be resized in
 * place, so copy into a fresh allocation. */
425 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
426 for (i = 0; i < from->out_count; ++i) {
427 newa [i] = from->out_bb [i];
/* Mirror the same dedup-and-grow logic for to->in_bb. */
435 for (i = 0; i < to->in_count; ++i) {
436 if (from == to->in_bb [i]) {
442 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
443 for (i = 0; i < to->in_count; ++i) {
444 newa [i] = to->in_bb [i];
/* Public wrapper: expose the static link_bblock () to other JIT modules. */
453 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
455 link_bblock (cfg, from, to);
459 * mono_find_block_region:
461 * We mark each basic block with a region ID. We use that to avoid BB
462 * optimizations when blocks are in different regions.
465 * A region token that encodes where this region is, and information
466 * about the clause owner for this block.
468 * The region encodes the try/catch/filter clause that owns this block
469 * as well as the type. -1 is a special value that represents a block
470 * that is in none of try/catch/filter.
473 mono_find_block_region (MonoCompile *cfg, int offset)
475 MonoMethod *method = cfg->method;
476 MonoMethodHeader *header = mono_method_get_header (method);
477 MonoExceptionClause *clause;
480 /* first search for handlers and filters */
481 for (i = 0; i < header->num_clauses; ++i) {
482 clause = &header->clauses [i];
/* A filter region spans [filter_offset, handler_offset). */
483 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
484 (offset < (clause->handler_offset)))
485 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
487 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
/* Token layout (from the expressions below): 1-based clause index in
 * bits 8+, region kind and clause flags in the low byte. */
488 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
489 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
490 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
491 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
493 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
497 /* search the try blocks */
498 for (i = 0; i < header->num_clauses; ++i) {
499 clause = &header->clauses [i];
500 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
501 return ((i + 1) << 8) | clause->flags;
/*
 * mono_find_final_block:
 * Collect the handler basic blocks of every EH clause of kind 'type'
 * that contains 'ip' but not 'target' -- i.e. the handlers control must
 * pass through when branching from ip out to target (presumably used
 * for leave/endfinally handling -- verify against callers).
 * Returns a GList of MonoBasicBlock*.
 */
508 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
510 MonoMethod *method = cfg->method;
511 MonoMethodHeader *header = mono_method_get_header (method);
512 MonoExceptionClause *clause;
513 MonoBasicBlock *handler;
517 for (i = 0; i < header->num_clauses; ++i) {
518 clause = &header->clauses [i];
/* The branch leaves this clause: its handler must be visited. */
519 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
520 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
521 if (clause->flags == type) {
522 handler = cfg->cil_offset_to_bb [clause->handler_offset];
524 res = g_list_append (res, handler);
/*
 * mono_create_spvar_for_region:
 * Return the stack-pointer spill variable for the given EH region,
 * creating it and caching it in cfg->spvars on first use.
 */
532 mono_create_spvar_for_region (MonoCompile *cfg, int region)
536 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
/* Not cached yet: create an int-sized local keyed by the region id. */
540 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
541 /* prevent it from being register allocated */
542 var->flags |= MONO_INST_INDIRECT;
544 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/* Return the exception-object variable for the handler at 'offset', or
 * NULL if none was created yet (see mono_create_exvar_for_offset). */
548 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
550 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/*
 * mono_create_exvar_for_offset:
 * Return the exception-object variable (an object local) for the handler
 * at 'offset', creating it and caching it in cfg->exvars on first use.
 */
554 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
558 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
562 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
563 /* prevent it from being register allocated */
564 var->flags |= MONO_INST_INDIRECT;
566 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
572 * Returns the type used in the eval stack when @type is loaded.
573 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
576 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
/* Byref values sit on the eval stack as managed pointers. */
581 inst->type = STACK_MP;
582 inst->klass = mono_defaults.object_class;
586 inst->klass = klass = mono_class_from_mono_type (type);
589 switch (type->type) {
591 inst->type = STACK_INV;
595 case MONO_TYPE_BOOLEAN:
601 inst->type = STACK_I4;
606 case MONO_TYPE_FNPTR:
607 inst->type = STACK_PTR;
609 case MONO_TYPE_CLASS:
610 case MONO_TYPE_STRING:
611 case MONO_TYPE_OBJECT:
612 case MONO_TYPE_SZARRAY:
613 case MONO_TYPE_ARRAY:
614 inst->type = STACK_OBJ;
618 inst->type = STACK_I8;
622 inst->type = STACK_R8;
624 case MONO_TYPE_VALUETYPE:
/* Enums behave like their underlying integral type. */
625 if (type->data.klass->enumtype) {
626 type = type->data.klass->enum_basetype;
630 inst->type = STACK_VTYPE;
633 case MONO_TYPE_TYPEDBYREF:
634 inst->klass = mono_defaults.typed_reference_class;
635 inst->type = STACK_VTYPE;
637 case MONO_TYPE_GENERICINST:
/* Re-dispatch on the generic container's open type. */
638 type = &type->data.generic_class->container_class->byval_arg;
641 case MONO_TYPE_MVAR :
642 /* FIXME: all the arguments must be references for now,
643 * later look inside cfg and see if the arg num is
/* Type variables only appear here under generic sharing, where they
 * are treated as references (see FIXME above). */
646 g_assert (cfg->generic_sharing_context);
647 inst->type = STACK_OBJ;
650 g_error ("unknown type 0x%02x in eval stack type", type->type);
655 * The following tables are used to quickly validate the IL code in type_from_op ().
/* Result stack type of arithmetic binops, indexed [src1->type][src2->type]. */
658 bin_num_table [STACK_MAX] [STACK_MAX] = {
659 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
660 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
661 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
662 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
663 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
664 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
665 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
666 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Result type of unary negation, indexed by the operand's stack type. */
671 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
674 /* reduce the size of this table */
/* Result type of integer-only (bitwise) binops. */
676 bin_int_table [STACK_MAX] [STACK_MAX] = {
677 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
678 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
679 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
680 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
681 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
682 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
683 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
684 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Which operand-type pairs may be compared: 0 = invalid, 1 = valid;
 * values 2/3/4 apparently mark pointer/reference special cases -- verify
 * against the users of this table in type_from_op (). */
688 bin_comp_table [STACK_MAX] [STACK_MAX] = {
689 /* Inv i L p F & O vt */
691 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
692 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
693 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
694 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
695 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
696 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
697 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
700 /* reduce the size of this table */
/* Result type of shifts: follows the shifted (left) operand. */
702 shift_table [STACK_MAX] [STACK_MAX] = {
703 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
704 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
705 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
706 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
707 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
708 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
709 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
710 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
714 * Tables to map from the non-specific opcode to the matching
715 * type-specific opcode.
717 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
719 binops_op_map [STACK_MAX] = {
720 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
723 /* handles from CEE_NEG to CEE_CONV_U8 */
725 unops_op_map [STACK_MAX] = {
726 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
729 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
731 ovfops_op_map [STACK_MAX] = {
732 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
735 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
737 ovf2ops_op_map [STACK_MAX] = {
738 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
741 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
743 ovf3ops_op_map [STACK_MAX] = {
744 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
747 /* handles from CEE_BEQ to CEE_BLT_UN */
749 beqops_op_map [STACK_MAX] = {
750 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
753 /* handles from CEE_CEQ to CEE_CLT_UN */
755 ceqops_op_map [STACK_MAX] = {
756 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
760 * Sets ins->type (the type on the eval stack) according to the
761 * type of the opcode and the arguments to it.
762 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
764 * FIXME: this function sets ins->type unconditionally in some cases, but
765 * it should set it to invalid for some types (a conv.x on an object)
768 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
770 switch (ins->opcode) {
777 /* FIXME: check unverifiable args for STACK_MP */
/* Arithmetic binops: validate with bin_num_table, then rewrite the
 * generic opcode into its I/L/P/F variant via binops_op_map. */
778 ins->type = bin_num_table [src1->type] [src2->type];
779 ins->opcode += binops_op_map [ins->type];
/* Bitwise ops use the integer-only table. */
786 ins->type = bin_int_table [src1->type] [src2->type];
787 ins->opcode += binops_op_map [ins->type];
/* Shifts: result type follows the shifted operand. */
792 ins->type = shift_table [src1->type] [src2->type];
793 ins->opcode += binops_op_map [ins->type];
/* Compares: width of src1 selects the L/F/I compare variant. */
798 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
799 if ((src1->type == STACK_I8) || ((sizeof (gpointer) == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
800 ins->opcode = OP_LCOMPARE;
801 else if (src1->type == STACK_R8)
802 ins->opcode = OP_FCOMPARE;
804 ins->opcode = OP_ICOMPARE;
806 case OP_ICOMPARE_IMM:
/* NOTE(review): indexes bin_comp_table with src1 twice -- presumably
 * deliberate for compare-with-immediate (the operand compares against
 * its own type); verify against the non-imm case above. */
807 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
808 if ((src1->type == STACK_I8) || ((sizeof (gpointer) == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
809 ins->opcode = OP_LCOMPARE_IMM;
/* Conditional branches and compare-and-push (ceq-family) opcodes. */
821 ins->opcode += beqops_op_map [src1->type];
824 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
825 ins->opcode += ceqops_op_map [src1->type];
/* Ordered comparisons only allow pairs flagged exactly 1 in the table. */
831 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
832 ins->opcode += ceqops_op_map [src1->type];
/* Unary negation-family ops. */
836 ins->type = neg_table [src1->type];
837 ins->opcode += unops_op_map [ins->type];
/* Integer-only unary op: valid for I4..PTR, invalid otherwise. */
840 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
841 ins->type = src1->type;
843 ins->type = STACK_INV;
844 ins->opcode += unops_op_map [ins->type];
/* Conversions yielding a 32 bit integer. */
850 ins->type = STACK_I4;
851 ins->opcode += unops_op_map [src1->type];
/* Unsigned-to-float conversion (case labels elided in this view). */
854 ins->type = STACK_R8;
855 switch (src1->type) {
858 ins->opcode = OP_ICONV_TO_R_UN;
861 ins->opcode = OP_LCONV_TO_R_UN;
/* Overflow-checked conversions to 32 bit integers. */
865 case CEE_CONV_OVF_I1:
866 case CEE_CONV_OVF_U1:
867 case CEE_CONV_OVF_I2:
868 case CEE_CONV_OVF_U2:
869 case CEE_CONV_OVF_I4:
870 case CEE_CONV_OVF_U4:
871 ins->type = STACK_I4;
872 ins->opcode += ovf3ops_op_map [src1->type];
/* Overflow-checked conversions to native int, from unsigned sources. */
874 case CEE_CONV_OVF_I_UN:
875 case CEE_CONV_OVF_U_UN:
876 ins->type = STACK_PTR;
877 ins->opcode += ovf2ops_op_map [src1->type];
879 case CEE_CONV_OVF_I1_UN:
880 case CEE_CONV_OVF_I2_UN:
881 case CEE_CONV_OVF_I4_UN:
882 case CEE_CONV_OVF_U1_UN:
883 case CEE_CONV_OVF_U2_UN:
884 case CEE_CONV_OVF_U4_UN:
885 ins->type = STACK_I4;
886 ins->opcode += ovf2ops_op_map [src1->type];
/* Conversion to native-sized value; I4 is a plain move on 32 bit. */
889 ins->type = STACK_PTR;
890 switch (src1->type) {
892 ins->opcode = OP_MOVE;
896 #if SIZEOF_VOID_P == 8
897 ins->opcode = OP_LCONV_TO_U;
899 ins->opcode = OP_MOVE;
903 ins->opcode = OP_LCONV_TO_U;
906 ins->opcode = OP_FCONV_TO_U;
/* Conversions yielding a 64 bit integer. */
912 ins->type = STACK_I8;
913 ins->opcode += unops_op_map [src1->type];
915 case CEE_CONV_OVF_I8:
916 case CEE_CONV_OVF_U8:
917 ins->type = STACK_I8;
918 ins->opcode += ovf3ops_op_map [src1->type];
920 case CEE_CONV_OVF_U8_UN:
921 case CEE_CONV_OVF_I8_UN:
922 ins->type = STACK_I8;
923 ins->opcode += ovf2ops_op_map [src1->type];
/* Conversions yielding floating point. */
927 ins->type = STACK_R8;
928 ins->opcode += unops_op_map [src1->type];
931 ins->type = STACK_R8;
935 ins->type = STACK_I4;
936 ins->opcode += ovfops_op_map [src1->type];
941 ins->type = STACK_PTR;
942 ins->opcode += ovfops_op_map [src1->type];
/* Overflow-checked arithmetic: not defined for floats (R8 -> invalid). */
950 ins->type = bin_num_table [src1->type] [src2->type];
951 ins->opcode += ovfops_op_map [src1->type];
952 if (ins->type == STACK_R8)
953 ins->type = STACK_INV;
/* Loads: the stack type follows the loaded width. */
955 case OP_LOAD_MEMBASE:
956 ins->type = STACK_PTR;
958 case OP_LOADI1_MEMBASE:
959 case OP_LOADU1_MEMBASE:
960 case OP_LOADI2_MEMBASE:
961 case OP_LOADU2_MEMBASE:
962 case OP_LOADI4_MEMBASE:
963 case OP_LOADU4_MEMBASE:
964 ins->type = STACK_PTR;
966 case OP_LOADI8_MEMBASE:
967 ins->type = STACK_I8;
969 case OP_LOADR4_MEMBASE:
970 case OP_LOADR8_MEMBASE:
971 ins->type = STACK_R8;
974 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
/* Managed pointers default to object_class when no better klass is known. */
978 if (ins->type == STACK_MP)
979 ins->klass = mono_defaults.object_class;
984 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
990 param_table [STACK_MAX] [STACK_MAX] = {
/*
 * check_values_to_signature:
 * Sanity-check that the eval-stack types of the argument instructions in
 * 'args' are compatible with 'sig' (presumably returning FALSE/0 on a
 * mismatch -- the return statements fall outside this sampled view).
 */
995 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
999 switch (args->type) {
1009 for (i = 0; i < sig->param_count; ++i) {
1010 switch (args [i].type) {
/* Managed pointers on the stack require a byref parameter, and
 * vice versa references require a non-byref parameter. */
1014 if (!sig->params [i]->byref)
1018 if (sig->params [i]->byref)
1020 switch (sig->params [i]->type) {
1021 case MONO_TYPE_CLASS:
1022 case MONO_TYPE_STRING:
1023 case MONO_TYPE_OBJECT:
1024 case MONO_TYPE_SZARRAY:
1025 case MONO_TYPE_ARRAY:
/* An R8 stack value must match a (non-byref) floating point parameter. */
1032 if (sig->params [i]->byref)
1034 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1043 /*if (!param_table [args [i].type] [sig->params [i]->type])
1051 * When we need a pointer to the current domain many times in a method, we
1052 * call mono_domain_get() once and we store the result in a local variable.
1053 * This function returns the variable that represents the MonoDomain*.
1055 inline static MonoInst *
1056 mono_get_domainvar (MonoCompile *cfg)
/* Lazily create the cached domain variable (an int-sized local). */
1058 if (!cfg->domainvar)
1059 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1060 return cfg->domainvar;
1064 * The got_var contains the address of the Global Offset Table when AOT
1067 inline static MonoInst *
1068 mono_get_got_var (MonoCompile *cfg)
1070 #ifdef MONO_ARCH_NEED_GOT_VAR
/* Only AOT-compiled code needs a GOT variable on these architectures. */
1071 if (!cfg->compile_aot)
/* Lazily create the variable on first request. */
1073 if (!cfg->got_var) {
1074 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1076 return cfg->got_var;
/*
 * mono_get_vtable_var:
 * Return (lazily creating) cfg->rgctx_var, the variable holding the
 * runtime generic context/vtable; only valid under generic sharing.
 */
1083 mono_get_vtable_var (MonoCompile *cfg)
1085 g_assert (cfg->generic_sharing_context);
1087 if (!cfg->rgctx_var) {
1088 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1089 /* force the var to be stack allocated */
1090 cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
1093 return cfg->rgctx_var;
/*
 * type_from_stack_type:
 * Map an instruction's eval-stack type back to a MonoType suitable for
 * creating a variable to hold it (roughly the inverse of
 * type_to_eval_stack_type).
 */
1097 type_from_stack_type (MonoInst *ins) {
1098 switch (ins->type) {
1099 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1100 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1101 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1102 case STACK_R8: return &mono_defaults.double_class->byval_arg;
/* Managed pointer: use the class's this_arg (its byref-like form). */
1104 return &ins->klass->this_arg;
1105 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1106 case STACK_VTYPE: return &ins->klass->byval_arg;
1108 g_error ("stack type %d to montype not handled\n", ins->type);
/*
 * type_to_stack_type:
 * Map a MonoType to its STACK_* eval-stack kind, after stripping enum
 * wrappers via mono_type_get_underlying_type (). NOTE(review): the
 * return statements fall outside this sampled view.
 */
1113 static G_GNUC_UNUSED int
1114 type_to_stack_type (MonoType *t)
1116 switch (mono_type_get_underlying_type (t)->type) {
1119 case MONO_TYPE_BOOLEAN:
1122 case MONO_TYPE_CHAR:
1129 case MONO_TYPE_FNPTR:
1131 case MONO_TYPE_CLASS:
1132 case MONO_TYPE_STRING:
1133 case MONO_TYPE_OBJECT:
1134 case MONO_TYPE_SZARRAY:
1135 case MONO_TYPE_ARRAY:
1143 case MONO_TYPE_VALUETYPE:
1144 case MONO_TYPE_TYPEDBYREF:
1146 case MONO_TYPE_GENERICINST:
/* Generic instances are vtypes only when the instantiation is. */
1147 if (mono_type_generic_inst_is_valuetype (t))
1153 g_assert_not_reached ();
/*
 * array_access_to_klass:
 * Map an ldelem/stelem CIL opcode to the element MonoClass it accesses.
 * NOTE(review): most case labels are elided in this sampled view; only
 * the LDELEM_REF/STELEM_REF labels are visible.
 */
1160 array_access_to_klass (int opcode)
1164 return mono_defaults.byte_class;
1166 return mono_defaults.uint16_class;
1169 return mono_defaults.int_class;
1172 return mono_defaults.sbyte_class;
1175 return mono_defaults.int16_class;
1178 return mono_defaults.int32_class;
1180 return mono_defaults.uint32_class;
1183 return mono_defaults.int64_class;
1186 return mono_defaults.single_class;
1189 return mono_defaults.double_class;
1190 case CEE_LDELEM_REF:
1191 case CEE_STELEM_REF:
1192 return mono_defaults.object_class;
1194 g_assert_not_reached ();
/*
 * mono_compile_get_interface_var:
 * Return a variable that carries eval-stack slot 'slot' across basic
 * block boundaries, reusing a cached variable (cfg->intvars) keyed by
 * (stack type, slot) where possible.
 */
1200 * We try to share variables when possible
1203 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1208 /* inlining can result in deeper stacks */
1209 if (slot >= mono_method_get_header (cfg->method)->max_stack)
1210 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* Cache index: one entry per (stack type, slot) pair. */
1212 pos = ins->type - 1 + slot * STACK_MAX;
1214 switch (ins->type) {
/* Return the cached variable, or create and cache a new one. */
1221 if ((vnum = cfg->intvars [pos]))
1222 return cfg->varinfo [vnum];
1223 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1224 cfg->intvars [pos] = res->inst_c0;
/* Non-shareable stack types always get a fresh variable. */
1227 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/*
 * mono_save_token_info:
 * Under AOT compilation, remember which (image, token) pair 'key'
 * (presumably a klass/method/field pointer -- verify against callers)
 * was resolved from, so patch info can be emitted later. No-op for
 * plain JIT compiles.
 */
1233 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1235 if (cfg->compile_aot) {
1236 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1237 jump_info_token->image = image;
1238 jump_info_token->token = token;
1239 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1244 * This function is called to handle items that are left on the evaluation stack
1245 * at basic block boundaries. What happens is that we save the values to local variables
1246 * and we reload them later when first entering the target basic block (with the
1247 * handle_loaded_temps () function).
1248 * A single joint point will use the same variables (stored in the array bb->out_stack or
1249 * bb->in_stack, if the basic block is before or after the joint point).
1251 * This function needs to be called _before_ emitting the last instruction of
1252 * the bb (i.e. before emitting a branch).
1253 * If the stack merge fails at a join point, cfg->unverifiable is set.
1256 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1259 MonoBasicBlock *bb = cfg->cbb;
1260 MonoBasicBlock *outb;
1261 MonoInst *inst, **locals;
1266 if (cfg->verbose_level > 3)
1267 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* First exit from this bb: decide which variables carry the stack. */
1268 if (!bb->out_scount) {
1269 bb->out_scount = count;
1270 //printf ("bblock %d has out:", bb->block_num);
/* Prefer reusing a successor's existing in_stack so both sides agree. */
1272 for (i = 0; i < bb->out_count; ++i) {
1273 outb = bb->out_bb [i];
1274 /* exception handlers are linked, but they should not be considered for stack args */
1275 if (outb->flags & BB_EXCEPTION_HANDLER)
1277 //printf (" %d", outb->block_num);
1278 if (outb->in_stack) {
1280 bb->out_stack = outb->in_stack;
/* Otherwise allocate a carrier variable for each stack slot. */
1286 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1287 for (i = 0; i < count; ++i) {
1289 * try to reuse temps already allocated for this purpouse, if they occupy the same
1290 * stack slot and if they are of the same type.
1291 * This won't cause conflicts since if 'local' is used to
1292 * store one of the values in the in_stack of a bblock, then
1293 * the same variable will be used for the same outgoing stack
1295 * This doesn't work when inlining methods, since the bblocks
1296 * in the inlined methods do not inherit their in_stack from
1297 * the bblock they are inlined to. See bug #58863 for an
1300 if (cfg->inlined_method)
1301 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1303 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate our out_stack to successors lacking one; mismatched stack
 * depths at a join point make the method unverifiable. */
1308 for (i = 0; i < bb->out_count; ++i) {
1309 outb = bb->out_bb [i];
1310 /* exception handlers are linked, but they should not be considered for stack args */
1311 if (outb->flags & BB_EXCEPTION_HANDLER)
1313 if (outb->in_scount) {
1314 if (outb->in_scount != bb->out_scount) {
1315 cfg->unverifiable = TRUE;
1318 continue; /* check they are the same locals */
1320 outb->in_scount = count;
1321 outb->in_stack = bb->out_stack;
/* Store each live slot into its carrier variable and replace the stack
 * entries with the variables themselves. */
1324 locals = bb->out_stack;
1326 for (i = 0; i < count; ++i) {
1327 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1328 inst->cil_code = sp [i]->cil_code;
1329 sp [i] = locals [i];
1330 if (cfg->verbose_level > 3)
1331 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1335 * It is possible that the out bblocks already have in_stack assigned, and
1336 * the in_stacks differ. In this case, we will store to all the different
1343 /* Find a bblock which has a different in_stack */
1345 while (bindex < bb->out_count) {
1346 outb = bb->out_bb [bindex];
1347 /* exception handlers are linked, but they should not be considered for stack args */
1348 if (outb->flags & BB_EXCEPTION_HANDLER) {
1352 if (outb->in_stack != locals) {
/* Emit an extra set of stores targeting this differing in_stack. */
1353 for (i = 0; i < count; ++i) {
1354 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1355 inst->cil_code = sp [i]->cil_code;
1356 sp [i] = locals [i];
1357 if (cfg->verbose_level > 3)
1358 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1360 locals = outb->in_stack;
1369 /* Emit code which loads interface_offsets [klass->interface_id]
1370 * The array is stored in memory before vtable.
/* Loads into INTF_REG the vtable slot base for interface KLASS; the
 * interface_offsets array sits at negative offsets from VTABLE_REG. */
1373 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1375 if (cfg->compile_aot) {
/* Under AOT the interface id is unknown at compile time: load the
 * (already adjusted) offset through an AOT patch and add it at runtime. */
1376 int ioffset_reg = alloc_preg (cfg);
1377 int iid_reg = alloc_preg (cfg);
1379 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1380 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1381 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
/* JIT case: the id is a compile-time constant, so a single negative-offset
 * load from the vtable suffices. */
1384 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
1389 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1390 * stored in "klass_reg" implements the interface "klass".
/* Tests bit (interface_id) of the class' interface_bitmap:
 * byte index = iid >> 3, bit within the byte = iid & 7. */
1393 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1395 int ibitmap_reg = alloc_preg (cfg);
1396 int ibitmap_byte_reg = alloc_preg (cfg);
1398 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap));
1400 if (cfg->compile_aot) {
/* AOT: iid is a patched runtime constant, so both the byte index and the
 * bit mask must be computed with emitted arithmetic. */
1401 int iid_reg = alloc_preg (cfg);
1402 int shifted_iid_reg = alloc_preg (cfg);
1403 int ibitmap_byte_address_reg = alloc_preg (cfg);
1404 int masked_iid_reg = alloc_preg (cfg);
1405 int iid_one_bit_reg = alloc_preg (cfg);
1406 int iid_bit_reg = alloc_preg (cfg);
1407 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1408 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1409 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1410 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1411 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1412 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1413 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1414 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT: byte offset and bit mask fold into immediates. */
1416 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1417 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1422 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1423 * stored in "vtable_reg" implements the interface "klass".
/* Same bitmap test as mini_emit_load_intf_bit_reg_class, but reads the
 * interface_bitmap cached directly in the vtable. */
1426 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1428 int ibitmap_reg = alloc_preg (cfg);
1429 int ibitmap_byte_reg = alloc_preg (cfg);
1431 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap));
1433 if (cfg->compile_aot) {
/* AOT: interface id comes from a runtime patch, compute byte/bit at runtime. */
1434 int iid_reg = alloc_preg (cfg);
1435 int shifted_iid_reg = alloc_preg (cfg);
1436 int ibitmap_byte_address_reg = alloc_preg (cfg);
1437 int masked_iid_reg = alloc_preg (cfg);
1438 int iid_one_bit_reg = alloc_preg (cfg);
1439 int iid_bit_reg = alloc_preg (cfg);
1440 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1441 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, shifted_iid_reg, iid_reg, 3);
1442 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1443 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1444 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, masked_iid_reg, iid_reg, 7);
1445 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1446 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1447 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT: constant byte offset and bit mask. */
1449 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1450 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1455 * Emit code which checks whether the interface id of @klass is not greater
1456 * than the value given by max_iid_reg; on failure either branch to
 * false_target (when given) or throw InvalidCastException.
1459 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1460 MonoBasicBlock *false_target)
1462 if (cfg->compile_aot) {
/* AOT: the interface id is only known at load time, fetch it via a patch. */
1463 int iid_reg = alloc_preg (cfg);
1464 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1465 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1468 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
/* Unsigned compare: branch/throw when max_iid < iid. */
1470 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1472 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1475 /* Same as above, but obtains max_iid from a vtable */
1477 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1478 MonoBasicBlock *false_target)
1480 int max_iid_reg = alloc_preg (cfg);
/* max_interface_id is a 16 bit field, hence the unsigned 2-byte load. */
1482 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1483 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1486 /* Same as above, but obtains max_iid from a klass */
1488 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1489 MonoBasicBlock *false_target)
1491 int max_iid_reg = alloc_preg (cfg);
/* max_interface_id is a 16 bit field, hence the unsigned 2-byte load. */
1493 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1494 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * mini_emit_isninst_cast:
 *
 *   Emit an 'isinst'-style subtype test: branch to TRUE_TARGET if the class
 * in KLASS_REG is (a subclass of) KLASS, to FALSE_TARGET otherwise.  Uses
 * the supertypes display: klass is an ancestor iff
 * supertypes [klass->idepth - 1] == klass.
 */
1498 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1500 int idepth_reg = alloc_preg (cfg);
1501 int stypes_reg = alloc_preg (cfg);
1502 int stype = alloc_preg (cfg);
/* The supertypes table is only guaranteed to have MONO_DEFAULT_SUPERTABLE_SIZE
 * entries; for deeper hierarchies, first check the candidate is deep enough. */
1504 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1505 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1506 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1507 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1509 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1510 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1511 if (cfg->compile_aot) {
/* AOT: the MonoClass pointer must come from a patchable constant. */
1512 int const_reg = alloc_preg (cfg);
1513 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1514 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1516 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1518 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/*
 * mini_emit_iface_cast:
 *
 *   Emit a check that the object whose vtable is in VTABLE_REG implements
 * the interface KLASS; branch to TRUE_TARGET on success, otherwise either
 * branch to FALSE_TARGET or throw InvalidCastException.
 */
1522 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1524 int intf_reg = alloc_preg (cfg);
/* Range-check the interface id first, then test the interface bitmap bit. */
1526 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1527 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1528 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1530 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1532 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1536 * Variant of the above that takes a register to the class, not the vtable.
1539 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1541 int intf_bit_reg = alloc_preg (cfg);
/* Same structure as mini_emit_iface_cast: max-iid range check, then bitmap test. */
1543 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1544 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1545 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1547 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1549 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * mini_emit_class_check:
 *
 *   Emit code which throws InvalidCastException unless the class in
 * KLASS_REG is exactly KLASS (identity check, no subtype walk).
 */
1553 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1555 if (cfg->compile_aot) {
/* AOT: the class pointer must be loaded through a patchable constant. */
1556 int const_reg = alloc_preg (cfg);
1557 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1558 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1560 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1562 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/*
 * mini_emit_class_check_branch:
 *
 *   Compare the class in KLASS_REG against KLASS and branch to TARGET using
 * BRANCH_OP (e.g. OP_PBEQ / OP_PBNE_UN) on the comparison result.
 */
1566 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1568 if (cfg->compile_aot) {
1569 int const_reg = alloc_preg (cfg);
1570 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1571 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1573 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1575 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
/*
 * mini_emit_castclass:
 *
 *   Emit a 'castclass' check of the object in OBJ_REG (class in KLASS_REG)
 * against KLASS, throwing InvalidCastException on failure.  Array types are
 * checked by rank plus a recursive check on the element (cast) class;
 * non-arrays use the supertypes display.  OBJECT_IS_NULL is the bblock to
 * branch to when the cast trivially succeeds.
 */
1579 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1582 int rank_reg = alloc_preg (cfg);
1583 int eclass_reg = alloc_preg (cfg);
/* Array case: ranks must match exactly, then check the element class. */
1585 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
1586 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1587 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1588 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1589 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
1590 if (klass->cast_class == mono_defaults.object_class) {
/* Special-cased element classes: object, System.ValueType/Enum parents,
 * enums and interfaces all have dedicated fast checks. */
1591 int parent_reg = alloc_preg (cfg);
1592 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
1593 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1594 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1595 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1596 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1597 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1598 } else if (klass->cast_class == mono_defaults.enum_class) {
1599 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1600 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1601 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
/* General element class: recurse on the cast class. */
1603 mini_emit_castclass (cfg, obj_reg, eclass_reg, klass->cast_class, object_is_null);
1606 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
1607 /* Check that the object is a vector too */
/* A vector (SZARRAY) has a NULL bounds pointer; multi-dim arrays do not. */
1608 int bounds_reg = alloc_preg (cfg);
1609 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
1610 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1611 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-array case: walk the supertypes display, as in mini_emit_isninst_cast,
 * but throwing instead of branching on failure. */
1614 int idepth_reg = alloc_preg (cfg);
1615 int stypes_reg = alloc_preg (cfg);
1616 int stype = alloc_preg (cfg);
1618 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1619 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1620 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1621 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1623 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1624 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1625 mini_emit_class_check (cfg, stype, klass);
/*
 * mini_emit_memset:
 *
 *   Emit inline code which sets SIZE bytes at DESTREG+OFFSET to VAL
 * (currently only VAL == 0 is supported, see the assert).  Small aligned
 * sizes become a single store; larger sizes are emitted as a sequence of
 * stores, widest first, respecting ALIGN.
 */
1630 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1634 g_assert (val == 0);
/* Fast path: a size that fits in one naturally-aligned store. */
1636 if ((size <= 4) && (size <= align)) {
1639 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1642 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1645 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1647 #if SIZEOF_VOID_P == 8
1649 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* General path: materialize VAL in a register once, then store repeatedly. */
1655 val_reg = alloc_preg (cfg);
1657 if (sizeof (gpointer) == 8)
1658 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1660 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
/* Unaligned destination: fall back to byte stores.
 * This could be optimized further if necessary. */
1665 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1672 #if !NO_UNALIGNED_ACCESS
/* Use the widest stores the target allows, then mop up the remainder. */
1673 if (sizeof (gpointer) == 8) {
1675 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1680 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
1688 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1693 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1698 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/*
 * mini_emit_memcpy2:
 *
 *   Emit inline code which copies SIZE bytes from SRCREG+SOFFSET to
 * DESTREG+DOFFSET, using the widest load/store pairs permitted by ALIGN
 * (and by NO_UNALIGNED_ACCESS), falling back to byte copies otherwise.
 */
1705 mini_emit_memcpy2 (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
/* Unaligned case: byte-by-byte copy.
 * This could be optimized further if necessary. */
1712 cur_reg = alloc_preg (cfg);
1713 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1714 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1721 #if !NO_UNALIGNED_ACCESS
/* 8-byte chunks on 64 bit targets. */
1722 if (sizeof (gpointer) == 8) {
1724 cur_reg = alloc_preg (cfg);
1725 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1726 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
/* Then 4-, 2- and 1-byte chunks for the remainder. */
1735 cur_reg = alloc_preg (cfg);
1736 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1737 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1743 cur_reg = alloc_preg (cfg);
1744 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1745 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1751 cur_reg = alloc_preg (cfg);
1752 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1753 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * mini_emit_check_array_type:
 *
 *   Emit code which throws ArrayTypeMismatchException unless OBJ is an
 * array of exactly ARRAY_CLASS.  With MONO_OPT_SHARED (domain-shared code)
 * the class pointer is compared; otherwise the vtable pointer is compared
 * directly, which also pins the domain.
 */
1761 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
1763 int vtable_reg = alloc_preg (cfg);
1765 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
1767 if (cfg->opt & MONO_OPT_SHARED) {
1768 int class_reg = alloc_preg (cfg);
1769 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1770 if (cfg->compile_aot) {
/* AOT: class/vtable pointers must come from patchable constants. */
1771 int klass_reg = alloc_preg (cfg);
1772 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
1773 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
1775 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
1778 if (cfg->compile_aot) {
1779 int vt_reg = alloc_preg (cfg);
1780 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, mono_class_vtable (cfg->domain, array_class));
1781 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
1783 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, mono_class_vtable (cfg->domain, array_class));
1787 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
/*
 * ret_type_to_call_opcode:
 *
 *   Map a method return TYPE to the IR call opcode family to use:
 * plain/void/long/float/vtype x {direct, indirect (CALLI), virtual}.
 * GSCTX is used to resolve generic-shared type parameters first.
 */
1791 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
/* Byref returns are pointer-sized, handled by the plain CALL family. */
1794 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1797 type = mini_get_basic_type_from_generic (gsctx, type);
1798 switch (type->type) {
1799 case MONO_TYPE_VOID:
1800 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
1803 case MONO_TYPE_BOOLEAN:
1806 case MONO_TYPE_CHAR:
1809 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1813 case MONO_TYPE_FNPTR:
1814 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1815 case MONO_TYPE_CLASS:
1816 case MONO_TYPE_STRING:
1817 case MONO_TYPE_OBJECT:
1818 case MONO_TYPE_SZARRAY:
1819 case MONO_TYPE_ARRAY:
1820 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1823 return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
1826 return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
1827 case MONO_TYPE_VALUETYPE:
/* Enums are handled as their underlying integral type. */
1828 if (type->data.klass->enumtype) {
1829 type = type->data.klass->enum_basetype;
1832 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1833 case MONO_TYPE_TYPEDBYREF:
1834 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1835 case MONO_TYPE_GENERICINST:
/* Re-dispatch on the generic container's open type. */
1836 type = &type->data.generic_class->container_class->byval_arg;
1839 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
1845 * target_type_is_incompatible:
1846 * @cfg: MonoCompile context
1848 * Check that the item @arg on the evaluation stack can be stored
1849 * in the target type (can be a local, or field, etc).
1850 * The cfg arg can be used to check if we need verification or just
1853 * Returns: non-0 value if arg can't be stored on a target.
1856 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1858 MonoType *simple_type;
1861 if (target->byref) {
1862 /* FIXME: check that the pointed to types match */
1863 if (arg->type == STACK_MP)
1864 return arg->klass != mono_class_from_mono_type (target);
1865 if (arg->type == STACK_PTR)
/* Resolve enums/custom modifiers before dispatching on the element type. */
1870 simple_type = mono_type_get_underlying_type (target);
1871 switch (simple_type->type) {
1872 case MONO_TYPE_VOID:
1876 case MONO_TYPE_BOOLEAN:
1879 case MONO_TYPE_CHAR:
1882 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1886 /* STACK_MP is needed when setting pinned locals */
1887 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1892 case MONO_TYPE_FNPTR:
1893 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1896 case MONO_TYPE_CLASS:
1897 case MONO_TYPE_STRING:
1898 case MONO_TYPE_OBJECT:
1899 case MONO_TYPE_SZARRAY:
1900 case MONO_TYPE_ARRAY:
1901 if (arg->type != STACK_OBJ)
1903 /* FIXME: check type compatibility */
1907 if (arg->type != STACK_I8)
1912 if (arg->type != STACK_R8)
1915 case MONO_TYPE_VALUETYPE:
/* Value types must match exactly: same stack kind and same class. */
1916 if (arg->type != STACK_VTYPE)
1918 klass = mono_class_from_mono_type (simple_type);
1919 if (klass != arg->klass)
1922 case MONO_TYPE_TYPEDBYREF:
1923 if (arg->type != STACK_VTYPE)
1925 klass = mono_class_from_mono_type (simple_type);
1926 if (klass != arg->klass)
1929 case MONO_TYPE_GENERICINST:
1930 if (mono_type_generic_inst_is_valuetype (simple_type)) {
1931 if (arg->type != STACK_VTYPE)
1933 klass = mono_class_from_mono_type (simple_type);
1934 if (klass != arg->klass)
1938 if (arg->type != STACK_OBJ)
1940 /* FIXME: check type compatibility */
1944 case MONO_TYPE_MVAR:
1945 /* FIXME: all the arguments must be references for now,
1946 * later look inside cfg and see if the arg num is
1947 * really a reference
1949 g_assert (cfg->generic_sharing_context);
1950 if (arg->type != STACK_OBJ)
1954 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
1960 * Prepare arguments for passing to a function call.
1961 * Return a non-zero value if the arguments can't be passed to the given
1963 * The type checks are not yet complete and some conversions may need
1964 * casts on 32 or 64 bit architectures.
1966 * FIXME: implement this using target_type_is_incompatible ()
1969 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
1971 MonoType *simple_type;
/* 'this' must be an object reference or a (managed) pointer. */
1975 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
1979 for (i = 0; i < sig->param_count; ++i) {
1980 if (sig->params [i]->byref) {
1981 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
/* Resolve generic sharing before dispatching on the element type. */
1985 simple_type = sig->params [i];
1986 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
1988 switch (simple_type->type) {
1989 case MONO_TYPE_VOID:
1994 case MONO_TYPE_BOOLEAN:
1997 case MONO_TYPE_CHAR:
2000 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2006 case MONO_TYPE_FNPTR:
2007 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2010 case MONO_TYPE_CLASS:
2011 case MONO_TYPE_STRING:
2012 case MONO_TYPE_OBJECT:
2013 case MONO_TYPE_SZARRAY:
2014 case MONO_TYPE_ARRAY:
2015 if (args [i]->type != STACK_OBJ)
2020 if (args [i]->type != STACK_I8)
2025 if (args [i]->type != STACK_R8)
2028 case MONO_TYPE_VALUETYPE:
/* Enums are checked as their underlying integral type. */
2029 if (simple_type->data.klass->enumtype) {
2030 simple_type = simple_type->data.klass->enum_basetype;
2033 if (args [i]->type != STACK_VTYPE)
2036 case MONO_TYPE_TYPEDBYREF:
2037 if (args [i]->type != STACK_VTYPE)
2040 case MONO_TYPE_GENERICINST:
/* Re-dispatch on the generic container's open type. */
2041 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2045 g_error ("unknown type 0x%02x in check_call_signature",
/* Map an OP_*CALLVIRT opcode to the corresponding direct-call opcode
 * (used when a virtual call can be statically devirtualized). */
2053 callvirt_to_call (int opcode)
2058 case OP_VOIDCALLVIRT:
2067 g_assert_not_reached ();
/* Map an OP_*CALLVIRT opcode to its *CALL_MEMBASE form, which calls through
 * an address loaded from [basereg + offset] (vtable slot dispatch). */
2074 callvirt_to_call_membase (int opcode)
2078 return OP_CALL_MEMBASE;
2079 case OP_VOIDCALLVIRT:
2080 return OP_VOIDCALL_MEMBASE;
2082 return OP_FCALL_MEMBASE;
2084 return OP_LCALL_MEMBASE;
2086 return OP_VCALL_MEMBASE;
2088 g_assert_not_reached ();
2094 #ifdef MONO_ARCH_HAVE_IMT
/*
 * emit_imt_argument:
 *
 *   Pass the IMT key (the target method, or IMT_ARG when given) to an
 * interface call, either in the architecture's dedicated IMT register
 * or through the arch-specific fallback.
 */
2096 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
2098 #ifdef MONO_ARCH_IMT_REG
2099 int method_reg = alloc_preg (cfg);
2102 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2103 } else if (cfg->compile_aot) {
2104 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
/* JIT case: the MonoMethod pointer can be embedded directly. */
2107 MONO_INST_NEW (cfg, ins, OP_PCONST);
2108 ins->inst_p0 = call->method;
2109 ins->dreg = method_reg;
2110 MONO_ADD_INS (cfg->cbb, ins);
2113 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2115 mono_arch_emit_imt_argument (cfg, call, imt_arg);
/* Allocate a MonoJumpInfo patch descriptor from MP, filling in the ip
 * offset, patch TYPE and TARGET data. */
2120 static MonoJumpInfo *
2121 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2123 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2127 ji->data.target = target;
/* Forward declaration: needed below by the soft-float argument conversion. */
2132 inline static MonoInst*
2133 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args);

/*
 * mono_emit_call_args:
 *
 *   Create a MonoCallInst for a call with signature SIG and arguments ARGS,
 * pick the call opcode (direct/CALLI/virtual), set up the return value
 * (vtype returns get a temp plus an OP_OUTARG_VTRETADDR), and let the
 * backend emit the argument-passing code.  The instruction is NOT yet added
 * to a bblock; callers do that.
 */
2135 inline static MonoCallInst *
2136 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2137 MonoInst **args, int calli, int virtual)
2140 #ifdef MONO_ARCH_SOFT_FLOAT
2144 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2147 call->signature = sig;
2149 type_to_eval_stack_type ((cfg), sig->ret, &call->inst);
2151 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2152 MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
2155 temp->backend.is_pinvoke = sig->pinvoke;
2158 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2159 * address of return value to increase optimization opportunities.
2160 * Before vtype decomposition, the dreg of the call ins itself represents the
2161 * fact the call modifies the return value. After decomposition, the call will
2162 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2163 * will be transformed into an LDADDR.
2165 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2166 loada->dreg = alloc_preg (cfg);
2167 loada->inst_p0 = temp;
2168 /* We reference the call too since call->dreg could change during optimization */
2169 loada->inst_p1 = call;
2170 MONO_ADD_INS (cfg->cbb, loada);
2172 call->inst.dreg = temp->dreg;
2174 call->vret_var = loada;
2175 } else if (!MONO_TYPE_IS_VOID (sig->ret))
2176 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2178 #ifdef MONO_ARCH_SOFT_FLOAT
2180 * If the call has a float argument, we would need to do an r8->r4 conversion using
2181 * an icall, but that cannot be done during the call sequence since it would clobber
2182 * the call registers + the stack. So we do it before emitting the call.
2184 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2186 MonoInst *in = call->args [i];
2188 if (i >= sig->hasthis)
2189 t = sig->params [i - sig->hasthis];
2191 t = &mono_defaults.int_class->byval_arg;
2192 t = mono_type_get_underlying_type (t);
2194 if (!t->byref && t->type == MONO_TYPE_R4) {
2195 MonoInst *iargs [1];
2199 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2201 /* The result will be in an int vreg */
2202 call->args [i] = conv;
/* Backend lays out argument registers/stack and records stack usage. */
2207 mono_arch_emit_call (cfg, call);
2209 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2210 cfg->flags |= MONO_CFG_HAS_CALLS;
/* Emit an indirect call through the address in ADDR (CIL 'calli'). */
2215 inline static MonoInst*
2216 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr)
2218 MonoCallInst *call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE);
2220 call->inst.sreg1 = addr->dreg;
2222 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2224 return (MonoInst*)call;
/* Like mono_emit_calli, but additionally passes RGCTX_ARG (the runtime
 * generic context) in the architecture's dedicated RGCTX register.
 * Only available on architectures which define MONO_ARCH_RGCTX_REG. */
2227 inline static MonoInst*
2228 mono_emit_rgctx_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *rgctx_arg)
2230 #ifdef MONO_ARCH_RGCTX_REG
2232 int rgctx_reg = mono_alloc_preg (cfg);
2234 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2235 call = (MonoCallInst*)mono_emit_calli (cfg, sig, args, addr);
2236 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2237 cfg->uses_rgctx_reg = TRUE;
2238 return (MonoInst*)call;
2240 g_assert_not_reached ();
/*
 * mono_emit_method_call_full:
 *
 *   Emit a call to METHOD with signature SIG and arguments ARGS.  A non-NULL
 * THIS makes the call virtual; the function then tries, in order: static
 * devirtualization (non-virtual, final, or sealed-class methods), the
 * delegate Invoke fast path, IMT dispatch for interface methods, and plain
 * vtable dispatch.  Remoting proxies are routed through the
 * remoting-invoke-with-check wrapper.  IMT_ARG optionally overrides the IMT
 * key.  Returns the call instruction, already added to cfg->cbb.
 */
2246 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2247 MonoInst **args, MonoInst *this, MonoInst *imt_arg)
2249 gboolean virtual = this != NULL;
2250 gboolean enable_for_aot = TRUE;
2253 if (method->string_ctor) {
2254 /* Create the real signature */
2255 /* FIXME: Cache these */
/* String ctors are declared void but actually return the string. */
2256 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_full (cfg->mempool, sig);
2257 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2262 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual);
/* Calls on potentially-remote objects go through the remoting wrapper,
 * unless 'this' is statically known to be a local object. */
2264 if (this && sig->hasthis &&
2265 (method->klass->marshalbyref || method->klass == mono_defaults.object_class) &&
2266 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && !MONO_CHECK_THIS (this)) {
2267 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2269 call->method = method;
2271 call->inst.flags |= MONO_INST_HAS_METHOD;
2272 call->inst.inst_left = this;
2275 int vtable_reg, slot_reg, this_reg;
2277 this_reg = this->dreg;
/* Devirtualize when the target is statically known: non-virtual methods,
 * or final methods (excluding the remoting wrapper itself). */
2279 if ((!cfg->compile_aot || enable_for_aot) &&
2280 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2281 ((method->flags & METHOD_ATTRIBUTE_FINAL) &&
2282 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK))) {
2284 * the method is not virtual, we just need to ensure this is not null
2285 * and then we can call the method directly.
2287 if (method->klass->marshalbyref || method->klass == mono_defaults.object_class) {
2288 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2291 if (!method->string_ctor) {
2292 cfg->flags |= MONO_CFG_HAS_CHECK_THIS;
2293 MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, this_reg);
2294 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, this_reg);
2297 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2299 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2301 return (MonoInst*)call;
2304 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
2305 if ((method->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
2306 /* Make a call to delegate->invoke_impl */
2307 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2308 call->inst.inst_basereg = this_reg;
2309 call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2310 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2312 return (MonoInst*)call;
2316 if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
2317 ((method->flags & METHOD_ATTRIBUTE_FINAL) ||
2318 (method->klass && method->klass->flags & TYPE_ATTRIBUTE_SEALED))) {
2320 * the method is virtual, but we can statically dispatch since either
2321 * it's class or the method itself are sealed.
2322 * But first we need to ensure it's not a null reference.
2324 cfg->flags |= MONO_CFG_HAS_CHECK_THIS;
2325 MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, this_reg);
2326 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, this_reg);
2328 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2329 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2331 return (MonoInst*)call;
/* True virtual dispatch: call through the vtable. */
2334 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2336 /* Initialize method->slot */
2337 mono_class_setup_vtable (method->klass);
2339 vtable_reg = alloc_preg (cfg);
2340 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2341 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2343 #ifdef MONO_ARCH_HAVE_IMT
/* IMT slots live at negative offsets before the vtable. */
2345 guint32 imt_slot = mono_method_get_imt_slot (method);
2346 emit_imt_argument (cfg, call, imt_arg);
2347 slot_reg = vtable_reg;
2348 call->inst.inst_offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
/* Non-IMT interface dispatch: index into the per-interface slot table. */
2351 if (slot_reg == -1) {
2352 slot_reg = alloc_preg (cfg);
2353 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2354 call->inst.inst_offset = method->slot * SIZEOF_VOID_P;
2357 slot_reg = vtable_reg;
2358 call->inst.inst_offset = G_STRUCT_OFFSET (MonoVTable, vtable) + (method->slot * SIZEOF_VOID_P);
2361 call->inst.sreg1 = slot_reg;
2362 call->virtual = TRUE;
2365 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2367 return (MonoInst*)call;
/* Convenience wrapper: call METHOD using its own signature. */
2370 static inline MonoInst*
2371 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2373 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), args, this, NULL);
/* Emit a direct call to the native function FUNC with signature SIG. */
2377 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2384 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE);
2387 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2389 return (MonoInst*)call;
/* Emit a call to the JIT icall registered for the address FUNC, going
 * through the icall's wrapper. */
2392 inline static MonoInst*
2393 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2395 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2399 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2403 * mono_emit_abs_call:
2405 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2407 inline static MonoInst*
2408 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2409 MonoMethodSignature *sig, MonoInst **args)
2411 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2415 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
/* Register the patch so the PATCH_INFO_ABS resolver can recognize the
 * MonoJumpInfo pointer used as the call target. */
2418 if (cfg->abs_patches == NULL)
2419 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2420 g_hash_table_insert (cfg->abs_patches, ji, ji);
2421 ins = mono_emit_native_call (cfg, ji, sig, args);
2422 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/* Return (and lazily cache) the managed string-class memcpy helper used
 * by value type copies. */
2427 get_memcpy_method (void)
2429 static MonoMethod *memcpy_method = NULL;
2430 if (!memcpy_method) {
2431 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2433 g_error ("Old corlib found. Install a new one");
2435 return memcpy_method;
2439 * Emit code to copy a valuetype of type @klass whose address is stored in
2440 * @src->dreg to memory whose address is stored at @dest->dreg.
2443 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
2445 MonoInst *iargs [3];
2448 MonoMethod *memcpy_method;
2452 * This check breaks with spilled vars... need to handle it during verification anyway.
2453 * g_assert (klass && klass == src->klass && klass == dest->klass);
/* NATIVE selects the marshalled (unmanaged) size of the struct. */
2457 n = mono_class_native_size (klass, &align);
2459 n = mono_class_value_size (klass, &align);
/* Small copies are inlined; larger ones call the managed memcpy helper. */
2461 if ((cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
2462 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
2463 mini_emit_memcpy2 (cfg, dest->dreg, 0, src->dreg, 0, n, align);
2467 EMIT_NEW_ICONST (cfg, iargs [2], n);
2469 memcpy_method = get_memcpy_method ();
2470 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *
 *   Return the managed String.memset(3 args) helper from corlib, caching it
 * in a static on first use.  Aborts if corlib is too old to contain it.
 */
2475 get_memset_method (void)
2477 static MonoMethod *memset_method = NULL;
2478 if (!memset_method) {
2479 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
2481 g_error ("Old corlib found. Install a new one");
2483 return memset_method;
/*
 * mini_emit_initobj:
 *
 *   Emit code to zero-initialize the valuetype KLASS at the address in
 * DEST->dreg (implements the 'initobj' opcode).  Small types are zeroed
 * inline; larger ones go through the managed memset helper.
 */
2487 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
2489 MonoInst *iargs [3];
2492 MonoMethod *memset_method;
2494 /* FIXME: Optimize this for the case when dest is an LDADDR */
2496 mono_class_init (klass);
2497 n = mono_class_value_size (klass, &align);
2499 if (n <= sizeof (gpointer) * 5) {
2500 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
/* memset (dest, 0, n) via the managed helper */
2503 memset_method = get_memset_method ();
2505 EMIT_NEW_ICONST (cfg, iargs [1], 0);
2506 EMIT_NEW_ICONST (cfg, iargs [2], n);
2507 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 *
 *   Emit code which loads the runtime generic context for METHOD, depending
 * on how the shared method receives it: through the 'this' argument, through
 * an MRGCTX argument (generic methods), or through the vtable argument
 * (static methods).  CONTEXT_USED describes which part of the generic
 * context is needed.
 */
2512 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
2514 MonoInst *this = NULL;
2516 g_assert (!method->klass->valuetype);
/* Instance methods not using the method context get the rgctx via 'this' */
2518 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) && !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD))
2519 EMIT_NEW_ARGLOAD (cfg, this, 0);
2521 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
2522 MonoInst *mrgctx_loc, *mrgctx_var;
/* Generic methods receive an explicit MRGCTX stored in the vtable var */
2525 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
2527 mrgctx_loc = mono_get_vtable_var (cfg);
2528 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
2531 } else if (method->flags & METHOD_ATTRIBUTE_STATIC) {
2532 MonoInst *vtable_loc, *vtable_var;
/* Static methods receive the vtable (or an MRGCTX to deref) as a hidden arg */
2536 vtable_loc = mono_get_vtable_var (cfg);
2537 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
2539 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
2540 MonoInst *mrgctx_var = vtable_var;
/* The hidden arg is really an MRGCTX; load the vtable out of it */
2543 vtable_reg = alloc_preg (cfg);
2544 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
2545 vtable_var->type = STACK_PTR;
/* Otherwise: load the vtable out of 'this' (NOTE(review): some lines of
 * this branch are not visible in this extract — confirm against the full
 * source). */
2551 int vtable_reg, res_reg;
2553 vtable_reg = alloc_preg (cfg);
2554 res_reg = alloc_preg (cfg);
2555 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Load the runtime generic context into (rgctx), bailing out of the current
 * opcode first if generic sharing is not possible for valuetype methods. */
2560 #define EMIT_GET_RGCTX(rgctx, context_used) do { \
2561 GENERIC_SHARING_FAILURE_IF_VALUETYPE_METHOD(*ip); \
2562 (rgctx) = emit_get_rgctx (cfg, method, (context_used)); \
/*
 * mono_patch_info_rgctx_entry_new:
 *
 *   Allocate (from MP) and fill an rgctx-entry patch descriptor: the lookup
 * is relative to METHOD, uses the MRGCTX if IN_MRGCTX, and resolves the
 * patch (PATCH_TYPE, PATCH_DATA) to an rgctx slot of kind INFO_TYPE.
 */
2565 static MonoJumpInfoRgctxEntry *
2566 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, int info_type)
2568 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
2569 res->method = method;
2570 res->in_mrgctx = in_mrgctx;
2571 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
2572 res->data->type = patch_type;
2573 res->data->data.target = patch_data;
2574 res->info_type = info_type;
/*
 * emit_rgctx_fetch:
 *
 *   Emit a call to the lazy rgctx fetch trampoline which resolves ENTRY
 * using the runtime generic context in RGCTX.
 */
2579 static inline MonoInst*
2580 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
2582 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/* Emit a runtime lookup of KLASS-derived data (of kind RGCTX_TYPE) through
 * the runtime generic context RGCTX. */
2586 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
2587 MonoInst *rgctx, MonoClass *klass, int rgctx_type)
2589 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
2591 return emit_rgctx_fetch (cfg, rgctx, entry);
/* Emit a runtime lookup of CMETHOD-derived data (of kind RGCTX_TYPE) through
 * the runtime generic context RGCTX. */
2595 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
2596 MonoInst *rgctx, MonoMethod *cmethod, int rgctx_type)
2598 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
2600 return emit_rgctx_fetch (cfg, rgctx, entry);
/* Emit a runtime lookup of FIELD-derived data (of kind RGCTX_TYPE) through
 * the runtime generic context RGCTX. */
2604 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
2605 MonoInst *rgctx, MonoClassField *field, int rgctx_type)
2607 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
2609 return emit_rgctx_fetch (cfg, rgctx, entry);
2613 * Handles unbox of a Nullable<T>. If a rgctx is passed, then shared generic code
2617 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used, MonoInst *rgctx)
2619 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
/* Shared generic code: look up the concrete Unbox code through the rgctx
 * and call it indirectly; otherwise call the method directly. */
2622 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, rgctx, method,
2623 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
2625 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
2627 return mono_emit_method_call (cfg, method, &val, NULL);
/*
 * handle_unbox:
 *
 *   Emit the type checks for the 'unbox' opcode on the object in sp [0] and
 * return an instruction holding the address of the boxed value (object
 * pointer + sizeof (MonoObject)).  Throws InvalidCastException at runtime
 * if the object's class does not match KLASS.
 */
2632 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used, MonoInst *rgctx)
2636 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
2637 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
2638 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
2639 int rank_reg = alloc_dreg (cfg ,STACK_I4);
2641 obj_reg = sp [0]->dreg;
2642 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2643 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
2645 /* FIXME: generics */
2646 g_assert (klass->rank == 0);
/* An array can never unbox to a valuetype: rank must be 0 */
2649 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
2650 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
2652 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2653 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
/* Shared generic code: compare against the element class fetched from the
 * rgctx instead of a compile-time constant */
2656 MonoInst *element_class;
2658 /* This assertion is from the unboxcast insn */
2659 g_assert (klass->rank == 0);
2661 element_class = emit_get_rgctx_klass (cfg, context_used, rgctx,
2662 klass->element_class, MONO_RGCTX_INFO_KLASS);
2664 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
2665 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
2667 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
/* Result: address of the unboxed value, just past the object header */
2670 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_PTR), obj_reg, sizeof (MonoObject));
2671 MONO_ADD_INS (cfg->cbb, add);
2672 add->type = STACK_MP;
/*
 * handle_alloc:
 *
 *   Emit code to allocate an instance of KLASS, choosing among: the generic
 * mono_object_new icall (shared/appdomain case), a compact corlib-specific
 * helper (AOT out-of-line throw paths), a GC managed allocator method, or a
 * class-specific allocation function.  FOR_BOX indicates the allocation is
 * for a box operation.
 */
2679 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box)
2681 MonoInst *iargs [2];
2684 if (cfg->opt & MONO_OPT_SHARED) {
/* Shared code cannot bake in the vtable: pass (domain, class) at runtime */
2685 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
2686 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
2688 alloc_ftn = mono_object_new;
2689 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib) {
2690 /* This happens often in argument checking code, eg. throw new FooException... */
2691 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
2692 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
2693 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
2695 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2696 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
2699 if (managed_alloc) {
/* GC provides a managed allocator method: call it with the vtable */
2700 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
2701 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
2703 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* Some allocators take the instance size in words as the first argument */
2705 guint32 lw = vtable->klass->instance_size;
2706 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
2707 EMIT_NEW_ICONST (cfg, iargs [0], lw);
2708 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
2711 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
2715 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/*
 * handle_alloc_from_inst:
 *
 *   Like handle_alloc (), but the vtable is only known at runtime and is
 * supplied by DATA_INST (shared generic code), so allocation goes through
 * mono_object_new_specific / mono_object_new.
 */
2719 handle_alloc_from_inst (MonoCompile *cfg, MonoClass *klass, MonoInst *data_inst,
2722 MonoInst *iargs [2];
2723 MonoMethod *managed_alloc = NULL;
2727 FIXME: we cannot get managed_alloc here because we can't get
2728 the class's vtable (because it's not a closed class)
2730 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2731 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
2734 if (cfg->opt & MONO_OPT_SHARED) {
2735 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
2736 iargs [1] = data_inst;
2737 alloc_ftn = mono_object_new;
/* managed_alloc is always NULL here (see FIXME above), so this branch is
 * currently dead — kept for when the FIXME is resolved. */
2739 if (managed_alloc) {
2740 iargs [0] = data_inst;
2741 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
2744 iargs [0] = data_inst;
2745 alloc_ftn = mono_object_new_specific;
2748 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/*
 * handle_box:
 *
 *   Emit code to box VAL into a new instance of KLASS.  Nullable<T> is boxed
 * by calling its managed Box method; otherwise allocate the object and store
 * the value just past the object header.
 */
2752 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass)
2754 MonoInst *alloc, *ins;
2756 if (mono_class_is_nullable (klass)) {
2757 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
2758 return mono_emit_method_call (cfg, method, &val, NULL);
2761 alloc = handle_alloc (cfg, klass, TRUE);
2763 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * handle_box_from_inst:
 *
 *   Shared-generic-code variant of handle_box (): the Nullable Box method is
 * resolved through the rgctx and the allocation vtable comes from DATA_INST.
 */
2769 handle_box_from_inst (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used, MonoInst *rgctx, MonoInst *data_inst)
2771 MonoInst *alloc, *ins;
2773 if (mono_class_is_nullable (klass)) {
2774 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
2775 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, rgctx, method,
2776 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
2778 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
2780 alloc = handle_alloc_from_inst (cfg, klass, data_inst, TRUE);
2782 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * handle_castclass:
 *
 *   Emit the IR for the 'castclass' opcode: check that SRC (an object
 * reference) is an instance of KLASS, throwing InvalidCastException
 * otherwise.  A null reference always passes.  With --debug=casts the
 * source/target classes are also recorded in TLS for better exception
 * messages.
 */
2789 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
2791 MonoBasicBlock *is_null_bb;
2792 int obj_reg = src->dreg;
2793 int vtable_reg = alloc_preg (cfg);
2795 NEW_BBLOCK (cfg, is_null_bb);
/* null passes the cast unchanged */
2797 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
2798 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
2800 if (mini_get_debug_options ()->better_cast_details) {
2801 int to_klass_reg = alloc_preg (cfg);
2802 int klass_reg = alloc_preg (cfg);
2803 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
/* NOTE(review): the trailing "." after "\n" below looks like a typo in
 * the error message — confirm before changing (runtime string). */
2806 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
/* Record from/to classes in the JIT TLS so a failing cast can report them */
2810 MONO_ADD_INS (cfg->cbb, tls_get);
2811 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2812 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2814 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
2815 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
2816 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
2819 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
/* Interface target: check the interface bitmap via the vtable */
2820 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2821 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
2823 int klass_reg = alloc_preg (cfg);
2825 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2827 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
2828 /* the remoting code is broken, access the class for now */
/* Sealed non-array class: a single pointer comparison suffices */
2830 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
2831 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
2833 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2834 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
2836 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* General case: walk the class hierarchy */
2838 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2839 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, is_null_bb);
2843 MONO_START_BB (cfg, is_null_bb);
2845 /* Reset the variables holding the cast details */
2846 if (mini_get_debug_options ()->better_cast_details) {
2847 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
2849 MONO_ADD_INS (cfg->cbb, tls_get);
2850 /* It is enough to reset the from field */
2851 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
/*
 * handle_isinst:
 *
 *   Emit the IR for the 'isinst' opcode: the result register holds SRC if it
 * is an instance of KLASS (or null), and 0 otherwise.  The type checks
 * mirror handle_castclass () but branch to false_bb instead of throwing.
 */
2858 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
2861 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
2862 int obj_reg = src->dreg;
2863 int vtable_reg = alloc_preg (cfg);
2864 int res_reg = alloc_preg (cfg);
2866 NEW_BBLOCK (cfg, is_null_bb);
2867 NEW_BBLOCK (cfg, false_bb);
2868 NEW_BBLOCK (cfg, end_bb);
2870 /* Do the assignment at the beginning, so the other assignment can be if converted */
2871 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
2872 ins->type = STACK_OBJ;
/* null input: result is null (the res_reg copy above already holds it) */
2875 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
2876 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
2878 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2879 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2880 /* the is_null_bb target simply copies the input register to the output */
2881 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
2883 int klass_reg = alloc_preg (cfg);
2885 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Array target class: check rank, then the element ("cast") class */
2888 int rank_reg = alloc_preg (cfg);
2889 int eclass_reg = alloc_preg (cfg);
2891 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
2892 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
2893 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
2894 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2895 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
2896 if (klass->cast_class == mono_defaults.object_class) {
/* object[]: any non-enum reference element type matches */
2897 int parent_reg = alloc_preg (cfg);
2898 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
2899 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
2900 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
2901 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
2902 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
2903 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
2904 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
2905 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
2906 } else if (klass->cast_class == mono_defaults.enum_class) {
2907 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
2908 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
2909 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
2910 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
2912 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
2913 /* Check that the object is a vector too */
2914 int bounds_reg = alloc_preg (cfg);
2915 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
2916 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
2917 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
2920 /* the is_null_bb target simply copies the input register to the output */
2921 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
2923 } else if (mono_class_is_nullable (klass)) {
2924 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2925 /* the is_null_bb target simply copies the input register to the output */
2926 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
2928 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
2929 /* the remoting code is broken, access the class for now */
/* Sealed class: a single pointer comparison suffices */
2931 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
2932 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
2934 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2935 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
2937 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
2938 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
2940 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2941 /* the is_null_bb target simply copies the input register to the output */
2942 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, is_null_bb);
/* false path: result is 0; true/null path: result already holds the object */
2947 MONO_START_BB (cfg, false_bb);
2949 MONO_EMIT_NEW_ICONST (cfg, res_reg, 0);
2950 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
2952 MONO_START_BB (cfg, is_null_bb);
2954 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *
 *   Emit the IR for the remoting-aware isinst variant (see the tri-state
 * result described below).  Transparent proxies whose remote type cannot be
 * checked locally yield 2 so the caller can do a remote check.
 */
2960 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
2962 /* This opcode takes as input an object reference and a class, and returns:
2963 0) if the object is an instance of the class,
2964 1) if the object is not instance of the class,
2965 2) if the object is a proxy whose type cannot be determined */
2968 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
2969 int obj_reg = src->dreg;
2970 int dreg = alloc_ireg (cfg);
2972 int klass_reg = alloc_preg (cfg);
2974 NEW_BBLOCK (cfg, true_bb);
2975 NEW_BBLOCK (cfg, false_bb);
2976 NEW_BBLOCK (cfg, false2_bb);
2977 NEW_BBLOCK (cfg, end_bb);
2978 NEW_BBLOCK (cfg, no_proxy_bb);
/* null is "not an instance" (result 1) */
2980 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
2981 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
2983 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2984 NEW_BBLOCK (cfg, interface_fail_bb);
2986 tmp_reg = alloc_preg (cfg);
2987 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2988 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
2989 MONO_START_BB (cfg, interface_fail_bb);
2990 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Interface check failed: only a transparent proxy without local type
 * info can still yield 2; everything else is a plain failure. */
2992 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
2994 tmp_reg = alloc_preg (cfg);
2995 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
2996 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
2997 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
/* Non-interface target: detect a transparent proxy and check against its
 * remote proxy_class when it has custom type info. */
2999 tmp_reg = alloc_preg (cfg);
3000 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3001 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3003 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3004 tmp_reg = alloc_preg (cfg);
3005 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3006 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3008 tmp_reg = alloc_preg (cfg);
3009 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3010 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3011 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3013 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
3014 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
3016 MONO_START_BB (cfg, no_proxy_bb);
3018 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
/* Materialize the tri-state result in dreg: 1 / 2 / 0 */
3021 MONO_START_BB (cfg, false_bb);
3023 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3024 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3026 MONO_START_BB (cfg, false2_bb);
3028 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
3029 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3031 MONO_START_BB (cfg, true_bb);
3033 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3035 MONO_START_BB (cfg, end_bb);
/* Dummy OP_ICONST carrying the result register/type for the evaluation stack */
3038 MONO_INST_NEW (cfg, ins, OP_ICONST);
3040 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *
 *   Emit the IR for the remoting-aware castclass variant (see the result
 * encoding described below).  Mirrors handle_cisinst () but throws
 * InvalidCastException on definite failure instead of returning a code.
 */
3046 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3048 /* This opcode takes as input an object reference and a class, and returns:
3049 0) if the object is an instance of the class,
3050 1) if the object is a proxy whose type cannot be determined
3051 an InvalidCastException exception is thrown otherwhise*/
3054 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
3055 int obj_reg = src->dreg;
3056 int dreg = alloc_ireg (cfg);
3057 int tmp_reg = alloc_preg (cfg);
3058 int klass_reg = alloc_preg (cfg);
3060 NEW_BBLOCK (cfg, end_bb);
3061 NEW_BBLOCK (cfg, ok_result_bb);
/* null always passes the cast (result 0) */
3063 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3064 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
3066 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3067 NEW_BBLOCK (cfg, interface_fail_bb);
3069 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3070 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
3071 MONO_START_BB (cfg, interface_fail_bb);
3072 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Interface check failed: anything but a transparent proxy with no local
 * type info throws; the proxy case yields 1 for a remote check. */
3074 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
3076 tmp_reg = alloc_preg (cfg);
3077 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3078 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3079 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
3081 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3082 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Non-interface target: detect a transparent proxy and check against its
 * remote proxy_class when it has custom type info. */
3085 NEW_BBLOCK (cfg, no_proxy_bb);
3087 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3088 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3089 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3091 tmp_reg = alloc_preg (cfg);
3092 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3093 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3095 tmp_reg = alloc_preg (cfg);
3096 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3097 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3098 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3100 NEW_BBLOCK (cfg, fail_1_bb);
3102 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
3104 MONO_START_BB (cfg, fail_1_bb);
3106 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3107 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3109 MONO_START_BB (cfg, no_proxy_bb);
/* Ordinary object: hierarchy check which throws on mismatch */
3111 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
3114 MONO_START_BB (cfg, ok_result_bb);
3116 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3118 MONO_START_BB (cfg, end_bb);
/* Dummy OP_ICONST carrying the result register/type for the evaluation stack */
3121 MONO_INST_NEW (cfg, ins, OP_ICONST);
3123 ins->type = STACK_I4;
/*
 * handle_delegate_ctor:
 *
 *   Emit inline IR equivalent to mono_delegate_ctor (): allocate a delegate
 * of class KLASS, fill in its target, method, method_code and invoke_impl
 * fields, and return the new object.  Used to optimize delegate creation.
 */
3128 static G_GNUC_UNUSED MonoInst*
3129 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method)
3131 gpointer *trampoline;
3132 MonoInst *obj, *method_ins, *tramp_ins;
3136 obj = handle_alloc (cfg, klass, FALSE);
3138 /* Inline the contents of mono_delegate_ctor */
3140 /* Set target field */
3141 /* Optimize away setting of NULL target */
3142 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0))
3143 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
3145 /* Set method field */
3146 EMIT_NEW_METHODCONST (cfg, method_ins, method);
3147 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
3150 * To avoid looking up the compiled code belonging to the target method
3151 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
3152 * store it, and we fill it after the method has been compiled.
3154 if (!cfg->compile_aot && !method->dynamic) {
3155 MonoInst *code_slot_ins;
/* One code slot per (domain, method), shared by all delegates to it */
3157 domain = mono_domain_get ();
3158 mono_domain_lock (domain);
3159 if (!domain->method_code_hash)
3160 domain->method_code_hash = g_hash_table_new (NULL, NULL);
3161 code_slot = g_hash_table_lookup (domain->method_code_hash, method);
3163 code_slot = mono_mempool_alloc0 (domain->mp, sizeof (gpointer));
3164 g_hash_table_insert (domain->method_code_hash, method, code_slot);
3166 mono_domain_unlock (domain);
3168 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
3169 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
3172 /* Set invoke_impl field */
3173 trampoline = mono_create_delegate_trampoline (klass);
3174 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_ABS, trampoline);
3175 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
3177 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit a call to the vararg array-new icall for a RANK-dimensional array,
 * with the dimension sizes in SP.  Marks the method as using varargs.
 */
3183 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
3185 MonoJitICallInfo *info;
3187 /* Need to register the icall so it gets an icall wrapper */
3188 info = mono_get_array_new_va_icall (rank);
3190 cfg->flags |= MONO_CFG_HAS_VARARGS;
3192 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
3193 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * mono_emit_load_got_addr:
 *
 *   If the method has a GOT variable which has not yet been materialized,
 * emit an OP_LOAD_GOTADDR at the very start of the entry bblock to fill it,
 * plus a dummy use in the exit bblock so liveness keeps it alive for the
 * whole method.  No-op if there is no got_var or it is already allocated.
 */
3197 mono_emit_load_got_addr (MonoCompile *cfg)
3199 MonoInst *getaddr, *dummy_use;
3201 if (!cfg->got_var || cfg->got_var_allocated)
3204 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
3205 getaddr->dreg = cfg->got_var->dreg;
3207 /* Add it to the start of the first bblock */
3208 if (cfg->bb_entry->code) {
3209 getaddr->next = cfg->bb_entry->code;
3210 cfg->bb_entry->code = getaddr;
3213 MONO_ADD_INS (cfg->bb_entry, getaddr);
3215 cfg->got_var_allocated = TRUE;
3218 * Add a dummy use to keep the got_var alive, since real uses might
3219 * only be generated by the back ends.
3220 * Add it to end_bblock, so the variable's lifetime covers the whole
3222 * It would be better to make the usage of the got var explicit in all
3223 * cases when the backend needs it (i.e. calls, throw etc.), so this
3224 * wouldn't be needed.
3226 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
3227 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/*
 * mono_method_check_inlining:
 *
 *   Decide whether METHOD may be inlined into the method being compiled.
 * Rejects: generic-shared compilation, runtime/icall/pinvoke/synchronized/
 * noinline methods, MarshalByRef classes, methods with exception clauses,
 * bodies over the size limit (MONO_INLINELIMIT env var or
 * INLINE_LENGTH_LIMIT), classes whose cctor would have to run inside the
 * inlined code, methods with declarative security, and (on soft-float
 * targets) any R4 in the signature.
 */
3231 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
3233 MonoMethodHeader *header = mono_method_get_header (method);
3235 #ifdef MONO_ARCH_SOFT_FLOAT
3236 MonoMethodSignature *sig = mono_method_signature (method);
3240 if (cfg->generic_sharing_context)
3243 #ifdef MONO_ARCH_HAVE_LMF_OPS
/* With LMF ops, small icall/pinvoke wrappers can still be inlined */
3244 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3245 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
3246 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
3250 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_RUNTIME) ||
3251 (method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3252 (method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
3253 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
3254 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL) ||
3255 (method->klass->marshalbyref) ||
3256 !header || header->num_clauses)
3259 /* also consider num_locals? */
3260 /* Do the size check early to avoid creating vtables */
3261 if (getenv ("MONO_INLINELIMIT")) {
3262 if (header->code_size >= atoi (getenv ("MONO_INLINELIMIT"))) {
3265 } else if (header->code_size >= INLINE_LENGTH_LIMIT)
3269 * if we can initialize the class of the method right away, we do,
3270 * otherwise we don't allow inlining if the class needs initialization,
3271 * since it would mean inserting a call to mono_runtime_class_init()
3272 * inside the inlined code
3274 if (!(cfg->opt & MONO_OPT_SHARED)) {
3275 if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
3276 if (cfg->run_cctors && method->klass->has_cctor) {
3277 if (!method->klass->runtime_info)
3278 /* No vtable created yet */
3280 vtable = mono_class_vtable (cfg->domain, method->klass);
3283 /* This makes so that inline cannot trigger */
3284 /* .cctors: too many apps depend on them */
3285 /* running with a specific order... */
3286 if (! vtable->initialized)
3288 mono_runtime_class_init (vtable);
3290 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
3291 if (!method->klass->runtime_info)
3292 /* No vtable created yet */
3294 vtable = mono_class_vtable (cfg->domain, method->klass);
3297 if (!vtable->initialized)
3302 * If we're compiling for shared code
3303 * the cctor will need to be run at aot method load time, for example,
3304 * or at the end of the compilation of the inlining method.
3306 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
3311 * CAS - do not inline methods with declarative security
3312 * Note: this has to be before any possible return TRUE;
3314 if (mono_method_has_declsec (method))
3317 #ifdef MONO_ARCH_SOFT_FLOAT
/* Soft-float: R4 handling requires the non-inlined call path */
3319 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
3321 for (i = 0; i < sig->param_count; ++i)
3322 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Return whether a static field access from METHOD requires emitting a
 * class-initialization call for VTABLE's class.  Not needed when the vtable
 * is already initialized (JIT only), the class is BeforeFieldInit, no cctor
 * must run, or the access is from an instance method of the same class
 * (the cctor already ran before the method could be called).
 */
3330 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoVTable *vtable)
3332 if (vtable->initialized && !cfg->compile_aot)
3335 if (vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
3338 if (!mono_class_needs_cctor_run (vtable->klass, method))
3341 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (vtable->klass == method->klass))
3342 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit code computing the address of element INDEX of the one-dimensional
 * array ARR with element type KLASS, including the bounds check.  On x86/
 * amd64 a single LEA is used for power-of-two element sizes; otherwise the
 * address is computed as arr + index * size + offsetof(MonoArray, vector).
 */
3349 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index)
3353 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
3355 mono_class_init (klass);
3356 size = mono_class_array_element_size (klass);
3358 mult_reg = alloc_preg (cfg);
3359 array_reg = arr->dreg;
3360 index_reg = index->dreg;
3362 #if SIZEOF_VOID_P == 8
3363 /* The array reg is 64 bits but the index reg is only 32 */
3364 index2_reg = alloc_preg (cfg);
3365 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
3367 index2_reg = index_reg;
3370 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
3372 #if defined(__i386__) || defined(__x86_64__)
3373 if (size == 1 || size == 2 || size == 4 || size == 8) {
/* fast_log2 [size] gives the LEA shift amount for the element size */
3374 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
3376 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
3377 ins->type = STACK_PTR;
/* Generic path: explicit multiply and add */
3383 add_reg = alloc_preg (cfg);
3385 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
3386 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
3387 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3388 ins->type = STACK_PTR;
3389 MONO_ADD_INS (cfg->cbb, ins);
3394 #ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit IR computing the address of element [i,j] of a two-dimensional
 * array.  For each dimension, loads lower_bound/length from the
 * MonoArrayBounds table, rebases the index, and emits an unsigned
 * compare + IndexOutOfRangeException check.  The element offset is
 * ((i - low1) * length2 + (j - low2)) * element_size + vector offset.
 * Guarded by !MONO_ARCH_EMULATE_MUL_DIV because it relies on real
 * multiply opcodes.  Returns the address instruction (STACK_MP).
 */
3396 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
3398 	int bounds_reg = alloc_preg (cfg);
3399 	int add_reg = alloc_preg (cfg);
3400 	int mult_reg = alloc_preg (cfg);
3401 	int mult2_reg = alloc_preg (cfg);
3402 	int low1_reg = alloc_preg (cfg);
3403 	int low2_reg = alloc_preg (cfg);
3404 	int high1_reg = alloc_preg (cfg);
3405 	int high2_reg = alloc_preg (cfg);
3406 	int realidx1_reg = alloc_preg (cfg);
3407 	int realidx2_reg = alloc_preg (cfg);
3408 	int sum_reg = alloc_preg (cfg);
3413 	mono_class_init (klass);
3414 	size = mono_class_array_element_size (klass);
3416 	index1 = index_ins1->dreg;
3417 	index2 = index_ins2->dreg;
3419 	/* range checking */
3420 	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
3421 				       arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
	/* dimension 1: realidx1 = index1 - lower_bound; throw if realidx1 >= length (unsigned) */
3423 	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
3424 				       bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3425 	MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
3426 	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
3427 				       bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
3428 	MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
3429 	MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
	/* dimension 2: same checks using the second MonoArrayBounds entry */
3431 	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
3432 				       bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3433 	MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
3434 	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
3435 				       bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
3436 	MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
3437 	MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
	/* row-major address: (realidx1 * length2 + realidx2) * size + vector */
3439 	MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
3440 	MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
3441 	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
3442 	MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
3443 	NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3445 	ins->type = STACK_MP;
3447 	MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 *   Dispatch helper for the Get/Set/Address methods of multi-dimensional
 * arrays: computes the element address for CMETHOD applied to the stack
 * values SP.  Rank 1 and (when intrinsics are enabled and real multiply
 * opcodes exist) rank 2 are emitted inline; any other rank falls back to
 * a call to the marshalling-generated Address wrapper.
 * For setters the trailing value argument is not an index, hence the
 * "- (is_set? 1: 0)" rank adjustment.
 */
3454 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
3458 	MonoMethod *addr_method;
3461 	rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
3464 		return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1]);
3466 #ifndef MONO_ARCH_EMULATE_MUL_DIV
3467 	/* emit_ldelema_2 depends on OP_LMUL */
3468 	if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
3469 		return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
	/* general case: call the generated array-address wrapper */
3473 	element_size = mono_class_array_element_size (cmethod->klass->element_class);
3474 	addr_method = mono_marshal_get_array_address (rank, element_size);
3475 	addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/*
 * mini_emit_inst_for_method:
 *
 *   Try to replace a call to CMETHOD (signature FSIG, evaluated arguments
 * ARGS) with inline IR — the JIT's method intrinsics.  Covers selected
 * members of String, Object, Array, RuntimeHelpers, Thread,
 * System.Threading.Interlocked, Debugger/Environment, and finally defers
 * to the arch-specific hook (e.g. for Math).  Returns the instruction
 * producing the call's result, or falls through when no intrinsic applies.
 *
 * FIX(review): in the Interlocked.Add branch below, ins->type was chosen
 * by comparing opcode against OP_ATOMIC_ADD_I4, but the branch assigns
 * OP_ATOMIC_ADD_NEW_I4/OP_ATOMIC_ADD_NEW_I8, so the comparison was always
 * false and the stack type came out STACK_I8 even for the int32 overload.
 * Changed to OP_ATOMIC_ADD_NEW_I4, matching the Increment/Decrement
 * branches.
 */
3481 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
3483 	MonoInst *ins = NULL;
3485 	static MonoClass *runtime_helpers_class = NULL;
3486 	if (! runtime_helpers_class)
3487 		runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
3488 			"System.Runtime.CompilerServices", "RuntimeHelpers");
	/* --- System.String intrinsics --- */
3490 	if (cmethod->klass == mono_defaults.string_class) {
3491 		if (strcmp (cmethod->name, "get_Chars") == 0) {
3492 			int dreg = alloc_ireg (cfg);
3493 			int index_reg = alloc_preg (cfg);
3494 			int mult_reg = alloc_preg (cfg);
3495 			int add_reg = alloc_preg (cfg);
3497 #if SIZEOF_VOID_P == 8
3498 			/* The array reg is 64 bits but the index reg is only 32 */
3499 			MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
3501 			index_reg = args [1]->dreg;
3503 			MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
3505 #if defined(__i386__) || defined(__x86_64__)
3506 			EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
3507 			add_reg = ins->dreg;
3508 			/* Avoid a warning */
3510 			EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
3513 			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
3514 			MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
3515 			EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
3516 					       add_reg, G_STRUCT_OFFSET (MonoString, chars));
3518 			type_from_op (ins, NULL, NULL);
3520 		} else if (strcmp (cmethod->name, "get_Length") == 0) {
3521 			int dreg = alloc_ireg (cfg);
3522 			EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
3523 					       args [0]->dreg, G_STRUCT_OFFSET (MonoString, length));
3524 			type_from_op (ins, NULL, NULL);
3527 		} else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
3528 			int mult_reg = alloc_preg (cfg);
3529 			int add_reg = alloc_preg (cfg);
3531 			/* The corlib functions check for oob already. */
3532 			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
3533 			MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
3534 			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
	/* --- System.Object intrinsics --- */
3537 	} else if (cmethod->klass == mono_defaults.object_class) {
3539 		if (strcmp (cmethod->name, "GetType") == 0) {
3540 			int dreg = alloc_preg (cfg);
3541 			int vt_reg = alloc_preg (cfg);
3542 			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3543 			EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
3544 			type_from_op (ins, NULL, NULL);
3547 #if !defined(MONO_ARCH_EMULATE_MUL_DIV) && !defined(HAVE_MOVING_COLLECTOR)
		/* hash of the object address (only valid with a non-moving GC) */
3548 		} else if (strcmp (cmethod->name, "InternalGetHashCode") == 0) {
3549 			int dreg = alloc_ireg (cfg);
3550 			int t1 = alloc_ireg (cfg);
3552 			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
3553 			EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
3554 			ins->type = STACK_I4;
3558 		} else if (strcmp (cmethod->name, ".ctor") == 0) {
3559 			MONO_INST_NEW (cfg, ins, OP_NOP);
3560 			MONO_ADD_INS (cfg->cbb, ins);
	/* --- System.Array intrinsics --- */
3564 	} else if (cmethod->klass == mono_defaults.array_class) {
3565 		if (cmethod->name [0] != 'g')
3568 		if (strcmp (cmethod->name, "get_Rank") == 0) {
3569 			int dreg = alloc_ireg (cfg);
3570 			int vtable_reg = alloc_preg (cfg);
3571 			MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, vtable_reg,
3572 						       args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3573 			EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
3574 					       vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3575 			type_from_op (ins, NULL, NULL);
3578 		} else if (strcmp (cmethod->name, "get_Length") == 0) {
3579 			int dreg = alloc_ireg (cfg);
3581 			EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
3582 					       args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
3583 			type_from_op (ins, NULL, NULL);
	/* --- RuntimeHelpers --- */
3588 	} else if (cmethod->klass == runtime_helpers_class) {
3590 		if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
3591 			EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
	/* --- System.Threading.Thread --- */
3595 	} else if (cmethod->klass == mono_defaults.thread_class) {
3596 		if (strcmp (cmethod->name, "get_CurrentThread") == 0 && (ins = mono_arch_get_thread_intrinsic (cfg))) {
3597 			ins->dreg = alloc_preg (cfg);
3598 			ins->type = STACK_OBJ;
3599 			MONO_ADD_INS (cfg->cbb, ins);
3601 		} else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
3602 			MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
3603 			MONO_ADD_INS (cfg->cbb, ins);
	/* --- SZ-array generic helpers --- */
3606 	} else if (mini_class_is_system_array (cmethod->klass) &&
3607 			strcmp (cmethod->name, "GetGenericValueImpl") == 0) {
3608 		MonoInst *addr, *store, *load;
3609 		MonoClass *eklass = mono_class_from_mono_type (fsig->params [1]);
3611 		addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1]);
3612 		EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
3613 		EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
	/* --- System.Threading.Interlocked --- */
3615 	} else if (cmethod->klass->image == mono_defaults.corlib &&
3616 			   (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
3617 			   (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
3620 #if SIZEOF_VOID_P == 8
3621 		if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
3622 			/* 64 bit reads are already atomic */
3623 			MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
3624 			ins->dreg = mono_alloc_preg (cfg);
3625 			ins->inst_basereg = args [0]->dreg;
3626 			ins->inst_offset = 0;
3627 			MONO_ADD_INS (cfg->cbb, ins);
3631 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
3632 		if (strcmp (cmethod->name, "Increment") == 0) {
3633 			MonoInst *ins_iconst;
3636 			if (fsig->params [0]->type == MONO_TYPE_I4)
3637 				opcode = OP_ATOMIC_ADD_NEW_I4;
3638 #if SIZEOF_VOID_P == 8
3639 			else if (fsig->params [0]->type == MONO_TYPE_I8)
3640 				opcode = OP_ATOMIC_ADD_NEW_I8;
3643 				MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
3644 				ins_iconst->inst_c0 = 1;
3645 				ins_iconst->dreg = mono_alloc_ireg (cfg);
3646 				MONO_ADD_INS (cfg->cbb, ins_iconst);
3648 				MONO_INST_NEW (cfg, ins, opcode);
3649 				ins->dreg = mono_alloc_ireg (cfg);
3650 				ins->inst_basereg = args [0]->dreg;
3651 				ins->inst_offset = 0;
3652 				ins->sreg2 = ins_iconst->dreg;
3653 				ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
3654 				MONO_ADD_INS (cfg->cbb, ins);
3656 		} else if (strcmp (cmethod->name, "Decrement") == 0) {
3657 			MonoInst *ins_iconst;
3660 			if (fsig->params [0]->type == MONO_TYPE_I4)
3661 				opcode = OP_ATOMIC_ADD_NEW_I4;
3662 #if SIZEOF_VOID_P == 8
3663 			else if (fsig->params [0]->type == MONO_TYPE_I8)
3664 				opcode = OP_ATOMIC_ADD_NEW_I8;
3667 				MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
3668 				ins_iconst->inst_c0 = -1;
3669 				ins_iconst->dreg = mono_alloc_ireg (cfg);
3670 				MONO_ADD_INS (cfg->cbb, ins_iconst);
3672 				MONO_INST_NEW (cfg, ins, opcode);
3673 				ins->dreg = mono_alloc_ireg (cfg);
3674 				ins->inst_basereg = args [0]->dreg;
3675 				ins->inst_offset = 0;
3676 				ins->sreg2 = ins_iconst->dreg;
3677 				ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
3678 				MONO_ADD_INS (cfg->cbb, ins);
3680 		} else if (strcmp (cmethod->name, "Add") == 0) {
3683 			if (fsig->params [0]->type == MONO_TYPE_I4)
3684 				opcode = OP_ATOMIC_ADD_NEW_I4;
3685 #if SIZEOF_VOID_P == 8
3686 			else if (fsig->params [0]->type == MONO_TYPE_I8)
3687 				opcode = OP_ATOMIC_ADD_NEW_I8;
3691 				MONO_INST_NEW (cfg, ins, opcode);
3692 				ins->dreg = mono_alloc_ireg (cfg);
3693 				ins->inst_basereg = args [0]->dreg;
3694 				ins->inst_offset = 0;
3695 				ins->sreg2 = args [1]->dreg;
				/* was OP_ATOMIC_ADD_I4: always false vs the _NEW_ opcodes set
				 * above, so the I4 overload got STACK_I8 (see Increment) */
3696 				ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
3697 				MONO_ADD_INS (cfg->cbb, ins);
3700 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
3702 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
3703 		if (strcmp (cmethod->name, "Exchange") == 0) {
3706 			if (fsig->params [0]->type == MONO_TYPE_I4)
3707 				opcode = OP_ATOMIC_EXCHANGE_I4;
3708 #if SIZEOF_VOID_P == 8
3709 			else if ((fsig->params [0]->type == MONO_TYPE_I8) ||
3710 					 (fsig->params [0]->type == MONO_TYPE_I) ||
3711 					 (fsig->params [0]->type == MONO_TYPE_OBJECT))
3712 				opcode = OP_ATOMIC_EXCHANGE_I8;
3714 			else if ((fsig->params [0]->type == MONO_TYPE_I) ||
3715 					 (fsig->params [0]->type == MONO_TYPE_OBJECT))
3716 				opcode = OP_ATOMIC_EXCHANGE_I4;
3721 			MONO_INST_NEW (cfg, ins, opcode);
3722 			ins->dreg = mono_alloc_ireg (cfg);
3723 			ins->inst_basereg = args [0]->dreg;
3724 			ins->inst_offset = 0;
3725 			ins->sreg2 = args [1]->dreg;
3726 			MONO_ADD_INS (cfg->cbb, ins);
3728 			switch (fsig->params [0]->type) {
3730 				ins->type = STACK_I4;
3734 				ins->type = STACK_I8;
3736 			case MONO_TYPE_OBJECT:
3737 				ins->type = STACK_OBJ;
3740 				g_assert_not_reached ();
3743 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
3745 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS_IMM
3747 		 * Can't implement CompareExchange methods this way since they have
3748 		 * three arguments. We can implement one of the common cases, where the new
3749 		 * value is a constant.
3751 		if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
3752 			if (fsig->params [1]->type == MONO_TYPE_I4 && args [2]->opcode == OP_ICONST) {
3753 				MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_IMM_I4);
3754 				ins->dreg = alloc_ireg (cfg);
3755 				ins->sreg1 = args [0]->dreg;
3756 				ins->sreg2 = args [1]->dreg;
3757 				ins->backend.data = GINT_TO_POINTER (args [2]->inst_c0);
3758 				ins->type = STACK_I4;
3759 				MONO_ADD_INS (cfg->cbb, ins);
3761 			/* The I8 case is hard to detect, since the arg might be a conv.i8 (iconst) tree */
3763 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS_IMM */
	/* --- misc corlib methods: Debugger.Break, Environment --- */
3767 	} else if (cmethod->klass->image == mono_defaults.corlib) {
3768 		if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
3769 				&& strcmp (cmethod->klass->name, "Debugger") == 0) {
3770 			MONO_INST_NEW (cfg, ins, OP_BREAK);
3771 			MONO_ADD_INS (cfg->cbb, ins);
3774 		if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
3775 				&& strcmp (cmethod->klass->name, "Environment") == 0) {
3776 #ifdef PLATFORM_WIN32
3777 			EMIT_NEW_ICONST (cfg, ins, 1);
3779 			EMIT_NEW_ICONST (cfg, ins, 0);
3783 	} else if (cmethod->klass == mono_defaults.math_class) {
3785 		 * There is general branches code for Min/Max, but it does not work for
3787 		 * http://everything2.com/?node_id=1051618
	/* finally, give the backend a chance (e.g. Math intrinsics) */
3791 	return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
3795 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *
 *   Redirect selected internal calls to managed implementations.
 * Currently: String.InternalAllocateStr is rerouted to the GC's managed
 * string allocator (when one is available) so the allocation happens
 * without a runtime transition.  Returns the emitted call, or falls
 * through when no redirection applies.
 */
inline static MonoInst*
3799 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
3800 					MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
3802 	if (method->klass == mono_defaults.string_class) {
3803 		/* managed string allocation support */
3804 		if (strcmp (method->name, "InternalAllocateStr") == 0) {
3805 			MonoInst *iargs [2];
3806 			MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3807 			MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, FALSE);
			/* call managed_alloc (string vtable, length) */
3810 			EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3811 			iargs [1] = args [0];
3812 			return mono_emit_method_call (cfg, managed_alloc, iargs, this);
/*
 * mono_save_args:
 *
 *   Before inlining a callee, spill the call arguments from the stack
 * slots SP into freshly created local variables, storing them through
 * cfg->args so the inlined body's argument loads see them.  The "this"
 * argument's type is derived from its stack type rather than SIG.
 */
3819 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
3821 	MonoInst *store, *temp;
3824 	for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
3825 		MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
3828 		 * FIXME: We should use *args++ = sp [0], but that would mean the arg
3829 		 * would be different than the MonoInst's used to represent arguments, and
3830 		 * the ldelema implementation can't deal with that.
3831 		 * Solution: When ldelema is used on an inline argument, create a var for
3832 		 * it, emit ldelema on that var, and emit the saving code below in
3833 		 * inline_method () if needed.
3835 		temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
3836 		cfg->args [i] = temp;
3837 		/* This uses cfg->args [i] which is set by the preceeding line */
3838 		EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
3839 		store->cil_code = sp [0]->cil_code;
3844 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
3845 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
#if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *
 *   Debug aid: only allow inlining of callees whose full name starts with
 * the prefix given in the MONO_INLINE_CALLED_METHOD_NAME_LIMIT environment
 * variable.  The prefix is read once and cached; an unset variable becomes
 * the empty string, which disables the filter.
 */
3849 check_inline_called_method_name_limit (MonoMethod *called_method)
3852 	static char *limit = NULL;
3854 	if (limit == NULL) {
3855 		char *limit_string = getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
3857 		if (limit_string != NULL)
3858 			limit = limit_string;
3860 			limit = (char *) "";
3863 	if (limit [0] != '\0') {
3864 		char *called_method_name = mono_method_full_name (called_method, TRUE);
3866 		strncmp_result = strncmp (called_method_name, limit, strlen (limit));
3867 		g_free (called_method_name);
		/* prefix match only; the <= variant below was an ordering experiment */
3869 		//return (strncmp_result <= 0);
3870 		return (strncmp_result == 0);
#if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *
 *   Debug aid: only allow inlining inside callers whose full name starts
 * with the prefix from MONO_INLINE_CALLER_METHOD_NAME_LIMIT.  Mirrors
 * check_inline_called_method_name_limit () for the caller side.
 */
3879 check_inline_caller_method_name_limit (MonoMethod *caller_method)
3882 	static char *limit = NULL;
3884 	if (limit == NULL) {
3885 		char *limit_string = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
3886 		if (limit_string != NULL) {
3887 			limit = limit_string;
3889 			limit = (char *) "";
3893 	if (limit [0] != '\0') {
3894 		char *caller_method_name = mono_method_full_name (caller_method, TRUE);
3896 		strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
3897 		g_free (caller_method_name);
3899 		//return (strncmp_result <= 0);
3900 		return (strncmp_result == 0);
/*
 * inline_method:
 *
 *   Try to inline CMETHOD at the current emission point.  Saves the parts
 * of CFG that the recursive mono_method_to_ir2 () call will clobber
 * (locals, args, cil offset tables, current bblock, generic context, ...),
 * converts the callee's IL between fresh start/end bblocks, then either
 * commits (cost below threshold, or inline_allways) by linking/merging the
 * new bblocks into the caller's CFG, or aborts and restores cfg->cbb.
 * Returns via EMIT_NEW_TEMPLOAD the inlined return value when committed.
 * NOTE(review): many intervening lines (returns, braces, #endifs, case
 * labels) are elided in this listing.
 */
3908 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
3909 	       guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_allways)
3911 	MonoInst *ins, *rvar = NULL;
3912 	MonoMethodHeader *cheader;
3913 	MonoBasicBlock *ebblock, *sbblock;
3915 	MonoMethod *prev_inlined_method;
3916 	MonoInst **prev_locals, **prev_args;
3917 	MonoType **prev_arg_types;
3918 	guint prev_real_offset;
3919 	GHashTable *prev_cbb_hash;
3920 	MonoBasicBlock **prev_cil_offset_to_bb;
3921 	MonoBasicBlock *prev_cbb;
3922 	unsigned char* prev_cil_start;
3923 	guint32 prev_cil_offset_to_bb_len;
3924 	MonoMethod *prev_current_method;
3925 	MonoGenericContext *prev_generic_context;
3927 	g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
3929 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
3930 	if ((! inline_allways) && ! check_inline_called_method_name_limit (cmethod))
3933 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
3934 	if ((! inline_allways) && ! check_inline_caller_method_name_limit (cfg->method))
3938 	if (cfg->verbose_level > 2)
3939 		printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
3941 	if (!cmethod->inline_info) {
3942 		mono_jit_stats.inlineable_methods++;
3943 		cmethod->inline_info = 1;
3945 	/* allocate space to store the return value */
3946 	if (!MONO_TYPE_IS_VOID (fsig->ret)) {
3947 		rvar =  mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
3950 	/* allocate local variables */
3951 	cheader = mono_method_get_header (cmethod);
3952 	prev_locals = cfg->locals;
3953 	cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
3954 	for (i = 0; i < cheader->num_locals; ++i)
3955 		cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
3957 	/* allocate start and end blocks */
3958 	/* This is needed so if the inline is aborted, we can clean up */
3959 	NEW_BBLOCK (cfg, sbblock);
3960 	sbblock->real_offset = real_offset;
3962 	NEW_BBLOCK (cfg, ebblock);
3963 	ebblock->block_num = cfg->num_bblocks++;
3964 	ebblock->real_offset = real_offset;
	/* save every cfg field the recursive IR conversion will overwrite */
3966 	prev_args = cfg->args;
3967 	prev_arg_types = cfg->arg_types;
3968 	prev_inlined_method = cfg->inlined_method;
3969 	cfg->inlined_method = cmethod;
3970 	cfg->ret_var_set = FALSE;
3971 	prev_real_offset = cfg->real_offset;
3972 	prev_cbb_hash = cfg->cbb_hash;
3973 	prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
3974 	prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
3975 	prev_cil_start = cfg->cil_start;
3976 	prev_cbb = cfg->cbb;
3977 	prev_current_method = cfg->current_method;
3978 	prev_generic_context = cfg->generic_context;
	/* convert the callee's IL; costs < 0 signals an aborted inline */
3980 	costs = mono_method_to_ir2 (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, *ip == CEE_CALLVIRT);
3982 	cfg->inlined_method = prev_inlined_method;
3983 	cfg->real_offset = prev_real_offset;
3984 	cfg->cbb_hash = prev_cbb_hash;
3985 	cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
3986 	cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
3987 	cfg->cil_start = prev_cil_start;
3988 	cfg->locals = prev_locals;
3989 	cfg->args = prev_args;
3990 	cfg->arg_types = prev_arg_types;
3991 	cfg->current_method = prev_current_method;
3992 	cfg->generic_context = prev_generic_context;
3994 	if ((costs >= 0 && costs < 60) || inline_allways) {
3995 		if (cfg->verbose_level > 2)
3996 			printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
3998 		mono_jit_stats.inlined_methods++;
4000 		/* always add some code to avoid block split failures */
4001 		MONO_INST_NEW (cfg, ins, OP_NOP);
4002 		MONO_ADD_INS (prev_cbb, ins);
4004 		prev_cbb->next_bb = sbblock;
4005 		link_bblock (cfg, prev_cbb, sbblock);
4008 		 * Get rid of the begin and end bblocks if possible to aid local
4011 		mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
4013 		if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
4014 			mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
4016 		if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
4017 			MonoBasicBlock *prev = ebblock->in_bb [0];
4018 			mono_merge_basic_blocks (cfg, prev, ebblock);
4026 			 * If the inlined method contains only a throw, then the ret var is not
4027 			 * set, so set it to a dummy value.
4029 			if (!cfg->ret_var_set) {
4030 				static double r8_0 = 0.0;
4032 				switch (rvar->type) {
4034 					MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
4037 					MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
4042 					MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
4045 					MONO_INST_NEW (cfg, ins, OP_R8CONST);
4046 					ins->type = STACK_R8;
4047 					ins->inst_p0 = (void*)&r8_0;
4048 					ins->dreg = rvar->dreg;
4049 					MONO_ADD_INS (cfg->cbb, ins);
4052 					MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (fsig->ret));
4055 					g_assert_not_reached ();
4059 			EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
	/* abort path: discard the partial conversion and clear errors */
4064 		if (cfg->verbose_level > 2)
4065 			printf ("INLINE ABORTED %s\n", mono_method_full_name (cmethod, TRUE));
4066 		cfg->exception_type = MONO_EXCEPTION_NONE;
4067 		mono_loader_clear_error ();
4069 		/* This gets rid of the newly added bblocks */
4070 		cfg->cbb = prev_cbb;
4076 * Some of these comments may well be out-of-date.
4077 * Design decisions: we do a single pass over the IL code (and we do bblock
4078 * splitting/merging in the few cases when it's required: a back jump to an IL
4079 * address that was not already seen as bblock starting point).
4080 * Code is validated as we go (full verification is still better left to metadata/verify.c).
4081 * Complex operations are decomposed in simpler ones right away. We need to let the
4082 * arch-specific code peek and poke inside this process somehow (except when the
4083 * optimizations can take advantage of the full semantic info of coarse opcodes).
4084 * All the opcodes of the form opcode.s are 'normalized' to opcode.
4085 * MonoInst->opcode initially is the IL opcode or some simplification of that
4086 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
4087 * opcode with value bigger than OP_LAST.
4088 * At this point the IR can be handed over to an interpreter, a dumb code generator
4089 * or to the optimizing code generator that will translate it to SSA form.
4091 * Profiling directed optimizations.
4092 * We may compile by default with few or no optimizations and instrument the code
4093 * or the user may indicate what methods to optimize the most either in a config file
4094 * or through repeated runs where the compiler applies offline the optimizations to
4095 * each method and then decides if it was worth it.
4098 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
4099 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
4100 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
4101 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
4102 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
4103 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
4104 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
4105 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; goto load_error;}
4107 /* offset from br.s -> br like opcodes */
4108 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *
 *   Return whether the IL address IP still belongs to basic block BB,
 * i.e. no other bblock starts at that offset (a NULL table entry means
 * no bblock boundary there).
 */
4111 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
4113 	MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
4115 	return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   First pass over the IL stream [START, END): create a basic block at
 * every branch target and at the instruction following each branch, so
 * the main conversion loop can split the method at the right offsets.
 * Also marks bblocks that end in CEE_THROW as out-of-line (cold code).
 * NOTE(review): the break statements and some case labels are elided in
 * this listing.
 */
4119 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
4121 	unsigned char *ip = start;
4122 	unsigned char *target;
4125 	MonoBasicBlock *bblock;
4126 	const MonoOpcode *opcode;
4129 		cli_addr = ip - start;
4130 		i = mono_opcode_value ((const guint8 **)&ip, end);
4133 		opcode = &mono_opcodes [i];
		/* advance ip by the operand size; branches also spawn bblocks */
4134 		switch (opcode->argument) {
4135 		case MonoInlineNone:
4138 		case MonoInlineString:
4139 		case MonoInlineType:
4140 		case MonoInlineField:
4141 		case MonoInlineMethod:
4144 		case MonoShortInlineR:
4151 		case MonoShortInlineVar:
4152 		case MonoShortInlineI:
4155 		case MonoShortInlineBrTarget:
4156 			target = start + cli_addr + 2 + (signed char)ip [1];
4157 			GET_BBLOCK (cfg, bblock, target);
4160 				GET_BBLOCK (cfg, bblock, ip);
4162 		case MonoInlineBrTarget:
4163 			target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
4164 			GET_BBLOCK (cfg, bblock, target);
4167 				GET_BBLOCK (cfg, bblock, ip);
4169 		case MonoInlineSwitch: {
4170 			guint32 n = read32 (ip + 1);
4173 			cli_addr += 5 + 4 * n;
4174 			target = start + cli_addr;
4175 			GET_BBLOCK (cfg, bblock, target);
4177 			for (j = 0; j < n; ++j) {
4178 				target = start + cli_addr + (gint32)read32 (ip);
4179 				GET_BBLOCK (cfg, bblock, target);
4189 			g_assert_not_reached ();
4192 		if (i == CEE_THROW) {
4193 			unsigned char *bb_start = ip - 1;
4195 			/* Find the start of the bblock containing the throw */
4197 			while ((bb_start >= start) && !bblock) {
4198 				bblock = cfg->cil_offset_to_bb [(bb_start) - start];
4202 			bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *
 *   Resolve TOKEN to a MonoMethod in the context of M.  For wrapper
 * methods the token indexes the wrapper's private data instead of the
 * metadata tables.  "allow_open": the result may still contain open
 * generic type variables.
 */
static inline MonoMethod *
4212 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4216 	if (m->wrapper_type != MONO_WRAPPER_NONE)
4217 		return mono_method_get_wrapper_data (m, token);
4219 	method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 *
 *   Like mini_get_method_allow_open (), but when not compiling with
 * generic sharing, rejects methods whose declaring class is still an
 * open constructed type (such a method cannot be compiled directly).
 */
static inline MonoMethod *
4225 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4227 	MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
4229 	if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 *
 *   Resolve TOKEN to a MonoClass in the context of METHOD, reading from
 * wrapper data for wrapper methods, and initialize the class before
 * returning it.
 */
static inline MonoClass*
4236 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
4240 	if (method->wrapper_type != MONO_WRAPPER_NONE)
4241 		klass = mono_method_get_wrapper_data (method, token);
4243 		klass = mono_class_get_full (method->klass->image, token, context);
4245 		mono_class_init (klass);
4250 * Returns TRUE if the JIT should abort inlining because "callee"
4251 * is influenced by security attributes.
/*
 * check_linkdemand:
 *
 *   Evaluate CAS LinkDemand declarative security when CALLER calls
 * CALLEE.  On an ECMA link demand, emits a call that throws a
 * SecurityException before the actual call site; other failures are
 * recorded on the cfg as MONO_EXCEPTION_SECURITY_LINKDEMAND (unless an
 * earlier exception is already pending).  Returns TRUE when inlining
 * must be aborted because security attributes are involved.
 */
gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
4258 	if ((cfg->method != caller) && mono_method_has_declsec (callee)) {
4262 	result = mono_declsec_linkdemand (cfg->domain, caller, callee);
4263 	if (result == MONO_JIT_SECURITY_OK)
4266 	if (result == MONO_JIT_LINKDEMAND_ECMA) {
4267 		/* Generate code to throw a SecurityException before the actual call/link */
4268 		MonoSecurityManager *secman = mono_security_manager_get_methods ();
4271 		NEW_ICONST (cfg, args [0], 4);
4272 		NEW_METHODCONST (cfg, args [1], caller);
4273 		mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
4274 	} else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
4275 		/* don't hide previous results */
4276 		cfg->exception_type = MONO_EXCEPTION_SECURITY_LINKDEMAND;
4277 		cfg->exception_data = result;
/*
 * method_access_exception:
 *
 *   Return (and lazily cache) the SecurityManager.MethodAccessException
 * helper used to raise method-access violations at run time.
 */
method_access_exception (void)
4287 	static MonoMethod *method = NULL;
4290 		MonoSecurityManager *secman = mono_security_manager_get_methods ();
4291 		method = mono_class_get_method_from_name (secman->securitymanager,
4292 							  "MethodAccessException", 2);
/*
 * emit_throw_method_access_exception:
 *
 *   Emit a call to the managed MethodAccessException thrower with the
 * offending CALLER/CALLEE pair as arguments.
 */
emit_throw_method_access_exception (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
4300 				    MonoBasicBlock *bblock, unsigned char *ip)
4302 	MonoMethod *thrower = method_access_exception ();
4305 	EMIT_NEW_METHODCONST (cfg, args [0], caller);
4306 	EMIT_NEW_METHODCONST (cfg, args [1], callee);
4307 	mono_emit_method_call (cfg, thrower, args, NULL);
/*
 * verification_exception:
 *
 *   Return (and lazily cache) the SecurityManager.VerificationException
 * helper used to signal unverifiable IL at run time.
 */
verification_exception (void)
4313 	static MonoMethod *method = NULL;
4316 		MonoSecurityManager *secman = mono_security_manager_get_methods ();
4317 		method = mono_class_get_method_from_name (secman->securitymanager,
4318 							  "VerificationException", 0);
/*
 * emit_throw_verification_exception:
 *
 *   Emit a (parameterless) call to the managed VerificationException
 * thrower at the current emission point.
 */
emit_throw_verification_exception (MonoCompile *cfg, MonoBasicBlock *bblock, unsigned char *ip)
4327 	MonoMethod *thrower = verification_exception ();
4329 	mono_emit_method_call (cfg, thrower, NULL, NULL);
/*
 * ensure_method_is_allowed_to_call_method:
 *
 *   CoreCLR security: verify CALLER's security level permits calling
 * CALLEE.  A call is allowed when the caller's level is at least the
 * callee's, or when either side is SafeCritical; otherwise emit a
 * MethodAccessException throw at the call site.
 */
ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
4334 					 MonoBasicBlock *bblock, unsigned char *ip)
4336 	MonoSecurityCoreCLRLevel caller_level = mono_security_core_clr_method_level (caller, TRUE);
4337 	MonoSecurityCoreCLRLevel callee_level = mono_security_core_clr_method_level (callee, TRUE);
4338 	gboolean is_safe = TRUE;
4340 	if (!(caller_level >= callee_level ||
4341 			caller_level == MONO_SECURITY_CORE_CLR_SAFE_CRITICAL ||
4342 			callee_level == MONO_SECURITY_CORE_CLR_SAFE_CRITICAL)) {
4347 		emit_throw_method_access_exception (cfg, caller, callee, bblock, ip);
/*
 * method_is_safe:
 *
 *   Test helper: treats a method literally named "unsafeMethod" as
 * unsafe.  Presumably used only by security test harnesses — TODO
 * confirm against callers (body continues beyond this listing).
 */
4351 method_is_safe (MonoMethod *method)
4354 	if (strcmp (method->name, "unsafeMethod") == 0)
4361 * Check that the IL instructions at ip are the array initialization
4362 * sequence and return the pointer to the data and the size.
/*
 * initialize_array_data:
 *
 *   Recognize the standard array-initialization IL pattern emitted by
 * compilers (dup; ldtoken <field>; call RuntimeHelpers.InitializeArray)
 * following a newarr, and return a pointer to the static initializer
 * data (or, under AOT, the field RVA boxed as a pointer) together with
 * its size in *OUT_SIZE.  Returns NULL when the pattern, element type,
 * or endianness does not allow the optimization.
 * NOTE(review): case labels and early returns are elided in this listing.
 */
4365 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size)
4368 	 * newarr[System.Int32]
4370 	 * ldtoken field valuetype ...
4371 	 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
	/* ip[5] == 0x4: the ldtoken token must be a Field token (table 0x04) */
4373 	if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
4374 		guint32 token = read32 (ip + 7);
4375 		guint32 field_token = read32 (ip + 2);
4376 		guint32 field_index = field_token & 0xffffff;
4378 		const char *data_ptr;
4380 		MonoMethod *cmethod;
4381 		MonoClass *dummy_class;
4382 		MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
4388 		cmethod = mini_get_method (NULL, method, token, NULL, NULL);
		/* the callee must really be corlib's RuntimeHelpers.InitializeArray */
4391 		if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
4393 		switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
4394 		case MONO_TYPE_BOOLEAN:
4398 		/* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
4399 #if G_BYTE_ORDER == G_LITTLE_ENDIAN
4400 		case MONO_TYPE_CHAR:
4410 			return NULL; /* stupid ARM FP swapped format */
4420 		if (size > mono_type_size (field->type, &dummy_align))
4423 		/*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
4424 		if (!method->klass->image->dynamic) {
4425 			field_index = read32 (ip + 2) & 0xffffff;
4426 			mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
4427 			data_ptr = mono_image_rva_map (method->klass->image, rva);
4428 			/*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
4429 			/* for aot code we do the lookup on load */
4430 			if (aot && data_ptr)
4431 				return GUINT_TO_POINTER (rva);
4433 			/*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
4435 			data_ptr = field->data;
/*
 * set_exception_type_from_invalid_il:
 *
 *   Record an InvalidProgramException on the cfg, including the method's
 * full name and a disassembly of the offending IL instruction (or a note
 * that the method body is empty) in the exception message.
 */
4443 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
4445 	char *method_fname = mono_method_full_name (method, TRUE);
4448 	if (mono_method_get_header (method)->code_size == 0)
4449 		method_code = g_strdup ("method body is empty.");
4451 		method_code = mono_disasm_code_one (NULL, method, ip, NULL);
4452 	cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
4453 	cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
4454 	g_free (method_fname);
4455 	g_free (method_code);
/*
 * set_exception_object:
 *
 *   Store a pre-constructed managed exception on CFG so compilation aborts with
 * exactly this object (MONO_EXCEPTION_OBJECT_SUPPLIED) rather than an exception
 * synthesized from an error code/message.
 */
4459 set_exception_object (MonoCompile *cfg, MonoException *exception)
4461 cfg->exception_type = MONO_EXCEPTION_OBJECT_SUPPLIED;
/* cfg->exception_ptr is native storage referencing a managed object: register
 * it as a GC root so the exception is kept alive and updated by the GC. */
4462 MONO_GC_REGISTER_ROOT (cfg->exception_ptr);
4463 cfg->exception_ptr = exception;
/*
 * generic_class_is_reference_type:
 *
 *   Return whether KLASS is a reference type for code-generation purposes.
 * Under generic sharing KLASS may stand for a type variable, so its byval type
 * is first resolved to the basic type used by the shared code.
 */
4467 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
4471 if (cfg->generic_sharing_context)
/* Resolve a possibly-open generic type to the representative the JIT uses. */
4472 type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, &klass->byval_arg);
4474 type = &klass->byval_arg;
4475 return MONO_TYPE_IS_REFERENCE (type);
4479 * mono_decompose_array_access_opts:
4481 * Decompose array access opcodes.
/*
 * Lowers OP_LDLEN / OP_BOUNDS_CHECK / array-new opcodes in every basic block
 * that has has_array_access set, by emitting replacement code into a scratch
 * bblock and splicing it over the original instruction.
 * NOTE(review): this excerpt is missing several original lines (breaks, case
 * labels, closing braces); comments describe only the visible code.
 */
4484 mono_decompose_array_access_opts (MonoCompile *cfg)
4486 MonoBasicBlock *bb, *first_bb;
4489 * Unlike decompose_long_opts, this pass does not alter the CFG of the method so it
4490 * can be executed anytime. It should be run before decompose_long
4494 * Create a dummy bblock and emit code into it so we can use the normal
4495 * code generation macros.
4497 cfg->cbb = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoBasicBlock));
4498 first_bb = cfg->cbb;
4500 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
4502 MonoInst *prev = NULL;
4504 MonoInst *iargs [3];
/* Skip blocks that contain no array-access opcodes at all. */
4507 if (!bb->has_array_access)
4510 if (cfg->verbose_level > 3) mono_print_bb (bb, "BEFORE DECOMPOSE-ARRAY-ACCESS-OPTS ");
/* Reset the scratch bblock before lowering this block's instructions. */
4512 cfg->cbb->code = cfg->cbb->last_ins = NULL;
4518 for (ins = bb->code; ins; ins = ins->next) {
4519 switch (ins->opcode) {
/* ldlen: load the max_length field directly from the MonoArray object. */
4521 NEW_LOAD_MEMBASE (cfg, dest, OP_LOADI4_MEMBASE, ins->dreg, ins->sreg1,
4522 G_STRUCT_OFFSET (MonoArray, max_length));
4523 MONO_ADD_INS (cfg->cbb, dest);
4525 case OP_BOUNDS_CHECK:
/* Delegate to the architecture-specific bounds-check sequence. */
4526 MONO_ARCH_EMIT_BOUNDS_CHECK (cfg, ins->sreg1, ins->inst_imm, ins->sreg2);
/* Array allocation: in shared (appdomain-agnostic) code the domain must be
 * passed at runtime, so call the generic mono_array_new icall. */
4529 if (cfg->opt & MONO_OPT_SHARED) {
4530 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4531 EMIT_NEW_CLASSCONST (cfg, iargs [1], ins->inst_newa_class);
4532 MONO_INST_NEW (cfg, iargs [2], OP_MOVE);
4533 iargs [2]->dreg = ins->sreg1;
4535 dest = mono_emit_jit_icall (cfg, mono_array_new, iargs);
4536 dest->dreg = ins->dreg;
/* Otherwise the vtable is known at compile time: use the faster
 * domain-specific allocation icall. */
4538 MonoVTable *vtable = mono_class_vtable (cfg->domain, mono_array_class_get (ins->inst_newa_class, 1));
4541 NEW_VTABLECONST (cfg, iargs [0], vtable);
4542 MONO_ADD_INS (cfg->cbb, iargs [0]);
4543 MONO_INST_NEW (cfg, iargs [1], OP_MOVE);
4544 iargs [1]->dreg = ins->sreg1;
4546 dest = mono_emit_jit_icall (cfg, mono_array_new_specific, iargs);
4547 dest->dreg = ins->dreg;
4554 g_assert (cfg->cbb == first_bb);
4556 if (cfg->cbb->code || (cfg->cbb != first_bb)) {
4557 /* Replace the original instruction with the new code sequence */
4559 mono_replace_ins (cfg, bb, ins, &prev, first_bb, cfg->cbb);
/* Clear the scratch bblock so it can be reused for the next instruction. */
4560 first_bb->code = first_bb->last_ins = NULL;
4561 first_bb->in_count = first_bb->out_count = 0;
4562 cfg->cbb = first_bb;
4569 if (cfg->verbose_level > 3) mono_print_bb (bb, "AFTER DECOMPOSE-ARRAY-ACCESS-OPTS ");
4579 #ifdef MONO_ARCH_SOFT_FLOAT
4582 * mono_handle_soft_float:
4584 * Soft float support on ARM. We store each double value in a pair of integer vregs,
4585 * similar to long support on 32 bit platforms. 32 bit float values require special
4586 * handling when used as locals, arguments, and in calls.
4587 * One big problem with soft-float is that there are few r4 test cases in our test suite.
/*
 * Rewrites every fp opcode in the method into integer/long equivalents or
 * icalls, using a scratch bblock for the replacement sequences, then runs
 * mono_decompose_long_opts () on the resulting long opcodes.
 * NOTE(review): this excerpt is missing many original lines (case labels,
 * breaks, closing braces); comments describe only the visible code.
 */
4590 mono_handle_soft_float (MonoCompile *cfg)
4592 MonoBasicBlock *bb, *first_bb;
4595 * This pass creates long opcodes, so it should be run before decompose_long_opts ().
4599 * Create a dummy bblock and emit code into it so we can use the normal
4600 * code generation macros.
4602 cfg->cbb = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoBasicBlock));
4603 first_bb = cfg->cbb;
4605 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
4607 MonoInst *prev = NULL;
4610 if (cfg->verbose_level > 3) mono_print_bb (bb, "BEFORE HANDLE-SOFT-FLOAT ");
/* Reset the scratch bblock before processing this block. */
4612 cfg->cbb->code = cfg->cbb->last_ins = NULL;
4618 for (ins = bb->code; ins; ins = ins->next) {
4619 const char *spec = INS_INFO (ins->opcode);
4621 /* Most fp operations are handled automatically by opcode emulation */
4623 switch (ins->opcode) {
/* r8 constant: reinterpret the double's bits as an i8 constant
 * (type-pun through the union, loaded into the integer vreg pair). */
4626 d.vald = *(double*)ins->inst_p0;
4627 MONO_EMIT_NEW_I8CONST (cfg, ins->dreg, d.vall);
4632 /* We load the r8 value */
4633 d.vald = *(float*)ins->inst_p0;
4634 MONO_EMIT_NEW_I8CONST (cfg, ins->dreg, d.vall);
/* fp register moves become long/int moves on the backing int vregs. */
4638 ins->opcode = OP_LMOVE;
4641 ins->opcode = OP_MOVE;
4642 ins->sreg1 = ins->sreg1 + 1;
4645 ins->opcode = OP_MOVE;
4646 ins->sreg1 = ins->sreg1 + 2;
/* Returning an r8: hand back the vreg pair via OP_SETLRET. */
4649 int reg = ins->sreg1;
4651 ins->opcode = OP_SETLRET;
4653 ins->sreg1 = reg + 1;
4654 ins->sreg2 = reg + 2;
/* r8 loads/stores map directly onto i8 loads/stores. */
4657 case OP_LOADR8_MEMBASE:
4658 ins->opcode = OP_LOADI8_MEMBASE;
4660 case OP_STORER8_MEMBASE_REG:
4661 ins->opcode = OP_STOREI8_MEMBASE_REG;
/* r4 stores need an r8->r4 conversion, done in the mono_fstore_r4 icall. */
4663 case OP_STORER4_MEMBASE_REG: {
4664 MonoInst *iargs [2];
4667 /* Arg 1 is the double value */
4668 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
4669 iargs [0]->dreg = ins->sreg1;
4671 /* Arg 2 is the address to store to */
4672 addr_reg = mono_alloc_preg (cfg);
4673 EMIT_NEW_BIALU_IMM (cfg, iargs [1], OP_PADD_IMM, addr_reg, ins->inst_destbasereg, ins->inst_offset);
4674 mono_emit_jit_icall (cfg, mono_fstore_r4, iargs);
/* r4 loads likewise go through mono_fload_r4, which widens to r8. */
4678 case OP_LOADR4_MEMBASE: {
4679 MonoInst *iargs [1];
4683 addr_reg = mono_alloc_preg (cfg);
4684 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, addr_reg, ins->inst_basereg, ins->inst_offset);
4685 conv = mono_emit_jit_icall (cfg, mono_fload_r4, iargs);
4686 conv->dreg = ins->dreg;
4691 case OP_FCALL_MEMBASE: {
4692 MonoCallInst *call = (MonoCallInst*)ins;
/* Calls returning r4 actually return the value in an int reg; rebuild the
 * call as an int call and convert the result back to r8 afterwards. */
4693 if (call->signature->ret->type == MONO_TYPE_R4) {
4694 MonoCallInst *call2;
4695 MonoInst *iargs [1];
4698 /* Convert the call into a call returning an int */
4699 MONO_INST_NEW_CALL (cfg, call2, OP_CALL);
4700 memcpy (call2, call, sizeof (MonoCallInst));
4701 switch (ins->opcode) {
4703 call2->inst.opcode = OP_CALL;
4706 call2->inst.opcode = OP_CALL_REG;
4708 case OP_FCALL_MEMBASE:
4709 call2->inst.opcode = OP_CALL_MEMBASE;
4712 g_assert_not_reached ();
4714 call2->inst.dreg = mono_alloc_ireg (cfg);
4715 MONO_ADD_INS (cfg->cbb, (MonoInst*)call2);
4717 /* FIXME: Optimize this */
4719 /* Emit an r4->r8 conversion */
4720 EMIT_NEW_VARLOADA_VREG (cfg, iargs [0], call2->inst.dreg, &mono_defaults.int32_class->byval_arg);
4721 conv = mono_emit_jit_icall (cfg, mono_fload_r4, iargs);
4722 conv->dreg = ins->dreg;
/* r8-returning calls: retype the fcall as the corresponding lcall, since
 * the result lives in an integer vreg pair. */
4724 switch (ins->opcode) {
4726 ins->opcode = OP_LCALL;
4729 ins->opcode = OP_LCALL_REG;
4731 case OP_FCALL_MEMBASE:
4732 ins->opcode = OP_LCALL_MEMBASE;
4735 g_assert_not_reached ();
4741 MonoJitICallInfo *info;
4742 MonoInst *iargs [2];
4743 MonoInst *call, *cmp, *br;
4745 /* Convert fcompare+fbcc to icall+icompare+beq */
/* The emulation icall is looked up from the *following* branch opcode. */
4747 info = mono_find_jit_opcode_emulation (ins->next->opcode);
4750 /* Create dummy MonoInst's for the arguments */
4751 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
4752 iargs [0]->dreg = ins->sreg1;
4753 MONO_INST_NEW (cfg, iargs [1], OP_ARG);
4754 iargs [1]->dreg = ins->sreg2;
4756 call = mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, iargs);
4758 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE_IMM);
4759 cmp->sreg1 = call->dreg;
4761 MONO_ADD_INS (cfg->cbb, cmp);
/* Branch on the icall result, keeping the original branch targets. */
4763 MONO_INST_NEW (cfg, br, OP_IBNE_UN);
4764 br->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * 2);
4765 br->inst_true_bb = ins->next->inst_true_bb;
4766 br->inst_false_bb = ins->next->inst_false_bb;
4767 MONO_ADD_INS (cfg->cbb, br);
4769 /* The call sequence might include fp ins */
4772 /* Skip fbcc or fccc */
4773 NULLIFY_INS (ins->next);
4781 MonoJitICallInfo *info;
4782 MonoInst *iargs [2];
4785 /* Convert fccc to icall+icompare+iceq */
4787 info = mono_find_jit_opcode_emulation (ins->opcode);
4790 /* Create dummy MonoInst's for the arguments */
4791 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
4792 iargs [0]->dreg = ins->sreg1;
4793 MONO_INST_NEW (cfg, iargs [1], OP_ARG);
4794 iargs [1]->dreg = ins->sreg2;
4796 call = mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, iargs);
/* Materialize the boolean: compare the icall result against 1. */
4798 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, call->dreg, 1);
4799 MONO_EMIT_NEW_UNALU (cfg, OP_ICEQ, ins->dreg, -1);
4801 /* The call sequence might include fp ins */
/* Sanity check: after lowering, no instruction may still use fp vregs. */
4806 if (spec [MONO_INST_SRC1] == 'f' || spec [MONO_INST_SRC2] == 'f' || spec [MONO_INST_DEST] == 'f') {
4807 mono_print_ins (ins);
4808 g_assert_not_reached ();
4813 g_assert (cfg->cbb == first_bb);
4815 if (cfg->cbb->code || (cfg->cbb != first_bb)) {
4816 /* Replace the original instruction with the new code sequence */
4818 mono_replace_ins (cfg, bb, ins, &prev, first_bb, cfg->cbb);
/* Clear the scratch bblock for reuse on the next instruction. */
4819 first_bb->code = first_bb->last_ins = NULL;
4820 first_bb->in_count = first_bb->out_count = 0;
4821 cfg->cbb = first_bb;
4828 if (cfg->verbose_level > 3) mono_print_bb (bb, "AFTER HANDLE-SOFT-FLOAT ");
/* This pass produced long opcodes, so decompose them now. */
4831 mono_decompose_long_opts (cfg);
4837 * mono_method_to_ir: translates IL into basic blocks containing trees
4840 mono_method_to_ir2 (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
4841 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
4842 guint inline_offset, gboolean is_virtual_call)
4844 MonoInst *ins, **sp, **stack_start;
4845 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
4846 MonoMethod *cmethod, *method_definition;
4847 MonoInst **arg_array;
4848 MonoMethodHeader *header;
4850 guint32 token, ins_flag;
4852 MonoClass *constrained_call = NULL;
4853 unsigned char *ip, *end, *target, *err_pos;
4854 static double r8_0 = 0.0;
4855 MonoMethodSignature *sig;
4856 MonoGenericContext *generic_context = NULL;
4857 MonoGenericContainer *generic_container = NULL;
4858 MonoType **param_types;
4859 int i, n, start_new_bblock, dreg;
4860 int num_calls = 0, inline_costs = 0;
4861 int breakpoint_id = 0;
4863 MonoBoolean security, pinvoke;
4864 MonoSecurityManager* secman = NULL;
4865 MonoDeclSecurityActions actions;
4866 GSList *class_inits = NULL;
4867 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
4870 /* serialization and xdomain stuff may need access to private fields and methods */
4871 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
4872 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
4873 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
4874 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
4875 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
4876 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
4878 dont_verify |= mono_security_get_mode () == MONO_SECURITY_MODE_SMCS_HACK;
4880 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
4881 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
4882 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
4883 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
4885 image = method->klass->image;
4886 header = mono_method_get_header (method);
4887 generic_container = mono_method_get_generic_container (method);
4888 sig = mono_method_signature (method);
4889 num_args = sig->hasthis + sig->param_count;
4890 ip = (unsigned char*)header->code;
4891 cfg->cil_start = ip;
4892 end = ip + header->code_size;
4893 mono_jit_stats.cil_code_size += header->code_size;
4895 method_definition = method;
4896 while (method_definition->is_inflated) {
4897 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
4898 method_definition = imethod->declaring;
4901 /* SkipVerification is not allowed if core-clr is enabled */
4902 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
4904 dont_verify_stloc = TRUE;
4907 if (!dont_verify && mini_method_verify (cfg, method_definition))
4908 goto exception_exit;
4910 if (sig->is_inflated)
4911 generic_context = mono_method_get_context (method);
4912 else if (generic_container)
4913 generic_context = &generic_container->context;
4914 cfg->generic_context = generic_context;
4916 if (!cfg->generic_sharing_context)
4917 g_assert (!sig->has_type_parameters);
4919 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
4920 g_assert (method->is_inflated);
4921 g_assert (mono_method_get_context (method)->method_inst);
4923 if (method->is_inflated && mono_method_get_context (method)->method_inst)
4924 g_assert (sig->generic_param_count);
4926 if (cfg->method == method) {
4927 cfg->real_offset = 0;
4929 cfg->real_offset = inline_offset;
4932 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
4933 cfg->cil_offset_to_bb_len = header->code_size;
4935 cfg->current_method = method;
4937 if (cfg->verbose_level > 2)
4938 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
4940 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
4942 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
4943 for (n = 0; n < sig->param_count; ++n)
4944 param_types [n + sig->hasthis] = sig->params [n];
4945 cfg->arg_types = param_types;
4947 dont_inline = g_list_prepend (dont_inline, method);
4948 if (cfg->method == method) {
4950 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
4951 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
4954 NEW_BBLOCK (cfg, start_bblock);
4955 cfg->bb_entry = start_bblock;
4956 start_bblock->cil_code = NULL;
4957 start_bblock->cil_length = 0;
4960 NEW_BBLOCK (cfg, end_bblock);
4961 cfg->bb_exit = end_bblock;
4962 end_bblock->cil_code = NULL;
4963 end_bblock->cil_length = 0;
4964 g_assert (cfg->num_bblocks == 2);
4966 arg_array = cfg->args;
4968 if (header->num_clauses) {
4969 cfg->spvars = g_hash_table_new (NULL, NULL);
4970 cfg->exvars = g_hash_table_new (NULL, NULL);
4972 /* handle exception clauses */
4973 for (i = 0; i < header->num_clauses; ++i) {
4974 MonoBasicBlock *try_bb;
4975 MonoExceptionClause *clause = &header->clauses [i];
4976 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
4977 try_bb->real_offset = clause->try_offset;
4978 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
4979 tblock->real_offset = clause->handler_offset;
4980 tblock->flags |= BB_EXCEPTION_HANDLER;
4982 link_bblock (cfg, try_bb, tblock);
4984 if (*(ip + clause->handler_offset) == CEE_POP)
4985 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
4987 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
4988 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
4989 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
4990 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
4991 MONO_ADD_INS (tblock, ins);
4993 /* todo: is a fault block unsafe to optimize? */
4994 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
4995 tblock->flags |= BB_EXCEPTION_UNSAFE;
4999 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
5001 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
5003 /* catch and filter blocks get the exception object on the stack */
5004 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
5005 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5006 MonoInst *dummy_use;
5008 /* mostly like handle_stack_args (), but just sets the input args */
5009 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
5010 tblock->in_scount = 1;
5011 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5012 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5015 * Add a dummy use for the exvar so its liveness info will be
5019 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
5021 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5022 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
5023 tblock->real_offset = clause->data.filter_offset;
5024 tblock->in_scount = 1;
5025 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5026 /* The filter block shares the exvar with the handler block */
5027 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5028 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5029 MONO_ADD_INS (tblock, ins);
5033 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
5034 clause->data.catch_class &&
5035 cfg->generic_sharing_context &&
5036 mono_class_check_context_used (clause->data.catch_class)) {
5037 if (mono_method_get_context (method)->method_inst)
5038 GENERIC_SHARING_FAILURE (CEE_NOP);
5041 * In shared generic code with catch
5042 * clauses containing type variables
5043 * the exception handling code has to
5044 * be able to get to the rgctx.
5045 * Therefore we have to make sure that
5046 * the vtable/mrgctx argument (for
5047 * static or generic methods) or the
5048 * "this" argument (for non-static
5049 * methods) are live.
5051 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5052 mini_method_get_context (method)->method_inst) {
5053 mono_get_vtable_var (cfg);
5055 MonoInst *dummy_use;
5057 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
5062 arg_array = alloca (sizeof (MonoInst *) * num_args);
5063 cfg->cbb = start_bblock;
5064 cfg->args = arg_array;
5065 mono_save_args (cfg, sig, inline_args);
5068 /* FIRST CODE BLOCK */
5069 NEW_BBLOCK (cfg, bblock);
5070 bblock->cil_code = ip;
5074 ADD_BBLOCK (cfg, bblock);
5076 if (cfg->method == method) {
5077 breakpoint_id = mono_debugger_method_has_breakpoint (method);
5078 if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
5079 MONO_INST_NEW (cfg, ins, OP_BREAK);
5080 MONO_ADD_INS (bblock, ins);
5084 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5085 secman = mono_security_manager_get_methods ();
5087 security = (secman && mono_method_has_declsec (method));
5088 /* at this point having security doesn't mean we have any code to generate */
5089 if (security && (cfg->method == method)) {
5090 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
5091 * And we do not want to enter the next section (with allocation) if we
5092 * have nothing to generate */
5093 security = mono_declsec_get_demands (method, &actions);
5096 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
5097 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
5099 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5100 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5101 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
5103 /* unless the method or it's class has the [SuppressUnmanagedCodeSecurity] attribute */
5104 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5108 mono_custom_attrs_free (custom);
5111 custom = mono_custom_attrs_from_class (wrapped->klass);
5112 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5116 mono_custom_attrs_free (custom);
5119 /* not a P/Invoke after all */
5124 if ((header->init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
5125 /* we use a separate basic block for the initialization code */
5126 NEW_BBLOCK (cfg, init_localsbb);
5127 cfg->bb_init = init_localsbb;
5128 init_localsbb->real_offset = cfg->real_offset;
5129 start_bblock->next_bb = init_localsbb;
5130 init_localsbb->next_bb = bblock;
5131 link_bblock (cfg, start_bblock, init_localsbb);
5132 link_bblock (cfg, init_localsbb, bblock);
5134 cfg->cbb = init_localsbb;
5136 start_bblock->next_bb = bblock;
5137 link_bblock (cfg, start_bblock, bblock);
5140 /* at this point we know, if security is TRUE, that some code needs to be generated */
5141 if (security && (cfg->method == method)) {
5144 mono_jit_stats.cas_demand_generation++;
5146 if (actions.demand.blob) {
5147 /* Add code for SecurityAction.Demand */
5148 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
5149 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
5150 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5151 mono_emit_method_call (cfg, secman->demand, args, NULL);
5153 if (actions.noncasdemand.blob) {
5154 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
5155 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
5156 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
5157 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
5158 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5159 mono_emit_method_call (cfg, secman->demand, args, NULL);
5161 if (actions.demandchoice.blob) {
5162 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
5163 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
5164 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
5165 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
5166 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
5170 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
5172 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
5175 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
5176 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
5177 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5178 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5179 if (!(method->klass && method->klass->image &&
5180 mono_security_core_clr_is_platform_image (method->klass->image))) {
5181 emit_throw_method_access_exception (cfg, method, wrapped, bblock, ip);
5185 if (!method_is_safe (method))
5186 emit_throw_verification_exception (cfg, bblock, ip);
5189 if (header->code_size == 0)
5192 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
5197 if (cfg->method == method)
5198 mono_debug_init_method (cfg, bblock, breakpoint_id);
5200 for (n = 0; n < header->num_locals; ++n) {
5201 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
5206 /* add a check for this != NULL to inlined methods */
5207 if (is_virtual_call) {
5210 NEW_ARGLOAD (cfg, arg_ins, 0);
5211 MONO_ADD_INS (cfg->cbb, arg_ins);
5212 cfg->flags |= MONO_CFG_HAS_CHECK_THIS;
5213 MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, arg_ins->dreg);
5214 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, arg_ins->dreg);
5217 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
5218 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
5221 start_new_bblock = 0;
5225 if (cfg->method == method)
5226 cfg->real_offset = ip - header->code;
5228 cfg->real_offset = inline_offset;
5233 if (start_new_bblock) {
5234 bblock->cil_length = ip - bblock->cil_code;
5235 if (start_new_bblock == 2) {
5236 g_assert (ip == tblock->cil_code);
5238 GET_BBLOCK (cfg, tblock, ip);
5240 bblock->next_bb = tblock;
5243 start_new_bblock = 0;
5244 for (i = 0; i < bblock->in_scount; ++i) {
5245 if (cfg->verbose_level > 3)
5246 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5247 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5251 g_slist_free (class_inits);
5254 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
5255 link_bblock (cfg, bblock, tblock);
5256 if (sp != stack_start) {
5257 handle_stack_args (cfg, stack_start, sp - stack_start);
5259 CHECK_UNVERIFIABLE (cfg);
5261 bblock->next_bb = tblock;
5264 for (i = 0; i < bblock->in_scount; ++i) {
5265 if (cfg->verbose_level > 3)
5266 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5267 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5270 g_slist_free (class_inits);
5275 bblock->real_offset = cfg->real_offset;
5277 if ((cfg->method == method) && cfg->coverage_info) {
5278 guint32 cil_offset = ip - header->code;
5279 cfg->coverage_info->data [cil_offset].cil_code = ip;
5281 /* TODO: Use an increment here */
5282 #if defined(__i386__)
5283 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
5284 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
5286 MONO_ADD_INS (cfg->cbb, ins);
5288 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
5289 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
5293 if (cfg->verbose_level > 3)
5294 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
5299 MONO_INST_NEW (cfg, ins, (*ip) == CEE_NOP ? OP_NOP : OP_BREAK);
5301 MONO_ADD_INS (bblock, ins);
5307 CHECK_STACK_OVF (1);
5308 n = (*ip)-CEE_LDARG_0;
5310 EMIT_NEW_ARGLOAD (cfg, ins, n);
5318 CHECK_STACK_OVF (1);
5319 n = (*ip)-CEE_LDLOC_0;
5321 EMIT_NEW_LOCLOAD (cfg, ins, n);
5332 n = (*ip)-CEE_STLOC_0;
5335 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
5338 opcode = mono_type_to_regmove (cfg, header->locals [n]);
5339 if ((opcode == OP_MOVE) && ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
5340 /* Optimize reg-reg moves away */
5342 * Can't optimize other opcodes, since sp[0] might point to
5343 * the last ins of a decomposed opcode.
5345 sp [0]->dreg = (cfg)->locals [n]->dreg;
5347 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
5355 CHECK_STACK_OVF (1);
5358 EMIT_NEW_ARGLOAD (cfg, ins, n);
5364 CHECK_STACK_OVF (1);
5367 NEW_ARGLOADA (cfg, ins, n);
5368 MONO_ADD_INS (cfg->cbb, ins);
5378 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
5380 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
5385 CHECK_STACK_OVF (1);
5388 EMIT_NEW_LOCLOAD (cfg, ins, n);
5394 CHECK_STACK_OVF (1);
5395 CHECK_LOCAL (ip [1]);
5398 * ldloca inhibits many optimizations so try to get rid of it in common
5401 if (ip + 8 < end && (ip [2] == CEE_PREFIX1) && (ip [3] == CEE_INITOBJ) && ip_in_bb (cfg, bblock, ip + 3)) {
5402 gboolean skip = FALSE;
5404 /* From the INITOBJ case */
5405 token = read32 (ip + 4);
5406 klass = mini_get_class (method, token, generic_context);
5407 CHECK_TYPELOAD (klass);
5408 if (generic_class_is_reference_type (cfg, klass)) {
5409 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [ip [1]]->dreg, NULL);
5410 } else if (MONO_TYPE_IS_REFERENCE (&klass->byval_arg)) {
5411 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [ip [1]]->dreg, NULL);
5412 } else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
5413 MONO_EMIT_NEW_VZERO (cfg, cfg->locals [ip [1]]->dreg, klass);
5425 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
5433 CHECK_LOCAL (ip [1]);
5434 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
5436 EMIT_NEW_LOCSTORE (cfg, ins, ip [1], *sp);
5441 CHECK_STACK_OVF (1);
5442 EMIT_NEW_PCONST (cfg, ins, NULL);
5443 ins->type = STACK_OBJ;
5448 CHECK_STACK_OVF (1);
5449 EMIT_NEW_ICONST (cfg, ins, -1);
5462 CHECK_STACK_OVF (1);
5463 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
5469 CHECK_STACK_OVF (1);
5471 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
5477 CHECK_STACK_OVF (1);
5478 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
5484 CHECK_STACK_OVF (1);
5485 MONO_INST_NEW (cfg, ins, OP_I8CONST);
5486 ins->type = STACK_I8;
5487 ins->dreg = alloc_dreg (cfg, STACK_I8);
5489 ins->inst_l = (gint64)read64 (ip);
5490 MONO_ADD_INS (bblock, ins);
5496 /* FIXME: we should really allocate this only late in the compilation process */
5497 mono_domain_lock (cfg->domain);
5498 f = mono_mempool_alloc (cfg->domain->mp, sizeof (float));
5499 mono_domain_unlock (cfg->domain);
5501 CHECK_STACK_OVF (1);
5502 MONO_INST_NEW (cfg, ins, OP_R4CONST);
5503 ins->type = STACK_R8;
5504 ins->dreg = alloc_dreg (cfg, STACK_R8);
5508 MONO_ADD_INS (bblock, ins);
5516 /* FIXME: we should really allocate this only late in the compilation process */
5517 mono_domain_lock (cfg->domain);
5518 d = mono_mempool_alloc (cfg->domain->mp, sizeof (double));
5519 mono_domain_unlock (cfg->domain);
5521 CHECK_STACK_OVF (1);
5522 MONO_INST_NEW (cfg, ins, OP_R8CONST);
5523 ins->type = STACK_R8;
5524 ins->dreg = alloc_dreg (cfg, STACK_R8);
5528 MONO_ADD_INS (bblock, ins);
5535 MonoInst *temp, *store;
5537 CHECK_STACK_OVF (1);
5541 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
5542 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
5544 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
5547 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
5560 if (sp [0]->type == STACK_R8)
5561 /* we need to pop the value from the x86 FP stack */
5562 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
5569 if (stack_start != sp)
5571 token = read32 (ip + 1);
5572 /* FIXME: check the signature matches */
5573 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
5578 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
5579 GENERIC_SHARING_FAILURE (CEE_JMP);
5581 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
5582 if (check_linkdemand (cfg, method, cmethod))
5584 CHECK_CFG_EXCEPTION;
5589 MonoMethodSignature *fsig = mono_method_signature (cmethod);
5592 /* FIXME: Remove OP_JMP from mini-amd64.c when the old JIT is removed */
5594 /* Handle tail calls similarly to calls */
5595 n = fsig->param_count + fsig->hasthis;
5597 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
5598 call->method = cmethod;
5599 call->tail_call = TRUE;
5600 call->signature = mono_method_signature (cmethod);
5601 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
5602 call->inst.inst_p0 = cmethod;
5603 for (i = 0; i < n; ++i)
5604 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
5606 mono_arch_emit_call (cfg, call);
5607 MONO_ADD_INS (bblock, (MonoInst*)call);
5610 for (i = 0; i < num_args; ++i)
5611 /* Prevent arguments from being optimized away */
5612 arg_array [i]->flags |= MONO_INST_VOLATILE;
5614 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
5615 ins = (MonoInst*)call;
5616 ins->inst_p0 = cmethod;
5617 MONO_ADD_INS (bblock, ins);
5621 start_new_bblock = 1;
5626 case CEE_CALLVIRT: {
5627 MonoInst *addr = NULL;
5628 MonoMethodSignature *fsig = NULL;
5630 int virtual = *ip == CEE_CALLVIRT;
5631 int calli = *ip == CEE_CALLI;
5632 gboolean pass_imt_from_rgctx = FALSE;
5633 MonoInst *imt_arg = NULL;
5634 gboolean pass_vtable = FALSE;
5635 gboolean pass_mrgctx = FALSE;
5636 MonoInst *vtable_arg = NULL;
5637 gboolean check_this = FALSE;
5640 token = read32 (ip + 1);
5647 if (method->wrapper_type != MONO_WRAPPER_NONE)
5648 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
5650 fsig = mono_metadata_parse_signature (image, token);
5652 n = fsig->param_count + fsig->hasthis;
5654 MonoMethod *cil_method;
5656 if (method->wrapper_type != MONO_WRAPPER_NONE) {
5657 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
5658 cil_method = cmethod;
5659 } else if (constrained_call) {
5660 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
5662 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
5663 cil_method = cmethod;
5668 if (!dont_verify && !cfg->skip_visibility) {
5669 MonoMethod *target_method = cil_method;
5670 if (method->is_inflated) {
5671 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
5673 if (!mono_method_can_access_method (method_definition, target_method) &&
5674 !mono_method_can_access_method (method, cil_method))
5675 METHOD_ACCESS_FAILURE;
5678 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
5679 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
5681 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
5682 /* MS.NET seems to silently convert this to a callvirt */
5685 if (!cmethod->klass->inited)
5686 if (!mono_class_init (cmethod->klass))
5689 if (mono_method_signature (cmethod)->pinvoke) {
5690 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
5691 fsig = mono_method_signature (wrapper);
5692 } else if (constrained_call) {
5693 fsig = mono_method_signature (cmethod);
5695 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
5698 mono_save_token_info (cfg, image, token, cmethod);
5700 n = fsig->param_count + fsig->hasthis;
5702 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
5703 if (check_linkdemand (cfg, method, cmethod))
5705 CHECK_CFG_EXCEPTION;
5708 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
5709 mini_class_is_system_array (cmethod->klass)) {
5710 array_rank = cmethod->klass->rank;
5713 if (cmethod->string_ctor)
5714 g_assert_not_reached ();
5717 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
5720 if (!cfg->generic_sharing_context && cmethod)
5721 g_assert (!mono_method_check_context_used (cmethod));
5725 //g_assert (!virtual || fsig->hasthis);
5729 if (constrained_call) {
5731 * We have the `constrained.' prefix opcode.
5733 if (constrained_call->valuetype && !cmethod->klass->valuetype) {
5737 * The type parameter is instantiated as a valuetype,
5738 * but that type doesn't override the method we're
5739 * calling, so we need to box `this'.
5741 dreg = alloc_dreg (cfg, STACK_VTYPE);
5742 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADV_MEMBASE, dreg, sp [0]->dreg, 0);
5743 ins->klass = constrained_call;
5744 sp [0] = handle_box (cfg, ins, constrained_call);
5745 } else if (!constrained_call->valuetype) {
5746 int dreg = alloc_preg (cfg);
5749 * The type parameter is instantiated as a reference
5750 * type. We have a managed pointer on the stack, so
5751 * we need to dereference it here.
5753 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
5754 ins->type = STACK_OBJ;
5756 } else if (cmethod->klass->valuetype)
5758 constrained_call = NULL;
5761 if (*ip != CEE_CALLI && check_call_signature (cfg, fsig, sp))
5765 if (cmethod && (cmethod->flags & METHOD_ATTRIBUTE_STATIC) &&
5766 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
5767 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
5768 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
5769 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
5772 * Pass vtable iff target method might
5773 * be shared, which means that sharing
5774 * is enabled for its class and its
5775 * context is sharable (and it's not a
5778 if (sharing_enabled && context_sharable &&
5779 !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
5783 if (cmethod && mini_method_get_context (cmethod) &&
5784 mini_method_get_context (cmethod)->method_inst) {
5785 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
5786 MonoGenericContext *context = mini_method_get_context (cmethod);
5787 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
5789 g_assert (!pass_vtable);
5791 if (sharing_enabled && context_sharable)
5795 if (cfg->generic_sharing_context && cmethod) {
5796 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
5798 context_used = mono_method_check_context_used (cmethod);
5800 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
5801 /* Generic method interface
5802 calls are resolved via a
5803 helper function and don't
5805 if (!cmethod_context || !cmethod_context->method_inst)
5806 pass_imt_from_rgctx = TRUE;
5810 * If a shared method calls another
5811 * shared method then the caller must
5812 * have a generic sharing context
5813 * because the magic trampoline
5814 * requires it. FIXME: We shouldn't
5815 * have to force the vtable/mrgctx
5816 * variable here. Instead there
5817 * should be a flag in the cfg to
5818 * request a generic sharing context.
5820 if (context_used && method->flags & METHOD_ATTRIBUTE_STATIC)
5821 mono_get_vtable_var (cfg);
5828 EMIT_GET_RGCTX (rgctx, context_used);
5829 vtable_arg = emit_get_rgctx_klass (cfg, context_used, rgctx, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
5831 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
5833 CHECK_TYPELOAD (cmethod->klass);
5834 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
5839 g_assert (!vtable_arg);
5844 EMIT_GET_RGCTX (rgctx, context_used);
5845 vtable_arg = emit_get_rgctx_method (cfg, context_used, rgctx, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
5847 EMIT_NEW_METHOD_RGCTX_CONST (cfg, vtable_arg, cmethod);
5850 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
5851 (cmethod->flags & METHOD_ATTRIBUTE_FINAL)) {
5858 if (pass_imt_from_rgctx) {
5861 g_assert (!pass_vtable);
5864 EMIT_GET_RGCTX (rgctx, context_used);
5865 imt_arg = emit_get_rgctx_method (cfg, context_used, rgctx, cmethod,
5866 MONO_RGCTX_INFO_METHOD);
5872 MONO_INST_NEW (cfg, check, OP_CHECK_THIS);
5873 check->sreg1 = sp [0]->dreg;
5874 MONO_ADD_INS (cfg->cbb, check);
5877 /* Calling virtual generic methods */
5878 if (cmethod && virtual &&
5879 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
5880 !((cmethod->flags & METHOD_ATTRIBUTE_FINAL) &&
5881 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
5882 mono_method_signature (cmethod)->generic_param_count) {
5883 MonoInst *this_temp, *this_arg_temp, *store;
5884 MonoInst *iargs [4];
5886 g_assert (mono_method_signature (cmethod)->is_inflated);
5888 /* Prevent inlining of methods that contain indirect calls */
5891 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
5892 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
5893 MONO_ADD_INS (bblock, store);
5895 /* FIXME: This should be a managed pointer */
5896 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
5898 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
5902 EMIT_GET_RGCTX (rgctx, context_used);
5903 iargs [1] = emit_get_rgctx_method (cfg, context_used, rgctx, cmethod, MONO_RGCTX_INFO_METHOD);
5904 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
5905 addr = mono_emit_jit_icall (cfg,
5906 mono_helper_compile_generic_method, iargs);
5908 EMIT_NEW_METHODCONST (cfg, iargs [1], cmethod);
5909 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
5910 addr = mono_emit_jit_icall (cfg, mono_helper_compile_generic_method, iargs);
5913 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
5915 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
5916 if (!MONO_TYPE_IS_VOID (fsig->ret))
5925 /* FIXME: runtime generic context pointer for jumps? */
5926 /* FIXME: handle this for generic sharing eventually */
5927 if ((ins_flag & MONO_INST_TAILCALL) && !cfg->generic_sharing_context && !vtable_arg && cmethod && (*ip == CEE_CALL) &&
5928 (mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)))) {
5931 /* FIXME: runtime generic context pointer for jumps? */
5932 GENERIC_SHARING_FAILURE (*ip);
5934 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
5937 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
5938 call->tail_call = TRUE;
5939 call->method = cmethod;
5940 call->signature = mono_method_signature (cmethod);
5943 /* Handle tail calls similarly to calls */
5944 call->inst.opcode = OP_TAILCALL;
5946 mono_arch_emit_call (cfg, call);
5949 * We implement tail calls by storing the actual arguments into the
5950 * argument variables, then emitting a CEE_JMP.
5952 for (i = 0; i < n; ++i) {
5953 /* Prevent argument from being register allocated */
5954 arg_array [i]->flags |= MONO_INST_VOLATILE;
5955 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
5959 ins = (MonoInst*)call;
5960 ins->inst_p0 = cmethod;
5961 ins->inst_p1 = arg_array [0];
5962 MONO_ADD_INS (bblock, ins);
5963 link_bblock (cfg, bblock, end_bblock);
5964 start_new_bblock = 1;
5965 /* skip CEE_RET as well */
5971 /* Conversion to a JIT intrinsic */
5972 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
5973 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
5974 type_to_eval_stack_type ((cfg), fsig->ret, ins);
5985 if ((cfg->opt & MONO_OPT_INLINE) && cmethod &&
5986 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || (cmethod->flags & METHOD_ATTRIBUTE_FINAL)) &&
5987 mono_method_check_inlining (cfg, cmethod) &&
5988 !g_list_find (dont_inline, cmethod)) {
5990 gboolean allways = FALSE;
5992 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
5993 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5994 /* Prevent inlining of methods that call wrappers */
5996 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
6000 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, allways))) {
6002 cfg->real_offset += 5;
6005 if (!MONO_TYPE_IS_VOID (fsig->ret))
6006 /* *sp is already set by inline_method */
6009 inline_costs += costs;
6015 inline_costs += 10 * num_calls++;
6017 /* Tail recursion elimination */
6018 if ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
6019 gboolean has_vtargs = FALSE;
6022 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6025 /* keep it simple */
6026 for (i = fsig->param_count - 1; i >= 0; i--) {
6027 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
6032 for (i = 0; i < n; ++i)
6033 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6034 MONO_INST_NEW (cfg, ins, OP_BR);
6035 MONO_ADD_INS (bblock, ins);
6036 tblock = start_bblock->out_bb [0];
6037 link_bblock (cfg, bblock, tblock);
6038 ins->inst_target_bb = tblock;
6039 start_new_bblock = 1;
6041 /* skip the CEE_RET, too */
6042 if (ip_in_bb (cfg, bblock, ip + 5))
6052 /* Generic sharing */
6053 /* FIXME: only do this for generic methods if
6054 they are not shared! */
6056 (cmethod->klass->valuetype ||
6057 (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst && !pass_mrgctx) ||
6058 ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) &&
6059 mono_class_generic_sharing_enabled (cmethod->klass)) ||
6060 (!imt_arg && !mono_method_is_generic_sharable_impl (cmethod, TRUE) &&
6061 (!virtual || cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
6062 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))))) {
6067 g_assert (cfg->generic_sharing_context && cmethod);
6071 * We are compiling a call to a
6072 * generic method from shared code,
6073 * which means that we have to look up
6074 * the method in the rgctx and do an
6078 EMIT_GET_RGCTX (rgctx, context_used);
6079 addr = emit_get_rgctx_method (cfg, context_used, rgctx, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
6082 /* Indirect calls */
6084 g_assert (!imt_arg);
6086 if (*ip == CEE_CALL)
6087 g_assert (context_used);
6088 else if (*ip == CEE_CALLI)
6089 g_assert (!vtable_arg);
6091 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
6092 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
6094 /* Prevent inlining of methods with indirect calls */
6098 #ifdef MONO_ARCH_RGCTX_REG
6100 int rgctx_reg = mono_alloc_preg (cfg);
6102 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
6103 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6104 call = (MonoCallInst*)ins;
6105 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
6106 cfg->uses_rgctx_reg = TRUE;
6111 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6113 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6114 if (fsig->pinvoke && !fsig->ret->byref) {
6118 * Native code might return non register sized integers
6119 * without initializing the upper bits.
6121 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
6122 case OP_LOADI1_MEMBASE:
6123 widen_op = OP_ICONV_TO_I1;
6125 case OP_LOADU1_MEMBASE:
6126 widen_op = OP_ICONV_TO_U1;
6128 case OP_LOADI2_MEMBASE:
6129 widen_op = OP_ICONV_TO_I2;
6131 case OP_LOADU2_MEMBASE:
6132 widen_op = OP_ICONV_TO_U2;
6138 if (widen_op != -1) {
6139 int dreg = alloc_preg (cfg);
6142 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
6143 widen->type = ins->type;
6160 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
6161 if (sp [fsig->param_count]->type == STACK_OBJ) {
6162 MonoInst *iargs [2];
6165 iargs [1] = sp [fsig->param_count];
6167 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
6170 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
6171 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, sp [fsig->param_count]->dreg);
6172 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
6173 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6175 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
6178 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
6179 if (!cmethod->klass->element_class->valuetype && !readonly)
6180 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
6183 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6186 g_assert_not_reached ();
6194 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
6196 if (!MONO_TYPE_IS_VOID (fsig->ret))
6207 #ifdef MONO_ARCH_RGCTX_REG
6209 int rgctx_reg = mono_alloc_preg (cfg);
6211 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
6212 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, NULL);
6213 call = (MonoCallInst*)ins;
6214 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
6215 cfg->uses_rgctx_reg = TRUE;
6219 } else if (imt_arg) {
6220 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, imt_arg);
6222 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, NULL);
6225 if (!MONO_TYPE_IS_VOID (fsig->ret))
6233 if (cfg->method != method) {
6234 /* return from inlined method */
6236 * If in_count == 0, that means the ret is unreachable due to
6237 * being preceeded by a throw. In that case, inline_method () will
6238 * handle setting the return value
6239 * (test case: test_0_inline_throw ()).
6241 if (return_var && cfg->cbb->in_count) {
6245 //g_assert (returnvar != -1);
6246 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
6247 cfg->ret_var_set = TRUE;
6251 MonoType *ret_type = mono_method_signature (method)->ret;
6253 g_assert (!return_var);
6256 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
6259 if (!cfg->vret_addr) {
6262 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
6264 EMIT_NEW_RETLOADA (cfg, ret_addr);
6266 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
6267 ins->klass = mono_class_from_mono_type (ret_type);
6270 #ifdef MONO_ARCH_SOFT_FLOAT
6271 if (!ret_type->byref && ret_type->type == MONO_TYPE_R4) {
6272 MonoInst *iargs [1];
6276 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
6277 mono_arch_emit_setret (cfg, method, conv);
6279 mono_arch_emit_setret (cfg, method, *sp);
6282 mono_arch_emit_setret (cfg, method, *sp);
6287 if (sp != stack_start)
6289 MONO_INST_NEW (cfg, ins, OP_BR);
6291 ins->inst_target_bb = end_bblock;
6292 MONO_ADD_INS (bblock, ins);
6293 link_bblock (cfg, bblock, end_bblock);
6294 start_new_bblock = 1;
6298 MONO_INST_NEW (cfg, ins, OP_BR);
6300 target = ip + 1 + (signed char)(*ip);
6302 GET_BBLOCK (cfg, tblock, target);
6303 link_bblock (cfg, bblock, tblock);
6304 ins->inst_target_bb = tblock;
6305 if (sp != stack_start) {
6306 handle_stack_args (cfg, stack_start, sp - stack_start);
6308 CHECK_UNVERIFIABLE (cfg);
6310 MONO_ADD_INS (bblock, ins);
6311 start_new_bblock = 1;
6312 inline_costs += BRANCH_COST;
6326 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
6328 target = ip + 1 + *(signed char*)ip;
6334 inline_costs += BRANCH_COST;
6338 MONO_INST_NEW (cfg, ins, OP_BR);
6341 target = ip + 4 + (gint32)read32(ip);
6343 GET_BBLOCK (cfg, tblock, target);
6344 link_bblock (cfg, bblock, tblock);
6345 ins->inst_target_bb = tblock;
6346 if (sp != stack_start) {
6347 handle_stack_args (cfg, stack_start, sp - stack_start);
6349 CHECK_UNVERIFIABLE (cfg);
6352 MONO_ADD_INS (bblock, ins);
6354 start_new_bblock = 1;
6355 inline_costs += BRANCH_COST;
6362 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
6363 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
6364 guint32 opsize = is_short ? 1 : 4;
6366 CHECK_OPSIZE (opsize);
6368 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
6371 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
6376 GET_BBLOCK (cfg, tblock, target);
6377 link_bblock (cfg, bblock, tblock);
6378 GET_BBLOCK (cfg, tblock, ip);
6379 link_bblock (cfg, bblock, tblock);
6381 if (sp != stack_start) {
6382 handle_stack_args (cfg, stack_start, sp - stack_start);
6383 CHECK_UNVERIFIABLE (cfg);
6386 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
6387 cmp->sreg1 = sp [0]->dreg;
6388 type_from_op (cmp, sp [0], NULL);
6391 #if SIZEOF_VOID_P == 4
6392 if (cmp->opcode == OP_LCOMPARE_IMM) {
6393 /* Convert it to OP_LCOMPARE */
6394 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6395 ins->type = STACK_I8;
6396 ins->dreg = alloc_dreg (cfg, STACK_I8);
6398 MONO_ADD_INS (bblock, ins);
6399 cmp->opcode = OP_LCOMPARE;
6400 cmp->sreg2 = ins->dreg;
6403 MONO_ADD_INS (bblock, cmp);
6405 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
6406 type_from_op (ins, sp [0], NULL);
6407 MONO_ADD_INS (bblock, ins);
6408 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
6409 GET_BBLOCK (cfg, tblock, target);
6410 ins->inst_true_bb = tblock;
6411 GET_BBLOCK (cfg, tblock, ip);
6412 ins->inst_false_bb = tblock;
6413 start_new_bblock = 2;
6416 inline_costs += BRANCH_COST;
6431 MONO_INST_NEW (cfg, ins, *ip);
6433 target = ip + 4 + (gint32)read32(ip);
6439 inline_costs += BRANCH_COST;
6443 MonoBasicBlock **targets;
6444 MonoBasicBlock *default_bblock;
6445 MonoJumpInfoBBTable *table;
6447 int offset_reg = alloc_preg (cfg);
6448 int target_reg = alloc_preg (cfg);
6449 int table_reg = alloc_preg (cfg);
6450 int sum_reg = alloc_preg (cfg);
6455 n = read32 (ip + 1);
6458 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
6462 CHECK_OPSIZE (n * sizeof (guint32));
6463 target = ip + n * sizeof (guint32);
6465 GET_BBLOCK (cfg, default_bblock, target);
6467 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
6468 for (i = 0; i < n; ++i) {
6469 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
6470 targets [i] = tblock;
6474 if (sp != stack_start) {
6476 * Link the current bb with the targets as well, so handle_stack_args
6477 * will set their in_stack correctly.
6479 link_bblock (cfg, bblock, default_bblock);
6480 for (i = 0; i < n; ++i)
6481 link_bblock (cfg, bblock, targets [i]);
6483 handle_stack_args (cfg, stack_start, sp - stack_start);
6485 CHECK_UNVERIFIABLE (cfg);
6488 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
6489 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
6492 for (i = 0; i < n; ++i)
6493 link_bblock (cfg, bblock, targets [i]);
6495 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
6496 table->table = targets;
6497 table->table_size = n;
6500 /* ARM implements SWITCH statements differently */
6501 /* FIXME: Make it use the generic implementation */
6502 /* the backend code will deal with aot vs normal case */
6503 MONO_INST_NEW (cfg, ins, OP_SWITCH);
6504 ins->sreg1 = src1->dreg;
6505 ins->inst_p0 = table;
6506 ins->inst_many_bb = targets;
6507 ins->klass = GUINT_TO_POINTER (n);
6508 MONO_ADD_INS (cfg->cbb, ins);
6510 if (sizeof (gpointer) == 8)
6511 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
6513 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
6515 #if SIZEOF_VOID_P == 8
6516 /* The upper word might not be zero, and we add it to a 64 bit address later */
6517 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
6520 if (cfg->compile_aot) {
6521 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
6523 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
6524 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
6525 ins->inst_p0 = table;
6526 ins->dreg = table_reg;
6527 MONO_ADD_INS (cfg->cbb, ins);
6530 /* FIXME: Use load_memindex */
6531 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
6532 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
6533 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
6535 start_new_bblock = 1;
6536 inline_costs += (BRANCH_COST * 2);
6556 dreg = alloc_freg (cfg);
6559 dreg = alloc_lreg (cfg);
6562 dreg = alloc_preg (cfg);
6565 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
6566 ins->type = ldind_type [*ip - CEE_LDIND_I1];
6567 ins->flags |= ins_flag;
6569 MONO_ADD_INS (bblock, ins);
6584 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
6585 ins->flags |= ins_flag;
6587 MONO_ADD_INS (bblock, ins);
6595 MONO_INST_NEW (cfg, ins, (*ip));
6597 ins->sreg1 = sp [0]->dreg;
6598 ins->sreg2 = sp [1]->dreg;
6599 type_from_op (ins, sp [0], sp [1]);
6601 ins->dreg = alloc_dreg ((cfg), (ins)->type);
6603 /* Use the immediate opcodes if possible */
6604 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
6605 int imm_opcode = mono_op_to_op_imm (ins->opcode);
6606 if (imm_opcode != -1) {
6607 ins->opcode = imm_opcode;
6608 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
6611 sp [1]->opcode = OP_NOP;
6615 MONO_ADD_INS ((cfg)->cbb, (ins));
6618 mono_decompose_opcode (cfg, ins);
6635 MONO_INST_NEW (cfg, ins, (*ip));
6637 ins->sreg1 = sp [0]->dreg;
6638 ins->sreg2 = sp [1]->dreg;
6639 type_from_op (ins, sp [0], sp [1]);
6641 ADD_WIDEN_OP (ins, sp [0], sp [1]);
6642 ins->dreg = alloc_dreg ((cfg), (ins)->type);
6644 /* FIXME: Pass opcode to is_inst_imm */
6646 /* Use the immediate opcodes if possible */
6647 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
6650 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
6651 if (imm_opcode != -1) {
6652 ins->opcode = imm_opcode;
6653 if (sp [1]->opcode == OP_I8CONST) {
6654 #if SIZEOF_VOID_P == 8
6655 ins->inst_imm = sp [1]->inst_l;
6657 ins->inst_ls_word = sp [1]->inst_ls_word;
6658 ins->inst_ms_word = sp [1]->inst_ms_word;
6662 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
6665 sp [1]->opcode = OP_NOP;
6668 MONO_ADD_INS ((cfg)->cbb, (ins));
6671 mono_decompose_opcode (cfg, ins);
6684 case CEE_CONV_OVF_I8:
6685 case CEE_CONV_OVF_U8:
6689 /* Special case this earlier so we have long constants in the IR */
6690 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
6691 int data = sp [-1]->inst_c0;
6692 sp [-1]->opcode = OP_I8CONST;
6693 sp [-1]->type = STACK_I8;
6694 #if SIZEOF_VOID_P == 8
6695 if ((*ip) == CEE_CONV_U8)
6696 sp [-1]->inst_c0 = (guint32)data;
6698 sp [-1]->inst_c0 = data;
6700 sp [-1]->inst_ls_word = data;
6701 if ((*ip) == CEE_CONV_U8)
6702 sp [-1]->inst_ms_word = 0;
6704 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
6706 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
6713 case CEE_CONV_OVF_I4:
6714 case CEE_CONV_OVF_I1:
6715 case CEE_CONV_OVF_I2:
6716 case CEE_CONV_OVF_I:
6717 case CEE_CONV_OVF_U:
6720 if (sp [-1]->type == STACK_R8) {
6721 ADD_UNOP (CEE_CONV_OVF_I8);
6728 case CEE_CONV_OVF_U1:
6729 case CEE_CONV_OVF_U2:
6730 case CEE_CONV_OVF_U4:
6733 if (sp [-1]->type == STACK_R8) {
6734 ADD_UNOP (CEE_CONV_OVF_U8);
6741 case CEE_CONV_OVF_I1_UN:
6742 case CEE_CONV_OVF_I2_UN:
6743 case CEE_CONV_OVF_I4_UN:
6744 case CEE_CONV_OVF_I8_UN:
6745 case CEE_CONV_OVF_U1_UN:
6746 case CEE_CONV_OVF_U2_UN:
6747 case CEE_CONV_OVF_U4_UN:
6748 case CEE_CONV_OVF_U8_UN:
6749 case CEE_CONV_OVF_I_UN:
6750 case CEE_CONV_OVF_U_UN:
6760 case CEE_ADD_OVF_UN:
6762 case CEE_MUL_OVF_UN:
6764 case CEE_SUB_OVF_UN:
6772 token = read32 (ip + 1);
6773 klass = mini_get_class (method, token, generic_context);
6774 CHECK_TYPELOAD (klass);
6776 if (generic_class_is_reference_type (cfg, klass)) {
6777 MonoInst *store, *load;
6778 int dreg = alloc_preg (cfg);
6780 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
6781 load->flags |= ins_flag;
6782 MONO_ADD_INS (cfg->cbb, load);
6784 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
6785 store->flags |= ins_flag;
6786 MONO_ADD_INS (cfg->cbb, store);
6788 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
6800 token = read32 (ip + 1);
6801 klass = mini_get_class (method, token, generic_context);
6802 CHECK_TYPELOAD (klass);
6804 /* Optimize the common ldobj+stloc combination */
6814 loc_index = ip [5] - CEE_STLOC_0;
6821 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
6822 CHECK_LOCAL (loc_index);
6824 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
6825 ins->dreg = cfg->locals [loc_index]->dreg;
6831 /* Optimize the ldobj+stobj combination */
6832 /* The reference case ends up being a load+store anyway */
6833 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 9) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
6838 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
6845 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
6854 CHECK_STACK_OVF (1);
6856 n = read32 (ip + 1);
6858 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
6859 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
6860 ins->type = STACK_OBJ;
6863 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
6864 MonoInst *iargs [1];
6866 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
6867 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
6869 if (cfg->opt & MONO_OPT_SHARED) {
6870 MonoInst *iargs [3];
6872 if (cfg->compile_aot) {
6873 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
6875 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
6876 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
6877 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
6878 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
6879 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
6881 if (bblock->out_of_line) {
6882 MonoInst *iargs [2];
6884 if (cfg->method->klass->image == mono_defaults.corlib) {
6886 * Avoid relocations in AOT and save some space by using a
6887 * version of helper_ldstr specialized to mscorlib.
6889 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
6890 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
6892 /* Avoid creating the string object */
6893 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
6894 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
6895 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
6899 if (cfg->compile_aot) {
6900 NEW_LDSTRCONST (cfg, ins, image, n);
6902 MONO_ADD_INS (bblock, ins);
6905 NEW_PCONST (cfg, ins, NULL);
6906 ins->type = STACK_OBJ;
6907 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
6909 MONO_ADD_INS (bblock, ins);
6918 MonoInst *iargs [2];
6919 MonoMethodSignature *fsig;
6924 token = read32 (ip + 1);
6925 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6928 fsig = mono_method_get_signature (cmethod, image, token);
6930 mono_save_token_info (cfg, image, token, cmethod);
6932 if (!mono_class_init (cmethod->klass))
6935 if (cfg->generic_sharing_context)
6936 context_used = mono_method_check_context_used (cmethod);
6938 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
6939 if (check_linkdemand (cfg, method, cmethod))
6941 CHECK_CFG_EXCEPTION;
6942 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
6943 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
6946 n = fsig->param_count;
6950 * Generate smaller code for the common newobj <exception> instruction in
6951 * argument checking code.
6953 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib && n <= 2 &&
6954 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
6955 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
6956 MonoInst *iargs [3];
6960 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
6963 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
6967 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
6972 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
6975 g_assert_not_reached ();
6983 /* move the args to allow room for 'this' in the first position */
6989 /* check_call_signature () requires sp[0] to be set */
6990 this_ins.type = STACK_OBJ;
6992 if (check_call_signature (cfg, fsig, sp))
6997 if (mini_class_is_system_array (cmethod->klass)) {
6998 g_assert (!context_used);
6999 EMIT_NEW_METHODCONST (cfg, *sp, cmethod);
7001 /* Avoid varargs in the common case */
7002 if (fsig->param_count == 1)
7003 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
7004 else if (fsig->param_count == 2)
7005 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
7007 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
7008 } else if (cmethod->string_ctor) {
7009 g_assert (!context_used);
7010 /* we simply pass a null pointer */
7011 EMIT_NEW_PCONST (cfg, *sp, NULL);
7012 /* now call the string ctor */
7013 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, sp, NULL, NULL);
7015 MonoInst* callvirt_this_arg = NULL;
7017 if (cmethod->klass->valuetype) {
7018 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
7019 MONO_EMIT_NEW_VZERO (cfg, iargs [0]->dreg, cmethod->klass);
7020 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
7025 * The code generated by mini_emit_virtual_call () expects
7026 * iargs [0] to be a boxed instance, but luckily the vcall
7027 * will be transformed into a normal call there.
7029 } else if (context_used) {
7030 MonoInst *rgctx, *data;
7033 EMIT_GET_RGCTX (rgctx, context_used);
7034 if (cfg->opt & MONO_OPT_SHARED)
7035 rgctx_info = MONO_RGCTX_INFO_KLASS;
7037 rgctx_info = MONO_RGCTX_INFO_VTABLE;
7038 data = emit_get_rgctx_klass (cfg, context_used, rgctx, cmethod->klass, rgctx_info);
7040 alloc = handle_alloc_from_inst (cfg, cmethod->klass, data, FALSE);
7043 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7045 CHECK_TYPELOAD (cmethod->klass);
7048 * TypeInitializationExceptions thrown from the mono_runtime_class_init
7049 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
7050 * As a workaround, we call class cctors before allocating objects.
7052 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
7053 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
7054 if (cfg->verbose_level > 2)
7055 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
7056 class_inits = g_slist_prepend (class_inits, vtable);
7059 alloc = handle_alloc (cfg, cmethod->klass, FALSE);
7064 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
7066 /* Now call the actual ctor */
7067 /* Avoid virtual calls to ctors if possible */
7068 if (cmethod->klass->marshalbyref)
7069 callvirt_this_arg = sp [0];
7071 if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used &&
7072 mono_method_check_inlining (cfg, cmethod) &&
7073 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
7074 !g_list_find (dont_inline, cmethod)) {
7077 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
7078 cfg->real_offset += 5;
7081 inline_costs += costs - 5;
7084 mono_emit_method_call_full (cfg, cmethod, fsig, sp, callvirt_this_arg, NULL);
7086 } else if (context_used &&
7087 (cmethod->klass->valuetype ||
7088 !mono_method_is_generic_sharable_impl (cmethod, TRUE))) {
7089 MonoInst *rgctx, *cmethod_addr;
7091 g_assert (!callvirt_this_arg);
7093 EMIT_GET_RGCTX (rgctx, context_used);
7094 cmethod_addr = emit_get_rgctx_method (cfg, context_used, rgctx,
7095 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7097 mono_emit_calli (cfg, fsig, sp, cmethod_addr);
7100 mono_emit_method_call_full (cfg, cmethod, fsig, sp, callvirt_this_arg, NULL);
7104 if (alloc == NULL) {
7106 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
7107 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
7121 token = read32 (ip + 1);
7122 klass = mini_get_class (method, token, generic_context);
7123 CHECK_TYPELOAD (klass);
7124 if (sp [0]->type != STACK_OBJ)
7127 if (cfg->generic_sharing_context)
7128 context_used = mono_class_check_context_used (klass);
7131 MonoInst *rgctx, *args [2];
7133 g_assert (!method->klass->valuetype);
7139 EMIT_GET_RGCTX (rgctx, context_used);
7140 args [1] = emit_get_rgctx_klass (cfg, context_used, rgctx, klass,
7141 MONO_RGCTX_INFO_KLASS);
7143 ins = mono_emit_jit_icall (cfg, mono_object_castclass, args);
7147 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7148 MonoMethod *mono_castclass;
7149 MonoInst *iargs [1];
7152 mono_castclass = mono_marshal_get_castclass (klass);
7155 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7156 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7157 g_assert (costs > 0);
7160 cfg->real_offset += 5;
7165 inline_costs += costs;
7168 ins = handle_castclass (cfg, klass, *sp);
7178 token = read32 (ip + 1);
7179 klass = mini_get_class (method, token, generic_context);
7180 CHECK_TYPELOAD (klass);
7181 if (sp [0]->type != STACK_OBJ)
7184 if (cfg->generic_sharing_context)
7185 context_used = mono_class_check_context_used (klass);
7188 MonoInst *rgctx, *args [2];
7194 EMIT_GET_RGCTX (rgctx, context_used);
7195 args [1] = emit_get_rgctx_klass (cfg, context_used, rgctx, klass, MONO_RGCTX_INFO_KLASS);
7197 *sp = mono_emit_jit_icall (cfg, mono_object_isinst, args);
7201 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7202 MonoMethod *mono_isinst;
7203 MonoInst *iargs [1];
7206 mono_isinst = mono_marshal_get_isinst (klass);
7209 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
7210 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7211 g_assert (costs > 0);
7214 cfg->real_offset += 5;
7219 inline_costs += costs;
7222 ins = handle_isinst (cfg, klass, *sp);
7229 case CEE_UNBOX_ANY: {
7230 MonoInst *rgctx = NULL;
7235 token = read32 (ip + 1);
7236 klass = mini_get_class (method, token, generic_context);
7237 CHECK_TYPELOAD (klass);
7239 mono_save_token_info (cfg, image, token, klass);
7241 if (cfg->generic_sharing_context)
7242 context_used = mono_class_check_context_used (klass);
7244 if (generic_class_is_reference_type (cfg, klass)) {
7247 MonoInst *iargs [2];
7249 GENERIC_SHARING_FAILURE_IF_VALUETYPE_METHOD (*ip);
7254 EMIT_GET_RGCTX (rgctx, context_used);
7255 iargs [1] = emit_get_rgctx_klass (cfg, context_used, rgctx, klass, MONO_RGCTX_INFO_KLASS);
7256 ins = mono_emit_jit_icall (cfg, mono_object_castclass, iargs);
7260 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7261 MonoMethod *mono_castclass;
7262 MonoInst *iargs [1];
7265 mono_castclass = mono_marshal_get_castclass (klass);
7268 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7269 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7271 g_assert (costs > 0);
7274 cfg->real_offset += 5;
7278 inline_costs += costs;
7280 ins = handle_castclass (cfg, klass, *sp);
7289 EMIT_GET_RGCTX (rgctx, context_used);
7291 if (mono_class_is_nullable (klass)) {
7292 ins = handle_unbox_nullable (cfg, *sp, klass, context_used, rgctx);
7299 ins = handle_unbox (cfg, klass, sp, context_used, rgctx);
7305 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7318 token = read32 (ip + 1);
7319 klass = mini_get_class (method, token, generic_context);
7320 CHECK_TYPELOAD (klass);
7322 mono_save_token_info (cfg, image, token, klass);
7324 if (cfg->generic_sharing_context)
7325 context_used = mono_class_check_context_used (klass);
7327 if (generic_class_is_reference_type (cfg, klass)) {
7333 if (klass == mono_defaults.void_class)
7335 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
7337 /* frequent check in generic code: box (struct), brtrue */
7338 if (!mono_class_is_nullable (klass) &&
7339 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) && (ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S)) {
7340 /*printf ("box-brtrue opt at 0x%04x in %s\n", real_offset, method->name);*/
7342 MONO_INST_NEW (cfg, ins, OP_BR);
7343 if (*ip == CEE_BRTRUE_S) {
7346 target = ip + 1 + (signed char)(*ip);
7351 target = ip + 4 + (gint)(read32 (ip));
7354 GET_BBLOCK (cfg, tblock, target);
7355 link_bblock (cfg, bblock, tblock);
7356 ins->inst_target_bb = tblock;
7357 GET_BBLOCK (cfg, tblock, ip);
7359 * This leads to some inconsistency, since the two bblocks are not
7360 * really connected, but it is needed for handling stack arguments
7361 * correct (See test_0_box_brtrue_opt_regress_81102).
7363 link_bblock (cfg, bblock, tblock);
7364 if (sp != stack_start) {
7365 handle_stack_args (cfg, stack_start, sp - stack_start);
7367 CHECK_UNVERIFIABLE (cfg);
7369 MONO_ADD_INS (bblock, ins);
7370 start_new_bblock = 1;
7379 EMIT_GET_RGCTX (rgctx, context_used);
7380 if (cfg->opt & MONO_OPT_SHARED)
7381 rgctx_info = MONO_RGCTX_INFO_KLASS;
7383 rgctx_info = MONO_RGCTX_INFO_VTABLE;
7384 data = emit_get_rgctx_klass (cfg, context_used, rgctx, klass, rgctx_info);
7385 *sp++ = handle_box_from_inst (cfg, val, klass, context_used, rgctx, data);
7387 *sp++ = handle_box (cfg, val, klass);
7395 MonoInst *rgctx = NULL;
7400 token = read32 (ip + 1);
7401 klass = mini_get_class (method, token, generic_context);
7402 CHECK_TYPELOAD (klass);
7404 mono_save_token_info (cfg, image, token, klass);
7406 if (cfg->generic_sharing_context)
7407 context_used = mono_class_check_context_used (klass);
7410 EMIT_GET_RGCTX (rgctx, context_used);
7412 if (mono_class_is_nullable (klass)) {
7415 val = handle_unbox_nullable (cfg, *sp, klass, context_used, rgctx);
7416 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
7420 ins = handle_unbox (cfg, klass, sp, context_used, rgctx);
7430 MonoClassField *field;
7434 if (*ip == CEE_STFLD) {
7441 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
7443 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
7446 token = read32 (ip + 1);
7447 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7448 field = mono_method_get_wrapper_data (method, token);
7449 klass = field->parent;
7452 field = mono_field_from_token (image, token, &klass, generic_context);
7456 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
7457 FIELD_ACCESS_FAILURE;
7458 mono_class_init (klass);
7460 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
7461 if (*ip == CEE_STFLD) {
7462 if (target_type_is_incompatible (cfg, field->type, sp [1]))
7464 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
7465 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
7466 MonoInst *iargs [5];
7469 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
7470 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
7471 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
7475 if (cfg->opt & MONO_OPT_INLINE) {
7476 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
7477 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7478 g_assert (costs > 0);
7481 cfg->real_offset += 5;
7484 inline_costs += costs;
7487 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
7492 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
7494 store->flags |= ins_flag;
7501 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
7502 MonoMethod *wrapper = (*ip == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
7503 MonoInst *iargs [4];
7506 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
7507 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
7508 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
7509 if ((cfg->opt & MONO_OPT_INLINE) && !MONO_TYPE_ISSTRUCT (mono_method_signature (wrapper)->ret)) {
7510 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
7511 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7513 g_assert (costs > 0);
7516 cfg->real_offset += 5;
7520 inline_costs += costs;
7523 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
7527 if (sp [0]->type == STACK_VTYPE) {
7530 /* Have to compute the address of the variable */
7532 var = get_vreg_to_inst (cfg, sp [0]->dreg);
7534 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
7536 g_assert (var->klass == klass);
7538 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
7542 if (*ip == CEE_LDFLDA) {
7543 dreg = alloc_preg (cfg);
7545 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
7546 ins->klass = mono_class_from_mono_type (field->type);
7547 ins->type = STACK_MP;
7552 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
7553 load->flags |= ins_flag;
7564 MonoClassField *field;
7565 gpointer addr = NULL;
7566 gboolean is_special_static;
7569 token = read32 (ip + 1);
7571 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7572 field = mono_method_get_wrapper_data (method, token);
7573 klass = field->parent;
7576 field = mono_field_from_token (image, token, &klass, generic_context);
7579 mono_class_init (klass);
7580 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
7581 FIELD_ACCESS_FAILURE;
7584 * We can only support shared generic static
7585 * field access on architectures where the
7586 * trampoline code has been extended to handle
7587 * the generic class init.
7589 #ifndef MONO_ARCH_VTABLE_REG
7590 GENERIC_SHARING_FAILURE (*ip);
7593 if (cfg->generic_sharing_context)
7594 context_used = mono_class_check_context_used (klass);
7596 g_assert (!(field->type->attrs & FIELD_ATTRIBUTE_LITERAL));
7598 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
7599 * to be called here.
7601 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
7602 mono_class_vtable (cfg->domain, klass);
7603 CHECK_TYPELOAD (klass);
7605 mono_domain_lock (cfg->domain);
7606 if (cfg->domain->special_static_fields)
7607 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
7608 mono_domain_unlock (cfg->domain);
7610 is_special_static = mono_class_field_is_special_static (field);
7612 /* Generate IR to compute the field address */
7614 if ((cfg->opt & MONO_OPT_SHARED) ||
7615 (cfg->compile_aot && is_special_static) ||
7616 (context_used && is_special_static)) {
7617 MonoInst *iargs [2];
7619 g_assert (field->parent);
7620 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7624 EMIT_GET_RGCTX (rgctx, context_used);
7625 iargs [1] = emit_get_rgctx_field (cfg, context_used, rgctx, field, MONO_RGCTX_INFO_CLASS_FIELD);
7627 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
7629 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
7630 } else if (context_used) {
7631 MonoInst *rgctx, *static_data;
7634 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
7635 method->klass->name_space, method->klass->name, method->name,
7636 depth, field->offset);
7639 if (mono_class_needs_cctor_run (klass, method)) {
7641 MonoInst *vtable, *rgctx;
7643 EMIT_GET_RGCTX (rgctx, context_used);
7644 vtable = emit_get_rgctx_klass (cfg, context_used, rgctx, klass, MONO_RGCTX_INFO_VTABLE);
7646 // FIXME: This doesn't work since it tries to pass the argument
7647 // in the normal way, instead of using MONO_ARCH_VTABLE_REG
7649 * The vtable pointer is always passed in a register regardless of
7650 * the calling convention, so assign it manually, and make a call
7651 * using a signature without parameters.
7653 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable);
7654 #ifdef MONO_ARCH_VTABLE_REG
7655 mono_call_inst_add_outarg_reg (cfg, call, vtable->dreg, MONO_ARCH_VTABLE_REG, FALSE);
7656 cfg->uses_vtable_reg = TRUE;
7663 * The pointer we're computing here is
7665 * super_info.static_data + field->offset
7667 EMIT_GET_RGCTX (rgctx, context_used);
7668 static_data = emit_get_rgctx_klass (cfg, context_used, rgctx, klass, MONO_RGCTX_INFO_STATIC_DATA);
7670 if (field->offset == 0) {
7673 int addr_reg = mono_alloc_preg (cfg);
7674 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
7676 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
7677 MonoInst *iargs [2];
7679 g_assert (field->parent);
7680 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7681 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
7682 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
7684 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
7686 CHECK_TYPELOAD (klass);
7688 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
7689 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
7690 if (cfg->verbose_level > 2)
7691 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, field->name);
7692 class_inits = g_slist_prepend (class_inits, vtable);
7694 if (cfg->run_cctors) {
7696 /* This makes so that inline cannot trigger */
7697 /* .cctors: too many apps depend on them */
7698 /* running with a specific order... */
7699 if (! vtable->initialized)
7701 ex = mono_runtime_class_init_full (vtable, FALSE);
7703 set_exception_object (cfg, ex);
7704 goto exception_exit;
7708 addr = (char*)vtable->data + field->offset;
7710 if (cfg->compile_aot)
7711 EMIT_NEW_SFLDACONST (cfg, ins, field);
7713 EMIT_NEW_PCONST (cfg, ins, addr);
7716 * insert call to mono_threads_get_static_data (GPOINTER_TO_UINT (addr))
7717 * This could be later optimized to do just a couple of
7718 * memory dereferences with constant offsets.
7720 MonoInst *iargs [1];
7721 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
7722 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
7726 /* Generate IR to do the actual load/store operation */
7728 if (*ip == CEE_LDSFLDA) {
7729 ins->klass = mono_class_from_mono_type (field->type);
7731 } else if (*ip == CEE_STSFLD) {
7736 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, ins->dreg, 0, sp [0]->dreg);
7737 store->flags |= ins_flag;
7740 gboolean is_const = FALSE;
7741 MonoVTable *vtable = NULL;
7743 if (!context_used) {
7744 vtable = mono_class_vtable (cfg->domain, klass);
7745 CHECK_TYPELOAD (klass);
7747 if (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) &&
7748 vtable->initialized && (field->type->attrs & FIELD_ATTRIBUTE_INIT_ONLY)) {
7749 gpointer addr = (char*)vtable->data + field->offset;
7750 int ro_type = field->type->type;
7751 if (ro_type == MONO_TYPE_VALUETYPE && field->type->data.klass->enumtype) {
7752 ro_type = field->type->data.klass->enum_basetype->type;
7754 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, field->name);*/
7757 case MONO_TYPE_BOOLEAN:
7759 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
7763 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
7766 case MONO_TYPE_CHAR:
7768 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
7772 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
7777 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
7781 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
7786 case MONO_TYPE_STRING:
7787 case MONO_TYPE_OBJECT:
7788 case MONO_TYPE_CLASS:
7789 case MONO_TYPE_SZARRAY:
7791 case MONO_TYPE_FNPTR:
7792 case MONO_TYPE_ARRAY:
7793 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
7794 type_to_eval_stack_type ((cfg), field->type, *sp);
7799 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
7804 case MONO_TYPE_VALUETYPE:
7814 CHECK_STACK_OVF (1);
7816 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
7817 load->flags |= ins_flag;
7829 token = read32 (ip + 1);
7830 klass = mini_get_class (method, token, generic_context);
7831 CHECK_TYPELOAD (klass);
7832 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
7833 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
7844 const char *data_ptr;
7851 token = read32 (ip + 1);
7853 klass = mini_get_class (method, token, generic_context);
7854 CHECK_TYPELOAD (klass);
7856 if (cfg->generic_sharing_context)
7857 context_used = mono_class_check_context_used (klass);
7863 /* FIXME: Decompose later to help abcrem */
7866 EMIT_GET_RGCTX (rgctx, context_used);
7867 args [0] = emit_get_rgctx_klass (cfg, context_used, rgctx, mono_array_class_get (klass, 1), MONO_RGCTX_INFO_VTABLE);
7872 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
7874 if (cfg->opt & MONO_OPT_SHARED) {
7875 /* Decompose now to avoid problems with references to the domainvar */
7876 MonoInst *iargs [3];
7878 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7879 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
7882 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
7884 /* Decompose later since it is needed by abcrem */
7885 MONO_INST_NEW (cfg, ins, OP_NEWARR);
7886 ins->dreg = alloc_preg (cfg);
7887 ins->sreg1 = sp [0]->dreg;
7888 ins->inst_newa_class = klass;
7889 ins->type = STACK_OBJ;
7891 MONO_ADD_INS (cfg->cbb, ins);
7892 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
7893 cfg->cbb->has_array_access = TRUE;
7895 /* Needed so mono_emit_load_get_addr () gets called */
7896 mono_get_got_var (cfg);
7906 * we inline/optimize the initialization sequence if possible.
7907 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
7908 * for small sizes open code the memcpy
7909 * ensure the rva field is big enough
7911 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size))) {
7912 MonoMethod *memcpy_method = get_memcpy_method ();
7913 MonoInst *iargs [3];
7914 int add_reg = alloc_preg (cfg);
7916 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
7917 if (cfg->compile_aot) {
7918 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(data_ptr), STACK_PTR, NULL);
7920 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
7922 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
7923 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
7932 if (sp [0]->type != STACK_OBJ)
7935 dreg = alloc_preg (cfg);
7936 MONO_INST_NEW (cfg, ins, OP_LDLEN);
7937 ins->dreg = alloc_preg (cfg);
7938 ins->sreg1 = sp [0]->dreg;
7939 ins->type = STACK_I4;
7940 MONO_ADD_INS (cfg->cbb, ins);
7941 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
7942 cfg->cbb->has_array_access = TRUE;
7950 if (sp [0]->type != STACK_OBJ)
7953 cfg->flags |= MONO_CFG_HAS_LDELEMA;
7955 klass = mini_get_class (method, read32 (ip + 1), generic_context);
7956 CHECK_TYPELOAD (klass);
7957 /* we need to make sure that this array is exactly the type it needs
7958 * to be for correctness. the wrappers are lax with their usage
7959 * so we need to ignore them here
7961 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly)
7962 mini_emit_check_array_type (cfg, sp [0], mono_array_class_get (klass, 1));
7965 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
7969 case CEE_LDELEM_ANY:
7980 case CEE_LDELEM_REF: {
7986 if (*ip == CEE_LDELEM_ANY) {
7988 token = read32 (ip + 1);
7989 klass = mini_get_class (method, token, generic_context);
7990 CHECK_TYPELOAD (klass);
7991 mono_class_init (klass);
7994 klass = array_access_to_klass (*ip);
7996 if (sp [0]->type != STACK_OBJ)
7999 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8001 if (sp [1]->opcode == OP_ICONST) {
8002 int array_reg = sp [0]->dreg;
8003 int index_reg = sp [1]->dreg;
8004 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8006 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8007 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
8009 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8010 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
8013 if (*ip == CEE_LDELEM_ANY)
8026 case CEE_STELEM_REF:
8027 case CEE_STELEM_ANY: {
8033 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8035 if (*ip == CEE_STELEM_ANY) {
8037 token = read32 (ip + 1);
8038 klass = mini_get_class (method, token, generic_context);
8039 CHECK_TYPELOAD (klass);
8040 mono_class_init (klass);
8043 klass = array_access_to_klass (*ip);
8045 if (sp [0]->type != STACK_OBJ)
8048 /* storing a NULL doesn't need any of the complex checks in stelemref */
8049 if (generic_class_is_reference_type (cfg, klass) &&
8050 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
8051 MonoMethod* helper = mono_marshal_get_stelemref ();
8052 MonoInst *iargs [3];
8054 if (sp [0]->type != STACK_OBJ)
8056 if (sp [2]->type != STACK_OBJ)
8063 mono_emit_method_call (cfg, helper, iargs, NULL);
8065 if (sp [1]->opcode == OP_ICONST) {
8066 int array_reg = sp [0]->dreg;
8067 int index_reg = sp [1]->dreg;
8068 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8070 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8071 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
8073 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8074 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
8078 if (*ip == CEE_STELEM_ANY)
8085 case CEE_CKFINITE: {
8089 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
8090 ins->sreg1 = sp [0]->dreg;
8091 ins->dreg = alloc_freg (cfg);
8092 ins->type = STACK_R8;
8093 MONO_ADD_INS (bblock, ins);
8096 mono_decompose_opcode (cfg, ins);
8101 case CEE_REFANYVAL: {
8102 MonoInst *src_var, *src;
8104 int klass_reg = alloc_preg (cfg);
8105 int dreg = alloc_preg (cfg);
8108 MONO_INST_NEW (cfg, ins, *ip);
8111 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8112 CHECK_TYPELOAD (klass);
8113 mono_class_init (klass);
8115 if (cfg->generic_sharing_context)
8116 context_used = mono_class_check_context_used (klass);
8119 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
8121 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
8122 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
8123 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
8126 MonoInst *rgctx, *klass_ins;
8128 EMIT_GET_RGCTX (rgctx, context_used);
8129 klass_ins = emit_get_rgctx_klass (cfg, context_used, rgctx, klass, MONO_RGCTX_INFO_KLASS);
8132 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
8133 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
8135 mini_emit_class_check (cfg, klass_reg, klass);
8137 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
8138 ins->type = STACK_MP;
8143 case CEE_MKREFANY: {
8144 MonoInst *loc, *addr;
8147 MONO_INST_NEW (cfg, ins, *ip);
8150 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8151 CHECK_TYPELOAD (klass);
8152 mono_class_init (klass);
8154 if (cfg->generic_sharing_context)
8155 context_used = mono_class_check_context_used (klass);
8157 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
8158 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
8161 MonoInst *rgctx, *const_ins;
8162 int type_reg = alloc_preg (cfg);
8164 EMIT_GET_RGCTX (rgctx, context_used);
8165 const_ins = emit_get_rgctx_klass (cfg, context_used, rgctx, klass, MONO_RGCTX_INFO_KLASS);
8166 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
8167 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8168 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8169 } else if (cfg->compile_aot) {
8170 int const_reg = alloc_preg (cfg);
8171 int type_reg = alloc_preg (cfg);
8173 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
8174 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
8175 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8176 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8178 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
8179 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
8181 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
8183 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
8184 ins->type = STACK_VTYPE;
8185 ins->klass = mono_defaults.typed_reference_class;
8192 MonoClass *handle_class;
8194 CHECK_STACK_OVF (1);
8197 n = read32 (ip + 1);
8199 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
8200 handle = mono_method_get_wrapper_data (method, n);
8201 handle_class = mono_method_get_wrapper_data (method, n + 1);
8202 if (handle_class == mono_defaults.typehandle_class)
8203 handle = &((MonoClass*)handle)->byval_arg;
8206 handle = mono_ldtoken (image, n, &handle_class, generic_context);
8210 mono_class_init (handle_class);
8211 if (cfg->generic_sharing_context) {
8212 if (handle_class == mono_defaults.typehandle_class) {
8213 /* If we get a MONO_TYPE_CLASS
8214 then we need to provide the
8216 instantiation of it. */
8217 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
8220 context_used = mono_class_check_context_used (mono_class_from_mono_type (handle));
8221 } else if (handle_class == mono_defaults.fieldhandle_class)
8222 context_used = mono_class_check_context_used (((MonoClassField*)handle)->parent);
8223 else if (handle_class == mono_defaults.methodhandle_class)
8224 context_used = mono_method_check_context_used (handle);
8226 g_assert_not_reached ();
8229 if (cfg->opt & MONO_OPT_SHARED) {
8230 MonoInst *addr, *vtvar, *iargs [3];
8231 int method_context_used;
8233 if (cfg->generic_sharing_context)
8234 method_context_used = mono_method_check_context_used (method);
8236 method_context_used = 0;
8238 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8240 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
8241 EMIT_NEW_ICONST (cfg, iargs [1], n);
8242 if (method_context_used) {
8245 EMIT_GET_RGCTX (rgctx, method_context_used);
8246 iargs [2] = emit_get_rgctx_method (cfg, method_context_used, rgctx, method, MONO_RGCTX_INFO_METHOD);
8247 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
8249 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
8250 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
8252 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8254 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8256 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8258 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
8259 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
8260 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
8261 (cmethod->klass == mono_defaults.monotype_class->parent) &&
8262 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
8263 MonoClass *tclass = mono_class_from_mono_type (handle);
8265 mono_class_init (tclass);
8269 EMIT_GET_RGCTX (rgctx, context_used);
8270 ins = emit_get_rgctx_klass (cfg, context_used, rgctx, tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
8271 } else if (cfg->compile_aot) {
8272 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n);
8274 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
8276 ins->type = STACK_OBJ;
8277 ins->klass = cmethod->klass;
8280 MonoInst *addr, *vtvar;
8282 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8287 EMIT_GET_RGCTX (rgctx, context_used);
8288 if (handle_class == mono_defaults.typehandle_class) {
8289 ins = emit_get_rgctx_klass (cfg, context_used, rgctx,
8290 mono_class_from_mono_type (handle),
8291 MONO_RGCTX_INFO_TYPE);
8292 } else if (handle_class == mono_defaults.methodhandle_class) {
8293 ins = emit_get_rgctx_method (cfg, context_used, rgctx,
8294 handle, MONO_RGCTX_INFO_METHOD);
8295 } else if (handle_class == mono_defaults.fieldhandle_class) {
8296 ins = emit_get_rgctx_field (cfg, context_used, rgctx,
8297 handle, MONO_RGCTX_INFO_CLASS_FIELD);
8299 g_assert_not_reached ();
8301 } else if (cfg->compile_aot) {
8302 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n);
8304 EMIT_NEW_PCONST (cfg, ins, handle);
8306 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8307 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8308 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8318 MONO_INST_NEW (cfg, ins, OP_THROW);
8320 ins->sreg1 = sp [0]->dreg;
8322 bblock->out_of_line = TRUE;
8323 MONO_ADD_INS (bblock, ins);
8324 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
8325 MONO_ADD_INS (bblock, ins);
8328 link_bblock (cfg, bblock, end_bblock);
8329 start_new_bblock = 1;
8331 case CEE_ENDFINALLY:
8332 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
8333 MONO_ADD_INS (bblock, ins);
8335 start_new_bblock = 1;
8338 * Control will leave the method so empty the stack, otherwise
8339 * the next basic block will start with a nonempty stack.
8341 while (sp != stack_start) {
8349 if (*ip == CEE_LEAVE) {
8351 target = ip + 5 + (gint32)read32(ip + 1);
8354 target = ip + 2 + (signed char)(ip [1]);
8357 /* empty the stack */
8358 while (sp != stack_start) {
8363 * If this leave statement is in a catch block, check for a
8364 * pending exception, and rethrow it if necessary.
8366 for (i = 0; i < header->num_clauses; ++i) {
8367 MonoExceptionClause *clause = &header->clauses [i];
8370 * Use <= in the final comparison to handle clauses with multiple
8371 * leave statements, like in bug #78024.
8372 * The ordering of the exception clauses guarantees that we find the
8375 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len)) {
8377 MonoBasicBlock *dont_throw;
8382 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
8385 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
8387 NEW_BBLOCK (cfg, dont_throw);
8390 * Currently, we allways rethrow the abort exception, despite the
8391 * fact that this is not correct. See thread6.cs for an example.
8392 * But propagating the abort exception is more important than
8393 * getting the sematics right.
8395 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
8396 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
8397 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
8399 MONO_START_BB (cfg, dont_throw);
8404 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
8406 for (tmp = handlers; tmp; tmp = tmp->next) {
8408 link_bblock (cfg, bblock, tblock);
8409 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
8410 ins->inst_target_bb = tblock;
8411 MONO_ADD_INS (bblock, ins);
8413 g_list_free (handlers);
8416 MONO_INST_NEW (cfg, ins, OP_BR);
8417 MONO_ADD_INS (bblock, ins);
8418 GET_BBLOCK (cfg, tblock, target);
8419 link_bblock (cfg, bblock, tblock);
8420 ins->inst_target_bb = tblock;
8421 start_new_bblock = 1;
8423 if (*ip == CEE_LEAVE)
8432 * Mono specific opcodes
8434 case MONO_CUSTOM_PREFIX: {
8436 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
8440 case CEE_MONO_ICALL: {
8442 MonoJitICallInfo *info;
8444 token = read32 (ip + 2);
8445 func = mono_method_get_wrapper_data (method, token);
8446 info = mono_find_jit_icall_by_addr (func);
8449 CHECK_STACK (info->sig->param_count);
8450 sp -= info->sig->param_count;
8452 ins = mono_emit_jit_icall (cfg, info->func, sp);
8453 if (!MONO_TYPE_IS_VOID (info->sig->ret))
8457 inline_costs += 10 * num_calls++;
8461 case CEE_MONO_LDPTR: {
8464 CHECK_STACK_OVF (1);
8466 token = read32 (ip + 2);
8468 ptr = mono_method_get_wrapper_data (method, token);
8469 if (cfg->compile_aot && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (strstr (method->name, "__icall_wrapper_") == method->name)) {
8470 MonoJitICallInfo *callinfo;
8471 const char *icall_name;
8473 icall_name = method->name + strlen ("__icall_wrapper_");
8474 g_assert (icall_name);
8475 callinfo = mono_find_jit_icall_by_name (icall_name);
8476 g_assert (callinfo);
8478 if (ptr == callinfo->func) {
8479 /* Will be transformed into an AOTCONST later */
8480 EMIT_NEW_PCONST (cfg, ins, ptr);
8486 /* FIXME: Generalize this */
8487 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
8488 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
8493 EMIT_NEW_PCONST (cfg, ins, ptr);
8496 inline_costs += 10 * num_calls++;
8497 /* Can't embed random pointers into AOT code */
8498 cfg->disable_aot = 1;
8501 case CEE_MONO_ICALL_ADDR: {
8502 MonoMethod *cmethod;
8505 CHECK_STACK_OVF (1);
8507 token = read32 (ip + 2);
8509 cmethod = mono_method_get_wrapper_data (method, token);
8511 if (cfg->compile_aot) {
8512 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
8514 ptr = mono_lookup_internal_call (cmethod);
8516 EMIT_NEW_PCONST (cfg, ins, ptr);
8522 case CEE_MONO_VTADDR: {
8523 MonoInst *src_var, *src;
8529 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
8530 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
8535 case CEE_MONO_NEWOBJ: {
8536 MonoInst *iargs [2];
8538 CHECK_STACK_OVF (1);
8540 token = read32 (ip + 2);
8541 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
8542 mono_class_init (klass);
8543 NEW_DOMAINCONST (cfg, iargs [0]);
8544 MONO_ADD_INS (cfg->cbb, iargs [0]);
8545 NEW_CLASSCONST (cfg, iargs [1], klass);
8546 MONO_ADD_INS (cfg->cbb, iargs [1]);
8547 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
8549 inline_costs += 10 * num_calls++;
8552 case CEE_MONO_OBJADDR:
8555 MONO_INST_NEW (cfg, ins, OP_MOVE);
8556 ins->dreg = alloc_preg (cfg);
8557 ins->sreg1 = sp [0]->dreg;
8558 ins->type = STACK_MP;
8559 MONO_ADD_INS (cfg->cbb, ins);
8563 case CEE_MONO_LDNATIVEOBJ:
8565 * Similar to LDOBJ, but instead load the unmanaged
8566 * representation of the vtype to the stack.
8571 token = read32 (ip + 2);
8572 klass = mono_method_get_wrapper_data (method, token);
8573 g_assert (klass->valuetype);
8574 mono_class_init (klass);
8577 MonoInst *src, *dest, *temp;
8580 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
8581 temp->backend.is_pinvoke = 1;
8582 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
8583 mini_emit_stobj (cfg, dest, src, klass, TRUE);
8585 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
8586 dest->type = STACK_VTYPE;
8587 dest->klass = klass;
8593 case CEE_MONO_RETOBJ: {
8595 * Same as RET, but return the native representation of a vtype
8598 g_assert (cfg->ret);
8599 g_assert (mono_method_signature (method)->pinvoke);
8604 token = read32 (ip + 2);
8605 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
8607 if (!cfg->vret_addr) {
8608 g_assert (cfg->ret_var_is_local);
8610 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
8612 EMIT_NEW_RETLOADA (cfg, ins);
8614 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
8616 if (sp != stack_start)
8619 MONO_INST_NEW (cfg, ins, OP_BR);
8620 ins->inst_target_bb = end_bblock;
8621 MONO_ADD_INS (bblock, ins);
8622 link_bblock (cfg, bblock, end_bblock);
8623 start_new_bblock = 1;
8627 case CEE_MONO_CISINST:
8628 case CEE_MONO_CCASTCLASS: {
8633 token = read32 (ip + 2);
8634 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
8635 if (ip [1] == CEE_MONO_CISINST)
8636 ins = handle_cisinst (cfg, klass, sp [0]);
8638 ins = handle_ccastclass (cfg, klass, sp [0]);
8644 case CEE_MONO_SAVE_LMF:
8645 case CEE_MONO_RESTORE_LMF:
8646 #ifdef MONO_ARCH_HAVE_LMF_OPS
8647 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
8648 MONO_ADD_INS (bblock, ins);
8649 cfg->need_lmf_area = TRUE;
8653 case CEE_MONO_CLASSCONST:
8654 CHECK_STACK_OVF (1);
8656 token = read32 (ip + 2);
8657 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
8660 inline_costs += 10 * num_calls++;
8662 case CEE_MONO_NOT_TAKEN:
8663 bblock->out_of_line = TRUE;
8667 CHECK_STACK_OVF (1);
8669 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
8670 ins->dreg = alloc_preg (cfg);
8671 ins->inst_offset = (gint32)read32 (ip + 2);
8672 ins->type = STACK_PTR;
8673 MONO_ADD_INS (bblock, ins);
8678 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
8688 /* somewhat similar to LDTOKEN */
8689 MonoInst *addr, *vtvar;
8690 CHECK_STACK_OVF (1);
8691 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
8693 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8694 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
8696 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8697 ins->type = STACK_VTYPE;
8698 ins->klass = mono_defaults.argumenthandle_class;
8711 * The following transforms:
8712 * CEE_CEQ into OP_CEQ
8713 * CEE_CGT into OP_CGT
8714 * CEE_CGT_UN into OP_CGT_UN
8715 * CEE_CLT into OP_CLT
8716 * CEE_CLT_UN into OP_CLT_UN
8718 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
8720 MONO_INST_NEW (cfg, ins, cmp->opcode);
8722 cmp->sreg1 = sp [0]->dreg;
8723 cmp->sreg2 = sp [1]->dreg;
8724 type_from_op (cmp, sp [0], sp [1]);
8726 if ((sp [0]->type == STACK_I8) || ((sizeof (gpointer) == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
8727 cmp->opcode = OP_LCOMPARE;
8728 else if (sp [0]->type == STACK_R8)
8729 cmp->opcode = OP_FCOMPARE;
8731 cmp->opcode = OP_ICOMPARE;
8732 MONO_ADD_INS (bblock, cmp);
8733 ins->type = STACK_I4;
8734 ins->dreg = alloc_dreg (cfg, ins->type);
8735 type_from_op (ins, sp [0], sp [1]);
8737 if (cmp->opcode == OP_FCOMPARE) {
8739 * The backends expect the fceq opcodes to do the
8742 cmp->opcode = OP_NOP;
8743 ins->sreg1 = cmp->sreg1;
8744 ins->sreg2 = cmp->sreg2;
8746 MONO_ADD_INS (bblock, ins);
8753 MonoMethod *cil_method, *ctor_method;
8754 gboolean is_shared = FALSE;
8756 CHECK_STACK_OVF (1);
8758 n = read32 (ip + 2);
8759 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
8762 mono_class_init (cmethod->klass);
8764 mono_save_token_info (cfg, image, n, cmethod);
8766 if (cfg->generic_sharing_context)
8767 context_used = mono_method_check_context_used (cmethod);
8769 if (mono_class_generic_sharing_enabled (cmethod->klass)) {
8770 if ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) &&
8771 (cmethod->klass->generic_class ||
8772 cmethod->klass->generic_container)) {
8775 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst)
8779 cil_method = cmethod;
8780 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
8781 METHOD_ACCESS_FAILURE;
8783 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
8784 if (check_linkdemand (cfg, method, cmethod))
8786 CHECK_CFG_EXCEPTION;
8787 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
8788 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
8792 * Optimize the common case of ldftn+delegate creation
8794 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE) && !defined(HAVE_WRITE_BARRIERS)
8795 /* FIXME: SGEN support */
8796 /* FIXME: handle shared static generic methods */
8797 /* FIXME: handle this in shared code */
8798 if (!is_shared && !context_used && (sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ) && (ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context)) && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
8799 MonoInst *target_ins;
8802 if (cfg->verbose_level > 3)
8803 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
8804 target_ins = sp [-1];
8806 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod);
8817 cmethod = mono_marshal_get_static_rgctx_invoke (cmethod);
8819 EMIT_GET_RGCTX (rgctx, context_used);
8820 argconst = emit_get_rgctx_method (cfg, context_used, rgctx, cmethod, MONO_RGCTX_INFO_METHOD);
8821 } else if (is_shared) {
8822 EMIT_NEW_METHODCONST (cfg, argconst, mono_marshal_get_static_rgctx_invoke (cmethod));
8824 EMIT_NEW_METHODCONST (cfg, argconst, cmethod);
8826 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
8830 inline_costs += 10 * num_calls++;
8833 case CEE_LDVIRTFTN: {
8838 n = read32 (ip + 2);
8839 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
8842 mono_class_init (cmethod->klass);
8844 if (cfg->generic_sharing_context)
8845 context_used = mono_method_check_context_used (cmethod);
8847 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
8848 if (check_linkdemand (cfg, method, cmethod))
8850 CHECK_CFG_EXCEPTION;
8851 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
8852 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
8861 EMIT_GET_RGCTX (rgctx, context_used);
8862 args [1] = emit_get_rgctx_method (cfg, context_used, rgctx, cmethod, MONO_RGCTX_INFO_METHOD);
8863 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
8865 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
8866 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
8870 inline_costs += 10 * num_calls++;
8874 CHECK_STACK_OVF (1);
8876 n = read16 (ip + 2);
8878 EMIT_NEW_ARGLOAD (cfg, ins, n);
8883 CHECK_STACK_OVF (1);
8885 n = read16 (ip + 2);
8887 NEW_ARGLOADA (cfg, ins, n);
8888 MONO_ADD_INS (cfg->cbb, ins);
8896 n = read16 (ip + 2);
8898 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
8900 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
8904 CHECK_STACK_OVF (1);
8906 n = read16 (ip + 2);
8908 EMIT_NEW_LOCLOAD (cfg, ins, n);
8913 CHECK_STACK_OVF (1);
8915 n = read16 (ip + 2);
8917 EMIT_NEW_LOCLOADA (cfg, ins, n);
8925 n = read16 (ip + 2);
8927 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
8929 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
8936 if (sp != stack_start)
8938 if (cfg->method != method)
8940 * Inlining this into a loop in a parent could lead to
8941 * stack overflows which is different behavior than the
8942 * non-inlined case, thus disable inlining in this case.
8944 goto inline_failure;
8946 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
8947 ins->dreg = alloc_preg (cfg);
8948 ins->sreg1 = sp [0]->dreg;
8949 ins->type = STACK_PTR;
8950 MONO_ADD_INS (cfg->cbb, ins);
8952 cfg->flags |= MONO_CFG_HAS_ALLOCA;
8953 if (header->init_locals)
8954 ins->flags |= MONO_INST_INIT;
8959 case CEE_ENDFILTER: {
8960 MonoExceptionClause *clause, *nearest;
8961 int cc, nearest_num;
8965 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
8967 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
8968 ins->sreg1 = (*sp)->dreg;
8969 MONO_ADD_INS (bblock, ins);
8970 start_new_bblock = 1;
8975 for (cc = 0; cc < header->num_clauses; ++cc) {
8976 clause = &header->clauses [cc];
8977 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
8978 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
8979 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
8985 if ((ip - header->code) != nearest->handler_offset)
8990 case CEE_UNALIGNED_:
8991 ins_flag |= MONO_INST_UNALIGNED;
8992 /* FIXME: record alignment? we can assume 1 for now */
8997 ins_flag |= MONO_INST_VOLATILE;
9001 #if !defined(__ppc__) && !defined(__powerpc)
9002 ins_flag |= MONO_INST_TAILCALL;
9003 cfg->flags |= MONO_CFG_HAS_TAIL;
9004 /* Can't inline tail calls at this time */
9005 inline_costs += 100000;
9013 token = read32 (ip + 2);
9014 klass = mini_get_class (method, token, generic_context);
9015 CHECK_TYPELOAD (klass);
9016 if (generic_class_is_reference_type (cfg, klass)) {
9017 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
9019 GENERIC_SHARING_FAILURE (CEE_INITOBJ);
9020 mini_emit_initobj (cfg, *sp, NULL, klass);
9025 case CEE_CONSTRAINED_:
9027 token = read32 (ip + 2);
9028 constrained_call = mono_class_get_full (image, token, generic_context);
9029 CHECK_TYPELOAD (constrained_call);
9034 MonoInst *iargs [3];
9038 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
9039 mini_emit_memcpy2 (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
9040 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
9041 /* emit_memset only works when val == 0 */
9042 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
9047 if (ip [1] == CEE_CPBLK) {
9048 MonoMethod *memcpy_method = get_memcpy_method ();
9049 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
9051 MonoMethod *memset_method = get_memset_method ();
9052 mono_emit_method_call (cfg, memset_method, iargs, NULL);
9062 ins_flag |= MONO_INST_NOTYPECHECK;
9064 ins_flag |= MONO_INST_NORANGECHECK;
9065 /* we ignore the no-nullcheck for now since we
9066 * really do it explicitly only when doing callvirt->call
9072 int handler_offset = -1;
9074 for (i = 0; i < header->num_clauses; ++i) {
9075 MonoExceptionClause *clause = &header->clauses [i];
9076 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
9077 handler_offset = clause->handler_offset;
9082 bblock->flags |= BB_EXCEPTION_UNSAFE;
9084 g_assert (handler_offset != -1);
9086 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
9087 MONO_INST_NEW (cfg, ins, OP_RETHROW);
9088 ins->sreg1 = load->dreg;
9089 MONO_ADD_INS (bblock, ins);
9091 link_bblock (cfg, bblock, end_bblock);
9092 start_new_bblock = 1;
9100 GENERIC_SHARING_FAILURE (CEE_SIZEOF);
9102 CHECK_STACK_OVF (1);
9104 token = read32 (ip + 2);
9105 /* FIXXME: handle generics. */
9106 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC) {
9107 MonoType *type = mono_type_create_from_typespec (image, token);
9108 token = mono_type_size (type, &ialign);
9110 MonoClass *klass = mono_class_get_full (image, token, generic_context);
9111 CHECK_TYPELOAD (klass);
9112 mono_class_init (klass);
9113 token = mono_class_value_size (klass, &align);
9115 EMIT_NEW_ICONST (cfg, ins, token);
9120 case CEE_REFANYTYPE: {
9121 MonoInst *src_var, *src;
9127 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9129 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
9130 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
9131 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typed_reference_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
9141 g_error ("opcode 0xfe 0x%02x not handled", ip [1]);
9146 g_error ("opcode 0x%02x not handled", *ip);
9149 if (start_new_bblock != 1)
9152 bblock->cil_length = ip - bblock->cil_code;
9153 bblock->next_bb = end_bblock;
9155 if (cfg->method == method && cfg->domainvar) {
9157 MonoInst *get_domain;
9159 cfg->cbb = init_localsbb;
9161 if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
9162 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
9165 get_domain->dreg = alloc_preg (cfg);
9166 MONO_ADD_INS (cfg->cbb, get_domain);
9168 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
9169 MONO_ADD_INS (cfg->cbb, store);
9172 if (cfg->method == method && cfg->got_var)
9173 mono_emit_load_got_addr (cfg);
9175 if (header->init_locals) {
9178 cfg->cbb = init_localsbb;
9179 cfg->ip = header->code;
9180 for (i = 0; i < header->num_locals; ++i) {
9181 MonoType *ptype = header->locals [i];
9182 int t = ptype->type;
9183 dreg = cfg->locals [i]->dreg;
9185 if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
9186 t = ptype->data.klass->enum_basetype->type;
9188 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9189 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
9190 MONO_EMIT_NEW_ICONST (cfg, cfg->locals [i]->dreg, 0);
9191 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
9192 MONO_EMIT_NEW_I8CONST (cfg, cfg->locals [i]->dreg, 0);
9193 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
9194 MONO_INST_NEW (cfg, ins, OP_R8CONST);
9195 ins->type = STACK_R8;
9196 ins->inst_p0 = (void*)&r8_0;
9197 ins->dreg = alloc_dreg (cfg, STACK_R8);
9198 MONO_ADD_INS (init_localsbb, ins);
9199 EMIT_NEW_LOCSTORE (cfg, store, i, ins);
9200 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
9201 + ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
9202 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
9204 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9211 if (cfg->method == method) {
9213 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
9214 bb->region = mono_find_block_region (cfg, bb->real_offset);
9216 mono_create_spvar_for_region (cfg, bb->region);
9217 if (cfg->verbose_level > 2)
9218 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
9222 g_slist_free (class_inits);
9223 dont_inline = g_list_remove (dont_inline, method);
9225 if (inline_costs < 0) {
9228 /* Method is too large */
9229 mname = mono_method_full_name (method, TRUE);
9230 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
9231 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
9236 if ((cfg->verbose_level > 1) && (cfg->method == method))
9237 mono_print_code (cfg, "AFTER METHOD-TO-IR");
9239 return inline_costs;
9242 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
9243 g_slist_free (class_inits);
9244 dont_inline = g_list_remove (dont_inline, method);
9248 g_slist_free (class_inits);
9249 dont_inline = g_list_remove (dont_inline, method);
9253 g_slist_free (class_inits);
9254 dont_inline = g_list_remove (dont_inline, method);
9255 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
9259 g_slist_free (class_inits);
9260 dont_inline = g_list_remove (dont_inline, method);
9261 set_exception_type_from_invalid_il (cfg, method, ip);
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Map a store-from-register membase opcode (OP_STORE*_MEMBASE_REG) to its
 * store-immediate counterpart (OP_STORE*_MEMBASE_IMM), preserving the operand
 * width encoded in the opcode name.  Asserts for opcodes with no immediate form.
 * NOTE(review): the switch header and closing lines are elided in this listing.
 */
9266 store_membase_reg_to_store_membase_imm (int opcode)
9269 case OP_STORE_MEMBASE_REG:
9270 return OP_STORE_MEMBASE_IMM;
9271 case OP_STOREI1_MEMBASE_REG:
9272 return OP_STOREI1_MEMBASE_IMM;
9273 case OP_STOREI2_MEMBASE_REG:
9274 return OP_STOREI2_MEMBASE_IMM;
9275 case OP_STOREI4_MEMBASE_REG:
9276 return OP_STOREI4_MEMBASE_IMM;
9277 case OP_STOREI8_MEMBASE_REG:
9278 return OP_STOREI8_MEMBASE_IMM;
/* no immediate variant exists for the remaining store opcodes */
9280 g_assert_not_reached ();
/*
 * mono_op_to_op_imm:
 *
 *   Map an opcode whose second operand is a register to the variant taking an
 * immediate instead (e.g. div/rem/shift -> *_IMM, compares -> *COMPARE_IMM,
 * register stores -> *_MEMBASE_IMM).  x86/amd64-specific membase/push forms are
 * handled under the arch #ifdefs below.
 * NOTE(review): many case labels are elided in this listing; the behavior for
 * opcodes with no immediate form is not visible here -- confirm against the
 * full source before relying on it.
 */
9287 mono_op_to_op_imm (int opcode)
9297 return OP_IDIV_UN_IMM;
9301 return OP_IREM_UN_IMM;
9315 return OP_ISHR_UN_IMM;
9332 return OP_LSHR_UN_IMM;
9335 return OP_COMPARE_IMM;
9337 return OP_ICOMPARE_IMM;
9339 return OP_LCOMPARE_IMM;
/* register stores become immediate stores of the same width */
9341 case OP_STORE_MEMBASE_REG:
9342 return OP_STORE_MEMBASE_IMM;
9343 case OP_STOREI1_MEMBASE_REG:
9344 return OP_STOREI1_MEMBASE_IMM;
9345 case OP_STOREI2_MEMBASE_REG:
9346 return OP_STOREI2_MEMBASE_IMM;
9347 case OP_STOREI4_MEMBASE_REG:
9348 return OP_STOREI4_MEMBASE_IMM;
/* x86/amd64 can push and compare immediates directly */
9350 #if defined(__i386__) || defined (__x86_64__)
9352 return OP_X86_PUSH_IMM;
9353 case OP_X86_COMPARE_MEMBASE_REG:
9354 return OP_X86_COMPARE_MEMBASE_IMM;
9356 #if defined(__x86_64__)
9357 case OP_AMD64_ICOMPARE_MEMBASE_REG:
9358 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
9360 case OP_VOIDCALL_REG:
9369 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Map a CIL CEE_LDIND_* indirect-load opcode to the corresponding linear-IR
 * OP_LOAD*_MEMBASE opcode of the same width/signedness.  Pointer-sized and
 * object loads both map to the generic OP_LOAD_MEMBASE.  Asserts on any
 * opcode that is not an LDIND form.
 * NOTE(review): the case labels are elided in this listing; only the return
 * values are visible.
 */
9376 ldind_to_load_membase (int opcode)
9380 return OP_LOADI1_MEMBASE;
9382 return OP_LOADU1_MEMBASE;
9384 return OP_LOADI2_MEMBASE;
9386 return OP_LOADU2_MEMBASE;
9388 return OP_LOADI4_MEMBASE;
9390 return OP_LOADU4_MEMBASE;
9392 return OP_LOAD_MEMBASE;
9394 return OP_LOAD_MEMBASE;
9396 return OP_LOADI8_MEMBASE;
9398 return OP_LOADR4_MEMBASE;
9400 return OP_LOADR8_MEMBASE;
9402 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Map a CIL CEE_STIND_* indirect-store opcode to the corresponding
 * OP_STORE*_MEMBASE_REG opcode of the same width.  Asserts on any opcode
 * that is not an STIND form.
 * NOTE(review): the case labels are elided in this listing.
 */
9409 stind_to_store_membase (int opcode)
9413 return OP_STOREI1_MEMBASE_REG;
9415 return OP_STOREI2_MEMBASE_REG;
9417 return OP_STOREI4_MEMBASE_REG;
9420 return OP_STORE_MEMBASE_REG;
9422 return OP_STOREI8_MEMBASE_REG;
9424 return OP_STORER4_MEMBASE_REG;
9426 return OP_STORER8_MEMBASE_REG;
9428 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map an OP_LOAD*_MEMBASE opcode to the corresponding absolute-address
 * OP_LOAD*_MEM form.  Only enabled on x86/amd64 (see the FIXME below about
 * introducing a proper arch capability macro); the 64-bit load is further
 * guarded by SIZEOF_VOID_P == 8.
 * NOTE(review): the fall-through/default behavior for other opcodes is elided
 * in this listing.
 */
9435 mono_load_membase_to_load_mem (int opcode)
9437 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
9438 #if defined(__i386__) || defined(__x86_64__)
9440 case OP_LOAD_MEMBASE:
9442 case OP_LOADU1_MEMBASE:
9443 return OP_LOADU1_MEM;
9444 case OP_LOADU2_MEMBASE:
9445 return OP_LOADU2_MEM;
9446 case OP_LOADI4_MEMBASE:
9447 return OP_LOADI4_MEM;
9448 case OP_LOADU4_MEMBASE:
9449 return OP_LOADU4_MEM;
9450 #if SIZEOF_VOID_P == 8
9451 case OP_LOADI8_MEMBASE:
9452 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   Given an ALU opcode whose result feeds a store described by STORE_OPCODE,
 * return the x86/amd64 read-modify-write opcode that performs the operation
 * directly on the memory destination (e.g. add reg -> "add [base+disp], reg"),
 * fusing the ALU op and the store into one instruction.  Register and
 * immediate source variants are handled separately.  Only pointer/int-sized
 * stores qualify (the guard checks below); other opcodes fall through.
 * NOTE(review): the case labels and default return are elided in this listing.
 */
9461 op_to_op_dest_membase (int store_opcode, int opcode)
9463 #if defined(__i386__)
/* on x86 only 32-bit/pointer-sized stores can be fused */
9464 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
9469 return OP_X86_ADD_MEMBASE_REG;
9471 return OP_X86_SUB_MEMBASE_REG;
9473 return OP_X86_AND_MEMBASE_REG;
9475 return OP_X86_OR_MEMBASE_REG;
9477 return OP_X86_XOR_MEMBASE_REG;
9480 return OP_X86_ADD_MEMBASE_IMM;
9483 return OP_X86_SUB_MEMBASE_IMM;
9486 return OP_X86_AND_MEMBASE_IMM;
9489 return OP_X86_OR_MEMBASE_IMM;
9492 return OP_X86_XOR_MEMBASE_IMM;
9498 #if defined(__x86_64__)
/* amd64 additionally allows 64-bit stores to be fused */
9499 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
9504 return OP_X86_ADD_MEMBASE_REG;
9506 return OP_X86_SUB_MEMBASE_REG;
9508 return OP_X86_AND_MEMBASE_REG;
9510 return OP_X86_OR_MEMBASE_REG;
9512 return OP_X86_XOR_MEMBASE_REG;
9514 return OP_X86_ADD_MEMBASE_IMM;
9516 return OP_X86_SUB_MEMBASE_IMM;
9518 return OP_X86_AND_MEMBASE_IMM;
9520 return OP_X86_OR_MEMBASE_IMM;
9522 return OP_X86_XOR_MEMBASE_IMM;
/* 64-bit (AMD64_*) variants of the same fusions */
9524 return OP_AMD64_ADD_MEMBASE_REG;
9526 return OP_AMD64_SUB_MEMBASE_REG;
9528 return OP_AMD64_AND_MEMBASE_REG;
9530 return OP_AMD64_OR_MEMBASE_REG;
9532 return OP_AMD64_XOR_MEMBASE_REG;
9535 return OP_AMD64_ADD_MEMBASE_IMM;
9538 return OP_AMD64_SUB_MEMBASE_IMM;
9541 return OP_AMD64_AND_MEMBASE_IMM;
9544 return OP_AMD64_OR_MEMBASE_IMM;
9547 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Fuse a compare-set opcode with the byte store that consumes it into a
 * single x86/amd64 SETcc-to-memory opcode (OP_X86_SETEQ_MEMBASE /
 * OP_X86_SETNE_MEMBASE).  Only a 1-byte store (OP_STOREI1_MEMBASE_REG) can
 * receive a SETcc result.
 * NOTE(review): the surrounding switch and the default return are elided in
 * this listing.
 */
9557 op_to_op_store_membase (int store_opcode, int opcode)
9559 #if defined(__i386__) || defined(__x86_64__)
9562 if (store_opcode == OP_STOREI1_MEMBASE_REG)
9563 return OP_X86_SETEQ_MEMBASE;
9565 if (store_opcode == OP_STOREI1_MEMBASE_REG)
9566 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Fold a preceding load (LOAD_OPCODE) into the FIRST source operand of
 * OPCODE, producing an x86/amd64 opcode that reads that operand directly
 * from memory (push/compare with a membase operand).  Only pointer/int-sized
 * loads qualify; unsupported combinations fall through.
 * NOTE(review): several case labels, the #else/#endif structure and the
 * default return are elided in this listing.
 */
9574 op_to_op_src1_membase (int load_opcode, int opcode)
9577 /* FIXME: This has sign extension issues */
/* byte compare against an immediate: use the 8-bit membase compare */
9579 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
9580 return OP_X86_COMPARE_MEMBASE8_IMM;
9583 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
9588 return OP_X86_PUSH_MEMBASE;
9589 case OP_COMPARE_IMM:
9590 case OP_ICOMPARE_IMM:
9591 return OP_X86_COMPARE_MEMBASE_IMM;
9594 return OP_X86_COMPARE_MEMBASE_REG;
/* amd64 section below */
9599 /* FIXME: This has sign extension issues */
9601 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
9602 return OP_X86_COMPARE_MEMBASE8_IMM;
9607 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
9608 return OP_X86_PUSH_MEMBASE;
/* deliberately disabled: immediate compares here only handle 32-bit imms */
9610 /* FIXME: This only works for 32 bit immediates
9611 case OP_COMPARE_IMM:
9612 case OP_LCOMPARE_IMM:
9613 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
9614 return OP_AMD64_COMPARE_MEMBASE_IMM;
9616 case OP_ICOMPARE_IMM:
9617 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9618 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
9622 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
9623 return OP_AMD64_COMPARE_MEMBASE_REG;
9626 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9627 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Fold a preceding load (LOAD_OPCODE) into the SECOND source operand of
 * OPCODE, producing an x86/amd64 reg-op-membase opcode
 * (e.g. "add reg, [base+disp]").  Each fusion checks that the load width
 * matches the operation width (32-bit loads for I-ops, 64-bit/pointer loads
 * for the AMD64_* L-ops).  Unsupported combinations fall through.
 * NOTE(review): case labels and the default return are elided in this listing.
 */
9636 op_to_op_src2_membase (int load_opcode, int opcode)
9639 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
9645 return OP_X86_COMPARE_REG_MEMBASE;
9647 return OP_X86_ADD_REG_MEMBASE;
9649 return OP_X86_SUB_REG_MEMBASE;
9651 return OP_X86_AND_REG_MEMBASE;
9653 return OP_X86_OR_REG_MEMBASE;
9655 return OP_X86_XOR_REG_MEMBASE;
/* amd64 section: width of the load must match the op */
9662 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9663 return OP_AMD64_ICOMPARE_REG_MEMBASE;
9667 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9668 return OP_AMD64_COMPARE_REG_MEMBASE;
9671 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9672 return OP_X86_ADD_REG_MEMBASE;
9674 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9675 return OP_X86_SUB_REG_MEMBASE;
9677 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9678 return OP_X86_AND_REG_MEMBASE;
9680 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9681 return OP_X86_OR_REG_MEMBASE;
9683 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9684 return OP_X86_XOR_REG_MEMBASE;
9686 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9687 return OP_AMD64_ADD_REG_MEMBASE;
9689 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9690 return OP_AMD64_SUB_REG_MEMBASE;
9692 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9693 return OP_AMD64_AND_REG_MEMBASE;
9695 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9696 return OP_AMD64_OR_REG_MEMBASE;
9698 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9699 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but first filters out opcodes whose immediate
 * forms are software-emulated on this platform (long shifts on 32-bit
 * targets, mul/div when MONO_ARCH_EMULATE_MUL_DIV / MONO_ARCH_EMULATE_DIV
 * are defined), so the conversion is only done when a real machine
 * instruction exists.
 * NOTE(review): the case labels guarded by the #ifdefs are elided in this
 * listing -- presumably they fall through without converting; confirm against
 * the full source.
 */
9707 mono_op_to_op_imm_noemul (int opcode)
9710 #if SIZEOF_VOID_P == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
9715 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
9723 return mono_op_to_op_imm (opcode);
9728 * mono_handle_global_vregs:
9730 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
9734 mono_handle_global_vregs (MonoCompile *cfg)
/* one slot per vreg: 0 = unseen, block_num+1 = seen in that bb, -1 = global */
9740 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
9742 /* Find local vregs used in more than one bb */
9743 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
9744 MonoInst *ins = bb->code;
9745 int block_num = bb->block_num;
9747 if (cfg->verbose_level > 1)
9748 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
9751 for (; ins; ins = ins->next) {
9752 const char *spec = INS_INFO (ins->opcode);
9753 int regtype, regindex;
9756 if (G_UNLIKELY (cfg->verbose_level > 1))
9757 mono_print_ins (ins);
/* by this point all CIL opcodes must have been lowered to machine IR */
9759 g_assert (ins->opcode >= MONO_CEE_LAST);
/* visit dest (0), src1 (1) and src2 (2) of the instruction in turn */
9761 for (regindex = 0; regindex < 3; regindex ++) {
9764 if (regindex == 0) {
9765 regtype = spec [MONO_INST_DEST];
9769 } else if (regindex == 1) {
9770 regtype = spec [MONO_INST_SRC1];
9775 regtype = spec [MONO_INST_SRC2];
9781 #if SIZEOF_VOID_P == 4
9782 if (regtype == 'l') {
9784 * Since some instructions reference the original long vreg,
9785 * and some reference the two component vregs, it is quite hard
9786 * to determine when it needs to be global. So be conservative.
9788 if (!get_vreg_to_inst (cfg, vreg)) {
9789 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
9791 if (cfg->verbose_level > 1)
9792 printf ("LONG VREG R%d made global.\n", vreg);
9796 * Make the component vregs volatile since the optimizations can
9797 * get confused otherwise.
9799 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
9800 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
9804 g_assert (vreg != -1);
9806 prev_bb = vreg_to_bb [vreg];
9808 /* 0 is a valid block num */
9809 vreg_to_bb [vreg] = block_num + 1;
/* seen in a different bb before and not yet marked global */
9810 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* hard registers are skipped; they are not candidates for variables */
9811 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
9814 if (!get_vreg_to_inst (cfg, vreg)) {
9815 if (G_UNLIKELY (cfg->verbose_level > 1))
9816 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
/* allocate a variable of the matching type for the vreg */
9820 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
9823 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
9826 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
9829 g_assert_not_reached ();
9833 /* Flag as having been used in more than one bb */
9834 vreg_to_bb [vreg] = -1;
9840 /* If a variable is used in only one bblock, convert it into a local vreg */
9841 for (i = 0; i < cfg->num_varinfo; i++) {
9842 MonoInst *var = cfg->varinfo [i];
9843 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
9845 switch (var->type) {
9851 #if SIZEOF_VOID_P == 8
9854 #if !defined(__i386__) && !defined(MONO_ARCH_SOFT_FLOAT)
9855 /* Enabling this screws up the fp stack on x86 */
9858 /* Arguments are implicitly global */
9859 /* Putting R4 vars into registers doesn't work currently */
9860 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4)) {
9862 * Make that the variable's liveness interval doesn't contain a call, since
9863 * that would cause the lvreg to be spilled, making the whole optimization
9866 /* This is too slow for JIT compilation */
9868 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
9870 int def_index, call_index, ins_index;
9871 gboolean spilled = FALSE;
/* scan the single bb: reject if a use follows a call after the def */
9876 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
9877 const char *spec = INS_INFO (ins->opcode);
9879 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
9880 def_index = ins_index;
/*
 * NOTE(review): both halves of this condition test SRC1/sreg1; the second
 * clause was presumably meant to test SRC2/sreg2 -- confirm against the
 * upstream source before changing.
 */
9882 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
9883 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
9884 if (call_index > def_index) {
9890 if (MONO_IS_CALL (ins))
9891 call_index = ins_index;
9901 if (G_UNLIKELY (cfg->verbose_level > 2))
9902 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
/* drop the variable: the vreg is now purely local to its bblock */
9903 var->flags |= MONO_INST_IS_DEAD;
9904 cfg->vreg_to_inst [var->dreg] = NULL;
9911 * Compress the varinfo and vars tables so the liveness computation is faster and
9912 * takes up less space.
9915 for (i = 0; i < cfg->num_varinfo; ++i) {
9916 MonoInst *var = cfg->varinfo [i];
9917 if (pos < i && cfg->locals_start == i)
9918 cfg->locals_start = pos;
9919 if (!(var->flags & MONO_INST_IS_DEAD)) {
/* keep live entries, sliding them down to index 'pos' */
9921 cfg->varinfo [pos] = cfg->varinfo [i];
9922 cfg->varinfo [pos]->inst_c0 = pos;
9923 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
9924 cfg->vars [pos].idx = pos;
9925 #if SIZEOF_VOID_P == 4
9926 if (cfg->varinfo [pos]->type == STACK_I8) {
9927 /* Modify the two component vars too */
9930 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
9931 var1->inst_c0 = pos;
9932 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
9933 var1->inst_c0 = pos;
9940 cfg->num_varinfo = pos;
9941 if (cfg->locals_start > cfg->num_varinfo)
9942 cfg->locals_start = cfg->num_varinfo;
9946 * mono_spill_global_vars:
9948 * Generate spill code for variables which are not allocated to registers,
9949 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
9950 * code is generated which could be optimized by the local optimization passes.
/*
 * NOTE(review): fragmentary extract — the return type line, many interior
 * lines and the function's closing braces are missing, and each surviving
 * line carries a stray leading integer from the extraction.
 */
9953 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
9958 guint32 *vreg_to_lvreg;
9960 guint32 i, lvregs_len;
9961 gboolean dest_has_lvreg = FALSE;
9962 guint32 stacktypes [128];
9964 *need_local_opts = FALSE;
9966 memset (spec2, 0, sizeof (spec2));
9968 /* FIXME: Move this function to mini.c */
/* Map ins-spec regtype characters to stack types used by alloc_dreg (). */
9969 stacktypes ['i'] = STACK_PTR;
9970 stacktypes ['l'] = STACK_I8;
9971 stacktypes ['f'] = STACK_R8;
9973 #if SIZEOF_VOID_P == 4
9974 /* Create MonoInsts for longs */
/* On 32-bit targets a long var has two component vregs: dreg+1 (LS word)
 * and dreg+2 (MS word); give each a REGOFFSET slot inside the parent's. */
9975 for (i = 0; i < cfg->num_varinfo; i++) {
9976 MonoInst *ins = cfg->varinfo [i];
9978 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
9979 switch (ins->type) {
9980 #ifdef MONO_ARCH_SOFT_FLOAT
9986 g_assert (ins->opcode == OP_REGOFFSET);
9988 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
9990 tree->opcode = OP_REGOFFSET;
9991 tree->inst_basereg = ins->inst_basereg;
9992 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
9994 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
9996 tree->opcode = OP_REGOFFSET;
9997 tree->inst_basereg = ins->inst_basereg;
9998 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
10008 /* FIXME: widening and truncation */
10011 * As an optimization, when a variable allocated to the stack is first loaded into
10012 * an lvreg, we will remember the lvreg and use it the next time instead of loading
10013 * the variable again.
/* vreg_to_lvreg caches the last lvreg holding each stack var; 'lvregs'
 * records which entries are live so they can be cleared cheaply. */
10015 orig_next_vreg = cfg->next_vreg;
10016 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
10017 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
10020 /* Add spill loads/stores */
10021 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10024 if (cfg->verbose_level > 1)
10025 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
10027 /* Clear vreg_to_lvreg array */
/* lvregs are per-bblock: invalidate the cache at every bblock boundary. */
10028 for (i = 0; i < lvregs_len; i++)
10029 vreg_to_lvreg [lvregs [i]] = 0;
10033 MONO_BB_FOR_EACH_INS (bb, ins) {
10034 const char *spec = INS_INFO (ins->opcode);
10035 int regtype, srcindex, sreg, tmp_reg, prev_dreg;
10036 gboolean store, no_lvreg;
10038 if (G_UNLIKELY (cfg->verbose_level > 1))
10039 mono_print_ins (ins);
10041 if (ins->opcode == OP_NOP)
10045 * We handle LDADDR here as well, since it can only be decomposed
10046 * when variable addresses are known.
10048 if (ins->opcode == OP_LDADDR) {
10049 MonoInst *var = ins->inst_p0;
10051 if (var->opcode == OP_VTARG_ADDR) {
10052 /* Happens on SPARC/S390 where vtypes are passed by reference */
10053 MonoInst *vtaddr = var->inst_left;
10054 if (vtaddr->opcode == OP_REGVAR) {
10055 ins->opcode = OP_MOVE;
10056 ins->sreg1 = vtaddr->dreg;
/* NOTE(review): condition reads var->inst_left but the body uses vtaddr —
 * equivalent here since vtaddr == var->inst_left. */
10058 else if (var->inst_left->opcode == OP_REGOFFSET) {
10059 ins->opcode = OP_LOAD_MEMBASE;
10060 ins->inst_basereg = vtaddr->inst_basereg;
10061 ins->inst_offset = vtaddr->inst_offset;
10065 g_assert (var->opcode == OP_REGOFFSET);
/* LDADDR of a stack slot becomes basereg + offset. */
10067 ins->opcode = OP_ADD_IMM;
10068 ins->sreg1 = var->inst_basereg;
10069 ins->inst_imm = var->inst_offset;
10072 *need_local_opts = TRUE;
10073 spec = INS_INFO (ins->opcode);
/* CIL-level opcodes must have been lowered away by this point. */
10076 if (ins->opcode < MONO_CEE_LAST) {
10077 mono_print_ins (ins);
10078 g_assert_not_reached ();
10082 * Store opcodes have destbasereg in the dreg, but in reality, it is an
/* Temporarily swap dreg and sreg2 so the store's base register is handled
 * by the source-register pass below; swapped back near '10316'. */
10086 if (MONO_IS_STORE_MEMBASE (ins)) {
10087 tmp_reg = ins->dreg;
10088 ins->dreg = ins->sreg2;
10089 ins->sreg2 = tmp_reg;
10092 spec2 [MONO_INST_DEST] = ' ';
10093 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10094 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10096 } else if (MONO_IS_STORE_MEMINDEX (ins))
10097 g_assert_not_reached ();
10102 if (G_UNLIKELY (cfg->verbose_level > 1))
10103 printf ("\t %.3s %d %d %d\n", spec, ins->dreg, ins->sreg1, ins->sreg2);
/* --- Destination register pass --- */
10108 regtype = spec [MONO_INST_DEST];
10109 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
10112 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
10113 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
10114 MonoInst *store_ins;
10117 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
10119 if (var->opcode == OP_REGVAR) {
/* Var lives in a hard register: just substitute it. */
10120 ins->dreg = var->dreg;
10121 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
10123 * Instead of emitting a load+store, use a _membase opcode.
10125 g_assert (var->opcode == OP_REGOFFSET);
10126 if (ins->opcode == OP_MOVE) {
10129 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
10130 ins->inst_basereg = var->inst_basereg;
10131 ins->inst_offset = var->inst_offset;
10134 spec = INS_INFO (ins->opcode);
10138 g_assert (var->opcode == OP_REGOFFSET);
10140 prev_dreg = ins->dreg;
10142 /* Invalidate any previous lvreg for this vreg */
10143 vreg_to_lvreg [ins->dreg] = 0;
10147 #ifdef MONO_ARCH_SOFT_FLOAT
/* Soft-float: R8 values are stored as their 64-bit integer bits. */
10148 if (store_opcode == OP_STORER8_MEMBASE_REG) {
10150 store_opcode = OP_STOREI8_MEMBASE_REG;
/* Redirect the result into a fresh lvreg and spill it to the stack slot. */
10154 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
10156 if (regtype == 'l') {
10157 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
10158 mono_bblock_insert_after_ins (bb, ins, store_ins);
10159 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
10160 mono_bblock_insert_after_ins (bb, ins, store_ins);
10163 g_assert (store_opcode != OP_STOREV_MEMBASE);
10165 /* Try to fuse the store into the instruction itself */
10166 /* FIXME: Add more instructions */
10167 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
10168 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
10169 ins->inst_imm = ins->inst_c0;
10170 ins->inst_destbasereg = var->inst_basereg;
10171 ins->inst_offset = var->inst_offset;
10172 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
/* A plain move into a stack var becomes a direct store of the source. */
10173 ins->opcode = store_opcode;
10174 ins->inst_destbasereg = var->inst_basereg;
10175 ins->inst_offset = var->inst_offset;
10179 tmp_reg = ins->dreg;
10180 ins->dreg = ins->sreg2;
10181 ins->sreg2 = tmp_reg;
10184 spec2 [MONO_INST_DEST] = ' ';
10185 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10186 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10188 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
10189 // FIXME: The backends expect the base reg to be in inst_basereg
10190 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
10192 ins->inst_basereg = var->inst_basereg;
10193 ins->inst_offset = var->inst_offset;
10194 spec = INS_INFO (ins->opcode);
10196 /* printf ("INS: "); mono_print_ins (ins); */
10197 /* Create a store instruction */
10198 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
10200 /* Insert it after the instruction */
10201 mono_bblock_insert_after_ins (bb, ins, store_ins);
10204 * We can't assign ins->dreg to var->dreg here, since the
10205 * sregs could use it. So set a flag, and do it after
10208 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
10209 dest_has_lvreg = TRUE;
/* --- Source register pass: srcindex 0 = sreg1, 1 = sreg2 --- */
10218 for (srcindex = 0; srcindex < 2; ++srcindex) {
10219 regtype = spec [(srcindex == 0) ? MONO_INST_SRC1 : MONO_INST_SRC2];
10220 sreg = srcindex == 0 ? ins->sreg1 : ins->sreg2;
10222 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
10223 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
10224 MonoInst *var = get_vreg_to_inst (cfg, sreg);
10225 MonoInst *load_ins;
10226 guint32 load_opcode;
10228 if (var->opcode == OP_REGVAR) {
10230 ins->sreg1 = var->dreg;
10232 ins->sreg2 = var->dreg;
10236 g_assert (var->opcode == OP_REGOFFSET);
10238 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
10240 g_assert (load_opcode != OP_LOADV_MEMBASE);
10242 if (vreg_to_lvreg [sreg]) {
10243 /* The variable is already loaded to an lvreg */
10244 if (G_UNLIKELY (cfg->verbose_level > 1))
10245 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
10247 ins->sreg1 = vreg_to_lvreg [sreg];
10249 ins->sreg2 = vreg_to_lvreg [sreg];
10253 /* Try to fuse the load into the instruction */
10254 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
10255 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
10256 ins->inst_basereg = var->inst_basereg;
10257 ins->inst_offset = var->inst_offset;
10258 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
10259 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
10260 ins->sreg2 = var->inst_basereg;
10261 ins->inst_offset = var->inst_offset;
/* A move whose operands collapsed to the same register is a no-op. */
10263 if ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE)) {
10264 ins->opcode = OP_NOP;
10267 //printf ("%d ", srcindex); mono_print_ins (ins);
/* Load the var into a fresh lvreg before the instruction. */
10269 sreg = alloc_dreg (cfg, stacktypes [regtype]);
10271 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
10272 if (var->dreg == prev_dreg) {
10274 * sreg refers to the value loaded by the load
10275 * emitted below, but we need to use ins->dreg
10276 * since it refers to the store emitted earlier.
10280 vreg_to_lvreg [var->dreg] = sreg;
10281 g_assert (lvregs_len < 1024);
10282 lvregs [lvregs_len ++] = var->dreg;
10291 if (regtype == 'l') {
10292 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
10293 mono_bblock_insert_before_ins (bb, ins, load_ins);
10294 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
10295 mono_bblock_insert_before_ins (bb, ins, load_ins);
10298 #if SIZEOF_VOID_P == 4
10299 g_assert (load_opcode != OP_LOADI8_MEMBASE);
10301 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
10302 mono_bblock_insert_before_ins (bb, ins, load_ins);
/* Deferred from the dreg pass: now that the sregs have been processed it
 * is safe to publish ins->dreg as the cached lvreg for prev_dreg. */
10308 if (dest_has_lvreg) {
10309 vreg_to_lvreg [prev_dreg] = ins->dreg;
10310 g_assert (lvregs_len < 1024);
10311 lvregs [lvregs_len ++] = prev_dreg;
10312 dest_has_lvreg = FALSE;
/* Undo the dreg/sreg2 swap performed for store opcodes above. */
10316 tmp_reg = ins->dreg;
10317 ins->dreg = ins->sreg2;
10318 ins->sreg2 = tmp_reg;
/* Calls clobber lvregs, so drop the whole cache. */
10321 if (MONO_IS_CALL (ins)) {
10322 /* Clear vreg_to_lvreg array */
10323 for (i = 0; i < lvregs_len; i++)
10324 vreg_to_lvreg [lvregs [i]] = 0;
10328 if (cfg->verbose_level > 1)
10329 mono_print_ins_index (1, ins);
10336 * - use 'iadd' instead of 'int_add'
10337 * - handling ovf opcodes: decompose in method_to_ir.
10338 * - unify iregs/fregs
10339 * -> partly done, the missing parts are:
10340 * - a more complete unification would involve unifying the hregs as well, so
10341 * code wouldn't need if (fp) all over the place. but that would mean the hregs
10342 * would no longer map to the machine hregs, so the code generators would need to
10343 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
10344 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
10345 * fp/non-fp branches speeds it up by about 15%.
10346 * - use sext/zext opcodes instead of shifts
10348 * - get rid of TEMPLOADs if possible and use vregs instead
10349 * - clean up usage of OP_P/OP_ opcodes
10350 * - cleanup usage of DUMMY_USE
10351 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
10353 * - set the stack type and allocate a dreg in the EMIT_NEW macros
10354 * - get rid of all the <foo>2 stuff when the new JIT is ready.
10355 * - make sure handle_stack_args () is called before the branch is emitted
10356 * - when the new IR is done, get rid of all unused stuff
10357 * - COMPARE/BEQ as separate instructions or unify them ?
10358 * - keeping them separate allows specialized compare instructions like
10359 * compare_imm, compare_membase
10360 * - most back ends unify fp compare+branch, fp compare+ceq
10361 * - integrate handle_stack_args into inline_method
10362 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
10363 * - Things to backport to the old JIT:
10364 * - op_atomic_exchange fix for amd64
10365 * - localloc fix for amd64
10366 * - x86 type_token change
10368 * - long eq/ne optimizations
10369 * - handle long shift opts on 32 bit platforms somehow: they require
10370 * 3 sregs (2 for arg1 and 1 for arg2)
10371 * - make byref a 'normal' type.
10372 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
10373 * variable if needed.
10374 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
10375 * like inline_method.
10376 * - remove inlining restrictions
10377 * - remove mono_save_args.
10378 * - add 'introduce a new optimization to simplify some range checks'
10379 * - fix LNEG and enable cfold of INEG
10380 * - generalize x86 optimizations like ldelema as a peephole optimization
10381 * - add store_mem_imm for amd64
10382 * - optimize the loading of the interruption flag in the managed->native wrappers
10383 * - avoid special handling of OP_NOP in passes
10384 * - move code inserting instructions into one function/macro.
10385 * - cleanup the code replacement in decompose_long_opts ()
10386 * - try a coalescing phase after liveness analysis
10387 * - add float -> vreg conversion + local optimizations on !x86
10388 * - figure out how to handle decomposed branches during optimizations, ie.
10389 * compare+branch, op_jump_table+op_br etc.
10390 * - promote RuntimeXHandles to vregs
10391 * - vtype cleanups:
10392 * - add a NEW_VARLOADA_VREG macro
10393 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
10394 * accessing vtype fields.
10395 * - get rid of I8CONST on 64 bit platforms
10396 * - dealing with the increase in code size due to branches created during opcode
10398 * - use extended basic blocks
10399 * - all parts of the JIT
10400 * - handle_global_vregs () && local regalloc
10401 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
10402 * - sources of increase in code size:
10405 * - isinst and castclass
10406 * - lvregs not allocated to global registers even if used multiple times
10407 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
10409 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
10410 * - add all micro optimizations from the old JIT
10411 * - put tree optimizations into the deadce pass
10412 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
10413 * specific function.
10414 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
10415 * fcompare + branchCC.
10416 * - sig->ret->byref seems to be set for some calls made from ldfld wrappers when
10417 * running generics.exe.
10418 * - create a helper function for allocating a stack slot, taking into account
10419 * MONO_CFG_HAS_SPILLUP.
10420 * - merge new GC changes in mini.c.
10422 * - merge the ia64 switch changes.
10423 * - merge the mips conditional changes.
10424 * - remove unused opcodes from mini-ops.h, remove "op_" from the opcode names,
10425 * remove the op_ opcodes from the cpu-..md files, clean up the cpu-..md files.
10426 * - make the cpu_ tables smaller when the usage of the cee_ opcodes is removed.
10427 * - optimize mono_regstate2_alloc_int/float.
10428 * - fix the pessimistic handling of variables accessed in exception handler blocks.
10429 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
10430 * parts of the tree could be separated by other instructions, killing the tree
10431 * arguments, or stores killing loads etc. Also, should we fold loads into other
10432 * instructions if the result of the load is used multiple times ?
10433 * - make the REM_IMM optimization in mini-x86.c arch-independent.
10434 * - LAST MERGE: 108395.
10435 * - when returning vtypes in registers, generate IR and append it to the end of the
10436 * last bb instead of doing it in the epilog.
10437 * - when the new JIT is done, use the ins emission macros in ir-emit.h instead of the
10438 * ones in inssel.h.
10439 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
10447 - When to decompose opcodes:
10448 - earlier: this makes some optimizations hard to implement, since the low level IR
10449 no longer contains the necessary information. But it is easier to do.
10450 - later: harder to implement, enables more optimizations.
10451 - Branches inside bblocks:
10452 - created when decomposing complex opcodes.
10453 - branches to another bblock: harmless, but not tracked by the branch
10454 optimizations, so need to branch to a label at the start of the bblock.
10455 - branches to inside the same bblock: very problematic, trips up the local
10456 reg allocator. Can be fixed by splitting the current bblock, but that is a
10457 complex operation, since some local vregs can become global vregs etc.
10458 - Local/global vregs:
10459 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
10460 local register allocator.
10461 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
10462 structure, created by mono_create_var (). Assigned to hregs or the stack by
10463 the global register allocator.
10464 - When to do optimizations like alu->alu_imm:
10465 - earlier -> saves work later on since the IR will be smaller/simpler
10466 - later -> can work on more instructions
10467 - Handling of valuetypes:
10468 - When a vtype is pushed on the stack, a new temporary is created, an
10469 instruction computing its address (LDADDR) is emitted and pushed on
10470 the stack. Need to optimize cases when the vtype is used immediately as in
10471 argument passing, stloc etc.
10472 - Instead of the to_end stuff in the old JIT, simply call the function handling
10473 the values on the stack before emitting the last instruction of the bb.