2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
20 #ifdef HAVE_SYS_TIME_H
24 #ifdef HAVE_VALGRIND_MEMCHECK_H
25 #include <valgrind/memcheck.h>
28 #include <mono/metadata/assembly.h>
29 #include <mono/metadata/loader.h>
30 #include <mono/metadata/tabledefs.h>
31 #include <mono/metadata/class.h>
32 #include <mono/metadata/object.h>
33 #include <mono/metadata/exception.h>
34 #include <mono/metadata/opcodes.h>
35 #include <mono/metadata/mono-endian.h>
36 #include <mono/metadata/tokentype.h>
37 #include <mono/metadata/tabledefs.h>
38 #include <mono/metadata/threads.h>
39 #include <mono/metadata/marshal.h>
40 #include <mono/metadata/socket-io.h>
41 #include <mono/metadata/appdomain.h>
42 #include <mono/metadata/debug-helpers.h>
43 #include <mono/io-layer/io-layer.h>
44 #include "mono/metadata/profiler.h"
45 #include <mono/metadata/profiler-private.h>
46 #include <mono/metadata/mono-config.h>
47 #include <mono/metadata/environment.h>
48 #include <mono/metadata/mono-debug.h>
49 #include <mono/metadata/mono-debug-debugger.h>
50 #include <mono/metadata/monitor.h>
51 #include <mono/metadata/gc-internal.h>
52 #include <mono/metadata/security-manager.h>
53 #include <mono/metadata/threads-types.h>
54 #include <mono/metadata/rawbuffer.h>
55 #include <mono/metadata/security-core-clr.h>
56 #include <mono/utils/mono-math.h>
57 #include <mono/utils/mono-compiler.h>
58 #include <mono/os/gc_wrapper.h>
68 #include "jit-icalls.h"
72 #define BRANCH_COST 100
73 #define INLINE_LENGTH_LIMIT 20
74 #define INLINE_FAILURE do {\
75 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE))\
78 #define CHECK_CFG_EXCEPTION do {\
79 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
82 #define METHOD_ACCESS_FAILURE do { \
83 char *method_fname = mono_method_full_name (method, TRUE); \
84 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
85 cfg->exception_type = MONO_EXCEPTION_METHOD_ACCESS; \
86 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
87 g_free (method_fname); \
88 g_free (cil_method_fname); \
89 goto exception_exit; \
91 #define FIELD_ACCESS_FAILURE do { \
92 char *method_fname = mono_method_full_name (method, TRUE); \
93 char *field_fname = mono_field_full_name (field); \
94 cfg->exception_type = MONO_EXCEPTION_FIELD_ACCESS; \
95 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
96 g_free (method_fname); \
97 g_free (field_fname); \
98 goto exception_exit; \
100 #define GENERIC_SHARING_FAILURE(opcode) do { \
101 if (cfg->generic_sharing_context) { \
102 if (cfg->verbose_level > 1) \
103 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
104 cfg->exception_type = MONO_EXCEPTION_GENERIC_SHARING_FAILED; \
105 goto exception_exit; \
108 #define GENERIC_SHARING_FAILURE_IF_VALUETYPE_METHOD(opcode) do { \
109 if (method->klass->valuetype) \
110 GENERIC_SHARING_FAILURE ((opcode)); \
113 /* Determine whenever 'ins' represents a load of the 'this' argument */
114 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
116 static int ldind_to_load_membase (int opcode);
117 static int stind_to_store_membase (int opcode);
119 int mono_op_to_op_imm (int opcode);
120 int mono_op_to_op_imm_noemul (int opcode);
122 MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
123 void mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native);
124 void mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass);
126 int mono_method_to_ir2 (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
127 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
128 guint inline_offset, gboolean is_virtual_call);
130 /* helper methods signature */
131 extern MonoMethodSignature *helper_sig_class_init_trampoline;
132 extern MonoMethodSignature *helper_sig_domain_get;
133 extern MonoMethodSignature *helper_sig_generic_class_init_trampoline;
134 extern MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
137 * Instruction metadata
142 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2,
147 #if SIZEOF_VOID_P == 8
152 /* keep in sync with the enum in mini.h */
155 #include "mini-ops.h"
159 extern GHashTable *jit_icall_name_hash;
161 #define MONO_INIT_VARINFO(vi,id) do { \
162 (vi)->range.first_use.pos.bid = 0xffff; \
/*
 * Thin public wrappers over the cfg-local virtual-register allocators:
 * integer, float, pointer-sized, and stack-type-driven dreg allocation.
 * Each forwards directly to the corresponding static alloc_* helper.
 * NOTE(review): this listing is subsampled -- the return types and braces
 * of these definitions are not visible here.
 */
168 mono_alloc_ireg (MonoCompile *cfg)
170 return alloc_ireg (cfg);
174 mono_alloc_freg (MonoCompile *cfg)
176 return alloc_freg (cfg);
180 mono_alloc_preg (MonoCompile *cfg)
182 return alloc_preg (cfg);
186 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
188 return alloc_dreg (cfg, stack_type);
/*
 * Map a MonoType to the move opcode used to copy a value of that type
 * between registers (per the g_error text: "type_to_regstore").
 * Enum valuetypes and generic instances recurse on their underlying type.
 * NOTE(review): subsampled listing -- the return statements and several
 * case labels between the visible lines are not shown here.
 */
192 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
198 switch (type->type) {
201 case MONO_TYPE_BOOLEAN:
213 case MONO_TYPE_FNPTR:
215 case MONO_TYPE_CLASS:
216 case MONO_TYPE_STRING:
217 case MONO_TYPE_OBJECT:
218 case MONO_TYPE_SZARRAY:
219 case MONO_TYPE_ARRAY:
/* 64-bit-only handling follows (I8/U8 presumably move like pointers here --
 * TODO confirm against the elided lines). */
223 #if SIZEOF_VOID_P == 8
232 case MONO_TYPE_VALUETYPE:
233 if (type->data.klass->enumtype) {
/* Enums: retry with the enum's underlying integral type. */
234 type = type->data.klass->enum_basetype;
238 case MONO_TYPE_TYPEDBYREF:
240 case MONO_TYPE_GENERICINST:
/* Generic instances: retry with the open container class' byval type. */
241 type = &type->data.generic_class->container_class->byval_arg;
/* Type variables are only legal under generic sharing. */
245 g_assert (cfg->generic_sharing_context);
248 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/*
 * Debug helper: dump one basic block -- its incoming edges, outgoing edges
 * (block number and depth-first number each), then every instruction in the
 * block via mono_print_ins_index ().
 */
254 mono_print_bb (MonoBasicBlock *bb, const char *msg)
259 printf ("\n%s %d: [IN: ", msg, bb->block_num);
260 for (i = 0; i < bb->in_count; ++i)
261 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
263 for (i = 0; i < bb->out_count; ++i)
264 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
266 for (tree = bb->code; tree; tree = tree->next)
267 mono_print_ins_index (-1, tree);
271 * Can't put this at the beginning, since other files reference stuff from this
/* On unverifiable IL: trap into the debugger when the break_on_unverified
 * debug option is set, otherwise jump to the local 'unverified' label. */
276 #define UNVERIFIED do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto unverified; } while (0)
278 #define GET_BBLOCK(cfg,tblock,ip) do { \
279 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
281 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
282 NEW_BBLOCK (cfg, (tblock)); \
283 (tblock)->cil_code = (ip); \
284 ADD_BBLOCK (cfg, (tblock)); \
288 #ifndef MONO_ARCH_EMIT_BOUNDS_CHECK
289 #define MONO_ARCH_EMIT_BOUNDS_CHECK(cfg, array_reg, offset, index_reg) do { \
290 int _length_reg = alloc_ireg (cfg); \
291 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, _length_reg, array_reg, offset); \
292 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, _length_reg, index_reg); \
293 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException"); \
297 #define MONO_EMIT_BOUNDS_CHECK(cfg, array_reg, array_type, array_length_field, index_reg) do { \
298 if (!(cfg->opt & MONO_OPT_ABCREM)) { \
299 MONO_ARCH_EMIT_BOUNDS_CHECK ((cfg), (array_reg), G_STRUCT_OFFSET (array_type, array_length_field), (index_reg)); \
302 MONO_INST_NEW ((cfg), ins, OP_BOUNDS_CHECK); \
303 ins->sreg1 = array_reg; \
304 ins->sreg2 = index_reg; \
305 ins->inst_imm = G_STRUCT_OFFSET (array_type, array_length_field); \
306 MONO_ADD_INS ((cfg)->cbb, ins); \
307 (cfg)->flags |= MONO_CFG_HAS_ARRAY_ACCESS; \
308 (cfg)->cbb->has_array_access = TRUE; \
312 #if defined(__i386__) || defined(__x86_64__)
313 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
314 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
315 (dest)->dreg = alloc_preg ((cfg)); \
316 (dest)->sreg1 = (sr1); \
317 (dest)->sreg2 = (sr2); \
318 (dest)->inst_imm = (imm); \
319 (dest)->backend.shift_amount = (shift); \
320 MONO_ADD_INS ((cfg)->cbb, (dest)); \
324 #if SIZEOF_VOID_P == 8
325 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
326 /* FIXME: Need to add many more cases */ \
327 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
329 int dr = alloc_preg (cfg); \
330 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
331 (ins)->sreg2 = widen->dreg; \
335 #define ADD_WIDEN_OP(ins, arg1, arg2)
338 #define ADD_BINOP(op) do { \
339 MONO_INST_NEW (cfg, ins, (op)); \
341 ins->sreg1 = sp [0]->dreg; \
342 ins->sreg2 = sp [1]->dreg; \
343 type_from_op (ins, sp [0], sp [1]); \
345 /* Have to insert a widening op */ \
346 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
347 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
348 MONO_ADD_INS ((cfg)->cbb, (ins)); \
350 mono_decompose_opcode ((cfg), (ins)); \
353 #define ADD_UNOP(op) do { \
354 MONO_INST_NEW (cfg, ins, (op)); \
356 ins->sreg1 = sp [0]->dreg; \
357 type_from_op (ins, sp [0], NULL); \
359 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
360 MONO_ADD_INS ((cfg)->cbb, (ins)); \
362 mono_decompose_opcode (cfg, ins); \
365 #define ADD_BINCOND(next_block) do { \
368 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
369 cmp->sreg1 = sp [0]->dreg; \
370 cmp->sreg2 = sp [1]->dreg; \
371 type_from_op (cmp, sp [0], sp [1]); \
373 type_from_op (ins, sp [0], sp [1]); \
374 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
375 GET_BBLOCK (cfg, tblock, target); \
376 link_bblock (cfg, bblock, tblock); \
377 ins->inst_true_bb = tblock; \
378 if ((next_block)) { \
379 link_bblock (cfg, bblock, (next_block)); \
380 ins->inst_false_bb = (next_block); \
381 start_new_bblock = 1; \
383 GET_BBLOCK (cfg, tblock, ip); \
384 link_bblock (cfg, bblock, tblock); \
385 ins->inst_false_bb = tblock; \
386 start_new_bblock = 2; \
388 if (sp != stack_start) { \
389 handle_stack_args (cfg, stack_start, sp - stack_start); \
390 CHECK_UNVERIFIABLE (cfg); \
392 MONO_ADD_INS (bblock, cmp); \
393 MONO_ADD_INS (bblock, ins); \
397 * link_bblock: Links two basic blocks
399 * links two basic blocks in the control flow graph, the 'from'
400 * argument is the starting block and the 'to' argument is the block
401 * the control flow ends to after 'from'.
/*
 * link_bblock:
 *
 *   Record the control-flow edge FROM -> TO: append TO to FROM's out_bb
 * array and FROM to TO's in_bb array, each only once (the scan loops below
 * detect an already-existing edge first).  Arrays are grown by one element
 * from cfg->mempool.
 *
 * Fixes in the debug traces below: the edge messages previously mixed
 * "IL%04x" and "IL_%04x" labels; they now uniformly print "IL_%04x".  The
 * pointer differences are also cast to int so the argument matches the %x
 * conversion on LP64 targets (passing a ptrdiff_t to %x is undefined).
 *
 * NOTE(review): this listing is subsampled; lines of the original bodies
 * that are not visible here are left untouched.
 */
404 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
406 MonoBasicBlock **newa;
410 if (from->cil_code) {
412 printf ("edge from IL_%04x to IL_%04x\n", (int)(from->cil_code - cfg->cil_code), (int)(to->cil_code - cfg->cil_code));
414 printf ("edge from IL_%04x to exit\n", (int)(from->cil_code - cfg->cil_code));
417 printf ("edge from entry to IL_%04x\n", (int)(to->cil_code - cfg->cil_code));
419 printf ("edge from entry to exit\n");
/* Skip the append if FROM already lists TO as a successor. */
424 for (i = 0; i < from->out_count; ++i) {
425 if (to == from->out_bb [i]) {
/* Grow FROM's out-edge array by one and copy the old entries over. */
431 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
432 for (i = 0; i < from->out_count; ++i) {
433 newa [i] = from->out_bb [i];
/* Mirror the same check-then-grow for TO's in-edge array. */
441 for (i = 0; i < to->in_count; ++i) {
442 if (from == to->in_bb [i]) {
448 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
449 for (i = 0; i < to->in_count; ++i) {
450 newa [i] = to->in_bb [i];
/* Exported wrapper around the static link_bblock (). */
459 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
461 link_bblock (cfg, from, to);
465 * mono_find_block_region:
467 * We mark each basic block with a region ID. We use that to avoid BB
468 * optimizations when blocks are in different regions.
471 * A region token that encodes where this region is, and information
472 * about the clause owner for this block.
474 * The region encodes the try/catch/filter clause that owns this block
475 * as well as the type. -1 is a special value that represents a block
476 * that is in none of try/catch/filter.
/*
 * mono_find_block_region:
 *
 *   Compute the region token for the IL OFFSET: handlers and filters are
 * checked before try bodies (so an offset inside a nested handler wins).
 * The token is ((clause_index + 1) << 8) | region-kind | clause->flags;
 * per the header comment elsewhere in this file, -1 marks "no region"
 * (the fallthrough return is not visible in this subsampled listing).
 */
479 mono_find_block_region (MonoCompile *cfg, int offset)
481 MonoMethod *method = cfg->method;
482 MonoMethodHeader *header = mono_method_get_header (method);
483 MonoExceptionClause *clause;
486 /* first search for handlers and filters */
487 for (i = 0; i < header->num_clauses; ++i) {
488 clause = &header->clauses [i];
/* A filter region spans [filter_offset, handler_offset). */
489 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
490 (offset < (clause->handler_offset)))
491 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
493 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
494 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
495 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
496 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
497 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
/* Anything else inside a handler is a catch region. */
499 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
503 /* search the try blocks */
504 for (i = 0; i < header->num_clauses; ++i) {
505 clause = &header->clauses [i];
506 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
507 return ((i + 1) << 8) | clause->flags;
/*
 * mono_find_final_block:
 *
 *   Collect (into a GList) the handler basic blocks of every clause of kind
 * TYPE whose protected range contains IP but not TARGET -- i.e. the
 * handlers that must run when control leaves the clause from IP to TARGET.
 * NOTE(review): subsampled listing; the declaration/initialization of 'res'
 * and the final return are not visible here.
 */
514 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
516 MonoMethod *method = cfg->method;
517 MonoMethodHeader *header = mono_method_get_header (method);
518 MonoExceptionClause *clause;
519 MonoBasicBlock *handler;
523 for (i = 0; i < header->num_clauses; ++i) {
524 clause = &header->clauses [i];
/* Leaving the clause: IP inside the try range, TARGET outside it. */
525 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
526 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
527 if (clause->flags == type) {
528 handler = cfg->cil_offset_to_bb [clause->handler_offset];
530 res = g_list_append (res, handler);
/*
 * Per-region stack-pointer variable and per-offset exception variable
 * helpers.  Each create-helper first consults the corresponding hash table
 * (cfg->spvars / cfg->exvars) and only creates a new OP_LOCAL on a miss,
 * marking it MONO_INST_INDIRECT so it stays stack-allocated.
 * NOTE(review): subsampled listing -- early-return-on-hit lines and braces
 * are not visible here.
 */
538 mono_create_spvar_for_region (MonoCompile *cfg, int region)
542 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
546 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
547 /* prevent it from being register allocated */
548 var->flags |= MONO_INST_INDIRECT;
550 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/* Look up (never create) the exception variable for an IL offset. */
554 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
556 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
560 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
564 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/* Exception vars hold object references, hence object_class. */
568 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
569 /* prevent it from being register allocated */
570 var->flags |= MONO_INST_INDIRECT;
572 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
578 * Returns the type used in the eval stack when @type is loaded.
579 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
/*
 * type_to_eval_stack_type:
 *
 *   Set INST->type (and INST->klass) to the evaluation-stack category
 * (STACK_I4 / STACK_I8 / STACK_R8 / STACK_PTR / STACK_OBJ / STACK_MP /
 * STACK_VTYPE / STACK_INV) corresponding to TYPE.  Byref types become
 * STACK_MP; enums and generic instances recurse on the underlying type.
 * NOTE(review): subsampled listing -- several case labels, breaks and the
 * byref/goto plumbing between the visible lines are not shown.
 */
582 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
/* Byref: a managed pointer on the eval stack. */
587 inst->type = STACK_MP;
588 inst->klass = mono_defaults.object_class;
592 inst->klass = klass = mono_class_from_mono_type (type);
595 switch (type->type) {
597 inst->type = STACK_INV;
601 case MONO_TYPE_BOOLEAN:
607 inst->type = STACK_I4;
612 case MONO_TYPE_FNPTR:
613 inst->type = STACK_PTR;
615 case MONO_TYPE_CLASS:
616 case MONO_TYPE_STRING:
617 case MONO_TYPE_OBJECT:
618 case MONO_TYPE_SZARRAY:
619 case MONO_TYPE_ARRAY:
620 inst->type = STACK_OBJ;
624 inst->type = STACK_I8;
628 inst->type = STACK_R8;
630 case MONO_TYPE_VALUETYPE:
631 if (type->data.klass->enumtype) {
/* Enums: reclassify using the underlying integral type. */
632 type = type->data.klass->enum_basetype;
636 inst->type = STACK_VTYPE;
639 case MONO_TYPE_TYPEDBYREF:
640 inst->klass = mono_defaults.typed_reference_class;
641 inst->type = STACK_VTYPE;
643 case MONO_TYPE_GENERICINST:
644 type = &type->data.generic_class->container_class->byval_arg;
647 case MONO_TYPE_MVAR :
648 /* FIXME: all the arguments must be references for now,
649 * later look inside cfg and see if the arg num is
/* Type variables only appear under generic sharing; treated as objects. */
652 g_assert (cfg->generic_sharing_context);
653 inst->type = STACK_OBJ;
656 g_error ("unknown type 0x%02x in eval stack type", type->type);
661 * The following tables are used to quickly validate the IL code in type_from_op ().
664 bin_num_table [STACK_MAX] [STACK_MAX] = {
665 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
666 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
667 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
668 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
669 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
670 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
671 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
672 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
677 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
680 /* reduce the size of this table */
682 bin_int_table [STACK_MAX] [STACK_MAX] = {
683 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
684 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
685 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
686 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
687 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
688 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
689 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
690 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
694 bin_comp_table [STACK_MAX] [STACK_MAX] = {
695 /* Inv i L p F & O vt */
697 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
698 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
699 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
700 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
701 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
702 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
703 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
706 /* reduce the size of this table */
708 shift_table [STACK_MAX] [STACK_MAX] = {
709 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
710 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
711 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
712 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
713 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
714 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
715 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
716 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
720 * Tables to map from the non-specific opcode to the matching
721 * type-specific opcode.
723 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
725 binops_op_map [STACK_MAX] = {
726 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
729 /* handles from CEE_NEG to CEE_CONV_U8 */
731 unops_op_map [STACK_MAX] = {
732 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
735 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
737 ovfops_op_map [STACK_MAX] = {
738 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
741 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
743 ovf2ops_op_map [STACK_MAX] = {
744 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
747 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
749 ovf3ops_op_map [STACK_MAX] = {
750 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
753 /* handles from CEE_BEQ to CEE_BLT_UN */
755 beqops_op_map [STACK_MAX] = {
756 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
759 /* handles from CEE_CEQ to CEE_CLT_UN */
761 ceqops_op_map [STACK_MAX] = {
762 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
766 * Sets ins->type (the type on the eval stack) according to the
767 * type of the opcode and the arguments to it.
768 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
770 * FIXME: this function sets ins->type unconditionally in some cases, but
771 * it should set it to invalid for some types (a conv.x on an object)
/*
 * type_from_op:
 *
 *   Infer INS->type (the eval-stack category of the result) from the
 * opcode and operand stack types, and specialize the generic opcode into
 * its typed variant by adding the matching *_op_map delta (the maps are
 * the static tables defined above: binops_op_map, unops_op_map,
 * ovfops_op_map, ovf2ops_op_map, ovf3ops_op_map, beqops_op_map,
 * ceqops_op_map).  Invalid IL is flagged by STACK_INV.
 * NOTE(review): this listing is subsampled -- most case labels and break
 * statements between the visible lines are not shown, so the grouping of
 * opcodes per case cannot be fully confirmed from here.
 */
774 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
776 switch (ins->opcode) {
/* Arithmetic binops: result type from bin_num_table, opcode specialized. */
783 /* FIXME: check unverifiable args for STACK_MP */
784 ins->type = bin_num_table [src1->type] [src2->type];
785 ins->opcode += binops_op_map [ins->type];
/* Integer-only binops (and/or/xor/div/rem per the tables' shape). */
792 ins->type = bin_int_table [src1->type] [src2->type];
793 ins->opcode += binops_op_map [ins->type];
/* Shifts: result type keyed on (value, shift-count) pair. */
798 ins->type = shift_table [src1->type] [src2->type];
799 ins->opcode += binops_op_map [ins->type];
/* Compare: pick L/F/I variant from the first operand's width. */
804 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
805 if ((src1->type == STACK_I8) || ((sizeof (gpointer) == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
806 ins->opcode = OP_LCOMPARE;
807 else if (src1->type == STACK_R8)
808 ins->opcode = OP_FCOMPARE;
810 ins->opcode = OP_ICOMPARE;
812 case OP_ICOMPARE_IMM:
/* Immediate compare: only one stack operand, so src1 is checked against
 * itself in bin_comp_table. */
813 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
814 if ((src1->type == STACK_I8) || ((sizeof (gpointer) == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
815 ins->opcode = OP_LCOMPARE_IMM;
/* Conditional branches / comparisons. */
827 ins->opcode += beqops_op_map [src1->type];
830 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
831 ins->opcode += ceqops_op_map [src1->type];
/* Unsigned/unordered variants only valid where the table has bit 0 set. */
837 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
838 ins->opcode += ceqops_op_map [src1->type];
842 ins->type = neg_table [src1->type];
843 ins->opcode += unops_op_map [ins->type];
/* NOT: only integral categories are legal. */
846 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
847 ins->type = src1->type;
849 ins->type = STACK_INV;
850 ins->opcode += unops_op_map [ins->type];
/* Narrowing conversions to I1/I2/I4 land in STACK_I4. */
856 ins->type = STACK_I4;
857 ins->opcode += unops_op_map [src1->type];
/* conv.r.un: integral to floating, unsigned source. */
860 ins->type = STACK_R8;
861 switch (src1->type) {
864 ins->opcode = OP_ICONV_TO_R_UN;
867 ins->opcode = OP_LCONV_TO_R_UN;
871 case CEE_CONV_OVF_I1:
872 case CEE_CONV_OVF_U1:
873 case CEE_CONV_OVF_I2:
874 case CEE_CONV_OVF_U2:
875 case CEE_CONV_OVF_I4:
876 case CEE_CONV_OVF_U4:
877 ins->type = STACK_I4;
878 ins->opcode += ovf3ops_op_map [src1->type];
880 case CEE_CONV_OVF_I_UN:
881 case CEE_CONV_OVF_U_UN:
882 ins->type = STACK_PTR;
883 ins->opcode += ovf2ops_op_map [src1->type];
885 case CEE_CONV_OVF_I1_UN:
886 case CEE_CONV_OVF_I2_UN:
887 case CEE_CONV_OVF_I4_UN:
888 case CEE_CONV_OVF_U1_UN:
889 case CEE_CONV_OVF_U2_UN:
890 case CEE_CONV_OVF_U4_UN:
891 ins->type = STACK_I4;
892 ins->opcode += ovf2ops_op_map [src1->type];
/* conv.u / conv.i family: result is pointer-sized. */
895 ins->type = STACK_PTR;
896 switch (src1->type) {
898 ins->opcode = OP_MOVE;
902 #if SIZEOF_VOID_P == 8
903 ins->opcode = OP_LCONV_TO_U;
905 ins->opcode = OP_MOVE;
909 ins->opcode = OP_LCONV_TO_U;
912 ins->opcode = OP_FCONV_TO_U;
918 ins->type = STACK_I8;
919 ins->opcode += unops_op_map [src1->type];
921 case CEE_CONV_OVF_I8:
922 case CEE_CONV_OVF_U8:
923 ins->type = STACK_I8;
924 ins->opcode += ovf3ops_op_map [src1->type];
926 case CEE_CONV_OVF_U8_UN:
927 case CEE_CONV_OVF_I8_UN:
928 ins->type = STACK_I8;
929 ins->opcode += ovf2ops_op_map [src1->type];
933 ins->type = STACK_R8;
934 ins->opcode += unops_op_map [src1->type];
937 ins->type = STACK_R8;
941 ins->type = STACK_I4;
942 ins->opcode += ovfops_op_map [src1->type];
947 ins->type = STACK_PTR;
948 ins->opcode += ovfops_op_map [src1->type];
/* Overflow-checked arithmetic: no R8 result allowed. */
956 ins->type = bin_num_table [src1->type] [src2->type];
957 ins->opcode += ovfops_op_map [src1->type];
958 if (ins->type == STACK_R8)
959 ins->type = STACK_INV;
961 case OP_LOAD_MEMBASE:
962 ins->type = STACK_PTR;
964 case OP_LOADI1_MEMBASE:
965 case OP_LOADU1_MEMBASE:
966 case OP_LOADI2_MEMBASE:
967 case OP_LOADU2_MEMBASE:
968 case OP_LOADI4_MEMBASE:
969 case OP_LOADU4_MEMBASE:
/* NOTE(review): sub-int loads yield STACK_PTR here, not STACK_I4 --
 * looks intentional but confirm against the full source. */
970 ins->type = STACK_PTR;
972 case OP_LOADI8_MEMBASE:
973 ins->type = STACK_I8;
975 case OP_LOADR4_MEMBASE:
976 case OP_LOADR8_MEMBASE:
977 ins->type = STACK_R8;
980 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
/* Managed pointers carry object_class as a conservative klass. */
984 if (ins->type == STACK_MP)
985 ins->klass = mono_defaults.object_class;
990 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
996 param_table [STACK_MAX] [STACK_MAX] = {
/*
 * check_values_to_signature:
 *
 *   Validate that the ARGS instruction array is compatible with SIG,
 * walking each parameter and checking byref-ness and rough type class
 * (object kinds, floating point).  Return statements and several checks
 * fall in lines elided from this subsampled listing.
 */
1001 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
1005 switch (args->type) {
1015 for (i = 0; i < sig->param_count; ++i) {
1016 switch (args [i].type) {
/* Managed-pointer argument requires a byref parameter... */
1020 if (!sig->params [i]->byref)
/* ...and an object argument requires a non-byref reference type. */
1024 if (sig->params [i]->byref)
1026 switch (sig->params [i]->type) {
1027 case MONO_TYPE_CLASS:
1028 case MONO_TYPE_STRING:
1029 case MONO_TYPE_OBJECT:
1030 case MONO_TYPE_SZARRAY:
1031 case MONO_TYPE_ARRAY:
1038 if (sig->params [i]->byref)
/* Floating-point argument must match an R4/R8 parameter. */
1040 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1049 /*if (!param_table [args [i].type] [sig->params [i]->type])
1057 * When we need a pointer to the current domain many times in a method, we
1058 * call mono_domain_get() once and we store the result in a local variable.
1059 * This function returns the variable that represents the MonoDomain*.
/*
 * Lazily-created singleton variables on the MonoCompile:
 *  - domainvar: caches the MonoDomain* so mono_domain_get () is called once;
 *  - got_var:   address of the Global Offset Table under AOT (arch-gated);
 *  - rgctx_var: runtime generic context vtable var (generic sharing only),
 *    forced onto the stack via MONO_INST_INDIRECT.
 * NOTE(review): subsampled listing -- braces and some early returns are
 * not visible here.
 */
1061 inline static MonoInst *
1062 mono_get_domainvar (MonoCompile *cfg)
1064 if (!cfg->domainvar)
1065 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1066 return cfg->domainvar;
1070 * The got_var contains the address of the Global Offset Table when AOT
1073 inline static MonoInst *
1074 mono_get_got_var (MonoCompile *cfg)
1076 #ifdef MONO_ARCH_NEED_GOT_VAR
/* Only meaningful when compiling ahead-of-time. */
1077 if (!cfg->compile_aot)
1079 if (!cfg->got_var) {
1080 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1082 return cfg->got_var;
1089 mono_get_vtable_var (MonoCompile *cfg)
1091 g_assert (cfg->generic_sharing_context);
1093 if (!cfg->rgctx_var) {
1094 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1095 /* force the var to be stack allocated */
1096 cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
1099 return cfg->rgctx_var;
/*
 * Map an eval-stack category back to a representative MonoType, using the
 * instruction's klass for MP/VTYPE entries.  Unhandled categories abort
 * via g_error.
 */
1103 type_from_stack_type (MonoInst *ins) {
1104 switch (ins->type) {
1105 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1106 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1107 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1108 case STACK_R8: return &mono_defaults.double_class->byval_arg;
/* Managed pointer: the byref "this" form of the instruction's klass. */
1110 return &ins->klass->this_arg;
1111 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1112 case STACK_VTYPE: return &ins->klass->byval_arg;
1114 g_error ("stack type %d to montype not handled\n", ins->type);
/*
 * type_to_stack_type:
 *
 *   Forward mapping of a MonoType (after stripping enum wrappers via
 * mono_type_get_underlying_type) to its STACK_* category.  The return
 * statements between the case labels are elided in this subsampled
 * listing.
 */
1119 static G_GNUC_UNUSED int
1120 type_to_stack_type (MonoType *t)
1122 switch (mono_type_get_underlying_type (t)->type) {
1125 case MONO_TYPE_BOOLEAN:
1128 case MONO_TYPE_CHAR:
1135 case MONO_TYPE_FNPTR:
1137 case MONO_TYPE_CLASS:
1138 case MONO_TYPE_STRING:
1139 case MONO_TYPE_OBJECT:
1140 case MONO_TYPE_SZARRAY:
1141 case MONO_TYPE_ARRAY:
1149 case MONO_TYPE_VALUETYPE:
1150 case MONO_TYPE_TYPEDBYREF:
1152 case MONO_TYPE_GENERICINST:
/* Generic instances split on whether the instantiation is a valuetype. */
1153 if (mono_type_generic_inst_is_valuetype (t))
1159 g_assert_not_reached ();
/*
 * array_access_to_klass:
 *
 *   Map an ldelem/stelem-style opcode to the element MonoClass it accesses
 * (byte, uint16, native int, sbyte, ...).  Most case labels are elided in
 * this subsampled listing; only the returns are visible.
 */
1166 array_access_to_klass (int opcode)
1170 return mono_defaults.byte_class;
1172 return mono_defaults.uint16_class;
1175 return mono_defaults.int_class;
1178 return mono_defaults.sbyte_class;
1181 return mono_defaults.int16_class;
1184 return mono_defaults.int32_class;
1186 return mono_defaults.uint32_class;
1189 return mono_defaults.int64_class;
1192 return mono_defaults.single_class;
1195 return mono_defaults.double_class;
1196 case CEE_LDELEM_REF:
1197 case CEE_STELEM_REF:
1198 return mono_defaults.object_class;
1200 g_assert_not_reached ();
1206 * We try to share variables when possible
/*
 * mono_compile_get_interface_var:
 *
 *   Return a local variable to hold the value in stack SLOT with INS's
 * stack type, reusing a previously-created one from cfg->intvars when the
 * (slot, type) pair matches; otherwise create a fresh OP_LOCAL.  Slots
 * beyond the method's declared max_stack (possible with inlining) always
 * get a fresh variable.
 */
1209 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1214 /* inlining can result in deeper stacks */
1215 if (slot >= mono_method_get_header (cfg->method)->max_stack)
1216 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* Cache key: one entry per (stack type, slot) pair. */
1218 pos = ins->type - 1 + slot * STACK_MAX;
1220 switch (ins->type) {
1227 if ((vnum = cfg->intvars [pos]))
1228 return cfg->varinfo [vnum];
1229 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1230 cfg->intvars [pos] = res->inst_c0;
/* Non-cacheable stack types: always create a new variable. */
1233 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/*
 * mono_save_token_info:
 *
 *   Under AOT compilation, remember which (image, token) pair KEY came
 * from by recording a mempool-allocated MonoJumpInfoToken in
 * cfg->token_info_hash.  No-op for JIT compiles.
 */
1239 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1241 if (cfg->compile_aot) {
1242 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1243 jump_info_token->image = image;
1244 jump_info_token->token = token;
1245 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1250 * This function is called to handle items that are left on the evaluation stack
1251 * at basic block boundaries. What happens is that we save the values to local variables
1252 * and we reload them later when first entering the target basic block (with the
1253 * handle_loaded_temps () function).
1254 * A single joint point will use the same variables (stored in the array bb->out_stack or
1255 * bb->in_stack, if the basic block is before or after the joint point).
1257 * This function needs to be called _before_ emitting the last instruction of
1258 * the bb (i.e. before emitting a branch).
1259 * If the stack merge fails at a join point, cfg->unverifiable is set.
/*
 * handle_stack_args:
 *
 *   Spill the COUNT values left on the eval stack (SP) at a basic-block
 * boundary into locals shared with the successor blocks (bb->out_stack /
 * outb->in_stack), per the long explanation in the comment block that
 * precedes this function in the original file.  Sets cfg->unverifiable
 * when successor stack depths disagree.
 * NOTE(review): subsampled listing -- 'continue'/'break' statements and
 * some braces between the visible lines are not shown.
 */
1262 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1265 MonoBasicBlock *bb = cfg->cbb;
1266 MonoBasicBlock *outb;
1267 MonoInst *inst, **locals;
1272 if (cfg->verbose_level > 3)
1273 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* First time out of this bblock: pick/create the out_stack variables. */
1274 if (!bb->out_scount) {
1275 bb->out_scount = count;
1276 //printf ("bblock %d has out:", bb->block_num);
/* Prefer reusing a successor's already-assigned in_stack. */
1278 for (i = 0; i < bb->out_count; ++i) {
1279 outb = bb->out_bb [i];
1280 /* exception handlers are linked, but they should not be considered for stack args */
1281 if (outb->flags & BB_EXCEPTION_HANDLER)
1283 //printf (" %d", outb->block_num);
1284 if (outb->in_stack) {
1286 bb->out_stack = outb->in_stack;
/* No successor provided one: allocate fresh spill variables. */
1292 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1293 for (i = 0; i < count; ++i) {
1295 * try to reuse temps already allocated for this purpouse, if they occupy the same
1296 * stack slot and if they are of the same type.
1297 * This won't cause conflicts since if 'local' is used to
1298 * store one of the values in the in_stack of a bblock, then
1299 * the same variable will be used for the same outgoing stack
1301 * This doesn't work when inlining methods, since the bblocks
1302 * in the inlined methods do not inherit their in_stack from
1303 * the bblock they are inlined to. See bug #58863 for an
1306 if (cfg->inlined_method)
1307 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1309 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate the chosen variables to every non-handler successor. */
1314 for (i = 0; i < bb->out_count; ++i) {
1315 outb = bb->out_bb [i];
1316 /* exception handlers are linked, but they should not be considered for stack args */
1317 if (outb->flags & BB_EXCEPTION_HANDLER)
1319 if (outb->in_scount) {
1320 if (outb->in_scount != bb->out_scount) {
/* Stack-depth mismatch at a join point: unverifiable IL. */
1321 cfg->unverifiable = TRUE;
1324 continue; /* check they are the same locals */
1326 outb->in_scount = count;
1327 outb->in_stack = bb->out_stack;
1330 locals = bb->out_stack;
/* Emit the stores and make SP refer to the spill locals from now on. */
1332 for (i = 0; i < count; ++i) {
1333 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1334 inst->cil_code = sp [i]->cil_code;
1335 sp [i] = locals [i];
1336 if (cfg->verbose_level > 3)
1337 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1341 * It is possible that the out bblocks already have in_stack assigned, and
1342 * the in_stacks differ. In this case, we will store to all the different
1349 /* Find a bblock which has a different in_stack */
1351 while (bindex < bb->out_count) {
1352 outb = bb->out_bb [bindex];
1353 /* exception handlers are linked, but they should not be considered for stack args */
1354 if (outb->flags & BB_EXCEPTION_HANDLER) {
1358 if (outb->in_stack != locals) {
/* Store the same values into each distinct successor in_stack. */
1359 for (i = 0; i < count; ++i) {
1360 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1361 inst->cil_code = sp [i]->cil_code;
1362 sp [i] = locals [i];
1363 if (cfg->verbose_level > 3)
1364 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1366 locals = outb->in_stack;
1375 /* Emit code which loads interface_offsets [klass->interface_id]
1376 * The array is stored in memory before vtable.
1379 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1381 if (cfg->compile_aot) {
1382 int ioffset_reg = alloc_preg (cfg);
1383 int iid_reg = alloc_preg (cfg);
1385 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1386 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1387 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
1390 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
1395 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1396 * stored in "klass_reg" implements the interface "klass".
1399 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1401 int ibitmap_reg = alloc_preg (cfg);
1402 int ibitmap_byte_reg = alloc_preg (cfg);
1404 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap));
1406 if (cfg->compile_aot) {
1407 int iid_reg = alloc_preg (cfg);
1408 int shifted_iid_reg = alloc_preg (cfg);
1409 int ibitmap_byte_address_reg = alloc_preg (cfg);
1410 int masked_iid_reg = alloc_preg (cfg);
1411 int iid_one_bit_reg = alloc_preg (cfg);
1412 int iid_bit_reg = alloc_preg (cfg);
1413 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1414 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1415 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1416 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1417 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1418 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1419 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1420 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
1422 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1423 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1428 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1429 * stored in "vtable_reg" implements the interface "klass".
1432 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1434 int ibitmap_reg = alloc_preg (cfg);
1435 int ibitmap_byte_reg = alloc_preg (cfg);
1437 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap));
1439 if (cfg->compile_aot) {
1440 int iid_reg = alloc_preg (cfg);
1441 int shifted_iid_reg = alloc_preg (cfg);
1442 int ibitmap_byte_address_reg = alloc_preg (cfg);
1443 int masked_iid_reg = alloc_preg (cfg);
1444 int iid_one_bit_reg = alloc_preg (cfg);
1445 int iid_bit_reg = alloc_preg (cfg);
1446 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1447 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, shifted_iid_reg, iid_reg, 3);
1448 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1449 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1450 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, masked_iid_reg, iid_reg, 7);
1451 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1452 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1453 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
1455 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1456 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1461 * Emit code which checks whenever the interface id of @klass is smaller than
1462 * than the value given by max_iid_reg.
1465 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1466 MonoBasicBlock *false_target)
1468 if (cfg->compile_aot) {
1469 int iid_reg = alloc_preg (cfg);
1470 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1471 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1474 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1476 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1478 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1481 /* Same as above, but obtains max_iid from a vtable */
1483 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1484 MonoBasicBlock *false_target)
1486 int max_iid_reg = alloc_preg (cfg);
1488 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1489 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1492 /* Same as above, but obtains max_iid from a klass */
1494 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1495 MonoBasicBlock *false_target)
1497 int max_iid_reg = alloc_preg (cfg);
1499 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1500 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1504 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1506 int idepth_reg = alloc_preg (cfg);
1507 int stypes_reg = alloc_preg (cfg);
1508 int stype = alloc_preg (cfg);
1510 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1511 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1512 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1513 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1515 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1516 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1517 if (cfg->compile_aot) {
1518 int const_reg = alloc_preg (cfg);
1519 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1520 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1522 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1524 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
1528 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1530 int intf_reg = alloc_preg (cfg);
1532 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1533 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1534 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1536 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1538 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1542 * Variant of the above that takes a register to the class, not the vtable.
1545 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1547 int intf_bit_reg = alloc_preg (cfg);
1549 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1550 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1551 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1553 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1555 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1559 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1561 if (cfg->compile_aot) {
1562 int const_reg = alloc_preg (cfg);
1563 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1564 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1566 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1568 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1572 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1574 if (cfg->compile_aot) {
1575 int const_reg = alloc_preg (cfg);
1576 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1577 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1579 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1581 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
1585 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1588 int rank_reg = alloc_preg (cfg);
1589 int eclass_reg = alloc_preg (cfg);
1591 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
1592 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1593 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1594 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1595 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
1596 if (klass->cast_class == mono_defaults.object_class) {
1597 int parent_reg = alloc_preg (cfg);
1598 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
1599 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1600 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1601 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1602 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1603 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1604 } else if (klass->cast_class == mono_defaults.enum_class) {
1605 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1606 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1607 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1609 mini_emit_castclass (cfg, obj_reg, eclass_reg, klass->cast_class, object_is_null);
1612 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
1613 /* Check that the object is a vector too */
1614 int bounds_reg = alloc_preg (cfg);
1615 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
1616 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1617 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1620 int idepth_reg = alloc_preg (cfg);
1621 int stypes_reg = alloc_preg (cfg);
1622 int stype = alloc_preg (cfg);
1624 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1625 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1626 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1627 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1629 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1630 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1631 mini_emit_class_check (cfg, stype, klass);
1636 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1640 g_assert (val == 0);
1642 if ((size <= 4) && (size <= align)) {
1645 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1648 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1651 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1653 #if SIZEOF_VOID_P == 8
1655 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
1661 val_reg = alloc_preg (cfg);
1663 if (sizeof (gpointer) == 8)
1664 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1666 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
1669 /* This could be optimized further if neccesary */
1671 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1678 #if !NO_UNALIGNED_ACCESS
1679 if (sizeof (gpointer) == 8) {
1681 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1686 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
1694 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1699 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1704 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1710 #endif /* DISABLE_JIT */
1713 mini_emit_memcpy2 (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1718 /* This could be optimized further if neccesary */
1720 cur_reg = alloc_preg (cfg);
1721 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1722 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1729 #if !NO_UNALIGNED_ACCESS
1730 if (sizeof (gpointer) == 8) {
1732 cur_reg = alloc_preg (cfg);
1733 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1734 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
1743 cur_reg = alloc_preg (cfg);
1744 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1745 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1751 cur_reg = alloc_preg (cfg);
1752 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1753 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1759 cur_reg = alloc_preg (cfg);
1760 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1761 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1771 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
1773 int vtable_reg = alloc_preg (cfg);
1775 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
1777 if (cfg->opt & MONO_OPT_SHARED) {
1778 int class_reg = alloc_preg (cfg);
1779 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1780 if (cfg->compile_aot) {
1781 int klass_reg = alloc_preg (cfg);
1782 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
1783 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
1785 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
1788 if (cfg->compile_aot) {
1789 int vt_reg = alloc_preg (cfg);
1790 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, mono_class_vtable (cfg->domain, array_class));
1791 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
1793 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, mono_class_vtable (cfg->domain, array_class));
1797 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
1801 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
1804 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1807 type = mini_get_basic_type_from_generic (gsctx, type);
1808 switch (type->type) {
1809 case MONO_TYPE_VOID:
1810 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
1813 case MONO_TYPE_BOOLEAN:
1816 case MONO_TYPE_CHAR:
1819 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1823 case MONO_TYPE_FNPTR:
1824 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1825 case MONO_TYPE_CLASS:
1826 case MONO_TYPE_STRING:
1827 case MONO_TYPE_OBJECT:
1828 case MONO_TYPE_SZARRAY:
1829 case MONO_TYPE_ARRAY:
1830 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1833 return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
1836 return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
1837 case MONO_TYPE_VALUETYPE:
1838 if (type->data.klass->enumtype) {
1839 type = type->data.klass->enum_basetype;
1842 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1843 case MONO_TYPE_TYPEDBYREF:
1844 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1845 case MONO_TYPE_GENERICINST:
1846 type = &type->data.generic_class->container_class->byval_arg;
1849 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
1855 * target_type_is_incompatible:
1856 * @cfg: MonoCompile context
1858 * Check that the item @arg on the evaluation stack can be stored
1859 * in the target type (can be a local, or field, etc).
1860 * The cfg arg can be used to check if we need verification or just
1863 * Returns: non-0 value if arg can't be stored on a target.
1866 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1868 MonoType *simple_type;
1871 if (target->byref) {
1872 /* FIXME: check that the pointed to types match */
1873 if (arg->type == STACK_MP)
1874 return arg->klass != mono_class_from_mono_type (target);
1875 if (arg->type == STACK_PTR)
1880 simple_type = mono_type_get_underlying_type (target);
1881 switch (simple_type->type) {
1882 case MONO_TYPE_VOID:
1886 case MONO_TYPE_BOOLEAN:
1889 case MONO_TYPE_CHAR:
1892 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1896 /* STACK_MP is needed when setting pinned locals */
1897 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1902 case MONO_TYPE_FNPTR:
1903 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1906 case MONO_TYPE_CLASS:
1907 case MONO_TYPE_STRING:
1908 case MONO_TYPE_OBJECT:
1909 case MONO_TYPE_SZARRAY:
1910 case MONO_TYPE_ARRAY:
1911 if (arg->type != STACK_OBJ)
1913 /* FIXME: check type compatibility */
1917 if (arg->type != STACK_I8)
1922 if (arg->type != STACK_R8)
1925 case MONO_TYPE_VALUETYPE:
1926 if (arg->type != STACK_VTYPE)
1928 klass = mono_class_from_mono_type (simple_type);
1929 if (klass != arg->klass)
1932 case MONO_TYPE_TYPEDBYREF:
1933 if (arg->type != STACK_VTYPE)
1935 klass = mono_class_from_mono_type (simple_type);
1936 if (klass != arg->klass)
1939 case MONO_TYPE_GENERICINST:
1940 if (mono_type_generic_inst_is_valuetype (simple_type)) {
1941 if (arg->type != STACK_VTYPE)
1943 klass = mono_class_from_mono_type (simple_type);
1944 if (klass != arg->klass)
1948 if (arg->type != STACK_OBJ)
1950 /* FIXME: check type compatibility */
1954 case MONO_TYPE_MVAR:
1955 /* FIXME: all the arguments must be references for now,
1956 * later look inside cfg and see if the arg num is
1957 * really a reference
1959 g_assert (cfg->generic_sharing_context);
1960 if (arg->type != STACK_OBJ)
1964 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
1970 * Prepare arguments for passing to a function call.
1971 * Return a non-zero value if the arguments can't be passed to the given
1973 * The type checks are not yet complete and some conversions may need
1974 * casts on 32 or 64 bit architectures.
1976 * FIXME: implement this using target_type_is_incompatible ()
1979 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
1981 MonoType *simple_type;
1985 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
1989 for (i = 0; i < sig->param_count; ++i) {
1990 if (sig->params [i]->byref) {
1991 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
1995 simple_type = sig->params [i];
1996 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
1998 switch (simple_type->type) {
1999 case MONO_TYPE_VOID:
2004 case MONO_TYPE_BOOLEAN:
2007 case MONO_TYPE_CHAR:
2010 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2016 case MONO_TYPE_FNPTR:
2017 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2020 case MONO_TYPE_CLASS:
2021 case MONO_TYPE_STRING:
2022 case MONO_TYPE_OBJECT:
2023 case MONO_TYPE_SZARRAY:
2024 case MONO_TYPE_ARRAY:
2025 if (args [i]->type != STACK_OBJ)
2030 if (args [i]->type != STACK_I8)
2035 if (args [i]->type != STACK_R8)
2038 case MONO_TYPE_VALUETYPE:
2039 if (simple_type->data.klass->enumtype) {
2040 simple_type = simple_type->data.klass->enum_basetype;
2043 if (args [i]->type != STACK_VTYPE)
2046 case MONO_TYPE_TYPEDBYREF:
2047 if (args [i]->type != STACK_VTYPE)
2050 case MONO_TYPE_GENERICINST:
2051 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2055 g_error ("unknown type 0x%02x in check_call_signature",
2063 callvirt_to_call (int opcode)
2068 case OP_VOIDCALLVIRT:
2077 g_assert_not_reached ();
2084 callvirt_to_call_membase (int opcode)
2088 return OP_CALL_MEMBASE;
2089 case OP_VOIDCALLVIRT:
2090 return OP_VOIDCALL_MEMBASE;
2092 return OP_FCALL_MEMBASE;
2094 return OP_LCALL_MEMBASE;
2096 return OP_VCALL_MEMBASE;
2098 g_assert_not_reached ();
#ifdef MONO_ARCH_HAVE_IMT
/*
 * Load the IMT method argument (the interface method being invoked, or
 * @imt_arg when already materialized) into the architecture's IMT register,
 * or delegate to the backend when no dedicated register exists.
 */
static void
emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
{
#ifdef MONO_ARCH_IMT_REG
	int method_reg = alloc_preg (cfg);

	if (imt_arg) {
		MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
	} else if (cfg->compile_aot) {
		MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
	} else {
		MonoInst *ins;
		MONO_INST_NEW (cfg, ins, OP_PCONST);
		ins->inst_p0 = call->method;
		ins->dreg = method_reg;
		MONO_ADD_INS (cfg->cbb, ins);
	}

	mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
#else
	mono_arch_emit_imt_argument (cfg, call, imt_arg);
#endif
}
#endif
2130 static MonoJumpInfo *
2131 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2133 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2137 ji->data.target = target;
2142 inline static MonoInst*
2143 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args);
2145 inline static MonoCallInst *
2146 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2147 MonoInst **args, int calli, int virtual)
2150 #ifdef MONO_ARCH_SOFT_FLOAT
2154 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2157 call->signature = sig;
2159 type_to_eval_stack_type ((cfg), sig->ret, &call->inst);
2161 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2162 MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
2165 temp->backend.is_pinvoke = sig->pinvoke;
2168 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2169 * address of return value to increase optimization opportunities.
2170 * Before vtype decomposition, the dreg of the call ins itself represents the
2171 * fact the call modifies the return value. After decomposition, the call will
2172 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2173 * will be transformed into an LDADDR.
2175 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2176 loada->dreg = alloc_preg (cfg);
2177 loada->inst_p0 = temp;
2178 /* We reference the call too since call->dreg could change during optimization */
2179 loada->inst_p1 = call;
2180 MONO_ADD_INS (cfg->cbb, loada);
2182 call->inst.dreg = temp->dreg;
2184 call->vret_var = loada;
2185 } else if (!MONO_TYPE_IS_VOID (sig->ret))
2186 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2188 #ifdef MONO_ARCH_SOFT_FLOAT
2190 * If the call has a float argument, we would need to do an r8->r4 conversion using
2191 * an icall, but that cannot be done during the call sequence since it would clobber
2192 * the call registers + the stack. So we do it before emitting the call.
2194 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2196 MonoInst *in = call->args [i];
2198 if (i >= sig->hasthis)
2199 t = sig->params [i - sig->hasthis];
2201 t = &mono_defaults.int_class->byval_arg;
2202 t = mono_type_get_underlying_type (t);
2204 if (!t->byref && t->type == MONO_TYPE_R4) {
2205 MonoInst *iargs [1];
2209 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2211 /* The result will be in an int vreg */
2212 call->args [i] = conv;
2217 mono_arch_emit_call (cfg, call);
2219 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2220 cfg->flags |= MONO_CFG_HAS_CALLS;
2225 inline static MonoInst*
2226 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr)
2228 MonoCallInst *call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE);
2230 call->inst.sreg1 = addr->dreg;
2232 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2234 return (MonoInst*)call;
2237 inline static MonoInst*
2238 mono_emit_rgctx_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *rgctx_arg)
2240 #ifdef MONO_ARCH_RGCTX_REG
2242 int rgctx_reg = mono_alloc_preg (cfg);
2244 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2245 call = (MonoCallInst*)mono_emit_calli (cfg, sig, args, addr);
2246 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2247 cfg->uses_rgctx_reg = TRUE;
2248 return (MonoInst*)call;
2250 g_assert_not_reached ();
2256 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2257 MonoInst **args, MonoInst *this, MonoInst *imt_arg)
2259 gboolean virtual = this != NULL;
2260 gboolean enable_for_aot = TRUE;
2263 if (method->string_ctor) {
2264 /* Create the real signature */
2265 /* FIXME: Cache these */
2266 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_full (cfg->mempool, sig);
2267 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2272 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual);
2274 if (this && sig->hasthis &&
2275 (method->klass->marshalbyref || method->klass == mono_defaults.object_class) &&
2276 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && !MONO_CHECK_THIS (this)) {
2277 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2279 call->method = method;
2281 call->inst.flags |= MONO_INST_HAS_METHOD;
2282 call->inst.inst_left = this;
2285 int vtable_reg, slot_reg, this_reg;
2287 this_reg = this->dreg;
2289 if ((!cfg->compile_aot || enable_for_aot) &&
2290 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2291 ((method->flags & METHOD_ATTRIBUTE_FINAL) &&
2292 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK))) {
2294 * the method is not virtual, we just need to ensure this is not null
2295 * and then we can call the method directly.
2297 if (method->klass->marshalbyref || method->klass == mono_defaults.object_class) {
2298 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2301 if (!method->string_ctor) {
2302 cfg->flags |= MONO_CFG_HAS_CHECK_THIS;
2303 MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, this_reg);
2304 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, this_reg);
2307 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2309 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2311 return (MonoInst*)call;
2314 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
2315 if ((method->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
2316 /* Make a call to delegate->invoke_impl */
2317 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2318 call->inst.inst_basereg = this_reg;
2319 call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2320 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2322 return (MonoInst*)call;
2326 if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
2327 ((method->flags & METHOD_ATTRIBUTE_FINAL) ||
2328 (method->klass && method->klass->flags & TYPE_ATTRIBUTE_SEALED))) {
2330 * the method is virtual, but we can statically dispatch since either
2331 * it's class or the method itself are sealed.
2332 * But first we need to ensure it's not a null reference.
2334 cfg->flags |= MONO_CFG_HAS_CHECK_THIS;
2335 MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, this_reg);
2336 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, this_reg);
2338 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2339 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2341 return (MonoInst*)call;
2344 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2346 /* Initialize method->slot */
2347 mono_class_setup_vtable (method->klass);
2349 vtable_reg = alloc_preg (cfg);
2350 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2351 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2353 #ifdef MONO_ARCH_HAVE_IMT
2355 guint32 imt_slot = mono_method_get_imt_slot (method);
2356 emit_imt_argument (cfg, call, imt_arg);
2357 slot_reg = vtable_reg;
2358 call->inst.inst_offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2361 if (slot_reg == -1) {
2362 slot_reg = alloc_preg (cfg);
2363 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2364 call->inst.inst_offset = method->slot * SIZEOF_VOID_P;
2367 slot_reg = vtable_reg;
2368 call->inst.inst_offset = G_STRUCT_OFFSET (MonoVTable, vtable) + (method->slot * SIZEOF_VOID_P);
2371 call->inst.sreg1 = slot_reg;
2372 call->virtual = TRUE;
2375 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2377 return (MonoInst*)call;
2380 static inline MonoInst*
2381 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2383 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), args, this, NULL);
2387 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2394 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE);
2397 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2399 return (MonoInst*)call;
2402 inline static MonoInst*
2403 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2405 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2409 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2413 * mono_emit_abs_call:
2415 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2417 inline static MonoInst*
2418 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2419 MonoMethodSignature *sig, MonoInst **args)
2421 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2425 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
2428 if (cfg->abs_patches == NULL)
2429 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2430 g_hash_table_insert (cfg->abs_patches, ji, ji);
2431 ins = mono_emit_native_call (cfg, ji, sig, args);
2432 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
2437 get_memcpy_method (void)
2439 static MonoMethod *memcpy_method = NULL;
2440 if (!memcpy_method) {
2441 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2443 g_error ("Old corlib found. Install a new one");
2445 return memcpy_method;
2449  * Emit code to copy a valuetype of type @klass whose address is stored in
2450  * @src->dreg to memory whose address is stored at @dest->dreg.
2453 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
2455 MonoInst *iargs [3];
2458 MonoMethod *memcpy_method;
2462 	 * This check breaks with spilled vars... need to handle it during verification anyway.
2463 	 * g_assert (klass && klass == src->klass && klass == dest->klass);
/* NATIVE selects the marshalled (native) layout size instead of the managed one. */
2467 n = mono_class_native_size (klass, &align);
2469 n = mono_class_value_size (klass, &align);
/* Small copies are inlined as loads/stores when intrinsics are enabled;
 * larger ones fall back to a call to the corlib memcpy helper. */
2471 if ((cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
2472 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
2473 mini_emit_memcpy2 (cfg, dest->dreg, 0, src->dreg, 0, n, align);
2477 EMIT_NEW_ICONST (cfg, iargs [2], n);
2479 memcpy_method = get_memcpy_method ();
2480 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *
 *   Return the cached corlib String::memset (3 args) helper method.
 * Aborts with g_error () if the installed corlib does not provide it.
 */
2487 static MonoMethod *memset_method = NULL;
2488 if (!memset_method) {
2489 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
2491 g_error ("Old corlib found. Install a new one");
2493 return memset_method;
/*
 * mini_emit_initobj:
 *
 *   Emit code to zero-initialize a valuetype of type KLASS at the address in
 * DEST->dreg: an inline memset for small sizes, otherwise a call to the
 * corlib memset helper with (addr, 0, size) arguments.
 */
2497 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
2499 MonoInst *iargs [3];
2502 MonoMethod *memset_method;
2504 /* FIXME: Optimize this for the case when dest is an LDADDR */
2506 mono_class_init (klass);
2507 n = mono_class_value_size (klass, &align);
/* Same small-size threshold as mini_emit_stobj (): up to 5 pointers inline. */
2509 if (n <= sizeof (gpointer) * 5) {
2510 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
2513 memset_method = get_memset_method ();
2515 EMIT_NEW_ICONST (cfg, iargs [1], 0);
2516 EMIT_NEW_ICONST (cfg, iargs [2], n);
2517 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 *
 *   Emit code which loads the runtime generic context for METHOD.  The
 * source depends on how the method receives its context: the method RGCTX
 * argument (mrgctx), the vtable argument for static methods, or the 'this'
 * object's vtable for instance methods.
 */
2522 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
2524 MonoInst *this = NULL;
/* Valuetype methods cannot reach here; they are rejected earlier
 * (see GENERIC_SHARING_FAILURE_IF_VALUETYPE_METHOD in EMIT_GET_RGCTX). */
2526 g_assert (!method->klass->valuetype);
2528 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) && !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD))
2529 EMIT_NEW_ARGLOAD (cfg, this, 0);
/* Case 1: the method-level context is used -> load the mrgctx variable. */
2531 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
2532 MonoInst *mrgctx_loc, *mrgctx_var;
2535 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
2537 mrgctx_loc = mono_get_vtable_var (cfg);
2538 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
/* Case 2: static method -> the context comes in through the vtable var. */
2541 } else if (method->flags & METHOD_ATTRIBUTE_STATIC) {
2542 MonoInst *vtable_loc, *vtable_var;
2546 vtable_loc = mono_get_vtable_var (cfg);
2547 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
/* The variable actually holds an mrgctx here; dereference it to get
 * the class vtable stored inside the MonoMethodRuntimeGenericContext. */
2549 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
2550 MonoInst *mrgctx_var = vtable_var;
2553 vtable_reg = alloc_preg (cfg);
2554 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
2555 vtable_var->type = STACK_PTR;
/* Case 3: instance method -> load the vtable from the 'this' object. */
2561 int vtable_reg, res_reg;
2563 vtable_reg = alloc_preg (cfg);
2564 res_reg = alloc_preg (cfg);
2565 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Fetch the runtime generic context into RGCTX, failing generic sharing
 * first if the current method is a valuetype method (unsupported case). */
2570 #define EMIT_GET_RGCTX(rgctx, context_used) do { \
2571 GENERIC_SHARING_FAILURE_IF_VALUETYPE_METHOD(*ip); \
2572 (rgctx) = emit_get_rgctx (cfg, method, (context_used)); \
/*
 * mono_patch_info_rgctx_entry_new:
 *
 *   Allocate (from MP) and fill a MonoJumpInfoRgctxEntry describing an rgctx
 * slot lookup: the owning METHOD, whether the lookup goes through an mrgctx,
 * the embedded patch (PATCH_TYPE/PATCH_DATA) and the slot INFO_TYPE.
 * The trailing 'return res;' is elided in this excerpt.
 */
2576 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, int info_type)
2578 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
2579 res->method = method;
2580 res->in_mrgctx = in_mrgctx;
2581 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
2582 res->data->type = patch_type;
2583 res->data->data.target = patch_data;
2584 res->info_type = info_type;
/*
 * emit_rgctx_fetch:
 *
 *   Emit a call to the lazy rgctx fetch trampoline which resolves ENTRY
 * using RGCTX as its single argument.
 */
2589 static inline MonoInst*
2590 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
2592 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 *
 *   Emit a lazy rgctx fetch of the RGCTX_TYPE information for KLASS.
 */
2596 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
2597 MonoInst *rgctx, MonoClass *klass, int rgctx_type)
2599 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
2601 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_method:
 *
 *   Emit a lazy rgctx fetch of the RGCTX_TYPE information for CMETHOD.
 */
2605 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
2606 MonoInst *rgctx, MonoMethod *cmethod, int rgctx_type)
2608 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
2610 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *
 *   Emit a lazy rgctx fetch of the RGCTX_TYPE information for FIELD.
 */
2614 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
2615 MonoInst *rgctx, MonoClassField *field, int rgctx_type)
2617 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
2619 return emit_rgctx_fetch (cfg, rgctx, entry);
2623  * Handles unbox of a Nullable<T>. If a rgctx is passed, then shared generic code
2627 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used, MonoInst *rgctx)
/* Nullable unboxing is implemented by calling Nullable<T>.Unbox (). */
2629 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
/* Shared generic code: the concrete Unbox address must be fetched from
 * the rgctx and invoked through an indirect (calli-style) call. */
2632 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, rgctx, method,
2633 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
2635 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
2637 return mono_emit_method_call (cfg, method, &val, NULL);
/*
 * handle_unbox:
 *
 *   Emit an unbox of the object on top of the stack (SP [0]) to the
 * valuetype KLASS: check the object is a boxed instance of the right
 * element class (throwing InvalidCastException otherwise) and return the
 * address of the value inside the box.
 */
2642 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used, MonoInst *rgctx)
2646 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
2647 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
2648 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
2649 int rank_reg = alloc_dreg (cfg ,STACK_I4);
2651 obj_reg = sp [0]->dreg;
2652 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2653 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
2655 /* FIXME: generics */
2656 g_assert (klass->rank == 0);
/* Arrays can never unbox to a valuetype: rank must be 0. */
2659 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
2660 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
2662 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2663 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
/* Shared generic code: the expected element class comes from the rgctx. */
2666 MonoInst *element_class;
2668 /* This assertion is from the unboxcast insn */
2669 g_assert (klass->rank == 0);
2671 element_class = emit_get_rgctx_klass (cfg, context_used, rgctx,
2672 klass->element_class, MONO_RGCTX_INFO_KLASS);
2674 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
2675 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
2677 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
/* The unboxed value lives right after the MonoObject header. */
2680 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_PTR), obj_reg, sizeof (MonoObject));
2681 MONO_ADD_INS (cfg->cbb, add);
2682 add->type = STACK_MP;
/*
 * handle_alloc:
 *
 *   Emit code which allocates a new object of type KLASS and returns it.
 * FOR_BOX selects the boxing-specific allocator where available.  Chooses
 * between the shared-domain allocator, an AOT corlib helper, a GC managed
 * allocator, and the generic allocation function.
 */
2689 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box)
2691 MonoInst *iargs [2];
/* Shared code cannot burn the domain into the code: pass it explicitly. */
2694 if (cfg->opt & MONO_OPT_SHARED) {
2695 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
2696 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
2698 alloc_ftn = mono_object_new;
2699 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib) {
2700 /* This happens often in argument checking code, eg. throw new FooException... */
2701 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
2702 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
2703 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
2705 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2706 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
/* Prefer the GC-provided managed allocator when one exists. */
2709 if (managed_alloc) {
2710 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
2711 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
2713 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* Some allocators take the instance size in pointer-sized words ("lw"). */
2715 guint32 lw = vtable->klass->instance_size;
2716 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
2717 EMIT_NEW_ICONST (cfg, iargs [0], lw);
2718 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
2721 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
2725 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/*
 * handle_alloc_from_inst:
 *
 *   Like handle_alloc (), but for shared generic code where the class is
 * not known at compile time: DATA_INST holds the runtime class/vtable
 * information, so the allocator is called through it.
 */
2729 handle_alloc_from_inst (MonoCompile *cfg, MonoClass *klass, MonoInst *data_inst,
2732 MonoInst *iargs [2];
2733 MonoMethod *managed_alloc = NULL;
2737 	FIXME: we cannot get managed_alloc here because we can't get
2738 	the class's vtable (because it's not a closed class)
2740 	MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2741 	MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
2744 if (cfg->opt & MONO_OPT_SHARED) {
2745 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
2746 iargs [1] = data_inst;
2747 alloc_ftn = mono_object_new;
/* managed_alloc is always NULL here (see FIXME above); branch kept for
 * the day the vtable can be obtained for open classes. */
2749 if (managed_alloc) {
2750 iargs [0] = data_inst;
2751 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
2754 iargs [0] = data_inst;
2755 alloc_ftn = mono_object_new_specific;
2758 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/*
 * handle_box:
 *
 *   Emit code which boxes VAL into a new object of type KLASS.
 * Nullable<T> values are boxed through Nullable<T>.Box (); other
 * valuetypes are allocated and the value is stored after the header.
 */
2762 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass)
2764 MonoInst *alloc, *ins;
2766 if (mono_class_is_nullable (klass)) {
2767 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
2768 return mono_emit_method_call (cfg, method, &val, NULL);
2771 alloc = handle_alloc (cfg, klass, TRUE);
/* Copy the value into the box, right after the MonoObject header. */
2773 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * handle_box_from_inst:
 *
 *   Shared-generic-code variant of handle_box (): the runtime class data
 * comes in through DATA_INST, and Nullable<T>.Box is invoked indirectly
 * through an address fetched from the rgctx.
 */
2779 handle_box_from_inst (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used, MonoInst *rgctx, MonoInst *data_inst)
2781 MonoInst *alloc, *ins;
2783 if (mono_class_is_nullable (klass)) {
2784 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
2785 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, rgctx, method,
2786 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
2788 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
2790 alloc = handle_alloc_from_inst (cfg, klass, data_inst, TRUE);
/* Copy the value into the box, right after the MonoObject header. */
2792 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * handle_castclass:
 *
 *   Emit the IR for a 'castclass' of SRC to KLASS: NULL passes through,
 * interfaces go through the interface-cast helper, sealed classes use a
 * fast vtable/class pointer comparison, and everything else falls back to
 * the generic castclass emitter.  Throws InvalidCastException on failure.
 */
2799 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
2801 MonoBasicBlock *is_null_bb;
2802 int obj_reg = src->dreg;
2803 int vtable_reg = alloc_preg (cfg);
2805 NEW_BBLOCK (cfg, is_null_bb);
/* A null reference always casts successfully: skip all checks. */
2807 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
2808 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
/* --debug=casts support: record the from/to classes in TLS so a failed
 * cast can produce a detailed error message. */
2810 if (mini_get_debug_options ()->better_cast_details) {
2811 int to_klass_reg = alloc_preg (cfg);
2812 int klass_reg = alloc_preg (cfg);
2813 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
2816 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
2820 MONO_ADD_INS (cfg->cbb, tls_get);
2821 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2822 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2824 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
2825 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
2826 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
2829 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2830 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2831 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
2833 int klass_reg = alloc_preg (cfg);
2835 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed non-array class: a single pointer comparison suffices.
 * In AOT or shared code the vtable address is not constant, so compare
 * the class pointer instead. */
2837 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
2838 /* the remoting code is broken, access the class for now */
2840 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
2841 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
2843 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2844 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
2846 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
2848 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2849 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, is_null_bb);
2853 MONO_START_BB (cfg, is_null_bb);
2855 /* Reset the variables holding the cast details */
2856 if (mini_get_debug_options ()->better_cast_details) {
2857 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
2859 MONO_ADD_INS (cfg->cbb, tls_get);
2860 /* It is enough to reset the from field */
2861 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
/*
 * handle_isinst:
 *
 *   Emit the IR for 'isinst': returns SRC if the object is an instance of
 * KLASS (or NULL input), otherwise NULL.  The result register is assigned
 * the object up front; the false path overwrites it with 0.
 */
2868 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
2871 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
2872 int obj_reg = src->dreg;
2873 int vtable_reg = alloc_preg (cfg);
2874 int res_reg = alloc_preg (cfg);
2876 NEW_BBLOCK (cfg, is_null_bb);
2877 NEW_BBLOCK (cfg, false_bb);
2878 NEW_BBLOCK (cfg, end_bb);
2880 /* Do the assignment at the beginning, so the other assignment can be if converted */
2881 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
2882 ins->type = STACK_OBJ;
/* Null input: result is the (null) object itself. */
2885 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
2886 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
2888 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2889 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2890 /* the is_null_bb target simply copies the input register to the output */
2891 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
2893 int klass_reg = alloc_preg (cfg);
2895 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Array case: the rank must match, then the element (cast) class is
 * checked, with special handling for enums and System.Object elements. */
2898 int rank_reg = alloc_preg (cfg);
2899 int eclass_reg = alloc_preg (cfg);
2901 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
2902 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
2903 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
2904 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2905 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
2906 if (klass->cast_class == mono_defaults.object_class) {
2907 int parent_reg = alloc_preg (cfg);
2908 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
2909 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
2910 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
2911 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
2912 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
2913 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
2914 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
2915 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
2916 } else if (klass->cast_class == mono_defaults.enum_class) {
2917 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
2918 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
2919 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
2920 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
2922 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
2923 /* Check that the object is a vector too */
2924 int bounds_reg = alloc_preg (cfg);
2925 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
2926 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
2927 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
2930 /* the is_null_bb target simply copies the input register to the output */
2931 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
2933 } else if (mono_class_is_nullable (klass)) {
2934 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2935 /* the is_null_bb target simply copies the input register to the output */
2936 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
/* Sealed non-array class: single vtable/class pointer comparison. */
2938 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
2939 /* the remoting code is broken, access the class for now */
2941 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
2942 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
2944 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2945 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
2947 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
2948 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
2950 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2951 /* the is_null_bb target simply copies the input register to the output */
2952 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, is_null_bb);
/* false path: overwrite the result with NULL. */
2957 MONO_START_BB (cfg, false_bb);
2959 MONO_EMIT_NEW_ICONST (cfg, res_reg, 0);
2960 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
2962 MONO_START_BB (cfg, is_null_bb);
2964 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *
 *   Emit the remoting-aware variant of 'isinst' (see result encoding in the
 * comment below).  Transparent proxies with custom type info cannot be
 * decided at JIT time and yield 2 so the caller can do a runtime check.
 */
2970 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
2972 	/* This opcode takes as input an object reference and a class, and returns:
2973 	0) if the object is an instance of the class,
2974 	1) if the object is not instance of the class,
2975 	2) if the object is a proxy whose type cannot be determined */
2978 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
2979 int obj_reg = src->dreg;
2980 int dreg = alloc_ireg (cfg);
2982 int klass_reg = alloc_preg (cfg);
2984 NEW_BBLOCK (cfg, true_bb);
2985 NEW_BBLOCK (cfg, false_bb);
2986 NEW_BBLOCK (cfg, false2_bb);
2987 NEW_BBLOCK (cfg, end_bb);
2988 NEW_BBLOCK (cfg, no_proxy_bb);
/* Null is "not an instance" (result 1). */
2990 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
2991 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
2993 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2994 NEW_BBLOCK (cfg, interface_fail_bb);
2996 tmp_reg = alloc_preg (cfg);
2997 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2998 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
/* Interface check failed: if the object is a transparent proxy with
 * custom type info, the answer is "cannot determine" (2). */
2999 MONO_START_BB (cfg, interface_fail_bb);
3000 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3002 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
3004 tmp_reg = alloc_preg (cfg);
3005 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3006 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3007 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
/* Non-interface case: distinguish proxies from plain objects first. */
3009 tmp_reg = alloc_preg (cfg);
3010 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3011 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3013 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3014 tmp_reg = alloc_preg (cfg);
3015 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3016 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3018 tmp_reg = alloc_preg (cfg);
3019 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3020 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3021 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3023 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
3024 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
3026 MONO_START_BB (cfg, no_proxy_bb);
3028 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
/* Materialize the three possible results (1, 2, 0) and merge at end_bb. */
3031 MONO_START_BB (cfg, false_bb);
3033 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3034 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3036 MONO_START_BB (cfg, false2_bb);
3038 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
3039 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3041 MONO_START_BB (cfg, true_bb);
3043 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3045 MONO_START_BB (cfg, end_bb);
3048 MONO_INST_NEW (cfg, ins, OP_ICONST);
3050 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *
 *   Emit the remoting-aware variant of 'castclass' (see result encoding in
 * the comment below).  Mirrors handle_cisinst () but throws
 * InvalidCastException instead of returning a failure code.
 */
3056 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3058 	/* This opcode takes as input an object reference and a class, and returns:
3059 	0) if the object is an instance of the class,
3060 	1) if the object is a proxy whose type cannot be determined
3061 	an InvalidCastException exception is thrown otherwhise*/
3064 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
3065 int obj_reg = src->dreg;
3066 int dreg = alloc_ireg (cfg);
3067 int tmp_reg = alloc_preg (cfg);
3068 int klass_reg = alloc_preg (cfg);
3070 NEW_BBLOCK (cfg, end_bb);
3071 NEW_BBLOCK (cfg, ok_result_bb);
/* Null always casts successfully (result 0). */
3073 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3074 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
3076 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3077 NEW_BBLOCK (cfg, interface_fail_bb);
3079 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3080 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
/* Interface check failed: only a transparent proxy with custom type
 * info escapes the exception, yielding result 1. */
3081 MONO_START_BB (cfg, interface_fail_bb);
3082 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3084 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
3086 tmp_reg = alloc_preg (cfg);
3087 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3088 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3089 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
3091 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3092 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3095 NEW_BBLOCK (cfg, no_proxy_bb);
/* Non-interface case: route proxies and plain objects separately. */
3097 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3098 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3099 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3101 tmp_reg = alloc_preg (cfg);
3102 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3103 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3105 tmp_reg = alloc_preg (cfg);
3106 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3107 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3108 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3110 NEW_BBLOCK (cfg, fail_1_bb);
3112 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
3114 MONO_START_BB (cfg, fail_1_bb);
3116 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3117 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3119 MONO_START_BB (cfg, no_proxy_bb);
3121 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
3124 MONO_START_BB (cfg, ok_result_bb);
3126 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3128 MONO_START_BB (cfg, end_bb);
3131 MONO_INST_NEW (cfg, ins, OP_ICONST);
3133 ins->type = STACK_I4;
/*
 * handle_delegate_ctor:
 *
 *   Emit an inlined delegate constructor: allocate the delegate object of
 * type KLASS, store its target/method/invoke_impl fields, and (for non-AOT,
 * non-dynamic methods) cache a per-domain compiled-code slot so
 * mono_delegate_trampoline () can avoid a method lookup.
 */
3138 static G_GNUC_UNUSED MonoInst*
3139 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method)
3141 gpointer *trampoline;
3142 MonoInst *obj, *method_ins, *tramp_ins;
3146 obj = handle_alloc (cfg, klass, FALSE);
3148 /* Inline the contents of mono_delegate_ctor */
3150 /* Set target field */
3151 /* Optimize away setting of NULL target */
3152 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0))
3153 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
3155 /* Set method field */
3156 EMIT_NEW_METHODCONST (cfg, method_ins, method);
3157 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
3160 	 * To avoid looking up the compiled code belonging to the target method
3161 	 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
3162 	 * store it, and we fill it after the method has been compiled.
3164 if (!cfg->compile_aot && !method->dynamic) {
3165 MonoInst *code_slot_ins;
/* The domain lock guards method_code_hash creation and lookup/insert. */
3167 domain = mono_domain_get ();
3168 mono_domain_lock (domain);
3169 if (!domain->method_code_hash)
3170 domain->method_code_hash = g_hash_table_new (NULL, NULL);
3171 code_slot = g_hash_table_lookup (domain->method_code_hash, method);
3173 code_slot = mono_mempool_alloc0 (domain->mp, sizeof (gpointer));
3174 g_hash_table_insert (domain->method_code_hash, method, code_slot);
3176 mono_domain_unlock (domain);
3178 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
3179 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
3182 /* Set invoke_impl field */
3183 trampoline = mono_create_delegate_trampoline (klass);
3184 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_ABS, trampoline);
3185 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
3187 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit the allocation of a multi-dimensional array of rank RANK using the
 * vararg array-new icall; SP holds the dimension arguments from IP.
 */
3193 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
3195 MonoJitICallInfo *info;
3197 /* Need to register the icall so it gets an icall wrapper */
3198 info = mono_get_array_new_va_icall (rank);
3200 cfg->flags |= MONO_CFG_HAS_VARARGS;
3202 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
3203 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * mono_emit_load_got_addr:
 *
 *   Emit the OP_LOAD_GOTADDR instruction which initializes the GOT variable
 * at the very start of the method, and add a dummy use in the exit bblock so
 * the variable stays alive for the whole method.  No-op if there is no GOT
 * variable or it was already allocated.
 */
3207 mono_emit_load_got_addr (MonoCompile *cfg)
3209 MonoInst *getaddr, *dummy_use;
3211 if (!cfg->got_var || cfg->got_var_allocated)
3214 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
3215 getaddr->dreg = cfg->got_var->dreg;
3217 /* Add it to the start of the first bblock */
3218 if (cfg->bb_entry->code) {
3219 getaddr->next = cfg->bb_entry->code;
3220 cfg->bb_entry->code = getaddr;
3223 MONO_ADD_INS (cfg->bb_entry, getaddr);
3225 cfg->got_var_allocated = TRUE;
3228 	 * Add a dummy use to keep the got_var alive, since real uses might
3229 	 * only be generated by the back ends.
3230 	 * Add it to end_bblock, so the variable's lifetime covers the whole
3232 	 * It would be better to make the usage of the got var explicit in all
3233 	 * cases when the backend needs it (i.e. calls, throw etc.), so this
3234 	 * wouldn't be needed.
3236 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
3237 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/*
 * mono_method_check_inlining:
 *
 *   Decide whether METHOD is eligible for inlining into the method being
 * compiled.  Rejects runtime/icall/pinvoke/synchronized/noinline methods,
 * methods with clauses, oversized bodies (MONO_INLINELIMIT env var or
 * INLINE_LENGTH_LIMIT), methods whose class cctor cannot be run up front,
 * methods with declarative security, and (with soft float) R4 signatures.
 */
3241 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
3243 MonoMethodHeader *header = mono_method_get_header (method);
3245 #ifdef MONO_ARCH_SOFT_FLOAT
3246 MonoMethodSignature *sig = mono_method_signature (method);
/* Inlining is disabled entirely under generic sharing. */
3250 if (cfg->generic_sharing_context)
3253 #ifdef MONO_ARCH_HAVE_LMF_OPS
3254 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3255 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
3256 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
3260 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_RUNTIME) ||
3261 (method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3262 (method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
3263 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
3264 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL) ||
3265 (method->klass->marshalbyref) ||
3266 !header || header->num_clauses)
3269 /* also consider num_locals? */
3270 /* Do the size check early to avoid creating vtables */
3271 if (getenv ("MONO_INLINELIMIT")) {
3272 if (header->code_size >= atoi (getenv ("MONO_INLINELIMIT"))) {
3275 } else if (header->code_size >= INLINE_LENGTH_LIMIT)
3279 	 * if we can initialize the class of the method right away, we do,
3280 	 * otherwise we don't allow inlining if the class needs initialization,
3281 	 * since it would mean inserting a call to mono_runtime_class_init()
3282 	 * inside the inlined code
3284 if (!(cfg->opt & MONO_OPT_SHARED)) {
3285 if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
3286 if (cfg->run_cctors && method->klass->has_cctor) {
3287 if (!method->klass->runtime_info)
3288 /* No vtable created yet */
3290 vtable = mono_class_vtable (cfg->domain, method->klass);
3293 /* This makes so that inline cannot trigger */
3294 /* .cctors: too many apps depend on them */
3295 /* running with a specific order... */
3296 if (! vtable->initialized)
3298 mono_runtime_class_init (vtable);
3300 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
3301 if (!method->klass->runtime_info)
3302 /* No vtable created yet */
3304 vtable = mono_class_vtable (cfg->domain, method->klass);
3307 if (!vtable->initialized)
3312 	 * If we're compiling for shared code
3313 	 * the cctor will need to be run at aot method load time, for example,
3314 	 * or at the end of the compilation of the inlining method.
3316 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
3321 	 * CAS - do not inline methods with declarative security
3322 	 * Note: this has to be before any possible return TRUE;
3324 if (mono_method_has_declsec (method))
/* Soft float: R4 parameters/returns need emulation helpers; don't inline. */
3327 #ifdef MONO_ARCH_SOFT_FLOAT
3329 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
3331 for (i = 0; i < sig->param_count; ++i)
3332 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Return whether a static field access in METHOD requires the class
 * constructor of VTABLE->klass to be run first.  Already-initialized
 * vtables (outside AOT) and BeforeFieldInit classes do not need it; an
 * instance method of the same class implies the cctor already ran.
 */
3342 if (vtable->initialized && !cfg->compile_aot)
3345 if (vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
3348 if (!mono_class_needs_cctor_run (vtable->klass, method))
3351 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (vtable->klass == method->klass))
3352 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit the address computation for a one-dimensional array element:
 * bounds check INDEX against ARR's length, then compute
 * arr + vector_offset + index * element_size, using an x86 LEA when the
 * element size is a power of two.
 */
3359 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index)
3363 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
3365 mono_class_init (klass);
3366 size = mono_class_array_element_size (klass);
3368 mult_reg = alloc_preg (cfg);
3369 array_reg = arr->dreg;
3370 index_reg = index->dreg;
3372 #if SIZEOF_VOID_P == 8
3373 /* The array reg is 64 bits but the index reg is only 32 */
3374 index2_reg = alloc_preg (cfg);
3375 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
3377 index2_reg = index_reg;
/* Throws IndexOutOfRangeException when the index is out of bounds. */
3380 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
3382 #if defined(__i386__) || defined(__x86_64__)
3383 if (size == 1 || size == 2 || size == 4 || size == 8) {
3384 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
3386 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
3387 ins->type = STACK_PTR;
/* Generic path: explicit multiply + adds. */
3393 add_reg = alloc_preg (cfg);
3395 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
3396 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
3397 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3398 ins->type = STACK_PTR;
3399 MONO_ADD_INS (cfg->cbb, ins);
3404 #ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 * Emit IR computing the element address for a two-dimensional array
 * access arr [index1, index2], including per-dimension lower-bound
 * adjustment and range checks against the MonoArrayBounds entries.
 * Only compiled on architectures with native mul/div (depends on
 * OP_PMUL).  Returns the address instruction (STACK_MP).
 */
3406 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
3408 int bounds_reg = alloc_preg (cfg);
3409 int add_reg = alloc_preg (cfg);
3410 int mult_reg = alloc_preg (cfg);
3411 int mult2_reg = alloc_preg (cfg);
3412 int low1_reg = alloc_preg (cfg);
3413 int low2_reg = alloc_preg (cfg);
3414 int high1_reg = alloc_preg (cfg);
3415 int high2_reg = alloc_preg (cfg);
3416 int realidx1_reg = alloc_preg (cfg);
3417 int realidx2_reg = alloc_preg (cfg);
3418 int sum_reg = alloc_preg (cfg);
3423 mono_class_init (klass);
3424 size = mono_class_array_element_size (klass);
3426 index1 = index_ins1->dreg;
3427 index2 = index_ins2->dreg;
3429 /* range checking */
3430 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
3431 arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
/* Dimension 1: realidx = index - lower_bound; throw unless
 * realidx < length (unsigned compare also catches negative). */
3433 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
3434 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3435 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
3436 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
3437 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
3438 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
3439 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Dimension 2: bounds entry lives sizeof (MonoArrayBounds) further on. */
3441 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
3442 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3443 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
3444 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
3445 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
3446 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
3447 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* addr = arr + (realidx1 * len2 + realidx2) * size + offsetof (vector) */
3449 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
3450 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
3451 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
3452 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
3453 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3455 ins->type = STACK_MP;
3457 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 * Dispatch for multi-dimensional array element address computation:
 * rank 1 and (optionally) rank 2 are inlined; higher ranks call the
 * generated Address () marshalling wrapper.  For setters the trailing
 * value argument is not an index, hence the rank adjustment.
 */
3464 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
3468 MonoMethod *addr_method;
3471 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
3474 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1]);
3476 #ifndef MONO_ARCH_EMULATE_MUL_DIV
3477 /* emit_ldelema_2 depends on OP_LMUL */
3478 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
3479 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
/* Fallback: call the rank/element-size specific Address wrapper. */
3483 element_size = mono_class_array_element_size (cmethod->klass->element_class);
3484 addr_method = mono_marshal_get_array_address (rank, element_size);
3485 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/*
 * mini_emit_inst_for_method:
 * Intrinsic expansion: if CMETHOD is one of a known set of corlib
 * methods (String accessors, Object.GetType, Array getters,
 * RuntimeHelpers, Thread, Interlocked, Debugger.Break, ...), emit
 * specialized IR instead of a call and return the result instruction;
 * otherwise defer to the arch-specific hook.
 *
 * FIX(review): in the Interlocked.Add branch the result type was
 * computed with (opcode == OP_ATOMIC_ADD_I4), but this branch assigns
 * OP_ATOMIC_ADD_NEW_I4/I8 (like Increment/Decrement do), so the test
 * was always false and the result was always typed STACK_I8 -- wrong
 * for the int32 overload.  Changed to OP_ATOMIC_ADD_NEW_I4.
 *
 * NOTE(review): many interleaving lines (#else/#endif, returns,
 * braces) are elided in this excerpt and are preserved as-is.
 */
3491 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
3493 MonoInst *ins = NULL;
/* Cached lazily; RuntimeHelpers has no mono_defaults slot. */
3495 static MonoClass *runtime_helpers_class = NULL;
3496 if (! runtime_helpers_class)
3497 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
3498 "System.Runtime.CompilerServices", "RuntimeHelpers");
3500 if (cmethod->klass == mono_defaults.string_class) {
3501 if (strcmp (cmethod->name, "get_Chars") == 0) {
3502 int dreg = alloc_ireg (cfg);
3503 int index_reg = alloc_preg (cfg);
3504 int mult_reg = alloc_preg (cfg);
3505 int add_reg = alloc_preg (cfg);
3507 #if SIZEOF_VOID_P == 8
3508 /* The array reg is 64 bits but the index reg is only 32 */
3509 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
3511 index_reg = args [1]->dreg;
3513 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
3515 #if defined(__i386__) || defined(__x86_64__)
3516 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
3517 add_reg = ins->dreg;
3518 /* Avoid a warning */
3520 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
/* Generic path: chars are 2 bytes wide, hence the shift by 1. */
3523 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
3524 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
3525 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
3526 add_reg, G_STRUCT_OFFSET (MonoString, chars));
3528 type_from_op (ins, NULL, NULL);
3530 } else if (strcmp (cmethod->name, "get_Length") == 0) {
3531 int dreg = alloc_ireg (cfg);
3532 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
3533 args [0]->dreg, G_STRUCT_OFFSET (MonoString, length));
3534 type_from_op (ins, NULL, NULL);
3537 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
3538 int mult_reg = alloc_preg (cfg);
3539 int add_reg = alloc_preg (cfg);
3541 /* The corlib functions check for oob already. */
3542 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
3543 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
3544 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
3547 } else if (cmethod->klass == mono_defaults.object_class) {
/* Object.GetType: load vtable, then the cached MonoVTable->type. */
3549 if (strcmp (cmethod->name, "GetType") == 0) {
3550 int dreg = alloc_preg (cfg);
3551 int vt_reg = alloc_preg (cfg);
3552 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3553 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
3554 type_from_op (ins, NULL, NULL);
/* Address-based hash: only valid when objects never move. */
3557 #if !defined(MONO_ARCH_EMULATE_MUL_DIV) && !defined(HAVE_MOVING_COLLECTOR)
3558 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0) {
3559 int dreg = alloc_ireg (cfg);
3560 int t1 = alloc_ireg (cfg);
3562 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
3563 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
3564 ins->type = STACK_I4;
/* Object..ctor is empty: replace with a NOP. */
3568 } else if (strcmp (cmethod->name, ".ctor") == 0) {
3569 MONO_INST_NEW (cfg, ins, OP_NOP);
3570 MONO_ADD_INS (cfg->cbb, ins);
3574 } else if (cmethod->klass == mono_defaults.array_class) {
/* Cheap prefilter: all handled names start with 'g' (get_*). */
3575 if (cmethod->name [0] != 'g')
3578 if (strcmp (cmethod->name, "get_Rank") == 0) {
3579 int dreg = alloc_ireg (cfg);
3580 int vtable_reg = alloc_preg (cfg);
3581 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, vtable_reg,
3582 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3583 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
3584 vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3585 type_from_op (ins, NULL, NULL);
3588 } else if (strcmp (cmethod->name, "get_Length") == 0) {
3589 int dreg = alloc_ireg (cfg);
3591 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
3592 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
3593 type_from_op (ins, NULL, NULL);
3598 } else if (cmethod->klass == runtime_helpers_class) {
3600 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
3601 EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
3605 } else if (cmethod->klass == mono_defaults.thread_class) {
3606 if (strcmp (cmethod->name, "get_CurrentThread") == 0 && (ins = mono_arch_get_thread_intrinsic (cfg))) {
3607 ins->dreg = alloc_preg (cfg);
3608 ins->type = STACK_OBJ;
3609 MONO_ADD_INS (cfg->cbb, ins);
3611 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
3612 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
3613 MONO_ADD_INS (cfg->cbb, ins);
3616 } else if (mini_class_is_system_array (cmethod->klass) &&
3617 strcmp (cmethod->name, "GetGenericValueImpl") == 0) {
3618 MonoInst *addr, *store, *load;
3619 MonoClass *eklass = mono_class_from_mono_type (fsig->params [1]);
/* Load the element in place of the icall: addr -> load -> store out. */
3621 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1]);
3622 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
3623 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
3625 } else if (cmethod->klass->image == mono_defaults.corlib &&
3626 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
3627 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
3630 #if SIZEOF_VOID_P == 8
3631 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
3632 /* 64 bit reads are already atomic */
3633 MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
3634 ins->dreg = mono_alloc_preg (cfg);
3635 ins->inst_basereg = args [0]->dreg;
3636 ins->inst_offset = 0;
3637 MONO_ADD_INS (cfg->cbb, ins);
3641 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
/* Increment/Decrement/Add: atomic add of +1 / -1 / args [1]. */
3642 if (strcmp (cmethod->name, "Increment") == 0) {
3643 MonoInst *ins_iconst;
3646 if (fsig->params [0]->type == MONO_TYPE_I4)
3647 opcode = OP_ATOMIC_ADD_NEW_I4;
3648 #if SIZEOF_VOID_P == 8
3649 else if (fsig->params [0]->type == MONO_TYPE_I8)
3650 opcode = OP_ATOMIC_ADD_NEW_I8;
3653 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
3654 ins_iconst->inst_c0 = 1;
3655 ins_iconst->dreg = mono_alloc_ireg (cfg);
3656 MONO_ADD_INS (cfg->cbb, ins_iconst);
3658 MONO_INST_NEW (cfg, ins, opcode);
3659 ins->dreg = mono_alloc_ireg (cfg);
3660 ins->inst_basereg = args [0]->dreg;
3661 ins->inst_offset = 0;
3662 ins->sreg2 = ins_iconst->dreg;
3663 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
3664 MONO_ADD_INS (cfg->cbb, ins);
3666 } else if (strcmp (cmethod->name, "Decrement") == 0) {
3667 MonoInst *ins_iconst;
3670 if (fsig->params [0]->type == MONO_TYPE_I4)
3671 opcode = OP_ATOMIC_ADD_NEW_I4;
3672 #if SIZEOF_VOID_P == 8
3673 else if (fsig->params [0]->type == MONO_TYPE_I8)
3674 opcode = OP_ATOMIC_ADD_NEW_I8;
3677 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
3678 ins_iconst->inst_c0 = -1;
3679 ins_iconst->dreg = mono_alloc_ireg (cfg);
3680 MONO_ADD_INS (cfg->cbb, ins_iconst);
3682 MONO_INST_NEW (cfg, ins, opcode);
3683 ins->dreg = mono_alloc_ireg (cfg);
3684 ins->inst_basereg = args [0]->dreg;
3685 ins->inst_offset = 0;
3686 ins->sreg2 = ins_iconst->dreg;
3687 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
3688 MONO_ADD_INS (cfg->cbb, ins);
3690 } else if (strcmp (cmethod->name, "Add") == 0) {
3693 if (fsig->params [0]->type == MONO_TYPE_I4)
3694 opcode = OP_ATOMIC_ADD_NEW_I4;
3695 #if SIZEOF_VOID_P == 8
3696 else if (fsig->params [0]->type == MONO_TYPE_I8)
3697 opcode = OP_ATOMIC_ADD_NEW_I8;
3701 MONO_INST_NEW (cfg, ins, opcode);
3702 ins->dreg = mono_alloc_ireg (cfg);
3703 ins->inst_basereg = args [0]->dreg;
3704 ins->inst_offset = 0;
3705 ins->sreg2 = args [1]->dreg;
/* Fixed: was OP_ATOMIC_ADD_I4, which opcode can never equal here
 * (it is assigned OP_ATOMIC_ADD_NEW_I4/I8 above), so the result was
 * always typed STACK_I8.  Match the Increment/Decrement branches. */
3706 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
3707 MONO_ADD_INS (cfg->cbb, ins);
3710 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
3712 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
3713 if (strcmp (cmethod->name, "Exchange") == 0) {
3716 if (fsig->params [0]->type == MONO_TYPE_I4)
3717 opcode = OP_ATOMIC_EXCHANGE_I4;
3718 #if SIZEOF_VOID_P == 8
/* On 64-bit, native int and object references are 8 bytes. */
3719 else if ((fsig->params [0]->type == MONO_TYPE_I8) ||
3720 (fsig->params [0]->type == MONO_TYPE_I) ||
3721 (fsig->params [0]->type == MONO_TYPE_OBJECT))
3722 opcode = OP_ATOMIC_EXCHANGE_I8;
3724 else if ((fsig->params [0]->type == MONO_TYPE_I) ||
3725 (fsig->params [0]->type == MONO_TYPE_OBJECT))
3726 opcode = OP_ATOMIC_EXCHANGE_I4;
3731 MONO_INST_NEW (cfg, ins, opcode);
3732 ins->dreg = mono_alloc_ireg (cfg);
3733 ins->inst_basereg = args [0]->dreg;
3734 ins->inst_offset = 0;
3735 ins->sreg2 = args [1]->dreg;
3736 MONO_ADD_INS (cfg->cbb, ins);
3738 switch (fsig->params [0]->type) {
3740 ins->type = STACK_I4;
3744 ins->type = STACK_I8;
3746 case MONO_TYPE_OBJECT:
3747 ins->type = STACK_OBJ;
3750 g_assert_not_reached ();
3753 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
3755 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS_IMM
3757 * Can't implement CompareExchange methods this way since they have
3758 * three arguments. We can implement one of the common cases, where the new
3759 * value is a constant.
3761 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
3762 if (fsig->params [1]->type == MONO_TYPE_I4 && args [2]->opcode == OP_ICONST) {
3763 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_IMM_I4);
3764 ins->dreg = alloc_ireg (cfg);
3765 ins->sreg1 = args [0]->dreg;
3766 ins->sreg2 = args [1]->dreg;
3767 ins->backend.data = GINT_TO_POINTER (args [2]->inst_c0);
3768 ins->type = STACK_I4;
3769 MONO_ADD_INS (cfg->cbb, ins);
3771 /* The I8 case is hard to detect, since the arg might be a conv.i8 (iconst) tree */
3773 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS_IMM */
3777 } else if (cmethod->klass->image == mono_defaults.corlib) {
3778 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
3779 && strcmp (cmethod->klass->name, "Debugger") == 0) {
3780 MONO_INST_NEW (cfg, ins, OP_BREAK);
3781 MONO_ADD_INS (cfg->cbb, ins);
3784 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
3785 && strcmp (cmethod->klass->name, "Environment") == 0) {
3786 #ifdef PLATFORM_WIN32
3787 EMIT_NEW_ICONST (cfg, ins, 1);
3789 EMIT_NEW_ICONST (cfg, ins, 0);
3793 } else if (cmethod->klass == mono_defaults.math_class) {
3795 * There is general branches code for Min/Max, but it does not work for
3797 * http://everything2.com/?node_id=1051618
/* Give the backend a chance to expand arch-specific intrinsics. */
3801 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
/*
 * mini_redirect_call:
 * Redirect selected internal calls to managed replacements.  Currently
 * only String.InternalAllocateStr is handled, which is rerouted to the
 * GC's managed string allocator when one is available.
 */
3805 * This entry point could be used later for arbitrary method
3808 inline static MonoInst*
3809 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
3810 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
3812 if (method->klass == mono_defaults.string_class) {
3813 /* managed string allocation support */
3814 if (strcmp (method->name, "InternalAllocateStr") == 0) {
3815 MonoInst *iargs [2];
3816 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
/* FALSE: the string allocator variant that takes a length. */
3817 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, FALSE);
/* managed_alloc (vtable, len) replaces the icall. */
3820 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3821 iargs [1] = args [0];
3822 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
/*
 * mono_save_args:
 * During inlining, create a local variable for each argument (including
 * the implicit 'this') and store the stack values SP into them, so the
 * inlined body can address them via cfg->args.
 */
3829 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
3831 MonoInst *store, *temp;
3834 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
/* 'this' has no entry in sig->params; derive its type from the stack. */
3835 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
3838 * FIXME: We should use *args++ = sp [0], but that would mean the arg
3839 * would be different than the MonoInst's used to represent arguments, and
3840 * the ldelema implementation can't deal with that.
3841 * Solution: When ldelema is used on an inline argument, create a var for
3842 * it, emit ldelema on that var, and emit the saving code below in
3843 * inline_method () if needed.
3845 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
3846 cfg->args [i] = temp;
3847 /* This uses cfg->args [i] which is set by the preceeding line */
3848 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
3849 store->cil_code = sp [0]->cil_code;
/* Debug knobs: when non-zero, inlining candidates are additionally
 * filtered by the MONO_INLINE_{CALLED,CALLER}_METHOD_NAME_LIMIT
 * environment variables (see the check_* helpers below).
 * NOTE(review): these are normally shipped as 0; being 1 here enables
 * the env-var based filtering -- confirm this is intentional. */
3854 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
3855 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
3857 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 * Debug helper: only allow inlining of CALLED_METHOD when its full name
 * starts with the prefix given in $MONO_INLINE_CALLED_METHOD_NAME_LIMIT.
 * The env var is read once and cached; an empty/unset limit allows all.
 */
3859 check_inline_called_method_name_limit (MonoMethod *called_method)
3862 static char *limit = NULL;
3864 if (limit == NULL) {
3865 char *limit_string = getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
3867 if (limit_string != NULL)
3868 limit = limit_string;
/* Cache "" so getenv is not consulted again. */
3870 limit = (char *) "";
3873 if (limit [0] != '\0') {
3874 char *called_method_name = mono_method_full_name (called_method, TRUE);
/* Prefix match: compare only the first strlen (limit) chars. */
3876 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
3877 g_free (called_method_name);
3879 //return (strncmp_result <= 0);
3880 return (strncmp_result == 0);
3887 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 * Debug helper, mirror of check_inline_called_method_name_limit: only
 * allow inlining when CALLER_METHOD's full name starts with the prefix
 * in $MONO_INLINE_CALLER_METHOD_NAME_LIMIT (read once and cached).
 */
3889 check_inline_caller_method_name_limit (MonoMethod *caller_method)
3892 static char *limit = NULL;
3894 if (limit == NULL) {
3895 char *limit_string = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
3896 if (limit_string != NULL) {
3897 limit = limit_string;
/* Cache "" so getenv is not consulted again. */
3899 limit = (char *) "";
3903 if (limit [0] != '\0') {
3904 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
3906 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
3907 g_free (caller_method_name);
3909 //return (strncmp_result <= 0);
3910 return (strncmp_result == 0);
/*
 * inline_method:
 * Try to inline CMETHOD at the current position.  Saves the parts of
 * CFG that mono_method_to_ir2 () will clobber, compiles the callee body
 * into fresh start/end bblocks, and on success splices those blocks
 * into the caller's CFG; on failure restores state and resets the
 * exception so the caller can fall back to a normal call.
 * Returns non-zero (the inline cost) on success, 0 on failure
 * (NOTE(review): the actual return statements are elided in this
 * excerpt).
 */
3918 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
3919 guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_allways)
3921 MonoInst *ins, *rvar = NULL;
3922 MonoMethodHeader *cheader;
3923 MonoBasicBlock *ebblock, *sbblock;
/* Snapshot of every cfg field mono_method_to_ir2 () overwrites; restored
 * unconditionally after the recursive compilation below. */
3925 MonoMethod *prev_inlined_method;
3926 MonoInst **prev_locals, **prev_args;
3927 MonoType **prev_arg_types;
3928 guint prev_real_offset;
3929 GHashTable *prev_cbb_hash;
3930 MonoBasicBlock **prev_cil_offset_to_bb;
3931 MonoBasicBlock *prev_cbb;
3932 unsigned char* prev_cil_start;
3933 guint32 prev_cil_offset_to_bb_len;
3934 MonoMethod *prev_current_method;
3935 MonoGenericContext *prev_generic_context;
3937 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
/* Optional debug filters on callee/caller names (see the macros above). */
3939 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
3940 if ((! inline_allways) && ! check_inline_called_method_name_limit (cmethod))
3943 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
3944 if ((! inline_allways) && ! check_inline_caller_method_name_limit (cfg->method))
3948 if (cfg->verbose_level > 2)
3949 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
3951 if (!cmethod->inline_info) {
3952 mono_jit_stats.inlineable_methods++;
3953 cmethod->inline_info = 1;
3955 /* allocate space to store the return value */
3956 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
3957 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
3960 /* allocate local variables */
3961 cheader = mono_method_get_header (cmethod);
3962 prev_locals = cfg->locals;
3963 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
3964 for (i = 0; i < cheader->num_locals; ++i)
3965 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
3967 /* allocate start and end blocks */
3968 /* This is needed so if the inline is aborted, we can clean up */
3969 NEW_BBLOCK (cfg, sbblock);
3970 sbblock->real_offset = real_offset;
3972 NEW_BBLOCK (cfg, ebblock);
3973 ebblock->block_num = cfg->num_bblocks++;
3974 ebblock->real_offset = real_offset;
3976 prev_args = cfg->args;
3977 prev_arg_types = cfg->arg_types;
3978 prev_inlined_method = cfg->inlined_method;
3979 cfg->inlined_method = cmethod;
3980 cfg->ret_var_set = FALSE;
3981 prev_real_offset = cfg->real_offset;
3982 prev_cbb_hash = cfg->cbb_hash;
3983 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
3984 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
3985 prev_cil_start = cfg->cil_start;
3986 prev_cbb = cfg->cbb;
3987 prev_current_method = cfg->current_method;
3988 prev_generic_context = cfg->generic_context;
/* Recursively compile the callee; costs < 0 signals failure. */
3990 costs = mono_method_to_ir2 (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, *ip == CEE_CALLVIRT);
3992 cfg->inlined_method = prev_inlined_method;
3993 cfg->real_offset = prev_real_offset;
3994 cfg->cbb_hash = prev_cbb_hash;
3995 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
3996 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
3997 cfg->cil_start = prev_cil_start;
3998 cfg->locals = prev_locals;
3999 cfg->args = prev_args;
4000 cfg->arg_types = prev_arg_types;
4001 cfg->current_method = prev_current_method;
4002 cfg->generic_context = prev_generic_context;
/* Accept when the body cost stayed under the threshold (60) or the
 * caller insists (e.g. wrapper compilation). */
4004 if ((costs >= 0 && costs < 60) || inline_allways) {
4005 if (cfg->verbose_level > 2)
4006 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4008 mono_jit_stats.inlined_methods++;
4010 /* always add some code to avoid block split failures */
4011 MONO_INST_NEW (cfg, ins, OP_NOP);
4012 MONO_ADD_INS (prev_cbb, ins);
4014 prev_cbb->next_bb = sbblock;
4015 link_bblock (cfg, prev_cbb, sbblock);
4018 * Get rid of the begin and end bblocks if possible to aid local
4021 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
4023 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
4024 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
4026 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
4027 MonoBasicBlock *prev = ebblock->in_bb [0];
4028 mono_merge_basic_blocks (cfg, prev, ebblock);
4036 * If the inlined method contains only a throw, then the ret var is not
4037 * set, so set it to a dummy value.
4039 if (!cfg->ret_var_set) {
4040 static double r8_0 = 0.0;
4042 switch (rvar->type) {
4044 MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
4047 MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
4052 MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
4055 MONO_INST_NEW (cfg, ins, OP_R8CONST);
4056 ins->type = STACK_R8;
4057 ins->inst_p0 = (void*)&r8_0;
4058 ins->dreg = rvar->dreg;
4059 MONO_ADD_INS (cfg->cbb, ins);
4062 MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (fsig->ret));
4065 g_assert_not_reached ();
/* Push the return value back onto the evaluation stack. */
4069 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
4074 if (cfg->verbose_level > 2)
4075 printf ("INLINE ABORTED %s\n", mono_method_full_name (cmethod, TRUE));
/* Clear any exception the aborted compilation left behind. */
4076 cfg->exception_type = MONO_EXCEPTION_NONE;
4077 mono_loader_clear_error ();
4079 /* This gets rid of the newly added bblocks */
4080 cfg->cbb = prev_cbb;
4086 * Some of these comments may well be out-of-date.
4087 * Design decisions: we do a single pass over the IL code (and we do bblock
4088 * splitting/merging in the few cases when it's required: a back jump to an IL
4089 * address that was not already seen as bblock starting point).
4090 * Code is validated as we go (full verification is still better left to metadata/verify.c).
4091 * Complex operations are decomposed in simpler ones right away. We need to let the
4092 * arch-specific code peek and poke inside this process somehow (except when the
4093 * optimizations can take advantage of the full semantic info of coarse opcodes).
4094 * All the opcodes of the form opcode.s are 'normalized' to opcode.
4095 * MonoInst->opcode initially is the IL opcode or some simplification of that
4096 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
4097 * opcode with value bigger than OP_LAST.
4098 * At this point the IR can be handed over to an interpreter, a dumb code generator
4099 * or to the optimizing code generator that will translate it to SSA form.
4101 * Profiling directed optimizations.
4102 * We may compile by default with few or no optimizations and instrument the code
4103 * or the user may indicate what methods to optimize the most either in a config file
4104 * or through repeated runs where the compiler applies offline the optimizations to
4105 * each method and then decides if it was worth it.
/* Lightweight in-line verification helpers used throughout
 * mono_method_to_ir2: each one validates a single IL well-formedness
 * condition and bails out via UNVERIFIED (or load_error) on failure. */
4108 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
4109 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
4110 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
/* Unsigned compare also rejects negative indexes. */
4111 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
4112 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
4113 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
4114 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
4115 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; goto load_error;}
4117 /* offset from br.s -> br like opcodes */
4118 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 * Return TRUE when IL address IP belongs to bblock BB, i.e. no other
 * bblock starts at that offset (NULL means "no block boundary here").
 */
4121 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
4123 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
4125 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 * First pass over the IL stream [START, END): decode each opcode's
 * operand kind, create a bblock (via GET_BBLOCK) at every branch target
 * and at every instruction following a branch, and mark bblocks that
 * end in CEE_THROW as out-of-line (cold).
 * NOTE(review): the per-case ip advancement lines and error returns are
 * elided in this excerpt.
 */
4129 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
4131 unsigned char *ip = start;
4132 unsigned char *target;
4135 MonoBasicBlock *bblock;
4136 const MonoOpcode *opcode;
4139 cli_addr = ip - start;
4140 i = mono_opcode_value ((const guint8 **)&ip, end);
4143 opcode = &mono_opcodes [i];
/* Dispatch on the operand encoding to find the instruction length and
 * any embedded branch targets. */
4144 switch (opcode->argument) {
4145 case MonoInlineNone:
4148 case MonoInlineString:
4149 case MonoInlineType:
4150 case MonoInlineField:
4151 case MonoInlineMethod:
4154 case MonoShortInlineR:
4161 case MonoShortInlineVar:
4162 case MonoShortInlineI:
4165 case MonoShortInlineBrTarget:
/* 1-byte signed displacement, relative to the next instruction. */
4166 target = start + cli_addr + 2 + (signed char)ip [1];
4167 GET_BBLOCK (cfg, bblock, target);
4170 GET_BBLOCK (cfg, bblock, ip);
4172 case MonoInlineBrTarget:
/* 4-byte signed displacement, relative to the next instruction. */
4173 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
4174 GET_BBLOCK (cfg, bblock, target);
4177 GET_BBLOCK (cfg, bblock, ip);
4179 case MonoInlineSwitch: {
4180 guint32 n = read32 (ip + 1);
/* Targets are relative to the end of the whole switch instruction. */
4183 cli_addr += 5 + 4 * n;
4184 target = start + cli_addr;
4185 GET_BBLOCK (cfg, bblock, target);
4187 for (j = 0; j < n; ++j) {
4188 target = start + cli_addr + (gint32)read32 (ip);
4189 GET_BBLOCK (cfg, bblock, target);
4199 g_assert_not_reached ();
4202 if (i == CEE_THROW) {
4203 unsigned char *bb_start = ip - 1;
4205 /* Find the start of the bblock containing the throw */
4207 while ((bb_start >= start) && !bblock) {
4208 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
/* Throwing blocks are moved out of the hot path by later passes. */
4212 bblock->out_of_line = 1;
4221 static inline MonoMethod *
/*
 * mini_get_method_allow_open:
 * Resolve TOKEN to a MonoMethod in the context of M.  For wrapper
 * methods the token indexes the wrapper's own data; otherwise it is a
 * normal metadata token.  "allow_open" = open constructed types are not
 * rejected here (see mini_get_method below).
 */
4222 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4226 if (m->wrapper_type != MONO_WRAPPER_NONE)
4227 return mono_method_get_wrapper_data (m, token);
4229 method = mono_get_method_full (m->klass->image, token, klass, context);
4234 static inline MonoMethod *
/*
 * mini_get_method:
 * Like mini_get_method_allow_open, but when not compiling with generic
 * sharing, reject methods on open constructed types (the NULLing of
 * 'method' is on an elided line in this excerpt).
 */
4235 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4237 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
4239 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
4245 static inline MonoClass*
/*
 * mini_get_class:
 * Resolve TOKEN to a MonoClass in the context of METHOD (wrapper data
 * for wrappers, metadata token otherwise) and ensure it is initialized.
 */
4246 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
4250 if (method->wrapper_type != MONO_WRAPPER_NONE)
4251 klass = mono_method_get_wrapper_data (method, token);
4253 klass = mono_class_get_full (method->klass->image, token, context);
4255 mono_class_init (klass);
4260 * Returns TRUE if the JIT should abort inlining because "callee"
4261 * is influenced by security attributes.
/*
 * check_linkdemand:
 * Evaluate CAS link demands of CALLEE against CALLER.  For ECMA link
 * demands, emit code that throws a SecurityException at the call site;
 * for other failures, record the failure on the cfg so compilation
 * reports it later.
 */
4264 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
/* Only re-check when inlining (cfg->method != caller) into a callee
 * that actually carries declarative security. */
4268 if ((cfg->method != caller) && mono_method_has_declsec (callee)) {
4272 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
4273 if (result == MONO_JIT_SECURITY_OK)
4276 if (result == MONO_JIT_LINKDEMAND_ECMA) {
4277 /* Generate code to throw a SecurityException before the actual call/link */
4278 MonoSecurityManager *secman = mono_security_manager_get_methods ();
/* 4 = the SecurityException kind expected by the managed helper;
 * NOTE(review): confirm against linkdemandsecurityexception's docs. */
4281 NEW_ICONST (cfg, args [0], 4);
4282 NEW_METHODCONST (cfg, args [1], caller);
4283 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
4284 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
4285 /* don't hide previous results */
4286 cfg->exception_type = MONO_EXCEPTION_SECURITY_LINKDEMAND;
4287 cfg->exception_data = result;
/*
 * method_access_exception:
 * Return (and lazily cache) the managed SecurityManager helper that
 * throws a MethodAccessException (2 arguments: caller, callee).
 */
4295 method_access_exception (void)
4297 static MonoMethod *method = NULL;
4300 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4301 method = mono_class_get_method_from_name (secman->securitymanager,
4302 "MethodAccessException", 2);
/*
 * emit_throw_method_access_exception:
 * Emit a call that throws MethodAccessException (caller, callee) at the
 * current position; used by CoreCLR security checks below.
 */
4309 emit_throw_method_access_exception (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
4310 MonoBasicBlock *bblock, unsigned char *ip)
4312 MonoMethod *thrower = method_access_exception ();
4315 EMIT_NEW_METHODCONST (cfg, args [0], caller);
4316 EMIT_NEW_METHODCONST (cfg, args [1], callee);
4317 mono_emit_method_call (cfg, thrower, args, NULL);
/*
 * verification_exception:
 * Return (and lazily cache) the managed SecurityManager helper that
 * throws a VerificationException (no arguments).
 */
4321 verification_exception (void)
4323 static MonoMethod *method = NULL;
4326 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4327 method = mono_class_get_method_from_name (secman->securitymanager,
4328 "VerificationException", 0);
/*
 * emit_throw_verification_exception:
 * Emit a call that throws VerificationException at the current position.
 */
4335 emit_throw_verification_exception (MonoCompile *cfg, MonoBasicBlock *bblock, unsigned char *ip)
4337 MonoMethod *thrower = verification_exception ();
4339 mono_emit_method_call (cfg, thrower, NULL, NULL);
/*
 * ensure_method_is_allowed_to_call_method:
 * CoreCLR security: compare the transparency levels of CALLER and
 * CALLEE; when the call is not permitted, emit code throwing
 * MethodAccessException at the call site.
 * NOTE(review): the lines using is_safe are elided in this excerpt.
 */
4343 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
4344 MonoBasicBlock *bblock, unsigned char *ip)
4346 MonoSecurityCoreCLRLevel caller_level = mono_security_core_clr_method_level (caller, TRUE);
4347 MonoSecurityCoreCLRLevel callee_level = mono_security_core_clr_method_level (callee, TRUE);
4348 gboolean is_safe = TRUE;
/* Allowed when the caller is at least as critical as the callee, or
 * either side is SafeCritical (the bridging level). */
4350 if (!(caller_level >= callee_level ||
4351 caller_level == MONO_SECURITY_CORE_CLR_SAFE_CRITICAL ||
4352 callee_level == MONO_SECURITY_CORE_CLR_SAFE_CRITICAL)) {
4357 emit_throw_method_access_exception (cfg, caller, callee, bblock, ip);
/*
 * method_is_safe:
 * Test hook: methods named "unsafeMethod" are treated specially
 * (presumably reported unsafe -- the return statements are elided in
 * this excerpt; confirm against the full source).
 */
4361 method_is_safe (MonoMethod *method)
4364 if (strcmp (method->name, "unsafeMethod") == 0)
4371 * Check that the IL instructions at ip are the array initialization
4372 * sequence and return the pointer to the data and the size.
/*
 * initialize_array_data:
 * Recognize the compiler-emitted pattern
 *   dup; ldtoken <field>; call RuntimeHelpers::InitializeArray
 * right after a newarr, and if the element type and endianness allow a
 * raw memcpy, return a pointer to the field's static data blob (or its
 * RVA when compiling AOT) and its size via OUT_SIZE.
 */
4375 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size)
4378 * newarr[System.Int32]
4380 * ldtoken field valuetype ...
4381 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* ip [5] == 0x4: the ldtoken token's table byte must be FieldDef. */
4383 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
4384 guint32 token = read32 (ip + 7);
4385 guint32 field_token = read32 (ip + 2);
4386 guint32 field_index = field_token & 0xffffff;
4388 const char *data_ptr;
4390 MonoMethod *cmethod;
4391 MonoClass *dummy_class;
4392 MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
4398 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* Verify the call really is RuntimeHelpers::InitializeArray. */
4401 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
4403 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
4404 case MONO_TYPE_BOOLEAN:
4408 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
4409 #if G_BYTE_ORDER == G_LITTLE_ENDIAN
4410 case MONO_TYPE_CHAR:
4420 return NULL; /* stupid ARM FP swapped format */
/* The blob must be large enough for len elements. */
4430 if (size > mono_type_size (field->type, &dummy_align))
4433 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
4434 if (!method->klass->image->dynamic) {
4435 field_index = read32 (ip + 2) & 0xffffff;
4436 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
4437 data_ptr = mono_image_rva_map (method->klass->image, rva);
4438 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
4439 /* for aot code we do the lookup on load */
4440 if (aot && data_ptr)
4441 return GUINT_TO_POINTER (rva);
4443 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
/* Reflection.Emit image: the data lives on the field object itself. */
4445 data_ptr = field->data;
/*
 * set_exception_type_from_invalid_il:
 * Record an InvalidProgramException on CFG, with a message naming the
 * method and disassembling the offending IL instruction at IP.
 */
4453 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
4455 char *method_fname = mono_method_full_name (method, TRUE);
4458 if (mono_method_get_header (method)->code_size == 0)
4459 method_code = g_strdup ("method body is empty.");
4461 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
4462 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
4463 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
4464 g_free (method_fname);
4465 g_free (method_code);
/*
 * set_exception_object:
 *
 *   Abort compilation by attaching a caller-supplied managed exception
 * object to CFG (reported later as MONO_EXCEPTION_OBJECT_SUPPLIED).
 */
4469 set_exception_object (MonoCompile *cfg, MonoException *exception)
4471 cfg->exception_type = MONO_EXCEPTION_OBJECT_SUPPLIED;
/* exception_ptr will hold a managed object: register the slot as a GC
 * root first so the exception cannot be collected while cfg is alive */
4472 MONO_GC_REGISTER_ROOT (cfg->exception_ptr);
4473 cfg->exception_ptr = exception;
/*
 * generic_class_is_reference_type:
 *
 *   Return whether KLASS is a reference type, first resolving type
 * variables through the generic sharing context when compiling shared
 * generic code.
 */
4477 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
4481 if (cfg->generic_sharing_context)
/* in shared code a type variable is mapped to the basic type it is
 * instantiated with before the reference-type test */
4482 type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, &klass->byval_arg);
4484 type = &klass->byval_arg;
4485 return MONO_TYPE_IS_REFERENCE (type);
4489  * mono_decompose_array_access_opts:
4491  * Decompose array access opcodes.
4494 mono_decompose_array_access_opts (MonoCompile *cfg)
4496 MonoBasicBlock *bb, *first_bb;
4499 * Unlike decompose_long_opts, this pass does not alter the CFG of the method so it
4500 * can be executed anytime. It should be run before decompose_long
4504 * Create a dummy bblock and emit code into it so we can use the normal
4505 * code generation macros.
4507 cfg->cbb = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoBasicBlock));
4508 first_bb = cfg->cbb;
4510 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
4512 MonoInst *prev = NULL;
4514 MonoInst *iargs [3];
/* bblocks without array-access opcodes were flagged earlier; skip them */
4517 if (!bb->has_array_access)
4520 if (cfg->verbose_level > 3) mono_print_bb (bb, "BEFORE DECOMPOSE-ARRAY-ACCESS-OPTS ");
/* reset the scratch bblock before emitting the replacement sequence */
4522 cfg->cbb->code = cfg->cbb->last_ins = NULL;
4528 for (ins = bb->code; ins; ins = ins->next) {
4529 switch (ins->opcode) {
/* array-length opcode (case label elided): becomes a plain i4 load of
 * MonoArray::max_length from the array pointer in sreg1 */
4531 NEW_LOAD_MEMBASE (cfg, dest, OP_LOADI4_MEMBASE, ins->dreg, ins->sreg1,
4532 G_STRUCT_OFFSET (MonoArray, max_length));
4533 MONO_ADD_INS (cfg->cbb, dest);
4535 case OP_BOUNDS_CHECK:
/* expanded by an arch-specific macro (compare + conditional throw) */
4536 MONO_ARCH_EMIT_BOUNDS_CHECK (cfg, ins->sreg1, ins->inst_imm, ins->sreg2);
/* new-array opcode (case label elided): in appdomain-shared code the
 * vtable can't be baked in, so call the generic mono_array_new icall
 * with (domain, element class, length) */
4539 if (cfg->opt & MONO_OPT_SHARED) {
4540 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4541 EMIT_NEW_CLASSCONST (cfg, iargs [1], ins->inst_newa_class);
4542 MONO_INST_NEW (cfg, iargs [2], OP_MOVE);
4543 iargs [2]->dreg = ins->sreg1;
4545 dest = mono_emit_jit_icall (cfg, mono_array_new, iargs);
4546 dest->dreg = ins->dreg;
/* otherwise resolve the rank-1 array vtable now and call the faster
 * mono_array_new_specific (vtable, length) */
4548 MonoVTable *vtable = mono_class_vtable (cfg->domain, mono_array_class_get (ins->inst_newa_class, 1));
4551 NEW_VTABLECONST (cfg, iargs [0], vtable);
4552 MONO_ADD_INS (cfg->cbb, iargs [0]);
4553 MONO_INST_NEW (cfg, iargs [1], OP_MOVE);
4554 iargs [1]->dreg = ins->sreg1;
4556 dest = mono_emit_jit_icall (cfg, mono_array_new_specific, iargs);
4557 dest->dreg = ins->dreg;
/* the emission macros above must not have switched bblocks, since the
 * replacement below assumes everything landed in first_bb */
4564 g_assert (cfg->cbb == first_bb);
4566 if (cfg->cbb->code || (cfg->cbb != first_bb)) {
4567 /* Replace the original instruction with the new code sequence */
4569 mono_replace_ins (cfg, bb, ins, &prev, first_bb, cfg->cbb);
/* reset the scratch bblock for the next instruction */
4570 first_bb->code = first_bb->last_ins = NULL;
4571 first_bb->in_count = first_bb->out_count = 0;
4572 cfg->cbb = first_bb;
4579 if (cfg->verbose_level > 3) mono_print_bb (bb, "AFTER DECOMPOSE-ARRAY-ACCESS-OPTS ");
4589 #ifdef MONO_ARCH_SOFT_FLOAT
4592  * mono_handle_soft_float:
4594  * Soft float support on ARM. We store each double value in a pair of integer vregs,
4595  * similar to long support on 32 bit platforms. 32 bit float values require special
4596  * handling when used as locals, arguments, and in calls.
4597  * One big problem with soft-float is that there are few r4 test cases in our test suite.
4600 mono_handle_soft_float (MonoCompile *cfg)
4602 MonoBasicBlock *bb, *first_bb;
4605 * This pass creates long opcodes, so it should be run before decompose_long_opts ().
4609 * Create a dummy bblock and emit code into it so we can use the normal
4610 * code generation macros.
4612 cfg->cbb = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoBasicBlock));
4613 first_bb = cfg->cbb;
4615 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
4617 MonoInst *prev = NULL;
4620 if (cfg->verbose_level > 3) mono_print_bb (bb, "BEFORE HANDLE-SOFT-FLOAT ");
/* reset the scratch bblock before emitting replacement code */
4622 cfg->cbb->code = cfg->cbb->last_ins = NULL;
4628 for (ins = bb->code; ins; ins = ins->next) {
4629 const char *spec = INS_INFO (ins->opcode);
4631 /* Most fp operations are handled automatically by opcode emulation */
4633 switch (ins->opcode) {
/* r8 constant (case label elided): reinterpret the double's 64-bit
 * pattern as an i8 constant.  NOTE(review): 'd' is presumably a
 * double/gint64 union declared in lines not shown — confirm */
4636 d.vald = *(double*)ins->inst_p0;
4637 MONO_EMIT_NEW_I8CONST (cfg, ins->dreg, d.vall);
4642 /* We load the r8 value */
4643 d.vald = *(float*)ins->inst_p0;
4644 MONO_EMIT_NEW_I8CONST (cfg, ins->dreg, d.vall);
/* fp register move becomes a move of the whole integer vreg pair */
4648 ins->opcode = OP_LMOVE;
/* read one half of the soft-float vreg pair (case labels elided):
 * sreg1+1 / sreg1+2 address the two halves */
4651 ins->opcode = OP_MOVE;
4652 ins->sreg1 = ins->sreg1 + 1;
4655 ins->opcode = OP_MOVE;
4656 ins->sreg1 = ins->sreg1 + 2;
/* fp return value: return the pair through the long-return opcode */
4659 int reg = ins->sreg1;
4661 ins->opcode = OP_SETLRET;
4663 ins->sreg1 = reg + 1;
4664 ins->sreg2 = reg + 2;
/* r8 memory accesses are simply 64 bit integer accesses */
4667 case OP_LOADR8_MEMBASE:
4668 ins->opcode = OP_LOADI8_MEMBASE;
4670 case OP_STORER8_MEMBASE_REG:
4671 ins->opcode = OP_STOREI8_MEMBASE_REG;
/* r4 stores need an r8->r4 narrowing, done by the mono_fstore_r4 icall */
4673 case OP_STORER4_MEMBASE_REG: {
4674 MonoInst *iargs [2];
4677 /* Arg 1 is the double value */
4678 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
4679 iargs [0]->dreg = ins->sreg1;
4681 /* Arg 2 is the address to store to */
4682 addr_reg = mono_alloc_preg (cfg);
4683 EMIT_NEW_BIALU_IMM (cfg, iargs [1], OP_PADD_IMM, addr_reg, ins->inst_destbasereg, ins->inst_offset);
4684 mono_emit_jit_icall (cfg, mono_fstore_r4, iargs);
/* r4 loads go through mono_fload_r4, which widens to r8 */
4688 case OP_LOADR4_MEMBASE: {
4689 MonoInst *iargs [1];
4693 addr_reg = mono_alloc_preg (cfg);
4694 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, addr_reg, ins->inst_basereg, ins->inst_offset);
4695 conv = mono_emit_jit_icall (cfg, mono_fload_r4, iargs);
4696 conv->dreg = ins->dreg;
/* fp calls (other FCALL variants' case labels elided) */
4701 case OP_FCALL_MEMBASE: {
4702 MonoCallInst *call = (MonoCallInst*)ins;
4703 if (call->signature->ret->type == MONO_TYPE_R4) {
4704 MonoCallInst *call2;
4705 MonoInst *iargs [1];
4708 /* Convert the call into a call returning an int */
4709 MONO_INST_NEW_CALL (cfg, call2, OP_CALL);
4710 memcpy (call2, call, sizeof (MonoCallInst));
4711 switch (ins->opcode) {
4713 call2->inst.opcode = OP_CALL;
4716 call2->inst.opcode = OP_CALL_REG;
4718 case OP_FCALL_MEMBASE:
4719 call2->inst.opcode = OP_CALL_MEMBASE;
4722 g_assert_not_reached ();
4724 call2->inst.dreg = mono_alloc_ireg (cfg);
4725 MONO_ADD_INS (cfg->cbb, (MonoInst*)call2);
4727 /* FIXME: Optimize this */
4729 /* Emit an r4->r8 conversion */
4730 EMIT_NEW_VARLOADA_VREG (cfg, iargs [0], call2->inst.dreg, &mono_defaults.int32_class->byval_arg);
4731 conv = mono_emit_jit_icall (cfg, mono_fload_r4, iargs);
4732 conv->dreg = ins->dreg;
/* r8-returning calls become long calls (result in a vreg pair) */
4734 switch (ins->opcode) {
4736 ins->opcode = OP_LCALL;
4739 ins->opcode = OP_LCALL_REG;
4741 case OP_FCALL_MEMBASE:
4742 ins->opcode = OP_LCALL_MEMBASE;
4745 g_assert_not_reached ();
/* fp compare followed by an fp branch (case label elided) */
4751 MonoJitICallInfo *info;
4752 MonoInst *iargs [2];
4753 MonoInst *call, *cmp, *br;
4755 /* Convert fcompare+fbcc to icall+icompare+beq */
/* the emulation icall is keyed off the *branch* opcode that follows */
4757 info = mono_find_jit_opcode_emulation (ins->next->opcode);
4760 /* Create dummy MonoInst's for the arguments */
4761 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
4762 iargs [0]->dreg = ins->sreg1;
4763 MONO_INST_NEW (cfg, iargs [1], OP_ARG);
4764 iargs [1]->dreg = ins->sreg2;
4766 call = mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, iargs);
4768 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE_IMM);
4769 cmp->sreg1 = call->dreg;
4771 MONO_ADD_INS (cfg->cbb, cmp);
/* branch on the icall's integer result, reusing the fbcc's targets */
4773 MONO_INST_NEW (cfg, br, OP_IBNE_UN);
4774 br->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * 2);
4775 br->inst_true_bb = ins->next->inst_true_bb;
4776 br->inst_false_bb = ins->next->inst_false_bb;
4777 MONO_ADD_INS (cfg->cbb, br);
4779 /* The call sequence might include fp ins */
4782 /* Skip fbcc or fccc */
4783 NULLIFY_INS (ins->next);
/* fp compare-and-set-result opcodes (case labels elided) */
4791 MonoJitICallInfo *info;
4792 MonoInst *iargs [2];
4795 /* Convert fccc to icall+icompare+iceq */
4797 info = mono_find_jit_opcode_emulation (ins->opcode);
4800 /* Create dummy MonoInst's for the arguments */
4801 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
4802 iargs [0]->dreg = ins->sreg1;
4803 MONO_INST_NEW (cfg, iargs [1], OP_ARG);
4804 iargs [1]->dreg = ins->sreg2;
4806 call = mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, iargs);
4808 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, call->dreg, 1);
4809 MONO_EMIT_NEW_UNALU (cfg, OP_ICEQ, ins->dreg, -1);
4811 /* The call sequence might include fp ins */
/* sanity check: after this pass no instruction may still use an fp vreg */
4816 if (spec [MONO_INST_SRC1] == 'f' || spec [MONO_INST_SRC2] == 'f' || spec [MONO_INST_DEST] == 'f') {
4817 mono_print_ins (ins);
4818 g_assert_not_reached ();
/* the emission macros above must not have switched bblocks */
4823 g_assert (cfg->cbb == first_bb);
4825 if (cfg->cbb->code || (cfg->cbb != first_bb)) {
4826 /* Replace the original instruction with the new code sequence */
4828 mono_replace_ins (cfg, bb, ins, &prev, first_bb, cfg->cbb);
/* reset the scratch bblock for the next instruction */
4829 first_bb->code = first_bb->last_ins = NULL;
4830 first_bb->in_count = first_bb->out_count = 0;
4831 cfg->cbb = first_bb;
4838 if (cfg->verbose_level > 3) mono_print_bb (bb, "AFTER HANDLE-SOFT-FLOAT ");
/* this pass produced long opcodes (LMOVE/LCALL/I8CONST), so decompose
 * them now for the 32 bit target */
4841 mono_decompose_long_opts (cfg);
4847 * mono_method_to_ir: translates IL into basic blocks containing trees
4850 mono_method_to_ir2 (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
4851 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
4852 guint inline_offset, gboolean is_virtual_call)
4854 MonoInst *ins, **sp, **stack_start;
4855 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
4856 MonoMethod *cmethod, *method_definition;
4857 MonoInst **arg_array;
4858 MonoMethodHeader *header;
4860 guint32 token, ins_flag;
4862 MonoClass *constrained_call = NULL;
4863 unsigned char *ip, *end, *target, *err_pos;
4864 static double r8_0 = 0.0;
4865 MonoMethodSignature *sig;
4866 MonoGenericContext *generic_context = NULL;
4867 MonoGenericContainer *generic_container = NULL;
4868 MonoType **param_types;
4869 int i, n, start_new_bblock, dreg;
4870 int num_calls = 0, inline_costs = 0;
4871 int breakpoint_id = 0;
4873 MonoBoolean security, pinvoke;
4874 MonoSecurityManager* secman = NULL;
4875 MonoDeclSecurityActions actions;
4876 GSList *class_inits = NULL;
4877 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
4880 /* serialization and xdomain stuff may need access to private fields and methods */
4881 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
4882 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
4883 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
4884 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
4885 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
4886 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
4888 dont_verify |= mono_security_get_mode () == MONO_SECURITY_MODE_SMCS_HACK;
4890 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
4891 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
4892 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
4893 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
4895 image = method->klass->image;
4896 header = mono_method_get_header (method);
4897 generic_container = mono_method_get_generic_container (method);
4898 sig = mono_method_signature (method);
4899 num_args = sig->hasthis + sig->param_count;
4900 ip = (unsigned char*)header->code;
4901 cfg->cil_start = ip;
4902 end = ip + header->code_size;
4903 mono_jit_stats.cil_code_size += header->code_size;
4905 method_definition = method;
4906 while (method_definition->is_inflated) {
4907 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
4908 method_definition = imethod->declaring;
4911 /* SkipVerification is not allowed if core-clr is enabled */
4912 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
4914 dont_verify_stloc = TRUE;
4917 if (!dont_verify && mini_method_verify (cfg, method_definition))
4918 goto exception_exit;
4920 if (sig->is_inflated)
4921 generic_context = mono_method_get_context (method);
4922 else if (generic_container)
4923 generic_context = &generic_container->context;
4924 cfg->generic_context = generic_context;
4926 if (!cfg->generic_sharing_context)
4927 g_assert (!sig->has_type_parameters);
4929 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
4930 g_assert (method->is_inflated);
4931 g_assert (mono_method_get_context (method)->method_inst);
4933 if (method->is_inflated && mono_method_get_context (method)->method_inst)
4934 g_assert (sig->generic_param_count);
4936 if (cfg->method == method) {
4937 cfg->real_offset = 0;
4939 cfg->real_offset = inline_offset;
4942 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
4943 cfg->cil_offset_to_bb_len = header->code_size;
4945 cfg->current_method = method;
4947 if (cfg->verbose_level > 2)
4948 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
4950 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
4952 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
4953 for (n = 0; n < sig->param_count; ++n)
4954 param_types [n + sig->hasthis] = sig->params [n];
4955 cfg->arg_types = param_types;
4957 dont_inline = g_list_prepend (dont_inline, method);
4958 if (cfg->method == method) {
4960 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
4961 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
4964 NEW_BBLOCK (cfg, start_bblock);
4965 cfg->bb_entry = start_bblock;
4966 start_bblock->cil_code = NULL;
4967 start_bblock->cil_length = 0;
4970 NEW_BBLOCK (cfg, end_bblock);
4971 cfg->bb_exit = end_bblock;
4972 end_bblock->cil_code = NULL;
4973 end_bblock->cil_length = 0;
4974 g_assert (cfg->num_bblocks == 2);
4976 arg_array = cfg->args;
4978 if (header->num_clauses) {
4979 cfg->spvars = g_hash_table_new (NULL, NULL);
4980 cfg->exvars = g_hash_table_new (NULL, NULL);
4982 /* handle exception clauses */
4983 for (i = 0; i < header->num_clauses; ++i) {
4984 MonoBasicBlock *try_bb;
4985 MonoExceptionClause *clause = &header->clauses [i];
4986 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
4987 try_bb->real_offset = clause->try_offset;
4988 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
4989 tblock->real_offset = clause->handler_offset;
4990 tblock->flags |= BB_EXCEPTION_HANDLER;
4992 link_bblock (cfg, try_bb, tblock);
4994 if (*(ip + clause->handler_offset) == CEE_POP)
4995 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
4997 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
4998 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
4999 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
5000 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5001 MONO_ADD_INS (tblock, ins);
5003 /* todo: is a fault block unsafe to optimize? */
5004 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
5005 tblock->flags |= BB_EXCEPTION_UNSAFE;
5009 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
5011 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
5013 /* catch and filter blocks get the exception object on the stack */
5014 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
5015 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5016 MonoInst *dummy_use;
5018 /* mostly like handle_stack_args (), but just sets the input args */
5019 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
5020 tblock->in_scount = 1;
5021 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5022 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5025 * Add a dummy use for the exvar so its liveness info will be
5029 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
5031 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5032 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
5033 tblock->real_offset = clause->data.filter_offset;
5034 tblock->in_scount = 1;
5035 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5036 /* The filter block shares the exvar with the handler block */
5037 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5038 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5039 MONO_ADD_INS (tblock, ins);
5043 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
5044 clause->data.catch_class &&
5045 cfg->generic_sharing_context &&
5046 mono_class_check_context_used (clause->data.catch_class)) {
5047 if (mono_method_get_context (method)->method_inst)
5048 GENERIC_SHARING_FAILURE (CEE_NOP);
5051 * In shared generic code with catch
5052 * clauses containing type variables
5053 * the exception handling code has to
5054 * be able to get to the rgctx.
5055 * Therefore we have to make sure that
5056 * the vtable/mrgctx argument (for
5057 * static or generic methods) or the
5058 * "this" argument (for non-static
5059 * methods) are live.
5061 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5062 mini_method_get_context (method)->method_inst) {
5063 mono_get_vtable_var (cfg);
5065 MonoInst *dummy_use;
5067 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
5072 arg_array = alloca (sizeof (MonoInst *) * num_args);
5073 cfg->cbb = start_bblock;
5074 cfg->args = arg_array;
5075 mono_save_args (cfg, sig, inline_args);
5078 /* FIRST CODE BLOCK */
5079 NEW_BBLOCK (cfg, bblock);
5080 bblock->cil_code = ip;
5084 ADD_BBLOCK (cfg, bblock);
5086 if (cfg->method == method) {
5087 breakpoint_id = mono_debugger_method_has_breakpoint (method);
5088 if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
5089 MONO_INST_NEW (cfg, ins, OP_BREAK);
5090 MONO_ADD_INS (bblock, ins);
5094 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5095 secman = mono_security_manager_get_methods ();
5097 security = (secman && mono_method_has_declsec (method));
5098 /* at this point having security doesn't mean we have any code to generate */
5099 if (security && (cfg->method == method)) {
5100 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
5101 * And we do not want to enter the next section (with allocation) if we
5102 * have nothing to generate */
5103 security = mono_declsec_get_demands (method, &actions);
5106 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
5107 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
5109 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5110 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5111 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
5113 /* unless the method or it's class has the [SuppressUnmanagedCodeSecurity] attribute */
5114 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5118 mono_custom_attrs_free (custom);
5121 custom = mono_custom_attrs_from_class (wrapped->klass);
5122 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5126 mono_custom_attrs_free (custom);
5129 /* not a P/Invoke after all */
5134 if ((header->init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
5135 /* we use a separate basic block for the initialization code */
5136 NEW_BBLOCK (cfg, init_localsbb);
5137 cfg->bb_init = init_localsbb;
5138 init_localsbb->real_offset = cfg->real_offset;
5139 start_bblock->next_bb = init_localsbb;
5140 init_localsbb->next_bb = bblock;
5141 link_bblock (cfg, start_bblock, init_localsbb);
5142 link_bblock (cfg, init_localsbb, bblock);
5144 cfg->cbb = init_localsbb;
5146 start_bblock->next_bb = bblock;
5147 link_bblock (cfg, start_bblock, bblock);
5150 /* at this point we know, if security is TRUE, that some code needs to be generated */
5151 if (security && (cfg->method == method)) {
5154 mono_jit_stats.cas_demand_generation++;
5156 if (actions.demand.blob) {
5157 /* Add code for SecurityAction.Demand */
5158 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
5159 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
5160 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5161 mono_emit_method_call (cfg, secman->demand, args, NULL);
5163 if (actions.noncasdemand.blob) {
5164 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
5165 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
5166 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
5167 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
5168 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5169 mono_emit_method_call (cfg, secman->demand, args, NULL);
5171 if (actions.demandchoice.blob) {
5172 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
5173 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
5174 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
5175 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
5176 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
5180 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
5182 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
5185 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
5186 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
5187 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5188 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5189 if (!(method->klass && method->klass->image &&
5190 mono_security_core_clr_is_platform_image (method->klass->image))) {
5191 emit_throw_method_access_exception (cfg, method, wrapped, bblock, ip);
5195 if (!method_is_safe (method))
5196 emit_throw_verification_exception (cfg, bblock, ip);
5199 if (header->code_size == 0)
5202 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
5207 if (cfg->method == method)
5208 mono_debug_init_method (cfg, bblock, breakpoint_id);
5210 for (n = 0; n < header->num_locals; ++n) {
5211 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
5216 /* add a check for this != NULL to inlined methods */
5217 if (is_virtual_call) {
5220 NEW_ARGLOAD (cfg, arg_ins, 0);
5221 MONO_ADD_INS (cfg->cbb, arg_ins);
5222 cfg->flags |= MONO_CFG_HAS_CHECK_THIS;
5223 MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, arg_ins->dreg);
5224 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, arg_ins->dreg);
5227 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
5228 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
5231 start_new_bblock = 0;
5235 if (cfg->method == method)
5236 cfg->real_offset = ip - header->code;
5238 cfg->real_offset = inline_offset;
5243 if (start_new_bblock) {
5244 bblock->cil_length = ip - bblock->cil_code;
5245 if (start_new_bblock == 2) {
5246 g_assert (ip == tblock->cil_code);
5248 GET_BBLOCK (cfg, tblock, ip);
5250 bblock->next_bb = tblock;
5253 start_new_bblock = 0;
5254 for (i = 0; i < bblock->in_scount; ++i) {
5255 if (cfg->verbose_level > 3)
5256 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5257 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5261 g_slist_free (class_inits);
5264 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
5265 link_bblock (cfg, bblock, tblock);
5266 if (sp != stack_start) {
5267 handle_stack_args (cfg, stack_start, sp - stack_start);
5269 CHECK_UNVERIFIABLE (cfg);
5271 bblock->next_bb = tblock;
5274 for (i = 0; i < bblock->in_scount; ++i) {
5275 if (cfg->verbose_level > 3)
5276 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5277 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5280 g_slist_free (class_inits);
5285 bblock->real_offset = cfg->real_offset;
5287 if ((cfg->method == method) && cfg->coverage_info) {
5288 guint32 cil_offset = ip - header->code;
5289 cfg->coverage_info->data [cil_offset].cil_code = ip;
5291 /* TODO: Use an increment here */
5292 #if defined(__i386__)
5293 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
5294 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
5296 MONO_ADD_INS (cfg->cbb, ins);
5298 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
5299 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
5303 if (cfg->verbose_level > 3)
5304 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
5309 MONO_INST_NEW (cfg, ins, (*ip) == CEE_NOP ? OP_NOP : OP_BREAK);
5311 MONO_ADD_INS (bblock, ins);
5317 CHECK_STACK_OVF (1);
5318 n = (*ip)-CEE_LDARG_0;
5320 EMIT_NEW_ARGLOAD (cfg, ins, n);
5328 CHECK_STACK_OVF (1);
5329 n = (*ip)-CEE_LDLOC_0;
5331 EMIT_NEW_LOCLOAD (cfg, ins, n);
5342 n = (*ip)-CEE_STLOC_0;
5345 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
5348 opcode = mono_type_to_regmove (cfg, header->locals [n]);
5349 if ((opcode == OP_MOVE) && ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
5350 /* Optimize reg-reg moves away */
5352 * Can't optimize other opcodes, since sp[0] might point to
5353 * the last ins of a decomposed opcode.
5355 sp [0]->dreg = (cfg)->locals [n]->dreg;
5357 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
5365 CHECK_STACK_OVF (1);
5368 EMIT_NEW_ARGLOAD (cfg, ins, n);
5374 CHECK_STACK_OVF (1);
5377 NEW_ARGLOADA (cfg, ins, n);
5378 MONO_ADD_INS (cfg->cbb, ins);
5388 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
5390 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
5395 CHECK_STACK_OVF (1);
5398 EMIT_NEW_LOCLOAD (cfg, ins, n);
5404 CHECK_STACK_OVF (1);
5405 CHECK_LOCAL (ip [1]);
5408 * ldloca inhibits many optimizations so try to get rid of it in common
5411 if (ip + 8 < end && (ip [2] == CEE_PREFIX1) && (ip [3] == CEE_INITOBJ) && ip_in_bb (cfg, bblock, ip + 3)) {
5412 gboolean skip = FALSE;
5414 /* From the INITOBJ case */
5415 token = read32 (ip + 4);
5416 klass = mini_get_class (method, token, generic_context);
5417 CHECK_TYPELOAD (klass);
5418 if (generic_class_is_reference_type (cfg, klass)) {
5419 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [ip [1]]->dreg, NULL);
5420 } else if (MONO_TYPE_IS_REFERENCE (&klass->byval_arg)) {
5421 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [ip [1]]->dreg, NULL);
5422 } else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
5423 MONO_EMIT_NEW_VZERO (cfg, cfg->locals [ip [1]]->dreg, klass);
5435 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
5443 CHECK_LOCAL (ip [1]);
5444 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
5446 EMIT_NEW_LOCSTORE (cfg, ins, ip [1], *sp);
5451 CHECK_STACK_OVF (1);
5452 EMIT_NEW_PCONST (cfg, ins, NULL);
5453 ins->type = STACK_OBJ;
5458 CHECK_STACK_OVF (1);
5459 EMIT_NEW_ICONST (cfg, ins, -1);
5472 CHECK_STACK_OVF (1);
5473 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
5479 CHECK_STACK_OVF (1);
5481 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
5487 CHECK_STACK_OVF (1);
5488 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
5494 CHECK_STACK_OVF (1);
5495 MONO_INST_NEW (cfg, ins, OP_I8CONST);
5496 ins->type = STACK_I8;
5497 ins->dreg = alloc_dreg (cfg, STACK_I8);
5499 ins->inst_l = (gint64)read64 (ip);
5500 MONO_ADD_INS (bblock, ins);
5506 /* FIXME: we should really allocate this only late in the compilation process */
5507 mono_domain_lock (cfg->domain);
5508 f = mono_mempool_alloc (cfg->domain->mp, sizeof (float));
5509 mono_domain_unlock (cfg->domain);
5511 CHECK_STACK_OVF (1);
5512 MONO_INST_NEW (cfg, ins, OP_R4CONST);
5513 ins->type = STACK_R8;
5514 ins->dreg = alloc_dreg (cfg, STACK_R8);
5518 MONO_ADD_INS (bblock, ins);
5526 /* FIXME: we should really allocate this only late in the compilation process */
5527 mono_domain_lock (cfg->domain);
5528 d = mono_mempool_alloc (cfg->domain->mp, sizeof (double));
5529 mono_domain_unlock (cfg->domain);
5531 CHECK_STACK_OVF (1);
5532 MONO_INST_NEW (cfg, ins, OP_R8CONST);
5533 ins->type = STACK_R8;
5534 ins->dreg = alloc_dreg (cfg, STACK_R8);
5538 MONO_ADD_INS (bblock, ins);
5545 MonoInst *temp, *store;
5547 CHECK_STACK_OVF (1);
5551 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
5552 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
5554 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
5557 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
5570 if (sp [0]->type == STACK_R8)
5571 /* we need to pop the value from the x86 FP stack */
5572 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
5579 if (stack_start != sp)
5581 token = read32 (ip + 1);
5582 /* FIXME: check the signature matches */
5583 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
5588 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
5589 GENERIC_SHARING_FAILURE (CEE_JMP);
5591 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
5592 if (check_linkdemand (cfg, method, cmethod))
5594 CHECK_CFG_EXCEPTION;
5599 MonoMethodSignature *fsig = mono_method_signature (cmethod);
5602 /* FIXME: Remove OP_JMP from mini-amd64.c when the old JIT is removed */
5604 /* Handle tail calls similarly to calls */
5605 n = fsig->param_count + fsig->hasthis;
5607 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
5608 call->method = cmethod;
5609 call->tail_call = TRUE;
5610 call->signature = mono_method_signature (cmethod);
5611 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
5612 call->inst.inst_p0 = cmethod;
5613 for (i = 0; i < n; ++i)
5614 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
5616 mono_arch_emit_call (cfg, call);
5617 MONO_ADD_INS (bblock, (MonoInst*)call);
5620 for (i = 0; i < num_args; ++i)
5621 /* Prevent arguments from being optimized away */
5622 arg_array [i]->flags |= MONO_INST_VOLATILE;
5624 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
5625 ins = (MonoInst*)call;
5626 ins->inst_p0 = cmethod;
5627 MONO_ADD_INS (bblock, ins);
5631 start_new_bblock = 1;
5636 case CEE_CALLVIRT: {
5637 MonoInst *addr = NULL;
5638 MonoMethodSignature *fsig = NULL;
5640 int virtual = *ip == CEE_CALLVIRT;
5641 int calli = *ip == CEE_CALLI;
5642 gboolean pass_imt_from_rgctx = FALSE;
5643 MonoInst *imt_arg = NULL;
5644 gboolean pass_vtable = FALSE;
5645 gboolean pass_mrgctx = FALSE;
5646 MonoInst *vtable_arg = NULL;
5647 gboolean check_this = FALSE;
5650 token = read32 (ip + 1);
5657 if (method->wrapper_type != MONO_WRAPPER_NONE)
5658 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
5660 fsig = mono_metadata_parse_signature (image, token);
5662 n = fsig->param_count + fsig->hasthis;
5664 MonoMethod *cil_method;
5666 if (method->wrapper_type != MONO_WRAPPER_NONE) {
5667 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
5668 cil_method = cmethod;
5669 } else if (constrained_call) {
5670 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
5672 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
5673 cil_method = cmethod;
5678 if (!dont_verify && !cfg->skip_visibility) {
5679 MonoMethod *target_method = cil_method;
5680 if (method->is_inflated) {
5681 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
5683 if (!mono_method_can_access_method (method_definition, target_method) &&
5684 !mono_method_can_access_method (method, cil_method))
5685 METHOD_ACCESS_FAILURE;
5688 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
5689 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
5691 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
5692 /* MS.NET seems to silently convert this to a callvirt */
5695 if (!cmethod->klass->inited)
5696 if (!mono_class_init (cmethod->klass))
5699 if (mono_method_signature (cmethod)->pinvoke) {
5700 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
5701 fsig = mono_method_signature (wrapper);
5702 } else if (constrained_call) {
5703 fsig = mono_method_signature (cmethod);
5705 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
5708 mono_save_token_info (cfg, image, token, cmethod);
5710 n = fsig->param_count + fsig->hasthis;
5712 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
5713 if (check_linkdemand (cfg, method, cmethod))
5715 CHECK_CFG_EXCEPTION;
5718 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
5719 mini_class_is_system_array (cmethod->klass)) {
5720 array_rank = cmethod->klass->rank;
5723 if (cmethod->string_ctor)
5724 g_assert_not_reached ();
5727 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
5730 if (!cfg->generic_sharing_context && cmethod)
5731 g_assert (!mono_method_check_context_used (cmethod));
5735 //g_assert (!virtual || fsig->hasthis);
5739 if (constrained_call) {
5741 * We have the `constrained.' prefix opcode.
5743 if (constrained_call->valuetype && !cmethod->klass->valuetype) {
5747 * The type parameter is instantiated as a valuetype,
5748 * but that type doesn't override the method we're
5749 * calling, so we need to box `this'.
5751 dreg = alloc_dreg (cfg, STACK_VTYPE);
5752 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADV_MEMBASE, dreg, sp [0]->dreg, 0);
5753 ins->klass = constrained_call;
5754 sp [0] = handle_box (cfg, ins, constrained_call);
5755 } else if (!constrained_call->valuetype) {
5756 int dreg = alloc_preg (cfg);
5759 * The type parameter is instantiated as a reference
5760 * type. We have a managed pointer on the stack, so
5761 * we need to dereference it here.
5763 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
5764 ins->type = STACK_OBJ;
5766 } else if (cmethod->klass->valuetype)
5768 constrained_call = NULL;
5771 if (*ip != CEE_CALLI && check_call_signature (cfg, fsig, sp))
5775 if (cmethod && (cmethod->flags & METHOD_ATTRIBUTE_STATIC) &&
5776 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
5777 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
5778 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
5779 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
5782 * Pass vtable iff target method might
5783 * be shared, which means that sharing
5784 * is enabled for its class and its
5785 * context is sharable (and it's not a
5788 if (sharing_enabled && context_sharable &&
5789 !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
5793 if (cmethod && mini_method_get_context (cmethod) &&
5794 mini_method_get_context (cmethod)->method_inst) {
5795 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
5796 MonoGenericContext *context = mini_method_get_context (cmethod);
5797 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
5799 g_assert (!pass_vtable);
5801 if (sharing_enabled && context_sharable)
5805 if (cfg->generic_sharing_context && cmethod) {
5806 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
5808 context_used = mono_method_check_context_used (cmethod);
5810 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
5811 /* Generic method interface
5812 calls are resolved via a
5813 helper function and don't
5815 if (!cmethod_context || !cmethod_context->method_inst)
5816 pass_imt_from_rgctx = TRUE;
5820 * If a shared method calls another
5821 * shared method then the caller must
5822 * have a generic sharing context
5823 * because the magic trampoline
5824 * requires it. FIXME: We shouldn't
5825 * have to force the vtable/mrgctx
5826 * variable here. Instead there
5827 * should be a flag in the cfg to
5828 * request a generic sharing context.
5830 if (context_used && method->flags & METHOD_ATTRIBUTE_STATIC)
5831 mono_get_vtable_var (cfg);
5838 EMIT_GET_RGCTX (rgctx, context_used);
5839 vtable_arg = emit_get_rgctx_klass (cfg, context_used, rgctx, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
5841 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
5843 CHECK_TYPELOAD (cmethod->klass);
5844 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
5849 g_assert (!vtable_arg);
5854 EMIT_GET_RGCTX (rgctx, context_used);
5855 vtable_arg = emit_get_rgctx_method (cfg, context_used, rgctx, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
5857 EMIT_NEW_METHOD_RGCTX_CONST (cfg, vtable_arg, cmethod);
5860 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
5861 (cmethod->flags & METHOD_ATTRIBUTE_FINAL)) {
5868 if (pass_imt_from_rgctx) {
5871 g_assert (!pass_vtable);
5874 EMIT_GET_RGCTX (rgctx, context_used);
5875 imt_arg = emit_get_rgctx_method (cfg, context_used, rgctx, cmethod,
5876 MONO_RGCTX_INFO_METHOD);
5882 MONO_INST_NEW (cfg, check, OP_CHECK_THIS);
5883 check->sreg1 = sp [0]->dreg;
5884 MONO_ADD_INS (cfg->cbb, check);
5887 /* Calling virtual generic methods */
5888 if (cmethod && virtual &&
5889 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
5890 !((cmethod->flags & METHOD_ATTRIBUTE_FINAL) &&
5891 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
5892 mono_method_signature (cmethod)->generic_param_count) {
5893 MonoInst *this_temp, *this_arg_temp, *store;
5894 MonoInst *iargs [4];
5896 g_assert (mono_method_signature (cmethod)->is_inflated);
5898 /* Prevent inlining of methods that contain indirect calls */
5901 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
5902 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
5903 MONO_ADD_INS (bblock, store);
5905 /* FIXME: This should be a managed pointer */
5906 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
5908 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
5912 EMIT_GET_RGCTX (rgctx, context_used);
5913 iargs [1] = emit_get_rgctx_method (cfg, context_used, rgctx, cmethod, MONO_RGCTX_INFO_METHOD);
5914 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
5915 addr = mono_emit_jit_icall (cfg,
5916 mono_helper_compile_generic_method, iargs);
5918 EMIT_NEW_METHODCONST (cfg, iargs [1], cmethod);
5919 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
5920 addr = mono_emit_jit_icall (cfg, mono_helper_compile_generic_method, iargs);
5923 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
5925 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
5926 if (!MONO_TYPE_IS_VOID (fsig->ret))
5935 /* FIXME: runtime generic context pointer for jumps? */
5936 /* FIXME: handle this for generic sharing eventually */
5937 if ((ins_flag & MONO_INST_TAILCALL) && !cfg->generic_sharing_context && !vtable_arg && cmethod && (*ip == CEE_CALL) &&
5938 (mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)))) {
5941 /* FIXME: runtime generic context pointer for jumps? */
5942 GENERIC_SHARING_FAILURE (*ip);
5944 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
5947 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
5948 call->tail_call = TRUE;
5949 call->method = cmethod;
5950 call->signature = mono_method_signature (cmethod);
5953 /* Handle tail calls similarly to calls */
5954 call->inst.opcode = OP_TAILCALL;
5956 mono_arch_emit_call (cfg, call);
5959 * We implement tail calls by storing the actual arguments into the
5960 * argument variables, then emitting a CEE_JMP.
5962 for (i = 0; i < n; ++i) {
5963 /* Prevent argument from being register allocated */
5964 arg_array [i]->flags |= MONO_INST_VOLATILE;
5965 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
5969 ins = (MonoInst*)call;
5970 ins->inst_p0 = cmethod;
5971 ins->inst_p1 = arg_array [0];
5972 MONO_ADD_INS (bblock, ins);
5973 link_bblock (cfg, bblock, end_bblock);
5974 start_new_bblock = 1;
5975 /* skip CEE_RET as well */
5981 /* Conversion to a JIT intrinsic */
5982 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
5983 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
5984 type_to_eval_stack_type ((cfg), fsig->ret, ins);
5995 if ((cfg->opt & MONO_OPT_INLINE) && cmethod &&
5996 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || (cmethod->flags & METHOD_ATTRIBUTE_FINAL)) &&
5997 mono_method_check_inlining (cfg, cmethod) &&
5998 !g_list_find (dont_inline, cmethod)) {
6000 gboolean allways = FALSE;
6002 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
6003 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
6004 /* Prevent inlining of methods that call wrappers */
6006 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
6010 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, allways))) {
6012 cfg->real_offset += 5;
6015 if (!MONO_TYPE_IS_VOID (fsig->ret))
6016 /* *sp is already set by inline_method */
6019 inline_costs += costs;
6025 inline_costs += 10 * num_calls++;
6027 /* Tail recursion elimination */
6028 if ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
6029 gboolean has_vtargs = FALSE;
6032 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6035 /* keep it simple */
6036 for (i = fsig->param_count - 1; i >= 0; i--) {
6037 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
6042 for (i = 0; i < n; ++i)
6043 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6044 MONO_INST_NEW (cfg, ins, OP_BR);
6045 MONO_ADD_INS (bblock, ins);
6046 tblock = start_bblock->out_bb [0];
6047 link_bblock (cfg, bblock, tblock);
6048 ins->inst_target_bb = tblock;
6049 start_new_bblock = 1;
6051 /* skip the CEE_RET, too */
6052 if (ip_in_bb (cfg, bblock, ip + 5))
6062 /* Generic sharing */
6063 /* FIXME: only do this for generic methods if
6064 they are not shared! */
6066 (cmethod->klass->valuetype ||
6067 (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst && !pass_mrgctx) ||
6068 ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) &&
6069 mono_class_generic_sharing_enabled (cmethod->klass)) ||
6070 (!imt_arg && !mono_method_is_generic_sharable_impl (cmethod, TRUE) &&
6071 (!virtual || cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
6072 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))))) {
6077 g_assert (cfg->generic_sharing_context && cmethod);
6081 * We are compiling a call to a
6082 * generic method from shared code,
6083 * which means that we have to look up
6084 * the method in the rgctx and do an
6088 EMIT_GET_RGCTX (rgctx, context_used);
6089 addr = emit_get_rgctx_method (cfg, context_used, rgctx, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
6092 /* Indirect calls */
6094 g_assert (!imt_arg);
6096 if (*ip == CEE_CALL)
6097 g_assert (context_used);
6098 else if (*ip == CEE_CALLI)
6099 g_assert (!vtable_arg);
6101 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
6102 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
6104 /* Prevent inlining of methods with indirect calls */
6108 #ifdef MONO_ARCH_RGCTX_REG
6110 int rgctx_reg = mono_alloc_preg (cfg);
6112 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
6113 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6114 call = (MonoCallInst*)ins;
6115 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
6116 cfg->uses_rgctx_reg = TRUE;
6121 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6123 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6124 if (fsig->pinvoke && !fsig->ret->byref) {
6128 * Native code might return non register sized integers
6129 * without initializing the upper bits.
6131 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
6132 case OP_LOADI1_MEMBASE:
6133 widen_op = OP_ICONV_TO_I1;
6135 case OP_LOADU1_MEMBASE:
6136 widen_op = OP_ICONV_TO_U1;
6138 case OP_LOADI2_MEMBASE:
6139 widen_op = OP_ICONV_TO_I2;
6141 case OP_LOADU2_MEMBASE:
6142 widen_op = OP_ICONV_TO_U2;
6148 if (widen_op != -1) {
6149 int dreg = alloc_preg (cfg);
6152 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
6153 widen->type = ins->type;
6170 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
6171 if (sp [fsig->param_count]->type == STACK_OBJ) {
6172 MonoInst *iargs [2];
6175 iargs [1] = sp [fsig->param_count];
6177 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
6180 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
6181 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, sp [fsig->param_count]->dreg);
6182 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
6183 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6185 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
6188 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
6189 if (!cmethod->klass->element_class->valuetype && !readonly)
6190 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
6193 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6196 g_assert_not_reached ();
6204 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
6206 if (!MONO_TYPE_IS_VOID (fsig->ret))
6217 #ifdef MONO_ARCH_RGCTX_REG
6219 int rgctx_reg = mono_alloc_preg (cfg);
6221 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
6222 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, NULL);
6223 call = (MonoCallInst*)ins;
6224 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
6225 cfg->uses_rgctx_reg = TRUE;
6229 } else if (imt_arg) {
6230 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, imt_arg);
6232 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, NULL);
6235 if (!MONO_TYPE_IS_VOID (fsig->ret))
6243 if (cfg->method != method) {
6244 /* return from inlined method */
6246 * If in_count == 0, that means the ret is unreachable due to
6247 * being preceded by a throw. In that case, inline_method () will
6248 * handle setting the return value
6249 * (test case: test_0_inline_throw ()).
6251 if (return_var && cfg->cbb->in_count) {
6255 //g_assert (returnvar != -1);
6256 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
6257 cfg->ret_var_set = TRUE;
6261 MonoType *ret_type = mono_method_signature (method)->ret;
6263 g_assert (!return_var);
6266 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
6269 if (!cfg->vret_addr) {
6272 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
6274 EMIT_NEW_RETLOADA (cfg, ret_addr);
6276 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
6277 ins->klass = mono_class_from_mono_type (ret_type);
6280 #ifdef MONO_ARCH_SOFT_FLOAT
6281 if (!ret_type->byref && ret_type->type == MONO_TYPE_R4) {
6282 MonoInst *iargs [1];
6286 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
6287 mono_arch_emit_setret (cfg, method, conv);
6289 mono_arch_emit_setret (cfg, method, *sp);
6292 mono_arch_emit_setret (cfg, method, *sp);
6297 if (sp != stack_start)
6299 MONO_INST_NEW (cfg, ins, OP_BR);
6301 ins->inst_target_bb = end_bblock;
6302 MONO_ADD_INS (bblock, ins);
6303 link_bblock (cfg, bblock, end_bblock);
6304 start_new_bblock = 1;
6308 MONO_INST_NEW (cfg, ins, OP_BR);
6310 target = ip + 1 + (signed char)(*ip);
6312 GET_BBLOCK (cfg, tblock, target);
6313 link_bblock (cfg, bblock, tblock);
6314 ins->inst_target_bb = tblock;
6315 if (sp != stack_start) {
6316 handle_stack_args (cfg, stack_start, sp - stack_start);
6318 CHECK_UNVERIFIABLE (cfg);
6320 MONO_ADD_INS (bblock, ins);
6321 start_new_bblock = 1;
6322 inline_costs += BRANCH_COST;
6336 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
6338 target = ip + 1 + *(signed char*)ip;
6344 inline_costs += BRANCH_COST;
6348 MONO_INST_NEW (cfg, ins, OP_BR);
6351 target = ip + 4 + (gint32)read32(ip);
6353 GET_BBLOCK (cfg, tblock, target);
6354 link_bblock (cfg, bblock, tblock);
6355 ins->inst_target_bb = tblock;
6356 if (sp != stack_start) {
6357 handle_stack_args (cfg, stack_start, sp - stack_start);
6359 CHECK_UNVERIFIABLE (cfg);
6362 MONO_ADD_INS (bblock, ins);
6364 start_new_bblock = 1;
6365 inline_costs += BRANCH_COST;
6372 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
6373 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
6374 guint32 opsize = is_short ? 1 : 4;
6376 CHECK_OPSIZE (opsize);
6378 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
6381 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
6386 GET_BBLOCK (cfg, tblock, target);
6387 link_bblock (cfg, bblock, tblock);
6388 GET_BBLOCK (cfg, tblock, ip);
6389 link_bblock (cfg, bblock, tblock);
6391 if (sp != stack_start) {
6392 handle_stack_args (cfg, stack_start, sp - stack_start);
6393 CHECK_UNVERIFIABLE (cfg);
6396 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
6397 cmp->sreg1 = sp [0]->dreg;
6398 type_from_op (cmp, sp [0], NULL);
6401 #if SIZEOF_VOID_P == 4
6402 if (cmp->opcode == OP_LCOMPARE_IMM) {
6403 /* Convert it to OP_LCOMPARE */
6404 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6405 ins->type = STACK_I8;
6406 ins->dreg = alloc_dreg (cfg, STACK_I8);
6408 MONO_ADD_INS (bblock, ins);
6409 cmp->opcode = OP_LCOMPARE;
6410 cmp->sreg2 = ins->dreg;
6413 MONO_ADD_INS (bblock, cmp);
6415 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
6416 type_from_op (ins, sp [0], NULL);
6417 MONO_ADD_INS (bblock, ins);
6418 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
6419 GET_BBLOCK (cfg, tblock, target);
6420 ins->inst_true_bb = tblock;
6421 GET_BBLOCK (cfg, tblock, ip);
6422 ins->inst_false_bb = tblock;
6423 start_new_bblock = 2;
6426 inline_costs += BRANCH_COST;
6441 MONO_INST_NEW (cfg, ins, *ip);
6443 target = ip + 4 + (gint32)read32(ip);
6449 inline_costs += BRANCH_COST;
6453 MonoBasicBlock **targets;
6454 MonoBasicBlock *default_bblock;
6455 MonoJumpInfoBBTable *table;
6457 int offset_reg = alloc_preg (cfg);
6458 int target_reg = alloc_preg (cfg);
6459 int table_reg = alloc_preg (cfg);
6460 int sum_reg = alloc_preg (cfg);
6465 n = read32 (ip + 1);
6468 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
6472 CHECK_OPSIZE (n * sizeof (guint32));
6473 target = ip + n * sizeof (guint32);
6475 GET_BBLOCK (cfg, default_bblock, target);
6477 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
6478 for (i = 0; i < n; ++i) {
6479 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
6480 targets [i] = tblock;
6484 if (sp != stack_start) {
6486 * Link the current bb with the targets as well, so handle_stack_args
6487 * will set their in_stack correctly.
6489 link_bblock (cfg, bblock, default_bblock);
6490 for (i = 0; i < n; ++i)
6491 link_bblock (cfg, bblock, targets [i]);
6493 handle_stack_args (cfg, stack_start, sp - stack_start);
6495 CHECK_UNVERIFIABLE (cfg);
6498 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
6499 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
6502 for (i = 0; i < n; ++i)
6503 link_bblock (cfg, bblock, targets [i]);
6505 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
6506 table->table = targets;
6507 table->table_size = n;
6510 /* ARM implements SWITCH statements differently */
6511 /* FIXME: Make it use the generic implementation */
6512 /* the backend code will deal with aot vs normal case */
6513 MONO_INST_NEW (cfg, ins, OP_SWITCH);
6514 ins->sreg1 = src1->dreg;
6515 ins->inst_p0 = table;
6516 ins->inst_many_bb = targets;
6517 ins->klass = GUINT_TO_POINTER (n);
6518 MONO_ADD_INS (cfg->cbb, ins);
6520 if (sizeof (gpointer) == 8)
6521 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
6523 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
6525 #if SIZEOF_VOID_P == 8
6526 /* The upper word might not be zero, and we add it to a 64 bit address later */
6527 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
6530 if (cfg->compile_aot) {
6531 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
6533 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
6534 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
6535 ins->inst_p0 = table;
6536 ins->dreg = table_reg;
6537 MONO_ADD_INS (cfg->cbb, ins);
6540 /* FIXME: Use load_memindex */
6541 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
6542 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
6543 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
6545 start_new_bblock = 1;
6546 inline_costs += (BRANCH_COST * 2);
6566 dreg = alloc_freg (cfg);
6569 dreg = alloc_lreg (cfg);
6572 dreg = alloc_preg (cfg);
6575 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
6576 ins->type = ldind_type [*ip - CEE_LDIND_I1];
6577 ins->flags |= ins_flag;
6579 MONO_ADD_INS (bblock, ins);
6594 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
6595 ins->flags |= ins_flag;
6597 MONO_ADD_INS (bblock, ins);
6605 MONO_INST_NEW (cfg, ins, (*ip));
6607 ins->sreg1 = sp [0]->dreg;
6608 ins->sreg2 = sp [1]->dreg;
6609 type_from_op (ins, sp [0], sp [1]);
6611 ins->dreg = alloc_dreg ((cfg), (ins)->type);
6613 /* Use the immediate opcodes if possible */
6614 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
6615 int imm_opcode = mono_op_to_op_imm (ins->opcode);
6616 if (imm_opcode != -1) {
6617 ins->opcode = imm_opcode;
6618 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
6621 sp [1]->opcode = OP_NOP;
6625 MONO_ADD_INS ((cfg)->cbb, (ins));
6628 mono_decompose_opcode (cfg, ins);
6645 MONO_INST_NEW (cfg, ins, (*ip));
6647 ins->sreg1 = sp [0]->dreg;
6648 ins->sreg2 = sp [1]->dreg;
6649 type_from_op (ins, sp [0], sp [1]);
6651 ADD_WIDEN_OP (ins, sp [0], sp [1]);
6652 ins->dreg = alloc_dreg ((cfg), (ins)->type);
6654 /* FIXME: Pass opcode to is_inst_imm */
6656 /* Use the immediate opcodes if possible */
6657 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
6660 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
6661 if (imm_opcode != -1) {
6662 ins->opcode = imm_opcode;
6663 if (sp [1]->opcode == OP_I8CONST) {
6664 #if SIZEOF_VOID_P == 8
6665 ins->inst_imm = sp [1]->inst_l;
6667 ins->inst_ls_word = sp [1]->inst_ls_word;
6668 ins->inst_ms_word = sp [1]->inst_ms_word;
6672 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
6675 sp [1]->opcode = OP_NOP;
6678 MONO_ADD_INS ((cfg)->cbb, (ins));
6681 mono_decompose_opcode (cfg, ins);
6694 case CEE_CONV_OVF_I8:
6695 case CEE_CONV_OVF_U8:
6699 /* Special case this earlier so we have long constants in the IR */
6700 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
6701 int data = sp [-1]->inst_c0;
6702 sp [-1]->opcode = OP_I8CONST;
6703 sp [-1]->type = STACK_I8;
6704 #if SIZEOF_VOID_P == 8
6705 if ((*ip) == CEE_CONV_U8)
6706 sp [-1]->inst_c0 = (guint32)data;
6708 sp [-1]->inst_c0 = data;
6710 sp [-1]->inst_ls_word = data;
6711 if ((*ip) == CEE_CONV_U8)
6712 sp [-1]->inst_ms_word = 0;
6714 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
6716 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
6723 case CEE_CONV_OVF_I4:
6724 case CEE_CONV_OVF_I1:
6725 case CEE_CONV_OVF_I2:
6726 case CEE_CONV_OVF_I:
6727 case CEE_CONV_OVF_U:
6730 if (sp [-1]->type == STACK_R8) {
6731 ADD_UNOP (CEE_CONV_OVF_I8);
6738 case CEE_CONV_OVF_U1:
6739 case CEE_CONV_OVF_U2:
6740 case CEE_CONV_OVF_U4:
6743 if (sp [-1]->type == STACK_R8) {
6744 ADD_UNOP (CEE_CONV_OVF_U8);
6751 case CEE_CONV_OVF_I1_UN:
6752 case CEE_CONV_OVF_I2_UN:
6753 case CEE_CONV_OVF_I4_UN:
6754 case CEE_CONV_OVF_I8_UN:
6755 case CEE_CONV_OVF_U1_UN:
6756 case CEE_CONV_OVF_U2_UN:
6757 case CEE_CONV_OVF_U4_UN:
6758 case CEE_CONV_OVF_U8_UN:
6759 case CEE_CONV_OVF_I_UN:
6760 case CEE_CONV_OVF_U_UN:
6770 case CEE_ADD_OVF_UN:
6772 case CEE_MUL_OVF_UN:
6774 case CEE_SUB_OVF_UN:
6782 token = read32 (ip + 1);
6783 klass = mini_get_class (method, token, generic_context);
6784 CHECK_TYPELOAD (klass);
6786 if (generic_class_is_reference_type (cfg, klass)) {
6787 MonoInst *store, *load;
6788 int dreg = alloc_preg (cfg);
6790 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
6791 load->flags |= ins_flag;
6792 MONO_ADD_INS (cfg->cbb, load);
6794 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
6795 store->flags |= ins_flag;
6796 MONO_ADD_INS (cfg->cbb, store);
6798 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
6810 token = read32 (ip + 1);
6811 klass = mini_get_class (method, token, generic_context);
6812 CHECK_TYPELOAD (klass);
6814 /* Optimize the common ldobj+stloc combination */
6824 loc_index = ip [5] - CEE_STLOC_0;
6831 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
6832 CHECK_LOCAL (loc_index);
6834 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
6835 ins->dreg = cfg->locals [loc_index]->dreg;
6841 /* Optimize the ldobj+stobj combination */
6842 /* The reference case ends up being a load+store anyway */
6843 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 9) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
6848 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
6855 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
6864 CHECK_STACK_OVF (1);
6866 n = read32 (ip + 1);
6868 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
6869 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
6870 ins->type = STACK_OBJ;
6873 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
6874 MonoInst *iargs [1];
6876 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
6877 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
6879 if (cfg->opt & MONO_OPT_SHARED) {
6880 MonoInst *iargs [3];
6882 if (cfg->compile_aot) {
6883 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
6885 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
6886 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
6887 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
6888 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
6889 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
6891 if (bblock->out_of_line) {
6892 MonoInst *iargs [2];
6894 if (cfg->method->klass->image == mono_defaults.corlib) {
6896 * Avoid relocations in AOT and save some space by using a
6897 * version of helper_ldstr specialized to mscorlib.
6899 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
6900 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
6902 /* Avoid creating the string object */
6903 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
6904 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
6905 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
6909 if (cfg->compile_aot) {
6910 NEW_LDSTRCONST (cfg, ins, image, n);
6912 MONO_ADD_INS (bblock, ins);
6915 NEW_PCONST (cfg, ins, NULL);
6916 ins->type = STACK_OBJ;
6917 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
6919 MONO_ADD_INS (bblock, ins);
6928 MonoInst *iargs [2];
6929 MonoMethodSignature *fsig;
6934 token = read32 (ip + 1);
6935 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6938 fsig = mono_method_get_signature (cmethod, image, token);
6940 mono_save_token_info (cfg, image, token, cmethod);
6942 if (!mono_class_init (cmethod->klass))
6945 if (cfg->generic_sharing_context)
6946 context_used = mono_method_check_context_used (cmethod);
6948 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
6949 if (check_linkdemand (cfg, method, cmethod))
6951 CHECK_CFG_EXCEPTION;
6952 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
6953 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
6956 n = fsig->param_count;
6960 * Generate smaller code for the common newobj <exception> instruction in
6961 * argument checking code.
6963 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib && n <= 2 &&
6964 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
6965 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
6966 MonoInst *iargs [3];
6970 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
6973 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
6977 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
6982 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
6985 g_assert_not_reached ();
6993 /* move the args to allow room for 'this' in the first position */
6999 /* check_call_signature () requires sp[0] to be set */
7000 this_ins.type = STACK_OBJ;
7002 if (check_call_signature (cfg, fsig, sp))
7007 if (mini_class_is_system_array (cmethod->klass)) {
7008 g_assert (!context_used);
7009 EMIT_NEW_METHODCONST (cfg, *sp, cmethod);
7011 /* Avoid varargs in the common case */
7012 if (fsig->param_count == 1)
7013 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
7014 else if (fsig->param_count == 2)
7015 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
7017 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
7018 } else if (cmethod->string_ctor) {
7019 g_assert (!context_used);
7020 /* we simply pass a null pointer */
7021 EMIT_NEW_PCONST (cfg, *sp, NULL);
7022 /* now call the string ctor */
7023 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, sp, NULL, NULL);
7025 MonoInst* callvirt_this_arg = NULL;
7027 if (cmethod->klass->valuetype) {
7028 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
7029 MONO_EMIT_NEW_VZERO (cfg, iargs [0]->dreg, cmethod->klass);
7030 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
7035 * The code generated by mini_emit_virtual_call () expects
7036 * iargs [0] to be a boxed instance, but luckily the vcall
7037 * will be transformed into a normal call there.
7039 } else if (context_used) {
7040 MonoInst *rgctx, *data;
7043 EMIT_GET_RGCTX (rgctx, context_used);
7044 if (cfg->opt & MONO_OPT_SHARED)
7045 rgctx_info = MONO_RGCTX_INFO_KLASS;
7047 rgctx_info = MONO_RGCTX_INFO_VTABLE;
7048 data = emit_get_rgctx_klass (cfg, context_used, rgctx, cmethod->klass, rgctx_info);
7050 alloc = handle_alloc_from_inst (cfg, cmethod->klass, data, FALSE);
7053 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7055 CHECK_TYPELOAD (cmethod->klass);
7058 * TypeInitializationExceptions thrown from the mono_runtime_class_init
7059 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
7060 * As a workaround, we call class cctors before allocating objects.
7062 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
7063 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
7064 if (cfg->verbose_level > 2)
7065 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
7066 class_inits = g_slist_prepend (class_inits, vtable);
7069 alloc = handle_alloc (cfg, cmethod->klass, FALSE);
7074 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
7076 /* Now call the actual ctor */
7077 /* Avoid virtual calls to ctors if possible */
7078 if (cmethod->klass->marshalbyref)
7079 callvirt_this_arg = sp [0];
7081 if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used &&
7082 mono_method_check_inlining (cfg, cmethod) &&
7083 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
7084 !g_list_find (dont_inline, cmethod)) {
7087 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
7088 cfg->real_offset += 5;
7091 inline_costs += costs - 5;
7094 mono_emit_method_call_full (cfg, cmethod, fsig, sp, callvirt_this_arg, NULL);
7096 } else if (context_used &&
7097 (cmethod->klass->valuetype ||
7098 !mono_method_is_generic_sharable_impl (cmethod, TRUE))) {
7099 MonoInst *rgctx, *cmethod_addr;
7101 g_assert (!callvirt_this_arg);
7103 EMIT_GET_RGCTX (rgctx, context_used);
7104 cmethod_addr = emit_get_rgctx_method (cfg, context_used, rgctx,
7105 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7107 mono_emit_calli (cfg, fsig, sp, cmethod_addr);
7110 mono_emit_method_call_full (cfg, cmethod, fsig, sp, callvirt_this_arg, NULL);
7114 if (alloc == NULL) {
7116 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
7117 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
7131 token = read32 (ip + 1);
7132 klass = mini_get_class (method, token, generic_context);
7133 CHECK_TYPELOAD (klass);
7134 if (sp [0]->type != STACK_OBJ)
7137 if (cfg->generic_sharing_context)
7138 context_used = mono_class_check_context_used (klass);
7141 MonoInst *rgctx, *args [2];
7143 g_assert (!method->klass->valuetype);
7149 EMIT_GET_RGCTX (rgctx, context_used);
7150 args [1] = emit_get_rgctx_klass (cfg, context_used, rgctx, klass,
7151 MONO_RGCTX_INFO_KLASS);
7153 ins = mono_emit_jit_icall (cfg, mono_object_castclass, args);
7157 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7158 MonoMethod *mono_castclass;
7159 MonoInst *iargs [1];
7162 mono_castclass = mono_marshal_get_castclass (klass);
7165 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7166 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7167 g_assert (costs > 0);
7170 cfg->real_offset += 5;
7175 inline_costs += costs;
7178 ins = handle_castclass (cfg, klass, *sp);
7188 token = read32 (ip + 1);
7189 klass = mini_get_class (method, token, generic_context);
7190 CHECK_TYPELOAD (klass);
7191 if (sp [0]->type != STACK_OBJ)
7194 if (cfg->generic_sharing_context)
7195 context_used = mono_class_check_context_used (klass);
7198 MonoInst *rgctx, *args [2];
7204 EMIT_GET_RGCTX (rgctx, context_used);
7205 args [1] = emit_get_rgctx_klass (cfg, context_used, rgctx, klass, MONO_RGCTX_INFO_KLASS);
7207 *sp = mono_emit_jit_icall (cfg, mono_object_isinst, args);
7211 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7212 MonoMethod *mono_isinst;
7213 MonoInst *iargs [1];
7216 mono_isinst = mono_marshal_get_isinst (klass);
7219 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
7220 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7221 g_assert (costs > 0);
7224 cfg->real_offset += 5;
7229 inline_costs += costs;
7232 ins = handle_isinst (cfg, klass, *sp);
7239 case CEE_UNBOX_ANY: {
7240 MonoInst *rgctx = NULL;
7245 token = read32 (ip + 1);
7246 klass = mini_get_class (method, token, generic_context);
7247 CHECK_TYPELOAD (klass);
7249 mono_save_token_info (cfg, image, token, klass);
7251 if (cfg->generic_sharing_context)
7252 context_used = mono_class_check_context_used (klass);
7254 if (generic_class_is_reference_type (cfg, klass)) {
7257 MonoInst *iargs [2];
7259 GENERIC_SHARING_FAILURE_IF_VALUETYPE_METHOD (*ip);
7264 EMIT_GET_RGCTX (rgctx, context_used);
7265 iargs [1] = emit_get_rgctx_klass (cfg, context_used, rgctx, klass, MONO_RGCTX_INFO_KLASS);
7266 ins = mono_emit_jit_icall (cfg, mono_object_castclass, iargs);
7270 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7271 MonoMethod *mono_castclass;
7272 MonoInst *iargs [1];
7275 mono_castclass = mono_marshal_get_castclass (klass);
7278 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7279 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7281 g_assert (costs > 0);
7284 cfg->real_offset += 5;
7288 inline_costs += costs;
7290 ins = handle_castclass (cfg, klass, *sp);
7299 EMIT_GET_RGCTX (rgctx, context_used);
7301 if (mono_class_is_nullable (klass)) {
7302 ins = handle_unbox_nullable (cfg, *sp, klass, context_used, rgctx);
7309 ins = handle_unbox (cfg, klass, sp, context_used, rgctx);
7315 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7328 token = read32 (ip + 1);
7329 klass = mini_get_class (method, token, generic_context);
7330 CHECK_TYPELOAD (klass);
7332 mono_save_token_info (cfg, image, token, klass);
7334 if (cfg->generic_sharing_context)
7335 context_used = mono_class_check_context_used (klass);
7337 if (generic_class_is_reference_type (cfg, klass)) {
7343 if (klass == mono_defaults.void_class)
7345 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
7347 /* frequent check in generic code: box (struct), brtrue */
7348 if (!mono_class_is_nullable (klass) &&
7349 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) && (ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S)) {
7350 /*printf ("box-brtrue opt at 0x%04x in %s\n", real_offset, method->name);*/
7352 MONO_INST_NEW (cfg, ins, OP_BR);
7353 if (*ip == CEE_BRTRUE_S) {
7356 target = ip + 1 + (signed char)(*ip);
7361 target = ip + 4 + (gint)(read32 (ip));
7364 GET_BBLOCK (cfg, tblock, target);
7365 link_bblock (cfg, bblock, tblock);
7366 ins->inst_target_bb = tblock;
7367 GET_BBLOCK (cfg, tblock, ip);
7369 * This leads to some inconsistency, since the two bblocks are not
7370 * really connected, but it is needed for handling stack arguments
7371 * correct (See test_0_box_brtrue_opt_regress_81102).
7373 link_bblock (cfg, bblock, tblock);
7374 if (sp != stack_start) {
7375 handle_stack_args (cfg, stack_start, sp - stack_start);
7377 CHECK_UNVERIFIABLE (cfg);
7379 MONO_ADD_INS (bblock, ins);
7380 start_new_bblock = 1;
7389 EMIT_GET_RGCTX (rgctx, context_used);
7390 if (cfg->opt & MONO_OPT_SHARED)
7391 rgctx_info = MONO_RGCTX_INFO_KLASS;
7393 rgctx_info = MONO_RGCTX_INFO_VTABLE;
7394 data = emit_get_rgctx_klass (cfg, context_used, rgctx, klass, rgctx_info);
7395 *sp++ = handle_box_from_inst (cfg, val, klass, context_used, rgctx, data);
7397 *sp++ = handle_box (cfg, val, klass);
7405 MonoInst *rgctx = NULL;
7410 token = read32 (ip + 1);
7411 klass = mini_get_class (method, token, generic_context);
7412 CHECK_TYPELOAD (klass);
7414 mono_save_token_info (cfg, image, token, klass);
7416 if (cfg->generic_sharing_context)
7417 context_used = mono_class_check_context_used (klass);
7420 EMIT_GET_RGCTX (rgctx, context_used);
7422 if (mono_class_is_nullable (klass)) {
7425 val = handle_unbox_nullable (cfg, *sp, klass, context_used, rgctx);
7426 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
7430 ins = handle_unbox (cfg, klass, sp, context_used, rgctx);
7440 MonoClassField *field;
7444 if (*ip == CEE_STFLD) {
7451 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
7453 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
7456 token = read32 (ip + 1);
7457 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7458 field = mono_method_get_wrapper_data (method, token);
7459 klass = field->parent;
7462 field = mono_field_from_token (image, token, &klass, generic_context);
7466 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
7467 FIELD_ACCESS_FAILURE;
7468 mono_class_init (klass);
7470 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
7471 if (*ip == CEE_STFLD) {
7472 if (target_type_is_incompatible (cfg, field->type, sp [1]))
7474 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
7475 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
7476 MonoInst *iargs [5];
7479 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
7480 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
7481 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
7485 if (cfg->opt & MONO_OPT_INLINE) {
7486 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
7487 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7488 g_assert (costs > 0);
7491 cfg->real_offset += 5;
7494 inline_costs += costs;
7497 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
7502 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
7504 store->flags |= ins_flag;
7511 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
7512 MonoMethod *wrapper = (*ip == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
7513 MonoInst *iargs [4];
7516 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
7517 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
7518 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
7519 if ((cfg->opt & MONO_OPT_INLINE) && !MONO_TYPE_ISSTRUCT (mono_method_signature (wrapper)->ret)) {
7520 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
7521 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7523 g_assert (costs > 0);
7526 cfg->real_offset += 5;
7530 inline_costs += costs;
7533 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
7537 if (sp [0]->type == STACK_VTYPE) {
7540 /* Have to compute the address of the variable */
7542 var = get_vreg_to_inst (cfg, sp [0]->dreg);
7544 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
7546 g_assert (var->klass == klass);
7548 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
7552 if (*ip == CEE_LDFLDA) {
7553 dreg = alloc_preg (cfg);
7555 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
7556 ins->klass = mono_class_from_mono_type (field->type);
7557 ins->type = STACK_MP;
7562 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
7563 load->flags |= ins_flag;
7574 MonoClassField *field;
7575 gpointer addr = NULL;
7576 gboolean is_special_static;
7579 token = read32 (ip + 1);
7581 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7582 field = mono_method_get_wrapper_data (method, token);
7583 klass = field->parent;
7586 field = mono_field_from_token (image, token, &klass, generic_context);
7589 mono_class_init (klass);
7590 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
7591 FIELD_ACCESS_FAILURE;
7594 * We can only support shared generic static
7595 * field access on architectures where the
7596 * trampoline code has been extended to handle
7597 * the generic class init.
7599 #ifndef MONO_ARCH_VTABLE_REG
7600 GENERIC_SHARING_FAILURE (*ip);
7603 if (cfg->generic_sharing_context)
7604 context_used = mono_class_check_context_used (klass);
7606 g_assert (!(field->type->attrs & FIELD_ATTRIBUTE_LITERAL));
7608 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
7609 * to be called here.
7611 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
7612 mono_class_vtable (cfg->domain, klass);
7613 CHECK_TYPELOAD (klass);
7615 mono_domain_lock (cfg->domain);
7616 if (cfg->domain->special_static_fields)
7617 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
7618 mono_domain_unlock (cfg->domain);
7620 is_special_static = mono_class_field_is_special_static (field);
7622 /* Generate IR to compute the field address */
7624 if ((cfg->opt & MONO_OPT_SHARED) ||
7625 (cfg->compile_aot && is_special_static) ||
7626 (context_used && is_special_static)) {
7627 MonoInst *iargs [2];
7629 g_assert (field->parent);
7630 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7634 EMIT_GET_RGCTX (rgctx, context_used);
7635 iargs [1] = emit_get_rgctx_field (cfg, context_used, rgctx, field, MONO_RGCTX_INFO_CLASS_FIELD);
7637 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
7639 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
7640 } else if (context_used) {
7641 MonoInst *rgctx, *static_data;
7644 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
7645 method->klass->name_space, method->klass->name, method->name,
7646 depth, field->offset);
7649 if (mono_class_needs_cctor_run (klass, method)) {
7651 MonoInst *vtable, *rgctx;
7653 EMIT_GET_RGCTX (rgctx, context_used);
7654 vtable = emit_get_rgctx_klass (cfg, context_used, rgctx, klass, MONO_RGCTX_INFO_VTABLE);
7656 // FIXME: This doesn't work since it tries to pass the argument
7657 // in the normal way, instead of using MONO_ARCH_VTABLE_REG
7659 * The vtable pointer is always passed in a register regardless of
7660 * the calling convention, so assign it manually, and make a call
7661 * using a signature without parameters.
7663 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable);
7664 #ifdef MONO_ARCH_VTABLE_REG
7665 mono_call_inst_add_outarg_reg (cfg, call, vtable->dreg, MONO_ARCH_VTABLE_REG, FALSE);
7666 cfg->uses_vtable_reg = TRUE;
7673 * The pointer we're computing here is
7675 * super_info.static_data + field->offset
7677 EMIT_GET_RGCTX (rgctx, context_used);
7678 static_data = emit_get_rgctx_klass (cfg, context_used, rgctx, klass, MONO_RGCTX_INFO_STATIC_DATA);
7680 if (field->offset == 0) {
7683 int addr_reg = mono_alloc_preg (cfg);
7684 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
7686 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
7687 MonoInst *iargs [2];
7689 g_assert (field->parent);
7690 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7691 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
7692 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
7694 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
7696 CHECK_TYPELOAD (klass);
7698 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
7699 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
7700 if (cfg->verbose_level > 2)
7701 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, field->name);
7702 class_inits = g_slist_prepend (class_inits, vtable);
7704 if (cfg->run_cctors) {
7706 /* This makes so that inline cannot trigger */
7707 /* .cctors: too many apps depend on them */
7708 /* running with a specific order... */
7709 if (! vtable->initialized)
7711 ex = mono_runtime_class_init_full (vtable, FALSE);
7713 set_exception_object (cfg, ex);
7714 goto exception_exit;
7718 addr = (char*)vtable->data + field->offset;
7720 if (cfg->compile_aot)
7721 EMIT_NEW_SFLDACONST (cfg, ins, field);
7723 EMIT_NEW_PCONST (cfg, ins, addr);
7726 * insert call to mono_threads_get_static_data (GPOINTER_TO_UINT (addr))
7727 * This could be later optimized to do just a couple of
7728 * memory dereferences with constant offsets.
7730 MonoInst *iargs [1];
7731 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
7732 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
7736 /* Generate IR to do the actual load/store operation */
7738 if (*ip == CEE_LDSFLDA) {
7739 ins->klass = mono_class_from_mono_type (field->type);
7741 } else if (*ip == CEE_STSFLD) {
7746 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, ins->dreg, 0, sp [0]->dreg);
7747 store->flags |= ins_flag;
7750 gboolean is_const = FALSE;
7751 MonoVTable *vtable = NULL;
7753 if (!context_used) {
7754 vtable = mono_class_vtable (cfg->domain, klass);
7755 CHECK_TYPELOAD (klass);
7757 if (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) &&
7758 vtable->initialized && (field->type->attrs & FIELD_ATTRIBUTE_INIT_ONLY)) {
7759 gpointer addr = (char*)vtable->data + field->offset;
7760 int ro_type = field->type->type;
7761 if (ro_type == MONO_TYPE_VALUETYPE && field->type->data.klass->enumtype) {
7762 ro_type = field->type->data.klass->enum_basetype->type;
7764 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, field->name);*/
7767 case MONO_TYPE_BOOLEAN:
7769 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
7773 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
7776 case MONO_TYPE_CHAR:
7778 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
7782 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
7787 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
7791 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
7796 case MONO_TYPE_STRING:
7797 case MONO_TYPE_OBJECT:
7798 case MONO_TYPE_CLASS:
7799 case MONO_TYPE_SZARRAY:
7801 case MONO_TYPE_FNPTR:
7802 case MONO_TYPE_ARRAY:
7803 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
7804 type_to_eval_stack_type ((cfg), field->type, *sp);
7809 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
7814 case MONO_TYPE_VALUETYPE:
7824 CHECK_STACK_OVF (1);
7826 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
7827 load->flags |= ins_flag;
7839 token = read32 (ip + 1);
7840 klass = mini_get_class (method, token, generic_context);
7841 CHECK_TYPELOAD (klass);
7842 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
7843 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
7854 const char *data_ptr;
7861 token = read32 (ip + 1);
7863 klass = mini_get_class (method, token, generic_context);
7864 CHECK_TYPELOAD (klass);
7866 if (cfg->generic_sharing_context)
7867 context_used = mono_class_check_context_used (klass);
7873 /* FIXME: Decompose later to help abcrem */
7876 EMIT_GET_RGCTX (rgctx, context_used);
7877 args [0] = emit_get_rgctx_klass (cfg, context_used, rgctx, mono_array_class_get (klass, 1), MONO_RGCTX_INFO_VTABLE);
7882 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
7884 if (cfg->opt & MONO_OPT_SHARED) {
7885 /* Decompose now to avoid problems with references to the domainvar */
7886 MonoInst *iargs [3];
7888 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7889 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
7892 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
7894 /* Decompose later since it is needed by abcrem */
7895 MONO_INST_NEW (cfg, ins, OP_NEWARR);
7896 ins->dreg = alloc_preg (cfg);
7897 ins->sreg1 = sp [0]->dreg;
7898 ins->inst_newa_class = klass;
7899 ins->type = STACK_OBJ;
7901 MONO_ADD_INS (cfg->cbb, ins);
7902 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
7903 cfg->cbb->has_array_access = TRUE;
7905 /* Needed so mono_emit_load_get_addr () gets called */
7906 mono_get_got_var (cfg);
7916 * we inline/optimize the initialization sequence if possible.
7917 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
7918 * for small sizes open code the memcpy
7919 * ensure the rva field is big enough
7921 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size))) {
7922 MonoMethod *memcpy_method = get_memcpy_method ();
7923 MonoInst *iargs [3];
7924 int add_reg = alloc_preg (cfg);
7926 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
7927 if (cfg->compile_aot) {
7928 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(data_ptr), STACK_PTR, NULL);
7930 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
7932 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
7933 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
7942 if (sp [0]->type != STACK_OBJ)
7945 dreg = alloc_preg (cfg);
7946 MONO_INST_NEW (cfg, ins, OP_LDLEN);
7947 ins->dreg = alloc_preg (cfg);
7948 ins->sreg1 = sp [0]->dreg;
7949 ins->type = STACK_I4;
7950 MONO_ADD_INS (cfg->cbb, ins);
7951 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
7952 cfg->cbb->has_array_access = TRUE;
7960 if (sp [0]->type != STACK_OBJ)
7963 cfg->flags |= MONO_CFG_HAS_LDELEMA;
7965 klass = mini_get_class (method, read32 (ip + 1), generic_context);
7966 CHECK_TYPELOAD (klass);
7967 /* we need to make sure that this array is exactly the type it needs
7968 * to be for correctness. the wrappers are lax with their usage
7969 * so we need to ignore them here
7971 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly)
7972 mini_emit_check_array_type (cfg, sp [0], mono_array_class_get (klass, 1));
7975 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
7979 case CEE_LDELEM_ANY:
7990 case CEE_LDELEM_REF: {
7996 if (*ip == CEE_LDELEM_ANY) {
7998 token = read32 (ip + 1);
7999 klass = mini_get_class (method, token, generic_context);
8000 CHECK_TYPELOAD (klass);
8001 mono_class_init (klass);
8004 klass = array_access_to_klass (*ip);
8006 if (sp [0]->type != STACK_OBJ)
8009 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8011 if (sp [1]->opcode == OP_ICONST) {
8012 int array_reg = sp [0]->dreg;
8013 int index_reg = sp [1]->dreg;
8014 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8016 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8017 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
8019 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8020 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
8023 if (*ip == CEE_LDELEM_ANY)
8036 case CEE_STELEM_REF:
8037 case CEE_STELEM_ANY: {
8043 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8045 if (*ip == CEE_STELEM_ANY) {
8047 token = read32 (ip + 1);
8048 klass = mini_get_class (method, token, generic_context);
8049 CHECK_TYPELOAD (klass);
8050 mono_class_init (klass);
8053 klass = array_access_to_klass (*ip);
8055 if (sp [0]->type != STACK_OBJ)
8058 /* storing a NULL doesn't need any of the complex checks in stelemref */
8059 if (generic_class_is_reference_type (cfg, klass) &&
8060 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
8061 MonoMethod* helper = mono_marshal_get_stelemref ();
8062 MonoInst *iargs [3];
8064 if (sp [0]->type != STACK_OBJ)
8066 if (sp [2]->type != STACK_OBJ)
8073 mono_emit_method_call (cfg, helper, iargs, NULL);
8075 if (sp [1]->opcode == OP_ICONST) {
8076 int array_reg = sp [0]->dreg;
8077 int index_reg = sp [1]->dreg;
8078 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8080 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8081 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
8083 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8084 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
8088 if (*ip == CEE_STELEM_ANY)
8095 case CEE_CKFINITE: {
8099 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
8100 ins->sreg1 = sp [0]->dreg;
8101 ins->dreg = alloc_freg (cfg);
8102 ins->type = STACK_R8;
8103 MONO_ADD_INS (bblock, ins);
8106 mono_decompose_opcode (cfg, ins);
8111 case CEE_REFANYVAL: {
8112 MonoInst *src_var, *src;
8114 int klass_reg = alloc_preg (cfg);
8115 int dreg = alloc_preg (cfg);
8118 MONO_INST_NEW (cfg, ins, *ip);
8121 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8122 CHECK_TYPELOAD (klass);
8123 mono_class_init (klass);
8125 if (cfg->generic_sharing_context)
8126 context_used = mono_class_check_context_used (klass);
8129 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
8131 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
8132 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
8133 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
8136 MonoInst *rgctx, *klass_ins;
8138 EMIT_GET_RGCTX (rgctx, context_used);
8139 klass_ins = emit_get_rgctx_klass (cfg, context_used, rgctx, klass, MONO_RGCTX_INFO_KLASS);
8142 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
8143 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
8145 mini_emit_class_check (cfg, klass_reg, klass);
8147 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
8148 ins->type = STACK_MP;
8153 case CEE_MKREFANY: {
8154 MonoInst *loc, *addr;
8157 MONO_INST_NEW (cfg, ins, *ip);
8160 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8161 CHECK_TYPELOAD (klass);
8162 mono_class_init (klass);
8164 if (cfg->generic_sharing_context)
8165 context_used = mono_class_check_context_used (klass);
8167 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
8168 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
8171 MonoInst *rgctx, *const_ins;
8172 int type_reg = alloc_preg (cfg);
8174 EMIT_GET_RGCTX (rgctx, context_used);
8175 const_ins = emit_get_rgctx_klass (cfg, context_used, rgctx, klass, MONO_RGCTX_INFO_KLASS);
8176 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
8177 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8178 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8179 } else if (cfg->compile_aot) {
8180 int const_reg = alloc_preg (cfg);
8181 int type_reg = alloc_preg (cfg);
8183 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
8184 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
8185 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8186 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8188 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
8189 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
8191 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
8193 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
8194 ins->type = STACK_VTYPE;
8195 ins->klass = mono_defaults.typed_reference_class;
8202 MonoClass *handle_class;
8204 CHECK_STACK_OVF (1);
8207 n = read32 (ip + 1);
8209 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
8210 handle = mono_method_get_wrapper_data (method, n);
8211 handle_class = mono_method_get_wrapper_data (method, n + 1);
8212 if (handle_class == mono_defaults.typehandle_class)
8213 handle = &((MonoClass*)handle)->byval_arg;
8216 handle = mono_ldtoken (image, n, &handle_class, generic_context);
8220 mono_class_init (handle_class);
8221 if (cfg->generic_sharing_context) {
8222 if (handle_class == mono_defaults.typehandle_class) {
8223 /* If we get a MONO_TYPE_CLASS
8224 then we need to provide the
8226 instantiation of it. */
8227 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
8230 context_used = mono_class_check_context_used (mono_class_from_mono_type (handle));
8231 } else if (handle_class == mono_defaults.fieldhandle_class)
8232 context_used = mono_class_check_context_used (((MonoClassField*)handle)->parent);
8233 else if (handle_class == mono_defaults.methodhandle_class)
8234 context_used = mono_method_check_context_used (handle);
8236 g_assert_not_reached ();
8239 if (cfg->opt & MONO_OPT_SHARED) {
8240 MonoInst *addr, *vtvar, *iargs [3];
8241 int method_context_used;
8243 if (cfg->generic_sharing_context)
8244 method_context_used = mono_method_check_context_used (method);
8246 method_context_used = 0;
8248 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8250 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
8251 EMIT_NEW_ICONST (cfg, iargs [1], n);
8252 if (method_context_used) {
8255 EMIT_GET_RGCTX (rgctx, method_context_used);
8256 iargs [2] = emit_get_rgctx_method (cfg, method_context_used, rgctx, method, MONO_RGCTX_INFO_METHOD);
8257 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
8259 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
8260 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
8262 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8264 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8266 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8268 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
8269 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
8270 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
8271 (cmethod->klass == mono_defaults.monotype_class->parent) &&
8272 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
8273 MonoClass *tclass = mono_class_from_mono_type (handle);
8275 mono_class_init (tclass);
8279 EMIT_GET_RGCTX (rgctx, context_used);
8280 ins = emit_get_rgctx_klass (cfg, context_used, rgctx, tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
8281 } else if (cfg->compile_aot) {
8282 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n);
8284 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
8286 ins->type = STACK_OBJ;
8287 ins->klass = cmethod->klass;
8290 MonoInst *addr, *vtvar;
8292 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8297 EMIT_GET_RGCTX (rgctx, context_used);
8298 if (handle_class == mono_defaults.typehandle_class) {
8299 ins = emit_get_rgctx_klass (cfg, context_used, rgctx,
8300 mono_class_from_mono_type (handle),
8301 MONO_RGCTX_INFO_TYPE);
8302 } else if (handle_class == mono_defaults.methodhandle_class) {
8303 ins = emit_get_rgctx_method (cfg, context_used, rgctx,
8304 handle, MONO_RGCTX_INFO_METHOD);
8305 } else if (handle_class == mono_defaults.fieldhandle_class) {
8306 ins = emit_get_rgctx_field (cfg, context_used, rgctx,
8307 handle, MONO_RGCTX_INFO_CLASS_FIELD);
8309 g_assert_not_reached ();
8311 } else if (cfg->compile_aot) {
8312 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n);
8314 EMIT_NEW_PCONST (cfg, ins, handle);
8316 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8317 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8318 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8328 MONO_INST_NEW (cfg, ins, OP_THROW);
8330 ins->sreg1 = sp [0]->dreg;
8332 bblock->out_of_line = TRUE;
8333 MONO_ADD_INS (bblock, ins);
8334 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
8335 MONO_ADD_INS (bblock, ins);
8338 link_bblock (cfg, bblock, end_bblock);
8339 start_new_bblock = 1;
8341 case CEE_ENDFINALLY:
8342 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
8343 MONO_ADD_INS (bblock, ins);
8345 start_new_bblock = 1;
8348 * Control will leave the method so empty the stack, otherwise
8349 * the next basic block will start with a nonempty stack.
8351 while (sp != stack_start) {
8359 if (*ip == CEE_LEAVE) {
8361 target = ip + 5 + (gint32)read32(ip + 1);
8364 target = ip + 2 + (signed char)(ip [1]);
8367 /* empty the stack */
8368 while (sp != stack_start) {
8373 * If this leave statement is in a catch block, check for a
8374 * pending exception, and rethrow it if necessary.
8376 for (i = 0; i < header->num_clauses; ++i) {
8377 MonoExceptionClause *clause = &header->clauses [i];
8380 * Use <= in the final comparison to handle clauses with multiple
8381 * leave statements, like in bug #78024.
8382 * The ordering of the exception clauses guarantees that we find the
8385 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len)) {
8387 MonoBasicBlock *dont_throw;
8392 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
8395 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
8397 NEW_BBLOCK (cfg, dont_throw);
8400 * Currently, we always rethrow the abort exception, despite the
8401 * fact that this is not correct. See thread6.cs for an example.
8402 * But propagating the abort exception is more important than
8403 * getting the semantics right.
8405 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
8406 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
8407 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
8409 MONO_START_BB (cfg, dont_throw);
8414 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
8416 for (tmp = handlers; tmp; tmp = tmp->next) {
8418 link_bblock (cfg, bblock, tblock);
8419 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
8420 ins->inst_target_bb = tblock;
8421 MONO_ADD_INS (bblock, ins);
8423 g_list_free (handlers);
8426 MONO_INST_NEW (cfg, ins, OP_BR);
8427 MONO_ADD_INS (bblock, ins);
8428 GET_BBLOCK (cfg, tblock, target);
8429 link_bblock (cfg, bblock, tblock);
8430 ins->inst_target_bb = tblock;
8431 start_new_bblock = 1;
8433 if (*ip == CEE_LEAVE)
8442 * Mono specific opcodes
8444 case MONO_CUSTOM_PREFIX: {
8446 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
8450 case CEE_MONO_ICALL: {
8452 MonoJitICallInfo *info;
8454 token = read32 (ip + 2);
8455 func = mono_method_get_wrapper_data (method, token);
8456 info = mono_find_jit_icall_by_addr (func);
8459 CHECK_STACK (info->sig->param_count);
8460 sp -= info->sig->param_count;
8462 ins = mono_emit_jit_icall (cfg, info->func, sp);
8463 if (!MONO_TYPE_IS_VOID (info->sig->ret))
8467 inline_costs += 10 * num_calls++;
8471 case CEE_MONO_LDPTR: {
8474 CHECK_STACK_OVF (1);
8476 token = read32 (ip + 2);
8478 ptr = mono_method_get_wrapper_data (method, token);
8479 if (cfg->compile_aot && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (strstr (method->name, "__icall_wrapper_") == method->name)) {
8480 MonoJitICallInfo *callinfo;
8481 const char *icall_name;
8483 icall_name = method->name + strlen ("__icall_wrapper_");
8484 g_assert (icall_name);
8485 callinfo = mono_find_jit_icall_by_name (icall_name);
8486 g_assert (callinfo);
8488 if (ptr == callinfo->func) {
8489 /* Will be transformed into an AOTCONST later */
8490 EMIT_NEW_PCONST (cfg, ins, ptr);
8496 /* FIXME: Generalize this */
8497 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
8498 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
8503 EMIT_NEW_PCONST (cfg, ins, ptr);
8506 inline_costs += 10 * num_calls++;
8507 /* Can't embed random pointers into AOT code */
8508 cfg->disable_aot = 1;
8511 case CEE_MONO_ICALL_ADDR: {
8512 MonoMethod *cmethod;
8515 CHECK_STACK_OVF (1);
8517 token = read32 (ip + 2);
8519 cmethod = mono_method_get_wrapper_data (method, token);
8521 if (cfg->compile_aot) {
8522 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
8524 ptr = mono_lookup_internal_call (cmethod);
8526 EMIT_NEW_PCONST (cfg, ins, ptr);
8532 case CEE_MONO_VTADDR: {
8533 MonoInst *src_var, *src;
8539 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
8540 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
8545 case CEE_MONO_NEWOBJ: {
8546 MonoInst *iargs [2];
8548 CHECK_STACK_OVF (1);
8550 token = read32 (ip + 2);
8551 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
8552 mono_class_init (klass);
8553 NEW_DOMAINCONST (cfg, iargs [0]);
8554 MONO_ADD_INS (cfg->cbb, iargs [0]);
8555 NEW_CLASSCONST (cfg, iargs [1], klass);
8556 MONO_ADD_INS (cfg->cbb, iargs [1]);
8557 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
8559 inline_costs += 10 * num_calls++;
8562 case CEE_MONO_OBJADDR:
8565 MONO_INST_NEW (cfg, ins, OP_MOVE);
8566 ins->dreg = alloc_preg (cfg);
8567 ins->sreg1 = sp [0]->dreg;
8568 ins->type = STACK_MP;
8569 MONO_ADD_INS (cfg->cbb, ins);
8573 case CEE_MONO_LDNATIVEOBJ:
8575 * Similar to LDOBJ, but instead load the unmanaged
8576 * representation of the vtype to the stack.
8581 token = read32 (ip + 2);
8582 klass = mono_method_get_wrapper_data (method, token);
8583 g_assert (klass->valuetype);
8584 mono_class_init (klass);
8587 MonoInst *src, *dest, *temp;
8590 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
8591 temp->backend.is_pinvoke = 1;
8592 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
8593 mini_emit_stobj (cfg, dest, src, klass, TRUE);
8595 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
8596 dest->type = STACK_VTYPE;
8597 dest->klass = klass;
8603 case CEE_MONO_RETOBJ: {
8605 * Same as RET, but return the native representation of a vtype
8608 g_assert (cfg->ret);
8609 g_assert (mono_method_signature (method)->pinvoke);
8614 token = read32 (ip + 2);
8615 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
8617 if (!cfg->vret_addr) {
8618 g_assert (cfg->ret_var_is_local);
8620 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
8622 EMIT_NEW_RETLOADA (cfg, ins);
8624 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
8626 if (sp != stack_start)
8629 MONO_INST_NEW (cfg, ins, OP_BR);
8630 ins->inst_target_bb = end_bblock;
8631 MONO_ADD_INS (bblock, ins);
8632 link_bblock (cfg, bblock, end_bblock);
8633 start_new_bblock = 1;
8637 case CEE_MONO_CISINST:
8638 case CEE_MONO_CCASTCLASS: {
8643 token = read32 (ip + 2);
8644 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
8645 if (ip [1] == CEE_MONO_CISINST)
8646 ins = handle_cisinst (cfg, klass, sp [0]);
8648 ins = handle_ccastclass (cfg, klass, sp [0]);
8654 case CEE_MONO_SAVE_LMF:
8655 case CEE_MONO_RESTORE_LMF:
8656 #ifdef MONO_ARCH_HAVE_LMF_OPS
8657 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
8658 MONO_ADD_INS (bblock, ins);
8659 cfg->need_lmf_area = TRUE;
8663 case CEE_MONO_CLASSCONST:
8664 CHECK_STACK_OVF (1);
8666 token = read32 (ip + 2);
8667 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
8670 inline_costs += 10 * num_calls++;
8672 case CEE_MONO_NOT_TAKEN:
8673 bblock->out_of_line = TRUE;
8677 CHECK_STACK_OVF (1);
8679 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
8680 ins->dreg = alloc_preg (cfg);
8681 ins->inst_offset = (gint32)read32 (ip + 2);
8682 ins->type = STACK_PTR;
8683 MONO_ADD_INS (bblock, ins);
8688 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
8698 /* somewhat similar to LDTOKEN */
8699 MonoInst *addr, *vtvar;
8700 CHECK_STACK_OVF (1);
8701 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
8703 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8704 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
8706 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8707 ins->type = STACK_VTYPE;
8708 ins->klass = mono_defaults.argumenthandle_class;
8721 * The following transforms:
8722 * CEE_CEQ into OP_CEQ
8723 * CEE_CGT into OP_CGT
8724 * CEE_CGT_UN into OP_CGT_UN
8725 * CEE_CLT into OP_CLT
8726 * CEE_CLT_UN into OP_CLT_UN
8728 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
8730 MONO_INST_NEW (cfg, ins, cmp->opcode);
8732 cmp->sreg1 = sp [0]->dreg;
8733 cmp->sreg2 = sp [1]->dreg;
8734 type_from_op (cmp, sp [0], sp [1]);
8736 if ((sp [0]->type == STACK_I8) || ((sizeof (gpointer) == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
8737 cmp->opcode = OP_LCOMPARE;
8738 else if (sp [0]->type == STACK_R8)
8739 cmp->opcode = OP_FCOMPARE;
8741 cmp->opcode = OP_ICOMPARE;
8742 MONO_ADD_INS (bblock, cmp);
8743 ins->type = STACK_I4;
8744 ins->dreg = alloc_dreg (cfg, ins->type);
8745 type_from_op (ins, sp [0], sp [1]);
8747 if (cmp->opcode == OP_FCOMPARE) {
8749 * The backends expect the fceq opcodes to do the
8752 cmp->opcode = OP_NOP;
8753 ins->sreg1 = cmp->sreg1;
8754 ins->sreg2 = cmp->sreg2;
8756 MONO_ADD_INS (bblock, ins);
8763 MonoMethod *cil_method, *ctor_method;
8764 gboolean is_shared = FALSE;
8766 CHECK_STACK_OVF (1);
8768 n = read32 (ip + 2);
8769 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
8772 mono_class_init (cmethod->klass);
8774 mono_save_token_info (cfg, image, n, cmethod);
8776 if (cfg->generic_sharing_context)
8777 context_used = mono_method_check_context_used (cmethod);
8779 if (mono_class_generic_sharing_enabled (cmethod->klass)) {
8780 if ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) &&
8781 (cmethod->klass->generic_class ||
8782 cmethod->klass->generic_container)) {
8785 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst)
8789 cil_method = cmethod;
8790 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
8791 METHOD_ACCESS_FAILURE;
8793 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
8794 if (check_linkdemand (cfg, method, cmethod))
8796 CHECK_CFG_EXCEPTION;
8797 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
8798 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
8802 * Optimize the common case of ldftn+delegate creation
8804 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE) && !defined(HAVE_WRITE_BARRIERS)
8805 /* FIXME: SGEN support */
8806 /* FIXME: handle shared static generic methods */
8807 /* FIXME: handle this in shared code */
8808 if (!is_shared && !context_used && (sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ) && (ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context)) && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
8809 MonoInst *target_ins;
8812 if (cfg->verbose_level > 3)
8813 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
8814 target_ins = sp [-1];
8816 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod);
8827 cmethod = mono_marshal_get_static_rgctx_invoke (cmethod);
8829 EMIT_GET_RGCTX (rgctx, context_used);
8830 argconst = emit_get_rgctx_method (cfg, context_used, rgctx, cmethod, MONO_RGCTX_INFO_METHOD);
8831 } else if (is_shared) {
8832 EMIT_NEW_METHODCONST (cfg, argconst, mono_marshal_get_static_rgctx_invoke (cmethod));
8834 EMIT_NEW_METHODCONST (cfg, argconst, cmethod);
8836 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
8840 inline_costs += 10 * num_calls++;
8843 case CEE_LDVIRTFTN: {
8848 n = read32 (ip + 2);
8849 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
8852 mono_class_init (cmethod->klass);
8854 if (cfg->generic_sharing_context)
8855 context_used = mono_method_check_context_used (cmethod);
8857 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
8858 if (check_linkdemand (cfg, method, cmethod))
8860 CHECK_CFG_EXCEPTION;
8861 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
8862 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
8871 EMIT_GET_RGCTX (rgctx, context_used);
8872 args [1] = emit_get_rgctx_method (cfg, context_used, rgctx, cmethod, MONO_RGCTX_INFO_METHOD);
8873 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
8875 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
8876 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
8880 inline_costs += 10 * num_calls++;
8884 CHECK_STACK_OVF (1);
8886 n = read16 (ip + 2);
8888 EMIT_NEW_ARGLOAD (cfg, ins, n);
8893 CHECK_STACK_OVF (1);
8895 n = read16 (ip + 2);
8897 NEW_ARGLOADA (cfg, ins, n);
8898 MONO_ADD_INS (cfg->cbb, ins);
8906 n = read16 (ip + 2);
8908 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
8910 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
8914 CHECK_STACK_OVF (1);
8916 n = read16 (ip + 2);
8918 EMIT_NEW_LOCLOAD (cfg, ins, n);
8923 CHECK_STACK_OVF (1);
8925 n = read16 (ip + 2);
8927 EMIT_NEW_LOCLOADA (cfg, ins, n);
8935 n = read16 (ip + 2);
8937 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
8939 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
8946 if (sp != stack_start)
8948 if (cfg->method != method)
8950 * Inlining this into a loop in a parent could lead to
8951 * stack overflows which is different behavior than the
8952 * non-inlined case, thus disable inlining in this case.
8954 goto inline_failure;
8956 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
8957 ins->dreg = alloc_preg (cfg);
8958 ins->sreg1 = sp [0]->dreg;
8959 ins->type = STACK_PTR;
8960 MONO_ADD_INS (cfg->cbb, ins);
8962 cfg->flags |= MONO_CFG_HAS_ALLOCA;
8963 if (header->init_locals)
8964 ins->flags |= MONO_INST_INIT;
8969 case CEE_ENDFILTER: {
8970 MonoExceptionClause *clause, *nearest;
8971 int cc, nearest_num;
8975 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
8977 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
8978 ins->sreg1 = (*sp)->dreg;
8979 MONO_ADD_INS (bblock, ins);
8980 start_new_bblock = 1;
8985 for (cc = 0; cc < header->num_clauses; ++cc) {
8986 clause = &header->clauses [cc];
8987 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
8988 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
8989 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
8995 if ((ip - header->code) != nearest->handler_offset)
9000 case CEE_UNALIGNED_:
9001 ins_flag |= MONO_INST_UNALIGNED;
9002 /* FIXME: record alignment? we can assume 1 for now */
9007 ins_flag |= MONO_INST_VOLATILE;
9011 ins_flag |= MONO_INST_TAILCALL;
9012 cfg->flags |= MONO_CFG_HAS_TAIL;
9013 /* Can't inline tail calls at this time */
9014 inline_costs += 100000;
9021 token = read32 (ip + 2);
9022 klass = mini_get_class (method, token, generic_context);
9023 CHECK_TYPELOAD (klass);
9024 if (generic_class_is_reference_type (cfg, klass)) {
9025 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
9027 GENERIC_SHARING_FAILURE (CEE_INITOBJ);
9028 mini_emit_initobj (cfg, *sp, NULL, klass);
9033 case CEE_CONSTRAINED_:
9035 token = read32 (ip + 2);
9036 constrained_call = mono_class_get_full (image, token, generic_context);
9037 CHECK_TYPELOAD (constrained_call);
9042 MonoInst *iargs [3];
9046 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
9047 mini_emit_memcpy2 (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
9048 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
9049 /* emit_memset only works when val == 0 */
9050 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
9055 if (ip [1] == CEE_CPBLK) {
9056 MonoMethod *memcpy_method = get_memcpy_method ();
9057 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
9059 MonoMethod *memset_method = get_memset_method ();
9060 mono_emit_method_call (cfg, memset_method, iargs, NULL);
9070 ins_flag |= MONO_INST_NOTYPECHECK;
9072 ins_flag |= MONO_INST_NORANGECHECK;
9073 /* we ignore the no-nullcheck for now since we
9074 * really do it explicitly only when doing callvirt->call
9080 int handler_offset = -1;
9082 for (i = 0; i < header->num_clauses; ++i) {
9083 MonoExceptionClause *clause = &header->clauses [i];
9084 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
9085 handler_offset = clause->handler_offset;
9090 bblock->flags |= BB_EXCEPTION_UNSAFE;
9092 g_assert (handler_offset != -1);
9094 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
9095 MONO_INST_NEW (cfg, ins, OP_RETHROW);
9096 ins->sreg1 = load->dreg;
9097 MONO_ADD_INS (bblock, ins);
9099 link_bblock (cfg, bblock, end_bblock);
9100 start_new_bblock = 1;
9108 GENERIC_SHARING_FAILURE (CEE_SIZEOF);
9110 CHECK_STACK_OVF (1);
9112 token = read32 (ip + 2);
9113 /* FIXME: handle generics. */
9114 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC) {
9115 MonoType *type = mono_type_create_from_typespec (image, token);
9116 token = mono_type_size (type, &ialign);
9118 MonoClass *klass = mono_class_get_full (image, token, generic_context);
9119 CHECK_TYPELOAD (klass);
9120 mono_class_init (klass);
9121 token = mono_class_value_size (klass, &align);
9123 EMIT_NEW_ICONST (cfg, ins, token);
9128 case CEE_REFANYTYPE: {
9129 MonoInst *src_var, *src;
9135 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9137 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
9138 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
9139 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typed_reference_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
9149 g_error ("opcode 0xfe 0x%02x not handled", ip [1]);
9154 g_error ("opcode 0x%02x not handled", *ip);
9157 if (start_new_bblock != 1)
9160 bblock->cil_length = ip - bblock->cil_code;
9161 bblock->next_bb = end_bblock;
9163 if (cfg->method == method && cfg->domainvar) {
9165 MonoInst *get_domain;
9167 cfg->cbb = init_localsbb;
9169 if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
9170 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
9173 get_domain->dreg = alloc_preg (cfg);
9174 MONO_ADD_INS (cfg->cbb, get_domain);
9176 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
9177 MONO_ADD_INS (cfg->cbb, store);
9180 if (cfg->method == method && cfg->got_var)
9181 mono_emit_load_got_addr (cfg);
9183 if (header->init_locals) {
9186 cfg->cbb = init_localsbb;
9187 cfg->ip = header->code;
9188 for (i = 0; i < header->num_locals; ++i) {
9189 MonoType *ptype = header->locals [i];
9190 int t = ptype->type;
9191 dreg = cfg->locals [i]->dreg;
9193 if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
9194 t = ptype->data.klass->enum_basetype->type;
9196 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9197 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
9198 MONO_EMIT_NEW_ICONST (cfg, cfg->locals [i]->dreg, 0);
9199 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
9200 MONO_EMIT_NEW_I8CONST (cfg, cfg->locals [i]->dreg, 0);
9201 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
9202 MONO_INST_NEW (cfg, ins, OP_R8CONST);
9203 ins->type = STACK_R8;
9204 ins->inst_p0 = (void*)&r8_0;
9205 ins->dreg = alloc_dreg (cfg, STACK_R8);
9206 MONO_ADD_INS (init_localsbb, ins);
9207 EMIT_NEW_LOCSTORE (cfg, store, i, ins);
9208 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
9209 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
9210 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
9212 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9219 if (cfg->method == method) {
9221 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
9222 bb->region = mono_find_block_region (cfg, bb->real_offset);
9224 mono_create_spvar_for_region (cfg, bb->region);
9225 if (cfg->verbose_level > 2)
9226 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
9230 g_slist_free (class_inits);
9231 dont_inline = g_list_remove (dont_inline, method);
9233 if (inline_costs < 0) {
9236 /* Method is too large */
9237 mname = mono_method_full_name (method, TRUE);
9238 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
9239 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
9244 if ((cfg->verbose_level > 1) && (cfg->method == method))
9245 mono_print_code (cfg, "AFTER METHOD-TO-IR");
9247 return inline_costs;
9250 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
9251 g_slist_free (class_inits);
9252 dont_inline = g_list_remove (dont_inline, method);
9256 g_slist_free (class_inits);
9257 dont_inline = g_list_remove (dont_inline, method);
9261 g_slist_free (class_inits);
9262 dont_inline = g_list_remove (dont_inline, method);
9263 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
9267 g_slist_free (class_inits);
9268 dont_inline = g_list_remove (dont_inline, method);
9269 set_exception_type_from_invalid_il (cfg, method, ip);
9274 store_membase_reg_to_store_membase_imm (int opcode)
9277 case OP_STORE_MEMBASE_REG:
9278 return OP_STORE_MEMBASE_IMM;
9279 case OP_STOREI1_MEMBASE_REG:
9280 return OP_STOREI1_MEMBASE_IMM;
9281 case OP_STOREI2_MEMBASE_REG:
9282 return OP_STOREI2_MEMBASE_IMM;
9283 case OP_STOREI4_MEMBASE_REG:
9284 return OP_STOREI4_MEMBASE_IMM;
9285 case OP_STOREI8_MEMBASE_REG:
9286 return OP_STOREI8_MEMBASE_IMM;
9288 g_assert_not_reached ();
9294 #endif /* DISABLE_JIT */
9297 mono_op_to_op_imm (int opcode)
9307 return OP_IDIV_UN_IMM;
9311 return OP_IREM_UN_IMM;
9325 return OP_ISHR_UN_IMM;
9342 return OP_LSHR_UN_IMM;
9345 return OP_COMPARE_IMM;
9347 return OP_ICOMPARE_IMM;
9349 return OP_LCOMPARE_IMM;
9351 case OP_STORE_MEMBASE_REG:
9352 return OP_STORE_MEMBASE_IMM;
9353 case OP_STOREI1_MEMBASE_REG:
9354 return OP_STOREI1_MEMBASE_IMM;
9355 case OP_STOREI2_MEMBASE_REG:
9356 return OP_STOREI2_MEMBASE_IMM;
9357 case OP_STOREI4_MEMBASE_REG:
9358 return OP_STOREI4_MEMBASE_IMM;
9360 #if defined(__i386__) || defined (__x86_64__)
9362 return OP_X86_PUSH_IMM;
9363 case OP_X86_COMPARE_MEMBASE_REG:
9364 return OP_X86_COMPARE_MEMBASE_IMM;
9366 #if defined(__x86_64__)
9367 case OP_AMD64_ICOMPARE_MEMBASE_REG:
9368 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
9370 case OP_VOIDCALL_REG:
9379 return OP_LOCALLOC_IMM;
9386 ldind_to_load_membase (int opcode)
9390 return OP_LOADI1_MEMBASE;
9392 return OP_LOADU1_MEMBASE;
9394 return OP_LOADI2_MEMBASE;
9396 return OP_LOADU2_MEMBASE;
9398 return OP_LOADI4_MEMBASE;
9400 return OP_LOADU4_MEMBASE;
9402 return OP_LOAD_MEMBASE;
9404 return OP_LOAD_MEMBASE;
9406 return OP_LOADI8_MEMBASE;
9408 return OP_LOADR4_MEMBASE;
9410 return OP_LOADR8_MEMBASE;
9412 g_assert_not_reached ();
9419 stind_to_store_membase (int opcode)
9423 return OP_STOREI1_MEMBASE_REG;
9425 return OP_STOREI2_MEMBASE_REG;
9427 return OP_STOREI4_MEMBASE_REG;
9430 return OP_STORE_MEMBASE_REG;
9432 return OP_STOREI8_MEMBASE_REG;
9434 return OP_STORER4_MEMBASE_REG;
9436 return OP_STORER8_MEMBASE_REG;
9438 g_assert_not_reached ();
9445 mono_load_membase_to_load_mem (int opcode)
9447 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
9448 #if defined(__i386__) || defined(__x86_64__)
9450 case OP_LOAD_MEMBASE:
9452 case OP_LOADU1_MEMBASE:
9453 return OP_LOADU1_MEM;
9454 case OP_LOADU2_MEMBASE:
9455 return OP_LOADU2_MEM;
9456 case OP_LOADI4_MEMBASE:
9457 return OP_LOADI4_MEM;
9458 case OP_LOADU4_MEMBASE:
9459 return OP_LOADU4_MEM;
9460 #if SIZEOF_VOID_P == 8
9461 case OP_LOADI8_MEMBASE:
9462 return OP_LOADI8_MEM;
9471 op_to_op_dest_membase (int store_opcode, int opcode)
9473 #if defined(__i386__)
9474 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
9479 return OP_X86_ADD_MEMBASE_REG;
9481 return OP_X86_SUB_MEMBASE_REG;
9483 return OP_X86_AND_MEMBASE_REG;
9485 return OP_X86_OR_MEMBASE_REG;
9487 return OP_X86_XOR_MEMBASE_REG;
9490 return OP_X86_ADD_MEMBASE_IMM;
9493 return OP_X86_SUB_MEMBASE_IMM;
9496 return OP_X86_AND_MEMBASE_IMM;
9499 return OP_X86_OR_MEMBASE_IMM;
9502 return OP_X86_XOR_MEMBASE_IMM;
9508 #if defined(__x86_64__)
9509 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
9514 return OP_X86_ADD_MEMBASE_REG;
9516 return OP_X86_SUB_MEMBASE_REG;
9518 return OP_X86_AND_MEMBASE_REG;
9520 return OP_X86_OR_MEMBASE_REG;
9522 return OP_X86_XOR_MEMBASE_REG;
9524 return OP_X86_ADD_MEMBASE_IMM;
9526 return OP_X86_SUB_MEMBASE_IMM;
9528 return OP_X86_AND_MEMBASE_IMM;
9530 return OP_X86_OR_MEMBASE_IMM;
9532 return OP_X86_XOR_MEMBASE_IMM;
9534 return OP_AMD64_ADD_MEMBASE_REG;
9536 return OP_AMD64_SUB_MEMBASE_REG;
9538 return OP_AMD64_AND_MEMBASE_REG;
9540 return OP_AMD64_OR_MEMBASE_REG;
9542 return OP_AMD64_XOR_MEMBASE_REG;
9545 return OP_AMD64_ADD_MEMBASE_IMM;
9548 return OP_AMD64_SUB_MEMBASE_IMM;
9551 return OP_AMD64_AND_MEMBASE_IMM;
9554 return OP_AMD64_OR_MEMBASE_IMM;
9557 return OP_AMD64_XOR_MEMBASE_IMM;
9567 op_to_op_store_membase (int store_opcode, int opcode)
9569 #if defined(__i386__) || defined(__x86_64__)
9572 if (store_opcode == OP_STOREI1_MEMBASE_REG)
9573 return OP_X86_SETEQ_MEMBASE;
9575 if (store_opcode == OP_STOREI1_MEMBASE_REG)
9576 return OP_X86_SETNE_MEMBASE;
9584 op_to_op_src1_membase (int load_opcode, int opcode)
9587 /* FIXME: This has sign extension issues */
9589 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
9590 return OP_X86_COMPARE_MEMBASE8_IMM;
9593 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
9598 return OP_X86_PUSH_MEMBASE;
9599 case OP_COMPARE_IMM:
9600 case OP_ICOMPARE_IMM:
9601 return OP_X86_COMPARE_MEMBASE_IMM;
9604 return OP_X86_COMPARE_MEMBASE_REG;
9609 /* FIXME: This has sign extension issues */
9611 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
9612 return OP_X86_COMPARE_MEMBASE8_IMM;
9617 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
9618 return OP_X86_PUSH_MEMBASE;
9620 /* FIXME: This only works for 32 bit immediates
9621 case OP_COMPARE_IMM:
9622 case OP_LCOMPARE_IMM:
9623 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
9624 return OP_AMD64_COMPARE_MEMBASE_IMM;
9626 case OP_ICOMPARE_IMM:
9627 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9628 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
9632 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
9633 return OP_AMD64_COMPARE_MEMBASE_REG;
9636 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9637 return OP_AMD64_ICOMPARE_MEMBASE_REG;
9646 op_to_op_src2_membase (int load_opcode, int opcode)
9649 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
9655 return OP_X86_COMPARE_REG_MEMBASE;
9657 return OP_X86_ADD_REG_MEMBASE;
9659 return OP_X86_SUB_REG_MEMBASE;
9661 return OP_X86_AND_REG_MEMBASE;
9663 return OP_X86_OR_REG_MEMBASE;
9665 return OP_X86_XOR_REG_MEMBASE;
9672 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9673 return OP_AMD64_ICOMPARE_REG_MEMBASE;
9677 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9678 return OP_AMD64_COMPARE_REG_MEMBASE;
9681 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9682 return OP_X86_ADD_REG_MEMBASE;
9684 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9685 return OP_X86_SUB_REG_MEMBASE;
9687 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9688 return OP_X86_AND_REG_MEMBASE;
9690 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9691 return OP_X86_OR_REG_MEMBASE;
9693 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9694 return OP_X86_XOR_REG_MEMBASE;
9696 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9697 return OP_AMD64_ADD_REG_MEMBASE;
9699 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9700 return OP_AMD64_SUB_REG_MEMBASE;
9702 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9703 return OP_AMD64_AND_REG_MEMBASE;
9705 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9706 return OP_AMD64_OR_REG_MEMBASE;
9708 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9709 return OP_AMD64_XOR_REG_MEMBASE;
9717 mono_op_to_op_imm_noemul (int opcode)
9720 #if SIZEOF_VOID_P == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
9725 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
9733 return mono_op_to_op_imm (opcode);
9740 * mono_handle_global_vregs:
9742 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
9746 mono_handle_global_vregs (MonoCompile *cfg)
9752 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
9754 /* Find local vregs used in more than one bb */
9755 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
9756 MonoInst *ins = bb->code;
9757 int block_num = bb->block_num;
9759 if (cfg->verbose_level > 1)
9760 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
9763 for (; ins; ins = ins->next) {
9764 const char *spec = INS_INFO (ins->opcode);
9765 int regtype, regindex;
9768 if (G_UNLIKELY (cfg->verbose_level > 1))
9769 mono_print_ins (ins);
9771 g_assert (ins->opcode >= MONO_CEE_LAST);
9773 for (regindex = 0; regindex < 3; regindex ++) {
9776 if (regindex == 0) {
9777 regtype = spec [MONO_INST_DEST];
9781 } else if (regindex == 1) {
9782 regtype = spec [MONO_INST_SRC1];
9787 regtype = spec [MONO_INST_SRC2];
9793 #if SIZEOF_VOID_P == 4
9794 if (regtype == 'l') {
9796 * Since some instructions reference the original long vreg,
9797 * and some reference the two component vregs, it is quite hard
9798 * to determine when it needs to be global. So be conservative.
9800 if (!get_vreg_to_inst (cfg, vreg)) {
9801 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
9803 if (cfg->verbose_level > 1)
9804 printf ("LONG VREG R%d made global.\n", vreg);
9808 * Make the component vregs volatile since the optimizations can
9809 * get confused otherwise.
9811 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
9812 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
9816 g_assert (vreg != -1);
9818 prev_bb = vreg_to_bb [vreg];
9820 /* 0 is a valid block num */
9821 vreg_to_bb [vreg] = block_num + 1;
9822 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
9823 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
9826 if (!get_vreg_to_inst (cfg, vreg)) {
9827 if (G_UNLIKELY (cfg->verbose_level > 1))
9828 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
9832 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
9835 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
9838 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
9841 g_assert_not_reached ();
9845 /* Flag as having been used in more than one bb */
9846 vreg_to_bb [vreg] = -1;
9852 /* If a variable is used in only one bblock, convert it into a local vreg */
9853 for (i = 0; i < cfg->num_varinfo; i++) {
9854 MonoInst *var = cfg->varinfo [i];
9855 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
9857 switch (var->type) {
9863 #if SIZEOF_VOID_P == 8
9866 #if !defined(__i386__) && !defined(MONO_ARCH_SOFT_FLOAT)
9867 /* Enabling this screws up the fp stack on x86 */
9870 /* Arguments are implicitly global */
9871 /* Putting R4 vars into registers doesn't work currently */
9872 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4)) {
9874 * Make sure that the variable's liveness interval doesn't contain a call, since
9875 * that would cause the lvreg to be spilled, making the whole optimization
9878 /* This is too slow for JIT compilation */
9880 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
9882 int def_index, call_index, ins_index;
9883 gboolean spilled = FALSE;
9888 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
9889 const char *spec = INS_INFO (ins->opcode);
9891 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
9892 def_index = ins_index;
9894 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
9895 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
9896 if (call_index > def_index) {
9902 if (MONO_IS_CALL (ins))
9903 call_index = ins_index;
9913 if (G_UNLIKELY (cfg->verbose_level > 2))
9914 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
9915 var->flags |= MONO_INST_IS_DEAD;
9916 cfg->vreg_to_inst [var->dreg] = NULL;
9923 * Compress the varinfo and vars tables so the liveness computation is faster and
9924 * takes up less space.
9927 for (i = 0; i < cfg->num_varinfo; ++i) {
9928 MonoInst *var = cfg->varinfo [i];
9929 if (pos < i && cfg->locals_start == i)
9930 cfg->locals_start = pos;
9931 if (!(var->flags & MONO_INST_IS_DEAD)) {
9933 cfg->varinfo [pos] = cfg->varinfo [i];
9934 cfg->varinfo [pos]->inst_c0 = pos;
9935 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
9936 cfg->vars [pos].idx = pos;
9937 #if SIZEOF_VOID_P == 4
9938 if (cfg->varinfo [pos]->type == STACK_I8) {
9939 /* Modify the two component vars too */
9942 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
9943 var1->inst_c0 = pos;
9944 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
9945 var1->inst_c0 = pos;
9952 cfg->num_varinfo = pos;
9953 if (cfg->locals_start > cfg->num_varinfo)
9954 cfg->locals_start = cfg->num_varinfo;
9958 * mono_spill_global_vars:
9960 * Generate spill code for variables which are not allocated to registers,
9961 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
9962 * code is generated which could be optimized by the local optimization passes.
9965 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
9970 guint32 *vreg_to_lvreg;
9972 guint32 i, lvregs_len;
9973 gboolean dest_has_lvreg = FALSE;
9974 guint32 stacktypes [128];
9976 *need_local_opts = FALSE;
9978 memset (spec2, 0, sizeof (spec2));
9980 /* FIXME: Move this function to mini.c */
/* Map INS_INFO spec characters to stack types used when allocating lvregs. */
9981 stacktypes ['i'] = STACK_PTR;
9982 stacktypes ['l'] = STACK_I8;
9983 stacktypes ['f'] = STACK_R8;
9985 #if SIZEOF_VOID_P == 4
9986 /* Create MonoInsts for longs */
9987 for (i = 0; i < cfg->num_varinfo; i++) {
9988 MonoInst *ins = cfg->varinfo [i];
9990 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
9991 switch (ins->type) {
9992 #ifdef MONO_ARCH_SOFT_FLOAT
9998 g_assert (ins->opcode == OP_REGOFFSET);
/* Give the two 32-bit halves (dreg+1 = LS word, dreg+2 = MS word) their own
 * REGOFFSET slots inside the parent's stack slot. */
10000 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
10002 tree->opcode = OP_REGOFFSET;
10003 tree->inst_basereg = ins->inst_basereg;
10004 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
10006 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
10008 tree->opcode = OP_REGOFFSET;
10009 tree->inst_basereg = ins->inst_basereg;
10010 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
10020 /* FIXME: widening and truncation */
10023 * As an optimization, when a variable allocated to the stack is first loaded into
10024 * an lvreg, we will remember the lvreg and use it the next time instead of loading
10025 * the variable again.
10027 orig_next_vreg = cfg->next_vreg;
10028 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
10029 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
10032 /* Add spill loads/stores */
10033 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10036 if (cfg->verbose_level > 1)
10037 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
10039 /* Clear vreg_to_lvreg array */
/* Cached lvregs are only valid within one bblock, so reset at bblock entry. */
10040 for (i = 0; i < lvregs_len; i++)
10041 vreg_to_lvreg [lvregs [i]] = 0;
10045 MONO_BB_FOR_EACH_INS (bb, ins) {
10046 const char *spec = INS_INFO (ins->opcode);
10047 int regtype, srcindex, sreg, tmp_reg, prev_dreg;
10048 gboolean store, no_lvreg;
10050 if (G_UNLIKELY (cfg->verbose_level > 1))
10051 mono_print_ins (ins);
10053 if (ins->opcode == OP_NOP)
10057 * We handle LDADDR here as well, since it can only be decomposed
10058 * when variable addresses are known.
10060 if (ins->opcode == OP_LDADDR) {
10061 MonoInst *var = ins->inst_p0;
10063 if (var->opcode == OP_VTARG_ADDR) {
10064 /* Happens on SPARC/S390 where vtypes are passed by reference */
10065 MonoInst *vtaddr = var->inst_left;
10066 if (vtaddr->opcode == OP_REGVAR) {
/* Address already lives in a register: a plain move suffices. */
10067 ins->opcode = OP_MOVE;
10068 ins->sreg1 = vtaddr->dreg;
10070 else if (var->inst_left->opcode == OP_REGOFFSET) {
/* Address stored in a stack slot: load it. */
10071 ins->opcode = OP_LOAD_MEMBASE;
10072 ins->inst_basereg = vtaddr->inst_basereg;
10073 ins->inst_offset = vtaddr->inst_offset;
10077 g_assert (var->opcode == OP_REGOFFSET);
/* Ordinary stack variable: its address is basereg + offset. */
10079 ins->opcode = OP_ADD_IMM;
10080 ins->sreg1 = var->inst_basereg;
10081 ins->inst_imm = var->inst_offset;
10084 *need_local_opts = TRUE;
10085 spec = INS_INFO (ins->opcode);
/* CIL-level opcodes must have been lowered away by this point. */
10088 if (ins->opcode < MONO_CEE_LAST) {
10089 mono_print_ins (ins);
10090 g_assert_not_reached ();
10094 * Store opcodes have destbasereg in the dreg, but in reality, it is an
/* Treat the store's base register as a source: swap dreg/sreg2 and build a
 * corrected spec (spec2) with no destination. Swapped back further below. */
10098 if (MONO_IS_STORE_MEMBASE (ins)) {
10099 tmp_reg = ins->dreg;
10100 ins->dreg = ins->sreg2;
10101 ins->sreg2 = tmp_reg;
10104 spec2 [MONO_INST_DEST] = ' ';
10105 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10106 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10108 } else if (MONO_IS_STORE_MEMINDEX (ins))
10109 g_assert_not_reached ();
10114 if (G_UNLIKELY (cfg->verbose_level > 1))
10115 printf ("\t %.3s %d %d %d\n", spec, ins->dreg, ins->sreg1, ins->sreg2);
10120 regtype = spec [MONO_INST_DEST];
10121 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
/* FIRST PASS: if the destination is a global vreg, rewrite the def and emit
 * a spill store after the instruction when the var lives on the stack. */
10124 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
10125 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
10126 MonoInst *store_ins;
10129 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
10131 if (var->opcode == OP_REGVAR) {
/* Var allocated to a hardware register: just retarget the dreg. */
10132 ins->dreg = var->dreg;
10133 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
10135 * Instead of emitting a load+store, use a _membase opcode.
10137 g_assert (var->opcode == OP_REGOFFSET);
10138 if (ins->opcode == OP_MOVE) {
10141 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
10142 ins->inst_basereg = var->inst_basereg;
10143 ins->inst_offset = var->inst_offset;
10146 spec = INS_INFO (ins->opcode);
10150 g_assert (var->opcode == OP_REGOFFSET);
10152 prev_dreg = ins->dreg;
10154 /* Invalidate any previous lvreg for this vreg */
10155 vreg_to_lvreg [ins->dreg] = 0;
10159 #ifdef MONO_ARCH_SOFT_FLOAT
/* With soft float, R8 values are stored via integer stores. */
10160 if (store_opcode == OP_STORER8_MEMBASE_REG) {
10162 store_opcode = OP_STOREI8_MEMBASE_REG;
/* Redirect the def into a fresh lvreg; the spill store below writes it back. */
10166 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
10168 if (regtype == 'l') {
/* Long on 32 bit: spill the two halves with separate 32-bit stores. */
10169 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
10170 mono_bblock_insert_after_ins (bb, ins, store_ins);
10171 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
10172 mono_bblock_insert_after_ins (bb, ins, store_ins);
10175 g_assert (store_opcode != OP_STOREV_MEMBASE);
10177 /* Try to fuse the store into the instruction itself */
10178 /* FIXME: Add more instructions */
10179 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
10180 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
10181 ins->inst_imm = ins->inst_c0;
10182 ins->inst_destbasereg = var->inst_basereg;
10183 ins->inst_offset = var->inst_offset;
10184 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
/* A move into the var becomes a direct store of the source. */
10185 ins->opcode = store_opcode;
10186 ins->inst_destbasereg = var->inst_basereg;
10187 ins->inst_offset = var->inst_offset;
10191 tmp_reg = ins->dreg;
10192 ins->dreg = ins->sreg2;
10193 ins->sreg2 = tmp_reg;
10196 spec2 [MONO_INST_DEST] = ' ';
10197 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10198 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10200 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
10201 // FIXME: The backends expect the base reg to be in inst_basereg
10202 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
10204 ins->inst_basereg = var->inst_basereg;
10205 ins->inst_offset = var->inst_offset;
10206 spec = INS_INFO (ins->opcode);
10208 /* printf ("INS: "); mono_print_ins (ins); */
10209 /* Create a store instruction */
10210 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
10212 /* Insert it after the instruction */
10213 mono_bblock_insert_after_ins (bb, ins, store_ins);
10216 * We can't assign ins->dreg to var->dreg here, since the
10217 * sregs could use it. So set a flag, and do it after
/* The lvreg cache is skipped for fp values on fp-stack archs and for
 * volatile/indirect vars, which must always hit memory. */
10220 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
10221 dest_has_lvreg = TRUE;
/* SECOND PASS: rewrite each source operand, emitting a reload before the
 * instruction when the var lives on the stack. */
10230 for (srcindex = 0; srcindex < 2; ++srcindex) {
10231 regtype = spec [(srcindex == 0) ? MONO_INST_SRC1 : MONO_INST_SRC2];
10232 sreg = srcindex == 0 ? ins->sreg1 : ins->sreg2;
10234 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
10235 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
10236 MonoInst *var = get_vreg_to_inst (cfg, sreg);
10237 MonoInst *load_ins;
10238 guint32 load_opcode;
10240 if (var->opcode == OP_REGVAR) {
10242 ins->sreg1 = var->dreg;
10244 ins->sreg2 = var->dreg;
10248 g_assert (var->opcode == OP_REGOFFSET);
10250 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
10252 g_assert (load_opcode != OP_LOADV_MEMBASE);
10254 if (vreg_to_lvreg [sreg]) {
10255 /* The variable is already loaded to an lvreg */
10256 if (G_UNLIKELY (cfg->verbose_level > 1))
10257 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
10259 ins->sreg1 = vreg_to_lvreg [sreg];
10261 ins->sreg2 = vreg_to_lvreg [sreg];
10265 /* Try to fuse the load into the instruction */
10266 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
10267 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
10268 ins->inst_basereg = var->inst_basereg;
10269 ins->inst_offset = var->inst_offset;
10270 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
10271 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
10272 ins->sreg2 = var->inst_basereg;
10273 ins->inst_offset = var->inst_offset;
10275 if ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE)) {
/* The move became redundant: source and dest now coincide. */
10276 ins->opcode = OP_NOP;
10279 //printf ("%d ", srcindex); mono_print_ins (ins);
/* Load into a fresh lvreg of the matching stack type. */
10281 sreg = alloc_dreg (cfg, stacktypes [regtype]);
10283 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
10284 if (var->dreg == prev_dreg) {
10286 * sreg refers to the value loaded by the load
10287 * emitted below, but we need to use ins->dreg
10288 * since it refers to the store emitted earlier.
/* Cache the lvreg so later uses in this bblock skip the reload. */
10292 vreg_to_lvreg [var->dreg] = sreg;
10293 g_assert (lvregs_len < 1024);
10294 lvregs [lvregs_len ++] = var->dreg;
10303 if (regtype == 'l') {
/* Long on 32 bit: reload the two halves with separate 32-bit loads. */
10304 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
10305 mono_bblock_insert_before_ins (bb, ins, load_ins);
10306 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
10307 mono_bblock_insert_before_ins (bb, ins, load_ins);
10310 #if SIZEOF_VOID_P == 4
10311 g_assert (load_opcode != OP_LOADI8_MEMBASE);
10313 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
10314 mono_bblock_insert_before_ins (bb, ins, load_ins);
/* Now that the sregs are processed, it is safe to publish the dest lvreg
 * (deferred from the first pass, see comment above). */
10320 if (dest_has_lvreg) {
10321 vreg_to_lvreg [prev_dreg] = ins->dreg;
10322 g_assert (lvregs_len < 1024);
10323 lvregs [lvregs_len ++] = prev_dreg;
10324 dest_has_lvreg = FALSE;
/* Undo the dreg/sreg2 swap applied to store opcodes earlier. */
10328 tmp_reg = ins->dreg;
10329 ins->dreg = ins->sreg2;
10330 ins->sreg2 = tmp_reg;
10333 if (MONO_IS_CALL (ins)) {
10334 /* Clear vreg_to_lvreg array */
/* Calls clobber the lvregs, so invalidate the whole cache. */
10335 for (i = 0; i < lvregs_len; i++)
10336 vreg_to_lvreg [lvregs [i]] = 0;
10340 if (cfg->verbose_level > 1)
10341 mono_print_ins_index (1, ins);
10348 * - use 'iadd' instead of 'int_add'
10349 * - handling ovf opcodes: decompose in method_to_ir.
10350 * - unify iregs/fregs
10351 * -> partly done, the missing parts are:
10352 * - a more complete unification would involve unifying the hregs as well, so
10353 * code wouldn't need if (fp) all over the place. but that would mean the hregs
10354 * would no longer map to the machine hregs, so the code generators would need to
10355 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
10356 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
10357 * fp/non-fp branches speeds it up by about 15%.
10358 * - use sext/zext opcodes instead of shifts
10360 * - get rid of TEMPLOADs if possible and use vregs instead
10361 * - clean up usage of OP_P/OP_ opcodes
10362 * - cleanup usage of DUMMY_USE
10363 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
10365 * - set the stack type and allocate a dreg in the EMIT_NEW macros
10366 * - get rid of all the <foo>2 stuff when the new JIT is ready.
10367 * - make sure handle_stack_args () is called before the branch is emitted
10368 * - when the new IR is done, get rid of all unused stuff
10369 * - COMPARE/BEQ as separate instructions or unify them ?
10370 * - keeping them separate allows specialized compare instructions like
10371 * compare_imm, compare_membase
10372 * - most back ends unify fp compare+branch, fp compare+ceq
10373 * - integrate handle_stack_args into inline_method
10374 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRACH_BLOCK2
10375 * - Things to backport to the old JIT:
10376 * - op_atomic_exchange fix for amd64
10377 * - localloc fix for amd64
10378 * - x86 type_token change
10380 * - long eq/ne optimizations
10381 * - handle long shift opts on 32 bit platforms somehow: they require
10382 * 3 sregs (2 for arg1 and 1 for arg2)
10383 * - make byref a 'normal' type.
10384 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
10385 * variable if needed.
10386 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
10387 * like inline_method.
10388 * - remove inlining restrictions
10389 * - remove mono_save_args.
10390 * - add 'introduce a new optimization to simplify some range checks'
10391 * - fix LNEG and enable cfold of INEG
10392 * - generalize x86 optimizations like ldelema as a peephole optimization
10393 * - add store_mem_imm for amd64
10394 * - optimize the loading of the interruption flag in the managed->native wrappers
10395 * - avoid special handling of OP_NOP in passes
10396 * - move code inserting instructions into one function/macro.
10397 * - cleanup the code replacement in decompose_long_opts ()
10398 * - try a coalescing phase after liveness analysis
10399 * - add float -> vreg conversion + local optimizations on !x86
10400 * - figure out how to handle decomposed branches during optimizations, ie.
10401 * compare+branch, op_jump_table+op_br etc.
10402 * - promote RuntimeXHandles to vregs
10403 * - vtype cleanups:
10404 * - add a NEW_VARLOADA_VREG macro
10405 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
10406 * accessing vtype fields.
10407 * - get rid of I8CONST on 64 bit platforms
10408 * - dealing with the increase in code size due to branches created during opcode
10410 * - use extended basic blocks
10411 * - all parts of the JIT
10412 * - handle_global_vregs () && local regalloc
10413 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
10414 * - sources of increase in code size:
10417 * - isinst and castclass
10418 * - lvregs not allocated to global registers even if used multiple times
10419 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
10421 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
10422 * - add all micro optimizations from the old JIT
10423 * - put tree optimizations into the deadce pass
10424 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
10425 * specific function.
10426 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
10427 * fcompare + branchCC.
10428 * - sig->ret->byref seems to be set for some calls made from ldfld wrappers when
10429 * running generics.exe.
10430 * - create a helper function for allocating a stack slot, taking into account
10431 * MONO_CFG_HAS_SPILLUP.
10432 * - merge new GC changes in mini.c.
10434 * - merge the ia64 switch changes.
10435 * - merge the mips conditional changes.
10436 * - remove unused opcodes from mini-ops.h, remove "op_" from the opcode names,
10437 * remove the op_ opcodes from the cpu-..md files, clean up the cpu-..md files.
10438 * - make the cpu_ tables smaller when the usage of the cee_ opcodes is removed.
10439 * - optimize mono_regstate2_alloc_int/float.
10440 * - fix the pessimistic handling of variables accessed in exception handler blocks.
10441 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
10442 * parts of the tree could be separated by other instructions, killing the tree
10443 * arguments, or stores killing loads etc. Also, should we fold loads into other
10444 * instructions if the result of the load is used multiple times ?
10445 * - make the REM_IMM optimization in mini-x86.c arch-independent.
10446 * - LAST MERGE: 108395.
10447 * - when returning vtypes in registers, generate IR and append it to the end of the
10448 * last bb instead of doing it in the epilog.
10449 * - when the new JIT is done, use the ins emission macros in ir-emit.h instead of the
10450 * ones in inssel.h.
10451 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
10459 - When to decompose opcodes:
10460 - earlier: this makes some optimizations hard to implement, since the low level IR
10461 no longer contains the necessary information. But it is easier to do.
10462 - later: harder to implement, enables more optimizations.
10463 - Branches inside bblocks:
10464 - created when decomposing complex opcodes.
10465 - branches to another bblock: harmless, but not tracked by the branch
10466 optimizations, so need to branch to a label at the start of the bblock.
10467 - branches to inside the same bblock: very problematic, trips up the local
10468 reg allocator. Can be fixed by splitting the current bblock, but that is a
10469 complex operation, since some local vregs can become global vregs etc.
10470 - Local/global vregs:
10471 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
10472 local register allocator.
10473 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
10474 structure, created by mono_create_var (). Assigned to hregs or the stack by
10475 the global register allocator.
10476 - When to do optimizations like alu->alu_imm:
10477 - earlier -> saves work later on since the IR will be smaller/simpler
10478 - later -> can work on more instructions
10479 - Handling of valuetypes:
10480 - When a vtype is pushed on the stack, a new temporary is created, an
10481 instruction computing its address (LDADDR) is emitted and pushed on
10482 the stack. Need to optimize cases when the vtype is used immediately as in
10483 argument passing, stloc etc.
10484 - Instead of the to_end stuff in the old JIT, simply call the function handling
10485 the values on the stack before emitting the last instruction of the bb.
10488 #endif /* DISABLE_JIT */