2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
20 #ifdef HAVE_SYS_TIME_H
24 #ifdef HAVE_VALGRIND_MEMCHECK_H
25 #include <valgrind/memcheck.h>
28 #include <mono/metadata/assembly.h>
29 #include <mono/metadata/loader.h>
30 #include <mono/metadata/tabledefs.h>
31 #include <mono/metadata/class.h>
32 #include <mono/metadata/object.h>
33 #include <mono/metadata/exception.h>
34 #include <mono/metadata/opcodes.h>
35 #include <mono/metadata/mono-endian.h>
36 #include <mono/metadata/tokentype.h>
37 #include <mono/metadata/tabledefs.h>
38 #include <mono/metadata/threads.h>
39 #include <mono/metadata/marshal.h>
40 #include <mono/metadata/socket-io.h>
41 #include <mono/metadata/appdomain.h>
42 #include <mono/metadata/debug-helpers.h>
43 #include <mono/io-layer/io-layer.h>
44 #include "mono/metadata/profiler.h"
45 #include <mono/metadata/profiler-private.h>
46 #include <mono/metadata/mono-config.h>
47 #include <mono/metadata/environment.h>
48 #include <mono/metadata/mono-debug.h>
49 #include <mono/metadata/mono-debug-debugger.h>
50 #include <mono/metadata/monitor.h>
51 #include <mono/metadata/gc-internal.h>
52 #include <mono/metadata/security-manager.h>
53 #include <mono/metadata/threads-types.h>
54 #include <mono/metadata/rawbuffer.h>
55 #include <mono/metadata/security-core-clr.h>
56 #include <mono/utils/mono-math.h>
57 #include <mono/utils/mono-compiler.h>
58 #include <mono/os/gc_wrapper.h>
68 #include "jit-icalls.h"
/* Inliner tuning constants (heuristic cost/size limits). */
72 #define BRANCH_COST 100
73 #define INLINE_LENGTH_LIMIT 20
/*
 * NOTE(review): the leading numbers on each line are listing line numbers;
 * gaps in them show lines elided from this view, so the macro bodies below
 * are visibly truncated (their closing "} while (0)" lines are missing here).
 * INLINE_FAILURE: guarded action taken when inlining fails for a non-wrapper
 * method being inlined into another method (cfg->method != method).
 * CHECK_CFG_EXCEPTION: guarded action taken when a compile-time exception
 * has already been recorded on cfg.
 */
74 #define INLINE_FAILURE do {\
75 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE))\
78 #define CHECK_CFG_EXCEPTION do {\
79 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
/*
 * METHOD_ACCESS_FAILURE: record a method-access compile failure on cfg
 * (exception type + human-readable message naming both methods) and jump
 * to the function-local `exception_exit` label. The temporary name strings
 * are freed before the jump. Requires `method` and `cil_method` in scope.
 */
82 #define METHOD_ACCESS_FAILURE do { \
83 char *method_fname = mono_method_full_name (method, TRUE); \
84 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
85 cfg->exception_type = MONO_EXCEPTION_METHOD_ACCESS; \
86 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
87 g_free (method_fname); \
88 g_free (cil_method_fname); \
89 goto exception_exit; \
/*
 * FIELD_ACCESS_FAILURE: same pattern as METHOD_ACCESS_FAILURE but for an
 * inaccessible field: set the exception type/message on cfg, free the
 * temporary name strings and jump to `exception_exit`. Requires `method`
 * and `field` in scope at the expansion site.
 */
91 #define FIELD_ACCESS_FAILURE do { \
92 char *method_fname = mono_method_full_name (method, TRUE); \
93 char *field_fname = mono_field_full_name (field); \
94 cfg->exception_type = MONO_EXCEPTION_FIELD_ACCESS; \
95 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
96 g_free (method_fname); \
97 g_free (field_fname); \
98 goto exception_exit; \
/*
 * GENERIC_SHARING_FAILURE: when compiling with a generic sharing context,
 * optionally log the failing method/opcode (verbose_level > 1, with the
 * expansion-site __LINE__), mark the cfg with
 * MONO_EXCEPTION_GENERIC_SHARING_FAILED and jump to `exception_exit`.
 * The _IF_VALUETYPE_METHOD variant applies it only when the method's class
 * is a valuetype. (Closing lines of both macros are elided in this listing.)
 */
100 #define GENERIC_SHARING_FAILURE(opcode) do { \
101 if (cfg->generic_sharing_context) { \
102 if (cfg->verbose_level > 1) \
103 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
104 cfg->exception_type = MONO_EXCEPTION_GENERIC_SHARING_FAILED; \
105 goto exception_exit; \
108 #define GENERIC_SHARING_FAILURE_IF_VALUETYPE_METHOD(opcode) do { \
109 if (method->klass->valuetype) \
110 GENERIC_SHARING_FAILURE ((opcode)); \
113 /* Determine whether 'ins' represents a load of the 'this' argument:
 * an OP_MOVE whose source register is the dreg of argument 0, in a method
 * whose signature has an implicit 'this' parameter. */
114 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
/* Forward declarations for opcode-mapping helpers defined later in this file. */
116 static int ldind_to_load_membase (int opcode);
117 static int stind_to_store_membase (int opcode);
/* Immediate-operand opcode mappings (defined elsewhere in the JIT). */
119 int mono_op_to_op_imm (int opcode);
120 int mono_op_to_op_imm_noemul (int opcode);
/* Emission helpers shared with other compilation units. */
122 MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
123 void mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native);
124 void mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass);
/* Main CIL-to-IR entry point (the bulk of this file). */
126 int mono_method_to_ir2 (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
127 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
128 guint inline_offset, gboolean is_virtual_call);
130 /* helper methods signature */
131 extern MonoMethodSignature *helper_sig_class_init_trampoline;
132 extern MonoMethodSignature *helper_sig_domain_get;
133 extern MonoMethodSignature *helper_sig_generic_class_init_trampoline;
134 extern MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
137 * Instruction metadata
/* Expand mini-ops.h into per-opcode dest/src1/src2 descriptor triples.
 * NOTE(review): the surrounding array declaration and #undef are elided
 * in this listing. */
142 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2,
147 #if SIZEOF_VOID_P == 8
152 /* keep in sync with the enum in mini.h */
155 #include "mini-ops.h"
/* Maps JIT icall names to their info (defined elsewhere). */
159 extern GHashTable *jit_icall_name_hash;
/* Initialize live-range bookkeeping for variable info 'vi';
 * body truncated in this listing. */
161 #define MONO_INIT_VARINFO(vi,id) do { \
162 (vi)->range.first_use.pos.bid = 0xffff; \
/*
 * Public wrappers around the file-local register-allocation helpers:
 * allocate a fresh integer / float / pointer-sized vreg, or (for
 * mono_alloc_dreg) a destination reg suited to the given stack type.
 * NOTE(review): return types and braces are elided in this listing.
 */
168 mono_alloc_ireg (MonoCompile *cfg)
170 return alloc_ireg (cfg);
174 mono_alloc_freg (MonoCompile *cfg)
176 return alloc_freg (cfg);
180 mono_alloc_preg (MonoCompile *cfg)
182 return alloc_preg (cfg);
186 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
188 return alloc_dreg (cfg, stack_type);
/*
 * mono_type_to_regmove: map a MonoType to the register-move opcode used to
 * copy a value of that type. Enums are unwrapped to their underlying base
 * type and generic instances to the container class before re-dispatch;
 * unknown type codes abort via g_error. Many case labels and the return
 * statements between them are elided in this listing.
 */
192 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
198 switch (type->type) {
201 case MONO_TYPE_BOOLEAN:
213 case MONO_TYPE_FNPTR:
215 case MONO_TYPE_CLASS:
216 case MONO_TYPE_STRING:
217 case MONO_TYPE_OBJECT:
218 case MONO_TYPE_SZARRAY:
219 case MONO_TYPE_ARRAY:
223 #if SIZEOF_VOID_P == 8
/* Enums move as their underlying base type; loop back and re-dispatch. */
232 case MONO_TYPE_VALUETYPE:
233 if (type->data.klass->enumtype) {
234 type = type->data.klass->enum_basetype;
238 case MONO_TYPE_TYPEDBYREF:
/* Generic instances are handled via their container class's byval type. */
240 case MONO_TYPE_GENERICINST:
241 type = &type->data.generic_class->container_class->byval_arg;
/* presumably the VAR/MVAR path — only valid under generic sharing. */
245 g_assert (cfg->generic_sharing_context);
248 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/*
 * mono_print_bb: debugging aid. Print a basic block's number, its incoming
 * and outgoing edges (block number and depth-first number), and then every
 * instruction in the block via mono_print_ins_index.
 */
254 mono_print_bb (MonoBasicBlock *bb, const char *msg)
259 printf ("\n%s %d: [IN: ", msg, bb->block_num);
260 for (i = 0; i < bb->in_count; ++i)
261 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
263 for (i = 0; i < bb->out_count; ++i)
264 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
266 for (tree = bb->code; tree; tree = tree->next)
267 mono_print_ins_index (-1, tree);
271 * Can't put this at the beginning, since other files reference stuff from this
/* UNVERIFIED: on unverifiable IL, either break into the debugger (when the
 * break_on_unverified debug option is set) or jump to the function-local
 * `unverified` label. */
276 #define UNVERIFIED do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto unverified; } while (0)
/* GET_BBLOCK: look up (or lazily create and register) the basic block that
 * starts at IL address `ip`, via the cil_offset_to_bb cache. Out-of-range
 * addresses trigger UNVERIFIED. Closing line elided in this listing. */
278 #define GET_BBLOCK(cfg,tblock,ip) do { \
279 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
281 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
282 NEW_BBLOCK (cfg, (tblock)); \
283 (tblock)->cil_code = (ip); \
284 ADD_BBLOCK (cfg, (tblock)); \
/*
 * Array bounds checking. The generic (non-arch-specific) sequence loads the
 * array length and emits a compare + IndexOutOfRangeException branch. The
 * MONO_EMIT_BOUNDS_CHECK wrapper uses that expansion directly, except when
 * the ABC-removal optimization is enabled, in which case it emits an
 * OP_BOUNDS_CHECK pseudo-op and flags the cfg/bblock so the ABCREM pass can
 * analyze it. (Several closing/else lines are elided in this listing.)
 */
288 #ifndef MONO_ARCH_EMIT_BOUNDS_CHECK
289 #define MONO_ARCH_EMIT_BOUNDS_CHECK(cfg, array_reg, offset, index_reg) do { \
290 int _length_reg = alloc_ireg (cfg); \
291 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, _length_reg, array_reg, offset); \
292 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, _length_reg, index_reg); \
293 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException"); \
297 #define MONO_EMIT_BOUNDS_CHECK(cfg, array_reg, array_type, array_length_field, index_reg) do { \
298 if (!(cfg->opt & MONO_OPT_ABCREM)) { \
299 MONO_ARCH_EMIT_BOUNDS_CHECK ((cfg), (array_reg), G_STRUCT_OFFSET (array_type, array_length_field), (index_reg)); \
302 MONO_INST_NEW ((cfg), ins, OP_BOUNDS_CHECK); \
303 ins->sreg1 = array_reg; \
304 ins->sreg2 = index_reg; \
305 ins->inst_imm = G_STRUCT_OFFSET (array_type, array_length_field); \
306 MONO_ADD_INS ((cfg)->cbb, ins); \
307 (cfg)->flags |= MONO_CFG_HAS_ARRAY_ACCESS; \
308 (cfg)->cbb->has_array_access = TRUE; \
/*
 * EMIT_NEW_X86_LEA (x86/amd64 only): build and append an OP_X86_LEA
 * instruction computing sr1 + (sr2 << shift) + imm into a fresh
 * pointer-sized dreg. Closing lines elided in this listing.
 */
312 #if defined(__i386__) || defined(__x86_64__)
313 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
314 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
315 (dest)->dreg = alloc_preg ((cfg)); \
316 (dest)->sreg1 = (sr1); \
317 (dest)->sreg2 = (sr2); \
318 (dest)->inst_imm = (imm); \
319 (dest)->backend.shift_amount = (shift); \
320 MONO_ADD_INS ((cfg)->cbb, (dest)); \
/*
 * ADD_WIDEN_OP: on 64-bit targets, when a binop mixes a native pointer
 * (STACK_PTR) with a 32-bit int (STACK_I4), insert an OP_SEXT_I4 to widen
 * the int operand and rewire the binop's sreg2 to the widened register.
 * On 32-bit targets it expands to nothing.
 */
324 #if SIZEOF_VOID_P == 8
325 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
326 /* FIXME: Need to add many more cases */ \
327 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
329 int dr = alloc_preg (cfg); \
330 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
331 (ins)->sreg2 = widen->dreg; \
335 #define ADD_WIDEN_OP(ins, arg1, arg2)
/*
 * ADD_BINOP / ADD_UNOP: build a binary (two operands popped from `sp`) or
 * unary IR instruction: create the ins, wire source regs, derive the result
 * stack type via type_from_op, allocate a dreg of that type, append to the
 * current bblock, and let mono_decompose_opcode lower it if needed.
 * ADD_BINOP also inserts a widening op for mixed ptr/i4 operands on 64-bit.
 * (The closing lines of both macros are elided in this listing.)
 */
338 #define ADD_BINOP(op) do { \
339 MONO_INST_NEW (cfg, ins, (op)); \
341 ins->sreg1 = sp [0]->dreg; \
342 ins->sreg2 = sp [1]->dreg; \
343 type_from_op (ins, sp [0], sp [1]); \
345 /* Have to insert a widening op */ \
346 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
347 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
348 MONO_ADD_INS ((cfg)->cbb, (ins)); \
350 mono_decompose_opcode ((cfg), (ins)); \
353 #define ADD_UNOP(op) do { \
354 MONO_INST_NEW (cfg, ins, (op)); \
356 ins->sreg1 = sp [0]->dreg; \
357 type_from_op (ins, sp [0], NULL); \
359 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
360 MONO_ADD_INS ((cfg)->cbb, (ins)); \
362 mono_decompose_opcode (cfg, ins); \
/*
 * ADD_BINCOND: emit a two-operand conditional branch: an OP_COMPARE of the
 * top two stack items followed by the branch ins. Links the current bblock
 * to both the taken target (from `target`) and the fall-through successor
 * (either the supplied next_block or the block starting at `ip`), spilling
 * any remaining eval-stack items first via handle_stack_args.
 * start_new_bblock = 1/2 — presumably distinguishes reuse of next_block vs.
 * a freshly fetched fall-through block; verify against callers.
 */
365 #define ADD_BINCOND(next_block) do { \
368 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
369 cmp->sreg1 = sp [0]->dreg; \
370 cmp->sreg2 = sp [1]->dreg; \
371 type_from_op (cmp, sp [0], sp [1]); \
373 type_from_op (ins, sp [0], sp [1]); \
374 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
375 GET_BBLOCK (cfg, tblock, target); \
376 link_bblock (cfg, bblock, tblock); \
377 ins->inst_true_bb = tblock; \
378 if ((next_block)) { \
379 link_bblock (cfg, bblock, (next_block)); \
380 ins->inst_false_bb = (next_block); \
381 start_new_bblock = 1; \
383 GET_BBLOCK (cfg, tblock, ip); \
384 link_bblock (cfg, bblock, tblock); \
385 ins->inst_false_bb = tblock; \
386 start_new_bblock = 2; \
388 if (sp != stack_start) { \
389 handle_stack_args (cfg, stack_start, sp - stack_start); \
390 CHECK_UNVERIFIABLE (cfg); \
392 MONO_ADD_INS (bblock, cmp); \
393 MONO_ADD_INS (bblock, ins); \
397 * link_bblock: Links two basic blocks
399 * Links two basic blocks in the control-flow graph: the 'from' argument
400 * is the starting block and the 'to' argument is the block control flow
401 * reaches after 'from'. Idempotent: an already-present edge is not added
 * twice. Edge arrays are grown by allocating a larger copy from the cfg
 * mempool (the old array is leaked into the mempool by design).
404 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
406 MonoBasicBlock **newa;
/* Debug tracing of the edge being added (entry/exit blocks have no cil_code). */
410 if (from->cil_code) {
412 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
414 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
417 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
419 printf ("edge from entry to exit\n");
/* Skip if the out-edge already exists. */
424 for (i = 0; i < from->out_count; ++i) {
425 if (to == from->out_bb [i]) {
/* Grow from->out_bb by one slot (mempool-allocated copy). */
431 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
432 for (i = 0; i < from->out_count; ++i) {
433 newa [i] = from->out_bb [i];
/* Mirror the same dedup-and-grow logic for to->in_bb. */
441 for (i = 0; i < to->in_count; ++i) {
442 if (from == to->in_bb [i]) {
448 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
449 for (i = 0; i < to->in_count; ++i) {
450 newa [i] = to->in_bb [i];
/* Public wrapper around the static link_bblock. */
459 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
461 link_bblock (cfg, from, to);
465 * mono_find_block_region:
467 * We mark each basic block with a region ID. We use that to avoid BB
468 * optimizations when blocks are in different regions.
471 * A region token that encodes where this region is, and information
472 * about the clause owner for this block.
474 * The region encodes the try/catch/filter clause that owns this block
475 * as well as the type. -1 is a special value that represents a block
476 * that is in none of try/catch/filter.
 *
 * Token layout (from the returns below): ((clause_index + 1) << 8)
 * OR'ed with a MONO_REGION_* kind and the clause flags.
479 mono_find_block_region (MonoCompile *cfg, int offset)
481 MonoMethod *method = cfg->method;
482 MonoMethodHeader *header = mono_method_get_header (method);
483 MonoExceptionClause *clause;
486 /* first search for handlers and filters */
487 for (i = 0; i < header->num_clauses; ++i) {
488 clause = &header->clauses [i];
/* A filter region spans from filter_offset up to the handler start. */
489 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
490 (offset < (clause->handler_offset)))
491 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
493 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
494 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
495 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
496 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
497 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
499 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
503 /* search the try blocks */
504 for (i = 0; i < header->num_clauses; ++i) {
505 clause = &header->clauses [i];
506 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
507 return ((i + 1) << 8) | clause->flags;
/*
 * mono_find_final_block: collect the handler basic blocks (of clause kind
 * `type`, e.g. finally) whose protected region contains `ip` but not
 * `target` — i.e. the handlers a branch from ip to target would leave.
 * Returns a GList of the matching handler bblocks (appended in clause
 * order). NOTE(review): res initialization and return are elided here.
 */
514 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
516 MonoMethod *method = cfg->method;
517 MonoMethodHeader *header = mono_method_get_header (method);
518 MonoExceptionClause *clause;
519 MonoBasicBlock *handler;
523 for (i = 0; i < header->num_clauses; ++i) {
524 clause = &header->clauses [i];
525 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
526 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
527 if (clause->flags == type) {
528 handler = cfg->cil_offset_to_bb [clause->handler_offset];
530 res = g_list_append (res, handler);
/*
 * Per-region stack-pointer variable and per-offset exception variable
 * helpers. Each is looked up in its cfg hash table (spvars / exvars) and
 * lazily created as an OP_LOCAL on miss. MONO_INST_INDIRECT keeps the var
 * out of the register allocator (it must live in memory).
 */
538 mono_create_spvar_for_region (MonoCompile *cfg, int region)
542 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
546 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
547 /* prevent it from being register allocated */
548 var->flags |= MONO_INST_INDIRECT;
550 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/* Look up (without creating) the exception variable for an IL offset. */
554 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
556 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/* Look up or create the exception variable (object-typed) for an offset. */
560 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
564 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
568 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
569 /* prevent it from being register allocated */
570 var->flags |= MONO_INST_INDIRECT;
572 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
578 * Returns the type used in the eval stack when @type is loaded.
579 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
 *
 * Sets inst->type (STACK_* classification) and inst->klass from the given
 * MonoType, unwrapping enums and generic instances and re-dispatching.
 * NOTE(review): several case labels and break statements are elided here.
582 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
/* presumably the byref early-out: byref values are managed pointers. */
587 inst->type = STACK_MP;
588 inst->klass = mono_defaults.object_class;
592 inst->klass = klass = mono_class_from_mono_type (type);
595 switch (type->type) {
597 inst->type = STACK_INV;
601 case MONO_TYPE_BOOLEAN:
607 inst->type = STACK_I4;
612 case MONO_TYPE_FNPTR:
613 inst->type = STACK_PTR;
615 case MONO_TYPE_CLASS:
616 case MONO_TYPE_STRING:
617 case MONO_TYPE_OBJECT:
618 case MONO_TYPE_SZARRAY:
619 case MONO_TYPE_ARRAY:
620 inst->type = STACK_OBJ;
624 inst->type = STACK_I8;
628 inst->type = STACK_R8;
/* Enums are classified as their underlying base type (re-dispatch). */
630 case MONO_TYPE_VALUETYPE:
631 if (type->data.klass->enumtype) {
632 type = type->data.klass->enum_basetype;
636 inst->type = STACK_VTYPE;
639 case MONO_TYPE_TYPEDBYREF:
640 inst->klass = mono_defaults.typed_reference_class;
641 inst->type = STACK_VTYPE;
643 case MONO_TYPE_GENERICINST:
644 type = &type->data.generic_class->container_class->byval_arg;
647 case MONO_TYPE_MVAR :
648 /* FIXME: all the arguments must be references for now,
649 * later look inside cfg and see if the arg num is
652 g_assert (cfg->generic_sharing_context);
653 inst->type = STACK_OBJ;
656 g_error ("unknown type 0x%02x in eval stack type", type->type);
661 * The following tables are used to quickly validate the IL code in type_from_op ().
 *
 * Each is indexed by the STACK_* type of the operands; STACK_INV entries
 * mark invalid operand combinations. Row/column order follows the STACK_*
 * enum. (Array declaration lines are elided in this listing.)
 *
 * bin_num_table: result type of arithmetic binops (add/sub/mul/...).
664 bin_num_table [STACK_MAX] [STACK_MAX] = {
665 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
666 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
667 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
668 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
669 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
670 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
671 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
672 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Single-row table — presumably neg_table (result type of negation);
 * its declaration line is elided in this listing. */
677 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
680 /* reduce the size of this table */
/* bin_int_table: result type of integer-only binops (and/or/xor/rem/...). */
682 bin_int_table [STACK_MAX] [STACK_MAX] = {
683 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
684 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
685 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
686 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
687 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
688 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
689 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
690 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* bin_comp_table: which operand-type pairs are valid for comparisons.
 * 0 = invalid; nonzero values encode validity classes (used e.g. as
 * `& 1` for the strict subset in type_from_op). */
694 bin_comp_table [STACK_MAX] [STACK_MAX] = {
695 /* Inv i L p F & O vt */
697 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
698 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
699 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
700 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
701 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
702 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
703 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
706 /* reduce the size of this table */
/* shift_table: result type of shift ops, indexed [value][shift-amount]. */
708 shift_table [STACK_MAX] [STACK_MAX] = {
709 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
710 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
711 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
712 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
713 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
714 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
715 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
716 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
720 * Tables to map from the non-specific opcode to the matching
721 * type-specific opcode.
 *
 * Each table is indexed by the operand's STACK_* type and holds the
 * *offset* to add to the generic CEE_/OP_ opcode to reach its typed
 * variant (hence the OP_X - CEE_X arithmetic).
723 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
725 binops_op_map [STACK_MAX] = {
726 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
729 /* handles from CEE_NEG to CEE_CONV_U8 */
731 unops_op_map [STACK_MAX] = {
732 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
735 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
737 ovfops_op_map [STACK_MAX] = {
738 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
741 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
743 ovf2ops_op_map [STACK_MAX] = {
744 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
747 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
749 ovf3ops_op_map [STACK_MAX] = {
750 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
753 /* handles from CEE_BEQ to CEE_BLT_UN */
755 beqops_op_map [STACK_MAX] = {
756 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
759 /* handles from CEE_CEQ to CEE_CLT_UN */
761 ceqops_op_map [STACK_MAX] = {
762 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
766 * Sets ins->type (the type on the eval stack) according to the
767 * type of the opcode and the arguments to it.
768 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
770 * FIXME: this function sets ins->type unconditionally in some cases, but
771 * it should set it to invalid for some types (a conv.x on an object)
 *
 * NOTE(review): many case labels, breaks and #else/#endif lines of this
 * switch are elided in this listing; the comments below describe only the
 * visible fragments.
774 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
776 switch (ins->opcode) {
/* Arithmetic binops: result type from bin_num_table, then specialize
 * the generic opcode via binops_op_map. */
783 /* FIXME: check unverifiable args for STACK_MP */
784 ins->type = bin_num_table [src1->type] [src2->type];
785 ins->opcode += binops_op_map [ins->type];
/* Integer-only binops (and/or/xor/...). */
792 ins->type = bin_int_table [src1->type] [src2->type];
793 ins->opcode += binops_op_map [ins->type];
/* Shifts. */
798 ins->type = shift_table [src1->type] [src2->type];
799 ins->opcode += binops_op_map [ins->type];
/* Compares: pick L/F/I variant from the operand's stack type; on 64-bit,
 * pointer-sized operands use the long compare. */
804 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
805 if ((src1->type == STACK_I8) || ((sizeof (gpointer) == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
806 ins->opcode = OP_LCOMPARE;
807 else if (src1->type == STACK_R8)
808 ins->opcode = OP_FCOMPARE;
810 ins->opcode = OP_ICOMPARE;
812 case OP_ICOMPARE_IMM:
813 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
814 if ((src1->type == STACK_I8) || ((sizeof (gpointer) == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
815 ins->opcode = OP_LCOMPARE_IMM;
/* Conditional branches / ceq-family: specialize via the op maps. */
827 ins->opcode += beqops_op_map [src1->type];
830 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
831 ins->opcode += ceqops_op_map [src1->type];
/* Unsigned compare set: only validity class 1 pairs are allowed. */
837 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
838 ins->opcode += ceqops_op_map [src1->type];
/* Negation. */
842 ins->type = neg_table [src1->type];
843 ins->opcode += unops_op_map [ins->type];
/* presumably CEE_NOT: valid only for I4..PTR operands. */
846 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
847 ins->type = src1->type;
849 ins->type = STACK_INV;
850 ins->opcode += unops_op_map [ins->type];
/* Narrowing int conversions produce I4. */
856 ins->type = STACK_I4;
857 ins->opcode += unops_op_map [src1->type];
/* conv.r.un: pick the int/long unsigned-to-float opcode. */
860 ins->type = STACK_R8;
861 switch (src1->type) {
864 ins->opcode = OP_ICONV_TO_R_UN;
867 ins->opcode = OP_LCONV_TO_R_UN;
/* Overflow-checked conversions to 32-bit. */
871 case CEE_CONV_OVF_I1:
872 case CEE_CONV_OVF_U1:
873 case CEE_CONV_OVF_I2:
874 case CEE_CONV_OVF_U2:
875 case CEE_CONV_OVF_I4:
876 case CEE_CONV_OVF_U4:
877 ins->type = STACK_I4;
878 ins->opcode += ovf3ops_op_map [src1->type];
/* Overflow-checked conversions from unsigned to native int. */
880 case CEE_CONV_OVF_I_UN:
881 case CEE_CONV_OVF_U_UN:
882 ins->type = STACK_PTR;
883 ins->opcode += ovf2ops_op_map [src1->type];
885 case CEE_CONV_OVF_I1_UN:
886 case CEE_CONV_OVF_I2_UN:
887 case CEE_CONV_OVF_I4_UN:
888 case CEE_CONV_OVF_U1_UN:
889 case CEE_CONV_OVF_U2_UN:
890 case CEE_CONV_OVF_U4_UN:
891 ins->type = STACK_I4;
892 ins->opcode += ovf2ops_op_map [src1->type];
/* presumably CEE_CONV_U: conversion to native unsigned int; on 64-bit,
 * I4 sources must go through LCONV_TO_U rather than a plain move. */
895 ins->type = STACK_PTR;
896 switch (src1->type) {
898 ins->opcode = OP_MOVE;
902 #if SIZEOF_VOID_P == 8
903 ins->opcode = OP_LCONV_TO_U;
905 ins->opcode = OP_MOVE;
909 ins->opcode = OP_LCONV_TO_U;
912 ins->opcode = OP_FCONV_TO_U;
/* Conversions to 64-bit. */
918 ins->type = STACK_I8;
919 ins->opcode += unops_op_map [src1->type];
921 case CEE_CONV_OVF_I8:
922 case CEE_CONV_OVF_U8:
923 ins->type = STACK_I8;
924 ins->opcode += ovf3ops_op_map [src1->type];
926 case CEE_CONV_OVF_U8_UN:
927 case CEE_CONV_OVF_I8_UN:
928 ins->type = STACK_I8;
929 ins->opcode += ovf2ops_op_map [src1->type];
/* Conversions to floating point. */
933 ins->type = STACK_R8;
934 ins->opcode += unops_op_map [src1->type];
937 ins->type = STACK_R8;
/* presumably overflow-checked unsigned conversions (CEE_CONV_U2/U1 range). */
941 ins->type = STACK_I4;
942 ins->opcode += ovfops_op_map [src1->type];
947 ins->type = STACK_PTR;
948 ins->opcode += ovfops_op_map [src1->type];
/* Overflow-checked arithmetic: invalid for floats. */
956 ins->type = bin_num_table [src1->type] [src2->type];
957 ins->opcode += ovfops_op_map [src1->type];
958 if (ins->type == STACK_R8)
959 ins->type = STACK_INV;
/* Memory loads: result stack type follows the load width. */
961 case OP_LOAD_MEMBASE:
962 ins->type = STACK_PTR;
964 case OP_LOADI1_MEMBASE:
965 case OP_LOADU1_MEMBASE:
966 case OP_LOADI2_MEMBASE:
967 case OP_LOADU2_MEMBASE:
968 case OP_LOADI4_MEMBASE:
969 case OP_LOADU4_MEMBASE:
970 ins->type = STACK_PTR;
972 case OP_LOADI8_MEMBASE:
973 ins->type = STACK_I8;
975 case OP_LOADR4_MEMBASE:
976 case OP_LOADR8_MEMBASE:
977 ins->type = STACK_R8;
980 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
/* Managed pointers carry object as their klass. */
984 if (ins->type == STACK_MP)
985 ins->klass = mono_defaults.object_class;
/* Single-row STACK_* table — presumably a MONO_TYPE_* -> STACK_* map;
 * its declaration line is elided in this listing. */
990 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
996 param_table [STACK_MAX] [STACK_MAX] = {
/*
 * check_values_to_signature: validate that the stack types of `args`
 * (this-pointer plus positional arguments) are compatible with `sig`.
 * The visible fragments check byref-ness and reference/float parameter
 * kinds; most branches (and the return statements) are elided here.
 */
1001 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
1005 switch (args->type) {
1015 for (i = 0; i < sig->param_count; ++i) {
1016 switch (args [i].type) {
/* Managed-pointer args require a byref parameter, and vice versa. */
1020 if (!sig->params [i]->byref)
1024 if (sig->params [i]->byref)
1026 switch (sig->params [i]->type) {
1027 case MONO_TYPE_CLASS:
1028 case MONO_TYPE_STRING:
1029 case MONO_TYPE_OBJECT:
1030 case MONO_TYPE_SZARRAY:
1031 case MONO_TYPE_ARRAY:
/* Float stack values must map to R4/R8 parameters. */
1038 if (sig->params [i]->byref)
1040 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1049 /*if (!param_table [args [i].type] [sig->params [i]->type])
1057 * When we need a pointer to the current domain many times in a method, we
1058 * call mono_domain_get() once and we store the result in a local variable.
1059 * This function returns the variable that represents the MonoDomain*.
 * The variable is created lazily on first request.
1061 inline static MonoInst *
1062 mono_get_domainvar (MonoCompile *cfg)
1064 if (!cfg->domainvar)
1065 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1066 return cfg->domainvar;
1070 * The got_var contains the address of the Global Offset Table when AOT
 * compiling. Only meaningful on architectures that define
 * MONO_ARCH_NEED_GOT_VAR; lazily created on first use.
 * NOTE(review): the non-AOT early return and #else branch are elided here.
1073 inline static MonoInst *
1074 mono_get_got_var (MonoCompile *cfg)
1076 #ifdef MONO_ARCH_NEED_GOT_VAR
1077 if (!cfg->compile_aot)
1079 if (!cfg->got_var) {
1080 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1082 return cfg->got_var;
/*
 * mono_get_vtable_var: return (lazily creating) the variable holding the
 * runtime generic context / vtable for shared generic code. Only valid
 * under a generic sharing context (asserted). The var is forced onto the
 * stack with MONO_INST_INDIRECT so the runtime can locate it.
 */
1089 mono_get_vtable_var (MonoCompile *cfg)
1091 g_assert (cfg->generic_sharing_context);
1093 if (!cfg->rgctx_var) {
1094 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1095 /* force the var to be stack allocated */
1096 cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
1099 return cfg->rgctx_var;
/*
 * type_from_stack_type: inverse of type_to_eval_stack_type — map an
 * instruction's STACK_* classification back to a representative MonoType.
 * MP/OBJ/VTYPE use the instruction's klass; unknown stack types abort.
 */
1103 type_from_stack_type (MonoInst *ins) {
1104 switch (ins->type) {
1105 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1106 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1107 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1108 case STACK_R8: return &mono_defaults.double_class->byval_arg;
/* presumably STACK_MP: managed pointer to the instruction's klass. */
1110 return &ins->klass->this_arg;
1111 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1112 case STACK_VTYPE: return &ins->klass->byval_arg;
1114 g_error ("stack type %d to montype not handled\n", ins->type);
/*
 * type_to_stack_type: map a MonoType to its STACK_* classification, after
 * unwrapping enums via mono_type_get_underlying_type. Generic instances
 * are split into valuetype vs. reference cases.
 * NOTE(review): the return statements between the case labels are elided
 * in this listing.
 */
1119 static G_GNUC_UNUSED int
1120 type_to_stack_type (MonoType *t)
1122 switch (mono_type_get_underlying_type (t)->type) {
1125 case MONO_TYPE_BOOLEAN:
1128 case MONO_TYPE_CHAR:
1135 case MONO_TYPE_FNPTR:
1137 case MONO_TYPE_CLASS:
1138 case MONO_TYPE_STRING:
1139 case MONO_TYPE_OBJECT:
1140 case MONO_TYPE_SZARRAY:
1141 case MONO_TYPE_ARRAY:
1149 case MONO_TYPE_VALUETYPE:
1150 case MONO_TYPE_TYPEDBYREF:
1152 case MONO_TYPE_GENERICINST:
1153 if (mono_type_generic_inst_is_valuetype (t))
1159 g_assert_not_reached ();
/*
 * array_access_to_klass: map a CIL ldelem/stelem opcode to the element
 * MonoClass it accesses. NOTE(review): the case labels for most opcodes
 * are elided in this listing; only the returned classes (and the
 * LDELEM_REF/STELEM_REF labels) are visible.
 */
1166 array_access_to_klass (int opcode)
1170 return mono_defaults.byte_class;
1172 return mono_defaults.uint16_class;
1175 return mono_defaults.int_class;
1178 return mono_defaults.sbyte_class;
1181 return mono_defaults.int16_class;
1184 return mono_defaults.int32_class;
1186 return mono_defaults.uint32_class;
1189 return mono_defaults.int64_class;
1192 return mono_defaults.single_class;
1195 return mono_defaults.double_class;
1196 case CEE_LDELEM_REF:
1197 case CEE_STELEM_REF:
1198 return mono_defaults.object_class;
1200 g_assert_not_reached ();
1206 * We try to share variables when possible
 *
 * mono_compile_get_interface_var: return a local variable used to carry a
 * stack value of ins's type across basic-block boundaries at stack slot
 * `slot`. Variables are cached in cfg->intvars, keyed by (type, slot), so
 * the same slot+type pair reuses one variable. Slots beyond the method's
 * declared max_stack (possible when inlining) always get a fresh variable.
1209 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1214 /* inlining can result in deeper stacks */
1215 if (slot >= mono_method_get_header (cfg->method)->max_stack)
1216 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* Cache key: stack type (1-based) + slot * STACK_MAX. */
1218 pos = ins->type - 1 + slot * STACK_MAX;
1220 switch (ins->type) {
1227 if ((vnum = cfg->intvars [pos]))
1228 return cfg->varinfo [vnum];
1229 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1230 cfg->intvars [pos] = res->inst_c0;
/* presumably the non-cacheable default path: always create a new var. */
1233 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/*
 * mono_save_token_info: when AOT compiling, remember which (image, token)
 * pair produced the runtime object `key`, so the AOT compiler can emit a
 * relocation for it later. No-op for JIT compiles.
 */
1239 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1241 if (cfg->compile_aot) {
1242 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1243 jump_info_token->image = image;
1244 jump_info_token->token = token;
1245 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1250 * This function is called to handle items that are left on the evaluation stack
1251 * at basic block boundaries. What happens is that we save the values to local variables
1252 * and we reload them later when first entering the target basic block (with the
1253 * handle_loaded_temps () function).
1254 * A single joint point will use the same variables (stored in the array bb->out_stack or
1255 * bb->in_stack, if the basic block is before or after the joint point).
1257 * This function needs to be called _before_ emitting the last instruction of
1258 * the bb (i.e. before emitting a branch).
1259 * If the stack merge fails at a join point, cfg->unverifiable is set.
 *
 * NOTE(review): several lines of this function (loop closers, breaks,
 * else branches) are elided in this listing.
1262 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1265 MonoBasicBlock *bb = cfg->cbb;
1266 MonoBasicBlock *outb;
1267 MonoInst *inst, **locals;
1272 if (cfg->verbose_level > 3)
1273 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* First exit with a non-empty stack: pick the out_stack variables,
 * reusing a successor's in_stack when one already exists. */
1274 if (!bb->out_scount) {
1275 bb->out_scount = count;
1276 //printf ("bblock %d has out:", bb->block_num);
1278 for (i = 0; i < bb->out_count; ++i) {
1279 outb = bb->out_bb [i];
1280 /* exception handlers are linked, but they should not be considered for stack args */
1281 if (outb->flags & BB_EXCEPTION_HANDLER)
1283 //printf (" %d", outb->block_num);
1284 if (outb->in_stack) {
1286 bb->out_stack = outb->in_stack;
/* No successor had an in_stack: allocate fresh slot variables. */
1292 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1293 for (i = 0; i < count; ++i) {
1295 * try to reuse temps already allocated for this purpouse, if they occupy the same
1296 * stack slot and if they are of the same type.
1297 * This won't cause conflicts since if 'local' is used to
1298 * store one of the values in the in_stack of a bblock, then
1299 * the same variable will be used for the same outgoing stack
1301 * This doesn't work when inlining methods, since the bblocks
1302 * in the inlined methods do not inherit their in_stack from
1303 * the bblock they are inlined to. See bug #58863 for an
1306 if (cfg->inlined_method)
1307 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1309 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate the chosen variables to successors' in_stack; a depth
 * mismatch at a join point marks the method unverifiable. */
1314 for (i = 0; i < bb->out_count; ++i) {
1315 outb = bb->out_bb [i];
1316 /* exception handlers are linked, but they should not be considered for stack args */
1317 if (outb->flags & BB_EXCEPTION_HANDLER)
1319 if (outb->in_scount) {
1320 if (outb->in_scount != bb->out_scount) {
1321 cfg->unverifiable = TRUE;
1324 continue; /* check they are the same locals */
1326 outb->in_scount = count;
1327 outb->in_stack = bb->out_stack;
/* Store each stack item into its variable and replace the stack entry
 * with the variable, so the successor reloads from it. */
1330 locals = bb->out_stack;
1332 for (i = 0; i < count; ++i) {
1333 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1334 inst->cil_code = sp [i]->cil_code;
1335 sp [i] = locals [i];
1336 if (cfg->verbose_level > 3)
1337 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1341 * It is possible that the out bblocks already have in_stack assigned, and
1342 * the in_stacks differ. In this case, we will store to all the different
1349 /* Find a bblock which has a different in_stack */
1351 while (bindex < bb->out_count) {
1352 outb = bb->out_bb [bindex];
1353 /* exception handlers are linked, but they should not be considered for stack args */
1354 if (outb->flags & BB_EXCEPTION_HANDLER) {
1358 if (outb->in_stack != locals) {
1359 for (i = 0; i < count; ++i) {
1360 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1361 inst->cil_code = sp [i]->cil_code;
1362 sp [i] = locals [i];
1363 if (cfg->verbose_level > 3)
1364 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1366 locals = outb->in_stack;
1375 /* Emit code which loads interface_offsets [klass->interface_id]
1376 * The array is stored in memory before vtable.
1379 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1381 if (cfg->compile_aot) {
1382 int ioffset_reg = alloc_preg (cfg);
1383 int iid_reg = alloc_preg (cfg);
1385 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1386 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1387 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
1390 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
1395 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1396 * stored in "klass_reg" implements the interface "klass".
1399 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1401 int ibitmap_reg = alloc_preg (cfg);
1402 int ibitmap_byte_reg = alloc_preg (cfg);
1404 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap));
1406 if (cfg->compile_aot) {
1407 int iid_reg = alloc_preg (cfg);
1408 int shifted_iid_reg = alloc_preg (cfg);
1409 int ibitmap_byte_address_reg = alloc_preg (cfg);
1410 int masked_iid_reg = alloc_preg (cfg);
1411 int iid_one_bit_reg = alloc_preg (cfg);
1412 int iid_bit_reg = alloc_preg (cfg);
1413 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1414 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1415 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1416 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1417 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1418 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1419 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1420 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
1422 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1423 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1428 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1429 * stored in "vtable_reg" implements the interface "klass".
1432 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1434 int ibitmap_reg = alloc_preg (cfg);
1435 int ibitmap_byte_reg = alloc_preg (cfg);
1437 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap));
1439 if (cfg->compile_aot) {
1440 int iid_reg = alloc_preg (cfg);
1441 int shifted_iid_reg = alloc_preg (cfg);
1442 int ibitmap_byte_address_reg = alloc_preg (cfg);
1443 int masked_iid_reg = alloc_preg (cfg);
1444 int iid_one_bit_reg = alloc_preg (cfg);
1445 int iid_bit_reg = alloc_preg (cfg);
1446 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1447 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, shifted_iid_reg, iid_reg, 3);
1448 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1449 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1450 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, masked_iid_reg, iid_reg, 7);
1451 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1452 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1453 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
1455 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1456 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1461 * Emit code which checks whenever the interface id of @klass is smaller than
1462 * than the value given by max_iid_reg.
1465 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1466 MonoBasicBlock *false_target)
1468 if (cfg->compile_aot) {
1469 int iid_reg = alloc_preg (cfg);
1470 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1471 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1474 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1476 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1478 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1481 /* Same as above, but obtains max_iid from a vtable */
1483 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1484 MonoBasicBlock *false_target)
1486 int max_iid_reg = alloc_preg (cfg);
1488 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1489 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1492 /* Same as above, but obtains max_iid from a klass */
1494 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1495 MonoBasicBlock *false_target)
1497 int max_iid_reg = alloc_preg (cfg);
1499 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1500 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1504 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1506 int idepth_reg = alloc_preg (cfg);
1507 int stypes_reg = alloc_preg (cfg);
1508 int stype = alloc_preg (cfg);
1510 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1511 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1512 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1513 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1515 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1516 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1517 if (cfg->compile_aot) {
1518 int const_reg = alloc_preg (cfg);
1519 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1520 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1522 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1524 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
1528 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1530 int intf_reg = alloc_preg (cfg);
1532 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1533 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1534 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1536 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1538 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1542 * Variant of the above that takes a register to the class, not the vtable.
1545 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1547 int intf_bit_reg = alloc_preg (cfg);
1549 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1550 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1551 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1553 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1555 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1559 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1561 if (cfg->compile_aot) {
1562 int const_reg = alloc_preg (cfg);
1563 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1564 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1566 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1568 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1572 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1574 if (cfg->compile_aot) {
1575 int const_reg = alloc_preg (cfg);
1576 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1577 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1579 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1581 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
/*
 * mini_emit_castclass:
 *   Emit a castclass check of the object in OBJ_REG (class in KLASS_REG)
 *   against KLASS, recursing into element classes for arrays.  Throws
 *   InvalidCastException on mismatch; OBJECT_IS_NULL is the block to
 *   branch to when the cast trivially succeeds.
 *   NOTE(review): extraction dropped structural lines (braces, the
 *   array/non-array split); the code below is kept byte-identical.
 */
1585 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1588 int rank_reg = alloc_preg (cfg);
1589 int eclass_reg = alloc_preg (cfg);
/* Array case: the rank must match, then the element class is checked. */
1591 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
1592 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1593 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1594 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1595 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
/* Special-case element classes whose cast rules involve enums/interfaces. */
1596 if (klass->cast_class == mono_defaults.object_class) {
1597 int parent_reg = alloc_preg (cfg);
1598 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
1599 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1600 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1601 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1602 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1603 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1604 } else if (klass->cast_class == mono_defaults.enum_class) {
1605 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1606 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1607 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
/* General element class: recurse on the element type. */
1609 mini_emit_castclass (cfg, obj_reg, eclass_reg, klass->cast_class, object_is_null);
1612 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
1613 /* Check that the object is a vector too */
1614 int bounds_reg = alloc_preg (cfg);
1615 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
1616 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1617 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-array case: test via the supertypes table, as in mini_emit_isninst_cast. */
1620 int idepth_reg = alloc_preg (cfg);
1621 int stypes_reg = alloc_preg (cfg);
1622 int stype = alloc_preg (cfg);
1624 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1625 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1626 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1627 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1629 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1630 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1631 mini_emit_class_check (cfg, stype, klass);
/*
 * mini_emit_memset:
 *   Emit inline code which sets SIZE bytes at DESTREG+OFFSET to VAL.
 *   Only VAL == 0 is supported (asserted below).  Small aligned sizes
 *   use a single store-immediate; larger sizes store a zero register in
 *   descending granularity (8/4/2/1 bytes).
 *   NOTE(review): the switch/loop structure (case labels, braces,
 *   #endif lines) was dropped by extraction; kept byte-identical.
 */
1636 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1640 g_assert (val == 0);
/* Small aligned block: one store-immediate of the matching width. */
1642 if ((size <= 4) && (size <= align)) {
1645 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1648 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1651 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1653 #if SIZEOF_VOID_P == 8
1655 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* General case: materialize the value in a register and store repeatedly. */
1661 val_reg = alloc_preg (cfg);
1663 if (sizeof (gpointer) == 8)
1664 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1666 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
/* Unaligned destination: fall back to byte stores. */
1669 /* This could be optimized further if neccesary */
1671 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1678 #if !NO_UNALIGNED_ACCESS
1679 if (sizeof (gpointer) == 8) {
1681 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1686 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
/* Remaining tail: 4-, 2-, then 1-byte stores. */
1694 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1699 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1704 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1710 #endif /* DISABLE_JIT */
/*
 * mini_emit_memcpy2:
 *   Emit inline code which copies SIZE bytes from SRCREG+SOFFSET to
 *   DESTREG+DOFFSET, using the widest loads/stores the alignment allows
 *   (8/4/2/1 bytes), falling back to byte copies when unaligned.
 *   NOTE(review): loop headers, offset updates and braces were dropped
 *   by extraction; kept byte-identical.
 */
1713 mini_emit_memcpy2 (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
/* Unaligned source/destination: copy one byte at a time. */
1718 /* This could be optimized further if neccesary */
1720 cur_reg = alloc_preg (cfg);
1721 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1722 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/* Aligned case: 8-byte copies on 64-bit when unaligned access is allowed. */
1729 #if !NO_UNALIGNED_ACCESS
1730 if (sizeof (gpointer) == 8) {
1732 cur_reg = alloc_preg (cfg);
1733 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1734 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
/* Tail: 4-, 2-, then 1-byte copies. */
1743 cur_reg = alloc_preg (cfg);
1744 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1745 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1751 cur_reg = alloc_preg (cfg);
1752 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1753 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1759 cur_reg = alloc_preg (cfg);
1760 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1761 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1771 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
1773 int vtable_reg = alloc_preg (cfg);
1775 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
1777 if (cfg->opt & MONO_OPT_SHARED) {
1778 int class_reg = alloc_preg (cfg);
1779 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1780 if (cfg->compile_aot) {
1781 int klass_reg = alloc_preg (cfg);
1782 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
1783 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
1785 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
1788 if (cfg->compile_aot) {
1789 int vt_reg = alloc_preg (cfg);
1790 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, mono_class_vtable (cfg->domain, array_class));
1791 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
1793 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, mono_class_vtable (cfg->domain, array_class));
1797 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
/*
 * ret_type_to_call_opcode:
 *   Map the return type TYPE to the matching call opcode family:
 *   plain/virt/indirect (calli) variants of CALL, VOIDCALL, LCALL,
 *   FCALL and VCALL.  Enums and generic instances are reduced to their
 *   underlying type first.
 *   NOTE(review): many case labels and the byref/handle_enum paths
 *   were dropped by extraction; kept byte-identical.
 */
1801 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
1804 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1807 type = mini_get_basic_type_from_generic (gsctx, type);
1808 switch (type->type) {
1809 case MONO_TYPE_VOID:
1810 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
1813 case MONO_TYPE_BOOLEAN:
1816 case MONO_TYPE_CHAR:
1819 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1823 case MONO_TYPE_FNPTR:
1824 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1825 case MONO_TYPE_CLASS:
1826 case MONO_TYPE_STRING:
1827 case MONO_TYPE_OBJECT:
1828 case MONO_TYPE_SZARRAY:
1829 case MONO_TYPE_ARRAY:
1830 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1833 return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
1836 return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
1837 case MONO_TYPE_VALUETYPE:
1838 if (type->data.klass->enumtype) {
1839 type = type->data.klass->enum_basetype;
1842 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1843 case MONO_TYPE_TYPEDBYREF:
1844 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1845 case MONO_TYPE_GENERICINST:
1846 type = &type->data.generic_class->container_class->byval_arg;
1849 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
1855 * target_type_is_incompatible:
1856 * @cfg: MonoCompile context
1858 * Check that the item @arg on the evaluation stack can be stored
1859 * in the target type (can be a local, or field, etc).
1860 * The cfg arg can be used to check if we need verification or just
1863 * Returns: non-0 value if arg can't be stored on a target.
/*
 * target_type_is_incompatible:
 *   Check that the evaluation-stack item ARG can be stored into TARGET
 *   (a local, field, etc.).  Returns non-zero when it cannot.
 *   NOTE(review): most return statements and several case labels were
 *   dropped by extraction; kept byte-identical.
 */
1866 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1868 MonoType *simple_type;
/* Byref targets accept managed pointers to the pointed-to class. */
1871 if (target->byref) {
1872 /* FIXME: check that the pointed to types match */
1873 if (arg->type == STACK_MP)
1874 return arg->klass != mono_class_from_mono_type (target);
1875 if (arg->type == STACK_PTR)
/* Non-byref: dispatch on the underlying type of TARGET. */
1880 simple_type = mono_type_get_underlying_type (target);
1881 switch (simple_type->type) {
1882 case MONO_TYPE_VOID:
1886 case MONO_TYPE_BOOLEAN:
1889 case MONO_TYPE_CHAR:
1892 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1896 /* STACK_MP is needed when setting pinned locals */
1897 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1902 case MONO_TYPE_FNPTR:
1903 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1906 case MONO_TYPE_CLASS:
1907 case MONO_TYPE_STRING:
1908 case MONO_TYPE_OBJECT:
1909 case MONO_TYPE_SZARRAY:
1910 case MONO_TYPE_ARRAY:
1911 if (arg->type != STACK_OBJ)
1913 /* FIXME: check type compatibility */
1917 if (arg->type != STACK_I8)
1922 if (arg->type != STACK_R8)
/* Value types must match the exact class of the stack item. */
1925 case MONO_TYPE_VALUETYPE:
1926 if (arg->type != STACK_VTYPE)
1928 klass = mono_class_from_mono_type (simple_type);
1929 if (klass != arg->klass)
1932 case MONO_TYPE_TYPEDBYREF:
1933 if (arg->type != STACK_VTYPE)
1935 klass = mono_class_from_mono_type (simple_type);
1936 if (klass != arg->klass)
1939 case MONO_TYPE_GENERICINST:
1940 if (mono_type_generic_inst_is_valuetype (simple_type)) {
1941 if (arg->type != STACK_VTYPE)
1943 klass = mono_class_from_mono_type (simple_type);
1944 if (klass != arg->klass)
1948 if (arg->type != STACK_OBJ)
1950 /* FIXME: check type compatibility */
1954 case MONO_TYPE_MVAR:
1955 /* FIXME: all the arguments must be references for now,
1956 * later look inside cfg and see if the arg num is
1957 * really a reference
1959 g_assert (cfg->generic_sharing_context);
1960 if (arg->type != STACK_OBJ)
1964 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
1970 * Prepare arguments for passing to a function call.
1971 * Return a non-zero value if the arguments can't be passed to the given
1973 * The type checks are not yet complete and some conversions may need
1974 * casts on 32 or 64 bit architectures.
1976 * FIXME: implement this using target_type_is_incompatible ()
/*
 * check_call_signature:
 *   Verify that ARGS can be passed to a call with signature SIG:
 *   returns non-zero when some argument's stack type does not match the
 *   corresponding parameter type.  Type checks are incomplete (see the
 *   comment above the function in the original file).
 *   NOTE(review): return statements, case labels and the hasthis check
 *   were dropped by extraction; kept byte-identical.
 */
1979 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
1981 MonoType *simple_type;
/* The implicit 'this' argument must be an object or (managed) pointer. */
1985 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
1989 for (i = 0; i < sig->param_count; ++i) {
1990 if (sig->params [i]->byref) {
1991 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
1995 simple_type = sig->params [i];
1996 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
1998 switch (simple_type->type) {
1999 case MONO_TYPE_VOID:
2004 case MONO_TYPE_BOOLEAN:
2007 case MONO_TYPE_CHAR:
2010 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2016 case MONO_TYPE_FNPTR:
2017 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2020 case MONO_TYPE_CLASS:
2021 case MONO_TYPE_STRING:
2022 case MONO_TYPE_OBJECT:
2023 case MONO_TYPE_SZARRAY:
2024 case MONO_TYPE_ARRAY:
2025 if (args [i]->type != STACK_OBJ)
2030 if (args [i]->type != STACK_I8)
2035 if (args [i]->type != STACK_R8)
2038 case MONO_TYPE_VALUETYPE:
2039 if (simple_type->data.klass->enumtype) {
2040 simple_type = simple_type->data.klass->enum_basetype;
2043 if (args [i]->type != STACK_VTYPE)
2046 case MONO_TYPE_TYPEDBYREF:
2047 if (args [i]->type != STACK_VTYPE)
2050 case MONO_TYPE_GENERICINST:
2051 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2055 g_error ("unknown type 0x%02x in check_call_signature",
2063 callvirt_to_call (int opcode)
2068 case OP_VOIDCALLVIRT:
2077 g_assert_not_reached ();
2084 callvirt_to_call_membase (int opcode)
2088 return OP_CALL_MEMBASE;
2089 case OP_VOIDCALLVIRT:
2090 return OP_VOIDCALL_MEMBASE;
2092 return OP_FCALL_MEMBASE;
2094 return OP_LCALL_MEMBASE;
2096 return OP_VCALL_MEMBASE;
2098 g_assert_not_reached ();
#ifdef MONO_ARCH_HAVE_IMT
/*
 * emit_imt_argument:
 *   Emit the hidden IMT argument for an interface call: either the
 *   supplied IMT_ARG, or the method itself (AOT const or direct
 *   pointer), passed in MONO_ARCH_IMT_REG when the architecture
 *   defines one, otherwise via the arch-specific hook.
 */
static void
emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
{
#ifdef MONO_ARCH_IMT_REG
	int method_reg = alloc_preg (cfg);

	if (imt_arg) {
		MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
	} else if (cfg->compile_aot) {
		MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
	} else {
		MonoInst *ins;
		MONO_INST_NEW (cfg, ins, OP_PCONST);
		ins->inst_p0 = call->method;
		ins->dreg = method_reg;
		MONO_ADD_INS (cfg->cbb, ins);
	}

	mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
#else
	mono_arch_emit_imt_argument (cfg, call, imt_arg);
#endif
}
#endif
2130 static MonoJumpInfo *
2131 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2133 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2137 ji->data.target = target;
2142 inline static MonoInst*
2143 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args);
/*
 * mono_emit_call_args:
 *   Create a MonoCallInst for a call with signature SIG and arguments
 *   ARGS, allocating the return vreg (or a vtype return temp reached
 *   through OP_OUTARG_VTRETADDR), converting R4 args on soft-float
 *   targets, and letting the backend lower the argument passing.
 *   NOTE(review): braces and parts of the SOFT_FLOAT section were
 *   dropped by extraction; kept byte-identical.
 */
2145 inline static MonoCallInst *
2146 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2147 MonoInst **args, int calli, int virtual)
2150 #ifdef MONO_ARCH_SOFT_FLOAT
2154 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2157 call->signature = sig;
2159 type_to_eval_stack_type ((cfg), sig->ret, &call->inst);
/* Struct return: route the result through a local reached via OP_OUTARG_VTRETADDR. */
2161 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2162 MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
2165 temp->backend.is_pinvoke = sig->pinvoke;
2168 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2169 * address of return value to increase optimization opportunities.
2170 * Before vtype decomposition, the dreg of the call ins itself represents the
2171 * fact the call modifies the return value. After decomposition, the call will
2172 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2173 * will be transformed into an LDADDR.
2175 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2176 loada->dreg = alloc_preg (cfg);
2177 loada->inst_p0 = temp;
2178 /* We reference the call too since call->dreg could change during optimization */
2179 loada->inst_p1 = call;
2180 MONO_ADD_INS (cfg->cbb, loada);
2182 call->inst.dreg = temp->dreg;
2184 call->vret_var = loada;
2185 } else if (!MONO_TYPE_IS_VOID (sig->ret))
2186 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2188 #ifdef MONO_ARCH_SOFT_FLOAT
2190 * If the call has a float argument, we would need to do an r8->r4 conversion using
2191 * an icall, but that cannot be done during the call sequence since it would clobber
2192 * the call registers + the stack. So we do it before emitting the call.
2194 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2196 MonoInst *in = call->args [i];
2198 if (i >= sig->hasthis)
2199 t = sig->params [i - sig->hasthis];
2201 t = &mono_defaults.int_class->byval_arg;
2202 t = mono_type_get_underlying_type (t);
2204 if (!t->byref && t->type == MONO_TYPE_R4) {
2205 MonoInst *iargs [1];
2209 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2211 /* The result will be in an int vreg */
2212 call->args [i] = conv;
/* Let the backend lower argument passing and record stack usage. */
2217 mono_arch_emit_call (cfg, call);
2219 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2220 cfg->flags |= MONO_CFG_HAS_CALLS;
2225 inline static MonoInst*
2226 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr)
2228 MonoCallInst *call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE);
2230 call->inst.sreg1 = addr->dreg;
2232 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2234 return (MonoInst*)call;
2237 inline static MonoInst*
2238 mono_emit_rgctx_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *rgctx_arg)
2240 #ifdef MONO_ARCH_RGCTX_REG
2242 int rgctx_reg = mono_alloc_preg (cfg);
2244 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2245 call = (MonoCallInst*)mono_emit_calli (cfg, sig, args, addr);
2246 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2247 cfg->uses_rgctx_reg = TRUE;
2248 return (MonoInst*)call;
2250 g_assert_not_reached ();
/*
 * mono_emit_method_call_full:
 *   Emit a (possibly virtual) call to METHOD with signature SIG and
 *   arguments ARGS.  THIS being non-NULL selects virtual dispatch;
 *   direct dispatch is used when the method is non-virtual or
 *   final/sealed, delegate Invoke goes through invoke_impl, interface
 *   calls go through the IMT or the interface-offset table, and the
 *   general case loads the target from the vtable slot.
 *   NOTE(review): braces, #else/#endif lines and some declarations were
 *   dropped by extraction; kept byte-identical.
 */
2256 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2257 MonoInst **args, MonoInst *this, MonoInst *imt_arg)
2259 gboolean virtual = this != NULL;
2260 gboolean enable_for_aot = TRUE;
2263 if (method->string_ctor) {
2264 /* Create the real signature */
2265 /* FIXME: Cache these */
2266 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_full (cfg->mempool, sig);
2267 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2272 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual);
/* Remoting: route through a check wrapper for MarshalByRef targets. */
2274 if (this && sig->hasthis &&
2275 (method->klass->marshalbyref || method->klass == mono_defaults.object_class) &&
2276 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && !MONO_CHECK_THIS (this)) {
2277 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2279 call->method = method;
2281 call->inst.flags |= MONO_INST_HAS_METHOD;
2282 call->inst.inst_left = this;
2285 int vtable_reg, slot_reg, this_reg;
2287 this_reg = this->dreg;
/* Non-virtual (or final non-remoting) target: null-check and call directly. */
2289 if ((!cfg->compile_aot || enable_for_aot) &&
2290 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2291 ((method->flags & METHOD_ATTRIBUTE_FINAL) &&
2292 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK))) {
2294 * the method is not virtual, we just need to ensure this is not null
2295 * and then we can call the method directly.
2297 if (method->klass->marshalbyref || method->klass == mono_defaults.object_class) {
2298 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2301 if (!method->string_ctor) {
2302 cfg->flags |= MONO_CFG_HAS_CHECK_THIS;
2303 MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, this_reg);
2304 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, this_reg);
2307 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2309 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2311 return (MonoInst*)call;
/* Delegate Invoke: call through delegate->invoke_impl. */
2314 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
2315 if ((method->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
2316 /* Make a call to delegate->invoke_impl */
2317 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2318 call->inst.inst_basereg = this_reg;
2319 call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2320 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2322 return (MonoInst*)call;
/* Virtual but sealed (method or class): static dispatch after a null check. */
2326 if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
2327 ((method->flags & METHOD_ATTRIBUTE_FINAL) ||
2328 (method->klass && method->klass->flags & TYPE_ATTRIBUTE_SEALED))) {
2330 * the method is virtual, but we can statically dispatch since either
2331 * it's class or the method itself are sealed.
2332 * But first we need to ensure it's not a null reference.
2334 cfg->flags |= MONO_CFG_HAS_CHECK_THIS;
2335 MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, this_reg);
2336 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, this_reg);
2338 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2339 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2341 return (MonoInst*)call;
/* General virtual dispatch: load the target from the vtable/IMT slot. */
2344 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2346 /* Initialize method->slot */
2347 mono_class_setup_vtable (method->klass);
2349 vtable_reg = alloc_preg (cfg);
2350 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2351 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2353 #ifdef MONO_ARCH_HAVE_IMT
2355 guint32 imt_slot = mono_method_get_imt_slot (method);
2356 emit_imt_argument (cfg, call, imt_arg);
2357 slot_reg = vtable_reg;
2358 call->inst.inst_offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2361 if (slot_reg == -1) {
2362 slot_reg = alloc_preg (cfg);
2363 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2364 call->inst.inst_offset = method->slot * SIZEOF_VOID_P;
2367 slot_reg = vtable_reg;
2368 call->inst.inst_offset = G_STRUCT_OFFSET (MonoVTable, vtable) + (method->slot * SIZEOF_VOID_P);
2371 call->inst.sreg1 = slot_reg;
2372 call->virtual = TRUE;
2375 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2377 return (MonoInst*)call;
2380 static inline MonoInst*
2381 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2383 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), args, this, NULL);
2387 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2394 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE);
2397 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2399 return (MonoInst*)call;
2402 inline static MonoInst*
2403 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2405 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2409 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2413 * mono_emit_abs_call:
2415 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2417 inline static MonoInst*
2418 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2419 MonoMethodSignature *sig, MonoInst **args)
2421 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2425 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
2428 if (cfg->abs_patches == NULL)
2429 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2430 g_hash_table_insert (cfg->abs_patches, ji, ji);
2431 ins = mono_emit_native_call (cfg, ji, sig, args);
2432 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
2437 get_memcpy_method (void)
2439 static MonoMethod *memcpy_method = NULL;
2440 if (!memcpy_method) {
2441 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2443 g_error ("Old corlib found. Install a new one");
2445 return memcpy_method;
2449 * Emit code to copy a valuetype of type @klass whose address is stored in
2450 * @src->dreg to memory whose address is stored at @dest->dreg.
2453 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
2455 MonoInst *iargs [3];
2458 MonoMethod *memcpy_method;
2462 * This check breaks with spilled vars... need to handle it during verification anyway.
2463 * g_assert (klass && klass == src->klass && klass == dest->klass);
/* NATIVE selects the unmanaged (marshalled) layout size of the valuetype. */
2467 n = mono_class_native_size (klass, &align);
2469 n = mono_class_value_size (klass, &align);
/* Small copies (<= 5 pointer-sized words) are inlined as raw moves... */
2471 if ((cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
2472 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
2473 mini_emit_memcpy2 (cfg, dest->dreg, 0, src->dreg, 0, n, align);
/* ...larger ones go through the corlib memcpy helper (dest, src, n). */
2477 EMIT_NEW_ICONST (cfg, iargs [2], n);
2479 memcpy_method = get_memcpy_method ();
2480 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *
 *   Return the corlib String::memset(3-arg) helper method, caching the
 * lookup in a function-local static. Aborts if corlib is too old to have it.
 */
2485 get_memset_method (void)
2487 static MonoMethod *memset_method = NULL;
2488 if (!memset_method) {
2489 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
2491 g_error ("Old corlib found. Install a new one");
2493 return memset_method;
/*
 * mini_emit_initobj:
 *
 *   Emit code to zero-initialize a valuetype of type KLASS at the address
 * stored in DEST->dreg (implements the 'initobj' opcode at IP).
 * Small types are zeroed inline; larger ones call the corlib memset helper
 * with arguments (dest, 0, size).
 */
2497 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
2499 MonoInst *iargs [3];
2502 MonoMethod *memset_method;
2504 /* FIXME: Optimize this for the case when dest is an LDADDR */
2506 mono_class_init (klass);
2507 n = mono_class_value_size (klass, &align);
/* Inline the zeroing for small types (<= 5 pointer-sized words). */
2509 if (n <= sizeof (gpointer) * 5) {
2510 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
2513 memset_method = get_memset_method ();
2515 EMIT_NEW_ICONST (cfg, iargs [1], 0);
2516 EMIT_NEW_ICONST (cfg, iargs [2], n);
2517 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 *
 *   Emit IR that loads the runtime generic context needed by METHOD in
 * shared generic code. Three cases are visible below:
 *   - CONTEXT_USED includes the method context: load the method rgctx
 *     (mrgctx) from the vtable variable;
 *   - static method: load the class vtable from the vtable variable
 *     (indirecting through the mrgctx when the method is inflated);
 *   - otherwise: fetch the vtable from the 'this' argument's MonoObject.
 * Does not apply to valuetype methods (asserted).
 */
2522 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
2524 MonoInst *this = NULL;
2526 g_assert (!method->klass->valuetype);
/* Non-static methods that don't need the method context get the rgctx
 * through the receiver, so load argument 0. */
2528 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) && !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD))
2529 EMIT_NEW_ARGLOAD (cfg, this, 0);
2531 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
2532 MonoInst *mrgctx_loc, *mrgctx_var;
2535 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
2537 mrgctx_loc = mono_get_vtable_var (cfg);
2538 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
2541 } else if (method->flags & METHOD_ATTRIBUTE_STATIC) {
2542 MonoInst *vtable_loc, *vtable_var;
2546 vtable_loc = mono_get_vtable_var (cfg);
2547 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
/* For inflated generic methods the variable actually holds the mrgctx;
 * indirect through it to reach the class vtable. */
2549 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
2550 MonoInst *mrgctx_var = vtable_var;
2553 vtable_reg = alloc_preg (cfg);
2554 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
2555 vtable_var->type = STACK_PTR;
2561 int vtable_reg, res_reg;
2563 vtable_reg = alloc_preg (cfg);
2564 res_reg = alloc_preg (cfg);
/* Instance case: the vtable lives in the receiver's MonoObject header. */
2565 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Fetch the runtime generic context for the current method into RGCTX,
 * first bailing out of generic sharing when the method is on a valuetype. */
2570 #define EMIT_GET_RGCTX(rgctx, context_used) do { \
2571 GENERIC_SHARING_FAILURE_IF_VALUETYPE_METHOD(*ip); \
2572 (rgctx) = emit_get_rgctx (cfg, method, (context_used)); \
/*
 * mono_patch_info_rgctx_entry_new:
 *
 *   Allocate (from MP) and fill a MonoJumpInfoRgctxEntry describing a
 * lazily-resolved rgctx slot: the embedded patch (PATCH_TYPE/PATCH_DATA)
 * plus the kind of information to fetch (INFO_TYPE). IN_MRGCTX selects the
 * per-method rather than the per-class context.
 */
2575 static MonoJumpInfoRgctxEntry *
2576 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, int info_type)
2578 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
2579 res->method = method;
2580 res->in_mrgctx = in_mrgctx;
2581 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
2582 res->data->type = patch_type;
2583 res->data->data.target = patch_data;
2584 res->info_type = info_type;
/* Emit a call to the rgctx lazy-fetch trampoline that resolves ENTRY,
 * passing RGCTX as the single argument. */
2589 static inline MonoInst*
2590 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
2592 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/* Emit a lazy rgctx fetch yielding RGCTX_TYPE information about KLASS
 * (e.g. its vtable), looked up through RGCTX. */
2596 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
2597 MonoInst *rgctx, MonoClass *klass, int rgctx_type)
2599 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
2601 return emit_rgctx_fetch (cfg, rgctx, entry);
/* Emit a lazy rgctx fetch yielding RGCTX_TYPE information about CMETHOD
 * (e.g. its compiled code address), looked up through RGCTX. */
2605 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
2606 MonoInst *rgctx, MonoMethod *cmethod, int rgctx_type)
2608 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
2610 return emit_rgctx_fetch (cfg, rgctx, entry);
/* Emit a lazy rgctx fetch yielding RGCTX_TYPE information about FIELD,
 * looked up through RGCTX. */
2614 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
2615 MonoInst *rgctx, MonoClassField *field, int rgctx_type)
2617 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
2619 return emit_rgctx_fetch (cfg, rgctx, entry);
2623 * Handles unbox of a Nullable<T>. If a rgctx is passed, then shared generic code
2627 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used, MonoInst *rgctx)
/* Unboxing a Nullable<T> is done by calling its Unbox(1-arg) method. */
2629 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
/* Shared generic code: resolve the method's code address through the rgctx
 * and call indirectly; otherwise emit a normal direct call. */
2632 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, rgctx, method,
2633 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
2635 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
2637 return mono_emit_method_call (cfg, method, &val, NULL);
/*
 * handle_unbox:
 *
 *   Emit the type check and address computation for the 'unbox' opcode:
 * verify that the object on top of the stack (SP[0]) is a boxed instance
 * of KLASS (throwing InvalidCastException otherwise) and produce the
 * address of the boxed payload (object + sizeof (MonoObject)).
 */
2642 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used, MonoInst *rgctx)
2646 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
2647 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
2648 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
2649 int rank_reg = alloc_dreg (cfg ,STACK_I4);
2651 obj_reg = sp [0]->dreg;
2652 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2653 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
2655 /* FIXME: generics */
2656 g_assert (klass->rank == 0);
/* A boxed valuetype can never be an array: rank must be 0. */
2659 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
2660 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
2662 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2663 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
2666 MonoInst *element_class;
2668 /* This assertion is from the unboxcast insn */
2669 g_assert (klass->rank == 0);
/* Shared generic code: the expected element class must be looked up
 * through the rgctx and compared at run time. */
2671 element_class = emit_get_rgctx_klass (cfg, context_used, rgctx,
2672 klass->element_class, MONO_RGCTX_INFO_KLASS);
2674 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
2675 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
2677 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
/* Result: address of the payload right after the MonoObject header. */
2680 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_PTR), obj_reg, sizeof (MonoObject));
2681 MONO_ADD_INS (cfg->cbb, add);
2682 add->type = STACK_MP;
/*
 * handle_alloc:
 *
 *   Emit code to allocate an instance of KLASS (FOR_BOX when the result
 * will hold a boxed valuetype). Chooses among: the generic mono_object_new
 * icall (shared code), a corlib-token helper (AOT out-of-line paths), a GC
 * managed allocator method, or a class-specific allocation function.
 */
2689 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box)
2691 MonoInst *iargs [2];
/* Shared code can't burn the vtable into the code: pass (domain, klass)
 * to the generic allocator icall instead. */
2694 if (cfg->opt & MONO_OPT_SHARED) {
2695 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
2696 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
2698 alloc_ftn = mono_object_new;
2699 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib) {
2700 /* This happens often in argument checking code, eg. throw new FooException... */
2701 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
2702 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
2703 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
2705 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2706 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
/* Prefer the GC's inline managed allocator when one is available. */
2709 if (managed_alloc) {
2710 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
2711 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
2713 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* Some allocators take the instance size in pointer-sized words as the
 * first argument; compute it rounded up to pointer alignment. */
2715 guint32 lw = vtable->klass->instance_size;
2716 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
2717 EMIT_NEW_ICONST (cfg, iargs [0], lw);
2718 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
2721 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
2725 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/*
 * handle_alloc_from_inst:
 *
 *   Like handle_alloc (), but for shared generic code where the vtable is
 * not known at compile time: DATA_INST holds the run-time vtable (or klass
 * in the MONO_OPT_SHARED case), so allocation goes through
 * mono_object_new_specific / mono_object_new.
 */
2729 handle_alloc_from_inst (MonoCompile *cfg, MonoClass *klass, MonoInst *data_inst,
2732 MonoInst *iargs [2];
2733 MonoMethod *managed_alloc = NULL;
2737 FIXME: we cannot get managed_alloc here because we can't get
2738 the class's vtable (because it's not a closed class)
2740 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2741 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
2744 if (cfg->opt & MONO_OPT_SHARED) {
2745 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
2746 iargs [1] = data_inst;
2747 alloc_ftn = mono_object_new;
/* managed_alloc is always NULL here (see the FIXME above), so this branch
 * is currently dead; kept for when the vtable can be obtained. */
2749 if (managed_alloc) {
2750 iargs [0] = data_inst;
2751 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
2754 iargs [0] = data_inst;
2755 alloc_ftn = mono_object_new_specific;
2758 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/*
 * handle_box:
 *
 *   Emit code to box the valuetype VAL of type KLASS: allocate the object
 * and store the value right after the MonoObject header. Nullable<T> is
 * boxed by calling its Box(1-arg) method instead.
 */
2762 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass)
2764 MonoInst *alloc, *ins;
2766 if (mono_class_is_nullable (klass)) {
2767 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
2768 return mono_emit_method_call (cfg, method, &val, NULL);
2771 alloc = handle_alloc (cfg, klass, TRUE);
2773 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * handle_box_from_inst:
 *
 *   Shared-generic-code variant of handle_box (): the vtable comes from
 * DATA_INST at run time, and for Nullable<T> the Box method's code address
 * is resolved through the rgctx and called indirectly.
 */
2779 handle_box_from_inst (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used, MonoInst *rgctx, MonoInst *data_inst)
2781 MonoInst *alloc, *ins;
2783 if (mono_class_is_nullable (klass)) {
2784 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
2785 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, rgctx, method,
2786 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
2788 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
2790 alloc = handle_alloc_from_inst (cfg, klass, data_inst, TRUE);
2792 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * handle_castclass:
 *
 *   Emit the 'castclass' opcode for SRC against KLASS: null passes through,
 * a failed check throws InvalidCastException. Interfaces use the interface
 * cast helper; sealed non-array classes get a fast vtable/klass pointer
 * comparison; everything else falls back to mini_emit_castclass ().
 * With --debug=casts enabled, the from/to classes are recorded in TLS so
 * the exception message can name them.
 */
2799 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
2801 MonoBasicBlock *is_null_bb;
2802 int obj_reg = src->dreg;
2803 int vtable_reg = alloc_preg (cfg);
2805 NEW_BBLOCK (cfg, is_null_bb);
/* null references always cast successfully: skip all checks. */
2807 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
2808 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
2810 if (mini_get_debug_options ()->better_cast_details) {
2811 int to_klass_reg = alloc_preg (cfg);
2812 int klass_reg = alloc_preg (cfg);
2813 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
2816 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
/* Record source and destination classes in MonoJitTlsData for the
 * detailed cast-failure message. */
2820 MONO_ADD_INS (cfg->cbb, tls_get);
2821 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2822 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2824 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
2825 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
2826 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
2829 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2830 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2831 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
2833 int klass_reg = alloc_preg (cfg);
2835 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Fast path for sealed, non-array classes: the cast succeeds iff the
 * vtable (or klass) pointer matches exactly. */
2837 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
2838 /* the remoting code is broken, access the class for now */
2840 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
2841 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
2843 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2844 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
2846 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
2848 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2849 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, is_null_bb);
2853 MONO_START_BB (cfg, is_null_bb);
2855 /* Reset the variables holding the cast details */
2856 if (mini_get_debug_options ()->better_cast_details) {
2857 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
2859 MONO_ADD_INS (cfg->cbb, tls_get);
2860 /* It is enough to reset the from field */
2861 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
/*
 * handle_isinst:
 *
 *   Emit the 'isinst' opcode for SRC against KLASS. The result register is
 * pre-loaded with the object (so the success/null paths need no extra
 * assignment) and is cleared to 0 on the failure path. Arrays, enums,
 * interfaces, Nullable and sealed classes each get specialized checks.
 */
2868 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
2871 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
2872 int obj_reg = src->dreg;
2873 int vtable_reg = alloc_preg (cfg);
2874 int res_reg = alloc_preg (cfg);
2876 NEW_BBLOCK (cfg, is_null_bb);
2877 NEW_BBLOCK (cfg, false_bb);
2878 NEW_BBLOCK (cfg, end_bb);
2880 /* Do the assignment at the beginning, so the other assignment can be if converted */
2881 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
2882 ins->type = STACK_OBJ;
/* isinst on null yields null: jump straight to the pass-through block. */
2885 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
2886 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
2888 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2889 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2890 /* the is_null_bb target simply copies the input register to the output */
2891 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
2893 int klass_reg = alloc_preg (cfg);
2895 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Array case: compare rank, then check the element class, with special
 * handling for the enum/object element-type equivalences. */
2898 int rank_reg = alloc_preg (cfg);
2899 int eclass_reg = alloc_preg (cfg);
2901 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
2902 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
2903 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
2904 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2905 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
2906 if (klass->cast_class == mono_defaults.object_class) {
2907 int parent_reg = alloc_preg (cfg);
2908 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
2909 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
2910 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
2911 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
2912 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
2913 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
2914 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
2915 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
2916 } else if (klass->cast_class == mono_defaults.enum_class) {
2917 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
2918 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
2919 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
2920 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
2922 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
2923 /* Check that the object is a vector too */
2924 int bounds_reg = alloc_preg (cfg);
2925 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
2926 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
2927 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
2930 /* the is_null_bb target simply copies the input register to the output */
2931 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
2933 } else if (mono_class_is_nullable (klass)) {
2934 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2935 /* the is_null_bb target simply copies the input register to the output */
2936 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
/* Fast path for sealed classes: identity compare of the vtable/klass. */
2938 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
2939 /* the remoting code is broken, access the class for now */
2941 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
2942 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
2944 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2945 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
2947 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
2948 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
2950 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2951 /* the is_null_bb target simply copies the input register to the output */
2952 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, is_null_bb);
/* Failure: result is 0 (null). Success and null inputs keep the object. */
2957 MONO_START_BB (cfg, false_bb);
2959 MONO_EMIT_NEW_ICONST (cfg, res_reg, 0);
2960 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
2962 MONO_START_BB (cfg, is_null_bb);
2964 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *
 *   Remoting-aware isinst variant; see the result encoding below. Proxy
 * objects (MonoTransparentProxy) whose type cannot be decided locally
 * yield 2 so the caller can fall back to a remote check.
 */
2970 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
2972 /* This opcode takes as input an object reference and a class, and returns:
2973 0) if the object is an instance of the class,
2974 1) if the object is not instance of the class,
2975 2) if the object is a proxy whose type cannot be determined */
2978 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
2979 int obj_reg = src->dreg;
2980 int dreg = alloc_ireg (cfg);
2982 int klass_reg = alloc_preg (cfg);
2984 NEW_BBLOCK (cfg, true_bb);
2985 NEW_BBLOCK (cfg, false_bb);
2986 NEW_BBLOCK (cfg, false2_bb);
2987 NEW_BBLOCK (cfg, end_bb);
2988 NEW_BBLOCK (cfg, no_proxy_bb);
/* null is "not an instance" (result 1). */
2990 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
2991 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
2993 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2994 NEW_BBLOCK (cfg, interface_fail_bb);
2996 tmp_reg = alloc_preg (cfg);
2997 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2998 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
/* Interface check failed: it may still be a proxy with custom type info,
 * in which case the answer is "can't tell" (result 2). */
2999 MONO_START_BB (cfg, interface_fail_bb);
3000 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3002 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
3004 tmp_reg = alloc_preg (cfg);
3005 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3006 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3007 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
3009 tmp_reg = alloc_preg (cfg);
3010 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3011 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Non-interface case: proxies check against the remote class's
 * proxy_class; objects with custom type info go to the "can't tell" path. */
3013 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3014 tmp_reg = alloc_preg (cfg);
3015 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3016 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3018 tmp_reg = alloc_preg (cfg);
3019 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3020 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3021 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3023 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
3024 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
3026 MONO_START_BB (cfg, no_proxy_bb);
3028 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
/* Materialize the three result values and fall through to end_bb. */
3031 MONO_START_BB (cfg, false_bb);
3033 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3034 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3036 MONO_START_BB (cfg, false2_bb);
3038 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
3039 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3041 MONO_START_BB (cfg, true_bb);
3043 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3045 MONO_START_BB (cfg, end_bb);
3048 MONO_INST_NEW (cfg, ins, OP_ICONST);
3050 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *
 *   Remoting-aware castclass variant; see the result encoding below.
 * Unlike handle_cisinst (), a definite mismatch throws
 * InvalidCastException instead of producing a result value.
 */
3056 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3058 /* This opcode takes as input an object reference and a class, and returns:
3059 0) if the object is an instance of the class,
3060 1) if the object is a proxy whose type cannot be determined
3061 an InvalidCastException exception is thrown otherwise*/
3064 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
3065 int obj_reg = src->dreg;
3066 int dreg = alloc_ireg (cfg);
3067 int tmp_reg = alloc_preg (cfg);
3068 int klass_reg = alloc_preg (cfg);
3070 NEW_BBLOCK (cfg, end_bb);
3071 NEW_BBLOCK (cfg, ok_result_bb);
/* null casts successfully (result 0). */
3073 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3074 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
3076 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3077 NEW_BBLOCK (cfg, interface_fail_bb);
3079 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3080 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
/* Interface check failed: only a transparent proxy with custom type info
 * may defer the decision (result 1); anything else throws. */
3081 MONO_START_BB (cfg, interface_fail_bb);
3082 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3084 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
3086 tmp_reg = alloc_preg (cfg);
3087 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3088 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3089 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
3091 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3092 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3095 NEW_BBLOCK (cfg, no_proxy_bb);
/* Non-interface case: proxies check against the remote proxy_class;
 * plain objects take the regular castclass path. */
3097 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3098 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3099 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3101 tmp_reg = alloc_preg (cfg);
3102 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3103 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3105 tmp_reg = alloc_preg (cfg);
3106 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3107 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3108 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3110 NEW_BBLOCK (cfg, fail_1_bb);
3112 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
3114 MONO_START_BB (cfg, fail_1_bb);
3116 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3117 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3119 MONO_START_BB (cfg, no_proxy_bb);
3121 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
3124 MONO_START_BB (cfg, ok_result_bb);
3126 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3128 MONO_START_BB (cfg, end_bb);
3131 MONO_INST_NEW (cfg, ins, OP_ICONST);
3133 ins->type = STACK_I4;
/*
 * handle_delegate_ctor:
 *
 *   Emit an inlined delegate construction for delegate class KLASS bound
 * to METHOD with receiver TARGET: allocates the delegate and fills its
 * target, method, method_code and invoke_impl fields directly, avoiding a
 * call to mono_delegate_ctor () at run time.
 */
3138 static G_GNUC_UNUSED MonoInst*
3139 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method)
3141 gpointer *trampoline;
3142 MonoInst *obj, *method_ins, *tramp_ins;
3146 obj = handle_alloc (cfg, klass, FALSE);
3148 /* Inline the contents of mono_delegate_ctor */
3150 /* Set target field */
3151 /* Optimize away setting of NULL target */
3152 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0))
3153 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
3155 /* Set method field */
3156 EMIT_NEW_METHODCONST (cfg, method_ins, method);
3157 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
3160 * To avoid looking up the compiled code belonging to the target method
3161 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
3162 * store it, and we fill it after the method has been compiled.
/* The per-domain slot cache is shared, so look it up / create it under the
 * domain lock. Not done for AOT or dynamic methods. */
3164 if (!cfg->compile_aot && !method->dynamic) {
3165 MonoInst *code_slot_ins;
3167 domain = mono_domain_get ();
3168 mono_domain_lock (domain);
3169 if (!domain->method_code_hash)
3170 domain->method_code_hash = g_hash_table_new (NULL, NULL);
3171 code_slot = g_hash_table_lookup (domain->method_code_hash, method);
3173 code_slot = mono_mempool_alloc0 (domain->mp, sizeof (gpointer));
3174 g_hash_table_insert (domain->method_code_hash, method, code_slot);
3176 mono_domain_unlock (domain);
3178 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
3179 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
3182 /* Set invoke_impl field */
3183 trampoline = mono_create_delegate_trampoline (klass);
3184 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_ABS, trampoline);
3185 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
3187 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit the allocation of a multi-dimensional array of rank RANK (the
 * 'newobj' on an array ctor at IP), with the dimension arguments in SP.
 * Goes through the rank-specific vararg array-new icall wrapper.
 */
3193 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
3195 MonoJitICallInfo *info;
3197 /* Need to register the icall so it gets an icall wrapper */
3198 info = mono_get_array_new_va_icall (rank);
3200 cfg->flags |= MONO_CFG_HAS_VARARGS;
3202 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
3203 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * mono_emit_load_got_addr:
 *
 *   Emit the OP_LOAD_GOTADDR instruction that initializes the GOT variable
 * at the very start of the method, plus a dummy use in the exit block to
 * keep the variable alive for the whole method. No-op if there is no GOT
 * variable or it was already allocated.
 */
3207 mono_emit_load_got_addr (MonoCompile *cfg)
3209 MonoInst *getaddr, *dummy_use;
3211 if (!cfg->got_var || cfg->got_var_allocated)
3214 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
3215 getaddr->dreg = cfg->got_var->dreg;
3217 /* Add it to the start of the first bblock */
3218 if (cfg->bb_entry->code) {
3219 getaddr->next = cfg->bb_entry->code;
3220 cfg->bb_entry->code = getaddr;
3223 MONO_ADD_INS (cfg->bb_entry, getaddr);
3225 cfg->got_var_allocated = TRUE;
3228 * Add a dummy use to keep the got_var alive, since real uses might
3229 * only be generated by the back ends.
3230 * Add it to end_bblock, so the variable's lifetime covers the whole
3232 * It would be better to make the usage of the got var explicit in all
3233 * cases when the backend needs it (i.e. calls, throw etc.), so this
3234 * wouldn't be needed.
3236 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
3237 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/*
 * mono_method_check_inlining:
 *
 *   Decide whether METHOD may be inlined into the method being compiled.
 * Rejects: shared generic code, runtime/icall/pinvoke/synchronized/
 * noinline methods, MarshalByRef classes, methods with exception clauses,
 * bodies over the size limit (INLINE_LENGTH_LIMIT, overridable via the
 * MONO_INLINELIMIT env var), classes whose cctor cannot be run eagerly,
 * methods with declarative security, and (with soft float) R4 params or
 * return values.
 */
3241 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
3243 MonoMethodHeader *header = mono_method_get_header (method);
3245 #ifdef MONO_ARCH_SOFT_FLOAT
3246 MonoMethodSignature *sig = mono_method_signature (method);
3250 if (cfg->generic_sharing_context)
3253 #ifdef MONO_ARCH_HAVE_LMF_OPS
/* NOTE(review): this arm references 'signature', presumably
 * mono_method_signature (method) declared on a line not visible here —
 * confirm when MONO_ARCH_HAVE_LMF_OPS is defined. */
3254 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3255 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
3256 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
3260 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_RUNTIME) ||
3261 (method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3262 (method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
3263 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
3264 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL) ||
3265 (method->klass->marshalbyref) ||
3266 !header || header->num_clauses)
3269 /* also consider num_locals? */
3270 /* Do the size check early to avoid creating vtables */
3271 if (getenv ("MONO_INLINELIMIT")) {
3272 if (header->code_size >= atoi (getenv ("MONO_INLINELIMIT"))) {
3275 } else if (header->code_size >= INLINE_LENGTH_LIMIT)
3279 * if we can initialize the class of the method right away, we do,
3280 * otherwise we don't allow inlining if the class needs initialization,
3281 * since it would mean inserting a call to mono_runtime_class_init()
3282 * inside the inlined code
3284 if (!(cfg->opt & MONO_OPT_SHARED)) {
3285 if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
3286 if (cfg->run_cctors && method->klass->has_cctor) {
3287 if (!method->klass->runtime_info)
3288 /* No vtable created yet */
3290 vtable = mono_class_vtable (cfg->domain, method->klass);
3293 /* This makes so that inline cannot trigger */
3294 /* .cctors: too many apps depend on them */
3295 /* running with a specific order... */
3296 if (! vtable->initialized)
3298 mono_runtime_class_init (vtable);
3300 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
3301 if (!method->klass->runtime_info)
3302 /* No vtable created yet */
3304 vtable = mono_class_vtable (cfg->domain, method->klass);
3307 if (!vtable->initialized)
3312 * If we're compiling for shared code
3313 * the cctor will need to be run at aot method load time, for example,
3314 * or at the end of the compilation of the inlining method.
3316 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
3321 * CAS - do not inline methods with declarative security
3322 * Note: this has to be before any possible return TRUE;
3324 if (mono_method_has_declsec (method))
3327 #ifdef MONO_ARCH_SOFT_FLOAT
/* Soft float can't inline anything that passes or returns R4 values. */
3329 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
3331 for (i = 0; i < sig->param_count; ++i)
3332 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Decide whether a static field access in METHOD requires emitting a
 * class-initialization check for VTABLE's class. Already-initialized
 * vtables (non-AOT), beforefieldinit classes, classes with no cctor to
 * run, and instance methods of the class itself don't need one.
 */
3340 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoVTable *vtable)
3342 if (vtable->initialized && !cfg->compile_aot)
3345 if (vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
3348 if (!mono_class_needs_cctor_run (vtable->klass, method))
3351 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (vtable->klass == method->klass))
3352 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit the address computation for a one-dimensional array element
 * access: bounds-check INDEX against ARR's max_length, then compute
 * arr + index * element_size + offsetof (MonoArray, vector).
 * On x86/amd64, power-of-two element sizes use a single LEA.
 */
3359 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index)
3363 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
3365 mono_class_init (klass);
3366 size = mono_class_array_element_size (klass);
3368 mult_reg = alloc_preg (cfg);
3369 array_reg = arr->dreg;
3370 index_reg = index->dreg;
3372 #if SIZEOF_VOID_P == 8
3373 /* The array reg is 64 bits but the index reg is only 32 */
3374 index2_reg = alloc_preg (cfg);
3375 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
3377 index2_reg = index_reg;
3380 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
3382 #if defined(__i386__) || defined(__x86_64__)
/* Fast path: fold scale and displacement into one LEA when the element
 * size is 1/2/4/8 (fast_log2 maps size -> shift). */
3383 if (size == 1 || size == 2 || size == 4 || size == 8) {
3384 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
3386 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
3387 ins->type = STACK_PTR;
/* Generic path: explicit multiply and adds. */
3393 add_reg = alloc_preg (cfg);
3395 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
3396 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
3397 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3398 ins->type = STACK_PTR;
3399 MONO_ADD_INS (cfg->cbb, ins);
3404 #ifndef MONO_ARCH_EMULATE_MUL_DIV
3406 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
3408 int bounds_reg = alloc_preg (cfg);
3409 int add_reg = alloc_preg (cfg);
3410 int mult_reg = alloc_preg (cfg);
3411 int mult2_reg = alloc_preg (cfg);
3412 int low1_reg = alloc_preg (cfg);
3413 int low2_reg = alloc_preg (cfg);
3414 int high1_reg = alloc_preg (cfg);
3415 int high2_reg = alloc_preg (cfg);
3416 int realidx1_reg = alloc_preg (cfg);
3417 int realidx2_reg = alloc_preg (cfg);
3418 int sum_reg = alloc_preg (cfg);
3423 mono_class_init (klass);
3424 size = mono_class_array_element_size (klass);
3426 index1 = index_ins1->dreg;
3427 index2 = index_ins2->dreg;
3429 /* range checking */
3430 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
3431 arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
3433 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
3434 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3435 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
3436 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
3437 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
3438 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
3439 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
3441 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
3442 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3443 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
3444 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
3445 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
3446 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
3447 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
3449 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
3450 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
3451 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
3452 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
3453 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3455 ins->type = STACK_MP;
3457 MONO_ADD_INS (cfg->cbb, ins);
3464 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
3468 MonoMethod *addr_method;
3471 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
3474 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1]);
3476 #ifndef MONO_ARCH_EMULATE_MUL_DIV
3477 /* emit_ldelema_2 depends on OP_LMUL */
3478 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
3479 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
3483 element_size = mono_class_array_element_size (cmethod->klass->element_class);
3484 addr_method = mono_marshal_get_array_address (rank, element_size);
3485 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/*
 * mini_emit_inst_for_method:
 *
 *   Try to replace a call to CMETHOD with inline IR (an intrinsic) for a set
 * of well-known corlib methods (String, Object, Array, RuntimeHelpers,
 * Thread, Interlocked, Debugger, Environment, Math). Returns the instruction
 * producing the call's value, or NULL when no intrinsic applies (elided
 * fall-through paths presumably return NULL — confirm upstream).
 * NOTE(review): this chunk is a numbered listing with lines elided (braces,
 * returns, some case labels and #else/#endif lines are missing); the leading
 * integers on each line are listing artifacts, not code.
 */
3491 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
3493 MonoInst *ins = NULL;
/* Cached lookup of System.Runtime.CompilerServices.RuntimeHelpers. */
3495 static MonoClass *runtime_helpers_class = NULL;
3496 if (! runtime_helpers_class)
3497 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
3498 "System.Runtime.CompilerServices", "RuntimeHelpers");
/* --- String intrinsics --- */
3500 if (cmethod->klass == mono_defaults.string_class) {
3501 if (strcmp (cmethod->name, "get_Chars") == 0) {
3502 int dreg = alloc_ireg (cfg);
3503 int index_reg = alloc_preg (cfg);
3504 int mult_reg = alloc_preg (cfg);
3505 int add_reg = alloc_preg (cfg);
3507 #if SIZEOF_VOID_P == 8
3508 /* The array reg is 64 bits but the index reg is only 32 */
3509 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
3511 index_reg = args [1]->dreg;
3513 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
3515 #if defined(__i386__) || defined(__x86_64__)
/* x86: one LEA covers index*2 + chars offset. */
3516 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
3517 add_reg = ins->dreg;
3518 /* Avoid a warning */
3520 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
3523 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
3524 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
3525 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
3526 add_reg, G_STRUCT_OFFSET (MonoString, chars));
3528 type_from_op (ins, NULL, NULL);
3530 } else if (strcmp (cmethod->name, "get_Length") == 0) {
3531 int dreg = alloc_ireg (cfg);
3532 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
3533 args [0]->dreg, G_STRUCT_OFFSET (MonoString, length));
3534 type_from_op (ins, NULL, NULL);
3537 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
3538 int mult_reg = alloc_preg (cfg);
3539 int add_reg = alloc_preg (cfg);
3541 /* The corlib functions check for oob already. */
3542 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
3543 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
3544 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
/* --- Object intrinsics --- */
3547 } else if (cmethod->klass == mono_defaults.object_class) {
3549 if (strcmp (cmethod->name, "GetType") == 0) {
3550 int dreg = alloc_preg (cfg);
3551 int vt_reg = alloc_preg (cfg);
3552 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3553 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
3554 type_from_op (ins, NULL, NULL);
3557 #if !defined(MONO_ARCH_EMULATE_MUL_DIV) && !defined(HAVE_MOVING_COLLECTOR)
/* Hash the object address (Knuth multiplicative constant); only valid
 * with a non-moving GC, hence the HAVE_MOVING_COLLECTOR guard above. */
3558 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0) {
3559 int dreg = alloc_ireg (cfg);
3560 int t1 = alloc_ireg (cfg);
3562 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
3563 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
3564 ins->type = STACK_I4;
3568 } else if (strcmp (cmethod->name, ".ctor") == 0) {
/* Object..ctor is empty -> drop the call entirely. */
3569 MONO_INST_NEW (cfg, ins, OP_NOP);
3570 MONO_ADD_INS (cfg->cbb, ins);
/* --- Array intrinsics --- */
3574 } else if (cmethod->klass == mono_defaults.array_class) {
3575 if (cmethod->name [0] != 'g')
3578 if (strcmp (cmethod->name, "get_Rank") == 0) {
3579 int dreg = alloc_ireg (cfg);
3580 int vtable_reg = alloc_preg (cfg);
3581 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, vtable_reg,
3582 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3583 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
3584 vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3585 type_from_op (ins, NULL, NULL);
3588 } else if (strcmp (cmethod->name, "get_Length") == 0) {
3589 int dreg = alloc_ireg (cfg);
3591 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
3592 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
3593 type_from_op (ins, NULL, NULL);
/* --- RuntimeHelpers --- */
3598 } else if (cmethod->klass == runtime_helpers_class) {
3600 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
3601 EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
/* --- Thread --- */
3605 } else if (cmethod->klass == mono_defaults.thread_class) {
3606 if (strcmp (cmethod->name, "get_CurrentThread") == 0 && (ins = mono_arch_get_thread_intrinsic (cfg))) {
3607 ins->dreg = alloc_preg (cfg);
3608 ins->type = STACK_OBJ;
3609 MONO_ADD_INS (cfg->cbb, ins);
3611 } else if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
3612 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
3613 MONO_ADD_INS (cfg->cbb, ins);
3615 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
3616 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
3617 MONO_ADD_INS (cfg->cbb, ins);
3620 } else if (mini_class_is_system_array (cmethod->klass) &&
3621 strcmp (cmethod->name, "GetGenericValueImpl") == 0) {
3622 MonoInst *addr, *store, *load;
3623 MonoClass *eklass = mono_class_from_mono_type (fsig->params [1]);
3625 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1]);
3626 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
3627 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/* --- System.Threading.Interlocked --- */
3629 } else if (cmethod->klass->image == mono_defaults.corlib &&
3630 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
3631 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
3634 #if SIZEOF_VOID_P == 8
3635 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
3636 /* 64 bit reads are already atomic */
3637 MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
3638 ins->dreg = mono_alloc_preg (cfg);
3639 ins->inst_basereg = args [0]->dreg;
3640 ins->inst_offset = 0;
3641 MONO_ADD_INS (cfg->cbb, ins);
3645 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
3646 if (strcmp (cmethod->name, "Increment") == 0) {
3647 MonoInst *ins_iconst;
3650 if (fsig->params [0]->type == MONO_TYPE_I4)
3651 opcode = OP_ATOMIC_ADD_NEW_I4;
3652 #if SIZEOF_VOID_P == 8
3653 else if (fsig->params [0]->type == MONO_TYPE_I8)
3654 opcode = OP_ATOMIC_ADD_NEW_I8;
3657 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
3658 ins_iconst->inst_c0 = 1;
3659 ins_iconst->dreg = mono_alloc_ireg (cfg);
3660 MONO_ADD_INS (cfg->cbb, ins_iconst);
3662 MONO_INST_NEW (cfg, ins, opcode);
3663 ins->dreg = mono_alloc_ireg (cfg);
3664 ins->inst_basereg = args [0]->dreg;
3665 ins->inst_offset = 0;
3666 ins->sreg2 = ins_iconst->dreg;
3667 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
3668 MONO_ADD_INS (cfg->cbb, ins);
3670 } else if (strcmp (cmethod->name, "Decrement") == 0) {
3671 MonoInst *ins_iconst;
3674 if (fsig->params [0]->type == MONO_TYPE_I4)
3675 opcode = OP_ATOMIC_ADD_NEW_I4;
3676 #if SIZEOF_VOID_P == 8
3677 else if (fsig->params [0]->type == MONO_TYPE_I8)
3678 opcode = OP_ATOMIC_ADD_NEW_I8;
3681 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
3682 ins_iconst->inst_c0 = -1;
3683 ins_iconst->dreg = mono_alloc_ireg (cfg);
3684 MONO_ADD_INS (cfg->cbb, ins_iconst);
3686 MONO_INST_NEW (cfg, ins, opcode);
3687 ins->dreg = mono_alloc_ireg (cfg);
3688 ins->inst_basereg = args [0]->dreg;
3689 ins->inst_offset = 0;
3690 ins->sreg2 = ins_iconst->dreg;
3691 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
3692 MONO_ADD_INS (cfg->cbb, ins);
3694 } else if (strcmp (cmethod->name, "Add") == 0) {
3697 if (fsig->params [0]->type == MONO_TYPE_I4)
3698 opcode = OP_ATOMIC_ADD_NEW_I4;
3699 #if SIZEOF_VOID_P == 8
3700 else if (fsig->params [0]->type == MONO_TYPE_I8)
3701 opcode = OP_ATOMIC_ADD_NEW_I8;
3705 MONO_INST_NEW (cfg, ins, opcode);
3706 ins->dreg = mono_alloc_ireg (cfg);
3707 ins->inst_basereg = args [0]->dreg;
3708 ins->inst_offset = 0;
3709 ins->sreg2 = args [1]->dreg;
/* BUG(review): this tests OP_ATOMIC_ADD_I4, but opcode is only ever
 * assigned OP_ATOMIC_ADD_NEW_I4/I8 above, so the comparison is always
 * false and the result is mistyped STACK_I8 for the I4 overload. The
 * Increment/Decrement branches compare OP_ATOMIC_ADD_NEW_I4 correctly —
 * this line should match them. */
3710 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
3711 MONO_ADD_INS (cfg->cbb, ins);
3714 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
3716 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
3717 if (strcmp (cmethod->name, "Exchange") == 0) {
3720 if (fsig->params [0]->type == MONO_TYPE_I4)
3721 opcode = OP_ATOMIC_EXCHANGE_I4;
3722 #if SIZEOF_VOID_P == 8
3723 else if ((fsig->params [0]->type == MONO_TYPE_I8) ||
3724 (fsig->params [0]->type == MONO_TYPE_I) ||
3725 (fsig->params [0]->type == MONO_TYPE_OBJECT))
3726 opcode = OP_ATOMIC_EXCHANGE_I8;
3728 else if ((fsig->params [0]->type == MONO_TYPE_I) ||
3729 (fsig->params [0]->type == MONO_TYPE_OBJECT))
3730 opcode = OP_ATOMIC_EXCHANGE_I4;
3735 MONO_INST_NEW (cfg, ins, opcode);
3736 ins->dreg = mono_alloc_ireg (cfg);
3737 ins->inst_basereg = args [0]->dreg;
3738 ins->inst_offset = 0;
3739 ins->sreg2 = args [1]->dreg;
3740 MONO_ADD_INS (cfg->cbb, ins);
/* Several case labels of this switch are elided in the listing. */
3742 switch (fsig->params [0]->type) {
3744 ins->type = STACK_I4;
3748 ins->type = STACK_I8;
3750 case MONO_TYPE_OBJECT:
3751 ins->type = STACK_OBJ;
3754 g_assert_not_reached ();
3757 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
3759 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS_IMM
3761 * Can't implement CompareExchange methods this way since they have
3762 * three arguments. We can implement one of the common cases, where the new
3763 * value is a constant.
3765 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
3766 if (fsig->params [1]->type == MONO_TYPE_I4 && args [2]->opcode == OP_ICONST) {
3767 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_IMM_I4);
3768 ins->dreg = alloc_ireg (cfg);
3769 ins->sreg1 = args [0]->dreg;
3770 ins->sreg2 = args [1]->dreg;
3771 ins->backend.data = GINT_TO_POINTER (args [2]->inst_c0);
3772 ins->type = STACK_I4;
3773 MONO_ADD_INS (cfg->cbb, ins);
3775 /* The I8 case is hard to detect, since the arg might be a conv.i8 (iconst) tree */
3777 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS_IMM */
/* --- Misc corlib intrinsics --- */
3781 } else if (cmethod->klass->image == mono_defaults.corlib) {
3782 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
3783 && strcmp (cmethod->klass->name, "Debugger") == 0) {
3784 MONO_INST_NEW (cfg, ins, OP_BREAK);
3785 MONO_ADD_INS (cfg->cbb, ins);
3788 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
3789 && strcmp (cmethod->klass->name, "Environment") == 0) {
3790 #ifdef PLATFORM_WIN32
3791 EMIT_NEW_ICONST (cfg, ins, 1);
3793 EMIT_NEW_ICONST (cfg, ins, 0);
3797 } else if (cmethod->klass == mono_defaults.math_class) {
3799 * There is general branches code for Min/Max, but it does not work for
3801 * http://everything2.com/?node_id=1051618
/* Fall back to any arch-specific intrinsic expansion. */
3805 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
3809 * This entry point could be used later for arbitrary method
3812 inline static MonoInst*
3813 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
3814 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
3816 if (method->klass == mono_defaults.string_class) {
3817 /* managed string allocation support */
3818 if (strcmp (method->name, "InternalAllocateStr") == 0) {
3819 MonoInst *iargs [2];
3820 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3821 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, FALSE);
3824 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3825 iargs [1] = args [0];
3826 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
3833 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
3835 MonoInst *store, *temp;
3838 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
3839 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
3842 * FIXME: We should use *args++ = sp [0], but that would mean the arg
3843 * would be different than the MonoInst's used to represent arguments, and
3844 * the ldelema implementation can't deal with that.
3845 * Solution: When ldelema is used on an inline argument, create a var for
3846 * it, emit ldelema on that var, and emit the saving code below in
3847 * inline_method () if needed.
3849 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
3850 cfg->args [i] = temp;
3851 /* This uses cfg->args [i] which is set by the preceeding line */
3852 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
3853 store->cil_code = sp [0]->cil_code;
3858 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
3859 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
3861 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
3863 check_inline_called_method_name_limit (MonoMethod *called_method)
3866 static char *limit = NULL;
3868 if (limit == NULL) {
3869 char *limit_string = getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
3871 if (limit_string != NULL)
3872 limit = limit_string;
3874 limit = (char *) "";
3877 if (limit [0] != '\0') {
3878 char *called_method_name = mono_method_full_name (called_method, TRUE);
3880 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
3881 g_free (called_method_name);
3883 //return (strncmp_result <= 0);
3884 return (strncmp_result == 0);
#if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * Return TRUE if inlining may happen inside CALLER_METHOD: its full name
 * must start with the prefix in $MONO_INLINE_CALLER_METHOD_NAME_LIMIT (an
 * unset/empty variable allows everything). Mirrors
 * check_inline_called_method_name_limit ().
 * NOTE(review): reconstructed from a garbled numbered listing.
 */
static gboolean
check_inline_caller_method_name_limit (MonoMethod *caller_method)
{
	int strncmp_result;
	static char *limit = NULL;

	if (limit == NULL) {
		char *limit_string = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
		if (limit_string != NULL) {
			limit = limit_string;
		} else {
			limit = (char *) "";
		}
	}

	if (limit [0] != '\0') {
		char *caller_method_name = mono_method_full_name (caller_method, TRUE);

		strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
		g_free (caller_method_name);

		//return (strncmp_result <= 0);
		return (strncmp_result == 0);
	} else {
		return TRUE;
	}
}
#endif
/*
 * inline_method:
 *
 *   Compile CMETHOD's IL in place of a call, by running mono_method_to_ir2 ()
 * between a fresh start/end bblock pair, then merging the blocks back into
 * the caller. Saves and restores every cfg field the recursive compilation
 * clobbers. Returns a positive cost on success, 0 when inlining is aborted.
 * NOTE(review): this is a numbered listing with lines elided (declarations,
 * returns, braces, switch case labels); the leading integers are listing
 * artifacts, not code.
 */
3922 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
3923 guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_allways)
3925 MonoInst *ins, *rvar = NULL;
3926 MonoMethodHeader *cheader;
3927 MonoBasicBlock *ebblock, *sbblock;
3929 MonoMethod *prev_inlined_method;
3930 MonoInst **prev_locals, **prev_args;
3931 MonoType **prev_arg_types;
3932 guint prev_real_offset;
3933 GHashTable *prev_cbb_hash;
3934 MonoBasicBlock **prev_cil_offset_to_bb;
3935 MonoBasicBlock *prev_cbb;
3936 unsigned char* prev_cil_start;
3937 guint32 prev_cil_offset_to_bb_len;
3938 MonoMethod *prev_current_method;
3939 MonoGenericContext *prev_generic_context;
3941 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
3943 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
3944 if ((! inline_allways) && ! check_inline_called_method_name_limit (cmethod))
3947 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
3948 if ((! inline_allways) && ! check_inline_caller_method_name_limit (cfg->method))
3952 if (cfg->verbose_level > 2)
3953 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
3955 if (!cmethod->inline_info) {
3956 mono_jit_stats.inlineable_methods++;
3957 cmethod->inline_info = 1;
3959 /* allocate space to store the return value */
3960 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
3961 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
3964 /* allocate local variables */
3965 cheader = mono_method_get_header (cmethod);
3966 prev_locals = cfg->locals;
3967 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
3968 for (i = 0; i < cheader->num_locals; ++i)
3969 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
3971 /* allocate start and end blocks */
3972 /* This is needed so if the inline is aborted, we can clean up */
3973 NEW_BBLOCK (cfg, sbblock);
3974 sbblock->real_offset = real_offset;
3976 NEW_BBLOCK (cfg, ebblock);
3977 ebblock->block_num = cfg->num_bblocks++;
3978 ebblock->real_offset = real_offset;
/* Save the cfg state the recursive compilation will overwrite. */
3980 prev_args = cfg->args;
3981 prev_arg_types = cfg->arg_types;
3982 prev_inlined_method = cfg->inlined_method;
3983 cfg->inlined_method = cmethod;
3984 cfg->ret_var_set = FALSE;
3985 prev_real_offset = cfg->real_offset;
3986 prev_cbb_hash = cfg->cbb_hash;
3987 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
3988 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
3989 prev_cil_start = cfg->cil_start;
3990 prev_cbb = cfg->cbb;
3991 prev_current_method = cfg->current_method;
3992 prev_generic_context = cfg->generic_context;
/* Compile the callee's IL into sbblock..ebblock; negative cost = abort. */
3994 costs = mono_method_to_ir2 (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, *ip == CEE_CALLVIRT);
3996 cfg->inlined_method = prev_inlined_method;
3997 cfg->real_offset = prev_real_offset;
3998 cfg->cbb_hash = prev_cbb_hash;
3999 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
4000 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
4001 cfg->cil_start = prev_cil_start;
4002 cfg->locals = prev_locals;
4003 cfg->args = prev_args;
4004 cfg->arg_types = prev_arg_types;
4005 cfg->current_method = prev_current_method;
4006 cfg->generic_context = prev_generic_context;
/* NOTE(review): the hard-coded cost cap 60 is a magic number, distinct
 * from INLINE_LENGTH_LIMIT (20) in the header — confirm which governs. */
4008 if ((costs >= 0 && costs < 60) || inline_allways) {
4009 if (cfg->verbose_level > 2)
4010 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4012 mono_jit_stats.inlined_methods++;
4014 /* always add some code to avoid block split failures */
4015 MONO_INST_NEW (cfg, ins, OP_NOP);
4016 MONO_ADD_INS (prev_cbb, ins);
4018 prev_cbb->next_bb = sbblock;
4019 link_bblock (cfg, prev_cbb, sbblock);
4022 * Get rid of the begin and end bblocks if possible to aid local
4025 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
4027 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
4028 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
4030 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
4031 MonoBasicBlock *prev = ebblock->in_bb [0];
4032 mono_merge_basic_blocks (cfg, prev, ebblock);
4040 * If the inlined method contains only a throw, then the ret var is not
4041 * set, so set it to a dummy value.
4043 if (!cfg->ret_var_set) {
4044 static double r8_0 = 0.0;
/* Several case labels of this switch on the stack type are elided. */
4046 switch (rvar->type) {
4048 MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
4051 MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
4056 MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
4059 MONO_INST_NEW (cfg, ins, OP_R8CONST);
4060 ins->type = STACK_R8;
4061 ins->inst_p0 = (void*)&r8_0;
4062 ins->dreg = rvar->dreg;
4063 MONO_ADD_INS (cfg->cbb, ins);
4066 MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (fsig->ret));
4069 g_assert_not_reached ();
/* Push the return value of the inlined method onto the stack. */
4073 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
4078 if (cfg->verbose_level > 2)
4079 printf ("INLINE ABORTED %s\n", mono_method_full_name (cmethod, TRUE));
4080 cfg->exception_type = MONO_EXCEPTION_NONE;
4081 mono_loader_clear_error ();
4083 /* This gets rid of the newly added bblocks */
4084 cfg->cbb = prev_cbb;
4090 * Some of these comments may well be out-of-date.
4091 * Design decisions: we do a single pass over the IL code (and we do bblock
4092 * splitting/merging in the few cases when it's required: a back jump to an IL
4093 * address that was not already seen as bblock starting point).
4094 * Code is validated as we go (full verification is still better left to metadata/verify.c).
4095 * Complex operations are decomposed in simpler ones right away. We need to let the
4096 * arch-specific code peek and poke inside this process somehow (except when the
4097 * optimizations can take advantage of the full semantic info of coarse opcodes).
4098 * All the opcodes of the form opcode.s are 'normalized' to opcode.
4099 * MonoInst->opcode initially is the IL opcode or some simplification of that
4100 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
4101 * opcode with value bigger than OP_LAST.
4102 * At this point the IR can be handed over to an interpreter, a dumb code generator
4103 * or to the optimizing code generator that will translate it to SSA form.
4105 * Profiling directed optimizations.
4106 * We may compile by default with few or no optimizations and instrument the code
4107 * or the user may indicate what methods to optimize the most either in a config file
4108 * or through repeated runs where the compiler applies offline the optimizations to
4109 * each method and then decides if it was worth it.
/*
 * IL import-time validation helpers used throughout mono_method_to_ir ();
 * all of them jump to the UNVERIFIED / load_error handling on failure.
 * Wrapped in do { } while (0) so they nest safely inside unbraced if/else —
 * the unwrapped "if (...) UNVERIFIED" forms had a dangling-else hazard.
 * Call sites of the form "MACRO (x);" are unaffected by the wrapping.
 */
#define CHECK_TYPE(ins) do { if (!(ins)->type) UNVERIFIED; } while (0)
#define CHECK_STACK(num) do { if ((sp - stack_start) < (num)) UNVERIFIED; } while (0)
#define CHECK_STACK_OVF(num) do { if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED; } while (0)
#define CHECK_ARG(num) do { if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED; } while (0)
#define CHECK_LOCAL(num) do { if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED; } while (0)
#define CHECK_OPSIZE(size) do { if (ip + (size) > end) UNVERIFIED; } while (0)
#define CHECK_UNVERIFIABLE(cfg) do { if (cfg->unverifiable) UNVERIFIED; } while (0)
#define CHECK_TYPELOAD(klass) do { if (!(klass) || (klass)->exception_type) { cfg->exception_ptr = klass; goto load_error; } } while (0)

/* offset from br.s -> br like opcodes */
#define BIG_BRANCH_OFFSET 13
4125 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
4127 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
4129 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   One linear scan over the IL in [start, end) that creates a bblock (via
 * GET_BBLOCK) at every branch target and at every fall-through point after a
 * branch/switch, so the main pass can split code correctly. The bblock
 * containing a CEE_THROW is marked out_of_line. *pos is presumably advanced /
 * used to report a scan failure — the relevant lines are elided here.
 * NOTE(review): numbered listing with elided lines (braces, several case
 * labels, loop header); the leading integers are listing artifacts.
 */
4133 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
4135 unsigned char *ip = start;
4136 unsigned char *target;
4139 MonoBasicBlock *bblock;
4140 const MonoOpcode *opcode;
4143 cli_addr = ip - start;
4144 i = mono_opcode_value ((const guint8 **)&ip, end);
4147 opcode = &mono_opcodes [i];
/* Advance past the operand; only branch-like operands create bblocks. */
4148 switch (opcode->argument) {
4149 case MonoInlineNone:
4152 case MonoInlineString:
4153 case MonoInlineType:
4154 case MonoInlineField:
4155 case MonoInlineMethod:
4158 case MonoShortInlineR:
4165 case MonoShortInlineVar:
4166 case MonoShortInlineI:
4169 case MonoShortInlineBrTarget:
4170 target = start + cli_addr + 2 + (signed char)ip [1];
4171 GET_BBLOCK (cfg, bblock, target);
/* Fall-through point after the short branch also starts a bblock. */
4174 GET_BBLOCK (cfg, bblock, ip);
4176 case MonoInlineBrTarget:
4177 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
4178 GET_BBLOCK (cfg, bblock, target);
4181 GET_BBLOCK (cfg, bblock, ip);
4183 case MonoInlineSwitch: {
4184 guint32 n = read32 (ip + 1);
/* The switch's fall-through address is past the n 32-bit targets. */
4187 cli_addr += 5 + 4 * n;
4188 target = start + cli_addr;
4189 GET_BBLOCK (cfg, bblock, target);
4191 for (j = 0; j < n; ++j) {
4192 target = start + cli_addr + (gint32)read32 (ip);
4193 GET_BBLOCK (cfg, bblock, target);
4203 g_assert_not_reached ();
4206 if (i == CEE_THROW) {
4207 unsigned char *bb_start = ip - 1;
4209 /* Find the start of the bblock containing the throw */
4211 while ((bb_start >= start) && !bblock) {
4212 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
/* Throw-only blocks are moved out of line by the block layout pass. */
4216 bblock->out_of_line = 1;
4225 static inline MonoMethod *
4226 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4230 if (m->wrapper_type != MONO_WRAPPER_NONE)
4231 return mono_method_get_wrapper_data (m, token);
4233 method = mono_get_method_full (m->klass->image, token, klass, context);
4238 static inline MonoMethod *
4239 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4241 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
4243 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
4249 static inline MonoClass*
4250 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
4254 if (method->wrapper_type != MONO_WRAPPER_NONE)
4255 klass = mono_method_get_wrapper_data (method, token);
4257 klass = mono_class_get_full (method->klass->image, token, context);
4259 mono_class_init (klass);
4264 * Returns TRUE if the JIT should abort inlining because "callee"
4265 * is influenced by security attributes.
4268 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
4272 if ((cfg->method != caller) && mono_method_has_declsec (callee)) {
4276 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
4277 if (result == MONO_JIT_SECURITY_OK)
4280 if (result == MONO_JIT_LINKDEMAND_ECMA) {
4281 /* Generate code to throw a SecurityException before the actual call/link */
4282 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4285 NEW_ICONST (cfg, args [0], 4);
4286 NEW_METHODCONST (cfg, args [1], caller);
4287 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
4288 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
4289 /* don't hide previous results */
4290 cfg->exception_type = MONO_EXCEPTION_SECURITY_LINKDEMAND;
4291 cfg->exception_data = result;
4299 method_access_exception (void)
4301 static MonoMethod *method = NULL;
4304 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4305 method = mono_class_get_method_from_name (secman->securitymanager,
4306 "MethodAccessException", 2);
4313 emit_throw_method_access_exception (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
4314 MonoBasicBlock *bblock, unsigned char *ip)
4316 MonoMethod *thrower = method_access_exception ();
4319 EMIT_NEW_METHODCONST (cfg, args [0], caller);
4320 EMIT_NEW_METHODCONST (cfg, args [1], callee);
4321 mono_emit_method_call (cfg, thrower, args, NULL);
4325 verification_exception (void)
4327 static MonoMethod *method = NULL;
4330 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4331 method = mono_class_get_method_from_name (secman->securitymanager,
4332 "VerificationException", 0);
4339 emit_throw_verification_exception (MonoCompile *cfg, MonoBasicBlock *bblock, unsigned char *ip)
4341 MonoMethod *thrower = verification_exception ();
4343 mono_emit_method_call (cfg, thrower, NULL, NULL);
4347 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
4348 MonoBasicBlock *bblock, unsigned char *ip)
4350 MonoSecurityCoreCLRLevel caller_level = mono_security_core_clr_method_level (caller, TRUE);
4351 MonoSecurityCoreCLRLevel callee_level = mono_security_core_clr_method_level (callee, TRUE);
4352 gboolean is_safe = TRUE;
4354 if (!(caller_level >= callee_level ||
4355 caller_level == MONO_SECURITY_CORE_CLR_SAFE_CRITICAL ||
4356 callee_level == MONO_SECURITY_CORE_CLR_SAFE_CRITICAL)) {
4361 emit_throw_method_access_exception (cfg, caller, callee, bblock, ip);
4365 method_is_safe (MonoMethod *method)
4368 if (strcmp (method->name, "unsafeMethod") == 0)
/*
 * initialize_array_data:
 *
 *   Recognize the IL sequence "dup; ldtoken <field>; call
 * RuntimeHelpers::InitializeArray" that follows a newarr, and return a
 * pointer to the field's static data blob (or, for AOT, the RVA wrapped as a
 * pointer) with its size in *out_size. Returns NULL when the pattern, the
 * element type or the data size does not match.
 */
4375 * Check that the IL instructions at ip are the array initialization
4376 * sequence and return the pointer to the data and the size.
/* NOTE(review): numbered listing with elided lines (several case labels,
 * size computations and returns are missing); the leading integers are
 * listing artifacts, not code. */
4379 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size)
4382 * newarr[System.Int32]
4384 * ldtoken field valuetype ...
4385 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* ip [5] == 0x4 checks the ldtoken's token-type byte (a Field token). */
4387 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
4388 guint32 token = read32 (ip + 7);
4389 guint32 field_token = read32 (ip + 2);
4390 guint32 field_index = field_token & 0xffffff;
4392 const char *data_ptr;
4394 MonoMethod *cmethod;
4395 MonoClass *dummy_class;
4396 MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
4402 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* Only accept the real RuntimeHelpers.InitializeArray from corlib. */
4405 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
/* Per-element-type size selection; most case labels are elided here. */
4407 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
4408 case MONO_TYPE_BOOLEAN:
4412 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
4413 #if G_BYTE_ORDER == G_LITTLE_ENDIAN
4414 case MONO_TYPE_CHAR:
4424 return NULL; /* stupid ARM FP swapped format */
/* The data blob must be at least as large as the field's declared type. */
4434 if (size > mono_type_size (field->type, &dummy_align))
4437 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
4438 if (!method->klass->image->dynamic) {
4439 field_index = read32 (ip + 2) & 0xffffff;
4440 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
4441 data_ptr = mono_image_rva_map (method->klass->image, rva);
4442 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
4443 /* for aot code we do the lookup on load */
4444 if (aot && data_ptr)
4445 return GUINT_TO_POINTER (rva);
4447 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
/* Dynamic (SRE) images keep the blob directly on the field. */
4449 data_ptr = field->data;
/*
 * set_exception_type_from_invalid_il:
 *
 *   Abort compilation by recording MONO_EXCEPTION_INVALID_PROGRAM on CFG.
 * The message embeds the full method name plus either a disassembly of the
 * IL instruction at IP or a note that the method body is empty.
 * NOTE(review): a few original lines are elided in this excerpt (return
 * type, the method_code declaration, the else keyword, braces); only
 * comments were added here.
 */
4457 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
4459 char *method_fname = mono_method_full_name (method, TRUE);
/* An empty body cannot be disassembled; use a fixed message instead. */
4462 if (mono_method_get_header (method)->code_size == 0)
4463 method_code = g_strdup ("method body is empty.");
/* (else branch — keyword elided in this view) disassemble the offending IL. */
4465 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
4466 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
4467 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
/* g_strdup_printf copied the strings into the message; free the temporaries. */
4468 g_free (method_fname);
4469 g_free (method_code);
/*
 * set_exception_object:
 *
 *   Abort compilation by attaching a pre-built exception object to CFG
 * (MONO_EXCEPTION_OBJECT_SUPPLIED).  The exception_ptr slot is registered
 * as a GC root so EXCEPTION is kept alive while the MonoCompile exists.
 */
4473 set_exception_object (MonoCompile *cfg, MonoException *exception)
4475 cfg->exception_type = MONO_EXCEPTION_OBJECT_SUPPLIED;
4476 MONO_GC_REGISTER_ROOT (cfg->exception_ptr);
4477 cfg->exception_ptr = exception;
/*
 * generic_class_is_reference_type:
 *
 *   Return whether KLASS is a reference type.  When compiling shared
 * generic code, type variables are first resolved through the generic
 * sharing context so the answer reflects the shared instantiation.
 * NOTE(review): the `type' declaration, else keyword and braces are
 * elided from this excerpt; only comments were added.
 */
4481 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
4485 if (cfg->generic_sharing_context)
4486 type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, &klass->byval_arg);
/* (else branch — keyword elided in this view) use the class type directly. */
4488 type = &klass->byval_arg;
4489 return MONO_TYPE_IS_REFERENCE (type);
/*
 * Lowers high-level array opcodes (array-length load, bounds check, newarr)
 * into concrete loads and icalls.  Code is emitted into a scratch bblock
 * using the normal codegen macros, then spliced over the original
 * instruction by mono_replace_ins ().
 * NOTE(review): several original lines (case labels, braces, `ins'/`dest'
 * declarations) are elided from this excerpt; only comments were added.
 */
4493 * mono_decompose_array_access_opts:
4495 * Decompose array access opcodes.
4498 mono_decompose_array_access_opts (MonoCompile *cfg)
4500 MonoBasicBlock *bb, *first_bb;
4503 * Unlike decompose_long_opts, this pass does not alter the CFG of the method so it
4504 * can be executed anytime. It should be run before decompose_long
4508 * Create a dummy bblock and emit code into it so we can use the normal
4509 * code generation macros.
4511 cfg->cbb = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoBasicBlock));
4512 first_bb = cfg->cbb;
4514 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
4516 MonoInst *prev = NULL;
4518 MonoInst *iargs [3];
/* Skip bblocks that contain no array-access opcodes. */
4521 if (!bb->has_array_access)
4524 if (cfg->verbose_level > 3) mono_print_bb (bb, "BEFORE DECOMPOSE-ARRAY-ACCESS-OPTS ");
/* Reset the scratch bblock before walking this bblock's instructions. */
4526 cfg->cbb->code = cfg->cbb->last_ins = NULL;
4532 for (ins = bb->code; ins; ins = ins->next) {
4533 switch (ins->opcode) {
/* Array length (case label elided): load MonoArray::max_length. */
4535 NEW_LOAD_MEMBASE (cfg, dest, OP_LOADI4_MEMBASE, ins->dreg, ins->sreg1,
4536 G_STRUCT_OFFSET (MonoArray, max_length));
4537 MONO_ADD_INS (cfg->cbb, dest);
4539 case OP_BOUNDS_CHECK:
/* The actual check sequence is architecture specific. */
4540 MONO_ARCH_EMIT_BOUNDS_CHECK (cfg, ins->sreg1, ins->inst_imm, ins->sreg2);
/* newarr (case label elided), shared code: mono_array_new (domain, class, len). */
4543 if (cfg->opt & MONO_OPT_SHARED) {
4544 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4545 EMIT_NEW_CLASSCONST (cfg, iargs [1], ins->inst_newa_class);
4546 MONO_INST_NEW (cfg, iargs [2], OP_MOVE);
4547 iargs [2]->dreg = ins->sreg1;
4549 dest = mono_emit_jit_icall (cfg, mono_array_new, iargs);
4550 dest->dreg = ins->dreg;
/* Non-shared: use the vtable-based mono_array_new_specific (vtable, len). */
4552 MonoVTable *vtable = mono_class_vtable (cfg->domain, mono_array_class_get (ins->inst_newa_class, 1));
4555 NEW_VTABLECONST (cfg, iargs [0], vtable);
4556 MONO_ADD_INS (cfg->cbb, iargs [0]);
4557 MONO_INST_NEW (cfg, iargs [1], OP_MOVE);
4558 iargs [1]->dreg = ins->sreg1;
4560 dest = mono_emit_jit_icall (cfg, mono_array_new_specific, iargs);
4561 dest->dreg = ins->dreg;
/* All emitted code must have gone into the scratch bblock. */
4568 g_assert (cfg->cbb == first_bb);
4570 if (cfg->cbb->code || (cfg->cbb != first_bb)) {
4571 /* Replace the original instruction with the new code sequence */
4573 mono_replace_ins (cfg, bb, ins, &prev, first_bb, cfg->cbb);
/* Reset the scratch bblock for the next instruction. */
4574 first_bb->code = first_bb->last_ins = NULL;
4575 first_bb->in_count = first_bb->out_count = 0;
4576 cfg->cbb = first_bb;
4583 if (cfg->verbose_level > 3) mono_print_bb (bb, "AFTER DECOMPOSE-ARRAY-ACCESS-OPTS ");
/*
 * Soft-float lowering pass (ARM).  Rewrites fp opcodes into integer/long
 * opcodes plus calls to fp emulation icalls; code is emitted into a
 * scratch bblock and spliced in by mono_replace_ins ().  Because it
 * creates long opcodes, mono_decompose_long_opts () is run at the end.
 * NOTE(review): many original lines (case labels, braces, declarations)
 * are elided from this excerpt; only comments were added.
 */
4593 #ifdef MONO_ARCH_SOFT_FLOAT
4596 * mono_handle_soft_float:
4598 * Soft float support on ARM. We store each double value in a pair of integer vregs,
4599 * similar to long support on 32 bit platforms. 32 bit float values require special
4600 * handling when used as locals, arguments, and in calls.
4601 * One big problem with soft-float is that there are few r4 test cases in our test suite.
4604 mono_handle_soft_float (MonoCompile *cfg)
4606 MonoBasicBlock *bb, *first_bb;
4609 * This pass creates long opcodes, so it should be run before decompose_long_opts ().
4613 * Create a dummy bblock and emit code into it so we can use the normal
4614 * code generation macros.
4616 cfg->cbb = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoBasicBlock));
4617 first_bb = cfg->cbb;
4619 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
4621 MonoInst *prev = NULL;
4624 if (cfg->verbose_level > 3) mono_print_bb (bb, "BEFORE HANDLE-SOFT-FLOAT ");
/* Reset the scratch bblock before walking this bblock. */
4626 cfg->cbb->code = cfg->cbb->last_ins = NULL;
4632 for (ins = bb->code; ins; ins = ins->next) {
4633 const char *spec = INS_INFO (ins->opcode);
4635 /* Most fp operations are handled automatically by opcode emulation */
4637 switch (ins->opcode) {
/* r8 constant (case label elided): reinterpret the double bits as i8. */
4640 d.vald = *(double*)ins->inst_p0;
4641 MONO_EMIT_NEW_I8CONST (cfg, ins->dreg, d.vall);
/* r4 constant (case label elided): widen to r8, then load its bits as i8. */
4646 /* We load the r8 value */
4647 d.vald = *(float*)ins->inst_p0;
4648 MONO_EMIT_NEW_I8CONST (cfg, ins->dreg, d.vall);
/* fp moves become long/int moves on the integer vreg pair; the +1/+2
 * offsets presumably select one half of the pair (case labels elided —
 * TODO confirm against the full source). */
4652 ins->opcode = OP_LMOVE;
4655 ins->opcode = OP_MOVE;
4656 ins->sreg1 = ins->sreg1 + 1;
4659 ins->opcode = OP_MOVE;
4660 ins->sreg1 = ins->sreg1 + 2;
/* fp return (case label elided): return the vreg pair as a long. */
4663 int reg = ins->sreg1;
4665 ins->opcode = OP_SETLRET;
4667 ins->sreg1 = reg + 1;
4668 ins->sreg2 = reg + 2;
/* r8 loads/stores are plain 64 bit integer loads/stores. */
4671 case OP_LOADR8_MEMBASE:
4672 ins->opcode = OP_LOADI8_MEMBASE;
4674 case OP_STORER8_MEMBASE_REG:
4675 ins->opcode = OP_STOREI8_MEMBASE_REG;
/* r4 store: icall mono_fstore_r4 narrows the r8 value into memory. */
4677 case OP_STORER4_MEMBASE_REG: {
4678 MonoInst *iargs [2];
4681 /* Arg 1 is the double value */
4682 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
4683 iargs [0]->dreg = ins->sreg1;
4685 /* Arg 2 is the address to store to */
4686 addr_reg = mono_alloc_preg (cfg);
4687 EMIT_NEW_BIALU_IMM (cfg, iargs [1], OP_PADD_IMM, addr_reg, ins->inst_destbasereg, ins->inst_offset);
4688 mono_emit_jit_icall (cfg, mono_fstore_r4, iargs);
/* r4 load: icall mono_fload_r4 reads the float and widens it to r8. */
4692 case OP_LOADR4_MEMBASE: {
4693 MonoInst *iargs [1];
4697 addr_reg = mono_alloc_preg (cfg);
4698 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, addr_reg, ins->inst_basereg, ins->inst_offset);
4699 conv = mono_emit_jit_icall (cfg, mono_fload_r4, iargs);
4700 conv->dreg = ins->dreg;
/* Calls returning r4: rewrite into an int-returning call, then convert
 * the returned r4 bits to r8 via mono_fload_r4 on their address. */
4705 case OP_FCALL_MEMBASE: {
4706 MonoCallInst *call = (MonoCallInst*)ins;
4707 if (call->signature->ret->type == MONO_TYPE_R4) {
4708 MonoCallInst *call2;
4709 MonoInst *iargs [1];
4712 /* Convert the call into a call returning an int */
4713 MONO_INST_NEW_CALL (cfg, call2, OP_CALL);
4714 memcpy (call2, call, sizeof (MonoCallInst));
4715 switch (ins->opcode) {
4717 call2->inst.opcode = OP_CALL;
4720 call2->inst.opcode = OP_CALL_REG;
4722 case OP_FCALL_MEMBASE:
4723 call2->inst.opcode = OP_CALL_MEMBASE;
4726 g_assert_not_reached ();
4728 call2->inst.dreg = mono_alloc_ireg (cfg);
4729 MONO_ADD_INS (cfg->cbb, (MonoInst*)call2);
4731 /* FIXME: Optimize this */
4733 /* Emit an r4->r8 conversion */
4734 EMIT_NEW_VARLOADA_VREG (cfg, iargs [0], call2->inst.dreg, &mono_defaults.int32_class->byval_arg);
4735 conv = mono_emit_jit_icall (cfg, mono_fload_r4, iargs);
4736 conv->dreg = ins->dreg;
/* Calls returning r8: just turn the fcall into the matching lcall. */
4738 switch (ins->opcode) {
4740 ins->opcode = OP_LCALL;
4743 ins->opcode = OP_LCALL_REG;
4745 case OP_FCALL_MEMBASE:
4746 ins->opcode = OP_LCALL_MEMBASE;
4749 g_assert_not_reached ();
/* fcompare + fbcc (case label elided): emulate the compare with an icall,
 * then branch on the icall's integer result. */
4755 MonoJitICallInfo *info;
4756 MonoInst *iargs [2];
4757 MonoInst *call, *cmp, *br;
4759 /* Convert fcompare+fbcc to icall+icompare+beq */
4761 info = mono_find_jit_opcode_emulation (ins->next->opcode);
4764 /* Create dummy MonoInst's for the arguments */
4765 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
4766 iargs [0]->dreg = ins->sreg1;
4767 MONO_INST_NEW (cfg, iargs [1], OP_ARG);
4768 iargs [1]->dreg = ins->sreg2;
4770 call = mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, iargs);
4772 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE_IMM);
4773 cmp->sreg1 = call->dreg;
4775 MONO_ADD_INS (cfg->cbb, cmp);
/* Reuse the original branch targets on the new integer branch. */
4777 MONO_INST_NEW (cfg, br, OP_IBNE_UN);
4778 br->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * 2);
4779 br->inst_true_bb = ins->next->inst_true_bb;
4780 br->inst_false_bb = ins->next->inst_false_bb;
4781 MONO_ADD_INS (cfg->cbb, br);
4783 /* The call sequence might include fp ins */
4786 /* Skip fbcc or fccc */
4787 NULLIFY_INS (ins->next);
/* fccc (fp compare producing a value; case labels elided): icall, then
 * compare the result against 1 and materialize a 0/1 with iceq. */
4795 MonoJitICallInfo *info;
4796 MonoInst *iargs [2];
4799 /* Convert fccc to icall+icompare+iceq */
4801 info = mono_find_jit_opcode_emulation (ins->opcode);
4804 /* Create dummy MonoInst's for the arguments */
4805 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
4806 iargs [0]->dreg = ins->sreg1;
4807 MONO_INST_NEW (cfg, iargs [1], OP_ARG);
4808 iargs [1]->dreg = ins->sreg2;
4810 call = mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, iargs);
4812 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, call->dreg, 1);
4813 MONO_EMIT_NEW_UNALU (cfg, OP_ICEQ, ins->dreg, -1);
4815 /* The call sequence might include fp ins */
/* Sanity check: no fp vreg may survive this pass. */
4820 if (spec [MONO_INST_SRC1] == 'f' || spec [MONO_INST_SRC2] == 'f' || spec [MONO_INST_DEST] == 'f') {
4821 mono_print_ins (ins);
4822 g_assert_not_reached ();
4827 g_assert (cfg->cbb == first_bb);
4829 if (cfg->cbb->code || (cfg->cbb != first_bb)) {
4830 /* Replace the original instruction with the new code sequence */
4832 mono_replace_ins (cfg, bb, ins, &prev, first_bb, cfg->cbb);
/* Reset the scratch bblock for the next instruction. */
4833 first_bb->code = first_bb->last_ins = NULL;
4834 first_bb->in_count = first_bb->out_count = 0;
4835 cfg->cbb = first_bb;
4842 if (cfg->verbose_level > 3) mono_print_bb (bb, "AFTER HANDLE-SOFT-FLOAT ");
/* Decompose the long opcodes this pass just created. */
4845 mono_decompose_long_opts (cfg);
4851 * mono_method_to_ir: translates IL into basic blocks containing trees
4854 mono_method_to_ir2 (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
4855 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
4856 guint inline_offset, gboolean is_virtual_call)
4858 MonoInst *ins, **sp, **stack_start;
4859 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
4860 MonoMethod *cmethod, *method_definition;
4861 MonoInst **arg_array;
4862 MonoMethodHeader *header;
4864 guint32 token, ins_flag;
4866 MonoClass *constrained_call = NULL;
4867 unsigned char *ip, *end, *target, *err_pos;
4868 static double r8_0 = 0.0;
4869 MonoMethodSignature *sig;
4870 MonoGenericContext *generic_context = NULL;
4871 MonoGenericContainer *generic_container = NULL;
4872 MonoType **param_types;
4873 int i, n, start_new_bblock, dreg;
4874 int num_calls = 0, inline_costs = 0;
4875 int breakpoint_id = 0;
4877 MonoBoolean security, pinvoke;
4878 MonoSecurityManager* secman = NULL;
4879 MonoDeclSecurityActions actions;
4880 GSList *class_inits = NULL;
4881 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
4884 /* serialization and xdomain stuff may need access to private fields and methods */
4885 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
4886 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
4887 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
4888 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
4889 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
4890 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
4892 dont_verify |= mono_security_get_mode () == MONO_SECURITY_MODE_SMCS_HACK;
4894 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
4895 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
4896 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
4897 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
4899 image = method->klass->image;
4900 header = mono_method_get_header (method);
4901 generic_container = mono_method_get_generic_container (method);
4902 sig = mono_method_signature (method);
4903 num_args = sig->hasthis + sig->param_count;
4904 ip = (unsigned char*)header->code;
4905 cfg->cil_start = ip;
4906 end = ip + header->code_size;
4907 mono_jit_stats.cil_code_size += header->code_size;
4909 method_definition = method;
4910 while (method_definition->is_inflated) {
4911 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
4912 method_definition = imethod->declaring;
4915 /* SkipVerification is not allowed if core-clr is enabled */
4916 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
4918 dont_verify_stloc = TRUE;
4921 if (!dont_verify && mini_method_verify (cfg, method_definition))
4922 goto exception_exit;
4924 if (sig->is_inflated)
4925 generic_context = mono_method_get_context (method);
4926 else if (generic_container)
4927 generic_context = &generic_container->context;
4928 cfg->generic_context = generic_context;
4930 if (!cfg->generic_sharing_context)
4931 g_assert (!sig->has_type_parameters);
4933 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
4934 g_assert (method->is_inflated);
4935 g_assert (mono_method_get_context (method)->method_inst);
4937 if (method->is_inflated && mono_method_get_context (method)->method_inst)
4938 g_assert (sig->generic_param_count);
4940 if (cfg->method == method) {
4941 cfg->real_offset = 0;
4943 cfg->real_offset = inline_offset;
4946 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
4947 cfg->cil_offset_to_bb_len = header->code_size;
4949 cfg->current_method = method;
4951 if (cfg->verbose_level > 2)
4952 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
4954 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
4956 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
4957 for (n = 0; n < sig->param_count; ++n)
4958 param_types [n + sig->hasthis] = sig->params [n];
4959 cfg->arg_types = param_types;
4961 dont_inline = g_list_prepend (dont_inline, method);
4962 if (cfg->method == method) {
4964 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
4965 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
4968 NEW_BBLOCK (cfg, start_bblock);
4969 cfg->bb_entry = start_bblock;
4970 start_bblock->cil_code = NULL;
4971 start_bblock->cil_length = 0;
4974 NEW_BBLOCK (cfg, end_bblock);
4975 cfg->bb_exit = end_bblock;
4976 end_bblock->cil_code = NULL;
4977 end_bblock->cil_length = 0;
4978 g_assert (cfg->num_bblocks == 2);
4980 arg_array = cfg->args;
4982 if (header->num_clauses) {
4983 cfg->spvars = g_hash_table_new (NULL, NULL);
4984 cfg->exvars = g_hash_table_new (NULL, NULL);
4986 /* handle exception clauses */
4987 for (i = 0; i < header->num_clauses; ++i) {
4988 MonoBasicBlock *try_bb;
4989 MonoExceptionClause *clause = &header->clauses [i];
4990 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
4991 try_bb->real_offset = clause->try_offset;
4992 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
4993 tblock->real_offset = clause->handler_offset;
4994 tblock->flags |= BB_EXCEPTION_HANDLER;
4996 link_bblock (cfg, try_bb, tblock);
4998 if (*(ip + clause->handler_offset) == CEE_POP)
4999 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
5001 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
5002 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
5003 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
5004 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5005 MONO_ADD_INS (tblock, ins);
5007 /* todo: is a fault block unsafe to optimize? */
5008 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
5009 tblock->flags |= BB_EXCEPTION_UNSAFE;
5013 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
5015 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
5017 /* catch and filter blocks get the exception object on the stack */
5018 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
5019 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5020 MonoInst *dummy_use;
5022 /* mostly like handle_stack_args (), but just sets the input args */
5023 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
5024 tblock->in_scount = 1;
5025 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5026 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5029 * Add a dummy use for the exvar so its liveness info will be
5033 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
5035 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5036 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
5037 tblock->real_offset = clause->data.filter_offset;
5038 tblock->in_scount = 1;
5039 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5040 /* The filter block shares the exvar with the handler block */
5041 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5042 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5043 MONO_ADD_INS (tblock, ins);
5047 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
5048 clause->data.catch_class &&
5049 cfg->generic_sharing_context &&
5050 mono_class_check_context_used (clause->data.catch_class)) {
5051 if (mono_method_get_context (method)->method_inst)
5052 GENERIC_SHARING_FAILURE (CEE_NOP);
5055 * In shared generic code with catch
5056 * clauses containing type variables
5057 * the exception handling code has to
5058 * be able to get to the rgctx.
5059 * Therefore we have to make sure that
5060 * the vtable/mrgctx argument (for
5061 * static or generic methods) or the
5062 * "this" argument (for non-static
5063 * methods) are live.
5065 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5066 mini_method_get_context (method)->method_inst) {
5067 mono_get_vtable_var (cfg);
5069 MonoInst *dummy_use;
5071 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
5076 arg_array = alloca (sizeof (MonoInst *) * num_args);
5077 cfg->cbb = start_bblock;
5078 cfg->args = arg_array;
5079 mono_save_args (cfg, sig, inline_args);
5082 /* FIRST CODE BLOCK */
5083 NEW_BBLOCK (cfg, bblock);
5084 bblock->cil_code = ip;
5088 ADD_BBLOCK (cfg, bblock);
5090 if (cfg->method == method) {
5091 breakpoint_id = mono_debugger_method_has_breakpoint (method);
5092 if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
5093 MONO_INST_NEW (cfg, ins, OP_BREAK);
5094 MONO_ADD_INS (bblock, ins);
5098 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5099 secman = mono_security_manager_get_methods ();
5101 security = (secman && mono_method_has_declsec (method));
5102 /* at this point having security doesn't mean we have any code to generate */
5103 if (security && (cfg->method == method)) {
5104 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
5105 * And we do not want to enter the next section (with allocation) if we
5106 * have nothing to generate */
5107 security = mono_declsec_get_demands (method, &actions);
5110 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
5111 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
5113 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5114 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5115 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
5117 /* unless the method or it's class has the [SuppressUnmanagedCodeSecurity] attribute */
5118 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5122 mono_custom_attrs_free (custom);
5125 custom = mono_custom_attrs_from_class (wrapped->klass);
5126 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5130 mono_custom_attrs_free (custom);
5133 /* not a P/Invoke after all */
5138 if ((header->init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
5139 /* we use a separate basic block for the initialization code */
5140 NEW_BBLOCK (cfg, init_localsbb);
5141 cfg->bb_init = init_localsbb;
5142 init_localsbb->real_offset = cfg->real_offset;
5143 start_bblock->next_bb = init_localsbb;
5144 init_localsbb->next_bb = bblock;
5145 link_bblock (cfg, start_bblock, init_localsbb);
5146 link_bblock (cfg, init_localsbb, bblock);
5148 cfg->cbb = init_localsbb;
5150 start_bblock->next_bb = bblock;
5151 link_bblock (cfg, start_bblock, bblock);
5154 /* at this point we know, if security is TRUE, that some code needs to be generated */
5155 if (security && (cfg->method == method)) {
5158 mono_jit_stats.cas_demand_generation++;
5160 if (actions.demand.blob) {
5161 /* Add code for SecurityAction.Demand */
5162 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
5163 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
5164 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5165 mono_emit_method_call (cfg, secman->demand, args, NULL);
5167 if (actions.noncasdemand.blob) {
5168 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
5169 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
5170 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
5171 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
5172 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5173 mono_emit_method_call (cfg, secman->demand, args, NULL);
5175 if (actions.demandchoice.blob) {
5176 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
5177 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
5178 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
5179 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
5180 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
5184 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
5186 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
5189 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
5190 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
5191 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5192 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5193 if (!(method->klass && method->klass->image &&
5194 mono_security_core_clr_is_platform_image (method->klass->image))) {
5195 emit_throw_method_access_exception (cfg, method, wrapped, bblock, ip);
5199 if (!method_is_safe (method))
5200 emit_throw_verification_exception (cfg, bblock, ip);
5203 if (header->code_size == 0)
5206 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
5211 if (cfg->method == method)
5212 mono_debug_init_method (cfg, bblock, breakpoint_id);
5214 for (n = 0; n < header->num_locals; ++n) {
5215 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
5220 /* add a check for this != NULL to inlined methods */
5221 if (is_virtual_call) {
5224 NEW_ARGLOAD (cfg, arg_ins, 0);
5225 MONO_ADD_INS (cfg->cbb, arg_ins);
5226 cfg->flags |= MONO_CFG_HAS_CHECK_THIS;
5227 MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, arg_ins->dreg);
5228 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, arg_ins->dreg);
5231 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
5232 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
5235 start_new_bblock = 0;
5239 if (cfg->method == method)
5240 cfg->real_offset = ip - header->code;
5242 cfg->real_offset = inline_offset;
5247 if (start_new_bblock) {
5248 bblock->cil_length = ip - bblock->cil_code;
5249 if (start_new_bblock == 2) {
5250 g_assert (ip == tblock->cil_code);
5252 GET_BBLOCK (cfg, tblock, ip);
5254 bblock->next_bb = tblock;
5257 start_new_bblock = 0;
5258 for (i = 0; i < bblock->in_scount; ++i) {
5259 if (cfg->verbose_level > 3)
5260 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5261 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5265 g_slist_free (class_inits);
5268 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
5269 link_bblock (cfg, bblock, tblock);
5270 if (sp != stack_start) {
5271 handle_stack_args (cfg, stack_start, sp - stack_start);
5273 CHECK_UNVERIFIABLE (cfg);
5275 bblock->next_bb = tblock;
5278 for (i = 0; i < bblock->in_scount; ++i) {
5279 if (cfg->verbose_level > 3)
5280 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5281 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5284 g_slist_free (class_inits);
5289 bblock->real_offset = cfg->real_offset;
5291 if ((cfg->method == method) && cfg->coverage_info) {
5292 guint32 cil_offset = ip - header->code;
5293 cfg->coverage_info->data [cil_offset].cil_code = ip;
5295 /* TODO: Use an increment here */
5296 #if defined(__i386__)
5297 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
5298 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
5300 MONO_ADD_INS (cfg->cbb, ins);
5302 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
5303 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
5307 if (cfg->verbose_level > 3)
5308 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
5313 MONO_INST_NEW (cfg, ins, (*ip) == CEE_NOP ? OP_NOP : OP_BREAK);
5315 MONO_ADD_INS (bblock, ins);
5321 CHECK_STACK_OVF (1);
5322 n = (*ip)-CEE_LDARG_0;
5324 EMIT_NEW_ARGLOAD (cfg, ins, n);
5332 CHECK_STACK_OVF (1);
5333 n = (*ip)-CEE_LDLOC_0;
5335 EMIT_NEW_LOCLOAD (cfg, ins, n);
5346 n = (*ip)-CEE_STLOC_0;
5349 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
5352 opcode = mono_type_to_regmove (cfg, header->locals [n]);
5353 if ((opcode == OP_MOVE) && ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
5354 /* Optimize reg-reg moves away */
5356 * Can't optimize other opcodes, since sp[0] might point to
5357 * the last ins of a decomposed opcode.
5359 sp [0]->dreg = (cfg)->locals [n]->dreg;
5361 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
5369 CHECK_STACK_OVF (1);
5372 EMIT_NEW_ARGLOAD (cfg, ins, n);
5378 CHECK_STACK_OVF (1);
5381 NEW_ARGLOADA (cfg, ins, n);
5382 MONO_ADD_INS (cfg->cbb, ins);
5392 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
5394 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
5399 CHECK_STACK_OVF (1);
5402 EMIT_NEW_LOCLOAD (cfg, ins, n);
5408 CHECK_STACK_OVF (1);
5409 CHECK_LOCAL (ip [1]);
5412 * ldloca inhibits many optimizations so try to get rid of it in common
5415 if (ip + 8 < end && (ip [2] == CEE_PREFIX1) && (ip [3] == CEE_INITOBJ) && ip_in_bb (cfg, bblock, ip + 3)) {
5416 gboolean skip = FALSE;
5418 /* From the INITOBJ case */
5419 token = read32 (ip + 4);
5420 klass = mini_get_class (method, token, generic_context);
5421 CHECK_TYPELOAD (klass);
5422 if (generic_class_is_reference_type (cfg, klass)) {
5423 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [ip [1]]->dreg, NULL);
5424 } else if (MONO_TYPE_IS_REFERENCE (&klass->byval_arg)) {
5425 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [ip [1]]->dreg, NULL);
5426 } else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
5427 MONO_EMIT_NEW_VZERO (cfg, cfg->locals [ip [1]]->dreg, klass);
5439 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
5447 CHECK_LOCAL (ip [1]);
5448 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
5450 EMIT_NEW_LOCSTORE (cfg, ins, ip [1], *sp);
5455 CHECK_STACK_OVF (1);
5456 EMIT_NEW_PCONST (cfg, ins, NULL);
5457 ins->type = STACK_OBJ;
5462 CHECK_STACK_OVF (1);
5463 EMIT_NEW_ICONST (cfg, ins, -1);
5476 CHECK_STACK_OVF (1);
5477 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
5483 CHECK_STACK_OVF (1);
5485 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
5491 CHECK_STACK_OVF (1);
5492 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
5498 CHECK_STACK_OVF (1);
5499 MONO_INST_NEW (cfg, ins, OP_I8CONST);
5500 ins->type = STACK_I8;
5501 ins->dreg = alloc_dreg (cfg, STACK_I8);
5503 ins->inst_l = (gint64)read64 (ip);
5504 MONO_ADD_INS (bblock, ins);
5510 /* FIXME: we should really allocate this only late in the compilation process */
5511 mono_domain_lock (cfg->domain);
5512 f = mono_mempool_alloc (cfg->domain->mp, sizeof (float));
5513 mono_domain_unlock (cfg->domain);
5515 CHECK_STACK_OVF (1);
5516 MONO_INST_NEW (cfg, ins, OP_R4CONST);
5517 ins->type = STACK_R8;
5518 ins->dreg = alloc_dreg (cfg, STACK_R8);
5522 MONO_ADD_INS (bblock, ins);
5530 /* FIXME: we should really allocate this only late in the compilation process */
5531 mono_domain_lock (cfg->domain);
5532 d = mono_mempool_alloc (cfg->domain->mp, sizeof (double));
5533 mono_domain_unlock (cfg->domain);
5535 CHECK_STACK_OVF (1);
5536 MONO_INST_NEW (cfg, ins, OP_R8CONST);
5537 ins->type = STACK_R8;
5538 ins->dreg = alloc_dreg (cfg, STACK_R8);
5542 MONO_ADD_INS (bblock, ins);
5549 MonoInst *temp, *store;
5551 CHECK_STACK_OVF (1);
5555 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
5556 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
5558 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
5561 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
5574 if (sp [0]->type == STACK_R8)
5575 /* we need to pop the value from the x86 FP stack */
5576 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
5583 if (stack_start != sp)
5585 token = read32 (ip + 1);
5586 /* FIXME: check the signature matches */
5587 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
5592 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
5593 GENERIC_SHARING_FAILURE (CEE_JMP);
5595 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
5596 if (check_linkdemand (cfg, method, cmethod))
5598 CHECK_CFG_EXCEPTION;
5603 MonoMethodSignature *fsig = mono_method_signature (cmethod);
5606 /* FIXME: Remove OP_JMP from mini-amd64.c when the old JIT is removed */
5608 /* Handle tail calls similarly to calls */
5609 n = fsig->param_count + fsig->hasthis;
5611 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
5612 call->method = cmethod;
5613 call->tail_call = TRUE;
5614 call->signature = mono_method_signature (cmethod);
5615 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
5616 call->inst.inst_p0 = cmethod;
5617 for (i = 0; i < n; ++i)
5618 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
5620 mono_arch_emit_call (cfg, call);
5621 MONO_ADD_INS (bblock, (MonoInst*)call);
5624 for (i = 0; i < num_args; ++i)
5625 /* Prevent arguments from being optimized away */
5626 arg_array [i]->flags |= MONO_INST_VOLATILE;
5628 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
5629 ins = (MonoInst*)call;
5630 ins->inst_p0 = cmethod;
5631 MONO_ADD_INS (bblock, ins);
5635 start_new_bblock = 1;
5640 case CEE_CALLVIRT: {
5641 MonoInst *addr = NULL;
5642 MonoMethodSignature *fsig = NULL;
5644 int virtual = *ip == CEE_CALLVIRT;
5645 int calli = *ip == CEE_CALLI;
5646 gboolean pass_imt_from_rgctx = FALSE;
5647 MonoInst *imt_arg = NULL;
5648 gboolean pass_vtable = FALSE;
5649 gboolean pass_mrgctx = FALSE;
5650 MonoInst *vtable_arg = NULL;
5651 gboolean check_this = FALSE;
5654 token = read32 (ip + 1);
5661 if (method->wrapper_type != MONO_WRAPPER_NONE)
5662 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
5664 fsig = mono_metadata_parse_signature (image, token);
5666 n = fsig->param_count + fsig->hasthis;
5668 MonoMethod *cil_method;
5670 if (method->wrapper_type != MONO_WRAPPER_NONE) {
5671 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
5672 cil_method = cmethod;
5673 } else if (constrained_call) {
5674 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
5676 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
5677 cil_method = cmethod;
5682 if (!dont_verify && !cfg->skip_visibility) {
5683 MonoMethod *target_method = cil_method;
5684 if (method->is_inflated) {
5685 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
5687 if (!mono_method_can_access_method (method_definition, target_method) &&
5688 !mono_method_can_access_method (method, cil_method))
5689 METHOD_ACCESS_FAILURE;
5692 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
5693 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
5695 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
5696 /* MS.NET seems to silently convert this to a callvirt */
5699 if (!cmethod->klass->inited)
5700 if (!mono_class_init (cmethod->klass))
5703 if (mono_method_signature (cmethod)->pinvoke) {
5704 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
5705 fsig = mono_method_signature (wrapper);
5706 } else if (constrained_call) {
5707 fsig = mono_method_signature (cmethod);
5709 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
5712 mono_save_token_info (cfg, image, token, cmethod);
5714 n = fsig->param_count + fsig->hasthis;
5716 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
5717 if (check_linkdemand (cfg, method, cmethod))
5719 CHECK_CFG_EXCEPTION;
5722 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
5723 mini_class_is_system_array (cmethod->klass)) {
5724 array_rank = cmethod->klass->rank;
5727 if (cmethod->string_ctor)
5728 g_assert_not_reached ();
5731 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
5734 if (!cfg->generic_sharing_context && cmethod)
5735 g_assert (!mono_method_check_context_used (cmethod));
5739 //g_assert (!virtual || fsig->hasthis);
5743 if (constrained_call) {
5745 * We have the `constrained.' prefix opcode.
5747 if (constrained_call->valuetype && !cmethod->klass->valuetype) {
5751 * The type parameter is instantiated as a valuetype,
5752 * but that type doesn't override the method we're
5753 * calling, so we need to box `this'.
5755 dreg = alloc_dreg (cfg, STACK_VTYPE);
5756 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADV_MEMBASE, dreg, sp [0]->dreg, 0);
5757 ins->klass = constrained_call;
5758 sp [0] = handle_box (cfg, ins, constrained_call);
5759 } else if (!constrained_call->valuetype) {
5760 int dreg = alloc_preg (cfg);
5763 * The type parameter is instantiated as a reference
5764 * type. We have a managed pointer on the stack, so
5765 * we need to dereference it here.
5767 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
5768 ins->type = STACK_OBJ;
5770 } else if (cmethod->klass->valuetype)
5772 constrained_call = NULL;
5775 if (*ip != CEE_CALLI && check_call_signature (cfg, fsig, sp))
5779 if (cmethod && (cmethod->flags & METHOD_ATTRIBUTE_STATIC) &&
5780 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
5781 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
5782 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
5783 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
5786 * Pass vtable iff target method might
5787 * be shared, which means that sharing
5788 * is enabled for its class and its
5789 * context is sharable (and it's not a
5792 if (sharing_enabled && context_sharable &&
5793 !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
5797 if (cmethod && mini_method_get_context (cmethod) &&
5798 mini_method_get_context (cmethod)->method_inst) {
5799 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
5800 MonoGenericContext *context = mini_method_get_context (cmethod);
5801 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
5803 g_assert (!pass_vtable);
5805 if (sharing_enabled && context_sharable)
5809 if (cfg->generic_sharing_context && cmethod) {
5810 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
5812 context_used = mono_method_check_context_used (cmethod);
5814 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
5815 /* Generic method interface
5816 calls are resolved via a
5817 helper function and don't
5819 if (!cmethod_context || !cmethod_context->method_inst)
5820 pass_imt_from_rgctx = TRUE;
5824 * If a shared method calls another
5825 * shared method then the caller must
5826 * have a generic sharing context
5827 * because the magic trampoline
5828 * requires it. FIXME: We shouldn't
5829 * have to force the vtable/mrgctx
5830 * variable here. Instead there
5831 * should be a flag in the cfg to
5832 * request a generic sharing context.
5834 if (context_used && method->flags & METHOD_ATTRIBUTE_STATIC)
5835 mono_get_vtable_var (cfg);
5842 EMIT_GET_RGCTX (rgctx, context_used);
5843 vtable_arg = emit_get_rgctx_klass (cfg, context_used, rgctx, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
5845 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
5847 CHECK_TYPELOAD (cmethod->klass);
5848 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
5853 g_assert (!vtable_arg);
5858 EMIT_GET_RGCTX (rgctx, context_used);
5859 vtable_arg = emit_get_rgctx_method (cfg, context_used, rgctx, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
5861 EMIT_NEW_METHOD_RGCTX_CONST (cfg, vtable_arg, cmethod);
5864 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
5865 (cmethod->flags & METHOD_ATTRIBUTE_FINAL)) {
5872 if (pass_imt_from_rgctx) {
5875 g_assert (!pass_vtable);
5878 EMIT_GET_RGCTX (rgctx, context_used);
5879 imt_arg = emit_get_rgctx_method (cfg, context_used, rgctx, cmethod,
5880 MONO_RGCTX_INFO_METHOD);
5886 MONO_INST_NEW (cfg, check, OP_CHECK_THIS);
5887 check->sreg1 = sp [0]->dreg;
5888 MONO_ADD_INS (cfg->cbb, check);
5891 /* Calling virtual generic methods */
5892 if (cmethod && virtual &&
5893 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
5894 !((cmethod->flags & METHOD_ATTRIBUTE_FINAL) &&
5895 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
5896 mono_method_signature (cmethod)->generic_param_count) {
5897 MonoInst *this_temp, *this_arg_temp, *store;
5898 MonoInst *iargs [4];
5900 g_assert (mono_method_signature (cmethod)->is_inflated);
5902 /* Prevent inlining of methods that contain indirect calls */
5905 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
5906 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
5907 MONO_ADD_INS (bblock, store);
5909 /* FIXME: This should be a managed pointer */
5910 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
5912 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
5916 EMIT_GET_RGCTX (rgctx, context_used);
5917 iargs [1] = emit_get_rgctx_method (cfg, context_used, rgctx, cmethod, MONO_RGCTX_INFO_METHOD);
5918 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
5919 addr = mono_emit_jit_icall (cfg,
5920 mono_helper_compile_generic_method, iargs);
5922 EMIT_NEW_METHODCONST (cfg, iargs [1], cmethod);
5923 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
5924 addr = mono_emit_jit_icall (cfg, mono_helper_compile_generic_method, iargs);
5927 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
5929 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
5930 if (!MONO_TYPE_IS_VOID (fsig->ret))
5939 /* FIXME: runtime generic context pointer for jumps? */
5940 /* FIXME: handle this for generic sharing eventually */
5941 if ((ins_flag & MONO_INST_TAILCALL) && !cfg->generic_sharing_context && !vtable_arg && cmethod && (*ip == CEE_CALL) &&
5942 (mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)))) {
5945 /* FIXME: runtime generic context pointer for jumps? */
5946 GENERIC_SHARING_FAILURE (*ip);
5948 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
5951 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
5952 call->tail_call = TRUE;
5953 call->method = cmethod;
5954 call->signature = mono_method_signature (cmethod);
5957 /* Handle tail calls similarly to calls */
5958 call->inst.opcode = OP_TAILCALL;
5960 mono_arch_emit_call (cfg, call);
5963 * We implement tail calls by storing the actual arguments into the
5964 * argument variables, then emitting a CEE_JMP.
5966 for (i = 0; i < n; ++i) {
5967 /* Prevent argument from being register allocated */
5968 arg_array [i]->flags |= MONO_INST_VOLATILE;
5969 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
5973 ins = (MonoInst*)call;
5974 ins->inst_p0 = cmethod;
5975 ins->inst_p1 = arg_array [0];
5976 MONO_ADD_INS (bblock, ins);
5977 link_bblock (cfg, bblock, end_bblock);
5978 start_new_bblock = 1;
5979 /* skip CEE_RET as well */
5985 /* Conversion to a JIT intrinsic */
5986 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
5987 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
5988 type_to_eval_stack_type ((cfg), fsig->ret, ins);
5999 if ((cfg->opt & MONO_OPT_INLINE) && cmethod &&
6000 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || (cmethod->flags & METHOD_ATTRIBUTE_FINAL)) &&
6001 mono_method_check_inlining (cfg, cmethod) &&
6002 !g_list_find (dont_inline, cmethod)) {
6004 gboolean allways = FALSE;
6006 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
6007 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
6008 /* Prevent inlining of methods that call wrappers */
6010 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
6014 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, allways))) {
6016 cfg->real_offset += 5;
6019 if (!MONO_TYPE_IS_VOID (fsig->ret))
6020 /* *sp is already set by inline_method */
6023 inline_costs += costs;
6029 inline_costs += 10 * num_calls++;
6031 /* Tail recursion elimination */
6032 if ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
6033 gboolean has_vtargs = FALSE;
6036 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6039 /* keep it simple */
6040 for (i = fsig->param_count - 1; i >= 0; i--) {
6041 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
6046 for (i = 0; i < n; ++i)
6047 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6048 MONO_INST_NEW (cfg, ins, OP_BR);
6049 MONO_ADD_INS (bblock, ins);
6050 tblock = start_bblock->out_bb [0];
6051 link_bblock (cfg, bblock, tblock);
6052 ins->inst_target_bb = tblock;
6053 start_new_bblock = 1;
6055 /* skip the CEE_RET, too */
6056 if (ip_in_bb (cfg, bblock, ip + 5))
6066 /* Generic sharing */
6067 /* FIXME: only do this for generic methods if
6068 they are not shared! */
6070 (cmethod->klass->valuetype ||
6071 (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst && !pass_mrgctx) ||
6072 ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) &&
6073 mono_class_generic_sharing_enabled (cmethod->klass)) ||
6074 (!imt_arg && !mono_method_is_generic_sharable_impl (cmethod, TRUE) &&
6075 (!virtual || cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
6076 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))))) {
6081 g_assert (cfg->generic_sharing_context && cmethod);
6085 * We are compiling a call to a
6086 * generic method from shared code,
6087 * which means that we have to look up
6088 * the method in the rgctx and do an
6092 EMIT_GET_RGCTX (rgctx, context_used);
6093 addr = emit_get_rgctx_method (cfg, context_used, rgctx, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
6096 /* Indirect calls */
6098 g_assert (!imt_arg);
6100 if (*ip == CEE_CALL)
6101 g_assert (context_used);
6102 else if (*ip == CEE_CALLI)
6103 g_assert (!vtable_arg);
6105 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
6106 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
6108 /* Prevent inlining of methods with indirect calls */
6112 #ifdef MONO_ARCH_RGCTX_REG
6114 int rgctx_reg = mono_alloc_preg (cfg);
6116 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
6117 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6118 call = (MonoCallInst*)ins;
6119 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
6120 cfg->uses_rgctx_reg = TRUE;
6125 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6127 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6128 if (fsig->pinvoke && !fsig->ret->byref) {
6132 * Native code might return non register sized integers
6133 * without initializing the upper bits.
6135 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
6136 case OP_LOADI1_MEMBASE:
6137 widen_op = OP_ICONV_TO_I1;
6139 case OP_LOADU1_MEMBASE:
6140 widen_op = OP_ICONV_TO_U1;
6142 case OP_LOADI2_MEMBASE:
6143 widen_op = OP_ICONV_TO_I2;
6145 case OP_LOADU2_MEMBASE:
6146 widen_op = OP_ICONV_TO_U2;
6152 if (widen_op != -1) {
6153 int dreg = alloc_preg (cfg);
6156 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
6157 widen->type = ins->type;
6174 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
6175 if (sp [fsig->param_count]->type == STACK_OBJ) {
6176 MonoInst *iargs [2];
6179 iargs [1] = sp [fsig->param_count];
6181 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
6184 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
6185 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, sp [fsig->param_count]->dreg);
6186 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
6187 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6189 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
6192 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
6193 if (!cmethod->klass->element_class->valuetype && !readonly)
6194 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
6197 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6200 g_assert_not_reached ();
6208 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
6210 if (!MONO_TYPE_IS_VOID (fsig->ret))
6221 #ifdef MONO_ARCH_RGCTX_REG
6223 int rgctx_reg = mono_alloc_preg (cfg);
6225 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
6226 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, NULL);
6227 call = (MonoCallInst*)ins;
6228 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
6229 cfg->uses_rgctx_reg = TRUE;
6233 } else if (imt_arg) {
6234 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, imt_arg);
6236 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, NULL);
6239 if (!MONO_TYPE_IS_VOID (fsig->ret))
6247 if (cfg->method != method) {
6248 /* return from inlined method */
6250 * If in_count == 0, that means the ret is unreachable due to
6251 * being preceeded by a throw. In that case, inline_method () will
6252 * handle setting the return value
6253 * (test case: test_0_inline_throw ()).
6255 if (return_var && cfg->cbb->in_count) {
6259 //g_assert (returnvar != -1);
6260 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
6261 cfg->ret_var_set = TRUE;
6265 MonoType *ret_type = mono_method_signature (method)->ret;
6267 g_assert (!return_var);
6270 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
6273 if (!cfg->vret_addr) {
6276 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
6278 EMIT_NEW_RETLOADA (cfg, ret_addr);
6280 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
6281 ins->klass = mono_class_from_mono_type (ret_type);
6284 #ifdef MONO_ARCH_SOFT_FLOAT
6285 if (!ret_type->byref && ret_type->type == MONO_TYPE_R4) {
6286 MonoInst *iargs [1];
6290 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
6291 mono_arch_emit_setret (cfg, method, conv);
6293 mono_arch_emit_setret (cfg, method, *sp);
6296 mono_arch_emit_setret (cfg, method, *sp);
6301 if (sp != stack_start)
6303 MONO_INST_NEW (cfg, ins, OP_BR);
6305 ins->inst_target_bb = end_bblock;
6306 MONO_ADD_INS (bblock, ins);
6307 link_bblock (cfg, bblock, end_bblock);
6308 start_new_bblock = 1;
6312 MONO_INST_NEW (cfg, ins, OP_BR);
6314 target = ip + 1 + (signed char)(*ip);
6316 GET_BBLOCK (cfg, tblock, target);
6317 link_bblock (cfg, bblock, tblock);
6318 ins->inst_target_bb = tblock;
6319 if (sp != stack_start) {
6320 handle_stack_args (cfg, stack_start, sp - stack_start);
6322 CHECK_UNVERIFIABLE (cfg);
6324 MONO_ADD_INS (bblock, ins);
6325 start_new_bblock = 1;
6326 inline_costs += BRANCH_COST;
6340 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
6342 target = ip + 1 + *(signed char*)ip;
6348 inline_costs += BRANCH_COST;
6352 MONO_INST_NEW (cfg, ins, OP_BR);
6355 target = ip + 4 + (gint32)read32(ip);
6357 GET_BBLOCK (cfg, tblock, target);
6358 link_bblock (cfg, bblock, tblock);
6359 ins->inst_target_bb = tblock;
6360 if (sp != stack_start) {
6361 handle_stack_args (cfg, stack_start, sp - stack_start);
6363 CHECK_UNVERIFIABLE (cfg);
6366 MONO_ADD_INS (bblock, ins);
6368 start_new_bblock = 1;
6369 inline_costs += BRANCH_COST;
6376 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
6377 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
6378 guint32 opsize = is_short ? 1 : 4;
6380 CHECK_OPSIZE (opsize);
6382 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
6385 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
6390 GET_BBLOCK (cfg, tblock, target);
6391 link_bblock (cfg, bblock, tblock);
6392 GET_BBLOCK (cfg, tblock, ip);
6393 link_bblock (cfg, bblock, tblock);
6395 if (sp != stack_start) {
6396 handle_stack_args (cfg, stack_start, sp - stack_start);
6397 CHECK_UNVERIFIABLE (cfg);
6400 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
6401 cmp->sreg1 = sp [0]->dreg;
6402 type_from_op (cmp, sp [0], NULL);
6405 #if SIZEOF_VOID_P == 4
6406 if (cmp->opcode == OP_LCOMPARE_IMM) {
6407 /* Convert it to OP_LCOMPARE */
6408 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6409 ins->type = STACK_I8;
6410 ins->dreg = alloc_dreg (cfg, STACK_I8);
6412 MONO_ADD_INS (bblock, ins);
6413 cmp->opcode = OP_LCOMPARE;
6414 cmp->sreg2 = ins->dreg;
6417 MONO_ADD_INS (bblock, cmp);
6419 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
6420 type_from_op (ins, sp [0], NULL);
6421 MONO_ADD_INS (bblock, ins);
6422 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
6423 GET_BBLOCK (cfg, tblock, target);
6424 ins->inst_true_bb = tblock;
6425 GET_BBLOCK (cfg, tblock, ip);
6426 ins->inst_false_bb = tblock;
6427 start_new_bblock = 2;
6430 inline_costs += BRANCH_COST;
6445 MONO_INST_NEW (cfg, ins, *ip);
6447 target = ip + 4 + (gint32)read32(ip);
6453 inline_costs += BRANCH_COST;
6457 MonoBasicBlock **targets;
6458 MonoBasicBlock *default_bblock;
6459 MonoJumpInfoBBTable *table;
6461 int offset_reg = alloc_preg (cfg);
6462 int target_reg = alloc_preg (cfg);
6463 int table_reg = alloc_preg (cfg);
6464 int sum_reg = alloc_preg (cfg);
6469 n = read32 (ip + 1);
6472 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
6476 CHECK_OPSIZE (n * sizeof (guint32));
6477 target = ip + n * sizeof (guint32);
6479 GET_BBLOCK (cfg, default_bblock, target);
6481 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
6482 for (i = 0; i < n; ++i) {
6483 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
6484 targets [i] = tblock;
6488 if (sp != stack_start) {
6490 * Link the current bb with the targets as well, so handle_stack_args
6491 * will set their in_stack correctly.
6493 link_bblock (cfg, bblock, default_bblock);
6494 for (i = 0; i < n; ++i)
6495 link_bblock (cfg, bblock, targets [i]);
6497 handle_stack_args (cfg, stack_start, sp - stack_start);
6499 CHECK_UNVERIFIABLE (cfg);
6502 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
6503 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
6506 for (i = 0; i < n; ++i)
6507 link_bblock (cfg, bblock, targets [i]);
6509 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
6510 table->table = targets;
6511 table->table_size = n;
6514 /* ARM implements SWITCH statements differently */
6515 /* FIXME: Make it use the generic implementation */
6516 /* the backend code will deal with aot vs normal case */
6517 MONO_INST_NEW (cfg, ins, OP_SWITCH);
6518 ins->sreg1 = src1->dreg;
6519 ins->inst_p0 = table;
6520 ins->inst_many_bb = targets;
6521 ins->klass = GUINT_TO_POINTER (n);
6522 MONO_ADD_INS (cfg->cbb, ins);
6524 if (sizeof (gpointer) == 8)
6525 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
6527 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
6529 #if SIZEOF_VOID_P == 8
6530 /* The upper word might not be zero, and we add it to a 64 bit address later */
6531 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
6534 if (cfg->compile_aot) {
6535 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
6537 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
6538 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
6539 ins->inst_p0 = table;
6540 ins->dreg = table_reg;
6541 MONO_ADD_INS (cfg->cbb, ins);
6544 /* FIXME: Use load_memindex */
6545 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
6546 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
6547 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
6549 start_new_bblock = 1;
6550 inline_costs += (BRANCH_COST * 2);
6570 dreg = alloc_freg (cfg);
6573 dreg = alloc_lreg (cfg);
6576 dreg = alloc_preg (cfg);
6579 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
6580 ins->type = ldind_type [*ip - CEE_LDIND_I1];
6581 ins->flags |= ins_flag;
6583 MONO_ADD_INS (bblock, ins);
6598 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
6599 ins->flags |= ins_flag;
6601 MONO_ADD_INS (bblock, ins);
6609 MONO_INST_NEW (cfg, ins, (*ip));
6611 ins->sreg1 = sp [0]->dreg;
6612 ins->sreg2 = sp [1]->dreg;
6613 type_from_op (ins, sp [0], sp [1]);
6615 ins->dreg = alloc_dreg ((cfg), (ins)->type);
6617 /* Use the immediate opcodes if possible */
6618 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
6619 int imm_opcode = mono_op_to_op_imm (ins->opcode);
6620 if (imm_opcode != -1) {
6621 ins->opcode = imm_opcode;
6622 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
6625 sp [1]->opcode = OP_NOP;
6629 MONO_ADD_INS ((cfg)->cbb, (ins));
6632 mono_decompose_opcode (cfg, ins);
6649 MONO_INST_NEW (cfg, ins, (*ip));
6651 ins->sreg1 = sp [0]->dreg;
6652 ins->sreg2 = sp [1]->dreg;
6653 type_from_op (ins, sp [0], sp [1]);
6655 ADD_WIDEN_OP (ins, sp [0], sp [1]);
6656 ins->dreg = alloc_dreg ((cfg), (ins)->type);
6658 /* FIXME: Pass opcode to is_inst_imm */
6660 /* Use the immediate opcodes if possible */
6661 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
6664 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
6665 if (imm_opcode != -1) {
6666 ins->opcode = imm_opcode;
6667 if (sp [1]->opcode == OP_I8CONST) {
6668 #if SIZEOF_VOID_P == 8
6669 ins->inst_imm = sp [1]->inst_l;
6671 ins->inst_ls_word = sp [1]->inst_ls_word;
6672 ins->inst_ms_word = sp [1]->inst_ms_word;
6676 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
6679 sp [1]->opcode = OP_NOP;
6682 MONO_ADD_INS ((cfg)->cbb, (ins));
6685 mono_decompose_opcode (cfg, ins);
6698 case CEE_CONV_OVF_I8:
6699 case CEE_CONV_OVF_U8:
6703 /* Special case this earlier so we have long constants in the IR */
6704 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
6705 int data = sp [-1]->inst_c0;
6706 sp [-1]->opcode = OP_I8CONST;
6707 sp [-1]->type = STACK_I8;
6708 #if SIZEOF_VOID_P == 8
6709 if ((*ip) == CEE_CONV_U8)
6710 sp [-1]->inst_c0 = (guint32)data;
6712 sp [-1]->inst_c0 = data;
6714 sp [-1]->inst_ls_word = data;
6715 if ((*ip) == CEE_CONV_U8)
6716 sp [-1]->inst_ms_word = 0;
6718 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
6720 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
6727 case CEE_CONV_OVF_I4:
6728 case CEE_CONV_OVF_I1:
6729 case CEE_CONV_OVF_I2:
6730 case CEE_CONV_OVF_I:
6731 case CEE_CONV_OVF_U:
6734 if (sp [-1]->type == STACK_R8) {
6735 ADD_UNOP (CEE_CONV_OVF_I8);
6742 case CEE_CONV_OVF_U1:
6743 case CEE_CONV_OVF_U2:
6744 case CEE_CONV_OVF_U4:
6747 if (sp [-1]->type == STACK_R8) {
6748 ADD_UNOP (CEE_CONV_OVF_U8);
6755 case CEE_CONV_OVF_I1_UN:
6756 case CEE_CONV_OVF_I2_UN:
6757 case CEE_CONV_OVF_I4_UN:
6758 case CEE_CONV_OVF_I8_UN:
6759 case CEE_CONV_OVF_U1_UN:
6760 case CEE_CONV_OVF_U2_UN:
6761 case CEE_CONV_OVF_U4_UN:
6762 case CEE_CONV_OVF_U8_UN:
6763 case CEE_CONV_OVF_I_UN:
6764 case CEE_CONV_OVF_U_UN:
6774 case CEE_ADD_OVF_UN:
6776 case CEE_MUL_OVF_UN:
6778 case CEE_SUB_OVF_UN:
6786 token = read32 (ip + 1);
6787 klass = mini_get_class (method, token, generic_context);
6788 CHECK_TYPELOAD (klass);
6790 if (generic_class_is_reference_type (cfg, klass)) {
6791 MonoInst *store, *load;
6792 int dreg = alloc_preg (cfg);
6794 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
6795 load->flags |= ins_flag;
6796 MONO_ADD_INS (cfg->cbb, load);
6798 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
6799 store->flags |= ins_flag;
6800 MONO_ADD_INS (cfg->cbb, store);
6802 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
6814 token = read32 (ip + 1);
6815 klass = mini_get_class (method, token, generic_context);
6816 CHECK_TYPELOAD (klass);
6818 /* Optimize the common ldobj+stloc combination */
6828 loc_index = ip [5] - CEE_STLOC_0;
6835 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
6836 CHECK_LOCAL (loc_index);
6838 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
6839 ins->dreg = cfg->locals [loc_index]->dreg;
6845 /* Optimize the ldobj+stobj combination */
6846 /* The reference case ends up being a load+store anyway */
6847 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 9) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
6852 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
6859 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
6868 CHECK_STACK_OVF (1);
6870 n = read32 (ip + 1);
6872 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
6873 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
6874 ins->type = STACK_OBJ;
6877 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
6878 MonoInst *iargs [1];
6880 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
6881 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
6883 if (cfg->opt & MONO_OPT_SHARED) {
6884 MonoInst *iargs [3];
6886 if (cfg->compile_aot) {
6887 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
6889 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
6890 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
6891 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
6892 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
6893 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
6895 if (bblock->out_of_line) {
6896 MonoInst *iargs [2];
6898 if (cfg->method->klass->image == mono_defaults.corlib) {
6900 * Avoid relocations in AOT and save some space by using a
6901 * version of helper_ldstr specialized to mscorlib.
6903 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
6904 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
6906 /* Avoid creating the string object */
6907 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
6908 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
6909 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
6913 if (cfg->compile_aot) {
6914 NEW_LDSTRCONST (cfg, ins, image, n);
6916 MONO_ADD_INS (bblock, ins);
6919 NEW_PCONST (cfg, ins, NULL);
6920 ins->type = STACK_OBJ;
6921 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
6923 MONO_ADD_INS (bblock, ins);
6932 MonoInst *iargs [2];
6933 MonoMethodSignature *fsig;
6938 token = read32 (ip + 1);
6939 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6942 fsig = mono_method_get_signature (cmethod, image, token);
6944 mono_save_token_info (cfg, image, token, cmethod);
6946 if (!mono_class_init (cmethod->klass))
6949 if (cfg->generic_sharing_context)
6950 context_used = mono_method_check_context_used (cmethod);
6952 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
6953 if (check_linkdemand (cfg, method, cmethod))
6955 CHECK_CFG_EXCEPTION;
6956 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
6957 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
6960 n = fsig->param_count;
6964 * Generate smaller code for the common newobj <exception> instruction in
6965 * argument checking code.
6967 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib && n <= 2 &&
6968 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
6969 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
6970 MonoInst *iargs [3];
6974 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
6977 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
6981 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
6986 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
6989 g_assert_not_reached ();
6997 /* move the args to allow room for 'this' in the first position */
7003 /* check_call_signature () requires sp[0] to be set */
7004 this_ins.type = STACK_OBJ;
7006 if (check_call_signature (cfg, fsig, sp))
7011 if (mini_class_is_system_array (cmethod->klass)) {
7012 g_assert (!context_used);
7013 EMIT_NEW_METHODCONST (cfg, *sp, cmethod);
7015 /* Avoid varargs in the common case */
7016 if (fsig->param_count == 1)
7017 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
7018 else if (fsig->param_count == 2)
7019 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
7021 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
7022 } else if (cmethod->string_ctor) {
7023 g_assert (!context_used);
7024 /* we simply pass a null pointer */
7025 EMIT_NEW_PCONST (cfg, *sp, NULL);
7026 /* now call the string ctor */
7027 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, sp, NULL, NULL);
7029 MonoInst* callvirt_this_arg = NULL;
7031 if (cmethod->klass->valuetype) {
7032 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
7033 MONO_EMIT_NEW_VZERO (cfg, iargs [0]->dreg, cmethod->klass);
7034 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
7039 * The code generated by mini_emit_virtual_call () expects
7040 * iargs [0] to be a boxed instance, but luckily the vcall
7041 * will be transformed into a normal call there.
7043 } else if (context_used) {
7044 MonoInst *rgctx, *data;
7047 EMIT_GET_RGCTX (rgctx, context_used);
7048 if (cfg->opt & MONO_OPT_SHARED)
7049 rgctx_info = MONO_RGCTX_INFO_KLASS;
7051 rgctx_info = MONO_RGCTX_INFO_VTABLE;
7052 data = emit_get_rgctx_klass (cfg, context_used, rgctx, cmethod->klass, rgctx_info);
7054 alloc = handle_alloc_from_inst (cfg, cmethod->klass, data, FALSE);
7057 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7059 CHECK_TYPELOAD (cmethod->klass);
7062 * TypeInitializationExceptions thrown from the mono_runtime_class_init
7063 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
7064 * As a workaround, we call class cctors before allocating objects.
7066 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
7067 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
7068 if (cfg->verbose_level > 2)
7069 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
7070 class_inits = g_slist_prepend (class_inits, vtable);
7073 alloc = handle_alloc (cfg, cmethod->klass, FALSE);
7078 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
7080 /* Now call the actual ctor */
7081 /* Avoid virtual calls to ctors if possible */
7082 if (cmethod->klass->marshalbyref)
7083 callvirt_this_arg = sp [0];
7085 if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used &&
7086 mono_method_check_inlining (cfg, cmethod) &&
7087 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
7088 !g_list_find (dont_inline, cmethod)) {
7091 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
7092 cfg->real_offset += 5;
7095 inline_costs += costs - 5;
7098 mono_emit_method_call_full (cfg, cmethod, fsig, sp, callvirt_this_arg, NULL);
7100 } else if (context_used &&
7101 (cmethod->klass->valuetype ||
7102 !mono_method_is_generic_sharable_impl (cmethod, TRUE))) {
7103 MonoInst *rgctx, *cmethod_addr;
7105 g_assert (!callvirt_this_arg);
7107 EMIT_GET_RGCTX (rgctx, context_used);
7108 cmethod_addr = emit_get_rgctx_method (cfg, context_used, rgctx,
7109 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7111 mono_emit_calli (cfg, fsig, sp, cmethod_addr);
7114 mono_emit_method_call_full (cfg, cmethod, fsig, sp, callvirt_this_arg, NULL);
7118 if (alloc == NULL) {
7120 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
7121 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
7135 token = read32 (ip + 1);
7136 klass = mini_get_class (method, token, generic_context);
7137 CHECK_TYPELOAD (klass);
7138 if (sp [0]->type != STACK_OBJ)
7141 if (cfg->generic_sharing_context)
7142 context_used = mono_class_check_context_used (klass);
7145 MonoInst *rgctx, *args [2];
7147 g_assert (!method->klass->valuetype);
7153 EMIT_GET_RGCTX (rgctx, context_used);
7154 args [1] = emit_get_rgctx_klass (cfg, context_used, rgctx, klass,
7155 MONO_RGCTX_INFO_KLASS);
7157 ins = mono_emit_jit_icall (cfg, mono_object_castclass, args);
7161 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7162 MonoMethod *mono_castclass;
7163 MonoInst *iargs [1];
7166 mono_castclass = mono_marshal_get_castclass (klass);
7169 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7170 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7171 g_assert (costs > 0);
7174 cfg->real_offset += 5;
7179 inline_costs += costs;
7182 ins = handle_castclass (cfg, klass, *sp);
7192 token = read32 (ip + 1);
7193 klass = mini_get_class (method, token, generic_context);
7194 CHECK_TYPELOAD (klass);
7195 if (sp [0]->type != STACK_OBJ)
7198 if (cfg->generic_sharing_context)
7199 context_used = mono_class_check_context_used (klass);
7202 MonoInst *rgctx, *args [2];
7208 EMIT_GET_RGCTX (rgctx, context_used);
7209 args [1] = emit_get_rgctx_klass (cfg, context_used, rgctx, klass, MONO_RGCTX_INFO_KLASS);
7211 *sp = mono_emit_jit_icall (cfg, mono_object_isinst, args);
7215 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7216 MonoMethod *mono_isinst;
7217 MonoInst *iargs [1];
7220 mono_isinst = mono_marshal_get_isinst (klass);
7223 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
7224 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7225 g_assert (costs > 0);
7228 cfg->real_offset += 5;
7233 inline_costs += costs;
7236 ins = handle_isinst (cfg, klass, *sp);
7243 case CEE_UNBOX_ANY: {
7244 MonoInst *rgctx = NULL;
7249 token = read32 (ip + 1);
7250 klass = mini_get_class (method, token, generic_context);
7251 CHECK_TYPELOAD (klass);
7253 mono_save_token_info (cfg, image, token, klass);
7255 if (cfg->generic_sharing_context)
7256 context_used = mono_class_check_context_used (klass);
7258 if (generic_class_is_reference_type (cfg, klass)) {
7261 MonoInst *iargs [2];
7263 GENERIC_SHARING_FAILURE_IF_VALUETYPE_METHOD (*ip);
7268 EMIT_GET_RGCTX (rgctx, context_used);
7269 iargs [1] = emit_get_rgctx_klass (cfg, context_used, rgctx, klass, MONO_RGCTX_INFO_KLASS);
7270 ins = mono_emit_jit_icall (cfg, mono_object_castclass, iargs);
7274 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7275 MonoMethod *mono_castclass;
7276 MonoInst *iargs [1];
7279 mono_castclass = mono_marshal_get_castclass (klass);
7282 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7283 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7285 g_assert (costs > 0);
7288 cfg->real_offset += 5;
7292 inline_costs += costs;
7294 ins = handle_castclass (cfg, klass, *sp);
7303 EMIT_GET_RGCTX (rgctx, context_used);
7305 if (mono_class_is_nullable (klass)) {
7306 ins = handle_unbox_nullable (cfg, *sp, klass, context_used, rgctx);
7313 ins = handle_unbox (cfg, klass, sp, context_used, rgctx);
7319 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7332 token = read32 (ip + 1);
7333 klass = mini_get_class (method, token, generic_context);
7334 CHECK_TYPELOAD (klass);
7336 mono_save_token_info (cfg, image, token, klass);
7338 if (cfg->generic_sharing_context)
7339 context_used = mono_class_check_context_used (klass);
7341 if (generic_class_is_reference_type (cfg, klass)) {
7347 if (klass == mono_defaults.void_class)
7349 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
7351 /* frequent check in generic code: box (struct), brtrue */
7352 if (!mono_class_is_nullable (klass) &&
7353 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) && (ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S)) {
7354 /*printf ("box-brtrue opt at 0x%04x in %s\n", real_offset, method->name);*/
7356 MONO_INST_NEW (cfg, ins, OP_BR);
7357 if (*ip == CEE_BRTRUE_S) {
7360 target = ip + 1 + (signed char)(*ip);
7365 target = ip + 4 + (gint)(read32 (ip));
7368 GET_BBLOCK (cfg, tblock, target);
7369 link_bblock (cfg, bblock, tblock);
7370 ins->inst_target_bb = tblock;
7371 GET_BBLOCK (cfg, tblock, ip);
7373 * This leads to some inconsistency, since the two bblocks are not
7374 * really connected, but it is needed for handling stack arguments
7375 * correctly (see test_0_box_brtrue_opt_regress_81102).
7377 link_bblock (cfg, bblock, tblock);
7378 if (sp != stack_start) {
7379 handle_stack_args (cfg, stack_start, sp - stack_start);
7381 CHECK_UNVERIFIABLE (cfg);
7383 MONO_ADD_INS (bblock, ins);
7384 start_new_bblock = 1;
7393 EMIT_GET_RGCTX (rgctx, context_used);
7394 if (cfg->opt & MONO_OPT_SHARED)
7395 rgctx_info = MONO_RGCTX_INFO_KLASS;
7397 rgctx_info = MONO_RGCTX_INFO_VTABLE;
7398 data = emit_get_rgctx_klass (cfg, context_used, rgctx, klass, rgctx_info);
7399 *sp++ = handle_box_from_inst (cfg, val, klass, context_used, rgctx, data);
7401 *sp++ = handle_box (cfg, val, klass);
7409 MonoInst *rgctx = NULL;
7414 token = read32 (ip + 1);
7415 klass = mini_get_class (method, token, generic_context);
7416 CHECK_TYPELOAD (klass);
7418 mono_save_token_info (cfg, image, token, klass);
7420 if (cfg->generic_sharing_context)
7421 context_used = mono_class_check_context_used (klass);
7424 EMIT_GET_RGCTX (rgctx, context_used);
7426 if (mono_class_is_nullable (klass)) {
7429 val = handle_unbox_nullable (cfg, *sp, klass, context_used, rgctx);
7430 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
7434 ins = handle_unbox (cfg, klass, sp, context_used, rgctx);
7444 MonoClassField *field;
7448 if (*ip == CEE_STFLD) {
7455 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
7457 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
7460 token = read32 (ip + 1);
7461 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7462 field = mono_method_get_wrapper_data (method, token);
7463 klass = field->parent;
7466 field = mono_field_from_token (image, token, &klass, generic_context);
7470 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
7471 FIELD_ACCESS_FAILURE;
7472 mono_class_init (klass);
7474 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
7475 if (*ip == CEE_STFLD) {
7476 if (target_type_is_incompatible (cfg, field->type, sp [1]))
7478 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
7479 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
7480 MonoInst *iargs [5];
7483 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
7484 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
7485 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
7489 if (cfg->opt & MONO_OPT_INLINE) {
7490 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
7491 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7492 g_assert (costs > 0);
7495 cfg->real_offset += 5;
7498 inline_costs += costs;
7501 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
7506 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
7508 store->flags |= ins_flag;
7515 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
7516 MonoMethod *wrapper = (*ip == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
7517 MonoInst *iargs [4];
7520 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
7521 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
7522 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
7523 if ((cfg->opt & MONO_OPT_INLINE) && !MONO_TYPE_ISSTRUCT (mono_method_signature (wrapper)->ret)) {
7524 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
7525 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7527 g_assert (costs > 0);
7530 cfg->real_offset += 5;
7534 inline_costs += costs;
7537 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
7541 if (sp [0]->type == STACK_VTYPE) {
7544 /* Have to compute the address of the variable */
7546 var = get_vreg_to_inst (cfg, sp [0]->dreg);
7548 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
7550 g_assert (var->klass == klass);
7552 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
7556 if (*ip == CEE_LDFLDA) {
7557 dreg = alloc_preg (cfg);
7559 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
7560 ins->klass = mono_class_from_mono_type (field->type);
7561 ins->type = STACK_MP;
7566 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
7567 load->flags |= ins_flag;
7578 MonoClassField *field;
7579 gpointer addr = NULL;
7580 gboolean is_special_static;
7583 token = read32 (ip + 1);
7585 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7586 field = mono_method_get_wrapper_data (method, token);
7587 klass = field->parent;
7590 field = mono_field_from_token (image, token, &klass, generic_context);
7593 mono_class_init (klass);
7594 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
7595 FIELD_ACCESS_FAILURE;
7598 * We can only support shared generic static
7599 * field access on architectures where the
7600 * trampoline code has been extended to handle
7601 * the generic class init.
7603 #ifndef MONO_ARCH_VTABLE_REG
7604 GENERIC_SHARING_FAILURE (*ip);
7607 if (cfg->generic_sharing_context)
7608 context_used = mono_class_check_context_used (klass);
7610 g_assert (!(field->type->attrs & FIELD_ATTRIBUTE_LITERAL));
7612 /* The special_static_fields field is init'd in mono_class_vtable, so
7613 * mono_class_vtable needs to be called here.
7615 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
7616 mono_class_vtable (cfg->domain, klass);
7617 CHECK_TYPELOAD (klass);
7619 mono_domain_lock (cfg->domain);
7620 if (cfg->domain->special_static_fields)
7621 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
7622 mono_domain_unlock (cfg->domain);
7624 is_special_static = mono_class_field_is_special_static (field);
7626 /* Generate IR to compute the field address */
7628 if ((cfg->opt & MONO_OPT_SHARED) ||
7629 (cfg->compile_aot && is_special_static) ||
7630 (context_used && is_special_static)) {
7631 MonoInst *iargs [2];
7633 g_assert (field->parent);
7634 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7638 EMIT_GET_RGCTX (rgctx, context_used);
7639 iargs [1] = emit_get_rgctx_field (cfg, context_used, rgctx, field, MONO_RGCTX_INFO_CLASS_FIELD);
7641 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
7643 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
7644 } else if (context_used) {
7645 MonoInst *rgctx, *static_data;
7648 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
7649 method->klass->name_space, method->klass->name, method->name,
7650 depth, field->offset);
7653 if (mono_class_needs_cctor_run (klass, method)) {
7655 MonoInst *vtable, *rgctx;
7657 EMIT_GET_RGCTX (rgctx, context_used);
7658 vtable = emit_get_rgctx_klass (cfg, context_used, rgctx, klass, MONO_RGCTX_INFO_VTABLE);
7660 // FIXME: This doesn't work since it tries to pass the argument
7661 // in the normal way, instead of using MONO_ARCH_VTABLE_REG
7663 * The vtable pointer is always passed in a register regardless of
7664 * the calling convention, so assign it manually, and make a call
7665 * using a signature without parameters.
7667 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable);
7668 #ifdef MONO_ARCH_VTABLE_REG
7669 mono_call_inst_add_outarg_reg (cfg, call, vtable->dreg, MONO_ARCH_VTABLE_REG, FALSE);
7670 cfg->uses_vtable_reg = TRUE;
7677 * The pointer we're computing here is
7679 * super_info.static_data + field->offset
7681 EMIT_GET_RGCTX (rgctx, context_used);
7682 static_data = emit_get_rgctx_klass (cfg, context_used, rgctx, klass, MONO_RGCTX_INFO_STATIC_DATA);
7684 if (field->offset == 0) {
7687 int addr_reg = mono_alloc_preg (cfg);
7688 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
7690 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
7691 MonoInst *iargs [2];
7693 g_assert (field->parent);
7694 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7695 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
7696 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
7698 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
7700 CHECK_TYPELOAD (klass);
7702 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
7703 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
7704 if (cfg->verbose_level > 2)
7705 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, field->name);
7706 class_inits = g_slist_prepend (class_inits, vtable);
7708 if (cfg->run_cctors) {
7710 /* This makes it so that inlining cannot trigger */
7711 /* .cctors: too many apps depend on them */
7712 /* running with a specific order... */
7713 if (! vtable->initialized)
7715 ex = mono_runtime_class_init_full (vtable, FALSE);
7717 set_exception_object (cfg, ex);
7718 goto exception_exit;
7722 addr = (char*)vtable->data + field->offset;
7724 if (cfg->compile_aot)
7725 EMIT_NEW_SFLDACONST (cfg, ins, field);
7727 EMIT_NEW_PCONST (cfg, ins, addr);
7730 * insert call to mono_threads_get_static_data (GPOINTER_TO_UINT (addr))
7731 * This could be later optimized to do just a couple of
7732 * memory dereferences with constant offsets.
7734 MonoInst *iargs [1];
7735 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
7736 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
7740 /* Generate IR to do the actual load/store operation */
7742 if (*ip == CEE_LDSFLDA) {
7743 ins->klass = mono_class_from_mono_type (field->type);
7745 } else if (*ip == CEE_STSFLD) {
7750 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, ins->dreg, 0, sp [0]->dreg);
7751 store->flags |= ins_flag;
7754 gboolean is_const = FALSE;
7755 MonoVTable *vtable = NULL;
7757 if (!context_used) {
7758 vtable = mono_class_vtable (cfg->domain, klass);
7759 CHECK_TYPELOAD (klass);
7761 if (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) &&
7762 vtable->initialized && (field->type->attrs & FIELD_ATTRIBUTE_INIT_ONLY)) {
7763 gpointer addr = (char*)vtable->data + field->offset;
7764 int ro_type = field->type->type;
7765 if (ro_type == MONO_TYPE_VALUETYPE && field->type->data.klass->enumtype) {
7766 ro_type = field->type->data.klass->enum_basetype->type;
7768 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, field->name);*/
7771 case MONO_TYPE_BOOLEAN:
7773 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
7777 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
7780 case MONO_TYPE_CHAR:
7782 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
7786 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
7791 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
7795 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
7800 case MONO_TYPE_STRING:
7801 case MONO_TYPE_OBJECT:
7802 case MONO_TYPE_CLASS:
7803 case MONO_TYPE_SZARRAY:
7805 case MONO_TYPE_FNPTR:
7806 case MONO_TYPE_ARRAY:
7807 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
7808 type_to_eval_stack_type ((cfg), field->type, *sp);
7813 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
7818 case MONO_TYPE_VALUETYPE:
7828 CHECK_STACK_OVF (1);
7830 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
7831 load->flags |= ins_flag;
7843 token = read32 (ip + 1);
7844 klass = mini_get_class (method, token, generic_context);
7845 CHECK_TYPELOAD (klass);
7846 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
7847 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
7858 const char *data_ptr;
7865 token = read32 (ip + 1);
7867 klass = mini_get_class (method, token, generic_context);
7868 CHECK_TYPELOAD (klass);
7870 if (cfg->generic_sharing_context)
7871 context_used = mono_class_check_context_used (klass);
7877 /* FIXME: Decompose later to help abcrem */
7880 EMIT_GET_RGCTX (rgctx, context_used);
7881 args [0] = emit_get_rgctx_klass (cfg, context_used, rgctx, mono_array_class_get (klass, 1), MONO_RGCTX_INFO_VTABLE);
7886 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
7888 if (cfg->opt & MONO_OPT_SHARED) {
7889 /* Decompose now to avoid problems with references to the domainvar */
7890 MonoInst *iargs [3];
7892 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7893 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
7896 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
7898 /* Decompose later since it is needed by abcrem */
7899 MONO_INST_NEW (cfg, ins, OP_NEWARR);
7900 ins->dreg = alloc_preg (cfg);
7901 ins->sreg1 = sp [0]->dreg;
7902 ins->inst_newa_class = klass;
7903 ins->type = STACK_OBJ;
7905 MONO_ADD_INS (cfg->cbb, ins);
7906 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
7907 cfg->cbb->has_array_access = TRUE;
7909 /* Needed so mono_emit_load_get_addr () gets called */
7910 mono_get_got_var (cfg);
7920 * we inline/optimize the initialization sequence if possible.
7921 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
7922 * for small sizes open code the memcpy
7923 * ensure the rva field is big enough
7925 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size))) {
7926 MonoMethod *memcpy_method = get_memcpy_method ();
7927 MonoInst *iargs [3];
7928 int add_reg = alloc_preg (cfg);
7930 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
7931 if (cfg->compile_aot) {
7932 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(data_ptr), STACK_PTR, NULL);
7934 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
7936 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
7937 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
7946 if (sp [0]->type != STACK_OBJ)
7949 dreg = alloc_preg (cfg);
7950 MONO_INST_NEW (cfg, ins, OP_LDLEN);
7951 ins->dreg = alloc_preg (cfg);
7952 ins->sreg1 = sp [0]->dreg;
7953 ins->type = STACK_I4;
7954 MONO_ADD_INS (cfg->cbb, ins);
7955 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
7956 cfg->cbb->has_array_access = TRUE;
7964 if (sp [0]->type != STACK_OBJ)
7967 cfg->flags |= MONO_CFG_HAS_LDELEMA;
7969 klass = mini_get_class (method, read32 (ip + 1), generic_context);
7970 CHECK_TYPELOAD (klass);
7971 /* we need to make sure that this array is exactly the type it needs
7972 * to be for correctness. the wrappers are lax with their usage
7973 * so we need to ignore them here
7975 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly)
7976 mini_emit_check_array_type (cfg, sp [0], mono_array_class_get (klass, 1));
7979 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
7983 case CEE_LDELEM_ANY:
7994 case CEE_LDELEM_REF: {
8000 if (*ip == CEE_LDELEM_ANY) {
8002 token = read32 (ip + 1);
8003 klass = mini_get_class (method, token, generic_context);
8004 CHECK_TYPELOAD (klass);
8005 mono_class_init (klass);
8008 klass = array_access_to_klass (*ip);
8010 if (sp [0]->type != STACK_OBJ)
8013 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8015 if (sp [1]->opcode == OP_ICONST) {
8016 int array_reg = sp [0]->dreg;
8017 int index_reg = sp [1]->dreg;
8018 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8020 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8021 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
8023 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8024 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
8027 if (*ip == CEE_LDELEM_ANY)
8040 case CEE_STELEM_REF:
8041 case CEE_STELEM_ANY: {
8047 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8049 if (*ip == CEE_STELEM_ANY) {
8051 token = read32 (ip + 1);
8052 klass = mini_get_class (method, token, generic_context);
8053 CHECK_TYPELOAD (klass);
8054 mono_class_init (klass);
8057 klass = array_access_to_klass (*ip);
8059 if (sp [0]->type != STACK_OBJ)
8062 /* storing a NULL doesn't need any of the complex checks in stelemref */
8063 if (generic_class_is_reference_type (cfg, klass) &&
8064 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
8065 MonoMethod* helper = mono_marshal_get_stelemref ();
8066 MonoInst *iargs [3];
8068 if (sp [0]->type != STACK_OBJ)
8070 if (sp [2]->type != STACK_OBJ)
8077 mono_emit_method_call (cfg, helper, iargs, NULL);
8079 if (sp [1]->opcode == OP_ICONST) {
8080 int array_reg = sp [0]->dreg;
8081 int index_reg = sp [1]->dreg;
8082 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8084 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8085 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
8087 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8088 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
8092 if (*ip == CEE_STELEM_ANY)
8099 case CEE_CKFINITE: {
8103 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
8104 ins->sreg1 = sp [0]->dreg;
8105 ins->dreg = alloc_freg (cfg);
8106 ins->type = STACK_R8;
8107 MONO_ADD_INS (bblock, ins);
8110 mono_decompose_opcode (cfg, ins);
8115 case CEE_REFANYVAL: {
8116 MonoInst *src_var, *src;
8118 int klass_reg = alloc_preg (cfg);
8119 int dreg = alloc_preg (cfg);
8122 MONO_INST_NEW (cfg, ins, *ip);
8125 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8126 CHECK_TYPELOAD (klass);
8127 mono_class_init (klass);
8129 if (cfg->generic_sharing_context)
8130 context_used = mono_class_check_context_used (klass);
8133 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
8135 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
8136 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
8137 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
8140 MonoInst *rgctx, *klass_ins;
8142 EMIT_GET_RGCTX (rgctx, context_used);
8143 klass_ins = emit_get_rgctx_klass (cfg, context_used, rgctx, klass, MONO_RGCTX_INFO_KLASS);
8146 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
8147 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
8149 mini_emit_class_check (cfg, klass_reg, klass);
8151 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
8152 ins->type = STACK_MP;
8157 case CEE_MKREFANY: {
8158 MonoInst *loc, *addr;
8161 MONO_INST_NEW (cfg, ins, *ip);
8164 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8165 CHECK_TYPELOAD (klass);
8166 mono_class_init (klass);
8168 if (cfg->generic_sharing_context)
8169 context_used = mono_class_check_context_used (klass);
8171 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
8172 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
8175 MonoInst *rgctx, *const_ins;
8176 int type_reg = alloc_preg (cfg);
8178 EMIT_GET_RGCTX (rgctx, context_used);
8179 const_ins = emit_get_rgctx_klass (cfg, context_used, rgctx, klass, MONO_RGCTX_INFO_KLASS);
8180 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
8181 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8182 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8183 } else if (cfg->compile_aot) {
8184 int const_reg = alloc_preg (cfg);
8185 int type_reg = alloc_preg (cfg);
8187 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
8188 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
8189 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8190 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8192 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
8193 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
8195 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
8197 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
8198 ins->type = STACK_VTYPE;
8199 ins->klass = mono_defaults.typed_reference_class;
8206 MonoClass *handle_class;
8208 CHECK_STACK_OVF (1);
8211 n = read32 (ip + 1);
8213 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
8214 handle = mono_method_get_wrapper_data (method, n);
8215 handle_class = mono_method_get_wrapper_data (method, n + 1);
8216 if (handle_class == mono_defaults.typehandle_class)
8217 handle = &((MonoClass*)handle)->byval_arg;
8220 handle = mono_ldtoken (image, n, &handle_class, generic_context);
8224 mono_class_init (handle_class);
8225 if (cfg->generic_sharing_context) {
8226 if (handle_class == mono_defaults.typehandle_class) {
8227 /* If we get a MONO_TYPE_CLASS
8228 then we need to provide the
8230 instantiation of it. */
8231 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
8234 context_used = mono_class_check_context_used (mono_class_from_mono_type (handle));
8235 } else if (handle_class == mono_defaults.fieldhandle_class)
8236 context_used = mono_class_check_context_used (((MonoClassField*)handle)->parent);
8237 else if (handle_class == mono_defaults.methodhandle_class)
8238 context_used = mono_method_check_context_used (handle);
8240 g_assert_not_reached ();
8243 if (cfg->opt & MONO_OPT_SHARED) {
8244 MonoInst *addr, *vtvar, *iargs [3];
8245 int method_context_used;
8247 if (cfg->generic_sharing_context)
8248 method_context_used = mono_method_check_context_used (method);
8250 method_context_used = 0;
8252 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8254 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
8255 EMIT_NEW_ICONST (cfg, iargs [1], n);
8256 if (method_context_used) {
8259 EMIT_GET_RGCTX (rgctx, method_context_used);
8260 iargs [2] = emit_get_rgctx_method (cfg, method_context_used, rgctx, method, MONO_RGCTX_INFO_METHOD);
8261 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
8263 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
8264 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
8266 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8268 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8270 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8272 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
8273 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
8274 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
8275 (cmethod->klass == mono_defaults.monotype_class->parent) &&
8276 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
8277 MonoClass *tclass = mono_class_from_mono_type (handle);
8279 mono_class_init (tclass);
8283 EMIT_GET_RGCTX (rgctx, context_used);
8284 ins = emit_get_rgctx_klass (cfg, context_used, rgctx, tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
8285 } else if (cfg->compile_aot) {
8286 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n);
8288 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
8290 ins->type = STACK_OBJ;
8291 ins->klass = cmethod->klass;
8294 MonoInst *addr, *vtvar;
8296 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8301 EMIT_GET_RGCTX (rgctx, context_used);
8302 if (handle_class == mono_defaults.typehandle_class) {
8303 ins = emit_get_rgctx_klass (cfg, context_used, rgctx,
8304 mono_class_from_mono_type (handle),
8305 MONO_RGCTX_INFO_TYPE);
8306 } else if (handle_class == mono_defaults.methodhandle_class) {
8307 ins = emit_get_rgctx_method (cfg, context_used, rgctx,
8308 handle, MONO_RGCTX_INFO_METHOD);
8309 } else if (handle_class == mono_defaults.fieldhandle_class) {
8310 ins = emit_get_rgctx_field (cfg, context_used, rgctx,
8311 handle, MONO_RGCTX_INFO_CLASS_FIELD);
8313 g_assert_not_reached ();
8315 } else if (cfg->compile_aot) {
8316 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n);
8318 EMIT_NEW_PCONST (cfg, ins, handle);
8320 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8321 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8322 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8332 MONO_INST_NEW (cfg, ins, OP_THROW);
8334 ins->sreg1 = sp [0]->dreg;
8336 bblock->out_of_line = TRUE;
8337 MONO_ADD_INS (bblock, ins);
8338 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
8339 MONO_ADD_INS (bblock, ins);
8342 link_bblock (cfg, bblock, end_bblock);
8343 start_new_bblock = 1;
8345 case CEE_ENDFINALLY:
8346 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
8347 MONO_ADD_INS (bblock, ins);
8349 start_new_bblock = 1;
8352 * Control will leave the method so empty the stack, otherwise
8353 * the next basic block will start with a nonempty stack.
8355 while (sp != stack_start) {
8363 if (*ip == CEE_LEAVE) {
8365 target = ip + 5 + (gint32)read32(ip + 1);
8368 target = ip + 2 + (signed char)(ip [1]);
8371 /* empty the stack */
8372 while (sp != stack_start) {
8377 * If this leave statement is in a catch block, check for a
8378 * pending exception, and rethrow it if necessary.
8380 for (i = 0; i < header->num_clauses; ++i) {
8381 MonoExceptionClause *clause = &header->clauses [i];
8384 * Use <= in the final comparison to handle clauses with multiple
8385 * leave statements, like in bug #78024.
8386 * The ordering of the exception clauses guarantees that we find the
8389 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len)) {
8391 MonoBasicBlock *dont_throw;
8396 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
8399 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
8401 NEW_BBLOCK (cfg, dont_throw);
8404 * Currently, we always rethrow the abort exception, despite the
8405 * fact that this is not correct. See thread6.cs for an example.
8406 * But propagating the abort exception is more important than
8407 * getting the semantics right.
8409 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
8410 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
8411 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
8413 MONO_START_BB (cfg, dont_throw);
8418 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
8420 for (tmp = handlers; tmp; tmp = tmp->next) {
8422 link_bblock (cfg, bblock, tblock);
8423 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
8424 ins->inst_target_bb = tblock;
8425 MONO_ADD_INS (bblock, ins);
8427 g_list_free (handlers);
8430 MONO_INST_NEW (cfg, ins, OP_BR);
8431 MONO_ADD_INS (bblock, ins);
8432 GET_BBLOCK (cfg, tblock, target);
8433 link_bblock (cfg, bblock, tblock);
8434 ins->inst_target_bb = tblock;
8435 start_new_bblock = 1;
8437 if (*ip == CEE_LEAVE)
8446 * Mono specific opcodes
8448 case MONO_CUSTOM_PREFIX: {
8450 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
8454 case CEE_MONO_ICALL: {
8456 MonoJitICallInfo *info;
8458 token = read32 (ip + 2);
8459 func = mono_method_get_wrapper_data (method, token);
8460 info = mono_find_jit_icall_by_addr (func);
8463 CHECK_STACK (info->sig->param_count);
8464 sp -= info->sig->param_count;
8466 ins = mono_emit_jit_icall (cfg, info->func, sp);
8467 if (!MONO_TYPE_IS_VOID (info->sig->ret))
8471 inline_costs += 10 * num_calls++;
8475 case CEE_MONO_LDPTR: {
8478 CHECK_STACK_OVF (1);
8480 token = read32 (ip + 2);
8482 ptr = mono_method_get_wrapper_data (method, token);
8483 if (cfg->compile_aot && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (strstr (method->name, "__icall_wrapper_") == method->name)) {
8484 MonoJitICallInfo *callinfo;
8485 const char *icall_name;
8487 icall_name = method->name + strlen ("__icall_wrapper_");
8488 g_assert (icall_name);
8489 callinfo = mono_find_jit_icall_by_name (icall_name);
8490 g_assert (callinfo);
8492 if (ptr == callinfo->func) {
8493 /* Will be transformed into an AOTCONST later */
8494 EMIT_NEW_PCONST (cfg, ins, ptr);
8500 /* FIXME: Generalize this */
8501 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
8502 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
8507 EMIT_NEW_PCONST (cfg, ins, ptr);
8510 inline_costs += 10 * num_calls++;
8511 /* Can't embed random pointers into AOT code */
8512 cfg->disable_aot = 1;
8515 case CEE_MONO_ICALL_ADDR: {
8516 MonoMethod *cmethod;
8519 CHECK_STACK_OVF (1);
8521 token = read32 (ip + 2);
8523 cmethod = mono_method_get_wrapper_data (method, token);
8525 if (cfg->compile_aot) {
8526 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
8528 ptr = mono_lookup_internal_call (cmethod);
8530 EMIT_NEW_PCONST (cfg, ins, ptr);
8536 case CEE_MONO_VTADDR: {
8537 MonoInst *src_var, *src;
8543 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
8544 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
8549 case CEE_MONO_NEWOBJ: {
8550 MonoInst *iargs [2];
8552 CHECK_STACK_OVF (1);
8554 token = read32 (ip + 2);
8555 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
8556 mono_class_init (klass);
8557 NEW_DOMAINCONST (cfg, iargs [0]);
8558 MONO_ADD_INS (cfg->cbb, iargs [0]);
8559 NEW_CLASSCONST (cfg, iargs [1], klass);
8560 MONO_ADD_INS (cfg->cbb, iargs [1]);
8561 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
8563 inline_costs += 10 * num_calls++;
8566 case CEE_MONO_OBJADDR:
8569 MONO_INST_NEW (cfg, ins, OP_MOVE);
8570 ins->dreg = alloc_preg (cfg);
8571 ins->sreg1 = sp [0]->dreg;
8572 ins->type = STACK_MP;
8573 MONO_ADD_INS (cfg->cbb, ins);
8577 case CEE_MONO_LDNATIVEOBJ:
8579 * Similar to LDOBJ, but instead load the unmanaged
8580 * representation of the vtype to the stack.
8585 token = read32 (ip + 2);
8586 klass = mono_method_get_wrapper_data (method, token);
8587 g_assert (klass->valuetype);
8588 mono_class_init (klass);
8591 MonoInst *src, *dest, *temp;
8594 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
8595 temp->backend.is_pinvoke = 1;
8596 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
8597 mini_emit_stobj (cfg, dest, src, klass, TRUE);
8599 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
8600 dest->type = STACK_VTYPE;
8601 dest->klass = klass;
8607 case CEE_MONO_RETOBJ: {
8609 * Same as RET, but return the native representation of a vtype
8612 g_assert (cfg->ret);
8613 g_assert (mono_method_signature (method)->pinvoke);
8618 token = read32 (ip + 2);
8619 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
8621 if (!cfg->vret_addr) {
8622 g_assert (cfg->ret_var_is_local);
8624 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
8626 EMIT_NEW_RETLOADA (cfg, ins);
8628 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
8630 if (sp != stack_start)
8633 MONO_INST_NEW (cfg, ins, OP_BR);
8634 ins->inst_target_bb = end_bblock;
8635 MONO_ADD_INS (bblock, ins);
8636 link_bblock (cfg, bblock, end_bblock);
8637 start_new_bblock = 1;
8641 case CEE_MONO_CISINST:
8642 case CEE_MONO_CCASTCLASS: {
8647 token = read32 (ip + 2);
8648 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
8649 if (ip [1] == CEE_MONO_CISINST)
8650 ins = handle_cisinst (cfg, klass, sp [0]);
8652 ins = handle_ccastclass (cfg, klass, sp [0]);
8658 case CEE_MONO_SAVE_LMF:
8659 case CEE_MONO_RESTORE_LMF:
8660 #ifdef MONO_ARCH_HAVE_LMF_OPS
8661 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
8662 MONO_ADD_INS (bblock, ins);
8663 cfg->need_lmf_area = TRUE;
8667 case CEE_MONO_CLASSCONST:
8668 CHECK_STACK_OVF (1);
8670 token = read32 (ip + 2);
8671 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
8674 inline_costs += 10 * num_calls++;
8676 case CEE_MONO_NOT_TAKEN:
8677 bblock->out_of_line = TRUE;
8681 CHECK_STACK_OVF (1);
8683 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
8684 ins->dreg = alloc_preg (cfg);
8685 ins->inst_offset = (gint32)read32 (ip + 2);
8686 ins->type = STACK_PTR;
8687 MONO_ADD_INS (bblock, ins);
8692 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
8702 /* somewhat similar to LDTOKEN */
8703 MonoInst *addr, *vtvar;
8704 CHECK_STACK_OVF (1);
8705 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
8707 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8708 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
8710 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8711 ins->type = STACK_VTYPE;
8712 ins->klass = mono_defaults.argumenthandle_class;
8725 * The following transforms:
8726 * CEE_CEQ into OP_CEQ
8727 * CEE_CGT into OP_CGT
8728 * CEE_CGT_UN into OP_CGT_UN
8729 * CEE_CLT into OP_CLT
8730 * CEE_CLT_UN into OP_CLT_UN
8732 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
8734 MONO_INST_NEW (cfg, ins, cmp->opcode);
8736 cmp->sreg1 = sp [0]->dreg;
8737 cmp->sreg2 = sp [1]->dreg;
8738 type_from_op (cmp, sp [0], sp [1]);
8740 if ((sp [0]->type == STACK_I8) || ((sizeof (gpointer) == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
8741 cmp->opcode = OP_LCOMPARE;
8742 else if (sp [0]->type == STACK_R8)
8743 cmp->opcode = OP_FCOMPARE;
8745 cmp->opcode = OP_ICOMPARE;
8746 MONO_ADD_INS (bblock, cmp);
8747 ins->type = STACK_I4;
8748 ins->dreg = alloc_dreg (cfg, ins->type);
8749 type_from_op (ins, sp [0], sp [1]);
8751 if (cmp->opcode == OP_FCOMPARE) {
8753 * The backends expect the fceq opcodes to do the
8756 cmp->opcode = OP_NOP;
8757 ins->sreg1 = cmp->sreg1;
8758 ins->sreg2 = cmp->sreg2;
8760 MONO_ADD_INS (bblock, ins);
8767 MonoMethod *cil_method, *ctor_method;
8768 gboolean is_shared = FALSE;
8770 CHECK_STACK_OVF (1);
8772 n = read32 (ip + 2);
8773 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
8776 mono_class_init (cmethod->klass);
8778 mono_save_token_info (cfg, image, n, cmethod);
8780 if (cfg->generic_sharing_context)
8781 context_used = mono_method_check_context_used (cmethod);
8783 if (mono_class_generic_sharing_enabled (cmethod->klass)) {
8784 if ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) &&
8785 (cmethod->klass->generic_class ||
8786 cmethod->klass->generic_container)) {
8789 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst)
8793 cil_method = cmethod;
8794 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
8795 METHOD_ACCESS_FAILURE;
8797 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
8798 if (check_linkdemand (cfg, method, cmethod))
8800 CHECK_CFG_EXCEPTION;
8801 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
8802 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
8806 * Optimize the common case of ldftn+delegate creation
8808 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE) && !defined(HAVE_WRITE_BARRIERS)
8809 /* FIXME: SGEN support */
8810 /* FIXME: handle shared static generic methods */
8811 /* FIXME: handle this in shared code */
8812 if (!is_shared && !context_used && (sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ) && (ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context)) && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
8813 MonoInst *target_ins;
8816 if (cfg->verbose_level > 3)
8817 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
8818 target_ins = sp [-1];
8820 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod);
8831 cmethod = mono_marshal_get_static_rgctx_invoke (cmethod);
8833 EMIT_GET_RGCTX (rgctx, context_used);
8834 argconst = emit_get_rgctx_method (cfg, context_used, rgctx, cmethod, MONO_RGCTX_INFO_METHOD);
8835 } else if (is_shared) {
8836 EMIT_NEW_METHODCONST (cfg, argconst, mono_marshal_get_static_rgctx_invoke (cmethod));
8838 EMIT_NEW_METHODCONST (cfg, argconst, cmethod);
8840 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
8844 inline_costs += 10 * num_calls++;
8847 case CEE_LDVIRTFTN: {
8852 n = read32 (ip + 2);
8853 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
8856 mono_class_init (cmethod->klass);
8858 if (cfg->generic_sharing_context)
8859 context_used = mono_method_check_context_used (cmethod);
8861 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
8862 if (check_linkdemand (cfg, method, cmethod))
8864 CHECK_CFG_EXCEPTION;
8865 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
8866 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
8875 EMIT_GET_RGCTX (rgctx, context_used);
8876 args [1] = emit_get_rgctx_method (cfg, context_used, rgctx, cmethod, MONO_RGCTX_INFO_METHOD);
8877 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
8879 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
8880 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
8884 inline_costs += 10 * num_calls++;
8888 CHECK_STACK_OVF (1);
8890 n = read16 (ip + 2);
8892 EMIT_NEW_ARGLOAD (cfg, ins, n);
8897 CHECK_STACK_OVF (1);
8899 n = read16 (ip + 2);
8901 NEW_ARGLOADA (cfg, ins, n);
8902 MONO_ADD_INS (cfg->cbb, ins);
8910 n = read16 (ip + 2);
8912 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
8914 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
8918 CHECK_STACK_OVF (1);
8920 n = read16 (ip + 2);
8922 EMIT_NEW_LOCLOAD (cfg, ins, n);
8927 CHECK_STACK_OVF (1);
8929 n = read16 (ip + 2);
8931 EMIT_NEW_LOCLOADA (cfg, ins, n);
8939 n = read16 (ip + 2);
8941 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
8943 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
8950 if (sp != stack_start)
8952 if (cfg->method != method)
8954 * Inlining this into a loop in a parent could lead to
8955 * stack overflows which is different behavior than the
8956 * non-inlined case, thus disable inlining in this case.
8958 goto inline_failure;
8960 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
8961 ins->dreg = alloc_preg (cfg);
8962 ins->sreg1 = sp [0]->dreg;
8963 ins->type = STACK_PTR;
8964 MONO_ADD_INS (cfg->cbb, ins);
8966 cfg->flags |= MONO_CFG_HAS_ALLOCA;
8967 if (header->init_locals)
8968 ins->flags |= MONO_INST_INIT;
8973 case CEE_ENDFILTER: {
8974 MonoExceptionClause *clause, *nearest;
8975 int cc, nearest_num;
8979 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
8981 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
8982 ins->sreg1 = (*sp)->dreg;
8983 MONO_ADD_INS (bblock, ins);
8984 start_new_bblock = 1;
8989 for (cc = 0; cc < header->num_clauses; ++cc) {
8990 clause = &header->clauses [cc];
8991 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
8992 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
8993 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
8999 if ((ip - header->code) != nearest->handler_offset)
9004 case CEE_UNALIGNED_:
9005 ins_flag |= MONO_INST_UNALIGNED;
9006 /* FIXME: record alignment? we can assume 1 for now */
9011 ins_flag |= MONO_INST_VOLATILE;
9015 ins_flag |= MONO_INST_TAILCALL;
9016 cfg->flags |= MONO_CFG_HAS_TAIL;
9017 /* Can't inline tail calls at this time */
9018 inline_costs += 100000;
9025 token = read32 (ip + 2);
9026 klass = mini_get_class (method, token, generic_context);
9027 CHECK_TYPELOAD (klass);
9028 if (generic_class_is_reference_type (cfg, klass)) {
9029 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
9031 GENERIC_SHARING_FAILURE (CEE_INITOBJ);
9032 mini_emit_initobj (cfg, *sp, NULL, klass);
9037 case CEE_CONSTRAINED_:
9039 token = read32 (ip + 2);
9040 constrained_call = mono_class_get_full (image, token, generic_context);
9041 CHECK_TYPELOAD (constrained_call);
9046 MonoInst *iargs [3];
9050 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
9051 mini_emit_memcpy2 (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
9052 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
9053 /* emit_memset only works when val == 0 */
9054 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
9059 if (ip [1] == CEE_CPBLK) {
9060 MonoMethod *memcpy_method = get_memcpy_method ();
9061 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
9063 MonoMethod *memset_method = get_memset_method ();
9064 mono_emit_method_call (cfg, memset_method, iargs, NULL);
9074 ins_flag |= MONO_INST_NOTYPECHECK;
9076 ins_flag |= MONO_INST_NORANGECHECK;
9077 /* we ignore the no-nullcheck for now since we
9078 * really do it explicitly only when doing callvirt->call
9084 int handler_offset = -1;
9086 for (i = 0; i < header->num_clauses; ++i) {
9087 MonoExceptionClause *clause = &header->clauses [i];
9088 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
9089 handler_offset = clause->handler_offset;
9094 bblock->flags |= BB_EXCEPTION_UNSAFE;
9096 g_assert (handler_offset != -1);
9098 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
9099 MONO_INST_NEW (cfg, ins, OP_RETHROW);
9100 ins->sreg1 = load->dreg;
9101 MONO_ADD_INS (bblock, ins);
9103 link_bblock (cfg, bblock, end_bblock);
9104 start_new_bblock = 1;
9112 GENERIC_SHARING_FAILURE (CEE_SIZEOF);
9114 CHECK_STACK_OVF (1);
9116 token = read32 (ip + 2);
9117 /* FIXXME: handle generics. */
9118 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC) {
9119 MonoType *type = mono_type_create_from_typespec (image, token);
9120 token = mono_type_size (type, &ialign);
9122 MonoClass *klass = mono_class_get_full (image, token, generic_context);
9123 CHECK_TYPELOAD (klass);
9124 mono_class_init (klass);
9125 token = mono_class_value_size (klass, &align);
9127 EMIT_NEW_ICONST (cfg, ins, token);
9132 case CEE_REFANYTYPE: {
9133 MonoInst *src_var, *src;
9139 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9141 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
9142 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
9143 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typed_reference_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
9153 g_error ("opcode 0xfe 0x%02x not handled", ip [1]);
9158 g_error ("opcode 0x%02x not handled", *ip);
9161 if (start_new_bblock != 1)
9164 bblock->cil_length = ip - bblock->cil_code;
9165 bblock->next_bb = end_bblock;
9167 if (cfg->method == method && cfg->domainvar) {
9169 MonoInst *get_domain;
9171 cfg->cbb = init_localsbb;
9173 if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
9174 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
9177 get_domain->dreg = alloc_preg (cfg);
9178 MONO_ADD_INS (cfg->cbb, get_domain);
9180 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
9181 MONO_ADD_INS (cfg->cbb, store);
9184 if (cfg->method == method && cfg->got_var)
9185 mono_emit_load_got_addr (cfg);
9187 if (header->init_locals) {
9190 cfg->cbb = init_localsbb;
9191 cfg->ip = header->code;
9192 for (i = 0; i < header->num_locals; ++i) {
9193 MonoType *ptype = header->locals [i];
9194 int t = ptype->type;
9195 dreg = cfg->locals [i]->dreg;
9197 if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
9198 t = ptype->data.klass->enum_basetype->type;
9200 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9201 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
9202 MONO_EMIT_NEW_ICONST (cfg, cfg->locals [i]->dreg, 0);
9203 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
9204 MONO_EMIT_NEW_I8CONST (cfg, cfg->locals [i]->dreg, 0);
9205 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
9206 MONO_INST_NEW (cfg, ins, OP_R8CONST);
9207 ins->type = STACK_R8;
9208 ins->inst_p0 = (void*)&r8_0;
9209 ins->dreg = alloc_dreg (cfg, STACK_R8);
9210 MONO_ADD_INS (init_localsbb, ins);
9211 EMIT_NEW_LOCSTORE (cfg, store, i, ins);
9212 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
9213 + ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
9214 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
9216 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9223 if (cfg->method == method) {
9225 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
9226 bb->region = mono_find_block_region (cfg, bb->real_offset);
9228 mono_create_spvar_for_region (cfg, bb->region);
9229 if (cfg->verbose_level > 2)
9230 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
9234 g_slist_free (class_inits);
9235 dont_inline = g_list_remove (dont_inline, method);
9237 if (inline_costs < 0) {
9240 /* Method is too large */
9241 mname = mono_method_full_name (method, TRUE);
9242 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
9243 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
9248 if ((cfg->verbose_level > 1) && (cfg->method == method))
9249 mono_print_code (cfg, "AFTER METHOD-TO-IR");
9251 return inline_costs;
9254 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
9255 g_slist_free (class_inits);
9256 dont_inline = g_list_remove (dont_inline, method);
9260 g_slist_free (class_inits);
9261 dont_inline = g_list_remove (dont_inline, method);
9265 g_slist_free (class_inits);
9266 dont_inline = g_list_remove (dont_inline, method);
9267 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
9271 g_slist_free (class_inits);
9272 dont_inline = g_list_remove (dont_inline, method);
9273 set_exception_type_from_invalid_il (cfg, method, ip);
9278 store_membase_reg_to_store_membase_imm (int opcode)
9281 case OP_STORE_MEMBASE_REG:
9282 return OP_STORE_MEMBASE_IMM;
9283 case OP_STOREI1_MEMBASE_REG:
9284 return OP_STOREI1_MEMBASE_IMM;
9285 case OP_STOREI2_MEMBASE_REG:
9286 return OP_STOREI2_MEMBASE_IMM;
9287 case OP_STOREI4_MEMBASE_REG:
9288 return OP_STOREI4_MEMBASE_IMM;
9289 case OP_STOREI8_MEMBASE_REG:
9290 return OP_STOREI8_MEMBASE_IMM;
9292 g_assert_not_reached ();
9298 #endif /* DISABLE_JIT */
/*
 * mono_op_to_op_imm:
 *
 *   Return the variant of OPCODE which takes an immediate second operand.
 * NOTE(review): most case labels are missing from this extract, so the
 * opcode->immediate pairings can only be read from the surviving return
 * statements — confirm against the complete source before editing.
 */
9301 mono_op_to_op_imm (int opcode)
9311 return OP_IDIV_UN_IMM;
9315 return OP_IREM_UN_IMM;
9329 return OP_ISHR_UN_IMM;
9346 return OP_LSHR_UN_IMM;
9349 return OP_COMPARE_IMM;
9351 return OP_ICOMPARE_IMM;
9353 return OP_LCOMPARE_IMM;
/* register->immediate forms of the membase stores */
9355 case OP_STORE_MEMBASE_REG:
9356 return OP_STORE_MEMBASE_IMM;
9357 case OP_STOREI1_MEMBASE_REG:
9358 return OP_STOREI1_MEMBASE_IMM;
9359 case OP_STOREI2_MEMBASE_REG:
9360 return OP_STOREI2_MEMBASE_IMM;
9361 case OP_STOREI4_MEMBASE_REG:
9362 return OP_STOREI4_MEMBASE_IMM;
/* x86/amd64-only opcodes with immediate forms */
9364 #if defined(__i386__) || defined (__x86_64__)
9366 return OP_X86_PUSH_IMM;
9367 case OP_X86_COMPARE_MEMBASE_REG:
9368 return OP_X86_COMPARE_MEMBASE_IMM;
9370 #if defined(__x86_64__)
9371 case OP_AMD64_ICOMPARE_MEMBASE_REG:
9372 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
9374 case OP_VOIDCALL_REG:
9383 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Map a CEE_LDIND_* indirect-load opcode to the corresponding typed
 * OP_LOAD*_MEMBASE IR opcode, aborting on unknown input.
 * NOTE(review): the CEE_LDIND_* case labels are missing from this extract;
 * the type-to-load mapping below is read from the return statements only.
 */
9390 ldind_to_load_membase (int opcode)
9394 return OP_LOADI1_MEMBASE;
9396 return OP_LOADU1_MEMBASE;
9398 return OP_LOADI2_MEMBASE;
9400 return OP_LOADU2_MEMBASE;
9402 return OP_LOADI4_MEMBASE;
9404 return OP_LOADU4_MEMBASE;
/* pointer-sized loads (I and REF presumably) both use the generic load */
9406 return OP_LOAD_MEMBASE;
9408 return OP_LOAD_MEMBASE;
9410 return OP_LOADI8_MEMBASE;
9412 return OP_LOADR4_MEMBASE;
9414 return OP_LOADR8_MEMBASE;
/* unrecognized indirect-load opcode */
9416 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Map a CEE_STIND_* indirect-store opcode to the corresponding typed
 * OP_STORE*_MEMBASE_REG IR opcode, aborting on unknown input.
 * NOTE(review): the CEE_STIND_* case labels are missing from this extract;
 * the mapping below is read from the return statements only.
 */
9423 stind_to_store_membase (int opcode)
9427 return OP_STOREI1_MEMBASE_REG;
9429 return OP_STOREI2_MEMBASE_REG;
9431 return OP_STOREI4_MEMBASE_REG;
/* pointer-sized store */
9434 return OP_STORE_MEMBASE_REG;
9436 return OP_STOREI8_MEMBASE_REG;
9438 return OP_STORER4_MEMBASE_REG;
9440 return OP_STORER8_MEMBASE_REG;
/* unrecognized indirect-store opcode */
9442 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map an OP_LOAD*_MEMBASE opcode to the absolute-address OP_LOAD*_MEM
 * form, used when the base address is a constant.  Only implemented for
 * x86/amd64; presumably falls through to a "no mapping" result on other
 * architectures — the default/fallthrough lines are missing from this
 * extract, confirm against the full source.
 */
9449 mono_load_membase_to_load_mem (int opcode)
9451 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
9452 #if defined(__i386__) || defined(__x86_64__)
/* generic pointer-sized load; its return line is missing here */
9454 case OP_LOAD_MEMBASE:
9456 case OP_LOADU1_MEMBASE:
9457 return OP_LOADU1_MEM;
9458 case OP_LOADU2_MEMBASE:
9459 return OP_LOADU2_MEM;
9460 case OP_LOADI4_MEMBASE:
9461 return OP_LOADI4_MEM;
9462 case OP_LOADU4_MEMBASE:
9463 return OP_LOADU4_MEM;
/* 64-bit loads only exist as a single instruction on 64-bit hosts */
9464 #if SIZEOF_VOID_P == 8
9465 case OP_LOADI8_MEMBASE:
9466 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   Given an ALU opcode whose destination is stored to memory by
 * STORE_OPCODE, return an x86/amd64 read-modify-write opcode which operates
 * directly on the memory location, avoiding the intermediate register.
 * Only pointer-sized/int32 (and int64 on amd64) stores are eligible.
 * NOTE(review): the case labels pairing each return with its source ALU
 * opcode are missing from this extract — confirm against the full source.
 */
9475 op_to_op_dest_membase (int store_opcode, int opcode)
9477 #if defined(__i386__)
/* only 32-bit/pointer-sized stores can be fused on x86 */
9478 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
9483 return OP_X86_ADD_MEMBASE_REG;
9485 return OP_X86_SUB_MEMBASE_REG;
9487 return OP_X86_AND_MEMBASE_REG;
9489 return OP_X86_OR_MEMBASE_REG;
9491 return OP_X86_XOR_MEMBASE_REG;
/* immediate-operand forms */
9494 return OP_X86_ADD_MEMBASE_IMM;
9497 return OP_X86_SUB_MEMBASE_IMM;
9500 return OP_X86_AND_MEMBASE_IMM;
9503 return OP_X86_OR_MEMBASE_IMM;
9506 return OP_X86_XOR_MEMBASE_IMM;
9512 #if defined(__x86_64__)
/* amd64 additionally allows fusing 64-bit stores */
9513 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
9518 return OP_X86_ADD_MEMBASE_REG;
9520 return OP_X86_SUB_MEMBASE_REG;
9522 return OP_X86_AND_MEMBASE_REG;
9524 return OP_X86_OR_MEMBASE_REG;
9526 return OP_X86_XOR_MEMBASE_REG;
9528 return OP_X86_ADD_MEMBASE_IMM;
9530 return OP_X86_SUB_MEMBASE_IMM;
9532 return OP_X86_AND_MEMBASE_IMM;
9534 return OP_X86_OR_MEMBASE_IMM;
9536 return OP_X86_XOR_MEMBASE_IMM;
/* 64-bit (long) ALU opcodes map to the AMD64-specific RMW forms */
9538 return OP_AMD64_ADD_MEMBASE_REG;
9540 return OP_AMD64_SUB_MEMBASE_REG;
9542 return OP_AMD64_AND_MEMBASE_REG;
9544 return OP_AMD64_OR_MEMBASE_REG;
9546 return OP_AMD64_XOR_MEMBASE_REG;
9549 return OP_AMD64_ADD_MEMBASE_IMM;
9552 return OP_AMD64_SUB_MEMBASE_IMM;
9555 return OP_AMD64_AND_MEMBASE_IMM;
9558 return OP_AMD64_OR_MEMBASE_IMM;
9561 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Fuse a compare-result opcode with a following byte-sized store into a
 * single x86 SETcc-to-memory instruction.  Only applies when the store is
 * OP_STOREI1_MEMBASE_REG (SETcc writes a single byte).
 * NOTE(review): the case labels selecting SETEQ vs SETNE are missing from
 * this extract — confirm against the full source.
 */
9571 op_to_op_store_membase (int store_opcode, int opcode)
9573 #if defined(__i386__) || defined(__x86_64__)
9576 if (store_opcode == OP_STOREI1_MEMBASE_REG)
9577 return OP_X86_SETEQ_MEMBASE;
9579 if (store_opcode == OP_STOREI1_MEMBASE_REG)
9580 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Fold a preceding memory load (LOAD_OPCODE) into OPCODE's first source
 * operand, producing an x86/amd64 instruction that reads the operand
 * directly from memory.  Returns a fused opcode when possible; the
 * "no fusion" fallthrough lines are missing from this extract.
 */
9588 op_to_op_src1_membase (int load_opcode, int opcode)
/* x86 (32-bit) section */
9591 /* FIXME: This has sign extension issues */
9593 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
9594 return OP_X86_COMPARE_MEMBASE8_IMM;
/* otherwise only pointer/int32-sized loads can be folded on x86 */
9597 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
9602 return OP_X86_PUSH_MEMBASE;
9603 case OP_COMPARE_IMM:
9604 case OP_ICOMPARE_IMM:
9605 return OP_X86_COMPARE_MEMBASE_IMM;
9608 return OP_X86_COMPARE_MEMBASE_REG;
/* amd64 section */
9613 /* FIXME: This has sign extension issues */
9615 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
9616 return OP_X86_COMPARE_MEMBASE8_IMM;
/* push can fold a 64-bit/pointer-sized load */
9621 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
9622 return OP_X86_PUSH_MEMBASE;
/* the immediate-compare folding below is deliberately commented out */
9624 /* FIXME: This only works for 32 bit immediates
9625 case OP_COMPARE_IMM:
9626 case OP_LCOMPARE_IMM:
9627 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
9628 return OP_AMD64_COMPARE_MEMBASE_IMM;
9630 case OP_ICOMPARE_IMM:
9631 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9632 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
/* register compares: 64-bit vs 32-bit variants keyed off the load width */
9636 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
9637 return OP_AMD64_COMPARE_MEMBASE_REG;
9640 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9641 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Fold a preceding memory load (LOAD_OPCODE) into OPCODE's second source
 * operand, producing an x86/amd64 reg-op-memory instruction.
 * NOTE(review): the case labels pairing each return with its ALU opcode are
 * missing from this extract — confirm against the full source.
 */
9650 op_to_op_src2_membase (int load_opcode, int opcode)
/* x86: only pointer/int32-sized loads can be folded */
9653 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
9659 return OP_X86_COMPARE_REG_MEMBASE;
9661 return OP_X86_ADD_REG_MEMBASE;
9663 return OP_X86_SUB_REG_MEMBASE;
9665 return OP_X86_AND_REG_MEMBASE;
9667 return OP_X86_OR_REG_MEMBASE;
9669 return OP_X86_XOR_REG_MEMBASE;
/* amd64: width of the fused instruction is keyed off the load width */
9676 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9677 return OP_AMD64_ICOMPARE_REG_MEMBASE;
9681 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9682 return OP_AMD64_COMPARE_REG_MEMBASE;
/* 32-bit ALU ops reuse the X86_* reg-membase opcodes on amd64 */
9685 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9686 return OP_X86_ADD_REG_MEMBASE;
9688 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9689 return OP_X86_SUB_REG_MEMBASE;
9691 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9692 return OP_X86_AND_REG_MEMBASE;
9694 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9695 return OP_X86_OR_REG_MEMBASE;
9697 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9698 return OP_X86_XOR_REG_MEMBASE;
/* 64-bit ALU ops use the AMD64_* reg-membase opcodes */
9700 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9701 return OP_AMD64_ADD_REG_MEMBASE;
9703 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9704 return OP_AMD64_SUB_REG_MEMBASE;
9706 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9707 return OP_AMD64_AND_REG_MEMBASE;
9709 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9710 return OP_AMD64_OR_REG_MEMBASE;
9712 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9713 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but refuses to convert opcodes which would
 * be software-emulated on this architecture (long shifts on 32-bit hosts,
 * mul/div where the arch emulates them) — the emulation helpers presumably
 * cannot take immediate operands.  The excluded case labels are missing
 * from this extract.
 */
9721 mono_op_to_op_imm_noemul (int opcode)
9724 #if SIZEOF_VOID_P == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
9729 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
/* everything else defers to the generic mapping */
9737 return mono_op_to_op_imm (opcode);
9744 * mono_handle_global_vregs:
9746 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
9750 mono_handle_global_vregs (MonoCompile *cfg)
/*
 * vreg_to_bb [vreg] holds: 0 = unseen, block_num + 1 = seen in exactly one
 * bblock, -1 = seen in multiple bblocks.
 * NOTE(review): the allocation size 'sizeof (gint32*) * cfg->next_vreg + 1'
 * looks suspect — it uses the size of a pointer for a gint32 table and adds
 * a single byte instead of one element; presumably it was meant to be
 * 'sizeof (gint32) * (cfg->next_vreg + 1)'.  Confirm against upstream.
 */
9756 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
9758 /* Find local vregs used in more than one bb */
9759 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
9760 MonoInst *ins = bb->code;
9761 int block_num = bb->block_num;
9763 if (cfg->verbose_level > 1)
9764 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
9767 for (; ins; ins = ins->next) {
9768 const char *spec = INS_INFO (ins->opcode);
9769 int regtype, regindex;
9772 if (G_UNLIKELY (cfg->verbose_level > 1))
9773 mono_print_ins (ins);
/* by this point all CIL opcodes must have been lowered to machine IR */
9775 g_assert (ins->opcode >= MONO_CEE_LAST);
/* examine dest, src1, src2 of each instruction in turn */
9777 for (regindex = 0; regindex < 3; regindex ++) {
9780 if (regindex == 0) {
9781 regtype = spec [MONO_INST_DEST];
9785 } else if (regindex == 1) {
9786 regtype = spec [MONO_INST_SRC1];
9791 regtype = spec [MONO_INST_SRC2];
9797 #if SIZEOF_VOID_P == 4
9798 if (regtype == 'l') {
9800 * Since some instructions reference the original long vreg,
9801 * and some reference the two component vregs, it is quite hard
9802 * to determine when it needs to be global. So be conservative.
9804 if (!get_vreg_to_inst (cfg, vreg)) {
9805 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
9807 if (cfg->verbose_level > 1)
9808 printf ("LONG VREG R%d made global.\n", vreg);
9812 * Make the component vregs volatile since the optimizations can
9813 * get confused otherwise.
9815 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
9816 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
9820 g_assert (vreg != -1);
9822 prev_bb = vreg_to_bb [vreg];
9824 /* 0 is a valid block num */
9825 vreg_to_bb [vreg] = block_num + 1;
/* vreg previously seen in a different bblock (or already flagged -1) */
9826 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* hard registers don't need a variable */
9827 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
9830 if (!get_vreg_to_inst (cfg, vreg)) {
9831 if (G_UNLIKELY (cfg->verbose_level > 1))
9832 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
9836 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
9839 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
9842 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
9845 g_assert_not_reached ();
9849 /* Flag as having been used in more than one bb */
9850 vreg_to_bb [vreg] = -1;
9856 /* If a variable is used in only one bblock, convert it into a local vreg */
9857 for (i = 0; i < cfg->num_varinfo; i++) {
9858 MonoInst *var = cfg->varinfo [i];
9859 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
9861 switch (var->type) {
9867 #if SIZEOF_VOID_P == 8
9870 #if !defined(__i386__) && !defined(MONO_ARCH_SOFT_FLOAT)
9871 /* Enabling this screws up the fp stack on x86 */
9874 /* Arguments are implicitly global */
9875 /* Putting R4 vars into registers doesn't work currently */
9876 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4)) {
9878 * Make that the variable's liveness interval doesn't contain a call, since
9879 * that would cause the lvreg to be spilled, making the whole optimization
9882 /* This is too slow for JIT compilation */
9884 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
9886 int def_index, call_index, ins_index;
9887 gboolean spilled = FALSE;
9892 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
9893 const char *spec = INS_INFO (ins->opcode);
9895 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
9896 def_index = ins_index;
/*
 * NOTE(review): both arms of this '||' test MONO_INST_SRC1/sreg1; the
 * second arm was presumably meant to test MONO_INST_SRC2/sreg2, so uses
 * of the variable as a second source operand are never checked here.
 * Confirm and fix against the full source.
 */
9898 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
9899 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
9900 if (call_index > def_index) {
9906 if (MONO_IS_CALL (ins))
9907 call_index = ins_index;
9917 if (G_UNLIKELY (cfg->verbose_level > 2))
9918 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
/* demote: drop the variable and its vreg mapping */
9919 var->flags |= MONO_INST_IS_DEAD;
9920 cfg->vreg_to_inst [var->dreg] = NULL;
9927 * Compress the varinfo and vars tables so the liveness computation is faster and
9928 * takes up less space.
9931 for (i = 0; i < cfg->num_varinfo; ++i) {
9932 MonoInst *var = cfg->varinfo [i];
9933 if (pos < i && cfg->locals_start == i)
9934 cfg->locals_start = pos;
9935 if (!(var->flags & MONO_INST_IS_DEAD)) {
9937 cfg->varinfo [pos] = cfg->varinfo [i];
9938 cfg->varinfo [pos]->inst_c0 = pos;
9939 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
9940 cfg->vars [pos].idx = pos;
9941 #if SIZEOF_VOID_P == 4
9942 if (cfg->varinfo [pos]->type == STACK_I8) {
9943 /* Modify the two component vars too */
9946 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
9947 var1->inst_c0 = pos;
9948 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
9949 var1->inst_c0 = pos;
9956 cfg->num_varinfo = pos;
9957 if (cfg->locals_start > cfg->num_varinfo)
9958 cfg->locals_start = cfg->num_varinfo;
9962 * mono_spill_global_vars:
9964 * Generate spill code for variables which are not allocated to registers,
9965 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
9966 * code is generated which could be optimized by the local optimization passes.
9969 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
9974 guint32 *vreg_to_lvreg;
9976 guint32 i, lvregs_len;
9977 gboolean dest_has_lvreg = FALSE;
9978 guint32 stacktypes [128];
9980 *need_local_opts = FALSE;
9982 memset (spec2, 0, sizeof (spec2));
9984 /* FIXME: Move this function to mini.c */
9985 stacktypes ['i'] = STACK_PTR;
9986 stacktypes ['l'] = STACK_I8;
9987 stacktypes ['f'] = STACK_R8;
9989 #if SIZEOF_VOID_P == 4
9990 /* Create MonoInsts for longs */
9991 for (i = 0; i < cfg->num_varinfo; i++) {
9992 MonoInst *ins = cfg->varinfo [i];
9994 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
9995 switch (ins->type) {
9996 #ifdef MONO_ARCH_SOFT_FLOAT
10002 g_assert (ins->opcode == OP_REGOFFSET);
10004 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
10006 tree->opcode = OP_REGOFFSET;
10007 tree->inst_basereg = ins->inst_basereg;
10008 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
10010 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
10012 tree->opcode = OP_REGOFFSET;
10013 tree->inst_basereg = ins->inst_basereg;
10014 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
10024 /* FIXME: widening and truncation */
10027 * As an optimization, when a variable allocated to the stack is first loaded into
10028 * an lvreg, we will remember the lvreg and use it the next time instead of loading
10029 * the variable again.
10031 orig_next_vreg = cfg->next_vreg;
10032 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
10033 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
10036 /* Add spill loads/stores */
10037 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10040 if (cfg->verbose_level > 1)
10041 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
10043 /* Clear vreg_to_lvreg array */
10044 for (i = 0; i < lvregs_len; i++)
10045 vreg_to_lvreg [lvregs [i]] = 0;
10049 MONO_BB_FOR_EACH_INS (bb, ins) {
10050 const char *spec = INS_INFO (ins->opcode);
10051 int regtype, srcindex, sreg, tmp_reg, prev_dreg;
10052 gboolean store, no_lvreg;
10054 if (G_UNLIKELY (cfg->verbose_level > 1))
10055 mono_print_ins (ins);
10057 if (ins->opcode == OP_NOP)
10061 * We handle LDADDR here as well, since it can only be decomposed
10062 * when variable addresses are known.
10064 if (ins->opcode == OP_LDADDR) {
10065 MonoInst *var = ins->inst_p0;
10067 if (var->opcode == OP_VTARG_ADDR) {
10068 /* Happens on SPARC/S390 where vtypes are passed by reference */
10069 MonoInst *vtaddr = var->inst_left;
10070 if (vtaddr->opcode == OP_REGVAR) {
10071 ins->opcode = OP_MOVE;
10072 ins->sreg1 = vtaddr->dreg;
10074 else if (var->inst_left->opcode == OP_REGOFFSET) {
10075 ins->opcode = OP_LOAD_MEMBASE;
10076 ins->inst_basereg = vtaddr->inst_basereg;
10077 ins->inst_offset = vtaddr->inst_offset;
10081 g_assert (var->opcode == OP_REGOFFSET);
10083 ins->opcode = OP_ADD_IMM;
10084 ins->sreg1 = var->inst_basereg;
10085 ins->inst_imm = var->inst_offset;
10088 *need_local_opts = TRUE;
10089 spec = INS_INFO (ins->opcode);
10092 if (ins->opcode < MONO_CEE_LAST) {
10093 mono_print_ins (ins);
10094 g_assert_not_reached ();
10098 * Store opcodes have destbasereg in the dreg, but in reality, it is an
10102 if (MONO_IS_STORE_MEMBASE (ins)) {
10103 tmp_reg = ins->dreg;
10104 ins->dreg = ins->sreg2;
10105 ins->sreg2 = tmp_reg;
10108 spec2 [MONO_INST_DEST] = ' ';
10109 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10110 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10112 } else if (MONO_IS_STORE_MEMINDEX (ins))
10113 g_assert_not_reached ();
10118 if (G_UNLIKELY (cfg->verbose_level > 1))
10119 printf ("\t %.3s %d %d %d\n", spec, ins->dreg, ins->sreg1, ins->sreg2);
10124 regtype = spec [MONO_INST_DEST];
10125 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
10128 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
10129 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
10130 MonoInst *store_ins;
10133 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
10135 if (var->opcode == OP_REGVAR) {
10136 ins->dreg = var->dreg;
10137 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
10139 * Instead of emitting a load+store, use a _membase opcode.
10141 g_assert (var->opcode == OP_REGOFFSET);
10142 if (ins->opcode == OP_MOVE) {
10145 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
10146 ins->inst_basereg = var->inst_basereg;
10147 ins->inst_offset = var->inst_offset;
10150 spec = INS_INFO (ins->opcode);
10154 g_assert (var->opcode == OP_REGOFFSET);
10156 prev_dreg = ins->dreg;
10158 /* Invalidate any previous lvreg for this vreg */
10159 vreg_to_lvreg [ins->dreg] = 0;
10163 #ifdef MONO_ARCH_SOFT_FLOAT
10164 if (store_opcode == OP_STORER8_MEMBASE_REG) {
10166 store_opcode = OP_STOREI8_MEMBASE_REG;
10170 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
10172 if (regtype == 'l') {
10173 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
10174 mono_bblock_insert_after_ins (bb, ins, store_ins);
10175 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
10176 mono_bblock_insert_after_ins (bb, ins, store_ins);
10179 g_assert (store_opcode != OP_STOREV_MEMBASE);
10181 /* Try to fuse the store into the instruction itself */
10182 /* FIXME: Add more instructions */
10183 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
10184 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
10185 ins->inst_imm = ins->inst_c0;
10186 ins->inst_destbasereg = var->inst_basereg;
10187 ins->inst_offset = var->inst_offset;
10188 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
10189 ins->opcode = store_opcode;
10190 ins->inst_destbasereg = var->inst_basereg;
10191 ins->inst_offset = var->inst_offset;
10195 tmp_reg = ins->dreg;
10196 ins->dreg = ins->sreg2;
10197 ins->sreg2 = tmp_reg;
10200 spec2 [MONO_INST_DEST] = ' ';
10201 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10202 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10204 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
10205 // FIXME: The backends expect the base reg to be in inst_basereg
10206 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
10208 ins->inst_basereg = var->inst_basereg;
10209 ins->inst_offset = var->inst_offset;
10210 spec = INS_INFO (ins->opcode);
10212 /* printf ("INS: "); mono_print_ins (ins); */
10213 /* Create a store instruction */
10214 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
10216 /* Insert it after the instruction */
10217 mono_bblock_insert_after_ins (bb, ins, store_ins);
10220 * We can't assign ins->dreg to var->dreg here, since the
10221 * sregs could use it. So set a flag, and do it after
10224 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
10225 dest_has_lvreg = TRUE;
10234 for (srcindex = 0; srcindex < 2; ++srcindex) {
10235 regtype = spec [(srcindex == 0) ? MONO_INST_SRC1 : MONO_INST_SRC2];
10236 sreg = srcindex == 0 ? ins->sreg1 : ins->sreg2;
10238 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
10239 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
10240 MonoInst *var = get_vreg_to_inst (cfg, sreg);
10241 MonoInst *load_ins;
10242 guint32 load_opcode;
10244 if (var->opcode == OP_REGVAR) {
10246 ins->sreg1 = var->dreg;
10248 ins->sreg2 = var->dreg;
10252 g_assert (var->opcode == OP_REGOFFSET);
10254 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
10256 g_assert (load_opcode != OP_LOADV_MEMBASE);
10258 if (vreg_to_lvreg [sreg]) {
10259 /* The variable is already loaded to an lvreg */
10260 if (G_UNLIKELY (cfg->verbose_level > 1))
10261 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
10263 ins->sreg1 = vreg_to_lvreg [sreg];
10265 ins->sreg2 = vreg_to_lvreg [sreg];
10269 /* Try to fuse the load into the instruction */
10270 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
10271 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
10272 ins->inst_basereg = var->inst_basereg;
10273 ins->inst_offset = var->inst_offset;
10274 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
10275 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
10276 ins->sreg2 = var->inst_basereg;
10277 ins->inst_offset = var->inst_offset;
10279 if ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE)) {
10280 ins->opcode = OP_NOP;
10283 //printf ("%d ", srcindex); mono_print_ins (ins);
10285 sreg = alloc_dreg (cfg, stacktypes [regtype]);
10287 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
10288 if (var->dreg == prev_dreg) {
10290 * sreg refers to the value loaded by the load
10291 * emitted below, but we need to use ins->dreg
10292 * since it refers to the store emitted earlier.
10296 vreg_to_lvreg [var->dreg] = sreg;
10297 g_assert (lvregs_len < 1024);
10298 lvregs [lvregs_len ++] = var->dreg;
10307 if (regtype == 'l') {
10308 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
10309 mono_bblock_insert_before_ins (bb, ins, load_ins);
10310 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
10311 mono_bblock_insert_before_ins (bb, ins, load_ins);
10314 #if SIZEOF_VOID_P == 4
10315 g_assert (load_opcode != OP_LOADI8_MEMBASE);
10317 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
10318 mono_bblock_insert_before_ins (bb, ins, load_ins);
10324 if (dest_has_lvreg) {
10325 vreg_to_lvreg [prev_dreg] = ins->dreg;
10326 g_assert (lvregs_len < 1024);
10327 lvregs [lvregs_len ++] = prev_dreg;
10328 dest_has_lvreg = FALSE;
10332 tmp_reg = ins->dreg;
10333 ins->dreg = ins->sreg2;
10334 ins->sreg2 = tmp_reg;
10337 if (MONO_IS_CALL (ins)) {
10338 /* Clear vreg_to_lvreg array */
10339 for (i = 0; i < lvregs_len; i++)
10340 vreg_to_lvreg [lvregs [i]] = 0;
10344 if (cfg->verbose_level > 1)
10345 mono_print_ins_index (1, ins);
10352 * - use 'iadd' instead of 'int_add'
10353 * - handling ovf opcodes: decompose in method_to_ir.
10354 * - unify iregs/fregs
10355 * -> partly done, the missing parts are:
10356 * - a more complete unification would involve unifying the hregs as well, so
10357 * code wouldn't need if (fp) all over the place. but that would mean the hregs
10358 * would no longer map to the machine hregs, so the code generators would need to
10359 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
10360 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
10361 * fp/non-fp branches speeds it up by about 15%.
10362 * - use sext/zext opcodes instead of shifts
10364 * - get rid of TEMPLOADs if possible and use vregs instead
10365 * - clean up usage of OP_P/OP_ opcodes
10366 * - cleanup usage of DUMMY_USE
10367 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
10369 * - set the stack type and allocate a dreg in the EMIT_NEW macros
10370 * - get rid of all the <foo>2 stuff when the new JIT is ready.
10371 * - make sure handle_stack_args () is called before the branch is emitted
10372 * - when the new IR is done, get rid of all unused stuff
10373 * - COMPARE/BEQ as separate instructions or unify them ?
10374 * - keeping them separate allows specialized compare instructions like
10375 * compare_imm, compare_membase
10376 * - most back ends unify fp compare+branch, fp compare+ceq
10377 * - integrate handle_stack_args into inline_method
10378 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
10379 * - Things to backport to the old JIT:
10380 * - op_atomic_exchange fix for amd64
10381 * - localloc fix for amd64
10382 * - x86 type_token change
10384 * - long eq/ne optimizations
10385 * - handle long shift opts on 32 bit platforms somehow: they require
10386 * 3 sregs (2 for arg1 and 1 for arg2)
10387 * - make byref a 'normal' type.
10388 * - use vregs for bb->out_stacks if possible, handle_global_vregs will make them a
10389 * variable if needed.
10390 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
10391 * like inline_method.
10392 * - remove inlining restrictions
10393 * - remove mono_save_args.
10394 * - add 'introduce a new optimization to simplify some range checks'
10395 * - fix LNEG and enable cfold of INEG
10396 * - generalize x86 optimizations like ldelema as a peephole optimization
10397 * - add store_mem_imm for amd64
10398 * - optimize the loading of the interruption flag in the managed->native wrappers
10399 * - avoid special handling of OP_NOP in passes
10400 * - move code inserting instructions into one function/macro.
10401 * - cleanup the code replacement in decompose_long_opts ()
10402 * - try a coalescing phase after liveness analysis
10403 * - add float -> vreg conversion + local optimizations on !x86
10404 * - figure out how to handle decomposed branches during optimizations, ie.
10405 * compare+branch, op_jump_table+op_br etc.
10406 * - promote RuntimeXHandles to vregs
10407 * - vtype cleanups:
10408 * - add a NEW_VARLOADA_VREG macro
10409 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
10410 * accessing vtype fields.
10411 * - get rid of I8CONST on 64 bit platforms
10412 * - dealing with the increase in code size due to branches created during opcode
10414 * - use extended basic blocks
10415 * - all parts of the JIT
10416 * - handle_global_vregs () && local regalloc
10417 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
10418 * - sources of increase in code size:
10421 * - isinst and castclass
10422 * - lvregs not allocated to global registers even if used multiple times
10423 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
10425 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
10426 * - add all micro optimizations from the old JIT
10427 * - put tree optimizations into the deadce pass
10428 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
10429 * specific function.
10430 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
10431 * fcompare + branchCC.
10432 * - sig->ret->byref seems to be set for some calls made from ldfld wrappers when
10433 * running generics.exe.
10434 * - create a helper function for allocating a stack slot, taking into account
10435 * MONO_CFG_HAS_SPILLUP.
10436 * - merge new GC changes in mini.c.
10438 * - merge the ia64 switch changes.
10439 * - merge the mips conditional changes.
10440 * - remove unused opcodes from mini-ops.h, remove "op_" from the opcode names,
10441 * remove the op_ opcodes from the cpu-..md files, clean up the cpu-..md files.
10442 * - make the cpu_ tables smaller when the usage of the cee_ opcodes is removed.
10443 * - optimize mono_regstate2_alloc_int/float.
10444 * - fix the pessimistic handling of variables accessed in exception handler blocks.
10445 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
10446 * parts of the tree could be separated by other instructions, killing the tree
10447 * arguments, or stores killing loads etc. Also, should we fold loads into other
10448 * instructions if the result of the load is used multiple times ?
10449 * - make the REM_IMM optimization in mini-x86.c arch-independent.
10450 * - LAST MERGE: 108395.
10451 * - when returning vtypes in registers, generate IR and append it to the end of the
10452 * last bb instead of doing it in the epilog.
10453 * - when the new JIT is done, use the ins emission macros in ir-emit.h instead of the
10454 * ones in inssel.h.
10455 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
10463 - When to decompose opcodes:
10464 - earlier: this makes some optimizations hard to implement, since the low level IR
10465 no longer contains the necessary information. But it is easier to do.
10466 - later: harder to implement, enables more optimizations.
10467 - Branches inside bblocks:
10468 - created when decomposing complex opcodes.
10469 - branches to another bblock: harmless, but not tracked by the branch
10470 optimizations, so need to branch to a label at the start of the bblock.
10471 - branches to inside the same bblock: very problematic, trips up the local
10472 reg allocator. Can be fixed by splitting the current bblock, but that is a
10473 complex operation, since some local vregs can become global vregs etc.
10474 - Local/global vregs:
10475 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
10476 local register allocator.
10477 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
10478 structure, created by mono_create_var (). Assigned to hregs or the stack by
10479 the global register allocator.
10480 - When to do optimizations like alu->alu_imm:
10481 - earlier -> saves work later on since the IR will be smaller/simpler
10482 - later -> can work on more instructions
10483 - Handling of valuetypes:
10484 - When a vtype is pushed on the stack, a new temporary is created, an
10485 instruction computing its address (LDADDR) is emitted and pushed on
10486 the stack. Need to optimize cases when the vtype is used immediately as in
10487 argument passing, stloc etc.
10488 - Instead of the to_end stuff in the old JIT, simply call the function handling
10489 the values on the stack before emitting the last instruction of the bb.
10492 #endif /* DISABLE_JIT */