2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
20 #ifdef HAVE_SYS_TIME_H
24 #ifdef HAVE_VALGRIND_MEMCHECK_H
25 #include <valgrind/memcheck.h>
28 #include <mono/metadata/assembly.h>
29 #include <mono/metadata/loader.h>
30 #include <mono/metadata/tabledefs.h>
31 #include <mono/metadata/class.h>
32 #include <mono/metadata/object.h>
33 #include <mono/metadata/exception.h>
34 #include <mono/metadata/opcodes.h>
35 #include <mono/metadata/mono-endian.h>
36 #include <mono/metadata/tokentype.h>
37 #include <mono/metadata/tabledefs.h>
38 #include <mono/metadata/threads.h>
39 #include <mono/metadata/marshal.h>
40 #include <mono/metadata/socket-io.h>
41 #include <mono/metadata/appdomain.h>
42 #include <mono/metadata/debug-helpers.h>
43 #include <mono/io-layer/io-layer.h>
44 #include "mono/metadata/profiler.h"
45 #include <mono/metadata/profiler-private.h>
46 #include <mono/metadata/mono-config.h>
47 #include <mono/metadata/environment.h>
48 #include <mono/metadata/mono-debug.h>
49 #include <mono/metadata/mono-debug-debugger.h>
50 #include <mono/metadata/monitor.h>
51 #include <mono/metadata/gc-internal.h>
52 #include <mono/metadata/security-manager.h>
53 #include <mono/metadata/threads-types.h>
54 #include <mono/metadata/rawbuffer.h>
55 #include <mono/metadata/security-core-clr.h>
56 #include <mono/utils/mono-math.h>
57 #include <mono/utils/mono-compiler.h>
58 #include <mono/os/gc_wrapper.h>
68 #include "jit-icalls.h"
72 #define BRANCH_COST 100
73 #define INLINE_LENGTH_LIMIT 20
74 #define INLINE_FAILURE do {\
75 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE))\
78 #define CHECK_CFG_EXCEPTION do {\
79 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
82 #define METHOD_ACCESS_FAILURE do { \
83 char *method_fname = mono_method_full_name (method, TRUE); \
84 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
85 cfg->exception_type = MONO_EXCEPTION_METHOD_ACCESS; \
86 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
87 g_free (method_fname); \
88 g_free (cil_method_fname); \
89 goto exception_exit; \
91 #define FIELD_ACCESS_FAILURE do { \
92 char *method_fname = mono_method_full_name (method, TRUE); \
93 char *field_fname = mono_field_full_name (field); \
94 cfg->exception_type = MONO_EXCEPTION_FIELD_ACCESS; \
95 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
96 g_free (method_fname); \
97 g_free (field_fname); \
98 goto exception_exit; \
100 #define GENERIC_SHARING_FAILURE(opcode) do { \
101 if (cfg->generic_sharing_context) { \
102 if (cfg->verbose_level > 2) \
103 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
104 cfg->exception_type = MONO_EXCEPTION_GENERIC_SHARING_FAILED; \
105 goto exception_exit; \
109 /* Determine whether 'ins' represents a load of the 'this' argument */
110 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
112 static int ldind_to_load_membase (int opcode);
113 static int stind_to_store_membase (int opcode);
115 int mono_op_to_op_imm (int opcode);
116 int mono_op_to_op_imm_noemul (int opcode);
118 MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
119 void mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native);
120 void mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass);
122 int mono_method_to_ir2 (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
123 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
124 guint inline_offset, gboolean is_virtual_call);
126 /* helper methods signature */
127 extern MonoMethodSignature *helper_sig_class_init_trampoline;
128 extern MonoMethodSignature *helper_sig_domain_get;
129 extern MonoMethodSignature *helper_sig_generic_class_init_trampoline;
130 extern MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
133 * Instruction metadata
138 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2,
143 #if SIZEOF_VOID_P == 8
148 /* keep in sync with the enum in mini.h */
151 #include "mini-ops.h"
155 extern GHashTable *jit_icall_name_hash;
157 #define MONO_INIT_VARINFO(vi,id) do { \
158 (vi)->range.first_use.pos.bid = 0xffff; \
164 mono_alloc_ireg (MonoCompile *cfg)
166 return alloc_ireg (cfg);
170 mono_alloc_freg (MonoCompile *cfg)
172 return alloc_freg (cfg);
176 mono_alloc_preg (MonoCompile *cfg)
178 return alloc_preg (cfg);
182 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
184 return alloc_dreg (cfg, stack_type);
188 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
194 switch (type->type) {
197 case MONO_TYPE_BOOLEAN:
209 case MONO_TYPE_FNPTR:
211 case MONO_TYPE_CLASS:
212 case MONO_TYPE_STRING:
213 case MONO_TYPE_OBJECT:
214 case MONO_TYPE_SZARRAY:
215 case MONO_TYPE_ARRAY:
219 #if SIZEOF_VOID_P == 8
228 case MONO_TYPE_VALUETYPE:
229 if (type->data.klass->enumtype) {
230 type = type->data.klass->enum_basetype;
234 case MONO_TYPE_TYPEDBYREF:
236 case MONO_TYPE_GENERICINST:
237 type = &type->data.generic_class->container_class->byval_arg;
241 g_assert (cfg->generic_sharing_context);
244 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
250 mono_print_bb (MonoBasicBlock *bb, const char *msg)
255 printf ("\n%s %d: [IN: ", msg, bb->block_num);
256 for (i = 0; i < bb->in_count; ++i)
257 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
259 for (i = 0; i < bb->out_count; ++i)
260 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
262 for (tree = bb->code; tree; tree = tree->next)
263 mono_print_ins_index (-1, tree);
267 * Can't put this at the beginning, since other files reference stuff from this
272 #define UNVERIFIED do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto unverified; } while (0)
274 #define GET_BBLOCK(cfg,tblock,ip) do { \
275 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
277 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
278 NEW_BBLOCK (cfg, (tblock)); \
279 (tblock)->cil_code = (ip); \
280 ADD_BBLOCK (cfg, (tblock)); \
284 #ifndef MONO_ARCH_EMIT_BOUNDS_CHECK
285 #define MONO_ARCH_EMIT_BOUNDS_CHECK(cfg, array_reg, offset, index_reg) do { \
286 int _length_reg = alloc_ireg (cfg); \
287 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, _length_reg, array_reg, offset); \
288 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, _length_reg, index_reg); \
289 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException"); \
293 #define MONO_EMIT_BOUNDS_CHECK(cfg, array_reg, array_type, array_length_field, index_reg) do { \
294 if (!(cfg->opt & MONO_OPT_ABCREM)) { \
295 MONO_ARCH_EMIT_BOUNDS_CHECK ((cfg), (array_reg), G_STRUCT_OFFSET (array_type, array_length_field), (index_reg)); \
298 MONO_INST_NEW ((cfg), ins, OP_BOUNDS_CHECK); \
299 ins->sreg1 = array_reg; \
300 ins->sreg2 = index_reg; \
301 ins->inst_imm = G_STRUCT_OFFSET (array_type, array_length_field); \
302 MONO_ADD_INS ((cfg)->cbb, ins); \
303 (cfg)->flags |= MONO_CFG_HAS_ARRAY_ACCESS; \
304 (cfg)->cbb->has_array_access = TRUE; \
308 #if defined(__i386__) || defined(__x86_64__)
309 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
310 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
311 (dest)->dreg = alloc_preg ((cfg)); \
312 (dest)->sreg1 = (sr1); \
313 (dest)->sreg2 = (sr2); \
314 (dest)->inst_imm = (imm); \
315 (dest)->backend.shift_amount = (shift); \
316 MONO_ADD_INS ((cfg)->cbb, (dest)); \
320 #if SIZEOF_VOID_P == 8
321 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
322 /* FIXME: Need to add many more cases */ \
323 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
325 int dr = alloc_preg (cfg); \
326 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
327 (ins)->sreg2 = widen->dreg; \
331 #define ADD_WIDEN_OP(ins, arg1, arg2)
334 #define ADD_BINOP(op) do { \
335 MONO_INST_NEW (cfg, ins, (op)); \
337 ins->sreg1 = sp [0]->dreg; \
338 ins->sreg2 = sp [1]->dreg; \
339 type_from_op (ins, sp [0], sp [1]); \
341 /* Have to insert a widening op */ \
342 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
343 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
344 MONO_ADD_INS ((cfg)->cbb, (ins)); \
346 mono_decompose_opcode ((cfg), (ins)); \
349 #define ADD_UNOP(op) do { \
350 MONO_INST_NEW (cfg, ins, (op)); \
352 ins->sreg1 = sp [0]->dreg; \
353 type_from_op (ins, sp [0], NULL); \
355 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
356 MONO_ADD_INS ((cfg)->cbb, (ins)); \
358 mono_decompose_opcode (cfg, ins); \
361 #define ADD_BINCOND(next_block) do { \
364 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
365 cmp->sreg1 = sp [0]->dreg; \
366 cmp->sreg2 = sp [1]->dreg; \
367 type_from_op (cmp, sp [0], sp [1]); \
369 type_from_op (ins, sp [0], sp [1]); \
370 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
371 GET_BBLOCK (cfg, tblock, target); \
372 link_bblock (cfg, bblock, tblock); \
373 ins->inst_true_bb = tblock; \
374 if ((next_block)) { \
375 link_bblock (cfg, bblock, (next_block)); \
376 ins->inst_false_bb = (next_block); \
377 start_new_bblock = 1; \
379 GET_BBLOCK (cfg, tblock, ip); \
380 link_bblock (cfg, bblock, tblock); \
381 ins->inst_false_bb = tblock; \
382 start_new_bblock = 2; \
384 if (sp != stack_start) { \
385 handle_stack_args (cfg, stack_start, sp - stack_start); \
386 CHECK_UNVERIFIABLE (cfg); \
388 MONO_ADD_INS (bblock, cmp); \
389 MONO_ADD_INS (bblock, ins); \
393 * link_bblock: Links two basic blocks
395 * Links two basic blocks in the control flow graph: the 'from'
396 * argument is the starting block and the 'to' argument is the block
397 * that control flow proceeds to after 'from'.
400 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
402 MonoBasicBlock **newa;
/* Optional edge tracing for debugging.
 * NOTE(review): the first two formats use "IL%04x" while the third uses
 * "IL_%04x" — looks like a missing underscore in the trace strings; confirm
 * against the upstream file before changing runtime output. */
406 if (from->cil_code) {
408 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
410 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
413 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
415 printf ("edge from entry to exit\n");
/* Avoid duplicate edges: if 'to' is already a successor of 'from', bail out. */
420 for (i = 0; i < from->out_count; ++i) {
421 if (to == from->out_bb [i]) {
/* Grow the successor ('out') array by one slot; the new array is allocated
 * from the cfg mempool and the old contents are copied over. */
427 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
428 for (i = 0; i < from->out_count; ++i) {
429 newa [i] = from->out_bb [i];
/* Symmetric update of the predecessor ('in') list on the 'to' block. */
437 for (i = 0; i < to->in_count; ++i) {
438 if (from == to->in_bb [i]) {
444 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
445 for (i = 0; i < to->in_count; ++i) {
446 newa [i] = to->in_bb [i];
455 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
457 link_bblock (cfg, from, to);
461 * mono_find_block_region:
463 * We mark each basic block with a region ID. We use that to avoid BB
464 * optimizations when blocks are in different regions.
467 * A region token that encodes where this region is, and information
468 * about the clause owner for this block.
470 * The region encodes the try/catch/filter clause that owns this block
471 * as well as the type. -1 is a special value that represents a block
472 * that is in none of try/catch/filter.
475 mono_find_block_region (MonoCompile *cfg, int offset)
477 MonoMethod *method = cfg->method;
478 MonoMethodHeader *header = mono_method_get_header (method);
479 MonoExceptionClause *clause;
/* Region token layout (visible in the returns below): the 1-based clause
 * index goes in the bits above 8, and the low byte combines the MONO_REGION_*
 * kind with the clause's own flags. Handlers/filters are checked before try
 * blocks so that an offset inside a handler is attributed to the handler
 * region, not the enclosing try. */
482 /* first search for handlers and filters */
483 for (i = 0; i < header->num_clauses; ++i) {
484 clause = &header->clauses [i];
/* A filter region spans [filter_offset, handler_offset). */
485 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
486 (offset < (clause->handler_offset)))
487 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
489 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
490 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
491 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
492 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
493 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
495 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
499 /* search the try blocks */
500 for (i = 0; i < header->num_clauses; ++i) {
501 clause = &header->clauses [i];
502 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
503 return ((i + 1) << 8) | clause->flags;
510 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
512 MonoMethod *method = cfg->method;
513 MonoMethodHeader *header = mono_method_get_header (method);
514 MonoExceptionClause *clause;
515 MonoBasicBlock *handler;
/* Walk the clauses looking for ones that a branch from 'ip' to 'target'
 * exits: 'ip' lies inside the clause's protected range but 'target' does not.
 * For each such clause whose flags match 'type', the handler's basic block
 * is appended to the result list.
 * NOTE(review): 'res' is declared/returned on lines not visible in this
 * fragment — presumably a GList* accumulated and returned to the caller. */
519 for (i = 0; i < header->num_clauses; ++i) {
520 clause = &header->clauses [i];
521 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
522 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
523 if (clause->flags == type) {
524 handler = cfg->cil_offset_to_bb [clause->handler_offset];
526 res = g_list_append (res, handler);
534 mono_create_spvar_for_region (MonoCompile *cfg, int region)
538 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
542 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
543 /* prevent it from being register allocated */
544 var->flags |= MONO_INST_INDIRECT;
546 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
550 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
552 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
556 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
560 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
564 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
565 /* prevent it from being register allocated */
566 var->flags |= MONO_INST_INDIRECT;
568 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
574 * Returns the type used in the eval stack when @type is loaded.
575 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
578 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
582 inst->klass = klass = mono_class_from_mono_type (type);
584 inst->type = STACK_MP;
589 switch (type->type) {
591 inst->type = STACK_INV;
595 case MONO_TYPE_BOOLEAN:
601 inst->type = STACK_I4;
606 case MONO_TYPE_FNPTR:
607 inst->type = STACK_PTR;
609 case MONO_TYPE_CLASS:
610 case MONO_TYPE_STRING:
611 case MONO_TYPE_OBJECT:
612 case MONO_TYPE_SZARRAY:
613 case MONO_TYPE_ARRAY:
614 inst->type = STACK_OBJ;
618 inst->type = STACK_I8;
622 inst->type = STACK_R8;
624 case MONO_TYPE_VALUETYPE:
625 if (type->data.klass->enumtype) {
626 type = type->data.klass->enum_basetype;
630 inst->type = STACK_VTYPE;
633 case MONO_TYPE_TYPEDBYREF:
634 inst->klass = mono_defaults.typed_reference_class;
635 inst->type = STACK_VTYPE;
637 case MONO_TYPE_GENERICINST:
638 type = &type->data.generic_class->container_class->byval_arg;
641 case MONO_TYPE_MVAR :
642 /* FIXME: all the arguments must be references for now,
643 * later look inside cfg and see if the arg num is
646 g_assert (cfg->generic_sharing_context);
647 inst->type = STACK_OBJ;
650 g_error ("unknown type 0x%02x in eval stack type", type->type);
655 * The following tables are used to quickly validate the IL code in type_from_op ().
658 bin_num_table [STACK_MAX] [STACK_MAX] = {
659 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
660 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
661 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
662 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
663 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
664 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
665 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
666 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
671 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
674 /* reduce the size of this table */
676 bin_int_table [STACK_MAX] [STACK_MAX] = {
677 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
678 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
679 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
680 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
681 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
682 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
683 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
684 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
688 bin_comp_table [STACK_MAX] [STACK_MAX] = {
689 /* Inv i L p F & O vt */
691 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
692 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
693 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
694 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
695 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
696 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
697 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
700 /* reduce the size of this table */
702 shift_table [STACK_MAX] [STACK_MAX] = {
703 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
704 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
705 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
706 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
707 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
708 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
709 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
710 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
714 * Tables to map from the non-specific opcode to the matching
715 * type-specific opcode.
717 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
719 binops_op_map [STACK_MAX] = {
720 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
723 /* handles from CEE_NEG to CEE_CONV_U8 */
725 unops_op_map [STACK_MAX] = {
726 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
729 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
731 ovfops_op_map [STACK_MAX] = {
732 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
735 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
737 ovf2ops_op_map [STACK_MAX] = {
738 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
741 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
743 ovf3ops_op_map [STACK_MAX] = {
744 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
747 /* handles from CEE_BEQ to CEE_BLT_UN */
749 beqops_op_map [STACK_MAX] = {
750 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
753 /* handles from CEE_CEQ to CEE_CLT_UN */
755 ceqops_op_map [STACK_MAX] = {
756 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
760 * Sets ins->type (the type on the eval stack) according to the
761 * type of the opcode and the arguments to it.
762 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
764 * FIXME: this function sets ins->type unconditionally in some cases, but
765 * it should set it to invalid for some types (a conv.x on an object)
768 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
770 switch (ins->opcode) {
777 /* FIXME: check unverifiable args for STACK_MP */
778 ins->type = bin_num_table [src1->type] [src2->type];
779 ins->opcode += binops_op_map [ins->type];
786 ins->type = bin_int_table [src1->type] [src2->type];
787 ins->opcode += binops_op_map [ins->type];
792 ins->type = shift_table [src1->type] [src2->type];
793 ins->opcode += binops_op_map [ins->type];
798 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
799 if ((src1->type == STACK_I8) || ((sizeof (gpointer) == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
800 ins->opcode = OP_LCOMPARE;
801 else if (src1->type == STACK_R8)
802 ins->opcode = OP_FCOMPARE;
804 ins->opcode = OP_ICOMPARE;
806 case OP_ICOMPARE_IMM:
807 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
808 if ((src1->type == STACK_I8) || ((sizeof (gpointer) == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
809 ins->opcode = OP_LCOMPARE_IMM;
821 ins->opcode += beqops_op_map [src1->type];
824 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
825 ins->opcode += ceqops_op_map [src1->type];
831 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
832 ins->opcode += ceqops_op_map [src1->type];
836 ins->type = neg_table [src1->type];
837 ins->opcode += unops_op_map [ins->type];
840 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
841 ins->type = src1->type;
843 ins->type = STACK_INV;
844 ins->opcode += unops_op_map [ins->type];
850 ins->type = STACK_I4;
851 ins->opcode += unops_op_map [src1->type];
854 ins->type = STACK_R8;
855 switch (src1->type) {
858 ins->opcode = OP_ICONV_TO_R_UN;
861 ins->opcode = OP_LCONV_TO_R_UN;
865 case CEE_CONV_OVF_I1:
866 case CEE_CONV_OVF_U1:
867 case CEE_CONV_OVF_I2:
868 case CEE_CONV_OVF_U2:
869 case CEE_CONV_OVF_I4:
870 case CEE_CONV_OVF_U4:
871 ins->type = STACK_I4;
872 ins->opcode += ovf3ops_op_map [src1->type];
874 case CEE_CONV_OVF_I_UN:
875 case CEE_CONV_OVF_U_UN:
876 ins->type = STACK_PTR;
877 ins->opcode += ovf2ops_op_map [src1->type];
879 case CEE_CONV_OVF_I1_UN:
880 case CEE_CONV_OVF_I2_UN:
881 case CEE_CONV_OVF_I4_UN:
882 case CEE_CONV_OVF_U1_UN:
883 case CEE_CONV_OVF_U2_UN:
884 case CEE_CONV_OVF_U4_UN:
885 ins->type = STACK_I4;
886 ins->opcode += ovf2ops_op_map [src1->type];
889 ins->type = STACK_PTR;
890 switch (src1->type) {
892 ins->opcode = OP_MOVE;
896 #if SIZEOF_VOID_P == 8
897 ins->opcode = OP_LCONV_TO_U;
899 ins->opcode = OP_MOVE;
903 ins->opcode = OP_LCONV_TO_U;
906 ins->opcode = OP_FCONV_TO_U;
912 ins->type = STACK_I8;
913 ins->opcode += unops_op_map [src1->type];
915 case CEE_CONV_OVF_I8:
916 case CEE_CONV_OVF_U8:
917 ins->type = STACK_I8;
918 ins->opcode += ovf3ops_op_map [src1->type];
920 case CEE_CONV_OVF_U8_UN:
921 case CEE_CONV_OVF_I8_UN:
922 ins->type = STACK_I8;
923 ins->opcode += ovf2ops_op_map [src1->type];
927 ins->type = STACK_R8;
928 ins->opcode += unops_op_map [src1->type];
931 ins->type = STACK_R8;
935 ins->type = STACK_I4;
936 ins->opcode += ovfops_op_map [src1->type];
941 ins->type = STACK_PTR;
942 ins->opcode += ovfops_op_map [src1->type];
950 ins->type = bin_num_table [src1->type] [src2->type];
951 ins->opcode += ovfops_op_map [src1->type];
952 if (ins->type == STACK_R8)
953 ins->type = STACK_INV;
955 case OP_LOAD_MEMBASE:
956 ins->type = STACK_PTR;
958 case OP_LOADI1_MEMBASE:
959 case OP_LOADU1_MEMBASE:
960 case OP_LOADI2_MEMBASE:
961 case OP_LOADU2_MEMBASE:
962 case OP_LOADI4_MEMBASE:
963 case OP_LOADU4_MEMBASE:
964 ins->type = STACK_PTR;
966 case OP_LOADI8_MEMBASE:
967 ins->type = STACK_I8;
969 case OP_LOADR4_MEMBASE:
970 case OP_LOADR8_MEMBASE:
971 ins->type = STACK_R8;
974 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
978 if (ins->type == STACK_MP)
979 ins->klass = mono_defaults.object_class;
984 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
990 param_table [STACK_MAX] [STACK_MAX] = {
995 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
999 switch (args->type) {
1009 for (i = 0; i < sig->param_count; ++i) {
1010 switch (args [i].type) {
1014 if (!sig->params [i]->byref)
1018 if (sig->params [i]->byref)
1020 switch (sig->params [i]->type) {
1021 case MONO_TYPE_CLASS:
1022 case MONO_TYPE_STRING:
1023 case MONO_TYPE_OBJECT:
1024 case MONO_TYPE_SZARRAY:
1025 case MONO_TYPE_ARRAY:
1032 if (sig->params [i]->byref)
1034 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1043 /*if (!param_table [args [i].type] [sig->params [i]->type])
1051 * When we need a pointer to the current domain many times in a method, we
1052 * call mono_domain_get() once and we store the result in a local variable.
1053 * This function returns the variable that represents the MonoDomain*.
1055 inline static MonoInst *
1056 mono_get_domainvar (MonoCompile *cfg)
1058 if (!cfg->domainvar)
1059 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1060 return cfg->domainvar;
1064 * The got_var contains the address of the Global Offset Table when AOT
1067 inline static MonoInst *
1068 mono_get_got_var (MonoCompile *cfg)
1070 #ifdef MONO_ARCH_NEED_GOT_VAR
1071 if (!cfg->compile_aot)
1073 if (!cfg->got_var) {
1074 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1076 return cfg->got_var;
1083 mono_get_vtable_var (MonoCompile *cfg)
1085 g_assert (cfg->generic_sharing_context);
1087 if (!cfg->rgctx_var) {
1088 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1089 /* force the var to be stack allocated */
1090 cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
1093 return cfg->rgctx_var;
1097 type_from_stack_type (MonoInst *ins) {
1098 switch (ins->type) {
1099 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1100 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1101 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1102 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1104 return &ins->klass->this_arg;
1105 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1106 case STACK_VTYPE: return &ins->klass->byval_arg;
1108 g_error ("stack type %d to monotype not handled\n", ins->type);
1113 static G_GNUC_UNUSED int
1114 type_to_stack_type (MonoType *t)
1116 switch (mono_type_get_underlying_type (t)->type) {
1119 case MONO_TYPE_BOOLEAN:
1122 case MONO_TYPE_CHAR:
1129 case MONO_TYPE_FNPTR:
1131 case MONO_TYPE_CLASS:
1132 case MONO_TYPE_STRING:
1133 case MONO_TYPE_OBJECT:
1134 case MONO_TYPE_SZARRAY:
1135 case MONO_TYPE_ARRAY:
1143 case MONO_TYPE_VALUETYPE:
1144 case MONO_TYPE_TYPEDBYREF:
1146 case MONO_TYPE_GENERICINST:
1147 if (mono_type_generic_inst_is_valuetype (t))
1153 g_assert_not_reached ();
1160 array_access_to_klass (int opcode)
1164 return mono_defaults.byte_class;
1166 return mono_defaults.uint16_class;
1169 return mono_defaults.int_class;
1172 return mono_defaults.sbyte_class;
1175 return mono_defaults.int16_class;
1178 return mono_defaults.int32_class;
1180 return mono_defaults.uint32_class;
1183 return mono_defaults.int64_class;
1186 return mono_defaults.single_class;
1189 return mono_defaults.double_class;
1190 case CEE_LDELEM_REF:
1191 case CEE_STELEM_REF:
1192 return mono_defaults.object_class;
1194 g_assert_not_reached ();
1200 * We try to share variables when possible
1203 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1208 /* inlining can result in deeper stacks */
1209 if (slot >= mono_method_get_header (cfg->method)->max_stack)
1210 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1212 pos = ins->type - 1 + slot * STACK_MAX;
1214 switch (ins->type) {
1221 if ((vnum = cfg->intvars [pos]))
1222 return cfg->varinfo [vnum];
1223 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1224 cfg->intvars [pos] = res->inst_c0;
1227 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1233 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1235 if (cfg->compile_aot) {
1236 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1237 jump_info_token->image = image;
1238 jump_info_token->token = token;
1239 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1244 * This function is called to handle items that are left on the evaluation stack
1245 * at basic block boundaries. What happens is that we save the values to local variables
1246 * and we reload them later when first entering the target basic block (with the
1247 * handle_loaded_temps () function).
1248 * A single joint point will use the same variables (stored in the array bb->out_stack or
1249 * bb->in_stack, if the basic block is before or after the joint point).
1251 * This function needs to be called _before_ emitting the last instruction of
1252 * the bb (i.e. before emitting a branch).
1253 * If the stack merge fails at a join point, cfg->unverifiable is set.
1256 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1259 MonoBasicBlock *bb = cfg->cbb;
1260 MonoBasicBlock *outb;
1261 MonoInst *inst, **locals;
1266 if (cfg->verbose_level > 3)
1267 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* First time this bblock's exit stack is seen: record its depth and choose
 * the variables that will carry each stack slot across the edge. If some
 * successor already has an in_stack, reuse it so both sides agree on the
 * variables. */
1268 if (!bb->out_scount) {
1269 bb->out_scount = count;
1270 //printf ("bblock %d has out:", bb->block_num);
1272 for (i = 0; i < bb->out_count; ++i) {
1273 outb = bb->out_bb [i];
1274 /* exception handlers are linked, but they should not be considered for stack args */
1275 if (outb->flags & BB_EXCEPTION_HANDLER)
1277 //printf (" %d", outb->block_num);
1278 if (outb->in_stack) {
1280 bb->out_stack = outb->in_stack;
1286 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1287 for (i = 0; i < count; ++i) {
1289 * try to reuse temps already allocated for this purpose, if they occupy the same
1290 * stack slot and if they are of the same type.
1291 * This won't cause conflicts since if 'local' is used to
1292 * store one of the values in the in_stack of a bblock, then
1293 * the same variable will be used for the same outgoing stack
1295 * This doesn't work when inlining methods, since the bblocks
1296 * in the inlined methods do not inherit their in_stack from
1297 * the bblock they are inlined to. See bug #58863 for an
1300 if (cfg->inlined_method)
1301 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1303 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate our out_stack to each successor's in_stack. A stack-depth
 * mismatch at a join point means the IL is not verifiable. */
1308 for (i = 0; i < bb->out_count; ++i) {
1309 outb = bb->out_bb [i];
1310 /* exception handlers are linked, but they should not be considered for stack args */
1311 if (outb->flags & BB_EXCEPTION_HANDLER)
1313 if (outb->in_scount) {
1314 if (outb->in_scount != bb->out_scount) {
1315 cfg->unverifiable = TRUE;
1318 continue; /* check they are the same locals */
1320 outb->in_scount = count;
1321 outb->in_stack = bb->out_stack;
1324 locals = bb->out_stack;
/* Spill every live stack slot into its temporary and replace the stack entry
 * with the variable, so the successor reloads from the shared locals. */
1326 for (i = 0; i < count; ++i) {
1327 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1328 inst->cil_code = sp [i]->cil_code;
1329 sp [i] = locals [i];
1330 if (cfg->verbose_level > 3)
1331 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1335 * It is possible that the out bblocks already have in_stack assigned, and
1336 * the in_stacks differ. In this case, we will store to all the different
1343 /* Find a bblock which has a different in_stack */
1345 while (bindex < bb->out_count) {
1346 outb = bb->out_bb [bindex];
1347 /* exception handlers are linked, but they should not be considered for stack args */
1348 if (outb->flags & BB_EXCEPTION_HANDLER) {
1352 if (outb->in_stack != locals) {
/* Duplicate the stores for each successor whose in_stack variables differ
 * from the set we last stored to. */
1353 for (i = 0; i < count; ++i) {
1354 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1355 inst->cil_code = sp [i]->cil_code;
1356 sp [i] = locals [i];
1357 if (cfg->verbose_level > 3)
1358 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1360 locals = outb->in_stack;
1369 /* Emit code which loads interface_offsets [klass->interface_id]
1370 * The array is stored in memory before vtable.
/*
 * mini_emit_load_intf_reg_vtable:
 * Emits IR that loads into @intf_reg the interface-offsets entry for @klass,
 * read from the table stored at negative offsets before the vtable pointed
 * to by @vtable_reg.
 */
1373 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1375 if (cfg->compile_aot) {
/* AOT: interface_id is unknown at compile time, so materialize it as a
 * patchable constant (already adjusted to a byte offset) and index the
 * table with an add + load. */
1376 int ioffset_reg = alloc_preg (cfg);
1377 int iid_reg = alloc_preg (cfg);
1379 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1380 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1381 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
/* JIT: interface_id is known, so the slot offset can be folded into a
 * single negative-offset load relative to the vtable. */
1384 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
1389 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1390 * stored in "klass_reg" implements the interface "klass".
1393 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1395 int ibitmap_reg = alloc_preg (cfg);
1396 int ibitmap_byte_reg = alloc_preg (cfg);
/* Load the class' interface bitmap pointer; each bit (indexed by
 * interface_id) records whether the interface is implemented. */
1398 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap));
1400 if (cfg->compile_aot) {
/* AOT: interface_id is a runtime-patched constant, so compute
 * byte index (iid >> 3) and bit mask (1 << (iid & 7)) in IR. */
1401 int iid_reg = alloc_preg (cfg);
1402 int shifted_iid_reg = alloc_preg (cfg);
1403 int ibitmap_byte_address_reg = alloc_preg (cfg);
1404 int masked_iid_reg = alloc_preg (cfg);
1405 int iid_one_bit_reg = alloc_preg (cfg);
1406 int iid_bit_reg = alloc_preg (cfg);
1407 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1408 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1409 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1410 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1411 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1412 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1413 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1414 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT: both the byte index and the bit mask fold to immediates. */
1416 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1417 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1422 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1423 * stored in "vtable_reg" implements the interface "klass".
/* Same bit-test as mini_emit_load_intf_bit_reg_class, but the interface
 * bitmap is read from the vtable instead of the MonoClass. */
1426 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1428 int ibitmap_reg = alloc_preg (cfg);
1429 int ibitmap_byte_reg = alloc_preg (cfg);
1431 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap));
1433 if (cfg->compile_aot) {
/* AOT: compute byte index (iid >> 3) and mask (1 << (iid & 7))
 * from the patched interface id at runtime. */
1434 int iid_reg = alloc_preg (cfg);
1435 int shifted_iid_reg = alloc_preg (cfg);
1436 int ibitmap_byte_address_reg = alloc_preg (cfg);
1437 int masked_iid_reg = alloc_preg (cfg);
1438 int iid_one_bit_reg = alloc_preg (cfg);
1439 int iid_bit_reg = alloc_preg (cfg);
1440 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1441 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, shifted_iid_reg, iid_reg, 3);
1442 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1443 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1444 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, masked_iid_reg, iid_reg, 7);
1445 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1446 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1447 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT: byte index and mask are compile-time constants. */
1449 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1450 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1455 * Emit code which checks whenever the interface id of @klass is smaller than
1456 * than the value given by max_iid_reg.
/* If @false_target is non-NULL, branch there on failure; otherwise emit a
 * conditional InvalidCastException check instead. */
1459 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1460 MonoBasicBlock *false_target)
1462 if (cfg->compile_aot) {
/* AOT: compare against the patched interface-id constant. */
1463 int iid_reg = alloc_preg (cfg);
1464 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1465 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1468 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
/* Unsigned compare: max_iid < iid means the interface cannot be
 * implemented by the object. */
1470 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1472 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1475 /* Same as above, but obtains max_iid from a vtable */
1477 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1478 MonoBasicBlock *false_target)
1480 int max_iid_reg = alloc_preg (cfg);
/* Load MonoVTable.max_interface_id (16-bit unsigned) and delegate the
 * actual range check. */
1482 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1483 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1486 /* Same as above, but obtains max_iid from a klass */
1488 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1489 MonoBasicBlock *false_target)
1491 int max_iid_reg = alloc_preg (cfg);
/* Load MonoClass.max_interface_id (16-bit unsigned) and delegate the
 * actual range check. */
1493 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1494 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * mini_emit_isninst_cast:
 * Emits an isinst-style subtype test for non-interface classes: checks the
 * supertypes table of the class in @klass_reg at depth klass->idepth - 1.
 * Branches to @true_target on a match; @false_target is used when the
 * inheritance depth is already too small for a match to be possible.
 */
1498 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1500 int idepth_reg = alloc_preg (cfg);
1501 int stypes_reg = alloc_preg (cfg);
1502 int stype = alloc_preg (cfg);
/* Only classes deeper than the default supertable size need an explicit
 * depth check; shallower entries are always present in the table. */
1504 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1505 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1506 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1507 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
/* Load supertypes [klass->idepth - 1] and compare it to @klass. */
1509 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1510 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1511 if (cfg->compile_aot) {
1512 int const_reg = alloc_preg (cfg);
1513 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1514 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1516 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1518 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/*
 * mini_emit_iface_cast:
 * Emits an interface cast check against the vtable in @vtable_reg: first a
 * max-interface-id range check, then the interface-bitmap bit test.  With a
 * @true_target, branches there on success (isinst); otherwise throws
 * InvalidCastException on failure (castclass).
 */
1522 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1524 int intf_reg = alloc_preg (cfg);
1526 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1527 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1528 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1530 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1532 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1536 * Variant of the above that takes a register to the class, not the vtable.
/* Same range check + bitmap bit test as mini_emit_iface_cast, but the
 * metadata is read from a MonoClass in @klass_reg. */
1539 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1541 int intf_bit_reg = alloc_preg (cfg);
1543 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1544 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1545 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1547 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1549 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * mini_emit_class_check:
 * Emits an exact-class equality check: throws InvalidCastException unless
 * the class pointer in @klass_reg is exactly @klass.
 */
1553 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1555 if (cfg->compile_aot) {
/* AOT: compare against a patchable class constant. */
1556 int const_reg = alloc_preg (cfg);
1557 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1558 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1560 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1562 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/*
 * mini_emit_class_check_branch:
 * Like mini_emit_class_check, but instead of throwing, branches to @target
 * with the caller-supplied comparison opcode @branch_op.
 */
1566 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1568 if (cfg->compile_aot) {
1569 int const_reg = alloc_preg (cfg);
1570 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1571 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1573 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1575 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
/*
 * mini_emit_castclass:
 * Emits a full castclass check for the object in @obj_reg whose class
 * pointer is in @klass_reg, throwing InvalidCastException on failure.
 * Handles array element-type checks (recursing for arrays of arrays, with
 * obj_reg == -1 to skip the vector check) and the supertypes-table walk for
 * plain classes.  @object_is_null is branched to for the null/short-circuit
 * cases of enum-related element types.
 */
1579 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1582 int rank_reg = alloc_preg (cfg);
1583 int eclass_reg = alloc_preg (cfg);
/* Array path: the rank must match exactly, then the element class is
 * checked according to the compile-time element type below. */
1585 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
1586 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1587 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1588 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1589 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
1590 if (klass->cast_class == mono_defaults.object_class) {
/* object[] case: enum-related element classes need special
 * handling — see the branch structure below. */
1591 int parent_reg = alloc_preg (cfg);
1592 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
1593 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1594 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1595 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1596 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1597 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1598 } else if (klass->cast_class == mono_defaults.enum_class) {
1599 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1600 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1601 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1603 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1604 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1607 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1608 /* Check that the object is a vector too */
1609 int bounds_reg = alloc_preg (cfg);
/* A vector (SZARRAY) has a NULL bounds pointer; a multi-dim
 * array with matching rank does not. */
1610 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
1611 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1612 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-array path: walk the supertypes table, same scheme as
 * mini_emit_isninst_cast but throwing instead of branching. */
1615 int idepth_reg = alloc_preg (cfg);
1616 int stypes_reg = alloc_preg (cfg);
1617 int stype = alloc_preg (cfg);
1619 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1620 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1621 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1622 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1624 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1625 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1626 mini_emit_class_check (cfg, stype, klass);
/*
 * mini_emit_memset:
 * Emits inline IR that sets @size bytes at @destreg + @offset to @val
 * (currently only val == 0 is supported, see the assert).  Small aligned
 * sizes use a single store-immediate; larger sizes fall back to a loop of
 * register stores, widest-first when unaligned access is permitted.
 */
1631 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
/* Only zeroing is supported by the immediate/store sequences below. */
1635 g_assert (val == 0);
/* Fast path: size fits one naturally-aligned store immediate. */
1640 if ((size <= 4) && (size <= align)) {
1643 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1646 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1649 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1651 #if SIZEOF_VOID_P == 8
1653 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* Slow path: materialize val in a register and emit a store sequence. */
1659 val_reg = alloc_preg (cfg);
1661 if (sizeof (gpointer) == 8)
1662 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1664 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
/* Unaligned destination: fall back to byte stores. */
1667 /* This could be optimized further if neccesary */
1669 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/* Aligned destination: use the widest stores the target allows,
 * then mop up the remainder with narrower ones. */
1676 #if !NO_UNALIGNED_ACCESS
1677 if (sizeof (gpointer) == 8) {
1679 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1684 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
1692 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1697 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1702 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1708 #endif /* DISABLE_JIT */
/*
 * mini_emit_memcpy2:
 * Emits an inline load/store copy of @size bytes from @srcreg + @soffset to
 * @destreg + @doffset.  Unaligned copies use byte loads/stores; aligned
 * copies use the widest element the target permits, then narrower tails.
 */
1711 mini_emit_memcpy2 (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
/* Unaligned source/destination: copy one byte at a time. */
1719 /* This could be optimized further if neccesary */
1721 cur_reg = alloc_preg (cfg);
1722 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1723 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/* Aligned: 8-byte chunks on 64-bit targets when unaligned access is
 * allowed, then 4-, 2- and 1-byte tails. */
1730 #if !NO_UNALIGNED_ACCESS
1731 if (sizeof (gpointer) == 8) {
1733 cur_reg = alloc_preg (cfg);
1734 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1735 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
1744 cur_reg = alloc_preg (cfg);
1745 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1746 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1752 cur_reg = alloc_preg (cfg);
1753 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1754 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1760 cur_reg = alloc_preg (cfg);
1761 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1762 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * ret_type_to_call_opcode:
 * Maps a managed return type to the IR call opcode family: plain CALL for
 * int/pointer/object results, LCALL for 64-bit ints, FCALL for floats,
 * VCALL for value types, VOIDCALL for void.  @calli selects the indirect
 * (_REG) variant, @virt the virtual (_VIRT) variant.  Enum and generic-
 * instance types are unwrapped and retried.
 */
1772 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
/* byref returns are pointers, handled like a plain CALL. */
1775 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1778 type = mini_get_basic_type_from_generic (gsctx, type);
1779 switch (type->type) {
1780 case MONO_TYPE_VOID:
1781 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
1784 case MONO_TYPE_BOOLEAN:
1787 case MONO_TYPE_CHAR:
1790 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1794 case MONO_TYPE_FNPTR:
1795 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1796 case MONO_TYPE_CLASS:
1797 case MONO_TYPE_STRING:
1798 case MONO_TYPE_OBJECT:
1799 case MONO_TYPE_SZARRAY:
1800 case MONO_TYPE_ARRAY:
1801 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1804 return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
1807 return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
1808 case MONO_TYPE_VALUETYPE:
/* Enums are handled as their underlying integral type. */
1809 if (type->data.klass->enumtype) {
1810 type = type->data.klass->enum_basetype;
1813 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1814 case MONO_TYPE_TYPEDBYREF:
1815 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1816 case MONO_TYPE_GENERICINST:
/* Retry with the generic type definition's byval type. */
1817 type = &type->data.generic_class->container_class->byval_arg;
1820 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
1826 * target_type_is_incompatible:
1827 * @cfg: MonoCompile context
1829 * Check that the item @arg on the evaluation stack can be stored
1830 * in the target type (can be a local, or field, etc).
1831 * The cfg arg can be used to check if we need verification or just
1834 * Returns: non-0 value if arg can't be stored on a target.
1837 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1839 MonoType *simple_type;
1842 if (target->byref) {
1843 /* FIXME: check that the pointed to types match */
1844 if (arg->type == STACK_MP)
1845 return arg->klass != mono_class_from_mono_type (target);
1846 if (arg->type == STACK_PTR)
/* Strip enum wrappers etc. before the type-kind switch below. */
1851 simple_type = mono_type_get_underlying_type (target);
1852 switch (simple_type->type) {
1853 case MONO_TYPE_VOID:
1857 case MONO_TYPE_BOOLEAN:
1860 case MONO_TYPE_CHAR:
/* Small integral targets accept I4 and native-pointer stack items. */
1863 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1867 /* STACK_MP is needed when setting pinned locals */
1868 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1873 case MONO_TYPE_FNPTR:
1874 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1877 case MONO_TYPE_CLASS:
1878 case MONO_TYPE_STRING:
1879 case MONO_TYPE_OBJECT:
1880 case MONO_TYPE_SZARRAY:
1881 case MONO_TYPE_ARRAY:
1882 if (arg->type != STACK_OBJ)
1884 /* FIXME: check type compatibility */
1888 if (arg->type != STACK_I8)
1893 if (arg->type != STACK_R8)
1896 case MONO_TYPE_VALUETYPE:
/* Value types require an exact klass match, not just STACK_VTYPE. */
1897 if (arg->type != STACK_VTYPE)
1899 klass = mono_class_from_mono_type (simple_type);
1900 if (klass != arg->klass)
1903 case MONO_TYPE_TYPEDBYREF:
1904 if (arg->type != STACK_VTYPE)
1906 klass = mono_class_from_mono_type (simple_type);
1907 if (klass != arg->klass)
1910 case MONO_TYPE_GENERICINST:
/* Generic instances: value-type instantiations behave like
 * VALUETYPE, reference instantiations like OBJECT. */
1911 if (mono_type_generic_inst_is_valuetype (simple_type)) {
1912 if (arg->type != STACK_VTYPE)
1914 klass = mono_class_from_mono_type (simple_type);
1915 if (klass != arg->klass)
1919 if (arg->type != STACK_OBJ)
1921 /* FIXME: check type compatibility */
1925 case MONO_TYPE_MVAR:
1926 /* FIXME: all the arguments must be references for now,
1927 * later look inside cfg and see if the arg num is
1928 * really a reference
/* Type variables only appear under generic sharing. */
1930 g_assert (cfg->generic_sharing_context);
1931 if (arg->type != STACK_OBJ)
1935 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
1941 * Prepare arguments for passing to a function call.
1942 * Return a non-zero value if the arguments can't be passed to the given
1944 * The type checks are not yet complete and some conversions may need
1945 * casts on 32 or 64 bit architectures.
1947 * FIXME: implement this using target_type_is_incompatible ()
1950 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
1952 MonoType *simple_type;
/* The implicit 'this' argument must be a reference, managed pointer
 * or native pointer. */
1956 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
1960 for (i = 0; i < sig->param_count; ++i) {
1961 if (sig->params [i]->byref) {
1962 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
1966 simple_type = sig->params [i];
/* Resolve shared-generic type variables to their basic type. */
1967 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
1969 switch (simple_type->type) {
1970 case MONO_TYPE_VOID:
1975 case MONO_TYPE_BOOLEAN:
1978 case MONO_TYPE_CHAR:
1981 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
1987 case MONO_TYPE_FNPTR:
1988 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
1991 case MONO_TYPE_CLASS:
1992 case MONO_TYPE_STRING:
1993 case MONO_TYPE_OBJECT:
1994 case MONO_TYPE_SZARRAY:
1995 case MONO_TYPE_ARRAY:
1996 if (args [i]->type != STACK_OBJ)
2001 if (args [i]->type != STACK_I8)
2006 if (args [i]->type != STACK_R8)
2009 case MONO_TYPE_VALUETYPE:
/* Enums are checked as their underlying integral type. */
2010 if (simple_type->data.klass->enumtype) {
2011 simple_type = simple_type->data.klass->enum_basetype;
2014 if (args [i]->type != STACK_VTYPE)
2017 case MONO_TYPE_TYPEDBYREF:
2018 if (args [i]->type != STACK_VTYPE)
2021 case MONO_TYPE_GENERICINST:
/* Retry with the generic type definition's byval type. */
2022 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2026 g_error ("unknown type 0x%02x in check_call_signature",
/*
 * callvirt_to_call:
 * Maps a *CALLVIRT opcode to the corresponding direct *CALL opcode, used
 * when a virtual call site can be statically devirtualized.  Asserts on
 * opcodes with no virtual form.
 */
2034 callvirt_to_call (int opcode)
2039 case OP_VOIDCALLVIRT:
2048 g_assert_not_reached ();
/*
 * callvirt_to_call_membase:
 * Maps a *CALLVIRT opcode to its *CALL_MEMBASE counterpart, where the call
 * target address is loaded from [basereg + offset] (vtable/IMT dispatch).
 * Asserts on opcodes with no membase form.
 */
2055 callvirt_to_call_membase (int opcode)
2059 return OP_CALL_MEMBASE;
2060 case OP_VOIDCALLVIRT:
2061 return OP_VOIDCALL_MEMBASE;
2063 return OP_FCALL_MEMBASE;
2065 return OP_LCALL_MEMBASE;
2067 return OP_VCALL_MEMBASE;
2069 g_assert_not_reached ();
2075 #ifdef MONO_ARCH_HAVE_IMT
/*
 * emit_imt_argument:
 * Emits the hidden IMT (interface method table) argument for an interface
 * call.  On targets with a dedicated IMT register the method identifier is
 * moved/loaded into MONO_ARCH_IMT_REG; otherwise the arch back end decides
 * how to pass it via mono_arch_emit_imt_argument ().
 */
2077 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
2079 #ifdef MONO_ARCH_IMT_REG
2080 int method_reg = alloc_preg (cfg);
/* Either an explicit imt_arg was supplied, or the method constant is
 * materialized (AOT-patchable under compile_aot). */
2083 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2084 } else if (cfg->compile_aot) {
2085 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
2088 MONO_INST_NEW (cfg, ins, OP_PCONST);
2089 ins->inst_p0 = call->method;
2090 ins->dreg = method_reg;
2091 MONO_ADD_INS (cfg->cbb, ins);
2094 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2096 mono_arch_emit_imt_argument (cfg, call, imt_arg);
/*
 * mono_patch_info_new:
 * Allocates a MonoJumpInfo patch descriptor from mempool @mp and fills in
 * its ip/type/target fields.  The returned memory is owned by the pool.
 */
2101 static MonoJumpInfo *
2102 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2104 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2108 ji->data.target = target;
2113 inline static MonoInst*
2114 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args);
/*
 * mono_emit_call_args:
 * Creates a MonoCallInst for a call with signature @sig and argument
 * instructions @args, selecting the opcode from the return type (@calli
 * for indirect, @virtual for virtual dispatch).  Valuetype returns get a
 * temporary local plus an OP_OUTARG_VTRETADDR that carries its address;
 * other non-void returns get a fresh dreg.  Finishes by letting the arch
 * back end lower the argument passing.
 */
2116 inline static MonoCallInst *
2117 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2118 MonoInst **args, int calli, int virtual)
2121 #ifdef MONO_ARCH_SOFT_FLOAT
2125 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2128 call->signature = sig;
2130 type_to_eval_stack_type ((cfg), sig->ret, &call->inst);
2132 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
/* Valuetype return: allocate a temp to receive the value. */
2133 MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
2136 temp->backend.is_pinvoke = sig->pinvoke;
2139 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2140 * address of return value to increase optimization opportunities.
2141 * Before vtype decomposition, the dreg of the call ins itself represents the
2142 * fact the call modifies the return value. After decomposition, the call will
2143 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2144 * will be transformed into an LDADDR.
2146 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2147 loada->dreg = alloc_preg (cfg);
2148 loada->inst_p0 = temp;
2149 /* We reference the call too since call->dreg could change during optimization */
2150 loada->inst_p1 = call;
2151 MONO_ADD_INS (cfg->cbb, loada);
2153 call->inst.dreg = temp->dreg;
2155 call->vret_var = loada;
2156 } else if (!MONO_TYPE_IS_VOID (sig->ret))
2157 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2159 #ifdef MONO_ARCH_SOFT_FLOAT
2161 * If the call has a float argument, we would need to do an r8->r4 conversion using
2162 * an icall, but that cannot be done during the call sequence since it would clobber
2163 * the call registers + the stack. So we do it before emitting the call.
2165 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2167 MonoInst *in = call->args [i];
/* Index 0 is 'this' when sig->hasthis; real params start after. */
2169 if (i >= sig->hasthis)
2170 t = sig->params [i - sig->hasthis];
2172 t = &mono_defaults.int_class->byval_arg;
2173 t = mono_type_get_underlying_type (t);
2175 if (!t->byref && t->type == MONO_TYPE_R4) {
2176 MonoInst *iargs [1];
/* Soft float: convert the r8 stack value to an r4 argument
 * via an icall before the call sequence starts. */
2180 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2182 /* The result will be in an int vreg */
2183 call->args [i] = conv;
/* Let the architecture back end lower argument passing. */
2188 mono_arch_emit_call (cfg, call);
2190 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2191 cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * mono_emit_calli:
 * Emits an indirect call through the address in @addr with signature @sig,
 * returning the call instruction (its dreg holds the result, if any).
 */
2196 inline static MonoInst*
2197 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr)
2199 MonoCallInst *call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE);
/* For *_REG call opcodes, sreg1 carries the target address. */
2201 call->inst.sreg1 = addr->dreg;
2203 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2205 return (MonoInst*)call;
/*
 * mono_emit_rgctx_calli:
 * Like mono_emit_calli, but additionally passes the runtime-generic-context
 * argument @rgctx_arg in the dedicated MONO_ARCH_RGCTX_REG.  Only available
 * on targets that define that register; asserts otherwise.
 */
2208 inline static MonoInst*
2209 mono_emit_rgctx_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *rgctx_arg)
2211 #ifdef MONO_ARCH_RGCTX_REG
2216 rgctx_reg = mono_alloc_preg (cfg);
2217 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2219 call = (MonoCallInst*)mono_emit_calli (cfg, sig, args, addr);
2221 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2222 cfg->uses_rgctx_reg = TRUE;
2224 return (MonoInst*)call;
2226 g_assert_not_reached ();
/*
 * mono_emit_method_call_full:
 * Emits a call to @method with signature @sig and arguments @args.  A
 * non-NULL @this makes the call (potentially) virtual; @imt_arg, when set,
 * is the hidden IMT argument for interface dispatch.  Devirtualizes where
 * possible (non-virtual, final, or sealed-class methods), special-cases
 * remoting proxies, string ctors and delegate Invoke, and otherwise emits
 * vtable or IMT membase dispatch.  Returns the call instruction.
 */
2232 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2233 MonoInst **args, MonoInst *this, MonoInst *imt_arg)
2235 gboolean virtual = this != NULL;
2236 gboolean enable_for_aot = TRUE;
2239 if (method->string_ctor) {
2240 /* Create the real signature */
2241 /* FIXME: Cache these */
/* String ctors actually return the string object. */
2242 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_full (cfg->mempool, sig);
2243 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2248 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual);
/* Non-virtual calls on MarshalByRef/object receivers may need the
 * remoting-check wrapper (transparent proxy detection). */
2250 if (this && sig->hasthis &&
2251 (method->klass->marshalbyref || method->klass == mono_defaults.object_class) &&
2252 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && !MONO_CHECK_THIS (this)) {
2253 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2255 call->method = method;
2257 call->inst.flags |= MONO_INST_HAS_METHOD;
2258 call->inst.inst_left = this;
2261 int vtable_reg, slot_reg, this_reg;
2263 this_reg = this->dreg;
2265 if ((!cfg->compile_aot || enable_for_aot) &&
2266 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2267 ((method->flags & METHOD_ATTRIBUTE_FINAL) &&
2268 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK))) {
2270 * the method is not virtual, we just need to ensure this is not null
2271 * and then we can call the method directly.
2273 if (method->klass->marshalbyref || method->klass == mono_defaults.object_class) {
2274 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
/* String ctors take no 'this', so skip the null check. */
2277 if (!method->string_ctor) {
2278 cfg->flags |= MONO_CFG_HAS_CHECK_THIS;
2279 MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, this_reg);
2280 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, this_reg);
2283 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2285 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2287 return (MonoInst*)call;
2290 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
/* Delegate Invoke: call through delegate->invoke_impl directly. */
2291 if ((method->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
2292 /* Make a call to delegate->invoke_impl */
2293 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2294 call->inst.inst_basereg = this_reg;
2295 call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2296 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2298 return (MonoInst*)call;
/* Virtual but statically resolvable: final method or sealed class. */
2302 if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
2303 ((method->flags & METHOD_ATTRIBUTE_FINAL) ||
2304 (method->klass && method->klass->flags & TYPE_ATTRIBUTE_SEALED))) {
2306 * the method is virtual, but we can statically dispatch since either
2307 * it's class or the method itself are sealed.
2308 * But first we need to ensure it's not a null reference.
2310 cfg->flags |= MONO_CFG_HAS_CHECK_THIS;
2311 MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, this_reg);
2312 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, this_reg);
2314 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2315 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2317 return (MonoInst*)call;
/* True virtual dispatch: load the slot from the vtable (or IMT). */
2320 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2322 /* Initialize method->slot */
2323 mono_class_setup_vtable (method->klass);
2325 vtable_reg = alloc_preg (cfg);
2326 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2327 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2329 #ifdef MONO_ARCH_HAVE_IMT
/* Interface call via IMT: slots live at negative offsets
 * before the vtable. */
2331 guint32 imt_slot = mono_method_get_imt_slot (method);
2332 emit_imt_argument (cfg, call, imt_arg);
2333 slot_reg = vtable_reg;
2334 call->inst.inst_offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
/* Fallback interface dispatch via the interface-offsets table. */
2337 if (slot_reg == -1) {
2338 slot_reg = alloc_preg (cfg);
2339 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2340 call->inst.inst_offset = method->slot * SIZEOF_VOID_P;
/* Class virtual call: regular vtable slot. */
2343 slot_reg = vtable_reg;
2344 call->inst.inst_offset = G_STRUCT_OFFSET (MonoVTable, vtable) + (method->slot * SIZEOF_VOID_P);
2346 g_assert (mono_method_signature (method)->generic_param_count);
2347 emit_imt_argument (cfg, call, imt_arg);
2351 call->inst.sreg1 = slot_reg;
2352 call->virtual = TRUE;
2355 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2357 return (MonoInst*)call;
/*
 * mono_emit_rgctx_method_call_full:
 * Like mono_emit_method_call_full, but additionally passes @vtable_arg as
 * the runtime-generic-context argument in MONO_ARCH_RGCTX_REG (targets
 * without that register take the #else path, elided here).
 */
2361 mono_emit_rgctx_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2362 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *vtable_arg)
2369 #ifdef MONO_ARCH_RGCTX_REG
2370 rgctx_reg = mono_alloc_preg (cfg);
2371 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
2376 ins = mono_emit_method_call_full (cfg, method, sig, args, this, imt_arg);
2378 call = (MonoCallInst*)ins;
2380 #ifdef MONO_ARCH_RGCTX_REG
2381 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2382 cfg->uses_rgctx_reg = TRUE;
/*
 * mono_emit_method_call:
 * Convenience wrapper: emits a call to @method using its own signature and
 * no IMT argument.
 */
2391 static inline MonoInst*
2392 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2394 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), args, this, NULL);
/*
 * mono_emit_native_call:
 * Emits a direct call to the native function @func with managed signature
 * @sig and returns the call instruction.
 */
2398 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2405 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE);
2408 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2410 return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 * Emits a call to the registered JIT icall identified by its address
 * @func, going through the icall's wrapper and registered signature.
 */
2413 inline static MonoInst*
2414 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2416 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2420 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2424 * mono_emit_abs_call:
2426 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2428 inline static MonoInst*
2429 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2430 MonoMethodSignature *sig, MonoInst **args)
2432 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2436 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
/* Remember the patch info so the resolver can map the fake address
 * back to the real target; table created lazily. */
2439 if (cfg->abs_patches == NULL)
2440 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2441 g_hash_table_insert (cfg->abs_patches, ji, ji);
2442 ins = mono_emit_native_call (cfg, ji, sig, args);
2443 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * get_memcpy_method:
 * Returns the corlib-internal String.memcpy(dest, src, n) helper, cached
 * in a function-local static after the first lookup.
 * NOTE(review): the static cache is not obviously synchronized here —
 * presumably callers hold the JIT lock; confirm before relying on it.
 */
2448 get_memcpy_method (void)
2450 static MonoMethod *memcpy_method = NULL;
2451 if (!memcpy_method) {
2452 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2454 g_error ("Old corlib found. Install a new one");
2456 return memcpy_method;
2460 * Emit code to copy a valuetype of type @klass whose address is stored in
2461 * @src->dreg to memory whose address is stored at @dest->dreg.
/* @native selects the marshalled (native) size instead of the managed one.
 * Small copies are inlined; larger ones call the corlib memcpy helper. */
2464 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
2466 MonoInst *iargs [3];
2469 MonoMethod *memcpy_method;
2473 * This check breaks with spilled vars... need to handle it during verification anyway.
2474 * g_assert (klass && klass == src->klass && klass == dest->klass);
2478 n = mono_class_native_size (klass, &align);
2480 n = mono_class_value_size (klass, &align);
/* Inline the copy when intrinsics are enabled and the type is small. */
2482 if ((cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
2483 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
2484 mini_emit_memcpy2 (cfg, dest->dreg, 0, src->dreg, 0, n, align);
/* Otherwise call String.memcpy (dest, src, n). */
2488 EMIT_NEW_ICONST (cfg, iargs [2], n);
2490 memcpy_method = get_memcpy_method ();
2491 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *
 *   Return the corlib String.memset(3) helper used for valuetype
 * initialization, caching it in a function-local static. Aborts via
 * g_error () if the installed corlib lacks it. NOTE(review): same unlocked
 * lazy-init pattern as get_memcpy_method () — confirm thread-safety
 * expectations in the full source.
 */
2496 get_memset_method (void)
2498 static MonoMethod *memset_method = NULL;
2499 if (!memset_method) {
2500 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
2502 g_error ("Old corlib found. Install a new one");
2504 return memset_method;
/*
 * mini_emit_initobj:
 *
 *   Emit code to zero-initialize a valuetype of class KLASS at the address
 * in dest->dreg (CIL initobj). Small types (<= 5 pointers) are zeroed inline
 * with mini_emit_memset (); larger ones call the corlib memset helper with
 * (dest, 0, n).
 */
2508 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
2510 MonoInst *iargs [3];
2513 MonoMethod *memset_method;
2515 /* FIXME: Optimize this for the case when dest is an LDADDR */
2517 mono_class_init (klass);
2518 n = mono_class_value_size (klass, &align);
/* Fast path: inline zeroing for small valuetypes. */
2520 if (n <= sizeof (gpointer) * 5) {
2521 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
/* Slow path: memset (dest, 0, n) through the corlib helper. */
2524 memset_method = get_memset_method ();
2526 EMIT_NEW_ICONST (cfg, iargs [1], 0);
2527 EMIT_NEW_ICONST (cfg, iargs [2], n);
2528 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 *
 *   Emit IR that loads the runtime generic context for METHOD. Only valid
 * when compiling shared generic code (asserted). Three cases:
 *   - CONTEXT_USED has MONO_GENERIC_CONTEXT_USED_METHOD: load the MRGCTX
 *     from the vtable variable slot (method must be inflated with a
 *     method_inst, asserted);
 *   - static methods / valuetype classes: load the vtable variable, and if
 *     the method also has a method_inst, indirect through
 *     MonoMethodRuntimeGenericContext.class_vtable;
 *   - otherwise: load the vtable from the `this` argument
 *     (MonoObject.vtable).
 * NOTE(review): several return statements are elided in this view.
 */
2533 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
2535 MonoInst *this = NULL;
2537 g_assert (cfg->generic_sharing_context);
/* Load `this` only when it exists and the method context is not used. */
2539 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
2540 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
2541 !method->klass->valuetype)
2542 EMIT_NEW_ARGLOAD (cfg, this, 0);
2544 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
2545 MonoInst *mrgctx_loc, *mrgctx_var;
2548 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
2550 mrgctx_loc = mono_get_vtable_var (cfg);
2551 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
2554 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
2555 MonoInst *vtable_loc, *vtable_var;
2559 vtable_loc = mono_get_vtable_var (cfg);
2560 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
/* The variable actually holds an MRGCTX; fetch the class vtable out of it. */
2562 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
2563 MonoInst *mrgctx_var = vtable_var;
2566 vtable_reg = alloc_preg (cfg);
2567 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
2568 vtable_var->type = STACK_PTR;
/* Fallback: read the vtable directly from the `this` object. */
2574 int vtable_reg, res_reg;
2576 vtable_reg = alloc_preg (cfg);
2577 res_reg = alloc_preg (cfg);
2578 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 *
 *   Allocate (from MP) and fill a MonoJumpInfoRgctxEntry describing an RGCTX
 * slot: which METHOD it belongs to, whether the method RGCTX (IN_MRGCTX) is
 * used, the wrapped patch (PATCH_TYPE/PATCH_DATA) and the INFO_TYPE of the
 * slot. NOTE(review): the return statement is elided in this view.
 */
2583 static MonoJumpInfoRgctxEntry *
2584 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, int info_type)
2586 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
2587 res->method = method;
2588 res->in_mrgctx = in_mrgctx;
2589 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
2590 res->data->type = patch_type;
2591 res->data->data.target = patch_data;
2592 res->info_type = info_type;
/*
 * emit_rgctx_fetch:
 *
 *   Emit a call to the RGCTX lazy-fetch trampoline which resolves ENTRY
 * using the context in RGCTX. Routed through mono_emit_abs_call () with a
 * MONO_PATCH_INFO_RGCTX_FETCH patch so the trampoline address is resolved
 * at patch time.
 */
2597 static inline MonoInst*
2598 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
2600 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 *
 *   Emit code that fetches the RGCTX information of kind RGCTX_TYPE for
 * KLASS (a MONO_PATCH_INFO_CLASS entry) from the runtime generic context of
 * the current method.
 */
2604 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
2605 MonoClass *klass, int rgctx_type)
2607 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
2608 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2610 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_method:
 *
 *   Emit code that fetches the RGCTX information of kind RGCTX_TYPE for
 * CMETHOD (a MONO_PATCH_INFO_METHODCONST entry) from the runtime generic
 * context of the current method.
 */
2614 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
2615 MonoMethod *cmethod, int rgctx_type)
2617 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
2618 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2620 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *
 *   Emit code that fetches the RGCTX information of kind RGCTX_TYPE for
 * FIELD (a MONO_PATCH_INFO_FIELD entry) from the runtime generic context of
 * the current method.
 */
2624 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
2625 MonoClassField *field, int rgctx_type)
2627 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
2628 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2630 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * mini_emit_check_array_type:
 *
 *   Emit a runtime check that OBJ is an instance of ARRAY_CLASS, throwing
 * ArrayTypeMismatchException otherwise. The comparison is done against the
 * vtable (or the class when MONO_OPT_SHARED is on, since vtables are
 * per-domain), with AOT-safe constant loads and an RGCTX fetch under shared
 * generic code.
 */
2634 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
2636 int vtable_reg = alloc_preg (cfg);
2637 int context_used = 0;
2639 if (cfg->generic_sharing_context)
2640 context_used = mono_class_check_context_used (array_class);
2642 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Shared code: compare MonoClass pointers, not (domain-specific) vtables. */
2644 if (cfg->opt & MONO_OPT_SHARED) {
2645 int class_reg = alloc_preg (cfg);
2646 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2647 if (cfg->compile_aot) {
2648 int klass_reg = alloc_preg (cfg);
2649 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
2650 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
2652 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
/* Shared generic code: the expected vtable comes from the RGCTX. */
2654 } else if (context_used) {
2655 MonoInst *vtable_ins;
2657 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
2658 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
2660 if (cfg->compile_aot) {
2661 int vt_reg = alloc_preg (cfg);
2662 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, mono_class_vtable (cfg->domain, array_class));
2663 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
2665 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, mono_class_vtable (cfg->domain, array_class));
2669 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
/*
 * save_cast_details:
 *
 *   When the --debug=casts option (better_cast_details) is enabled, emit code
 * that records the source class (read from OBJ_REG's vtable) and the target
 * class KLASS into the per-thread MonoJitTlsData (class_cast_from/_to), so a
 * later InvalidCastException can report both types. No-op otherwise. If the
 * platform lacks the JIT TLS intrinsic, prints an error to stderr.
 * NOTE(review): lines are elided in this view; presumably the error path
 * returns before the stores — confirm in the full source.
 */
2673 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg)
2675 if (mini_get_debug_options ()->better_cast_details) {
2676 int to_klass_reg = alloc_preg (cfg);
2677 int vtable_reg = alloc_preg (cfg);
2678 int klass_reg = alloc_preg (cfg);
2679 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
/* Fixed stray "." that was printed after the newline. */
2682 fprintf (stderr, "error: --debug=casts not supported on this platform.\n");
2686 MONO_ADD_INS (cfg->cbb, tls_get);
2687 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2688 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Record source class of the cast in TLS. */
2690 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
2691 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
/* Record target class of the cast in TLS. */
2692 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
/*
 * reset_cast_details:
 *
 *   Counterpart of save_cast_details (): when better_cast_details is
 * enabled, clear the recorded cast information in the per-thread
 * MonoJitTlsData. Zeroing class_cast_from alone is sufficient to mark the
 * record invalid.
 */
2697 reset_cast_details (MonoCompile *cfg)
2699 /* Reset the variables holding the cast details */
2700 if (mini_get_debug_options ()->better_cast_details) {
2701 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
2703 MONO_ADD_INS (cfg->cbb, tls_get);
2704 /* It is enough to reset the from field */
2705 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
2710 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
2711 * generic code is generated.
/*
 * handle_unbox_nullable:
 *
 *   Unbox VAL into a Nullable<T> of class KLASS by calling the corlib
 * Nullable Unbox(1) helper. Under shared generic code (CONTEXT_USED != 0)
 * the helper's address is fetched from the RGCTX and called indirectly with
 * the RGCTX argument; otherwise a direct call is emitted.
 */
2714 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
2716 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
2719 MonoInst *rgctx, *addr;
2721 /* FIXME: What if the class is shared? We might not
2722 have to get the address of the method from the
2724 addr = emit_get_rgctx_method (cfg, context_used, method,
2725 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
2727 rgctx = emit_get_rgctx (cfg, method, context_used);
2729 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
2731 return mono_emit_method_call (cfg, method, &val, NULL);
/*
 * handle_unbox:
 *
 *   Emit the unbox of sp[0] to valuetype KLASS: verify at runtime that the
 * object's vtable rank is 0 and that its element class matches KLASS's
 * (throwing InvalidCastException otherwise), then produce the address of the
 * boxed payload, i.e. obj + sizeof (MonoObject). Under shared generic code
 * the expected element class comes from the RGCTX.
 */
2736 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
2740 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
2741 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
2742 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
2743 int rank_reg = alloc_dreg (cfg ,STACK_I4);
2745 obj_reg = sp [0]->dreg;
2746 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2747 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
2749 /* FIXME: generics */
2750 g_assert (klass->rank == 0);
/* A boxed valuetype can never be an array: rank must be 0. */
2753 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
2754 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
2756 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2757 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
/* Shared generic code: fetch the expected element class from the RGCTX. */
2760 MonoInst *element_class;
2762 /* This assertion is from the unboxcast insn */
2763 g_assert (klass->rank == 0);
2765 element_class = emit_get_rgctx_klass (cfg, context_used,
2766 klass->element_class, MONO_RGCTX_INFO_KLASS);
2768 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
2769 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
2771 save_cast_details (cfg, klass->element_class, obj_reg);
2772 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
2773 reset_cast_details (cfg);
/* Result: address of the payload inside the boxed object. */
2776 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_PTR), obj_reg, sizeof (MonoObject));
2777 MONO_ADD_INS (cfg->cbb, add);
2778 add->type = STACK_MP;
/*
 * handle_alloc:
 *
 *   Emit the allocation of a new object of class KLASS (FOR_BOX selects the
 * boxing variant of the allocator). Paths:
 *   - MONO_OPT_SHARED: call mono_object_new (domain, class);
 *   - AOT out-of-line corlib code: call a specialized mscorlib helper by
 *     token index to avoid relocations;
 *   - a GC managed allocator, if available for the vtable;
 *   - otherwise the allocation function chosen by
 *     mono_class_get_allocation_ftn (), which may take the instance size in
 *     words (pass_lw) or just the vtable.
 */
2785 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box)
2787 MonoInst *iargs [2];
2790 if (cfg->opt & MONO_OPT_SHARED) {
2791 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
2792 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
2794 alloc_ftn = mono_object_new;
2795 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib) {
2796 /* This happens often in argument checking code, eg. throw new FooException... */
2797 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
2798 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
2799 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
2801 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2802 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
2805 if (managed_alloc) {
2806 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
2807 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
2809 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* pass_lw path: allocator wants the instance size in pointer-sized words. */
2811 guint32 lw = vtable->klass->instance_size;
2812 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
2813 EMIT_NEW_ICONST (cfg, iargs [0], lw);
2814 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
2817 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
2821 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/*
 * handle_alloc_from_inst:
 *
 *   Variant of handle_alloc () for shared generic code, where the vtable is
 * not known at compile time but supplied at runtime by DATA_INST. Falls back
 * to mono_object_new () under MONO_OPT_SHARED and to
 * mono_object_new_specific () otherwise; the managed-allocator fast path is
 * disabled (managed_alloc stays NULL) because the vtable of an open class
 * cannot be obtained here.
 */
2825 handle_alloc_from_inst (MonoCompile *cfg, MonoClass *klass, MonoInst *data_inst,
2828 MonoInst *iargs [2];
2829 MonoMethod *managed_alloc = NULL;
2833 FIXME: we cannot get managed_alloc here because we can't get
2834 the class's vtable (because it's not a closed class)
2836 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2837 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
2840 if (cfg->opt & MONO_OPT_SHARED) {
2841 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
2842 iargs [1] = data_inst;
2843 alloc_ftn = mono_object_new;
2845 if (managed_alloc) {
2846 iargs [0] = data_inst;
2847 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
2850 iargs [0] = data_inst;
2851 alloc_ftn = mono_object_new_specific;
2854 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/*
 * handle_box:
 *
 *   Emit the boxing of VAL to class KLASS: Nullable<T> goes through the
 * corlib Nullable Box(1) helper; otherwise allocate the box object and store
 * the value right after the MonoObject header.
 */
2858 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass)
2860 MonoInst *alloc, *ins;
2862 if (mono_class_is_nullable (klass)) {
2863 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
2864 return mono_emit_method_call (cfg, method, &val, NULL);
2867 alloc = handle_alloc (cfg, klass, TRUE);
2869 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * handle_box_from_inst:
 *
 *   Shared-generic-code variant of handle_box (): the runtime vtable is
 * supplied by DATA_INST. Nullable<T> boxing calls the Box(1) helper
 * indirectly through an address fetched from the RGCTX; other types allocate
 * via handle_alloc_from_inst () and store the value after the MonoObject
 * header.
 */
2875 handle_box_from_inst (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used, MonoInst *data_inst)
2877 MonoInst *alloc, *ins;
2879 if (mono_class_is_nullable (klass)) {
2880 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
2881 /* FIXME: What if the class is shared? We might not
2882 have to get the method address from the RGCTX. */
2883 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
2884 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
2885 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2887 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
2889 alloc = handle_alloc_from_inst (cfg, klass, data_inst, TRUE);
2891 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * handle_castclass:
 *
 *   Emit the CIL castclass of SRC to KLASS: NULL passes through; interfaces
 * are checked via mini_emit_iface_cast (); sealed non-array classes (when
 * not AOT/shared) are checked by a single vtable comparison; everything else
 * goes through mini_emit_castclass (). A failed check throws
 * InvalidCastException; cast details are recorded/reset around the checks
 * for --debug=casts.
 */
2898 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
2900 MonoBasicBlock *is_null_bb;
2901 int obj_reg = src->dreg;
2902 int vtable_reg = alloc_preg (cfg);
2904 NEW_BBLOCK (cfg, is_null_bb);
/* null always casts successfully. */
2906 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
2907 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
2909 save_cast_details (cfg, klass, obj_reg);
2911 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2912 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2913 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
2915 int klass_reg = alloc_preg (cfg);
2917 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed class fast path: a single pointer comparison suffices. */
2919 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
2920 /* the remoting code is broken, access the class for now */
2922 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
2923 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
2925 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2926 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
2928 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
2930 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2931 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, is_null_bb);
2935 MONO_START_BB (cfg, is_null_bb);
2937 reset_cast_details (cfg);
/*
 * handle_isinst:
 *
 *   Emit the CIL isinst of SRC against KLASS. Produces SRC itself when the
 * object is NULL or an instance of KLASS, and NULL (0) otherwise. The result
 * register is assigned up-front so the false branch only needs to overwrite
 * it with 0. Special cases: interfaces, arrays (rank + element-class checks,
 * with enum/object corner cases and an SZARRAY bounds check), Nullable, and
 * a sealed-class vtable-compare fast path.
 */
2943 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
2946 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
2947 int obj_reg = src->dreg;
2948 int vtable_reg = alloc_preg (cfg);
2949 int res_reg = alloc_preg (cfg);
2951 NEW_BBLOCK (cfg, is_null_bb);
2952 NEW_BBLOCK (cfg, false_bb);
2953 NEW_BBLOCK (cfg, end_bb);
2955 /* Do the assignment at the beginning, so the other assignment can be if converted */
2956 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
2957 ins->type = STACK_OBJ;
/* null is "not an instance" but the result is the (null) object itself. */
2960 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
2961 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
2963 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2964 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2965 /* the is_null_bb target simply copies the input register to the output */
2966 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
2968 int klass_reg = alloc_preg (cfg);
2970 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Array case: compare rank, then test the element (cast) class. */
2973 int rank_reg = alloc_preg (cfg);
2974 int eclass_reg = alloc_preg (cfg);
2976 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
2977 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
2978 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
2979 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2980 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
/* Enum/object element-class corner cases of the array variance rules. */
2981 if (klass->cast_class == mono_defaults.object_class) {
2982 int parent_reg = alloc_preg (cfg);
2983 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
2984 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
2985 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
2986 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
2987 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
2988 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
2989 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
2990 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
2991 } else if (klass->cast_class == mono_defaults.enum_class) {
2992 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
2993 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
2994 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
2995 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
2997 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
2998 /* Check that the object is a vector too */
2999 int bounds_reg = alloc_preg (cfg);
3000 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
3001 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
3002 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3005 /* the is_null_bb target simply copies the input register to the output */
3006 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3008 } else if (mono_class_is_nullable (klass)) {
3009 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3010 /* the is_null_bb target simply copies the input register to the output */
3011 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
/* Sealed class fast path: a single vtable comparison decides. */
3013 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3014 /* the remoting code is broken, access the class for now */
3016 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3017 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3019 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3020 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3022 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3023 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
3025 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3026 /* the is_null_bb target simply copies the input register to the output */
3027 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, is_null_bb);
/* false: overwrite the result with NULL. */
3032 MONO_START_BB (cfg, false_bb);
3034 MONO_EMIT_NEW_ICONST (cfg, res_reg, 0);
3035 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3037 MONO_START_BB (cfg, is_null_bb);
3039 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *
 *   Emit the "complex isinst" used when remoting proxies are possible.
 * Takes an object reference and KLASS and produces an integer: 0 when the
 * object is an instance of KLASS, 1 when it is not, 2 when it is a
 * transparent proxy whose type cannot be determined here (a managed check is
 * then needed). Interfaces consult the proxy's custom_type_info; other types
 * go through the remote class' proxy_class.
 */
3045 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3047 /* This opcode takes as input an object reference and a class, and returns:
3048 0) if the object is an instance of the class,
3049 1) if the object is not instance of the class,
3050 2) if the object is a proxy whose type cannot be determined */
3053 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
3054 int obj_reg = src->dreg;
3055 int dreg = alloc_ireg (cfg);
3057 int klass_reg = alloc_preg (cfg);
3059 NEW_BBLOCK (cfg, true_bb);
3060 NEW_BBLOCK (cfg, false_bb);
3061 NEW_BBLOCK (cfg, false2_bb);
3062 NEW_BBLOCK (cfg, end_bb);
3063 NEW_BBLOCK (cfg, no_proxy_bb);
/* null is never an instance. */
3065 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3066 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
3068 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3069 NEW_BBLOCK (cfg, interface_fail_bb);
3071 tmp_reg = alloc_preg (cfg);
3072 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3073 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
/* Interface check failed: only a proxy with custom type info can still match. */
3074 MONO_START_BB (cfg, interface_fail_bb);
3075 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3077 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
3079 tmp_reg = alloc_preg (cfg);
3080 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3081 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3082 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
3084 tmp_reg = alloc_preg (cfg);
3085 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3086 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Non-interface: for a proxy, test against the remote class' proxy_class. */
3088 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3089 tmp_reg = alloc_preg (cfg);
3090 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3091 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3093 tmp_reg = alloc_preg (cfg);
3094 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3095 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3096 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3098 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
3099 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
3101 MONO_START_BB (cfg, no_proxy_bb);
3103 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
/* Materialize the three possible results: 1, 2, 0. */
3106 MONO_START_BB (cfg, false_bb);
3108 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3109 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3111 MONO_START_BB (cfg, false2_bb);
3113 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
3114 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3116 MONO_START_BB (cfg, true_bb);
3118 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3120 MONO_START_BB (cfg, end_bb);
3123 MONO_INST_NEW (cfg, ins, OP_ICONST);
3125 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *
 *   Emit the "complex castclass" used when remoting proxies are possible.
 * Takes an object reference and KLASS and produces an integer: 0 when the
 * object is an instance of KLASS (or NULL), 1 when it is a transparent
 * proxy whose type cannot be determined here; otherwise
 * InvalidCastException is thrown. Structure parallels handle_cisinst ().
 */
3131 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3133 /* This opcode takes as input an object reference and a class, and returns:
3134 0) if the object is an instance of the class,
3135 1) if the object is a proxy whose type cannot be determined
3136 an InvalidCastException exception is thrown otherwhise*/
3139 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
3140 int obj_reg = src->dreg;
3141 int dreg = alloc_ireg (cfg);
3142 int tmp_reg = alloc_preg (cfg);
3143 int klass_reg = alloc_preg (cfg);
3145 NEW_BBLOCK (cfg, end_bb);
3146 NEW_BBLOCK (cfg, ok_result_bb);
/* null always casts successfully. */
3148 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3149 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
3151 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3152 NEW_BBLOCK (cfg, interface_fail_bb);
3154 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3155 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
/* Interface check failed: only a proxy with custom type info survives. */
3156 MONO_START_BB (cfg, interface_fail_bb);
3157 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3159 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
3161 tmp_reg = alloc_preg (cfg);
3162 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3163 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3164 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/* Proxy with custom type info: defer the decision (result 1). */
3166 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3167 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3170 NEW_BBLOCK (cfg, no_proxy_bb);
3172 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3173 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3174 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3176 tmp_reg = alloc_preg (cfg);
3177 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3178 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3180 tmp_reg = alloc_preg (cfg);
3181 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3182 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3183 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3185 NEW_BBLOCK (cfg, fail_1_bb);
3187 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
3189 MONO_START_BB (cfg, fail_1_bb);
3191 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3192 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3194 MONO_START_BB (cfg, no_proxy_bb);
/* Ordinary object: a failing check throws inside mini_emit_castclass (). */
3196 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
3199 MONO_START_BB (cfg, ok_result_bb);
3201 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3203 MONO_START_BB (cfg, end_bb);
3206 MONO_INST_NEW (cfg, ins, OP_ICONST);
3208 ins->type = STACK_I4;
/*
 * handle_delegate_ctor:
 *
 *   Emit an inlined delegate constructor: allocate the delegate object of
 * class KLASS and fill in its target (skipped for a constant NULL target),
 * method, method_code (a per-domain slot caching the compiled code of
 * METHOD, JIT-only and non-dynamic methods only) and invoke_impl (the
 * delegate trampoline) fields. The runtime checks normally done by
 * mono_delegate_ctor () are left to the trampoline.
 */
3213 static G_GNUC_UNUSED MonoInst*
3214 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method)
3216 gpointer *trampoline;
3217 MonoInst *obj, *method_ins, *tramp_ins;
3221 obj = handle_alloc (cfg, klass, FALSE);
3223 /* Inline the contents of mono_delegate_ctor */
3225 /* Set target field */
3226 /* Optimize away setting of NULL target */
3227 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0))
3228 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
3230 /* Set method field */
3231 EMIT_NEW_METHODCONST (cfg, method_ins, method);
3232 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
3235 * To avoid looking up the compiled code belonging to the target method
3236 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
3237 * store it, and we fill it after the method has been compiled.
/* The code slot is looked up / created under the domain lock. */
3239 if (!cfg->compile_aot && !method->dynamic) {
3240 MonoInst *code_slot_ins;
3242 domain = mono_domain_get ();
3243 mono_domain_lock (domain);
3244 if (!domain_jit_info (domain)->method_code_hash)
3245 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
3246 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
3248 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
3249 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
3251 mono_domain_unlock (domain);
3253 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
3254 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
3257 /* Set invoke_impl field */
3258 trampoline = mono_create_delegate_trampoline (klass);
3259 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_ABS, trampoline);
3260 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
3262 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit the allocation of a multi-dimensional array (CIL newobj on an array
 * ctor) with RANK dimension arguments on SP, by calling the vararg
 * array-new icall for that rank through its wrapper. Marks the method as
 * containing varargs calls.
 */
3268 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
3270 MonoJitICallInfo *info;
3272 /* Need to register the icall so it gets an icall wrapper */
3273 info = mono_get_array_new_va_icall (rank);
3275 cfg->flags |= MONO_CFG_HAS_VARARGS;
3277 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
3278 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * mono_emit_load_got_addr:
 *
 *   Emit an OP_LOAD_GOTADDR that initializes the GOT variable at the very
 * start of the entry basic block (prepended before any existing code), and a
 * dummy use in the exit block so the variable stays live for the whole
 * method even though real uses are only generated by the back ends. No-op
 * when there is no GOT var or it was already allocated.
 */
3282 mono_emit_load_got_addr (MonoCompile *cfg)
3284 MonoInst *getaddr, *dummy_use;
3286 if (!cfg->got_var || cfg->got_var_allocated)
3289 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
3290 getaddr->dreg = cfg->got_var->dreg;
3292 /* Add it to the start of the first bblock */
3293 if (cfg->bb_entry->code) {
3294 getaddr->next = cfg->bb_entry->code;
3295 cfg->bb_entry->code = getaddr;
3298 MONO_ADD_INS (cfg->bb_entry, getaddr);
3300 cfg->got_var_allocated = TRUE;
3303 * Add a dummy use to keep the got_var alive, since real uses might
3304 * only be generated by the back ends.
3305 * Add it to end_bblock, so the variable's lifetime covers the whole
3307 * It would be better to make the usage of the got var explicit in all
3308 * cases when the backend needs it (i.e. calls, throw etc.), so this
3309 * wouldn't be needed.
3311 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
3312 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/*
 * mono_method_check_inlining:
 *
 *   Decide whether METHOD may be inlined into the method being compiled.
 * Rejects: shared generic code; runtime/icall/noinline/synchronized/pinvoke
 * methods; marshal-by-ref classes; methods without a header or with
 * exception clauses; methods over the size limit (MONO_INLINELIMIT env var
 * or INLINE_LENGTH_LIMIT); classes whose cctor would have to run inside the
 * inlined code; methods with declarative security (CAS); and, under
 * MONO_ARCH_SOFT_FLOAT, any R4 in the signature.
 */
3316 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
3318 MonoMethodHeader *header = mono_method_get_header (method);
3320 #ifdef MONO_ARCH_SOFT_FLOAT
3321 MonoMethodSignature *sig = mono_method_signature (method);
3325 if (cfg->generic_sharing_context)
3328 #ifdef MONO_ARCH_HAVE_LMF_OPS
/* With LMF ops, small icall/pinvoke wrappers can be worth inlining. */
3329 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3330 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
3331 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
3335 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_RUNTIME) ||
3336 (method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3337 (method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
3338 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
3339 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL) ||
3340 (method->klass->marshalbyref) ||
3341 !header || header->num_clauses)
3344 /* also consider num_locals? */
3345 /* Do the size check early to avoid creating vtables */
3346 if (getenv ("MONO_INLINELIMIT")) {
3347 if (header->code_size >= atoi (getenv ("MONO_INLINELIMIT"))) {
3350 } else if (header->code_size >= INLINE_LENGTH_LIMIT)
3354 * if we can initialize the class of the method right away, we do,
3355 * otherwise we don't allow inlining if the class needs initialization,
3356 * since it would mean inserting a call to mono_runtime_class_init()
3357 * inside the inlined code
3359 if (!(cfg->opt & MONO_OPT_SHARED)) {
3360 if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
3361 if (cfg->run_cctors && method->klass->has_cctor) {
3362 if (!method->klass->runtime_info)
3363 /* No vtable created yet */
3365 vtable = mono_class_vtable (cfg->domain, method->klass);
3368 /* This makes so that inline cannot trigger */
3369 /* .cctors: too many apps depend on them */
3370 /* running with a specific order... */
3371 if (! vtable->initialized)
3373 mono_runtime_class_init (vtable);
3375 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
3376 if (!method->klass->runtime_info)
3377 /* No vtable created yet */
3379 vtable = mono_class_vtable (cfg->domain, method->klass);
3382 if (!vtable->initialized)
/* Shared code: the cctor would run at AOT load time instead. */
3387 * If we're compiling for shared code
3388 * the cctor will need to be run at aot method load time, for example,
3389 * or at the end of the compilation of the inlining method.
3391 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
3396 * CAS - do not inline methods with declarative security
3397 * Note: this has to be before any possible return TRUE;
3399 if (mono_method_has_declsec (method))
3402 #ifdef MONO_ARCH_SOFT_FLOAT
/* Soft float: reject any float32 in the return type or parameters. */
3404 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
3406 for (i = 0; i < sig->param_count; ++i)
3407 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/*
 * mini_field_access_needs_cctor_run:
 * Decide whether a static-field access requires emitting a call to run the
 * declaring class's static constructor (.cctor) first.
 * NOTE(review): this extract is elided — interior lines (returns/braces) are
 * missing; comments below describe only the visible conditions.
 */
3415 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoVTable *vtable)
/* Already-initialized vtable needs nothing — except under AOT, where
 * initialization state at compile time cannot be trusted at run time. */
3417 if (vtable->initialized && !cfg->compile_aot)
/* beforefieldinit classes allow lazy, relaxed cctor timing. */
3420 if (vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
3423 if (!mono_class_needs_cctor_run (vtable->klass, method))
/* Instance method of the same class: the cctor must already have run
 * before any instance could exist. */
3426 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (vtable->klass == method->klass))
3427 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 * Emit IR computing the address of element `index` in the one-dimensional
 * array `arr` of element type `klass`: &arr->vector + index * element_size,
 * after a bounds check. Returns the address-producing instruction (STACK_PTR).
 * NOTE(review): elided extract — some lines (e.g. #else/#endif, returns) are
 * not visible here.
 */
3434 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index)
3438 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
3440 mono_class_init (klass);
3441 size = mono_class_array_element_size (klass);
3443 mult_reg = alloc_preg (cfg);
3444 array_reg = arr->dreg;
3445 index_reg = index->dreg;
3447 #if SIZEOF_VOID_P == 8
3448 /* The array reg is 64 bits but the index reg is only 32 */
/* Sign-extend the 32-bit index so pointer arithmetic is well-defined on LP64. */
3449 index2_reg = alloc_preg (cfg);
3450 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
3452 index2_reg = index_reg;
3455 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
3457 #if defined(__i386__) || defined(__x86_64__)
/* Fast path: power-of-two element sizes map onto a single x86 LEA with a
 * scale factor (fast_log2 converts size -> shift amount). */
3458 if (size == 1 || size == 2 || size == 4 || size == 8) {
3459 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
3461 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
3462 ins->type = STACK_PTR;
/* Generic path: explicit multiply + add + constant offset to the vector. */
3468 add_reg = alloc_preg (cfg);
3470 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
3471 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
3472 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3473 ins->type = STACK_PTR;
3474 MONO_ADD_INS (cfg->cbb, ins);
/* Only compiled when the arch has a real multiply — see the comment at the
 * call site: this lowering depends on OP_LMUL. */
3479 #ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 * Emit IR computing the element address for a rank-2 (two-dimensional)
 * array access, including per-dimension lower-bound adjustment and range
 * checks against the MonoArrayBounds records. Address formula (visible
 * below): ((realidx1 * length2) + realidx2) * element_size + &vector.
 * NOTE(review): elided extract — some declarations/returns are not visible.
 */
3481 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
3483 int bounds_reg = alloc_preg (cfg);
3484 int add_reg = alloc_preg (cfg);
3485 int mult_reg = alloc_preg (cfg);
3486 int mult2_reg = alloc_preg (cfg);
3487 int low1_reg = alloc_preg (cfg);
3488 int low2_reg = alloc_preg (cfg);
3489 int high1_reg = alloc_preg (cfg);
3490 int high2_reg = alloc_preg (cfg);
3491 int realidx1_reg = alloc_preg (cfg);
3492 int realidx2_reg = alloc_preg (cfg);
3493 int sum_reg = alloc_preg (cfg);
3498 mono_class_init (klass);
3499 size = mono_class_array_element_size (klass);
3501 index1 = index_ins1->dreg;
3502 index2 = index_ins2->dreg;
3504 /* range checking */
3505 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
3506 arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
/* Dimension 1: realidx1 = index1 - lower_bound1; throw if realidx1 >=
 * length1 (unsigned compare also catches negative results). */
3508 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
3509 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3510 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
3511 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
3512 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
3513 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
3514 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Dimension 2: same check against the second MonoArrayBounds record
 * (offset by sizeof (MonoArrayBounds)). */
3516 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
3517 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3518 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
3519 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
3520 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
3521 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
3522 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Row-major address computation. */
3524 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
3525 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
3526 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
3527 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
3528 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3530 ins->type = STACK_MP;
3532 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 * Dispatch element-address computation by array rank: rank 1 and (when the
 * arch supports OP_LMUL and intrinsics are enabled) rank 2 get inline IR;
 * anything else falls back to a marshalling wrapper that calls a runtime
 * helper. `is_set` accounts for the extra value argument of a setter.
 * NOTE(review): elided extract — return statements after the fallback call
 * are not visible.
 */
3539 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
3543 MonoMethod *addr_method;
/* Rank = parameter count of the accessor, minus the trailing value arg
 * for setters. */
3546 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
3549 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1]);
3551 #ifndef MONO_ARCH_EMULATE_MUL_DIV
3552 /* emit_ldelema_2 depends on OP_LMUL */
3553 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
3554 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
/* General case: emit a call to the generated Address wrapper. */
3558 element_size = mono_class_array_element_size (cmethod->klass->element_class);
3559 addr_method = mono_marshal_get_array_address (rank, element_size);
3560 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/*
 * mini_emit_inst_for_method:
 * Intrinsic recognizer: if `cmethod` is one of a known set of corlib methods
 * (String.get_Chars/get_Length, Object.GetType, Array.get_Rank/get_Length,
 * RuntimeHelpers.get_OffsetToStringData, Thread helpers, Interlocked ops,
 * Math, ...), emit equivalent inline IR and return the result instruction;
 * otherwise defer to the arch backend. A NULL-ish result presumably means
 * "not intrinsified, emit a normal call" — TODO confirm (returns are elided
 * in this extract).
 */
3566 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
3568 MonoInst *ins = NULL;
/* Cached lookup of System.Runtime.CompilerServices.RuntimeHelpers.
 * NOTE(review): unsynchronized static init — safe only if the JIT
 * serializes compilation; confirm against the locking model. */
3570 static MonoClass *runtime_helpers_class = NULL;
3571 if (! runtime_helpers_class)
3572 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
3573 "System.Runtime.CompilerServices", "RuntimeHelpers");
/* ---- System.String intrinsics ---- */
3575 if (cmethod->klass == mono_defaults.string_class) {
/* get_Chars: bounds-check then load the UTF-16 unit at chars[index]. */
3576 if (strcmp (cmethod->name, "get_Chars") == 0) {
3577 int dreg = alloc_ireg (cfg);
3578 int index_reg = alloc_preg (cfg);
3579 int mult_reg = alloc_preg (cfg);
3580 int add_reg = alloc_preg (cfg);
3582 #if SIZEOF_VOID_P == 8
3583 /* The array reg is 64 bits but the index reg is only 32 */
3584 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
3586 index_reg = args [1]->dreg;
3588 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
3590 #if defined(__i386__) || defined(__x86_64__)
/* x86 fast path: LEA with scale 2 (sizeof (gunichar2)). */
3591 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars))
3592 add_reg = ins->dreg;
3593 /* Avoid a warning */
3595 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
/* Generic path: index << 1 + base, then unsigned 16-bit load. */
3598 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
3599 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
3600 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
3601 add_reg, G_STRUCT_OFFSET (MonoString, chars));
3603 type_from_op (ins, NULL, NULL);
3605 } else if (strcmp (cmethod->name, "get_Length") == 0) {
3606 int dreg = alloc_ireg (cfg);
3607 /* Decompose later to allow more optimizations */
3608 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
3609 ins->type = STACK_I4;
3610 cfg->cbb->has_array_access = TRUE;
3611 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
3614 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
3615 int mult_reg = alloc_preg (cfg);
3616 int add_reg = alloc_preg (cfg);
3618 /* The corlib functions check for oob already. */
3619 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
3620 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
3621 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
/* ---- System.Object intrinsics ---- */
3624 } else if (cmethod->klass == mono_defaults.object_class) {
/* GetType: obj->vtable->type, no call needed. */
3626 if (strcmp (cmethod->name, "GetType") == 0) {
3627 int dreg = alloc_preg (cfg);
3628 int vt_reg = alloc_preg (cfg);
3629 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3630 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
3631 type_from_op (ins, NULL, NULL);
/* Hash from the object address — only valid while objects never move,
 * hence the !HAVE_MOVING_COLLECTOR guard. */
3634 #if !defined(MONO_ARCH_EMULATE_MUL_DIV) && !defined(HAVE_MOVING_COLLECTOR)
3635 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0) {
3636 int dreg = alloc_ireg (cfg);
3637 int t1 = alloc_ireg (cfg);
/* (ptr << 3) * 2654435761: Knuth-style multiplicative scatter of the
 * address bits. */
3639 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
3640 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
3641 ins->type = STACK_I4;
/* Object..ctor is empty — replace the call with a NOP. */
3645 } else if (strcmp (cmethod->name, ".ctor") == 0) {
3646 MONO_INST_NEW (cfg, ins, OP_NOP);
3647 MONO_ADD_INS (cfg->cbb, ins);
/* ---- System.Array intrinsics ---- */
3651 } else if (cmethod->klass == mono_defaults.array_class) {
/* Cheap first-letter filter before the strcmp's below. */
3652 if (cmethod->name [0] != 'g')
3655 if (strcmp (cmethod->name, "get_Rank") == 0) {
3656 int dreg = alloc_ireg (cfg);
3657 int vtable_reg = alloc_preg (cfg);
3658 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, vtable_reg,
3659 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3660 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
3661 vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3662 type_from_op (ins, NULL, NULL);
3665 } else if (strcmp (cmethod->name, "get_Length") == 0) {
3666 int dreg = alloc_ireg (cfg);
3668 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
3669 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
3670 type_from_op (ins, NULL, NULL);
/* ---- RuntimeHelpers ---- */
3675 } else if (cmethod->klass == runtime_helpers_class) {
3677 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
3678 EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
/* ---- System.Threading.Thread ---- */
3682 } else if (cmethod->klass == mono_defaults.thread_class) {
3683 if (strcmp (cmethod->name, "get_CurrentThread") == 0 && (ins = mono_arch_get_thread_intrinsic (cfg))) {
3684 ins->dreg = alloc_preg (cfg);
3685 ins->type = STACK_OBJ;
3686 MONO_ADD_INS (cfg->cbb, ins);
3688 } else if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
3689 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
3690 MONO_ADD_INS (cfg->cbb, ins);
3692 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
3693 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
3694 MONO_ADD_INS (cfg->cbb, ins);
/* ---- SZ-array generic accessors ---- */
3697 } else if (mini_class_is_system_array (cmethod->klass) &&
3698 strcmp (cmethod->name, "GetGenericValueImpl") == 0) {
3699 MonoInst *addr, *store, *load;
3700 MonoClass *eklass = mono_class_from_mono_type (fsig->params [1]);
/* Copy element arr[idx] into the by-ref destination (args [2]). */
3702 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1]);
3703 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
3704 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/* ---- System.Threading.Interlocked ---- */
3706 } else if (cmethod->klass->image == mono_defaults.corlib &&
3707 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
3708 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
3711 #if SIZEOF_VOID_P == 8
3712 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
3713 /* 64 bit reads are already atomic */
3714 MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
3715 ins->dreg = mono_alloc_preg (cfg);
3716 ins->inst_basereg = args [0]->dreg;
3717 ins->inst_offset = 0;
3718 MONO_ADD_INS (cfg->cbb, ins);
3722 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
/* Increment/Decrement: atomic add of +1/-1 via an ICONST operand. */
3723 if (strcmp (cmethod->name, "Increment") == 0) {
3724 MonoInst *ins_iconst;
3727 if (fsig->params [0]->type == MONO_TYPE_I4)
3728 opcode = OP_ATOMIC_ADD_NEW_I4;
3729 #if SIZEOF_VOID_P == 8
3730 else if (fsig->params [0]->type == MONO_TYPE_I8)
3731 opcode = OP_ATOMIC_ADD_NEW_I8;
3734 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
3735 ins_iconst->inst_c0 = 1;
3736 ins_iconst->dreg = mono_alloc_ireg (cfg);
3737 MONO_ADD_INS (cfg->cbb, ins_iconst);
3739 MONO_INST_NEW (cfg, ins, opcode);
3740 ins->dreg = mono_alloc_ireg (cfg);
3741 ins->inst_basereg = args [0]->dreg;
3742 ins->inst_offset = 0;
3743 ins->sreg2 = ins_iconst->dreg;
3744 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
3745 MONO_ADD_INS (cfg->cbb, ins);
3747 } else if (strcmp (cmethod->name, "Decrement") == 0) {
3748 MonoInst *ins_iconst;
3751 if (fsig->params [0]->type == MONO_TYPE_I4)
3752 opcode = OP_ATOMIC_ADD_NEW_I4;
3753 #if SIZEOF_VOID_P == 8
3754 else if (fsig->params [0]->type == MONO_TYPE_I8)
3755 opcode = OP_ATOMIC_ADD_NEW_I8;
3758 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
3759 ins_iconst->inst_c0 = -1;
3760 ins_iconst->dreg = mono_alloc_ireg (cfg);
3761 MONO_ADD_INS (cfg->cbb, ins_iconst);
3763 MONO_INST_NEW (cfg, ins, opcode);
3764 ins->dreg = mono_alloc_ireg (cfg);
3765 ins->inst_basereg = args [0]->dreg;
3766 ins->inst_offset = 0;
3767 ins->sreg2 = ins_iconst->dreg;
3768 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
3769 MONO_ADD_INS (cfg->cbb, ins);
3771 } else if (strcmp (cmethod->name, "Add") == 0) {
3774 if (fsig->params [0]->type == MONO_TYPE_I4)
3775 opcode = OP_ATOMIC_ADD_NEW_I4;
3776 #if SIZEOF_VOID_P == 8
3777 else if (fsig->params [0]->type == MONO_TYPE_I8)
3778 opcode = OP_ATOMIC_ADD_NEW_I8;
3782 MONO_INST_NEW (cfg, ins, opcode);
3783 ins->dreg = mono_alloc_ireg (cfg);
3784 ins->inst_basereg = args [0]->dreg;
3785 ins->inst_offset = 0;
3786 ins->sreg2 = args [1]->dreg;
/* NOTE(review): this compares against OP_ATOMIC_ADD_I4 while the opcode
 * assigned above is OP_ATOMIC_ADD_NEW_I4 — looks inconsistent with the
 * Increment/Decrement branches; verify against the opcode definitions. */
3787 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
3788 MONO_ADD_INS (cfg->cbb, ins);
3791 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
3793 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
3794 if (strcmp (cmethod->name, "Exchange") == 0) {
3797 if (fsig->params [0]->type == MONO_TYPE_I4)
3798 opcode = OP_ATOMIC_EXCHANGE_I4;
3799 #if SIZEOF_VOID_P == 8
/* On 64-bit, native int and object references are 8 bytes wide. */
3800 else if ((fsig->params [0]->type == MONO_TYPE_I8) ||
3801 (fsig->params [0]->type == MONO_TYPE_I) ||
3802 (fsig->params [0]->type == MONO_TYPE_OBJECT))
3803 opcode = OP_ATOMIC_EXCHANGE_I8;
3805 else if ((fsig->params [0]->type == MONO_TYPE_I) ||
3806 (fsig->params [0]->type == MONO_TYPE_OBJECT))
3807 opcode = OP_ATOMIC_EXCHANGE_I4;
3812 MONO_INST_NEW (cfg, ins, opcode);
3813 ins->dreg = mono_alloc_ireg (cfg);
3814 ins->inst_basereg = args [0]->dreg;
3815 ins->inst_offset = 0;
3816 ins->sreg2 = args [1]->dreg;
3817 MONO_ADD_INS (cfg->cbb, ins);
3819 switch (fsig->params [0]->type) {
3821 ins->type = STACK_I4;
3825 ins->type = STACK_I8;
3827 case MONO_TYPE_OBJECT:
3828 ins->type = STACK_OBJ;
3831 g_assert_not_reached ();
3834 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
3836 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS_IMM
3838 * Can't implement CompareExchange methods this way since they have
3839 * three arguments. We can implement one of the common cases, where the new
3840 * value is a constant.
3842 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
3843 if (fsig->params [1]->type == MONO_TYPE_I4 && args [2]->opcode == OP_ICONST) {
3844 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_IMM_I4);
3845 ins->dreg = alloc_ireg (cfg);
3846 ins->sreg1 = args [0]->dreg;
3847 ins->sreg2 = args [1]->dreg;
3848 ins->backend.data = GINT_TO_POINTER (args [2]->inst_c0);
3849 ins->type = STACK_I4;
3850 MONO_ADD_INS (cfg->cbb, ins);
3852 /* The I8 case is hard to detect, since the arg might be a conv.i8 (iconst) tree */
3854 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS_IMM */
/* ---- Misc corlib ---- */
3858 } else if (cmethod->klass->image == mono_defaults.corlib) {
3859 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
3860 && strcmp (cmethod->klass->name, "Debugger") == 0) {
3861 MONO_INST_NEW (cfg, ins, OP_BREAK);
3862 MONO_ADD_INS (cfg->cbb, ins);
/* Environment.get_IsRunningOnWindows folds to a compile-time constant. */
3865 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
3866 && strcmp (cmethod->klass->name, "Environment") == 0) {
3867 #ifdef PLATFORM_WIN32
3868 EMIT_NEW_ICONST (cfg, ins, 1);
3870 EMIT_NEW_ICONST (cfg, ins, 0);
3874 } else if (cmethod->klass == mono_defaults.math_class) {
3876 * There is general branches code for Min/Max, but it does not work for
3878 * http://everything2.com/?node_id=1051618
/* Fall through to arch-specific intrinsics for everything else. */
3882 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
3886 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 * Replace selected internal calls with equivalent managed calls; currently
 * only String.InternalAllocateStr is redirected, to the GC's managed string
 * allocator when one is available. NOTE(review): elided extract — the
 * NULL-managed_alloc fallback and final return are not visible.
 */
3889 inline static MonoInst*
3890 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
3891 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
3893 if (method->klass == mono_defaults.string_class) {
3894 /* managed string allocation support */
3895 if (strcmp (method->name, "InternalAllocateStr") == 0) {
3896 MonoInst *iargs [2];
3897 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3898 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, FALSE);
/* Managed allocator takes (vtable, length). */
3901 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3902 iargs [1] = args [0];
3903 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
/*
 * mono_save_args:
 * When setting up an inlined callee, create an OP_LOCAL variable for every
 * incoming argument (including `this`) and emit stores of the caller's
 * stack values into them, populating cfg->args.
 */
3910 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
3912 MonoInst *store, *temp;
3915 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
/* Slot 0 is `this` when present; its type comes from the stack value
 * rather than the signature. */
3916 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
3919 * FIXME: We should use *args++ = sp [0], but that would mean the arg
3920 * would be different than the MonoInst's used to represent arguments, and
3921 * the ldelema implementation can't deal with that.
3922 * Solution: When ldelema is used on an inline argument, create a var for
3923 * it, emit ldelema on that var, and emit the saving code below in
3924 * inline_method () if needed.
3926 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
3927 cfg->args [i] = temp;
3928 /* This uses cfg->args [i] which is set by the preceeding line */
3929 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
3930 store->cil_code = sp [0]->cil_code;
3935 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
3936 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
3938 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 * Debug aid for bisecting inlining bugs: only allow inlining of callees whose
 * full name starts with the prefix in $MONO_INLINE_CALLED_METHOD_NAME_LIMIT.
 * The env var is read once and cached; an empty/unset limit allows everything.
 */
3940 check_inline_called_method_name_limit (MonoMethod *called_method)
3943 static char *limit = NULL;
3945 if (limit == NULL) {
3946 char *limit_string = getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
3948 if (limit_string != NULL)
3949 limit = limit_string;
3951 limit = (char *) "";
3954 if (limit [0] != '\0') {
3955 char *called_method_name = mono_method_full_name (called_method, TRUE);
/* Prefix match: compare only the first strlen(limit) characters. */
3957 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
3958 g_free (called_method_name);
3960 //return (strncmp_result <= 0);
3961 return (strncmp_result == 0);
3968 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 * Counterpart of the callee-side check above: restrict inlining to callers
 * whose full name starts with $MONO_INLINE_CALLER_METHOD_NAME_LIMIT.
 * Same caching and prefix-match behavior.
 */
3970 check_inline_caller_method_name_limit (MonoMethod *caller_method)
3973 static char *limit = NULL;
3975 if (limit == NULL) {
3976 char *limit_string = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
3977 if (limit_string != NULL) {
3978 limit = limit_string;
3980 limit = (char *) "";
3984 if (limit [0] != '\0') {
3985 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
3987 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
3988 g_free (caller_method_name);
3990 //return (strncmp_result <= 0);
3991 return (strncmp_result == 0);
/*
 * inline_method:
 * Attempt to inline `cmethod` at the current emission point. Saves the parts
 * of `cfg` that mono_method_to_ir2 mutates, creates locals and start/end
 * bblocks for the callee, recursively converts the callee's IL, then either
 * commits (linking/merging the new bblocks and loading the return temp) or
 * aborts (restoring cfg->cbb and clearing any pending exception state).
 * Returns presumably the inline cost / success indicator — TODO confirm,
 * the return statements are elided in this extract.
 */
3999 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
4000 guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_allways)
4002 MonoInst *ins, *rvar = NULL;
4003 MonoMethodHeader *cheader;
4004 MonoBasicBlock *ebblock, *sbblock;
4006 MonoMethod *prev_inlined_method;
4007 MonoInst **prev_locals, **prev_args;
4008 MonoType **prev_arg_types;
4009 guint prev_real_offset;
4010 GHashTable *prev_cbb_hash;
4011 MonoBasicBlock **prev_cil_offset_to_bb;
4012 MonoBasicBlock *prev_cbb;
4013 unsigned char* prev_cil_start;
4014 guint32 prev_cil_offset_to_bb_len;
4015 MonoMethod *prev_current_method;
4016 MonoGenericContext *prev_generic_context;
4018 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
/* Optional debug filters (see the name-limit helpers above). */
4020 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
4021 if ((! inline_allways) && ! check_inline_called_method_name_limit (cmethod))
4024 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
4025 if ((! inline_allways) && ! check_inline_caller_method_name_limit (cfg->method))
4029 if (cfg->verbose_level > 2)
4030 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4032 if (!cmethod->inline_info) {
4033 mono_jit_stats.inlineable_methods++;
4034 cmethod->inline_info = 1;
4036 /* allocate space to store the return value */
4037 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
4038 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
4041 /* allocate local variables */
4042 cheader = mono_method_get_header (cmethod);
4043 prev_locals = cfg->locals;
4044 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
4045 for (i = 0; i < cheader->num_locals; ++i)
4046 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
4048 /* allocate start and end blocks */
4049 /* This is needed so if the inline is aborted, we can clean up */
4050 NEW_BBLOCK (cfg, sbblock);
4051 sbblock->real_offset = real_offset;
4053 NEW_BBLOCK (cfg, ebblock);
4054 ebblock->block_num = cfg->num_bblocks++;
4055 ebblock->real_offset = real_offset;
/* Save every cfg field that the recursive IL conversion overwrites. */
4057 prev_args = cfg->args;
4058 prev_arg_types = cfg->arg_types;
4059 prev_inlined_method = cfg->inlined_method;
4060 cfg->inlined_method = cmethod;
4061 cfg->ret_var_set = FALSE;
4062 prev_real_offset = cfg->real_offset;
4063 prev_cbb_hash = cfg->cbb_hash;
4064 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
4065 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
4066 prev_cil_start = cfg->cil_start;
4067 prev_cbb = cfg->cbb;
4068 prev_current_method = cfg->current_method;
4069 prev_generic_context = cfg->generic_context;
/* Recursively convert the callee; `costs` is its inline cost, negative
 * on failure (see the check below). */
4071 costs = mono_method_to_ir2 (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, *ip == CEE_CALLVIRT);
4073 cfg->inlined_method = prev_inlined_method;
4074 cfg->real_offset = prev_real_offset;
4075 cfg->cbb_hash = prev_cbb_hash;
4076 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
4077 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
4078 cfg->cil_start = prev_cil_start;
4079 cfg->locals = prev_locals;
4080 cfg->args = prev_args;
4081 cfg->arg_types = prev_arg_types;
4082 cfg->current_method = prev_current_method;
4083 cfg->generic_context = prev_generic_context;
/* Commit when the cost is acceptable (magic threshold 60) or forced. */
4085 if ((costs >= 0 && costs < 60) || inline_allways) {
4086 if (cfg->verbose_level > 2)
4087 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4089 mono_jit_stats.inlined_methods++;
4091 /* always add some code to avoid block split failures */
4092 MONO_INST_NEW (cfg, ins, OP_NOP);
4093 MONO_ADD_INS (prev_cbb, ins);
4095 prev_cbb->next_bb = sbblock;
4096 link_bblock (cfg, prev_cbb, sbblock);
4099 * Get rid of the begin and end bblocks if possible to aid local
4102 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
4104 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
4105 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
4107 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
4108 MonoBasicBlock *prev = ebblock->in_bb [0];
4109 mono_merge_basic_blocks (cfg, prev, ebblock);
4117 * If the inlined method contains only a throw, then the ret var is not
4118 * set, so set it to a dummy value.
4120 if (!cfg->ret_var_set) {
4121 static double r8_0 = 0.0;
4123 switch (rvar->type) {
4125 MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
4128 MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
4133 MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
4136 MONO_INST_NEW (cfg, ins, OP_R8CONST);
4137 ins->type = STACK_R8;
/* Points at the static r8_0 above; the constant must outlive the JIT. */
4138 ins->inst_p0 = (void*)&r8_0;
4139 ins->dreg = rvar->dreg;
4140 MONO_ADD_INS (cfg->cbb, ins);
4143 MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (fsig->ret));
4146 g_assert_not_reached ();
/* Push the callee's return value back onto the evaluation stack. */
4150 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
/* Abort path: restore state and discard the callee's bblocks. */
4155 if (cfg->verbose_level > 2)
4156 printf ("INLINE ABORTED %s\n", mono_method_full_name (cmethod, TRUE));
4157 cfg->exception_type = MONO_EXCEPTION_NONE;
4158 mono_loader_clear_error ();
4160 /* This gets rid of the newly added bblocks */
4161 cfg->cbb = prev_cbb;
4167 * Some of these comments may well be out-of-date.
4168 * Design decisions: we do a single pass over the IL code (and we do bblock
4169 * splitting/merging in the few cases when it's required: a back jump to an IL
4170 * address that was not already seen as bblock starting point).
4171 * Code is validated as we go (full verification is still better left to metadata/verify.c).
4172 * Complex operations are decomposed in simpler ones right away. We need to let the
4173 * arch-specific code peek and poke inside this process somehow (except when the
4174 * optimizations can take advantage of the full semantic info of coarse opcodes).
4175 * All the opcodes of the form opcode.s are 'normalized' to opcode.
4176 * MonoInst->opcode initially is the IL opcode or some simplification of that
4177 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
4178 * opcode with value bigger than OP_LAST.
4179 * At this point the IR can be handed over to an interpreter, a dumb code generator
4180 * or to the optimizing code generator that will translate it to SSA form.
4182 * Profiling directed optimizations.
4183 * We may compile by default with few or no optimizations and instrument the code
4184 * or the user may indicate what methods to optimize the most either in a config file
4185 * or through repeated runs where the compiler applies offline the optimizations to
4186 * each method and then decides if it was worth it.
4189 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
4190 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
4191 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
4192 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
4193 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
4194 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
4195 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
4196 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; goto load_error;}
4198 /* offset from br.s -> br like opcodes */
4199 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 * TRUE when the IL offset `ip` does not start a different basic block than
 * `bb` — i.e. no bblock is registered at that offset, or it is `bb` itself.
 */
4202 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
4204 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
4206 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 * First pass over the IL in [start, end): decode each opcode and register
 * basic-block start points (via GET_BBLOCK) at every branch target and at
 * the instruction following each branch/switch. Also marks the bblock
 * containing a `throw` as out-of-line so it can be moved out of the hot
 * path. NOTE(review): elided extract — the ip-advance statements for each
 * argument kind and the error/return paths are not visible.
 */
4210 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
4212 unsigned char *ip = start;
4213 unsigned char *target;
4216 MonoBasicBlock *bblock;
4217 const MonoOpcode *opcode;
4220 cli_addr = ip - start;
4221 i = mono_opcode_value ((const guint8 **)&ip, end);
4224 opcode = &mono_opcodes [i];
/* Skip past the opcode's inline argument; only branch-like arguments
 * create bblocks. */
4225 switch (opcode->argument) {
4226 case MonoInlineNone:
4229 case MonoInlineString:
4230 case MonoInlineType:
4231 case MonoInlineField:
4232 case MonoInlineMethod:
4235 case MonoShortInlineR:
4242 case MonoShortInlineVar:
4243 case MonoShortInlineI:
/* Short branch: 1-byte signed displacement relative to next insn. */
4246 case MonoShortInlineBrTarget:
4247 target = start + cli_addr + 2 + (signed char)ip [1];
4248 GET_BBLOCK (cfg, bblock, target);
4251 GET_BBLOCK (cfg, bblock, ip);
/* Long branch: 4-byte displacement relative to next insn. */
4253 case MonoInlineBrTarget:
4254 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
4255 GET_BBLOCK (cfg, bblock, target);
4258 GET_BBLOCK (cfg, bblock, ip);
/* switch: n 4-byte targets, all relative to the instruction end. */
4260 case MonoInlineSwitch: {
4261 guint32 n = read32 (ip + 1);
4264 cli_addr += 5 + 4 * n;
4265 target = start + cli_addr;
4266 GET_BBLOCK (cfg, bblock, target);
4268 for (j = 0; j < n; ++j) {
4269 target = start + cli_addr + (gint32)read32 (ip);
4270 GET_BBLOCK (cfg, bblock, target);
4280 g_assert_not_reached ();
4283 if (i == CEE_THROW) {
4284 unsigned char *bb_start = ip - 1;
4286 /* Find the start of the bblock containing the throw */
4288 while ((bb_start >= start) && !bblock) {
4289 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
4293 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 * Resolve `token` to a MonoMethod in the context of method `m`. For wrapper
 * methods the token indexes the wrapper's own data table instead of
 * metadata. "allow_open": open constructed types are NOT rejected here
 * (contrast mini_get_method below).
 */
4302 static inline MonoMethod *
4303 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4307 if (m->wrapper_type != MONO_WRAPPER_NONE)
4308 return mono_method_get_wrapper_data (m, token);
4310 method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 * Like mini_get_method_allow_open, but when not compiling with generic
 * sharing, a method on an open constructed type is additionally rejected
 * (handling elided in this extract — presumably returns NULL; confirm).
 */
4315 static inline MonoMethod *
4316 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4318 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
4320 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 * Resolve `token` to a MonoClass in the context of `method` (wrapper data
 * table for wrappers, metadata otherwise) and ensure the class is
 * initialized before returning it.
 */
4326 static inline MonoClass*
4327 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
4331 if (method->wrapper_type != MONO_WRAPPER_NONE)
4332 klass = mono_method_get_wrapper_data (method, token);
4334 klass = mono_class_get_full (method->klass->image, token, context);
4336 mono_class_init (klass);
4341 * Returns TRUE if the JIT should abort inlining because "callee"
4342 * is influenced by security attributes.
/*
 * check_linkdemand:
 * Evaluate CAS LinkDemand security for a call from `caller` to `callee`.
 * On an ECMA link demand, emit code that throws a SecurityException at the
 * call site; otherwise record a pending SECURITY_LINKDEMAND exception on
 * the cfg (without hiding an earlier failure). Returns TRUE when inlining
 * should be aborted because the callee carries security attributes (per the
 * comment preceding this function in the file).
 */
4345 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
/* Only re-check when called for an inline candidate (cfg->method is the
 * root method being compiled). */
4349 if ((cfg->method != caller) && mono_method_has_declsec (callee)) {
4353 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
4354 if (result == MONO_JIT_SECURITY_OK)
4357 if (result == MONO_JIT_LINKDEMAND_ECMA) {
4358 /* Generate code to throw a SecurityException before the actual call/link */
4359 MonoSecurityManager *secman = mono_security_manager_get_methods ();
/* NOTE(review): the meaning of the constant 4 here is not visible in
 * this extract — presumably a SecurityAction value; confirm. */
4362 NEW_ICONST (cfg, args [0], 4);
4363 NEW_METHODCONST (cfg, args [1], caller);
4364 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
4365 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
4366 /* don't hide previous results */
4367 cfg->exception_type = MONO_EXCEPTION_SECURITY_LINKDEMAND;
4368 cfg->exception_data = result;
/*
 * method_access_exception:
 * Return (lazily resolving and caching) the 2-argument
 * SecurityManager.MethodAccessException thrower method.
 * NOTE(review): unsynchronized static cache, like the other lazy lookups
 * in this file.
 */
4376 method_access_exception (void)
4378 static MonoMethod *method = NULL;
4381 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4382 method = mono_class_get_method_from_name (secman->securitymanager,
4383 "MethodAccessException", 2);
/*
 * emit_throw_method_access_exception:
 * Emit a call to the SecurityManager thrower that raises
 * MethodAccessException(caller, callee) at this point in the method.
 */
4390 emit_throw_method_access_exception (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
4391 MonoBasicBlock *bblock, unsigned char *ip)
4393 MonoMethod *thrower = method_access_exception ();
4396 EMIT_NEW_METHODCONST (cfg, args [0], caller);
4397 EMIT_NEW_METHODCONST (cfg, args [1], callee);
4398 mono_emit_method_call (cfg, thrower, args, NULL);
/*
 * verification_exception:
 * Return (lazily resolving and caching) the 0-argument
 * SecurityManager.VerificationException thrower method.
 */
4402 verification_exception (void)
4404 static MonoMethod *method = NULL;
4407 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4408 method = mono_class_get_method_from_name (secman->securitymanager,
4409 "VerificationException", 0);
/*
 * emit_throw_verification_exception:
 * Emit a call raising VerificationException at this point in the method.
 */
4416 emit_throw_verification_exception (MonoCompile *cfg, MonoBasicBlock *bblock, unsigned char *ip)
4418 MonoMethod *thrower = verification_exception ();
4420 mono_emit_method_call (cfg, thrower, NULL, NULL);
/*
 * ensure_method_is_allowed_to_call_method:
 * CoreCLR security: a call is allowed when the caller's transparency level
 * is >= the callee's, or either side is SafeCritical. Otherwise emit code
 * that throws MethodAccessException at the call site.
 */
4424 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
4425 MonoBasicBlock *bblock, unsigned char *ip)
4427 MonoSecurityCoreCLRLevel caller_level = mono_security_core_clr_method_level (caller, TRUE);
4428 MonoSecurityCoreCLRLevel callee_level = mono_security_core_clr_method_level (callee, TRUE);
4429 gboolean is_safe = TRUE;
4431 if (!(caller_level >= callee_level ||
4432 caller_level == MONO_SECURITY_CORE_CLR_SAFE_CRITICAL ||
4433 callee_level == MONO_SECURITY_CORE_CLR_SAFE_CRITICAL)) {
4438 emit_throw_method_access_exception (cfg, caller, callee, bblock, ip);
/*
 * method_is_safe:
 * Test helper: flags the literally-named "unsafeMethod" (the condition's
 * consequent is elided in this extract — presumably returns FALSE for it
 * and TRUE otherwise; confirm against the full source).
 */
4442 method_is_safe (MonoMethod *method)
4445 if (strcmp (method->name, "unsafeMethod") == 0)
4452 * Check that the IL instructions at ip are the array initialization
4453 * sequence and return the pointer to the data and the size.
/*
 * initialize_array_data:
 * Pattern-match the CIL idiom `dup; ldtoken <field>; call
 * RuntimeHelpers.InitializeArray` that follows a newarr, and if the element
 * type and image allow it, return a pointer to the field's static RVA data
 * (or, for AOT, the RVA itself) so the array can be filled directly.
 * *out_size receives the data size. Returns NULL when the pattern or type
 * is unsupported.
 */
4456 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size)
4459 * newarr[System.Int32]
4461 * ldtoken field valuetype ...
4462 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* Byte-level match: dup, ldtoken (5 bytes; ip[5]==0x4 is the Field token
 * table byte), then call. */
4464 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
4465 guint32 token = read32 (ip + 7);
4466 guint32 field_token = read32 (ip + 2);
4467 guint32 field_index = field_token & 0xffffff;
4469 const char *data_ptr;
4471 MonoMethod *cmethod;
4472 MonoClass *dummy_class;
4473 MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
4479 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* Verify the callee really is RuntimeHelpers.InitializeArray from corlib. */
4482 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
4484 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
4485 case MONO_TYPE_BOOLEAN:
4489 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
4490 #if G_BYTE_ORDER == G_LITTLE_ENDIAN
4491 case MONO_TYPE_CHAR:
4501 return NULL; /* stupid ARM FP swapped format */
/* Sanity: computed blob size must fit the field's declared type. */
4511 if (size > mono_type_size (field->type, &dummy_align))
4514 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
4515 if (!method->klass->image->dynamic) {
4516 field_index = read32 (ip + 2) & 0xffffff;
4517 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
4518 data_ptr = mono_image_rva_map (method->klass->image, rva);
4519 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
4520 /* for aot code we do the lookup on load */
4521 if (aot && data_ptr)
4522 return GUINT_TO_POINTER (rva);
4524 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
/* Dynamic (Reflection.Emit) image: data lives directly on the field. */
4526 data_ptr = field->data;
/*
 * set_exception_type_from_invalid_il:
 * Records an InvalidProgramException on the MonoCompile, with a message
 * naming the method and disassembling the offending instruction at ip
 * (or noting an empty body). Both temporary strings are freed here;
 * cfg->exception_message owns its own copy via g_strdup_printf.
 */
4534 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
4536 char *method_fname = mono_method_full_name (method, TRUE);
4539 if (mono_method_get_header (method)->code_size == 0)
4540 method_code = g_strdup ("method body is empty.");
4542 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
4543 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
4544 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
4545 g_free (method_fname);
4546 g_free (method_code);
/*
 * set_exception_object:
 * Stores a pre-built managed exception on the MonoCompile. The pointer slot
 * is registered as a GC root first so the object can't be collected while
 * compilation unwinds.
 */
4550 set_exception_object (MonoCompile *cfg, MonoException *exception)
4552 cfg->exception_type = MONO_EXCEPTION_OBJECT_SUPPLIED;
4553 MONO_GC_REGISTER_ROOT (cfg->exception_ptr);
4554 cfg->exception_ptr = exception;
/*
 * generic_class_is_reference_type:
 * Returns whether klass is a reference type, resolving type variables
 * through the generic-sharing context when the method is compiled shared
 * (so a T instantiated as a class answers TRUE).
 */
4558 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
4562 if (cfg->generic_sharing_context)
4563 type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, &klass->byval_arg);
4565 type = &klass->byval_arg;
4566 return MONO_TYPE_IS_REFERENCE (type);
4570 * mono_decompose_array_access_opts:
4572 * Decompose array access opcodes.
4573 * This should be in decompose.c, but it emits calls so it has to stay here until
4574 * the old JIT is gone.
/*
 * Lowers the high-level array opcodes (length load, bounds check, newarr,
 * string length) in every bblock flagged has_array_access into explicit
 * loads/calls, emitting replacement code into a scratch bblock and splicing
 * it over the original instruction with mono_replace_ins ().
 */
4577 mono_decompose_array_access_opts (MonoCompile *cfg)
4579 MonoBasicBlock *bb, *first_bb;
4582 * Unlike decompose_long_opts, this pass does not alter the CFG of the method so it
4583 * can be executed anytime. It should be run before decompose_long
4587 * Create a dummy bblock and emit code into it so we can use the normal
4588 * code generation macros.
4590 cfg->cbb = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoBasicBlock));
4591 first_bb = cfg->cbb;
4593 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
4595 MonoInst *prev = NULL;
4597 MonoInst *iargs [3];
/* only bblocks that actually contain array-access opcodes are scanned */
4600 if (!bb->has_array_access)
4603 if (cfg->verbose_level > 3) mono_print_bb (bb, "BEFORE DECOMPOSE-ARRAY-ACCESS-OPTS ");
4605 cfg->cbb->code = cfg->cbb->last_ins = NULL;
4611 for (ins = bb->code; ins; ins = ins->next) {
4612 switch (ins->opcode) {
/* array length becomes a plain i4 load of MonoArray.max_length */
4614 NEW_LOAD_MEMBASE (cfg, dest, OP_LOADI4_MEMBASE, ins->dreg, ins->sreg1,
4615 G_STRUCT_OFFSET (MonoArray, max_length));
4616 MONO_ADD_INS (cfg->cbb, dest);
4618 case OP_BOUNDS_CHECK:
4619 MONO_ARCH_EMIT_BOUNDS_CHECK (cfg, ins->sreg1, ins->inst_imm, ins->sreg2);
/* newarr: shared code can't bake a vtable, so call mono_array_new with
 * (domain, element class, length) */
4622 if (cfg->opt & MONO_OPT_SHARED) {
4623 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4624 EMIT_NEW_CLASSCONST (cfg, iargs [1], ins->inst_newa_class);
4625 MONO_INST_NEW (cfg, iargs [2], OP_MOVE);
4626 iargs [2]->dreg = ins->sreg1;
4628 dest = mono_emit_jit_icall (cfg, mono_array_new, iargs);
4629 dest->dreg = ins->dreg;
/* non-shared: use the concrete array vtable and the faster
 * mono_array_new_specific (vtable, length) icall */
4631 MonoVTable *vtable = mono_class_vtable (cfg->domain, mono_array_class_get (ins->inst_newa_class, 1));
4634 NEW_VTABLECONST (cfg, iargs [0], vtable);
4635 MONO_ADD_INS (cfg->cbb, iargs [0]);
4636 MONO_INST_NEW (cfg, iargs [1], OP_MOVE);
4637 iargs [1]->dreg = ins->sreg1;
4639 dest = mono_emit_jit_icall (cfg, mono_array_new_specific, iargs);
4640 dest->dreg = ins->dreg;
/* string length becomes an i4 load of MonoString.length */
4644 NEW_LOAD_MEMBASE (cfg, dest, OP_LOADI4_MEMBASE, ins->dreg,
4645 ins->sreg1, G_STRUCT_OFFSET (MonoString, length));
4646 MONO_ADD_INS (cfg->cbb, dest);
4652 g_assert (cfg->cbb == first_bb);
4654 if (cfg->cbb->code || (cfg->cbb != first_bb)) {
4655 /* Replace the original instruction with the new code sequence */
4657 mono_replace_ins (cfg, bb, ins, &prev, first_bb, cfg->cbb);
/* reset the scratch bblock for the next instruction */
4658 first_bb->code = first_bb->last_ins = NULL;
4659 first_bb->in_count = first_bb->out_count = 0;
4660 cfg->cbb = first_bb;
4667 if (cfg->verbose_level > 3) mono_print_bb (bb, "AFTER DECOMPOSE-ARRAY-ACCESS-OPTS ");
4677 #ifdef MONO_ARCH_SOFT_FLOAT
4680 * mono_decompose_soft_float:
4682 * Soft float support on ARM. We store each double value in a pair of integer vregs,
4683 * similar to long support on 32 bit platforms. 32 bit float values require special
4684 * handling when used as locals, arguments, and in calls.
4685 * One big problem with soft-float is that there are few r4 test cases in our test suite.
/*
 * Rewrites every fp opcode into integer/long equivalents or icalls:
 * fp constants become i8 constants (double bits in an int pair), fp
 * moves/loads/stores become long ops, r4 memory access goes through the
 * mono_fstore_r4/mono_fload_r4 icalls, fp calls become long calls, and
 * fp compares/branches become emulation icalls plus integer compares.
 * Like mono_decompose_array_access_opts, replacement code is built in a
 * scratch bblock and spliced in with mono_replace_ins ().
 */
4688 mono_decompose_soft_float (MonoCompile *cfg)
4690 MonoBasicBlock *bb, *first_bb;
4693 * This pass creates long opcodes, so it should be run before decompose_long_opts ().
4697 * Create a dummy bblock and emit code into it so we can use the normal
4698 * code generation macros.
4700 cfg->cbb = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoBasicBlock));
4701 first_bb = cfg->cbb;
4703 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
4705 MonoInst *prev = NULL;
4708 if (cfg->verbose_level > 3) mono_print_bb (bb, "BEFORE HANDLE-SOFT-FLOAT ");
4710 cfg->cbb->code = cfg->cbb->last_ins = NULL;
4716 for (ins = bb->code; ins; ins = ins->next) {
4717 const char *spec = INS_INFO (ins->opcode);
4719 /* Most fp operations are handled automatically by opcode emulation */
4721 switch (ins->opcode) {
/* r8 constant: reinterpret the double's bits as an i8 constant */
4724 d.vald = *(double*)ins->inst_p0;
4725 MONO_EMIT_NEW_I8CONST (cfg, ins->dreg, d.vall);
4730 /* We load the r8 value */
4731 d.vald = *(float*)ins->inst_p0;
4732 MONO_EMIT_NEW_I8CONST (cfg, ins->dreg, d.vall);
/* fp register moves become long/int moves on the paired vregs */
4736 ins->opcode = OP_LMOVE;
4739 ins->opcode = OP_MOVE;
4740 ins->sreg1 = ins->sreg1 + 1;
4743 ins->opcode = OP_MOVE;
4744 ins->sreg1 = ins->sreg1 + 2;
/* fp return: set the long return register pair (+1/+2 are the halves) */
4747 int reg = ins->sreg1;
4749 ins->opcode = OP_SETLRET;
4751 ins->sreg1 = reg + 1;
4752 ins->sreg2 = reg + 2;
4755 case OP_LOADR8_MEMBASE:
4756 ins->opcode = OP_LOADI8_MEMBASE;
4758 case OP_STORER8_MEMBASE_REG:
4759 ins->opcode = OP_STOREI8_MEMBASE_REG;
/* r4 store: call mono_fstore_r4 (value, address) to narrow r8 -> r4 */
4761 case OP_STORER4_MEMBASE_REG: {
4762 MonoInst *iargs [2];
4765 /* Arg 1 is the double value */
4766 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
4767 iargs [0]->dreg = ins->sreg1;
4769 /* Arg 2 is the address to store to */
4770 addr_reg = mono_alloc_preg (cfg);
4771 EMIT_NEW_BIALU_IMM (cfg, iargs [1], OP_PADD_IMM, addr_reg, ins->inst_destbasereg, ins->inst_offset);
4772 mono_emit_jit_icall (cfg, mono_fstore_r4, iargs);
/* r4 load: call mono_fload_r4 (address) to widen r4 -> r8 */
4776 case OP_LOADR4_MEMBASE: {
4777 MonoInst *iargs [1];
4781 addr_reg = mono_alloc_preg (cfg);
4782 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, addr_reg, ins->inst_basereg, ins->inst_offset);
4783 conv = mono_emit_jit_icall (cfg, mono_fload_r4, iargs);
4784 conv->dreg = ins->dreg;
4789 case OP_FCALL_MEMBASE: {
4790 MonoCallInst *call = (MonoCallInst*)ins;
/* calls returning r4: duplicate the call as an int call, then widen the
 * returned r4 bits to r8 via mono_fload_r4 on the result's address */
4791 if (call->signature->ret->type == MONO_TYPE_R4) {
4792 MonoCallInst *call2;
4793 MonoInst *iargs [1];
4796 /* Convert the call into a call returning an int */
4797 MONO_INST_NEW_CALL (cfg, call2, OP_CALL);
4798 memcpy (call2, call, sizeof (MonoCallInst));
4799 switch (ins->opcode) {
4801 call2->inst.opcode = OP_CALL;
4804 call2->inst.opcode = OP_CALL_REG;
4806 case OP_FCALL_MEMBASE:
4807 call2->inst.opcode = OP_CALL_MEMBASE;
4810 g_assert_not_reached ();
4812 call2->inst.dreg = mono_alloc_ireg (cfg);
4813 MONO_ADD_INS (cfg->cbb, (MonoInst*)call2);
4815 /* FIXME: Optimize this */
4817 /* Emit an r4->r8 conversion */
4818 EMIT_NEW_VARLOADA_VREG (cfg, iargs [0], call2->inst.dreg, &mono_defaults.int32_class->byval_arg);
4819 conv = mono_emit_jit_icall (cfg, mono_fload_r4, iargs);
4820 conv->dreg = ins->dreg;
/* calls returning r8: just retype to the matching long-call opcode */
4822 switch (ins->opcode) {
4824 ins->opcode = OP_LCALL;
4827 ins->opcode = OP_LCALL_REG;
4829 case OP_FCALL_MEMBASE:
4830 ins->opcode = OP_LCALL_MEMBASE;
4833 g_assert_not_reached ();
4839 MonoJitICallInfo *info;
4840 MonoInst *iargs [2];
4841 MonoInst *call, *cmp, *br;
4843 /* Convert fcompare+fbcc to icall+icompare+beq */
/* emulation icall is chosen from the FOLLOWING branch's opcode */
4845 info = mono_find_jit_opcode_emulation (ins->next->opcode);
4848 /* Create dummy MonoInst's for the arguments */
4849 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
4850 iargs [0]->dreg = ins->sreg1;
4851 MONO_INST_NEW (cfg, iargs [1], OP_ARG);
4852 iargs [1]->dreg = ins->sreg2;
4854 call = mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, iargs);
4856 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE_IMM);
4857 cmp->sreg1 = call->dreg;
4859 MONO_ADD_INS (cfg->cbb, cmp);
/* branch keeps the original fbcc's targets */
4861 MONO_INST_NEW (cfg, br, OP_IBNE_UN);
4862 br->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * 2);
4863 br->inst_true_bb = ins->next->inst_true_bb;
4864 br->inst_false_bb = ins->next->inst_false_bb;
4865 MONO_ADD_INS (cfg->cbb, br);
4867 /* The call sequence might include fp ins */
4870 /* Skip fbcc or fccc */
4871 NULLIFY_INS (ins->next);
4879 MonoJitICallInfo *info;
4880 MonoInst *iargs [2];
4883 /* Convert fccc to icall+icompare+iceq */
4885 info = mono_find_jit_opcode_emulation (ins->opcode);
4888 /* Create dummy MonoInst's for the arguments */
4889 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
4890 iargs [0]->dreg = ins->sreg1;
4891 MONO_INST_NEW (cfg, iargs [1], OP_ARG);
4892 iargs [1]->dreg = ins->sreg2;
4894 call = mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, iargs);
/* icall returns 1 on "condition holds": compare against 1, set dreg */
4896 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, call->dreg, 1);
4897 MONO_EMIT_NEW_UNALU (cfg, OP_ICEQ, ins->dreg, -1);
4899 /* The call sequence might include fp ins */
/* sanity: after this pass no instruction may still use an 'f' vreg */
4904 if (spec [MONO_INST_SRC1] == 'f' || spec [MONO_INST_SRC2] == 'f' || spec [MONO_INST_DEST] == 'f') {
4905 mono_print_ins (ins);
4906 g_assert_not_reached ();
4911 g_assert (cfg->cbb == first_bb);
4913 if (cfg->cbb->code || (cfg->cbb != first_bb)) {
4914 /* Replace the original instruction with the new code sequence */
4916 mono_replace_ins (cfg, bb, ins, &prev, first_bb, cfg->cbb);
/* reset the scratch bblock for the next instruction */
4917 first_bb->code = first_bb->last_ins = NULL;
4918 first_bb->in_count = first_bb->out_count = 0;
4919 cfg->cbb = first_bb;
4926 if (cfg->verbose_level > 3) mono_print_bb (bb, "AFTER HANDLE-SOFT-FLOAT ");
/* this pass produced long opcodes, so lower them now */
4929 mono_decompose_long_opts (cfg);
4935 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
4938 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
4939 if ((opcode == OP_MOVE) && ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
4940 /* Optimize reg-reg moves away */
4942 * Can't optimize other opcodes, since sp[0] might point to
4943 * the last ins of a decomposed opcode.
4945 sp [0]->dreg = (cfg)->locals [n]->dreg;
4947 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
4952 * ldloca inhibits many optimizations so try to get rid of it in common
/*
 * Peephole for ldloca(.s) followed by initobj within the same bblock:
 * instead of taking the local's address, directly zero the local (PCONST
 * NULL for reference types, VZERO for value types), letting the caller
 * skip both instructions. Returns the new ip past the consumed sequence,
 * or NULL-ish fall-through when no optimization applies (NOTE(review):
 * return paths are elided in this view — confirm).
 */
4955 static inline unsigned char *
4956 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
4965 local = read16 (ip + 2);
/* next opcode must be initobj (0xFE prefix) and still inside this bblock */
4969 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
4970 gboolean skip = FALSE;
4972 /* From the INITOBJ case */
4973 token = read32 (ip + 2);
4974 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
4975 CHECK_TYPELOAD (klass);
/* reference types (including shared T-as-class): initobj == store NULL */
4976 if (generic_class_is_reference_type (cfg, klass)) {
4977 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
4978 } else if (MONO_TYPE_IS_REFERENCE (&klass->byval_arg)) {
4979 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
/* value types: zero the whole struct in place */
4980 } else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
4981 MONO_EMIT_NEW_VZERO (cfg, cfg->locals [local]->dreg, klass);
4994 * mono_method_to_ir: translates IL into basic blocks containing trees
4997 mono_method_to_ir2 (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
4998 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
4999 guint inline_offset, gboolean is_virtual_call)
5001 MonoInst *ins, **sp, **stack_start;
5002 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
5003 MonoMethod *cmethod, *method_definition;
5004 MonoInst **arg_array;
5005 MonoMethodHeader *header;
5007 guint32 token, ins_flag;
5009 MonoClass *constrained_call = NULL;
5010 unsigned char *ip, *end, *target, *err_pos;
5011 static double r8_0 = 0.0;
5012 MonoMethodSignature *sig;
5013 MonoGenericContext *generic_context = NULL;
5014 MonoGenericContainer *generic_container = NULL;
5015 MonoType **param_types;
5016 int i, n, start_new_bblock, dreg;
5017 int num_calls = 0, inline_costs = 0;
5018 int breakpoint_id = 0;
5020 MonoBoolean security, pinvoke;
5021 MonoSecurityManager* secman = NULL;
5022 MonoDeclSecurityActions actions;
5023 GSList *class_inits = NULL;
5024 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
5027 /* serialization and xdomain stuff may need access to private fields and methods */
5028 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
5029 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
5030 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
5031 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
5032 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
5033 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
5035 dont_verify |= mono_security_get_mode () == MONO_SECURITY_MODE_SMCS_HACK;
5037 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
5038 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
5039 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
5040 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
5042 image = method->klass->image;
5043 header = mono_method_get_header (method);
5044 generic_container = mono_method_get_generic_container (method);
5045 sig = mono_method_signature (method);
5046 num_args = sig->hasthis + sig->param_count;
5047 ip = (unsigned char*)header->code;
5048 cfg->cil_start = ip;
5049 end = ip + header->code_size;
5050 mono_jit_stats.cil_code_size += header->code_size;
5052 method_definition = method;
5053 while (method_definition->is_inflated) {
5054 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
5055 method_definition = imethod->declaring;
5058 /* SkipVerification is not allowed if core-clr is enabled */
5059 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
5061 dont_verify_stloc = TRUE;
5064 if (!dont_verify && mini_method_verify (cfg, method_definition))
5065 goto exception_exit;
5067 if (sig->is_inflated)
5068 generic_context = mono_method_get_context (method);
5069 else if (generic_container)
5070 generic_context = &generic_container->context;
5071 cfg->generic_context = generic_context;
5073 if (!cfg->generic_sharing_context)
5074 g_assert (!sig->has_type_parameters);
5076 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
5077 g_assert (method->is_inflated);
5078 g_assert (mono_method_get_context (method)->method_inst);
5080 if (method->is_inflated && mono_method_get_context (method)->method_inst)
5081 g_assert (sig->generic_param_count);
5083 if (cfg->method == method) {
5084 cfg->real_offset = 0;
5086 cfg->real_offset = inline_offset;
5089 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
5090 cfg->cil_offset_to_bb_len = header->code_size;
5092 cfg->current_method = method;
5094 if (cfg->verbose_level > 2)
5095 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
5097 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
5099 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
5100 for (n = 0; n < sig->param_count; ++n)
5101 param_types [n + sig->hasthis] = sig->params [n];
5102 cfg->arg_types = param_types;
5104 dont_inline = g_list_prepend (dont_inline, method);
5105 if (cfg->method == method) {
5107 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
5108 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
5111 NEW_BBLOCK (cfg, start_bblock);
5112 cfg->bb_entry = start_bblock;
5113 start_bblock->cil_code = NULL;
5114 start_bblock->cil_length = 0;
5117 NEW_BBLOCK (cfg, end_bblock);
5118 cfg->bb_exit = end_bblock;
5119 end_bblock->cil_code = NULL;
5120 end_bblock->cil_length = 0;
5121 g_assert (cfg->num_bblocks == 2);
5123 arg_array = cfg->args;
5125 if (header->num_clauses) {
5126 cfg->spvars = g_hash_table_new (NULL, NULL);
5127 cfg->exvars = g_hash_table_new (NULL, NULL);
5129 /* handle exception clauses */
5130 for (i = 0; i < header->num_clauses; ++i) {
5131 MonoBasicBlock *try_bb;
5132 MonoExceptionClause *clause = &header->clauses [i];
5133 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
5134 try_bb->real_offset = clause->try_offset;
5135 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
5136 tblock->real_offset = clause->handler_offset;
5137 tblock->flags |= BB_EXCEPTION_HANDLER;
5139 link_bblock (cfg, try_bb, tblock);
5141 if (*(ip + clause->handler_offset) == CEE_POP)
5142 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
5144 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
5145 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
5146 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
5147 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5148 MONO_ADD_INS (tblock, ins);
5150 /* todo: is a fault block unsafe to optimize? */
5151 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
5152 tblock->flags |= BB_EXCEPTION_UNSAFE;
5156 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
5158 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
5160 /* catch and filter blocks get the exception object on the stack */
5161 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
5162 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5163 MonoInst *dummy_use;
5165 /* mostly like handle_stack_args (), but just sets the input args */
5166 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
5167 tblock->in_scount = 1;
5168 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5169 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5172 * Add a dummy use for the exvar so its liveness info will be
5176 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
5178 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5179 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
5180 tblock->real_offset = clause->data.filter_offset;
5181 tblock->in_scount = 1;
5182 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5183 /* The filter block shares the exvar with the handler block */
5184 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5185 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5186 MONO_ADD_INS (tblock, ins);
5190 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
5191 clause->data.catch_class &&
5192 cfg->generic_sharing_context &&
5193 mono_class_check_context_used (clause->data.catch_class)) {
5194 if (mono_method_get_context (method)->method_inst)
5195 GENERIC_SHARING_FAILURE (CEE_NOP);
5198 * In shared generic code with catch
5199 * clauses containing type variables
5200 * the exception handling code has to
5201 * be able to get to the rgctx.
5202 * Therefore we have to make sure that
5203 * the vtable/mrgctx argument (for
5204 * static or generic methods) or the
5205 * "this" argument (for non-static
5206 * methods) are live.
5208 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5209 mini_method_get_context (method)->method_inst ||
5210 method->klass->valuetype) {
5211 mono_get_vtable_var (cfg);
5213 MonoInst *dummy_use;
5215 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
5220 arg_array = alloca (sizeof (MonoInst *) * num_args);
5221 cfg->cbb = start_bblock;
5222 cfg->args = arg_array;
5223 mono_save_args (cfg, sig, inline_args);
5226 /* FIRST CODE BLOCK */
5227 NEW_BBLOCK (cfg, bblock);
5228 bblock->cil_code = ip;
5232 ADD_BBLOCK (cfg, bblock);
5234 if (cfg->method == method) {
5235 breakpoint_id = mono_debugger_method_has_breakpoint (method);
5236 if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
5237 MONO_INST_NEW (cfg, ins, OP_BREAK);
5238 MONO_ADD_INS (bblock, ins);
5242 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5243 secman = mono_security_manager_get_methods ();
5245 security = (secman && mono_method_has_declsec (method));
5246 /* at this point having security doesn't mean we have any code to generate */
5247 if (security && (cfg->method == method)) {
5248 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
5249 * And we do not want to enter the next section (with allocation) if we
5250 * have nothing to generate */
5251 security = mono_declsec_get_demands (method, &actions);
5254 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
5255 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
5257 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5258 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5259 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
5261 /* unless the method or it's class has the [SuppressUnmanagedCodeSecurity] attribute */
5262 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5266 mono_custom_attrs_free (custom);
5269 custom = mono_custom_attrs_from_class (wrapped->klass);
5270 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5274 mono_custom_attrs_free (custom);
5277 /* not a P/Invoke after all */
5282 if ((header->init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
5283 /* we use a separate basic block for the initialization code */
5284 NEW_BBLOCK (cfg, init_localsbb);
5285 cfg->bb_init = init_localsbb;
5286 init_localsbb->real_offset = cfg->real_offset;
5287 start_bblock->next_bb = init_localsbb;
5288 init_localsbb->next_bb = bblock;
5289 link_bblock (cfg, start_bblock, init_localsbb);
5290 link_bblock (cfg, init_localsbb, bblock);
5292 cfg->cbb = init_localsbb;
5294 start_bblock->next_bb = bblock;
5295 link_bblock (cfg, start_bblock, bblock);
5298 /* at this point we know, if security is TRUE, that some code needs to be generated */
5299 if (security && (cfg->method == method)) {
5302 mono_jit_stats.cas_demand_generation++;
5304 if (actions.demand.blob) {
5305 /* Add code for SecurityAction.Demand */
5306 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
5307 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
5308 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5309 mono_emit_method_call (cfg, secman->demand, args, NULL);
5311 if (actions.noncasdemand.blob) {
5312 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
5313 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
5314 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
5315 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
5316 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5317 mono_emit_method_call (cfg, secman->demand, args, NULL);
5319 if (actions.demandchoice.blob) {
5320 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
5321 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
5322 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
5323 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
5324 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
5328 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
5330 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
5333 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
5334 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
5335 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5336 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5337 if (!(method->klass && method->klass->image &&
5338 mono_security_core_clr_is_platform_image (method->klass->image))) {
5339 emit_throw_method_access_exception (cfg, method, wrapped, bblock, ip);
5343 if (!method_is_safe (method))
5344 emit_throw_verification_exception (cfg, bblock, ip);
5347 if (header->code_size == 0)
5350 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
5355 if (cfg->method == method)
5356 mono_debug_init_method (cfg, bblock, breakpoint_id);
5358 for (n = 0; n < header->num_locals; ++n) {
5359 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
5364 /* add a check for this != NULL to inlined methods */
5365 if (is_virtual_call) {
5368 NEW_ARGLOAD (cfg, arg_ins, 0);
5369 MONO_ADD_INS (cfg->cbb, arg_ins);
5370 cfg->flags |= MONO_CFG_HAS_CHECK_THIS;
5371 MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, arg_ins->dreg);
5372 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, arg_ins->dreg);
5375 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
5376 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
5379 start_new_bblock = 0;
5383 if (cfg->method == method)
5384 cfg->real_offset = ip - header->code;
5386 cfg->real_offset = inline_offset;
5391 if (start_new_bblock) {
5392 bblock->cil_length = ip - bblock->cil_code;
5393 if (start_new_bblock == 2) {
5394 g_assert (ip == tblock->cil_code);
5396 GET_BBLOCK (cfg, tblock, ip);
5398 bblock->next_bb = tblock;
5401 start_new_bblock = 0;
5402 for (i = 0; i < bblock->in_scount; ++i) {
5403 if (cfg->verbose_level > 3)
5404 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5405 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5409 g_slist_free (class_inits);
5412 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
5413 link_bblock (cfg, bblock, tblock);
5414 if (sp != stack_start) {
5415 handle_stack_args (cfg, stack_start, sp - stack_start);
5417 CHECK_UNVERIFIABLE (cfg);
5419 bblock->next_bb = tblock;
5422 for (i = 0; i < bblock->in_scount; ++i) {
5423 if (cfg->verbose_level > 3)
5424 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5425 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5428 g_slist_free (class_inits);
5433 bblock->real_offset = cfg->real_offset;
5435 if ((cfg->method == method) && cfg->coverage_info) {
5436 guint32 cil_offset = ip - header->code;
5437 cfg->coverage_info->data [cil_offset].cil_code = ip;
5439 /* TODO: Use an increment here */
5440 #if defined(__i386__)
5441 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
5442 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
5444 MONO_ADD_INS (cfg->cbb, ins);
5446 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
5447 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
5451 if (cfg->verbose_level > 3)
5452 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
5457 MONO_INST_NEW (cfg, ins, (*ip) == CEE_NOP ? OP_NOP : OP_BREAK);
5459 MONO_ADD_INS (bblock, ins);
5465 CHECK_STACK_OVF (1);
5466 n = (*ip)-CEE_LDARG_0;
5468 EMIT_NEW_ARGLOAD (cfg, ins, n);
5476 CHECK_STACK_OVF (1);
5477 n = (*ip)-CEE_LDLOC_0;
5479 EMIT_NEW_LOCLOAD (cfg, ins, n);
5488 n = (*ip)-CEE_STLOC_0;
5491 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
5493 emit_stloc_ir (cfg, sp, header, n);
5500 CHECK_STACK_OVF (1);
5503 EMIT_NEW_ARGLOAD (cfg, ins, n);
5509 CHECK_STACK_OVF (1);
5512 NEW_ARGLOADA (cfg, ins, n);
5513 MONO_ADD_INS (cfg->cbb, ins);
5523 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
5525 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
5530 CHECK_STACK_OVF (1);
5533 EMIT_NEW_LOCLOAD (cfg, ins, n);
5537 case CEE_LDLOCA_S: {
5538 unsigned char *tmp_ip;
5540 CHECK_STACK_OVF (1);
5541 CHECK_LOCAL (ip [1]);
5543 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
5549 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
5558 CHECK_LOCAL (ip [1]);
5559 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
5561 emit_stloc_ir (cfg, sp, header, ip [1]);
5566 CHECK_STACK_OVF (1);
5567 EMIT_NEW_PCONST (cfg, ins, NULL);
5568 ins->type = STACK_OBJ;
5573 CHECK_STACK_OVF (1);
5574 EMIT_NEW_ICONST (cfg, ins, -1);
5587 CHECK_STACK_OVF (1);
5588 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
5594 CHECK_STACK_OVF (1);
5596 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
5602 CHECK_STACK_OVF (1);
5603 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
5609 CHECK_STACK_OVF (1);
5610 MONO_INST_NEW (cfg, ins, OP_I8CONST);
5611 ins->type = STACK_I8;
5612 ins->dreg = alloc_dreg (cfg, STACK_I8);
5614 ins->inst_l = (gint64)read64 (ip);
5615 MONO_ADD_INS (bblock, ins);
5621 /* FIXME: we should really allocate this only late in the compilation process */
5622 mono_domain_lock (cfg->domain);
5623 f = mono_domain_alloc (cfg->domain, sizeof (float));
5624 mono_domain_unlock (cfg->domain);
5626 CHECK_STACK_OVF (1);
5627 MONO_INST_NEW (cfg, ins, OP_R4CONST);
5628 ins->type = STACK_R8;
5629 ins->dreg = alloc_dreg (cfg, STACK_R8);
5633 MONO_ADD_INS (bblock, ins);
5641 /* FIXME: we should really allocate this only late in the compilation process */
5642 mono_domain_lock (cfg->domain);
5643 d = mono_domain_alloc (cfg->domain, sizeof (double));
5644 mono_domain_unlock (cfg->domain);
5646 CHECK_STACK_OVF (1);
5647 MONO_INST_NEW (cfg, ins, OP_R8CONST);
5648 ins->type = STACK_R8;
5649 ins->dreg = alloc_dreg (cfg, STACK_R8);
5653 MONO_ADD_INS (bblock, ins);
5660 MonoInst *temp, *store;
5662 CHECK_STACK_OVF (1);
5666 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
5667 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
5669 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
5672 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
5685 if (sp [0]->type == STACK_R8)
5686 /* we need to pop the value from the x86 FP stack */
5687 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
5694 if (stack_start != sp)
5696 token = read32 (ip + 1);
5697 /* FIXME: check the signature matches */
5698 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
5703 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
5704 GENERIC_SHARING_FAILURE (CEE_JMP);
5706 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
5707 if (check_linkdemand (cfg, method, cmethod))
5709 CHECK_CFG_EXCEPTION;
5714 MonoMethodSignature *fsig = mono_method_signature (cmethod);
5717 /* FIXME: Remove OP_JMP from mini-amd64.c when the old JIT is removed */
5719 /* Handle tail calls similarly to calls */
5720 n = fsig->param_count + fsig->hasthis;
5722 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
5723 call->method = cmethod;
5724 call->tail_call = TRUE;
5725 call->signature = mono_method_signature (cmethod);
5726 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
5727 call->inst.inst_p0 = cmethod;
5728 for (i = 0; i < n; ++i)
5729 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
5731 mono_arch_emit_call (cfg, call);
5732 MONO_ADD_INS (bblock, (MonoInst*)call);
5735 for (i = 0; i < num_args; ++i)
5736 /* Prevent arguments from being optimized away */
5737 arg_array [i]->flags |= MONO_INST_VOLATILE;
5739 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
5740 ins = (MonoInst*)call;
5741 ins->inst_p0 = cmethod;
5742 MONO_ADD_INS (bblock, ins);
5746 start_new_bblock = 1;
5751 case CEE_CALLVIRT: {
5752 MonoInst *addr = NULL;
5753 MonoMethodSignature *fsig = NULL;
5755 int virtual = *ip == CEE_CALLVIRT;
5756 int calli = *ip == CEE_CALLI;
5757 gboolean pass_imt_from_rgctx = FALSE;
5758 MonoInst *imt_arg = NULL;
5759 gboolean pass_vtable = FALSE;
5760 gboolean pass_mrgctx = FALSE;
5761 MonoInst *vtable_arg = NULL;
5762 gboolean check_this = FALSE;
5765 token = read32 (ip + 1);
5772 if (method->wrapper_type != MONO_WRAPPER_NONE)
5773 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
5775 fsig = mono_metadata_parse_signature (image, token);
5777 n = fsig->param_count + fsig->hasthis;
5779 MonoMethod *cil_method;
5781 if (method->wrapper_type != MONO_WRAPPER_NONE) {
5782 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
5783 cil_method = cmethod;
5784 } else if (constrained_call) {
5785 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
5787 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
5788 cil_method = cmethod;
5793 if (!dont_verify && !cfg->skip_visibility) {
5794 MonoMethod *target_method = cil_method;
5795 if (method->is_inflated) {
5796 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
5798 if (!mono_method_can_access_method (method_definition, target_method) &&
5799 !mono_method_can_access_method (method, cil_method))
5800 METHOD_ACCESS_FAILURE;
5803 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
5804 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
5806 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
5807 /* MS.NET seems to silently convert this to a callvirt */
5810 if (!cmethod->klass->inited)
5811 if (!mono_class_init (cmethod->klass))
5814 if (mono_method_signature (cmethod)->pinvoke) {
5815 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
5816 fsig = mono_method_signature (wrapper);
5817 } else if (constrained_call) {
5818 fsig = mono_method_signature (cmethod);
5820 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
5823 mono_save_token_info (cfg, image, token, cmethod);
5825 n = fsig->param_count + fsig->hasthis;
5827 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
5828 if (check_linkdemand (cfg, method, cmethod))
5830 CHECK_CFG_EXCEPTION;
5833 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
5834 mini_class_is_system_array (cmethod->klass)) {
5835 array_rank = cmethod->klass->rank;
5838 if (cmethod->string_ctor)
5839 g_assert_not_reached ();
5842 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
5845 if (!cfg->generic_sharing_context && cmethod)
5846 g_assert (!mono_method_check_context_used (cmethod));
5850 //g_assert (!virtual || fsig->hasthis);
5854 if (constrained_call) {
5856 * We have the `constrained.' prefix opcode.
5858 if (constrained_call->valuetype && !cmethod->klass->valuetype) {
5862 * The type parameter is instantiated as a valuetype,
5863 * but that type doesn't override the method we're
5864 * calling, so we need to box `this'.
5866 dreg = alloc_dreg (cfg, STACK_VTYPE);
5867 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADV_MEMBASE, dreg, sp [0]->dreg, 0);
5868 ins->klass = constrained_call;
5869 sp [0] = handle_box (cfg, ins, constrained_call);
5870 } else if (!constrained_call->valuetype) {
5871 int dreg = alloc_preg (cfg);
5874 * The type parameter is instantiated as a reference
5875 * type. We have a managed pointer on the stack, so
5876 * we need to dereference it here.
5878 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
5879 ins->type = STACK_OBJ;
5881 } else if (cmethod->klass->valuetype)
5883 constrained_call = NULL;
5886 if (*ip != CEE_CALLI && check_call_signature (cfg, fsig, sp))
5890 if (cmethod && ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
5891 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
5892 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
5893 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
5894 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
5897 * Pass vtable iff target method might
5898 * be shared, which means that sharing
5899 * is enabled for its class and its
5900 * context is sharable (and it's not a
5903 if (sharing_enabled && context_sharable &&
5904 !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
5908 if (cmethod && mini_method_get_context (cmethod) &&
5909 mini_method_get_context (cmethod)->method_inst) {
5910 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
5911 MonoGenericContext *context = mini_method_get_context (cmethod);
5912 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
5914 g_assert (!pass_vtable);
5916 if (sharing_enabled && context_sharable)
5920 if (cfg->generic_sharing_context && cmethod) {
5921 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
5923 context_used = mono_method_check_context_used (cmethod);
5925 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
5926 /* Generic method interface
5927 calls are resolved via a
5928 helper function and don't
5930 if (!cmethod_context || !cmethod_context->method_inst)
5931 pass_imt_from_rgctx = TRUE;
5935 * If a shared method calls another
5936 * shared method then the caller must
5937 * have a generic sharing context
5938 * because the magic trampoline
5939 * requires it. FIXME: We shouldn't
5940 * have to force the vtable/mrgctx
5941 * variable here. Instead there
5942 * should be a flag in the cfg to
5943 * request a generic sharing context.
5946 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
5947 mono_get_vtable_var (cfg);
5952 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
5954 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
5956 CHECK_TYPELOAD (cmethod->klass);
5957 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
5962 g_assert (!vtable_arg);
5965 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
5967 EMIT_NEW_METHOD_RGCTX_CONST (cfg, vtable_arg, cmethod);
5970 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
5971 (cmethod->flags & METHOD_ATTRIBUTE_FINAL)) {
5978 if (pass_imt_from_rgctx) {
5979 g_assert (!pass_vtable);
5982 imt_arg = emit_get_rgctx_method (cfg, context_used,
5983 cmethod, MONO_RGCTX_INFO_METHOD);
5989 MONO_INST_NEW (cfg, check, OP_CHECK_THIS);
5990 check->sreg1 = sp [0]->dreg;
5991 MONO_ADD_INS (cfg->cbb, check);
5994 /* Calling virtual generic methods */
5995 if (cmethod && virtual &&
5996 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
5997 !((cmethod->flags & METHOD_ATTRIBUTE_FINAL) &&
5998 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
5999 mono_method_signature (cmethod)->generic_param_count) {
6000 MonoInst *this_temp, *this_arg_temp, *store;
6001 MonoInst *iargs [4];
6003 g_assert (mono_method_signature (cmethod)->is_inflated);
6005 /* Prevent inlining of methods that contain indirect calls */
6008 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK
6009 if (!(cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
6010 g_assert (!imt_arg);
6012 imt_arg = emit_get_rgctx_method (cfg, context_used,
6013 cmethod, MONO_RGCTX_INFO_METHOD_CONTEXT);
6016 g_assert (cmethod->is_inflated);
6017 EMIT_NEW_PCONST (cfg, imt_arg,
6018 ((MonoMethodInflated*)cmethod)->context.method_inst);
6020 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, sp [0], imt_arg);
6024 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
6025 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
6026 MONO_ADD_INS (bblock, store);
6028 /* FIXME: This should be a managed pointer */
6029 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
6031 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
6033 iargs [1] = emit_get_rgctx_method (cfg, context_used,
6034 cmethod, MONO_RGCTX_INFO_METHOD);
6035 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
6036 addr = mono_emit_jit_icall (cfg,
6037 mono_helper_compile_generic_method, iargs);
6039 EMIT_NEW_METHODCONST (cfg, iargs [1], cmethod);
6040 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
6041 addr = mono_emit_jit_icall (cfg, mono_helper_compile_generic_method, iargs);
6044 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
6046 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6049 if (!MONO_TYPE_IS_VOID (fsig->ret))
6058 /* FIXME: runtime generic context pointer for jumps? */
6059 /* FIXME: handle this for generic sharing eventually */
6060 if ((ins_flag & MONO_INST_TAILCALL) && !cfg->generic_sharing_context && !vtable_arg && cmethod && (*ip == CEE_CALL) &&
6061 (mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)))) {
6064 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6067 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6068 call->tail_call = TRUE;
6069 call->method = cmethod;
6070 call->signature = mono_method_signature (cmethod);
6073 /* Handle tail calls similarly to calls */
6074 call->inst.opcode = OP_TAILCALL;
6076 mono_arch_emit_call (cfg, call);
6079 * We implement tail calls by storing the actual arguments into the
6080 * argument variables, then emitting a CEE_JMP.
6082 for (i = 0; i < n; ++i) {
6083 /* Prevent argument from being register allocated */
6084 arg_array [i]->flags |= MONO_INST_VOLATILE;
6085 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6089 ins = (MonoInst*)call;
6090 ins->inst_p0 = cmethod;
6091 ins->inst_p1 = arg_array [0];
6092 MONO_ADD_INS (bblock, ins);
6093 link_bblock (cfg, bblock, end_bblock);
6094 start_new_bblock = 1;
6095 /* skip CEE_RET as well */
6101 /* Conversion to a JIT intrinsic */
6102 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
6103 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6104 type_to_eval_stack_type ((cfg), fsig->ret, ins);
6115 if ((cfg->opt & MONO_OPT_INLINE) && cmethod &&
6116 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || (cmethod->flags & METHOD_ATTRIBUTE_FINAL)) &&
6117 mono_method_check_inlining (cfg, cmethod) &&
6118 !g_list_find (dont_inline, cmethod)) {
6120 gboolean allways = FALSE;
6122 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
6123 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
6124 /* Prevent inlining of methods that call wrappers */
6126 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
6130 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, allways))) {
6132 cfg->real_offset += 5;
6135 if (!MONO_TYPE_IS_VOID (fsig->ret))
6136 /* *sp is already set by inline_method */
6139 inline_costs += costs;
6145 inline_costs += 10 * num_calls++;
6147 /* Tail recursion elimination */
6148 if ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
6149 gboolean has_vtargs = FALSE;
6152 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6155 /* keep it simple */
6156 for (i = fsig->param_count - 1; i >= 0; i--) {
6157 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
6162 for (i = 0; i < n; ++i)
6163 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6164 MONO_INST_NEW (cfg, ins, OP_BR);
6165 MONO_ADD_INS (bblock, ins);
6166 tblock = start_bblock->out_bb [0];
6167 link_bblock (cfg, bblock, tblock);
6168 ins->inst_target_bb = tblock;
6169 start_new_bblock = 1;
6171 /* skip the CEE_RET, too */
6172 if (ip_in_bb (cfg, bblock, ip + 5))
6182 /* Generic sharing */
6183 /* FIXME: only do this for generic methods if
6184 they are not shared! */
6185 if (context_used && !imt_arg &&
6186 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
6187 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
6188 (!virtual || cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
6189 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
6192 g_assert (cfg->generic_sharing_context && cmethod);
6196 * We are compiling a call to a
6197 * generic method from shared code,
6198 * which means that we have to look up
6199 * the method in the rgctx and do an
6202 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
6205 /* Indirect calls */
6207 g_assert (!imt_arg);
6209 if (*ip == CEE_CALL)
6210 g_assert (context_used);
6211 else if (*ip == CEE_CALLI)
6212 g_assert (!vtable_arg);
6214 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
6215 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
6217 /* Prevent inlining of methods with indirect calls */
6221 #ifdef MONO_ARCH_RGCTX_REG
6223 int rgctx_reg = mono_alloc_preg (cfg);
6225 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
6226 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6227 call = (MonoCallInst*)ins;
6228 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
6229 cfg->uses_rgctx_reg = TRUE;
6234 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6236 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6237 if (fsig->pinvoke && !fsig->ret->byref) {
6241 * Native code might return non register sized integers
6242 * without initializing the upper bits.
6244 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
6245 case OP_LOADI1_MEMBASE:
6246 widen_op = OP_ICONV_TO_I1;
6248 case OP_LOADU1_MEMBASE:
6249 widen_op = OP_ICONV_TO_U1;
6251 case OP_LOADI2_MEMBASE:
6252 widen_op = OP_ICONV_TO_I2;
6254 case OP_LOADU2_MEMBASE:
6255 widen_op = OP_ICONV_TO_U2;
6261 if (widen_op != -1) {
6262 int dreg = alloc_preg (cfg);
6265 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
6266 widen->type = ins->type;
6283 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
6284 if (sp [fsig->param_count]->type == STACK_OBJ) {
6285 MonoInst *iargs [2];
6288 iargs [1] = sp [fsig->param_count];
6290 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
6293 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
6294 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, sp [fsig->param_count]->dreg);
6295 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
6296 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6298 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
6301 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
6302 if (!cmethod->klass->element_class->valuetype && !readonly)
6303 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
6306 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6309 g_assert_not_reached ();
6317 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
6319 if (!MONO_TYPE_IS_VOID (fsig->ret))
6330 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL,
6332 } else if (imt_arg) {
6333 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, imt_arg);
6335 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, NULL);
6338 if (!MONO_TYPE_IS_VOID (fsig->ret))
6346 if (cfg->method != method) {
6347 /* return from inlined method */
6349 * If in_count == 0, that means the ret is unreachable due to
6350 * being preceeded by a throw. In that case, inline_method () will
6351 * handle setting the return value
6352 * (test case: test_0_inline_throw ()).
6354 if (return_var && cfg->cbb->in_count) {
6358 //g_assert (returnvar != -1);
6359 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
6360 cfg->ret_var_set = TRUE;
6364 MonoType *ret_type = mono_method_signature (method)->ret;
6366 g_assert (!return_var);
6369 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
6372 if (!cfg->vret_addr) {
6375 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
6377 EMIT_NEW_RETLOADA (cfg, ret_addr);
6379 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
6380 ins->klass = mono_class_from_mono_type (ret_type);
6383 #ifdef MONO_ARCH_SOFT_FLOAT
6384 if (!ret_type->byref && ret_type->type == MONO_TYPE_R4) {
6385 MonoInst *iargs [1];
6389 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
6390 mono_arch_emit_setret (cfg, method, conv);
6392 mono_arch_emit_setret (cfg, method, *sp);
6395 mono_arch_emit_setret (cfg, method, *sp);
6400 if (sp != stack_start)
6402 MONO_INST_NEW (cfg, ins, OP_BR);
6404 ins->inst_target_bb = end_bblock;
6405 MONO_ADD_INS (bblock, ins);
6406 link_bblock (cfg, bblock, end_bblock);
6407 start_new_bblock = 1;
6411 MONO_INST_NEW (cfg, ins, OP_BR);
6413 target = ip + 1 + (signed char)(*ip);
6415 GET_BBLOCK (cfg, tblock, target);
6416 link_bblock (cfg, bblock, tblock);
6417 ins->inst_target_bb = tblock;
6418 if (sp != stack_start) {
6419 handle_stack_args (cfg, stack_start, sp - stack_start);
6421 CHECK_UNVERIFIABLE (cfg);
6423 MONO_ADD_INS (bblock, ins);
6424 start_new_bblock = 1;
6425 inline_costs += BRANCH_COST;
6439 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
6441 target = ip + 1 + *(signed char*)ip;
6447 inline_costs += BRANCH_COST;
6451 MONO_INST_NEW (cfg, ins, OP_BR);
6454 target = ip + 4 + (gint32)read32(ip);
6456 GET_BBLOCK (cfg, tblock, target);
6457 link_bblock (cfg, bblock, tblock);
6458 ins->inst_target_bb = tblock;
6459 if (sp != stack_start) {
6460 handle_stack_args (cfg, stack_start, sp - stack_start);
6462 CHECK_UNVERIFIABLE (cfg);
6465 MONO_ADD_INS (bblock, ins);
6467 start_new_bblock = 1;
6468 inline_costs += BRANCH_COST;
6475 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
6476 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
6477 guint32 opsize = is_short ? 1 : 4;
6479 CHECK_OPSIZE (opsize);
6481 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
6484 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
6489 GET_BBLOCK (cfg, tblock, target);
6490 link_bblock (cfg, bblock, tblock);
6491 GET_BBLOCK (cfg, tblock, ip);
6492 link_bblock (cfg, bblock, tblock);
6494 if (sp != stack_start) {
6495 handle_stack_args (cfg, stack_start, sp - stack_start);
6496 CHECK_UNVERIFIABLE (cfg);
6499 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
6500 cmp->sreg1 = sp [0]->dreg;
6501 type_from_op (cmp, sp [0], NULL);
6504 #if SIZEOF_VOID_P == 4
6505 if (cmp->opcode == OP_LCOMPARE_IMM) {
6506 /* Convert it to OP_LCOMPARE */
6507 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6508 ins->type = STACK_I8;
6509 ins->dreg = alloc_dreg (cfg, STACK_I8);
6511 MONO_ADD_INS (bblock, ins);
6512 cmp->opcode = OP_LCOMPARE;
6513 cmp->sreg2 = ins->dreg;
6516 MONO_ADD_INS (bblock, cmp);
6518 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
6519 type_from_op (ins, sp [0], NULL);
6520 MONO_ADD_INS (bblock, ins);
6521 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
6522 GET_BBLOCK (cfg, tblock, target);
6523 ins->inst_true_bb = tblock;
6524 GET_BBLOCK (cfg, tblock, ip);
6525 ins->inst_false_bb = tblock;
6526 start_new_bblock = 2;
6529 inline_costs += BRANCH_COST;
6544 MONO_INST_NEW (cfg, ins, *ip);
6546 target = ip + 4 + (gint32)read32(ip);
6552 inline_costs += BRANCH_COST;
6556 MonoBasicBlock **targets;
6557 MonoBasicBlock *default_bblock;
6558 MonoJumpInfoBBTable *table;
6560 int offset_reg = alloc_preg (cfg);
6561 int target_reg = alloc_preg (cfg);
6562 int table_reg = alloc_preg (cfg);
6563 int sum_reg = alloc_preg (cfg);
6568 n = read32 (ip + 1);
6571 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
6575 CHECK_OPSIZE (n * sizeof (guint32));
6576 target = ip + n * sizeof (guint32);
6578 GET_BBLOCK (cfg, default_bblock, target);
6580 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
6581 for (i = 0; i < n; ++i) {
6582 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
6583 targets [i] = tblock;
6587 if (sp != stack_start) {
6589 * Link the current bb with the targets as well, so handle_stack_args
6590 * will set their in_stack correctly.
6592 link_bblock (cfg, bblock, default_bblock);
6593 for (i = 0; i < n; ++i)
6594 link_bblock (cfg, bblock, targets [i]);
6596 handle_stack_args (cfg, stack_start, sp - stack_start);
6598 CHECK_UNVERIFIABLE (cfg);
6601 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
6602 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
6605 for (i = 0; i < n; ++i)
6606 link_bblock (cfg, bblock, targets [i]);
6608 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
6609 table->table = targets;
6610 table->table_size = n;
6613 /* ARM implements SWITCH statements differently */
6614 /* FIXME: Make it use the generic implementation */
6615 /* the backend code will deal with aot vs normal case */
6616 MONO_INST_NEW (cfg, ins, OP_SWITCH);
6617 ins->sreg1 = src1->dreg;
6618 ins->inst_p0 = table;
6619 ins->inst_many_bb = targets;
6620 ins->klass = GUINT_TO_POINTER (n);
6621 MONO_ADD_INS (cfg->cbb, ins);
6623 if (sizeof (gpointer) == 8)
6624 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
6626 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
6628 #if SIZEOF_VOID_P == 8
6629 /* The upper word might not be zero, and we add it to a 64 bit address later */
6630 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
6633 if (cfg->compile_aot) {
6634 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
6636 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
6637 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
6638 ins->inst_p0 = table;
6639 ins->dreg = table_reg;
6640 MONO_ADD_INS (cfg->cbb, ins);
6643 /* FIXME: Use load_memindex */
6644 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
6645 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
6646 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
6648 start_new_bblock = 1;
6649 inline_costs += (BRANCH_COST * 2);
6669 dreg = alloc_freg (cfg);
6672 dreg = alloc_lreg (cfg);
6675 dreg = alloc_preg (cfg);
6678 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
6679 ins->type = ldind_type [*ip - CEE_LDIND_I1];
6680 ins->flags |= ins_flag;
6682 MONO_ADD_INS (bblock, ins);
6697 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
6698 ins->flags |= ins_flag;
6700 MONO_ADD_INS (bblock, ins);
6708 MONO_INST_NEW (cfg, ins, (*ip));
6710 ins->sreg1 = sp [0]->dreg;
6711 ins->sreg2 = sp [1]->dreg;
6712 type_from_op (ins, sp [0], sp [1]);
6714 ins->dreg = alloc_dreg ((cfg), (ins)->type);
6716 /* Use the immediate opcodes if possible */
6717 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
6718 int imm_opcode = mono_op_to_op_imm (ins->opcode);
6719 if (imm_opcode != -1) {
6720 ins->opcode = imm_opcode;
6721 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
6724 sp [1]->opcode = OP_NOP;
6728 MONO_ADD_INS ((cfg)->cbb, (ins));
6731 mono_decompose_opcode (cfg, ins);
6748 MONO_INST_NEW (cfg, ins, (*ip));
6750 ins->sreg1 = sp [0]->dreg;
6751 ins->sreg2 = sp [1]->dreg;
6752 type_from_op (ins, sp [0], sp [1]);
6754 ADD_WIDEN_OP (ins, sp [0], sp [1]);
6755 ins->dreg = alloc_dreg ((cfg), (ins)->type);
6757 /* FIXME: Pass opcode to is_inst_imm */
6759 /* Use the immediate opcodes if possible */
6760 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
6763 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
6764 if (imm_opcode != -1) {
6765 ins->opcode = imm_opcode;
6766 if (sp [1]->opcode == OP_I8CONST) {
6767 #if SIZEOF_VOID_P == 8
6768 ins->inst_imm = sp [1]->inst_l;
6770 ins->inst_ls_word = sp [1]->inst_ls_word;
6771 ins->inst_ms_word = sp [1]->inst_ms_word;
6775 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
6778 sp [1]->opcode = OP_NOP;
6781 MONO_ADD_INS ((cfg)->cbb, (ins));
6784 mono_decompose_opcode (cfg, ins);
6797 case CEE_CONV_OVF_I8:
6798 case CEE_CONV_OVF_U8:
6802 /* Special case this earlier so we have long constants in the IR */
6803 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
6804 int data = sp [-1]->inst_c0;
6805 sp [-1]->opcode = OP_I8CONST;
6806 sp [-1]->type = STACK_I8;
6807 #if SIZEOF_VOID_P == 8
6808 if ((*ip) == CEE_CONV_U8)
6809 sp [-1]->inst_c0 = (guint32)data;
6811 sp [-1]->inst_c0 = data;
6813 sp [-1]->inst_ls_word = data;
6814 if ((*ip) == CEE_CONV_U8)
6815 sp [-1]->inst_ms_word = 0;
6817 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
6819 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
6826 case CEE_CONV_OVF_I4:
6827 case CEE_CONV_OVF_I1:
6828 case CEE_CONV_OVF_I2:
6829 case CEE_CONV_OVF_I:
6830 case CEE_CONV_OVF_U:
6833 if (sp [-1]->type == STACK_R8) {
6834 ADD_UNOP (CEE_CONV_OVF_I8);
6841 case CEE_CONV_OVF_U1:
6842 case CEE_CONV_OVF_U2:
6843 case CEE_CONV_OVF_U4:
6846 if (sp [-1]->type == STACK_R8) {
6847 ADD_UNOP (CEE_CONV_OVF_U8);
6854 case CEE_CONV_OVF_I1_UN:
6855 case CEE_CONV_OVF_I2_UN:
6856 case CEE_CONV_OVF_I4_UN:
6857 case CEE_CONV_OVF_I8_UN:
6858 case CEE_CONV_OVF_U1_UN:
6859 case CEE_CONV_OVF_U2_UN:
6860 case CEE_CONV_OVF_U4_UN:
6861 case CEE_CONV_OVF_U8_UN:
6862 case CEE_CONV_OVF_I_UN:
6863 case CEE_CONV_OVF_U_UN:
6873 case CEE_ADD_OVF_UN:
6875 case CEE_MUL_OVF_UN:
6877 case CEE_SUB_OVF_UN:
6885 token = read32 (ip + 1);
6886 klass = mini_get_class (method, token, generic_context);
6887 CHECK_TYPELOAD (klass);
6889 if (generic_class_is_reference_type (cfg, klass)) {
6890 MonoInst *store, *load;
6891 int dreg = alloc_preg (cfg);
6893 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
6894 load->flags |= ins_flag;
6895 MONO_ADD_INS (cfg->cbb, load);
6897 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
6898 store->flags |= ins_flag;
6899 MONO_ADD_INS (cfg->cbb, store);
6901 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
6913 token = read32 (ip + 1);
6914 klass = mini_get_class (method, token, generic_context);
6915 CHECK_TYPELOAD (klass);
6917 /* Optimize the common ldobj+stloc combination */
6927 loc_index = ip [5] - CEE_STLOC_0;
6934 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
6935 CHECK_LOCAL (loc_index);
6937 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
6938 ins->dreg = cfg->locals [loc_index]->dreg;
6944 /* Optimize the ldobj+stobj combination */
6945 /* The reference case ends up being a load+store anyway */
6946 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 9) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
6951 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
6958 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
6967 CHECK_STACK_OVF (1);
6969 n = read32 (ip + 1);
6971 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
6972 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
6973 ins->type = STACK_OBJ;
6976 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
6977 MonoInst *iargs [1];
6979 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
6980 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
6982 if (cfg->opt & MONO_OPT_SHARED) {
6983 MonoInst *iargs [3];
6985 if (cfg->compile_aot) {
6986 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
6988 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
6989 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
6990 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
6991 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
6992 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
6994 if (bblock->out_of_line) {
6995 MonoInst *iargs [2];
6997 if (cfg->method->klass->image == mono_defaults.corlib) {
6999 * Avoid relocations in AOT and save some space by using a
7000 * version of helper_ldstr specialized to mscorlib.
7002 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
7003 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
7005 /* Avoid creating the string object */
7006 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
7007 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
7008 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
7012 if (cfg->compile_aot) {
7013 NEW_LDSTRCONST (cfg, ins, image, n);
7015 MONO_ADD_INS (bblock, ins);
7018 NEW_PCONST (cfg, ins, NULL);
7019 ins->type = STACK_OBJ;
7020 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7022 MONO_ADD_INS (bblock, ins);
7031 MonoInst *iargs [2];
7032 MonoMethodSignature *fsig;
7035 MonoInst *vtable_arg = NULL;
7038 token = read32 (ip + 1);
7039 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7042 fsig = mono_method_get_signature (cmethod, image, token);
7044 mono_save_token_info (cfg, image, token, cmethod);
7046 if (!mono_class_init (cmethod->klass))
7049 if (cfg->generic_sharing_context)
7050 context_used = mono_method_check_context_used (cmethod);
7052 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
7053 if (check_linkdemand (cfg, method, cmethod))
7055 CHECK_CFG_EXCEPTION;
7056 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
7057 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
7060 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7061 mono_method_is_generic_sharable_impl (cmethod, TRUE)) {
7062 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7064 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7065 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7067 EMIT_NEW_METHOD_RGCTX_CONST (cfg, vtable_arg, cmethod);
7071 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
7072 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7074 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7076 CHECK_TYPELOAD (cmethod->klass);
7077 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7082 n = fsig->param_count;
7086 * Generate smaller code for the common newobj <exception> instruction in
7087 * argument checking code.
7089 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib && n <= 2 &&
7090 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
7091 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
7092 MonoInst *iargs [3];
7094 g_assert (!vtable_arg);
7098 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
7101 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
7105 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
7110 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
7113 g_assert_not_reached ();
7121 /* move the args to allow room for 'this' in the first position */
7127 /* check_call_signature () requires sp[0] to be set */
7128 this_ins.type = STACK_OBJ;
7130 if (check_call_signature (cfg, fsig, sp))
7135 if (mini_class_is_system_array (cmethod->klass)) {
7137 GENERIC_SHARING_FAILURE (*ip);
7138 g_assert (!context_used);
7139 g_assert (!vtable_arg);
7140 EMIT_NEW_METHODCONST (cfg, *sp, cmethod);
7142 /* Avoid varargs in the common case */
7143 if (fsig->param_count == 1)
7144 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
7145 else if (fsig->param_count == 2)
7146 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
7148 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
7149 } else if (cmethod->string_ctor) {
7150 g_assert (!context_used);
7151 g_assert (!vtable_arg);
7152 /* we simply pass a null pointer */
7153 EMIT_NEW_PCONST (cfg, *sp, NULL);
7154 /* now call the string ctor */
7155 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, sp, NULL, NULL);
7157 MonoInst* callvirt_this_arg = NULL;
7159 if (cmethod->klass->valuetype) {
7160 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
7161 MONO_EMIT_NEW_VZERO (cfg, iargs [0]->dreg, cmethod->klass);
7162 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
7167 * The code generated by mini_emit_virtual_call () expects
7168 * iargs [0] to be a boxed instance, but luckily the vcall
7169 * will be transformed into a normal call there.
7171 } else if (context_used) {
7175 if (cfg->opt & MONO_OPT_SHARED)
7176 rgctx_info = MONO_RGCTX_INFO_KLASS;
7178 rgctx_info = MONO_RGCTX_INFO_VTABLE;
7179 data = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, rgctx_info);
7181 alloc = handle_alloc_from_inst (cfg, cmethod->klass, data, FALSE);
7184 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7186 CHECK_TYPELOAD (cmethod->klass);
7189 * TypeInitializationExceptions thrown from the mono_runtime_class_init
7190 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
7191 * As a workaround, we call class cctors before allocating objects.
7193 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
7194 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
7195 if (cfg->verbose_level > 2)
7196 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
7197 class_inits = g_slist_prepend (class_inits, vtable);
7200 alloc = handle_alloc (cfg, cmethod->klass, FALSE);
7205 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
7207 /* Now call the actual ctor */
7208 /* Avoid virtual calls to ctors if possible */
7209 if (cmethod->klass->marshalbyref)
7210 callvirt_this_arg = sp [0];
7212 if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7213 mono_method_check_inlining (cfg, cmethod) &&
7214 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
7215 !g_list_find (dont_inline, cmethod)) {
7218 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
7219 cfg->real_offset += 5;
7222 inline_costs += costs - 5;
7225 mono_emit_method_call_full (cfg, cmethod, fsig, sp, callvirt_this_arg, NULL);
7227 } else if (context_used &&
7228 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
7229 !mono_class_generic_sharing_enabled (cmethod->klass))) {
7230 MonoInst *cmethod_addr;
7232 g_assert (!callvirt_this_arg);
7234 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7235 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7237 mono_emit_rgctx_calli (cfg, fsig, sp, cmethod_addr, vtable_arg);
7240 mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp,
7241 callvirt_this_arg, NULL, vtable_arg);
7245 if (alloc == NULL) {
7247 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
7248 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
7262 token = read32 (ip + 1);
7263 klass = mini_get_class (method, token, generic_context);
7264 CHECK_TYPELOAD (klass);
7265 if (sp [0]->type != STACK_OBJ)
7268 if (cfg->generic_sharing_context)
7269 context_used = mono_class_check_context_used (klass);
7278 args [1] = emit_get_rgctx_klass (cfg, context_used,
7279 klass, MONO_RGCTX_INFO_KLASS);
7281 ins = mono_emit_jit_icall (cfg, mono_object_castclass, args);
7285 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7286 MonoMethod *mono_castclass;
7287 MonoInst *iargs [1];
7290 mono_castclass = mono_marshal_get_castclass (klass);
7293 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7294 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7295 g_assert (costs > 0);
7298 cfg->real_offset += 5;
7303 inline_costs += costs;
7306 ins = handle_castclass (cfg, klass, *sp);
7316 token = read32 (ip + 1);
7317 klass = mini_get_class (method, token, generic_context);
7318 CHECK_TYPELOAD (klass);
7319 if (sp [0]->type != STACK_OBJ)
7322 if (cfg->generic_sharing_context)
7323 context_used = mono_class_check_context_used (klass);
7332 args [1] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
7334 *sp = mono_emit_jit_icall (cfg, mono_object_isinst, args);
7338 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7339 MonoMethod *mono_isinst;
7340 MonoInst *iargs [1];
7343 mono_isinst = mono_marshal_get_isinst (klass);
7346 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
7347 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7348 g_assert (costs > 0);
7351 cfg->real_offset += 5;
7356 inline_costs += costs;
7359 ins = handle_isinst (cfg, klass, *sp);
7366 case CEE_UNBOX_ANY: {
7370 token = read32 (ip + 1);
7371 klass = mini_get_class (method, token, generic_context);
7372 CHECK_TYPELOAD (klass);
7374 mono_save_token_info (cfg, image, token, klass);
7376 if (cfg->generic_sharing_context)
7377 context_used = mono_class_check_context_used (klass);
7379 if (generic_class_is_reference_type (cfg, klass)) {
7382 MonoInst *iargs [2];
7387 iargs [1] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
7388 ins = mono_emit_jit_icall (cfg, mono_object_castclass, iargs);
7392 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7393 MonoMethod *mono_castclass;
7394 MonoInst *iargs [1];
7397 mono_castclass = mono_marshal_get_castclass (klass);
7400 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7401 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7403 g_assert (costs > 0);
7406 cfg->real_offset += 5;
7410 inline_costs += costs;
7412 ins = handle_castclass (cfg, klass, *sp);
7420 if (mono_class_is_nullable (klass)) {
7421 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
7428 ins = handle_unbox (cfg, klass, sp, context_used);
7434 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7447 token = read32 (ip + 1);
7448 klass = mini_get_class (method, token, generic_context);
7449 CHECK_TYPELOAD (klass);
7451 mono_save_token_info (cfg, image, token, klass);
7453 if (cfg->generic_sharing_context)
7454 context_used = mono_class_check_context_used (klass);
7456 if (generic_class_is_reference_type (cfg, klass)) {
7462 if (klass == mono_defaults.void_class)
7464 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
7466 /* frequent check in generic code: box (struct), brtrue */
7467 if (!mono_class_is_nullable (klass) &&
7468 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) && (ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S)) {
7469 /*printf ("box-brtrue opt at 0x%04x in %s\n", real_offset, method->name);*/
7471 MONO_INST_NEW (cfg, ins, OP_BR);
7472 if (*ip == CEE_BRTRUE_S) {
7475 target = ip + 1 + (signed char)(*ip);
7480 target = ip + 4 + (gint)(read32 (ip));
7483 GET_BBLOCK (cfg, tblock, target);
7484 link_bblock (cfg, bblock, tblock);
7485 ins->inst_target_bb = tblock;
7486 GET_BBLOCK (cfg, tblock, ip);
7488 * This leads to some inconsistency, since the two bblocks are not
7489 * really connected, but it is needed for handling stack arguments
7490 * correct (See test_0_box_brtrue_opt_regress_81102).
7492 link_bblock (cfg, bblock, tblock);
7493 if (sp != stack_start) {
7494 handle_stack_args (cfg, stack_start, sp - stack_start);
7496 CHECK_UNVERIFIABLE (cfg);
7498 MONO_ADD_INS (bblock, ins);
7499 start_new_bblock = 1;
7507 if (cfg->opt & MONO_OPT_SHARED)
7508 rgctx_info = MONO_RGCTX_INFO_KLASS;
7510 rgctx_info = MONO_RGCTX_INFO_VTABLE;
7511 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
7512 *sp++ = handle_box_from_inst (cfg, val, klass, context_used, data);
7514 *sp++ = handle_box (cfg, val, klass);
7525 token = read32 (ip + 1);
7526 klass = mini_get_class (method, token, generic_context);
7527 CHECK_TYPELOAD (klass);
7529 mono_save_token_info (cfg, image, token, klass);
7531 if (cfg->generic_sharing_context)
7532 context_used = mono_class_check_context_used (klass);
7534 if (mono_class_is_nullable (klass)) {
7537 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
7538 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
7542 ins = handle_unbox (cfg, klass, sp, context_used);
7552 MonoClassField *field;
7556 if (*ip == CEE_STFLD) {
7563 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
7565 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
7568 token = read32 (ip + 1);
7569 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7570 field = mono_method_get_wrapper_data (method, token);
7571 klass = field->parent;
7574 field = mono_field_from_token (image, token, &klass, generic_context);
7578 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
7579 FIELD_ACCESS_FAILURE;
7580 mono_class_init (klass);
7582 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
7583 if (*ip == CEE_STFLD) {
7584 if (target_type_is_incompatible (cfg, field->type, sp [1]))
7586 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
7587 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
7588 MonoInst *iargs [5];
7591 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
7592 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
7593 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
7597 if (cfg->opt & MONO_OPT_INLINE) {
7598 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
7599 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7600 g_assert (costs > 0);
7603 cfg->real_offset += 5;
7606 inline_costs += costs;
7609 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
7614 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
7616 store->flags |= ins_flag;
7623 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
7624 MonoMethod *wrapper = (*ip == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
7625 MonoInst *iargs [4];
7628 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
7629 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
7630 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
7631 if ((cfg->opt & MONO_OPT_INLINE) && !MONO_TYPE_ISSTRUCT (mono_method_signature (wrapper)->ret)) {
7632 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
7633 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7635 g_assert (costs > 0);
7638 cfg->real_offset += 5;
7642 inline_costs += costs;
7645 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
7649 if (sp [0]->type == STACK_VTYPE) {
7652 /* Have to compute the address of the variable */
7654 var = get_vreg_to_inst (cfg, sp [0]->dreg);
7656 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
7658 g_assert (var->klass == klass);
7660 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
7664 if (*ip == CEE_LDFLDA) {
7665 dreg = alloc_preg (cfg);
7667 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
7668 ins->klass = mono_class_from_mono_type (field->type);
7669 ins->type = STACK_MP;
7674 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
7675 load->flags |= ins_flag;
7686 MonoClassField *field;
7687 gpointer addr = NULL;
7688 gboolean is_special_static;
7691 token = read32 (ip + 1);
7693 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7694 field = mono_method_get_wrapper_data (method, token);
7695 klass = field->parent;
7698 field = mono_field_from_token (image, token, &klass, generic_context);
7701 mono_class_init (klass);
7702 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
7703 FIELD_ACCESS_FAILURE;
7706 * We can only support shared generic static
7707 * field access on architectures where the
7708 * trampoline code has been extended to handle
7709 * the generic class init.
7711 #ifndef MONO_ARCH_VTABLE_REG
7712 GENERIC_SHARING_FAILURE (*ip);
7715 if (cfg->generic_sharing_context)
7716 context_used = mono_class_check_context_used (klass);
7718 g_assert (!(field->type->attrs & FIELD_ATTRIBUTE_LITERAL));
7720 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
7721 * to be called here.
7723 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
7724 mono_class_vtable (cfg->domain, klass);
7725 CHECK_TYPELOAD (klass);
7727 mono_domain_lock (cfg->domain);
7728 if (cfg->domain->special_static_fields)
7729 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
7730 mono_domain_unlock (cfg->domain);
7732 is_special_static = mono_class_field_is_special_static (field);
7734 /* Generate IR to compute the field address */
7736 if ((cfg->opt & MONO_OPT_SHARED) ||
7737 (cfg->compile_aot && is_special_static) ||
7738 (context_used && is_special_static)) {
7739 MonoInst *iargs [2];
7741 g_assert (field->parent);
7742 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7744 iargs [1] = emit_get_rgctx_field (cfg, context_used,
7745 field, MONO_RGCTX_INFO_CLASS_FIELD);
7747 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
7749 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
7750 } else if (context_used) {
7751 MonoInst *static_data;
7754 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
7755 method->klass->name_space, method->klass->name, method->name,
7756 depth, field->offset);
7759 if (mono_class_needs_cctor_run (klass, method)) {
7763 vtable = emit_get_rgctx_klass (cfg, context_used,
7764 klass, MONO_RGCTX_INFO_VTABLE);
7766 // FIXME: This doesn't work since it tries to pass the argument
7767 // in the normal way, instead of using MONO_ARCH_VTABLE_REG
7769 * The vtable pointer is always passed in a register regardless of
7770 * the calling convention, so assign it manually, and make a call
7771 * using a signature without parameters.
7773 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable);
7774 #ifdef MONO_ARCH_VTABLE_REG
7775 mono_call_inst_add_outarg_reg (cfg, call, vtable->dreg, MONO_ARCH_VTABLE_REG, FALSE);
7776 cfg->uses_vtable_reg = TRUE;
7783 * The pointer we're computing here is
7785 * super_info.static_data + field->offset
7787 static_data = emit_get_rgctx_klass (cfg, context_used,
7788 klass, MONO_RGCTX_INFO_STATIC_DATA);
7790 if (field->offset == 0) {
7793 int addr_reg = mono_alloc_preg (cfg);
7794 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
7796 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
7797 MonoInst *iargs [2];
7799 g_assert (field->parent);
7800 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7801 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
7802 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
7804 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
7806 CHECK_TYPELOAD (klass);
7808 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
7809 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
7810 if (cfg->verbose_level > 2)
7811 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, field->name);
7812 class_inits = g_slist_prepend (class_inits, vtable);
7814 if (cfg->run_cctors) {
7816 /* This makes so that inline cannot trigger */
7817 /* .cctors: too many apps depend on them */
7818 /* running with a specific order... */
7819 if (! vtable->initialized)
7821 ex = mono_runtime_class_init_full (vtable, FALSE);
7823 set_exception_object (cfg, ex);
7824 goto exception_exit;
7828 addr = (char*)vtable->data + field->offset;
7830 if (cfg->compile_aot)
7831 EMIT_NEW_SFLDACONST (cfg, ins, field);
7833 EMIT_NEW_PCONST (cfg, ins, addr);
7836 * insert call to mono_threads_get_static_data (GPOINTER_TO_UINT (addr))
7837 * This could be later optimized to do just a couple of
7838 * memory dereferences with constant offsets.
7840 MonoInst *iargs [1];
7841 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
7842 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
7846 /* Generate IR to do the actual load/store operation */
7848 if (*ip == CEE_LDSFLDA) {
7849 ins->klass = mono_class_from_mono_type (field->type);
7851 } else if (*ip == CEE_STSFLD) {
7856 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, ins->dreg, 0, sp [0]->dreg);
7857 store->flags |= ins_flag;
7860 gboolean is_const = FALSE;
7861 MonoVTable *vtable = NULL;
7863 if (!context_used) {
7864 vtable = mono_class_vtable (cfg->domain, klass);
7865 CHECK_TYPELOAD (klass);
7867 if (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) &&
7868 vtable->initialized && (field->type->attrs & FIELD_ATTRIBUTE_INIT_ONLY)) {
7869 gpointer addr = (char*)vtable->data + field->offset;
7870 int ro_type = field->type->type;
7871 if (ro_type == MONO_TYPE_VALUETYPE && field->type->data.klass->enumtype) {
7872 ro_type = field->type->data.klass->enum_basetype->type;
7874 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, field->name);*/
7877 case MONO_TYPE_BOOLEAN:
7879 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
7883 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
7886 case MONO_TYPE_CHAR:
7888 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
7892 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
7897 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
7901 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
7906 case MONO_TYPE_STRING:
7907 case MONO_TYPE_OBJECT:
7908 case MONO_TYPE_CLASS:
7909 case MONO_TYPE_SZARRAY:
7911 case MONO_TYPE_FNPTR:
7912 case MONO_TYPE_ARRAY:
7913 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
7914 type_to_eval_stack_type ((cfg), field->type, *sp);
7919 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
7924 case MONO_TYPE_VALUETYPE:
7934 CHECK_STACK_OVF (1);
7936 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
7937 load->flags |= ins_flag;
7949 token = read32 (ip + 1);
7950 klass = mini_get_class (method, token, generic_context);
7951 CHECK_TYPELOAD (klass);
7952 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
7953 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
7964 const char *data_ptr;
7971 token = read32 (ip + 1);
7973 klass = mini_get_class (method, token, generic_context);
7974 CHECK_TYPELOAD (klass);
7976 if (cfg->generic_sharing_context)
7977 context_used = mono_class_check_context_used (klass);
7982 /* FIXME: Decompose later to help abcrem */
7985 args [0] = emit_get_rgctx_klass (cfg, context_used,
7986 mono_array_class_get (klass, 1), MONO_RGCTX_INFO_VTABLE);
7991 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
7993 if (cfg->opt & MONO_OPT_SHARED) {
7994 /* Decompose now to avoid problems with references to the domainvar */
7995 MonoInst *iargs [3];
7997 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7998 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8001 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
8003 /* Decompose later since it is needed by abcrem */
8004 MONO_INST_NEW (cfg, ins, OP_NEWARR);
8005 ins->dreg = alloc_preg (cfg);
8006 ins->sreg1 = sp [0]->dreg;
8007 ins->inst_newa_class = klass;
8008 ins->type = STACK_OBJ;
8010 MONO_ADD_INS (cfg->cbb, ins);
8011 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8012 cfg->cbb->has_array_access = TRUE;
8014 /* Needed so mono_emit_load_get_addr () gets called */
8015 mono_get_got_var (cfg);
8025 * we inline/optimize the initialization sequence if possible.
8026 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
8027 * for small sizes open code the memcpy
8028 * ensure the rva field is big enough
8030 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size))) {
8031 MonoMethod *memcpy_method = get_memcpy_method ();
8032 MonoInst *iargs [3];
8033 int add_reg = alloc_preg (cfg);
8035 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
8036 if (cfg->compile_aot) {
8037 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(data_ptr), STACK_PTR, NULL);
8039 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
8041 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
8042 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
8051 if (sp [0]->type != STACK_OBJ)
8054 dreg = alloc_preg (cfg);
8055 MONO_INST_NEW (cfg, ins, OP_LDLEN);
8056 ins->dreg = alloc_preg (cfg);
8057 ins->sreg1 = sp [0]->dreg;
8058 ins->type = STACK_I4;
8059 MONO_ADD_INS (cfg->cbb, ins);
8060 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8061 cfg->cbb->has_array_access = TRUE;
8069 if (sp [0]->type != STACK_OBJ)
8072 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8074 klass = mini_get_class (method, read32 (ip + 1), generic_context);
8075 CHECK_TYPELOAD (klass);
8076 /* we need to make sure that this array is exactly the type it needs
8077 * to be for correctness. the wrappers are lax with their usage
8078 * so we need to ignore them here
8080 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly)
8081 mini_emit_check_array_type (cfg, sp [0], mono_array_class_get (klass, 1));
8084 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8088 case CEE_LDELEM_ANY:
8099 case CEE_LDELEM_REF: {
8105 if (*ip == CEE_LDELEM_ANY) {
8107 token = read32 (ip + 1);
8108 klass = mini_get_class (method, token, generic_context);
8109 CHECK_TYPELOAD (klass);
8110 mono_class_init (klass);
8113 klass = array_access_to_klass (*ip);
8115 if (sp [0]->type != STACK_OBJ)
8118 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8120 if (sp [1]->opcode == OP_ICONST) {
8121 int array_reg = sp [0]->dreg;
8122 int index_reg = sp [1]->dreg;
8123 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8125 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8126 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
8128 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8129 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
8132 if (*ip == CEE_LDELEM_ANY)
8145 case CEE_STELEM_REF:
8146 case CEE_STELEM_ANY: {
8152 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8154 if (*ip == CEE_STELEM_ANY) {
8156 token = read32 (ip + 1);
8157 klass = mini_get_class (method, token, generic_context);
8158 CHECK_TYPELOAD (klass);
8159 mono_class_init (klass);
8162 klass = array_access_to_klass (*ip);
8164 if (sp [0]->type != STACK_OBJ)
8167 /* storing a NULL doesn't need any of the complex checks in stelemref */
8168 if (generic_class_is_reference_type (cfg, klass) &&
8169 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
8170 MonoMethod* helper = mono_marshal_get_stelemref ();
8171 MonoInst *iargs [3];
8173 if (sp [0]->type != STACK_OBJ)
8175 if (sp [2]->type != STACK_OBJ)
8182 mono_emit_method_call (cfg, helper, iargs, NULL);
8184 if (sp [1]->opcode == OP_ICONST) {
8185 int array_reg = sp [0]->dreg;
8186 int index_reg = sp [1]->dreg;
8187 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8189 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8190 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
8192 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8193 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
8197 if (*ip == CEE_STELEM_ANY)
8204 case CEE_CKFINITE: {
8208 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
8209 ins->sreg1 = sp [0]->dreg;
8210 ins->dreg = alloc_freg (cfg);
8211 ins->type = STACK_R8;
8212 MONO_ADD_INS (bblock, ins);
8215 mono_decompose_opcode (cfg, ins);
8220 case CEE_REFANYVAL: {
8221 MonoInst *src_var, *src;
8223 int klass_reg = alloc_preg (cfg);
8224 int dreg = alloc_preg (cfg);
8227 MONO_INST_NEW (cfg, ins, *ip);
8230 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8231 CHECK_TYPELOAD (klass);
8232 mono_class_init (klass);
8234 if (cfg->generic_sharing_context)
8235 context_used = mono_class_check_context_used (klass);
8238 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
8240 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
8241 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
8242 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
8245 MonoInst *klass_ins;
8247 klass_ins = emit_get_rgctx_klass (cfg, context_used,
8248 klass, MONO_RGCTX_INFO_KLASS);
8251 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
8252 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
8254 mini_emit_class_check (cfg, klass_reg, klass);
8256 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
8257 ins->type = STACK_MP;
8262 case CEE_MKREFANY: {
8263 MonoInst *loc, *addr;
8266 MONO_INST_NEW (cfg, ins, *ip);
8269 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8270 CHECK_TYPELOAD (klass);
8271 mono_class_init (klass);
8273 if (cfg->generic_sharing_context)
8274 context_used = mono_class_check_context_used (klass);
8276 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
8277 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
8280 MonoInst *const_ins;
8281 int type_reg = alloc_preg (cfg);
8283 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
8284 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
8285 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8286 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8287 } else if (cfg->compile_aot) {
8288 int const_reg = alloc_preg (cfg);
8289 int type_reg = alloc_preg (cfg);
8291 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
8292 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
8293 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8294 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8296 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
8297 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
8299 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
8301 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
8302 ins->type = STACK_VTYPE;
8303 ins->klass = mono_defaults.typed_reference_class;
8310 MonoClass *handle_class;
8312 CHECK_STACK_OVF (1);
8315 n = read32 (ip + 1);
8317 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
8318 handle = mono_method_get_wrapper_data (method, n);
8319 handle_class = mono_method_get_wrapper_data (method, n + 1);
8320 if (handle_class == mono_defaults.typehandle_class)
8321 handle = &((MonoClass*)handle)->byval_arg;
8324 handle = mono_ldtoken (image, n, &handle_class, generic_context);
8328 mono_class_init (handle_class);
8329 if (cfg->generic_sharing_context) {
8330 if (handle_class == mono_defaults.typehandle_class) {
8331 /* If we get a MONO_TYPE_CLASS
8332 then we need to provide the
8334 instantiation of it. */
8335 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
8338 context_used = mono_class_check_context_used (mono_class_from_mono_type (handle));
8339 } else if (handle_class == mono_defaults.fieldhandle_class)
8340 context_used = mono_class_check_context_used (((MonoClassField*)handle)->parent);
8341 else if (handle_class == mono_defaults.methodhandle_class)
8342 context_used = mono_method_check_context_used (handle);
8344 g_assert_not_reached ();
8347 if (cfg->opt & MONO_OPT_SHARED) {
8348 MonoInst *addr, *vtvar, *iargs [3];
8349 int method_context_used;
8351 if (cfg->generic_sharing_context)
8352 method_context_used = mono_method_check_context_used (method);
8354 method_context_used = 0;
8356 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8358 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
8359 EMIT_NEW_ICONST (cfg, iargs [1], n);
8360 if (method_context_used) {
8361 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
8362 method, MONO_RGCTX_INFO_METHOD);
8363 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
8365 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
8366 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
8368 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8370 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8372 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8374 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
8375 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
8376 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
8377 (cmethod->klass == mono_defaults.monotype_class->parent) &&
8378 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
8379 MonoClass *tclass = mono_class_from_mono_type (handle);
8381 mono_class_init (tclass);
8383 ins = emit_get_rgctx_klass (cfg, context_used,
8384 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
8385 } else if (cfg->compile_aot) {
8386 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n);
8388 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
8390 ins->type = STACK_OBJ;
8391 ins->klass = cmethod->klass;
8394 MonoInst *addr, *vtvar;
8396 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8399 if (handle_class == mono_defaults.typehandle_class) {
8400 ins = emit_get_rgctx_klass (cfg, context_used,
8401 mono_class_from_mono_type (handle),
8402 MONO_RGCTX_INFO_TYPE);
8403 } else if (handle_class == mono_defaults.methodhandle_class) {
8404 ins = emit_get_rgctx_method (cfg, context_used,
8405 handle, MONO_RGCTX_INFO_METHOD);
8406 } else if (handle_class == mono_defaults.fieldhandle_class) {
8407 ins = emit_get_rgctx_field (cfg, context_used,
8408 handle, MONO_RGCTX_INFO_CLASS_FIELD);
8410 g_assert_not_reached ();
8412 } else if (cfg->compile_aot) {
8413 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n);
8415 EMIT_NEW_PCONST (cfg, ins, handle);
8417 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8418 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8419 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8429 MONO_INST_NEW (cfg, ins, OP_THROW);
8431 ins->sreg1 = sp [0]->dreg;
8433 bblock->out_of_line = TRUE;
8434 MONO_ADD_INS (bblock, ins);
8435 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
8436 MONO_ADD_INS (bblock, ins);
8439 link_bblock (cfg, bblock, end_bblock);
8440 start_new_bblock = 1;
8442 case CEE_ENDFINALLY:
8443 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
8444 MONO_ADD_INS (bblock, ins);
8446 start_new_bblock = 1;
8449 * Control will leave the method so empty the stack, otherwise
8450 * the next basic block will start with a nonempty stack.
8452 while (sp != stack_start) {
8460 if (*ip == CEE_LEAVE) {
8462 target = ip + 5 + (gint32)read32(ip + 1);
8465 target = ip + 2 + (signed char)(ip [1]);
8468 /* empty the stack */
8469 while (sp != stack_start) {
8474 * If this leave statement is in a catch block, check for a
8475 * pending exception, and rethrow it if necessary.
8477 for (i = 0; i < header->num_clauses; ++i) {
8478 MonoExceptionClause *clause = &header->clauses [i];
8481 * Use <= in the final comparison to handle clauses with multiple
8482 * leave statements, like in bug #78024.
8483 * The ordering of the exception clauses guarantees that we find the
8486 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len)) {
8488 MonoBasicBlock *dont_throw;
8493 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
8496 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
8498 NEW_BBLOCK (cfg, dont_throw);
8501 * Currently, we allways rethrow the abort exception, despite the
8502 * fact that this is not correct. See thread6.cs for an example.
8503 * But propagating the abort exception is more important than
8504 * getting the sematics right.
8506 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
8507 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
8508 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
8510 MONO_START_BB (cfg, dont_throw);
8515 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
8517 for (tmp = handlers; tmp; tmp = tmp->next) {
8519 link_bblock (cfg, bblock, tblock);
8520 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
8521 ins->inst_target_bb = tblock;
8522 MONO_ADD_INS (bblock, ins);
8524 g_list_free (handlers);
8527 MONO_INST_NEW (cfg, ins, OP_BR);
8528 MONO_ADD_INS (bblock, ins);
8529 GET_BBLOCK (cfg, tblock, target);
8530 link_bblock (cfg, bblock, tblock);
8531 ins->inst_target_bb = tblock;
8532 start_new_bblock = 1;
8534 if (*ip == CEE_LEAVE)
8543 * Mono specific opcodes
8545 case MONO_CUSTOM_PREFIX: {
8547 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
8551 case CEE_MONO_ICALL: {
8553 MonoJitICallInfo *info;
8555 token = read32 (ip + 2);
8556 func = mono_method_get_wrapper_data (method, token);
8557 info = mono_find_jit_icall_by_addr (func);
8560 CHECK_STACK (info->sig->param_count);
8561 sp -= info->sig->param_count;
8563 ins = mono_emit_jit_icall (cfg, info->func, sp);
8564 if (!MONO_TYPE_IS_VOID (info->sig->ret))
8568 inline_costs += 10 * num_calls++;
8572 case CEE_MONO_LDPTR: {
8575 CHECK_STACK_OVF (1);
8577 token = read32 (ip + 2);
8579 ptr = mono_method_get_wrapper_data (method, token);
8580 if (cfg->compile_aot && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (strstr (method->name, "__icall_wrapper_") == method->name)) {
8581 MonoJitICallInfo *callinfo;
8582 const char *icall_name;
8584 icall_name = method->name + strlen ("__icall_wrapper_");
8585 g_assert (icall_name);
8586 callinfo = mono_find_jit_icall_by_name (icall_name);
8587 g_assert (callinfo);
8589 if (ptr == callinfo->func) {
8590 /* Will be transformed into an AOTCONST later */
8591 EMIT_NEW_PCONST (cfg, ins, ptr);
8597 /* FIXME: Generalize this */
8598 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
8599 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
8604 EMIT_NEW_PCONST (cfg, ins, ptr);
8607 inline_costs += 10 * num_calls++;
8608 /* Can't embed random pointers into AOT code */
8609 cfg->disable_aot = 1;
8612 case CEE_MONO_ICALL_ADDR: {
8613 MonoMethod *cmethod;
8616 CHECK_STACK_OVF (1);
8618 token = read32 (ip + 2);
8620 cmethod = mono_method_get_wrapper_data (method, token);
8622 if (cfg->compile_aot) {
8623 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
8625 ptr = mono_lookup_internal_call (cmethod);
8627 EMIT_NEW_PCONST (cfg, ins, ptr);
8633 case CEE_MONO_VTADDR: {
8634 MonoInst *src_var, *src;
8640 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
8641 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
8646 case CEE_MONO_NEWOBJ: {
8647 MonoInst *iargs [2];
8649 CHECK_STACK_OVF (1);
8651 token = read32 (ip + 2);
8652 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
8653 mono_class_init (klass);
8654 NEW_DOMAINCONST (cfg, iargs [0]);
8655 MONO_ADD_INS (cfg->cbb, iargs [0]);
8656 NEW_CLASSCONST (cfg, iargs [1], klass);
8657 MONO_ADD_INS (cfg->cbb, iargs [1]);
8658 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
8660 inline_costs += 10 * num_calls++;
8663 case CEE_MONO_OBJADDR:
8666 MONO_INST_NEW (cfg, ins, OP_MOVE);
8667 ins->dreg = alloc_preg (cfg);
8668 ins->sreg1 = sp [0]->dreg;
8669 ins->type = STACK_MP;
8670 MONO_ADD_INS (cfg->cbb, ins);
8674 case CEE_MONO_LDNATIVEOBJ:
8676 * Similar to LDOBJ, but instead load the unmanaged
8677 * representation of the vtype to the stack.
8682 token = read32 (ip + 2);
8683 klass = mono_method_get_wrapper_data (method, token);
8684 g_assert (klass->valuetype);
8685 mono_class_init (klass);
8688 MonoInst *src, *dest, *temp;
8691 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
8692 temp->backend.is_pinvoke = 1;
8693 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
8694 mini_emit_stobj (cfg, dest, src, klass, TRUE);
8696 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
8697 dest->type = STACK_VTYPE;
8698 dest->klass = klass;
8704 case CEE_MONO_RETOBJ: {
8706 * Same as RET, but return the native representation of a vtype
8709 g_assert (cfg->ret);
8710 g_assert (mono_method_signature (method)->pinvoke);
8715 token = read32 (ip + 2);
8716 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
8718 if (!cfg->vret_addr) {
8719 g_assert (cfg->ret_var_is_local);
8721 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
8723 EMIT_NEW_RETLOADA (cfg, ins);
8725 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
8727 if (sp != stack_start)
8730 MONO_INST_NEW (cfg, ins, OP_BR);
8731 ins->inst_target_bb = end_bblock;
8732 MONO_ADD_INS (bblock, ins);
8733 link_bblock (cfg, bblock, end_bblock);
8734 start_new_bblock = 1;
8738 case CEE_MONO_CISINST:
8739 case CEE_MONO_CCASTCLASS: {
8744 token = read32 (ip + 2);
8745 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
8746 if (ip [1] == CEE_MONO_CISINST)
8747 ins = handle_cisinst (cfg, klass, sp [0]);
8749 ins = handle_ccastclass (cfg, klass, sp [0]);
8755 case CEE_MONO_SAVE_LMF:
8756 case CEE_MONO_RESTORE_LMF:
8757 #ifdef MONO_ARCH_HAVE_LMF_OPS
8758 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
8759 MONO_ADD_INS (bblock, ins);
8760 cfg->need_lmf_area = TRUE;
8764 case CEE_MONO_CLASSCONST:
8765 CHECK_STACK_OVF (1);
8767 token = read32 (ip + 2);
8768 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
8771 inline_costs += 10 * num_calls++;
8773 case CEE_MONO_NOT_TAKEN:
8774 bblock->out_of_line = TRUE;
8778 CHECK_STACK_OVF (1);
8780 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
8781 ins->dreg = alloc_preg (cfg);
8782 ins->inst_offset = (gint32)read32 (ip + 2);
8783 ins->type = STACK_PTR;
8784 MONO_ADD_INS (bblock, ins);
8789 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
8799 /* somewhat similar to LDTOKEN */
8800 MonoInst *addr, *vtvar;
8801 CHECK_STACK_OVF (1);
8802 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
8804 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8805 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
8807 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8808 ins->type = STACK_VTYPE;
8809 ins->klass = mono_defaults.argumenthandle_class;
8822 * The following transforms:
8823 * CEE_CEQ into OP_CEQ
8824 * CEE_CGT into OP_CGT
8825 * CEE_CGT_UN into OP_CGT_UN
8826 * CEE_CLT into OP_CLT
8827 * CEE_CLT_UN into OP_CLT_UN
8829 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
8831 MONO_INST_NEW (cfg, ins, cmp->opcode);
8833 cmp->sreg1 = sp [0]->dreg;
8834 cmp->sreg2 = sp [1]->dreg;
8835 type_from_op (cmp, sp [0], sp [1]);
8837 if ((sp [0]->type == STACK_I8) || ((sizeof (gpointer) == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
8838 cmp->opcode = OP_LCOMPARE;
8839 else if (sp [0]->type == STACK_R8)
8840 cmp->opcode = OP_FCOMPARE;
8842 cmp->opcode = OP_ICOMPARE;
8843 MONO_ADD_INS (bblock, cmp);
8844 ins->type = STACK_I4;
8845 ins->dreg = alloc_dreg (cfg, ins->type);
8846 type_from_op (ins, sp [0], sp [1]);
8848 if (cmp->opcode == OP_FCOMPARE) {
8850 * The backends expect the fceq opcodes to do the
8853 cmp->opcode = OP_NOP;
8854 ins->sreg1 = cmp->sreg1;
8855 ins->sreg2 = cmp->sreg2;
8857 MONO_ADD_INS (bblock, ins);
8864 MonoMethod *cil_method, *ctor_method;
8865 gboolean needs_static_rgctx_invoke;
8867 CHECK_STACK_OVF (1);
8869 n = read32 (ip + 2);
8870 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
8873 mono_class_init (cmethod->klass);
8875 mono_save_token_info (cfg, image, n, cmethod);
8877 if (cfg->generic_sharing_context)
8878 context_used = mono_method_check_context_used (cmethod);
8880 needs_static_rgctx_invoke = mono_method_needs_static_rgctx_invoke (cmethod, TRUE);
8882 cil_method = cmethod;
8883 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
8884 METHOD_ACCESS_FAILURE;
8886 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
8887 if (check_linkdemand (cfg, method, cmethod))
8889 CHECK_CFG_EXCEPTION;
8890 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
8891 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
8895 * Optimize the common case of ldftn+delegate creation
8897 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE) && !defined(HAVE_WRITE_BARRIERS)
8898 /* FIXME: SGEN support */
8899 /* FIXME: handle shared static generic methods */
8900 /* FIXME: handle this in shared code */
8901 if (!needs_static_rgctx_invoke && !context_used && (sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ) && (ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context)) && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
8902 MonoInst *target_ins;
8905 if (cfg->verbose_level > 3)
8906 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
8907 target_ins = sp [-1];
8909 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod);
8917 if (needs_static_rgctx_invoke)
8918 cmethod = mono_marshal_get_static_rgctx_invoke (cmethod);
8920 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
8921 } else if (needs_static_rgctx_invoke) {
8922 EMIT_NEW_METHODCONST (cfg, argconst, mono_marshal_get_static_rgctx_invoke (cmethod));
8924 EMIT_NEW_METHODCONST (cfg, argconst, cmethod);
8926 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
8930 inline_costs += 10 * num_calls++;
8933 case CEE_LDVIRTFTN: {
8938 n = read32 (ip + 2);
8939 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
8942 mono_class_init (cmethod->klass);
8944 if (cfg->generic_sharing_context)
8945 context_used = mono_method_check_context_used (cmethod);
8947 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
8948 if (check_linkdemand (cfg, method, cmethod))
8950 CHECK_CFG_EXCEPTION;
8951 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
8952 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
8959 args [1] = emit_get_rgctx_method (cfg, context_used,
8960 cmethod, MONO_RGCTX_INFO_METHOD);
8961 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
8963 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
8964 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
8968 inline_costs += 10 * num_calls++;
8972 CHECK_STACK_OVF (1);
8974 n = read16 (ip + 2);
8976 EMIT_NEW_ARGLOAD (cfg, ins, n);
8981 CHECK_STACK_OVF (1);
8983 n = read16 (ip + 2);
8985 NEW_ARGLOADA (cfg, ins, n);
8986 MONO_ADD_INS (cfg->cbb, ins);
8994 n = read16 (ip + 2);
8996 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
8998 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
9002 CHECK_STACK_OVF (1);
9004 n = read16 (ip + 2);
9006 EMIT_NEW_LOCLOAD (cfg, ins, n);
9011 unsigned char *tmp_ip;
9012 CHECK_STACK_OVF (1);
9014 n = read16 (ip + 2);
9017 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
9023 EMIT_NEW_LOCLOADA (cfg, ins, n);
9032 n = read16 (ip + 2);
9034 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
9036 emit_stloc_ir (cfg, sp, header, n);
9043 if (sp != stack_start)
9045 if (cfg->method != method)
9047 * Inlining this into a loop in a parent could lead to
9048 * stack overflows which is different behavior than the
9049 * non-inlined case, thus disable inlining in this case.
9051 goto inline_failure;
9053 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
9054 ins->dreg = alloc_preg (cfg);
9055 ins->sreg1 = sp [0]->dreg;
9056 ins->type = STACK_PTR;
9057 MONO_ADD_INS (cfg->cbb, ins);
9059 cfg->flags |= MONO_CFG_HAS_ALLOCA;
9060 if (header->init_locals)
9061 ins->flags |= MONO_INST_INIT;
9066 case CEE_ENDFILTER: {
9067 MonoExceptionClause *clause, *nearest;
9068 int cc, nearest_num;
9072 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
9074 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
9075 ins->sreg1 = (*sp)->dreg;
9076 MONO_ADD_INS (bblock, ins);
9077 start_new_bblock = 1;
9082 for (cc = 0; cc < header->num_clauses; ++cc) {
9083 clause = &header->clauses [cc];
9084 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
9085 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
9086 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
9092 if ((ip - header->code) != nearest->handler_offset)
9097 case CEE_UNALIGNED_:
9098 ins_flag |= MONO_INST_UNALIGNED;
9099 /* FIXME: record alignment? we can assume 1 for now */
9104 ins_flag |= MONO_INST_VOLATILE;
9108 ins_flag |= MONO_INST_TAILCALL;
9109 cfg->flags |= MONO_CFG_HAS_TAIL;
9110 /* Can't inline tail calls at this time */
9111 inline_costs += 100000;
9118 token = read32 (ip + 2);
9119 klass = mini_get_class (method, token, generic_context);
9120 CHECK_TYPELOAD (klass);
9121 if (generic_class_is_reference_type (cfg, klass))
9122 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
9124 mini_emit_initobj (cfg, *sp, NULL, klass);
9128 case CEE_CONSTRAINED_:
9130 token = read32 (ip + 2);
9131 constrained_call = mono_class_get_full (image, token, generic_context);
9132 CHECK_TYPELOAD (constrained_call);
9137 MonoInst *iargs [3];
9141 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
9142 mini_emit_memcpy2 (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
9143 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
9144 /* emit_memset only works when val == 0 */
9145 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
9150 if (ip [1] == CEE_CPBLK) {
9151 MonoMethod *memcpy_method = get_memcpy_method ();
9152 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
9154 MonoMethod *memset_method = get_memset_method ();
9155 mono_emit_method_call (cfg, memset_method, iargs, NULL);
9165 ins_flag |= MONO_INST_NOTYPECHECK;
9167 ins_flag |= MONO_INST_NORANGECHECK;
9168 /* we ignore the no-nullcheck for now since we
9169 * really do it explicitly only when doing callvirt->call
9175 int handler_offset = -1;
9177 for (i = 0; i < header->num_clauses; ++i) {
9178 MonoExceptionClause *clause = &header->clauses [i];
9179 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
9180 handler_offset = clause->handler_offset;
9185 bblock->flags |= BB_EXCEPTION_UNSAFE;
9187 g_assert (handler_offset != -1);
9189 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
9190 MONO_INST_NEW (cfg, ins, OP_RETHROW);
9191 ins->sreg1 = load->dreg;
9192 MONO_ADD_INS (bblock, ins);
9194 link_bblock (cfg, bblock, end_bblock);
9195 start_new_bblock = 1;
9203 CHECK_STACK_OVF (1);
9205 token = read32 (ip + 2);
9206 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC) {
9207 MonoType *type = mono_type_create_from_typespec (image, token);
9208 token = mono_type_size (type, &ialign);
9210 MonoClass *klass = mono_class_get_full (image, token, generic_context);
9211 CHECK_TYPELOAD (klass);
9212 mono_class_init (klass);
9213 token = mono_class_value_size (klass, &align);
9215 EMIT_NEW_ICONST (cfg, ins, token);
9220 case CEE_REFANYTYPE: {
9221 MonoInst *src_var, *src;
9227 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9229 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
9230 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
9231 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typed_reference_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
9241 g_error ("opcode 0xfe 0x%02x not handled", ip [1]);
9246 g_error ("opcode 0x%02x not handled", *ip);
9249 if (start_new_bblock != 1)
9252 bblock->cil_length = ip - bblock->cil_code;
9253 bblock->next_bb = end_bblock;
9255 if (cfg->method == method && cfg->domainvar) {
9257 MonoInst *get_domain;
9259 cfg->cbb = init_localsbb;
9261 if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
9262 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
9265 get_domain->dreg = alloc_preg (cfg);
9266 MONO_ADD_INS (cfg->cbb, get_domain);
9268 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
9269 MONO_ADD_INS (cfg->cbb, store);
9272 if (cfg->method == method && cfg->got_var)
9273 mono_emit_load_got_addr (cfg);
9275 if (header->init_locals) {
9278 cfg->cbb = init_localsbb;
9279 cfg->ip = header->code;
9280 for (i = 0; i < header->num_locals; ++i) {
9281 MonoType *ptype = header->locals [i];
9282 int t = ptype->type;
9283 dreg = cfg->locals [i]->dreg;
9285 if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
9286 t = ptype->data.klass->enum_basetype->type;
9288 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9289 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
9290 MONO_EMIT_NEW_ICONST (cfg, cfg->locals [i]->dreg, 0);
9291 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
9292 MONO_EMIT_NEW_I8CONST (cfg, cfg->locals [i]->dreg, 0);
9293 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
9294 MONO_INST_NEW (cfg, ins, OP_R8CONST);
9295 ins->type = STACK_R8;
9296 ins->inst_p0 = (void*)&r8_0;
9297 ins->dreg = alloc_dreg (cfg, STACK_R8);
9298 MONO_ADD_INS (init_localsbb, ins);
9299 EMIT_NEW_LOCSTORE (cfg, store, i, ins);
9300 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
9301 + ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
9302 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
9304 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9311 if (cfg->method == method) {
9313 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
9314 bb->region = mono_find_block_region (cfg, bb->real_offset);
9316 mono_create_spvar_for_region (cfg, bb->region);
9317 if (cfg->verbose_level > 2)
9318 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
9322 g_slist_free (class_inits);
9323 dont_inline = g_list_remove (dont_inline, method);
9325 if (inline_costs < 0) {
9328 /* Method is too large */
9329 mname = mono_method_full_name (method, TRUE);
9330 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
9331 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
9336 if ((cfg->verbose_level > 2) && (cfg->method == method))
9337 mono_print_code (cfg, "AFTER METHOD-TO-IR");
9339 return inline_costs;
9342 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
9343 g_slist_free (class_inits);
9344 dont_inline = g_list_remove (dont_inline, method);
9348 g_slist_free (class_inits);
9349 dont_inline = g_list_remove (dont_inline, method);
9353 g_slist_free (class_inits);
9354 dont_inline = g_list_remove (dont_inline, method);
9355 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
9359 g_slist_free (class_inits);
9360 dont_inline = g_list_remove (dont_inline, method);
9361 set_exception_type_from_invalid_il (cfg, method, ip);
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Map a STORE*_MEMBASE_REG opcode to the STORE*_MEMBASE_IMM opcode of the
 * same access width, used when the register source of a membase store is
 * known to be a constant.  Any opcode without an immediate counterpart hits
 * the g_assert_not_reached () below.
 */
9366 store_membase_reg_to_store_membase_imm (int opcode)
9369 case OP_STORE_MEMBASE_REG:
9370 return OP_STORE_MEMBASE_IMM;
9371 case OP_STOREI1_MEMBASE_REG:
9372 return OP_STOREI1_MEMBASE_IMM;
9373 case OP_STOREI2_MEMBASE_REG:
9374 return OP_STOREI2_MEMBASE_IMM;
9375 case OP_STOREI4_MEMBASE_REG:
9376 return OP_STOREI4_MEMBASE_IMM;
9377 case OP_STOREI8_MEMBASE_REG:
9378 return OP_STOREI8_MEMBASE_IMM;
/* no immediate form exists for the remaining store opcodes */
9380 g_assert_not_reached ();
9386 #endif /* DISABLE_JIT */
/*
 * mono_op_to_op_imm:
 *
 *   Map a two-register IR opcode to its register+immediate variant
 * (ALU ops, compares, membase stores, and some arch-specific opcodes).
 * NOTE(review): the case labels for the arithmetic/shift entries are not
 * visible in this excerpt; the mapping below is inferred from the returned
 * *_IMM opcode names — confirm against the full switch.
 */
9389 mono_op_to_op_imm (int opcode)
9399 return OP_IDIV_UN_IMM;
9403 return OP_IREM_UN_IMM;
9417 return OP_ISHR_UN_IMM;
9434 return OP_LSHR_UN_IMM;
9437 return OP_COMPARE_IMM;
9439 return OP_ICOMPARE_IMM;
9441 return OP_LCOMPARE_IMM;
/* membase stores: constant source folds into the store itself */
9443 case OP_STORE_MEMBASE_REG:
9444 return OP_STORE_MEMBASE_IMM;
9445 case OP_STOREI1_MEMBASE_REG:
9446 return OP_STOREI1_MEMBASE_IMM;
9447 case OP_STOREI2_MEMBASE_REG:
9448 return OP_STOREI2_MEMBASE_IMM;
9449 case OP_STOREI4_MEMBASE_REG:
9450 return OP_STOREI4_MEMBASE_IMM;
/* x86/amd64 can push and compare-with-memory using immediates directly */
9452 #if defined(__i386__) || defined (__x86_64__)
9454 return OP_X86_PUSH_IMM;
9455 case OP_X86_COMPARE_MEMBASE_REG:
9456 return OP_X86_COMPARE_MEMBASE_IMM;
9458 #if defined(__x86_64__)
9459 case OP_AMD64_ICOMPARE_MEMBASE_REG:
9460 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
9462 case OP_VOIDCALL_REG:
9471 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Convert a CIL ldind-style opcode to the linear-IR load-from-base+offset
 * opcode of matching size/signedness (I1/U1/I2/U2/I4/U4/I8/R4/R8, with the
 * pointer-sized cases mapping to the generic OP_LOAD_MEMBASE).
 * NOTE(review): case labels are elided in this excerpt — presumed to be the
 * corresponding CEE_LDIND_* values; verify against the full switch.
 */
9478 ldind_to_load_membase (int opcode)
9482 return OP_LOADI1_MEMBASE;
9484 return OP_LOADU1_MEMBASE;
9486 return OP_LOADI2_MEMBASE;
9488 return OP_LOADU2_MEMBASE;
9490 return OP_LOADI4_MEMBASE;
9492 return OP_LOADU4_MEMBASE;
9494 return OP_LOAD_MEMBASE;
9496 return OP_LOAD_MEMBASE;
9498 return OP_LOADI8_MEMBASE;
9500 return OP_LOADR4_MEMBASE;
9502 return OP_LOADR8_MEMBASE;
/* unexpected ldind variant */
9504 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Convert a CIL stind-style opcode to the linear-IR store-to-base+offset
 * opcode of matching width (register source).  Counterpart of
 * ldind_to_load_membase () above.
 * NOTE(review): case labels are elided in this excerpt — presumed CEE_STIND_*.
 */
9511 stind_to_store_membase (int opcode)
9515 return OP_STOREI1_MEMBASE_REG;
9517 return OP_STOREI2_MEMBASE_REG;
9519 return OP_STOREI4_MEMBASE_REG;
9522 return OP_STORE_MEMBASE_REG;
9524 return OP_STOREI8_MEMBASE_REG;
9526 return OP_STORER4_MEMBASE_REG;
9528 return OP_STORER8_MEMBASE_REG;
/* unexpected stind variant */
9530 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map a base-register+offset load to the corresponding absolute-address
 * load (OP_*_MEM).  Only implemented for x86/x86-64, which can encode an
 * absolute address directly; OP_LOADI8_MEM is only available on 64-bit
 * (SIZEOF_VOID_P == 8).
 */
9537 mono_load_membase_to_load_mem (int opcode)
9539 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
9540 #if defined(__i386__) || defined(__x86_64__)
9542 case OP_LOAD_MEMBASE:
9544 case OP_LOADU1_MEMBASE:
9545 return OP_LOADU1_MEM;
9546 case OP_LOADU2_MEMBASE:
9547 return OP_LOADU2_MEM;
9548 case OP_LOADI4_MEMBASE:
9549 return OP_LOADI4_MEM;
9550 case OP_LOADU4_MEMBASE:
9551 return OP_LOADU4_MEM;
9552 #if SIZEOF_VOID_P == 8
9553 case OP_LOADI8_MEMBASE:
9554 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   Fold an ALU opcode whose result is immediately stored through
 * STORE_OPCODE into a single x86/amd64 read-modify-write-on-memory opcode
 * (ADD/SUB/AND/OR/XOR _MEMBASE_REG or _MEMBASE_IMM).  Guards below reject
 * store opcodes whose width does not match what the RMW forms support
 * (32-bit only on i386; 32/64-bit on amd64).
 * NOTE(review): the ALU case labels and the fallthrough return value are
 * elided in this excerpt — confirm against the full switch.
 */
9563 op_to_op_dest_membase (int store_opcode, int opcode)
9565 #if defined(__i386__)
/* i386 RMW forms only exist for pointer/32-bit stores */
9566 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
9571 return OP_X86_ADD_MEMBASE_REG;
9573 return OP_X86_SUB_MEMBASE_REG;
9575 return OP_X86_AND_MEMBASE_REG;
9577 return OP_X86_OR_MEMBASE_REG;
9579 return OP_X86_XOR_MEMBASE_REG;
9582 return OP_X86_ADD_MEMBASE_IMM;
9585 return OP_X86_SUB_MEMBASE_IMM;
9588 return OP_X86_AND_MEMBASE_IMM;
9591 return OP_X86_OR_MEMBASE_IMM;
9594 return OP_X86_XOR_MEMBASE_IMM;
9600 #if defined(__x86_64__)
/* amd64 additionally allows 64-bit stores */
9601 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
9606 return OP_X86_ADD_MEMBASE_REG;
9608 return OP_X86_SUB_MEMBASE_REG;
9610 return OP_X86_AND_MEMBASE_REG;
9612 return OP_X86_OR_MEMBASE_REG;
9614 return OP_X86_XOR_MEMBASE_REG;
9616 return OP_X86_ADD_MEMBASE_IMM;
9618 return OP_X86_SUB_MEMBASE_IMM;
9620 return OP_X86_AND_MEMBASE_IMM;
9622 return OP_X86_OR_MEMBASE_IMM;
9624 return OP_X86_XOR_MEMBASE_IMM;
/* 64-bit (long) variants */
9626 return OP_AMD64_ADD_MEMBASE_REG;
9628 return OP_AMD64_SUB_MEMBASE_REG;
9630 return OP_AMD64_AND_MEMBASE_REG;
9632 return OP_AMD64_OR_MEMBASE_REG;
9634 return OP_AMD64_XOR_MEMBASE_REG;
9637 return OP_AMD64_ADD_MEMBASE_IMM;
9640 return OP_AMD64_SUB_MEMBASE_IMM;
9643 return OP_AMD64_AND_MEMBASE_IMM;
9646 return OP_AMD64_OR_MEMBASE_IMM;
9649 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Fold a condition-result opcode followed by a 1-byte membase store into
 * a single x86 SETcc-to-memory opcode (SETEQ/SETNE _MEMBASE).  Only applies
 * when the store is OP_STOREI1_MEMBASE_REG, since SETcc writes one byte.
 * NOTE(review): the case labels (presumably OP_CEQ-style opcodes) are elided
 * in this excerpt.
 */
9659 op_to_op_store_membase (int store_opcode, int opcode)
9661 #if defined(__i386__) || defined(__x86_64__)
9664 if (store_opcode == OP_STOREI1_MEMBASE_REG)
9665 return OP_X86_SETEQ_MEMBASE;
9667 if (store_opcode == OP_STOREI1_MEMBASE_REG)
9668 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Fold a load (LOAD_OPCODE) feeding the FIRST source of OPCODE into an
 * x86/amd64 opcode that reads that operand directly from memory
 * (push-from-memory and compare-with-memory forms).  Width of the load must
 * match the operation width, hence the load_opcode guards below.
 */
9676 op_to_op_src1_membase (int load_opcode, int opcode)
9679 /* FIXME: This has sign extension issues */
9681 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
9682 return OP_X86_COMPARE_MEMBASE8_IMM;
/* i386: only pointer/32-bit loads can be folded */
9685 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
9690 return OP_X86_PUSH_MEMBASE;
9691 case OP_COMPARE_IMM:
9692 case OP_ICOMPARE_IMM:
9693 return OP_X86_COMPARE_MEMBASE_IMM;
9696 return OP_X86_COMPARE_MEMBASE_REG;
/* amd64 variant of the same byte-compare special case */
9701 /* FIXME: This has sign extension issues */
9703 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
9704 return OP_X86_COMPARE_MEMBASE8_IMM;
9709 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
9710 return OP_X86_PUSH_MEMBASE;
/* 64-bit immediate compares are disabled: imm may not fit in 32 bits */
9712 /* FIXME: This only works for 32 bit immediates
9713 case OP_COMPARE_IMM:
9714 case OP_LCOMPARE_IMM:
9715 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
9716 return OP_AMD64_COMPARE_MEMBASE_IMM;
9718 case OP_ICOMPARE_IMM:
9719 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9720 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
9724 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
9725 return OP_AMD64_COMPARE_MEMBASE_REG;
9728 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9729 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Fold a load (LOAD_OPCODE) feeding the SECOND source of OPCODE into an
 * x86/amd64 reg-op-memory opcode (COMPARE/ADD/SUB/AND/OR/XOR _REG_MEMBASE).
 * As with op_to_op_src1_membase (), the load width must match the operation
 * width: 32-bit loads pair with the i-opcodes, 64-bit/pointer loads with the
 * amd64 long forms.
 */
9738 op_to_op_src2_membase (int load_opcode, int opcode)
/* i386: only pointer/32-bit loads can be folded */
9741 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
9747 return OP_X86_COMPARE_REG_MEMBASE;
9749 return OP_X86_ADD_REG_MEMBASE;
9751 return OP_X86_SUB_REG_MEMBASE;
9753 return OP_X86_AND_REG_MEMBASE;
9755 return OP_X86_OR_REG_MEMBASE;
9757 return OP_X86_XOR_REG_MEMBASE;
/* amd64: pick the 32-bit or 64-bit memory-operand form by load width */
9764 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9765 return OP_AMD64_ICOMPARE_REG_MEMBASE;
9769 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9770 return OP_AMD64_COMPARE_REG_MEMBASE;
9773 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9774 return OP_X86_ADD_REG_MEMBASE;
9776 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9777 return OP_X86_SUB_REG_MEMBASE;
9779 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9780 return OP_X86_AND_REG_MEMBASE;
9782 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9783 return OP_X86_OR_REG_MEMBASE;
9785 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9786 return OP_X86_XOR_REG_MEMBASE;
9788 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9789 return OP_AMD64_ADD_REG_MEMBASE;
9791 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9792 return OP_AMD64_SUB_REG_MEMBASE;
9794 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9795 return OP_AMD64_AND_REG_MEMBASE;
9797 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9798 return OP_AMD64_OR_REG_MEMBASE;
9800 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9801 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but refuses the conversion for opcodes that
 * this architecture emulates in software (long shifts on 32-bit targets,
 * mul/div where MONO_ARCH_EMULATE_* is set), since the emulation helpers
 * have no immediate forms.  Falls through to mono_op_to_op_imm () otherwise.
 * NOTE(review): the case labels under each #if are elided in this excerpt.
 */
9809 mono_op_to_op_imm_noemul (int opcode)
9812 #if SIZEOF_VOID_P == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
9817 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
9825 return mono_op_to_op_imm (opcode);
9832 * mono_handle_global_vregs:
9834 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
9838 mono_handle_global_vregs (MonoCompile *cfg)
9844 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
9846 /* Find local vregs used in more than one bb */
9847 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
9848 MonoInst *ins = bb->code;
9849 int block_num = bb->block_num;
9851 if (cfg->verbose_level > 2)
9852 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
9855 for (; ins; ins = ins->next) {
9856 const char *spec = INS_INFO (ins->opcode);
9857 int regtype, regindex;
9860 if (G_UNLIKELY (cfg->verbose_level > 2))
9861 mono_print_ins (ins);
9863 g_assert (ins->opcode >= MONO_CEE_LAST);
9865 for (regindex = 0; regindex < 3; regindex ++) {
9868 if (regindex == 0) {
9869 regtype = spec [MONO_INST_DEST];
9873 } else if (regindex == 1) {
9874 regtype = spec [MONO_INST_SRC1];
9879 regtype = spec [MONO_INST_SRC2];
9885 #if SIZEOF_VOID_P == 4
9886 if (regtype == 'l') {
9888 * Since some instructions reference the original long vreg,
9889 * and some reference the two component vregs, it is quite hard
9890 * to determine when it needs to be global. So be conservative.
9892 if (!get_vreg_to_inst (cfg, vreg)) {
9893 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
9895 if (cfg->verbose_level > 2)
9896 printf ("LONG VREG R%d made global.\n", vreg);
9900 * Make the component vregs volatile since the optimizations can
9901 * get confused otherwise.
9903 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
9904 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
9908 g_assert (vreg != -1);
9910 prev_bb = vreg_to_bb [vreg];
9912 /* 0 is a valid block num */
9913 vreg_to_bb [vreg] = block_num + 1;
9914 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
9915 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
9918 if (!get_vreg_to_inst (cfg, vreg)) {
9919 if (G_UNLIKELY (cfg->verbose_level > 2))
9920 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
9924 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
9927 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
9930 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
9933 g_assert_not_reached ();
9937 /* Flag as having been used in more than one bb */
9938 vreg_to_bb [vreg] = -1;
9944 /* If a variable is used in only one bblock, convert it into a local vreg */
9945 for (i = 0; i < cfg->num_varinfo; i++) {
9946 MonoInst *var = cfg->varinfo [i];
9947 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
9949 switch (var->type) {
9955 #if SIZEOF_VOID_P == 8
9958 #if !defined(__i386__) && !defined(MONO_ARCH_SOFT_FLOAT)
9959 /* Enabling this screws up the fp stack on x86 */
9962 /* Arguments are implicitly global */
9963 /* Putting R4 vars into registers doesn't work currently */
9964 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg) {
9966 * Make sure that the variable's liveness interval doesn't contain a call, since
9967 * that would cause the lvreg to be spilled, making the whole optimization
9970 /* This is too slow for JIT compilation */
9972 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
9974 int def_index, call_index, ins_index;
9975 gboolean spilled = FALSE;
9980 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
9981 const char *spec = INS_INFO (ins->opcode);
9983 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
9984 def_index = ins_index;
9986 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
9987 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
9988 if (call_index > def_index) {
9994 if (MONO_IS_CALL (ins))
9995 call_index = ins_index;
10005 if (G_UNLIKELY (cfg->verbose_level > 2))
10006 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
10007 var->flags |= MONO_INST_IS_DEAD;
10008 cfg->vreg_to_inst [var->dreg] = NULL;
10015 * Compress the varinfo and vars tables so the liveness computation is faster and
10016 * takes up less space.
10019 for (i = 0; i < cfg->num_varinfo; ++i) {
10020 MonoInst *var = cfg->varinfo [i];
10021 if (pos < i && cfg->locals_start == i)
10022 cfg->locals_start = pos;
10023 if (!(var->flags & MONO_INST_IS_DEAD)) {
10025 cfg->varinfo [pos] = cfg->varinfo [i];
10026 cfg->varinfo [pos]->inst_c0 = pos;
10027 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
10028 cfg->vars [pos].idx = pos;
10029 #if SIZEOF_VOID_P == 4
10030 if (cfg->varinfo [pos]->type == STACK_I8) {
10031 /* Modify the two component vars too */
10034 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
10035 var1->inst_c0 = pos;
10036 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
10037 var1->inst_c0 = pos;
10044 cfg->num_varinfo = pos;
10045 if (cfg->locals_start > cfg->num_varinfo)
10046 cfg->locals_start = cfg->num_varinfo;
10050 * mono_spill_global_vars:
10052 * Generate spill code for variables which are not allocated to registers,
10053 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
10054 * code is generated which could be optimized by the local optimization passes.
10057 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
10059 MonoBasicBlock *bb;
10061 int orig_next_vreg;
10062 guint32 *vreg_to_lvreg;
10064 guint32 i, lvregs_len;
10065 gboolean dest_has_lvreg = FALSE;
10066 guint32 stacktypes [128];
10068 *need_local_opts = FALSE;
10070 memset (spec2, 0, sizeof (spec2));
10072 /* FIXME: Move this function to mini.c */
10073 stacktypes ['i'] = STACK_PTR;
10074 stacktypes ['l'] = STACK_I8;
10075 stacktypes ['f'] = STACK_R8;
10077 #if SIZEOF_VOID_P == 4
10078 /* Create MonoInsts for longs */
10079 for (i = 0; i < cfg->num_varinfo; i++) {
10080 MonoInst *ins = cfg->varinfo [i];
10082 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
10083 switch (ins->type) {
10084 #ifdef MONO_ARCH_SOFT_FLOAT
10090 g_assert (ins->opcode == OP_REGOFFSET);
10092 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
10094 tree->opcode = OP_REGOFFSET;
10095 tree->inst_basereg = ins->inst_basereg;
10096 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
10098 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
10100 tree->opcode = OP_REGOFFSET;
10101 tree->inst_basereg = ins->inst_basereg;
10102 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
10112 /* FIXME: widening and truncation */
10115 * As an optimization, when a variable allocated to the stack is first loaded into
10116 * an lvreg, we will remember the lvreg and use it the next time instead of loading
10117 * the variable again.
10119 orig_next_vreg = cfg->next_vreg;
10120 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
10121 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
10124 /* Add spill loads/stores */
10125 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10128 if (cfg->verbose_level > 2)
10129 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
10131 /* Clear vreg_to_lvreg array */
10132 for (i = 0; i < lvregs_len; i++)
10133 vreg_to_lvreg [lvregs [i]] = 0;
10137 MONO_BB_FOR_EACH_INS (bb, ins) {
10138 const char *spec = INS_INFO (ins->opcode);
10139 int regtype, srcindex, sreg, tmp_reg, prev_dreg;
10140 gboolean store, no_lvreg;
10142 if (G_UNLIKELY (cfg->verbose_level > 2))
10143 mono_print_ins (ins);
10145 if (ins->opcode == OP_NOP)
10149 * We handle LDADDR here as well, since it can only be decomposed
10150 * when variable addresses are known.
10152 if (ins->opcode == OP_LDADDR) {
10153 MonoInst *var = ins->inst_p0;
10155 if (var->opcode == OP_VTARG_ADDR) {
10156 /* Happens on SPARC/S390 where vtypes are passed by reference */
10157 MonoInst *vtaddr = var->inst_left;
10158 if (vtaddr->opcode == OP_REGVAR) {
10159 ins->opcode = OP_MOVE;
10160 ins->sreg1 = vtaddr->dreg;
10162 else if (var->inst_left->opcode == OP_REGOFFSET) {
10163 ins->opcode = OP_LOAD_MEMBASE;
10164 ins->inst_basereg = vtaddr->inst_basereg;
10165 ins->inst_offset = vtaddr->inst_offset;
10169 g_assert (var->opcode == OP_REGOFFSET);
10171 ins->opcode = OP_ADD_IMM;
10172 ins->sreg1 = var->inst_basereg;
10173 ins->inst_imm = var->inst_offset;
10176 *need_local_opts = TRUE;
10177 spec = INS_INFO (ins->opcode);
10180 if (ins->opcode < MONO_CEE_LAST) {
10181 mono_print_ins (ins);
10182 g_assert_not_reached ();
10186 * Store opcodes have destbasereg in the dreg, but in reality, it is an
10190 if (MONO_IS_STORE_MEMBASE (ins)) {
10191 tmp_reg = ins->dreg;
10192 ins->dreg = ins->sreg2;
10193 ins->sreg2 = tmp_reg;
10196 spec2 [MONO_INST_DEST] = ' ';
10197 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10198 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10200 } else if (MONO_IS_STORE_MEMINDEX (ins))
10201 g_assert_not_reached ();
10206 if (G_UNLIKELY (cfg->verbose_level > 2))
10207 printf ("\t %.3s %d %d %d\n", spec, ins->dreg, ins->sreg1, ins->sreg2);
10212 regtype = spec [MONO_INST_DEST];
10213 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
10216 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
10217 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
10218 MonoInst *store_ins;
10221 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
10223 if (var->opcode == OP_REGVAR) {
10224 ins->dreg = var->dreg;
10225 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
10227 * Instead of emitting a load+store, use a _membase opcode.
10229 g_assert (var->opcode == OP_REGOFFSET);
10230 if (ins->opcode == OP_MOVE) {
10233 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
10234 ins->inst_basereg = var->inst_basereg;
10235 ins->inst_offset = var->inst_offset;
10238 spec = INS_INFO (ins->opcode);
10242 g_assert (var->opcode == OP_REGOFFSET);
10244 prev_dreg = ins->dreg;
10246 /* Invalidate any previous lvreg for this vreg */
10247 vreg_to_lvreg [ins->dreg] = 0;
10251 #ifdef MONO_ARCH_SOFT_FLOAT
10252 if (store_opcode == OP_STORER8_MEMBASE_REG) {
10254 store_opcode = OP_STOREI8_MEMBASE_REG;
10258 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
10260 if (regtype == 'l') {
10261 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
10262 mono_bblock_insert_after_ins (bb, ins, store_ins);
10263 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
10264 mono_bblock_insert_after_ins (bb, ins, store_ins);
10267 g_assert (store_opcode != OP_STOREV_MEMBASE);
10269 /* Try to fuse the store into the instruction itself */
10270 /* FIXME: Add more instructions */
10271 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
10272 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
10273 ins->inst_imm = ins->inst_c0;
10274 ins->inst_destbasereg = var->inst_basereg;
10275 ins->inst_offset = var->inst_offset;
10276 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
10277 ins->opcode = store_opcode;
10278 ins->inst_destbasereg = var->inst_basereg;
10279 ins->inst_offset = var->inst_offset;
10283 tmp_reg = ins->dreg;
10284 ins->dreg = ins->sreg2;
10285 ins->sreg2 = tmp_reg;
10288 spec2 [MONO_INST_DEST] = ' ';
10289 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10290 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10292 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
10293 // FIXME: The backends expect the base reg to be in inst_basereg
10294 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
10296 ins->inst_basereg = var->inst_basereg;
10297 ins->inst_offset = var->inst_offset;
10298 spec = INS_INFO (ins->opcode);
10300 /* printf ("INS: "); mono_print_ins (ins); */
10301 /* Create a store instruction */
10302 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
10304 /* Insert it after the instruction */
10305 mono_bblock_insert_after_ins (bb, ins, store_ins);
10308 * We can't assign ins->dreg to var->dreg here, since the
10309 * sregs could use it. So set a flag, and do it after
10312 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
10313 dest_has_lvreg = TRUE;
10322 for (srcindex = 0; srcindex < 2; ++srcindex) {
10323 regtype = spec [(srcindex == 0) ? MONO_INST_SRC1 : MONO_INST_SRC2];
10324 sreg = srcindex == 0 ? ins->sreg1 : ins->sreg2;
10326 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
10327 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
10328 MonoInst *var = get_vreg_to_inst (cfg, sreg);
10329 MonoInst *load_ins;
10330 guint32 load_opcode;
10332 if (var->opcode == OP_REGVAR) {
10334 ins->sreg1 = var->dreg;
10336 ins->sreg2 = var->dreg;
10340 g_assert (var->opcode == OP_REGOFFSET);
10342 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
10344 g_assert (load_opcode != OP_LOADV_MEMBASE);
10346 if (vreg_to_lvreg [sreg]) {
10347 /* The variable is already loaded to an lvreg */
10348 if (G_UNLIKELY (cfg->verbose_level > 2))
10349 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
10351 ins->sreg1 = vreg_to_lvreg [sreg];
10353 ins->sreg2 = vreg_to_lvreg [sreg];
10357 /* Try to fuse the load into the instruction */
10358 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
10359 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
10360 ins->inst_basereg = var->inst_basereg;
10361 ins->inst_offset = var->inst_offset;
10362 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
10363 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
10364 ins->sreg2 = var->inst_basereg;
10365 ins->inst_offset = var->inst_offset;
10367 if ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE)) {
10368 ins->opcode = OP_NOP;
10371 //printf ("%d ", srcindex); mono_print_ins (ins);
10373 sreg = alloc_dreg (cfg, stacktypes [regtype]);
10375 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
10376 if (var->dreg == prev_dreg) {
10378 * sreg refers to the value loaded by the load
10379 * emitted below, but we need to use ins->dreg
10380 * since it refers to the store emitted earlier.
10384 vreg_to_lvreg [var->dreg] = sreg;
10385 g_assert (lvregs_len < 1024);
10386 lvregs [lvregs_len ++] = var->dreg;
10395 if (regtype == 'l') {
10396 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
10397 mono_bblock_insert_before_ins (bb, ins, load_ins);
10398 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
10399 mono_bblock_insert_before_ins (bb, ins, load_ins);
10402 #if SIZEOF_VOID_P == 4
10403 g_assert (load_opcode != OP_LOADI8_MEMBASE);
10405 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
10406 mono_bblock_insert_before_ins (bb, ins, load_ins);
10412 if (dest_has_lvreg) {
10413 vreg_to_lvreg [prev_dreg] = ins->dreg;
10414 g_assert (lvregs_len < 1024);
10415 lvregs [lvregs_len ++] = prev_dreg;
10416 dest_has_lvreg = FALSE;
10420 tmp_reg = ins->dreg;
10421 ins->dreg = ins->sreg2;
10422 ins->sreg2 = tmp_reg;
10425 if (MONO_IS_CALL (ins)) {
10426 /* Clear vreg_to_lvreg array */
10427 for (i = 0; i < lvregs_len; i++)
10428 vreg_to_lvreg [lvregs [i]] = 0;
10432 if (cfg->verbose_level > 2)
10433 mono_print_ins_index (1, ins);
10440 * - use 'iadd' instead of 'int_add'
10441 * - handling ovf opcodes: decompose in method_to_ir.
10442 * - unify iregs/fregs
10443 * -> partly done, the missing parts are:
10444 * - a more complete unification would involve unifying the hregs as well, so
10445 * code wouldn't need if (fp) all over the place. but that would mean the hregs
10446 * would no longer map to the machine hregs, so the code generators would need to
10447 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
10448 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
10449 * fp/non-fp branches speeds it up by about 15%.
10450 * - use sext/zext opcodes instead of shifts
10452 * - get rid of TEMPLOADs if possible and use vregs instead
10453 * - clean up usage of OP_P/OP_ opcodes
10454 * - cleanup usage of DUMMY_USE
10455 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
10457 * - set the stack type and allocate a dreg in the EMIT_NEW macros
10458 * - get rid of all the <foo>2 stuff when the new JIT is ready.
10459 * - make sure handle_stack_args () is called before the branch is emitted
10460 * - when the new IR is done, get rid of all unused stuff
10461 * - COMPARE/BEQ as separate instructions or unify them ?
10462 * - keeping them separate allows specialized compare instructions like
10463 * compare_imm, compare_membase
10464 * - most back ends unify fp compare+branch, fp compare+ceq
10465 * - integrate mono_save_args into inline_method
10466 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
10467 * - handle long shift opts on 32 bit platforms somehow: they require
10468 * 3 sregs (2 for arg1 and 1 for arg2)
10469 * - make byref a 'normal' type.
10470 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
10471 * variable if needed.
10472 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
10473 * like inline_method.
10474 * - remove inlining restrictions
10475 * - fix LNEG and enable cfold of INEG
10476 * - generalize x86 optimizations like ldelema as a peephole optimization
10477 * - add store_mem_imm for amd64
10478 * - optimize the loading of the interruption flag in the managed->native wrappers
10479 * - avoid special handling of OP_NOP in passes
10480 * - move code inserting instructions into one function/macro.
10481 * - try a coalescing phase after liveness analysis
10482 * - add float -> vreg conversion + local optimizations on !x86
10483 * - figure out how to handle decomposed branches during optimizations, ie.
10484 * compare+branch, op_jump_table+op_br etc.
10485 * - promote RuntimeXHandles to vregs
10486 * - vtype cleanups:
10487 * - add a NEW_VARLOADA_VREG macro
10488 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
10489 * accessing vtype fields.
10490 * - get rid of I8CONST on 64 bit platforms
10491 * - dealing with the increase in code size due to branches created during opcode
10493 * - use extended basic blocks
10494 * - all parts of the JIT
10495 * - handle_global_vregs () && local regalloc
10496 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
10497 * - sources of increase in code size:
10500 * - isinst and castclass
10501 * - lvregs not allocated to global registers even if used multiple times
10502 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
10504 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
10505 * - add all micro optimizations from the old JIT
10506 * - put tree optimizations into the deadce pass
10507 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
10508 * specific function.
10509 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
10510 * fcompare + branchCC.
10511 * - create a helper function for allocating a stack slot, taking into account
10512 * MONO_CFG_HAS_SPILLUP.
10513 * - merge new GC changes in mini.c.
10515 * - merge the ia64 switch changes.
10516 * - merge the mips conditional changes.
10517 * - remove unused opcodes from mini-ops.h, remove "op_" from the opcode names,
10518 * remove the op_ opcodes from the cpu-..md files, clean up the cpu-..md files.
10519 * - make the cpu_ tables smaller when the usage of the cee_ opcodes is removed.
10520 * - optimize mono_regstate2_alloc_int/float.
10521 * - fix the pessimistic handling of variables accessed in exception handler blocks.
10522 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
10523 * parts of the tree could be separated by other instructions, killing the tree
10524 * arguments, or stores killing loads etc. Also, should we fold loads into other
10525 * instructions if the result of the load is used multiple times ?
10526 * - make the REM_IMM optimization in mini-x86.c arch-independent.
10527 * - LAST MERGE: 108395.
10528 * - when returning vtypes in registers, generate IR and append it to the end of the
10529 * last bb instead of doing it in the epilog.
10530 * - when the new JIT is done, use the ins emission macros in ir-emit.h instead of the
10531 * ones in inssel.h.
10532 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
10540 - When to decompose opcodes:
10541 - earlier: this makes some optimizations hard to implement, since the low level IR
10542 no longer contains the necessary information. But it is easier to do.
10543 - later: harder to implement, enables more optimizations.
10544 - Branches inside bblocks:
10545 - created when decomposing complex opcodes.
10546 - branches to another bblock: harmless, but not tracked by the branch
10547 optimizations, so need to branch to a label at the start of the bblock.
10548 - branches to inside the same bblock: very problematic, trips up the local
10549 reg allocator. Can be fixed by splitting the current bblock, but that is a
10550 complex operation, since some local vregs can become global vregs etc.
10551 - Local/global vregs:
10552 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
10553 local register allocator.
10554 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
10555 structure, created by mono_create_var (). Assigned to hregs or the stack by
10556 the global register allocator.
10557 - When to do optimizations like alu->alu_imm:
10558 - earlier -> saves work later on since the IR will be smaller/simpler
10559 - later -> can work on more instructions
10560 - Handling of valuetypes:
10561 - When a vtype is pushed on the stack, a new temporary is created, an
10562 instruction computing its address (LDADDR) is emitted and pushed on
10563 the stack. Need to optimize cases when the vtype is used immediately as in
10564 argument passing, stloc etc.
10565 - Instead of the to_end stuff in the old JIT, simply call the function handling
10566 the values on the stack before emitting the last instruction of the bb.
10569 #endif /* DISABLE_JIT */