2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
22 #ifdef HAVE_SYS_TIME_H
26 #ifdef HAVE_VALGRIND_MEMCHECK_H
27 #include <valgrind/memcheck.h>
30 #include <mono/metadata/assembly.h>
31 #include <mono/metadata/loader.h>
32 #include <mono/metadata/tabledefs.h>
33 #include <mono/metadata/class.h>
34 #include <mono/metadata/object.h>
35 #include <mono/metadata/exception.h>
36 #include <mono/metadata/opcodes.h>
37 #include <mono/metadata/mono-endian.h>
38 #include <mono/metadata/tokentype.h>
39 #include <mono/metadata/tabledefs.h>
40 #include <mono/metadata/marshal.h>
41 #include <mono/metadata/debug-helpers.h>
42 #include <mono/metadata/mono-debug.h>
43 #include <mono/metadata/gc-internal.h>
44 #include <mono/metadata/security-manager.h>
45 #include <mono/metadata/threads-types.h>
46 #include <mono/metadata/security-core-clr.h>
47 #include <mono/metadata/monitor.h>
48 #include <mono/utils/mono-compiler.h>
55 #include "jit-icalls.h"
57 #define BRANCH_COST 100
58 #define INLINE_LENGTH_LIMIT 20
59 #define INLINE_FAILURE do {\
60 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE))\
63 #define CHECK_CFG_EXCEPTION do {\
64 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
67 #define METHOD_ACCESS_FAILURE do { \
68 char *method_fname = mono_method_full_name (method, TRUE); \
69 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
70 cfg->exception_type = MONO_EXCEPTION_METHOD_ACCESS; \
71 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
72 g_free (method_fname); \
73 g_free (cil_method_fname); \
74 goto exception_exit; \
76 #define FIELD_ACCESS_FAILURE do { \
77 char *method_fname = mono_method_full_name (method, TRUE); \
78 char *field_fname = mono_field_full_name (field); \
79 cfg->exception_type = MONO_EXCEPTION_FIELD_ACCESS; \
80 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
81 g_free (method_fname); \
82 g_free (field_fname); \
83 goto exception_exit; \
85 #define GENERIC_SHARING_FAILURE(opcode) do { \
86 if (cfg->generic_sharing_context) { \
87 if (cfg->verbose_level > 2) \
88 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
89 cfg->exception_type = MONO_EXCEPTION_GENERIC_SHARING_FAILED; \
90 goto exception_exit; \
94 /* Determine whenever 'ins' represents a load of the 'this' argument */
95 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
97 static int ldind_to_load_membase (int opcode);
98 static int stind_to_store_membase (int opcode);
100 int mono_op_to_op_imm (int opcode);
101 int mono_op_to_op_imm_noemul (int opcode);
103 MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
104 void mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native);
105 void mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass);
107 /* helper methods signature */
108 extern MonoMethodSignature *helper_sig_class_init_trampoline;
109 extern MonoMethodSignature *helper_sig_domain_get;
110 extern MonoMethodSignature *helper_sig_generic_class_init_trampoline;
111 extern MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
112 extern MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline;
115 * Instruction metadata
120 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2,
126 #if SIZEOF_VOID_P == 8
131 /* keep in sync with the enum in mini.h */
134 #include "mini-ops.h"
138 extern GHashTable *jit_icall_name_hash;
140 #define MONO_INIT_VARINFO(vi,id) do { \
141 (vi)->range.first_use.pos.bid = 0xffff; \
147 mono_alloc_ireg (MonoCompile *cfg)
149 return alloc_ireg (cfg);
153 mono_alloc_freg (MonoCompile *cfg)
155 return alloc_freg (cfg);
159 mono_alloc_preg (MonoCompile *cfg)
161 return alloc_preg (cfg);
165 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
167 return alloc_dreg (cfg, stack_type);
/*
 * mono_type_to_regmove:
 *
 *   Map TYPE to the move opcode used when copying a value of that type
 * between virtual registers.
 * NOTE(review): several case labels, return statements and closing braces
 * appear to have been elided from this excerpt; only the visible lines are
 * documented.
 */
mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
switch (type->type) {
case MONO_TYPE_BOOLEAN:
case MONO_TYPE_FNPTR:
case MONO_TYPE_CLASS:
case MONO_TYPE_STRING:
case MONO_TYPE_OBJECT:
case MONO_TYPE_SZARRAY:
case MONO_TYPE_ARRAY:
#if SIZEOF_VOID_P == 8
case MONO_TYPE_VALUETYPE:
/* an enum valuetype moves like its underlying integral type */
if (type->data.klass->enumtype) {
type = type->data.klass->enum_basetype;
/* SIMD-recognized value types get special handling */
if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
case MONO_TYPE_TYPEDBYREF:
case MONO_TYPE_GENERICINST:
/* reduce a generic instantiation to its open container class */
type = &type->data.generic_class->container_class->byval_arg;
/* presumably the VAR/MVAR case: only legal under generic sharing — TODO confirm */
g_assert (cfg->generic_sharing_context);
g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/*
 * mono_print_bb:
 *
 *   Debug helper: print BB's number, its incoming and outgoing edges
 * (block number and depth-first number), then every instruction in the
 * block, prefixed with MSG.
 */
mono_print_bb (MonoBasicBlock *bb, const char *msg)
printf ("\n%s %d: [IN: ", msg, bb->block_num);
for (i = 0; i < bb->in_count; ++i)
printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
for (i = 0; i < bb->out_count; ++i)
printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
/* dump each instruction in program order */
for (tree = bb->code; tree; tree = tree->next)
mono_print_ins_index (-1, tree);
252 * Can't put this at the beginning, since other files reference stuff from this
257 #define UNVERIFIED do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto unverified; } while (0)
259 #define GET_BBLOCK(cfg,tblock,ip) do { \
260 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
262 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
263 NEW_BBLOCK (cfg, (tblock)); \
264 (tblock)->cil_code = (ip); \
265 ADD_BBLOCK (cfg, (tblock)); \
269 #if defined(__i386__) || defined(__x86_64__)
270 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
271 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
272 (dest)->dreg = alloc_preg ((cfg)); \
273 (dest)->sreg1 = (sr1); \
274 (dest)->sreg2 = (sr2); \
275 (dest)->inst_imm = (imm); \
276 (dest)->backend.shift_amount = (shift); \
277 MONO_ADD_INS ((cfg)->cbb, (dest)); \
281 #if SIZEOF_VOID_P == 8
282 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
283 /* FIXME: Need to add many more cases */ \
284 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
286 int dr = alloc_preg (cfg); \
287 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
288 (ins)->sreg2 = widen->dreg; \
292 #define ADD_WIDEN_OP(ins, arg1, arg2)
295 #define ADD_BINOP(op) do { \
296 MONO_INST_NEW (cfg, ins, (op)); \
298 ins->sreg1 = sp [0]->dreg; \
299 ins->sreg2 = sp [1]->dreg; \
300 type_from_op (ins, sp [0], sp [1]); \
302 /* Have to insert a widening op */ \
303 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
304 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
305 MONO_ADD_INS ((cfg)->cbb, (ins)); \
307 mono_decompose_opcode ((cfg), (ins)); \
310 #define ADD_UNOP(op) do { \
311 MONO_INST_NEW (cfg, ins, (op)); \
313 ins->sreg1 = sp [0]->dreg; \
314 type_from_op (ins, sp [0], NULL); \
316 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
317 MONO_ADD_INS ((cfg)->cbb, (ins)); \
319 mono_decompose_opcode (cfg, ins); \
322 #define ADD_BINCOND(next_block) do { \
325 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
326 cmp->sreg1 = sp [0]->dreg; \
327 cmp->sreg2 = sp [1]->dreg; \
328 type_from_op (cmp, sp [0], sp [1]); \
330 type_from_op (ins, sp [0], sp [1]); \
331 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
332 GET_BBLOCK (cfg, tblock, target); \
333 link_bblock (cfg, bblock, tblock); \
334 ins->inst_true_bb = tblock; \
335 if ((next_block)) { \
336 link_bblock (cfg, bblock, (next_block)); \
337 ins->inst_false_bb = (next_block); \
338 start_new_bblock = 1; \
340 GET_BBLOCK (cfg, tblock, ip); \
341 link_bblock (cfg, bblock, tblock); \
342 ins->inst_false_bb = tblock; \
343 start_new_bblock = 2; \
345 if (sp != stack_start) { \
346 handle_stack_args (cfg, stack_start, sp - stack_start); \
347 CHECK_UNVERIFIABLE (cfg); \
349 MONO_ADD_INS (bblock, cmp); \
350 MONO_ADD_INS (bblock, ins); \
/*
 * link_bblock:
 *
 *   Link two basic blocks in the control-flow graph: FROM is the source
 * block and TO is the block control flows to after FROM.  Both the
 * out-edge list of FROM and the in-edge list of TO are grown by
 * reallocating a mempool array one slot larger and copying the old
 * entries; if the edge already exists nothing is added.
 * NOTE(review): the assignments installing the new arrays and the
 * early-out branches appear to have been elided from this excerpt.
 */
link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
MonoBasicBlock **newa;
/* verbose edge tracing; entry/exit blocks have no cil_code */
if (from->cil_code) {
printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
printf ("edge from entry to exit\n");
/* skip if the out-edge is already present */
for (i = 0; i < from->out_count; ++i) {
if (to == from->out_bb [i]) {
newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
for (i = 0; i < from->out_count; ++i) {
newa [i] = from->out_bb [i];
/* same dance for the in-edge list of TO */
for (i = 0; i < to->in_count; ++i) {
if (from == to->in_bb [i]) {
newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
for (i = 0; i < to->in_count; ++i) {
newa [i] = to->in_bb [i];
416 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
418 link_bblock (cfg, from, to);
/*
 * mono_find_block_region:
 *
 *   We mark each basic block with a region ID. We use that to avoid BB
 * optimizations when blocks are in different regions.
 *
 * Returns:
 *   A region token that encodes where this region is, and information
 * about the clause owner for this block.  The region encodes the
 * try/catch/filter clause that owns this block as well as the type.
 * -1 is a special value that represents a block that is in none of
 * try/catch/filter.
 */
mono_find_block_region (MonoCompile *cfg, int offset)
MonoMethod *method = cfg->method;
MonoMethodHeader *header = mono_method_get_header (method);
MonoExceptionClause *clause;
/* first search for handlers and filters */
for (i = 0; i < header->num_clauses; ++i) {
clause = &header->clauses [i];
/* a filter region spans from filter_offset up to its handler */
if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
(offset < (clause->handler_offset)))
return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
/* search the try blocks */
for (i = 0; i < header->num_clauses; ++i) {
clause = &header->clauses [i];
if (MONO_OFFSET_IN_CLAUSE (clause, offset))
return ((i + 1) << 8) | clause->flags;
/*
 * mono_find_final_block:
 *
 *   Collect the handler blocks of clauses of kind TYPE that enclose IP
 * but do not enclose TARGET — i.e. the handlers that must run when
 * control transfers from IP to TARGET (e.g. finally blocks on a leave).
 * Returns a GList of the handler MonoBasicBlocks.
 */
mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
MonoMethod *method = cfg->method;
MonoMethodHeader *header = mono_method_get_header (method);
MonoExceptionClause *clause;
MonoBasicBlock *handler;
for (i = 0; i < header->num_clauses; ++i) {
clause = &header->clauses [i];
/* clause covers IP but not TARGET: its handler is being exited */
if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
(!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
if (clause->flags == type) {
handler = cfg->cil_offset_to_bb [clause->handler_offset];
res = g_list_append (res, handler);
/*
 * mono_create_spvar_for_region:
 *
 *   Return the stack-pointer variable associated with REGION, creating
 * and caching it in cfg->spvars on first use.
 */
mono_create_spvar_for_region (MonoCompile *cfg, int region)
var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* prevent it from being register allocated */
var->flags |= MONO_INST_INDIRECT;
g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
511 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
513 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/*
 * mono_create_exvar_for_offset:
 *
 *   Return the exception-object variable for the handler at IL OFFSET,
 * creating and caching it in cfg->exvars on first use.
 */
mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
/* prevent it from being register allocated */
var->flags |= MONO_INST_INDIRECT;
g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
/*
 * type_to_eval_stack_type:
 *
 *   Returns the type used in the eval stack when @type is loaded: sets
 * inst->type (STACK_I4/I8/PTR/R8/OBJ/MP/VTYPE) and inst->klass.
 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
 * NOTE(review): some case labels and break statements appear to have
 * been elided from this excerpt.
 */
type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
inst->klass = klass = mono_class_from_mono_type (type);
/* byref values are managed pointers on the eval stack */
inst->type = STACK_MP;
switch (type->type) {
inst->type = STACK_INV;
case MONO_TYPE_BOOLEAN:
inst->type = STACK_I4;
case MONO_TYPE_FNPTR:
inst->type = STACK_PTR;
case MONO_TYPE_CLASS:
case MONO_TYPE_STRING:
case MONO_TYPE_OBJECT:
case MONO_TYPE_SZARRAY:
case MONO_TYPE_ARRAY:
inst->type = STACK_OBJ;
inst->type = STACK_I8;
inst->type = STACK_R8;
case MONO_TYPE_VALUETYPE:
/* enums take the stack type of their underlying type */
if (type->data.klass->enumtype) {
type = type->data.klass->enum_basetype;
inst->type = STACK_VTYPE;
case MONO_TYPE_TYPEDBYREF:
inst->klass = mono_defaults.typed_reference_class;
inst->type = STACK_VTYPE;
case MONO_TYPE_GENERICINST:
/* reduce to the open container class and re-dispatch */
type = &type->data.generic_class->container_class->byval_arg;
case MONO_TYPE_MVAR :
/* FIXME: all the arguments must be references for now,
 * later look inside cfg and see if the arg num is
 * really a reference
 */
g_assert (cfg->generic_sharing_context);
inst->type = STACK_OBJ;
g_error ("unknown type 0x%02x in eval stack type", type->type);
616 * The following tables are used to quickly validate the IL code in type_from_op ().
619 bin_num_table [STACK_MAX] [STACK_MAX] = {
620 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
621 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
622 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
623 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
624 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
625 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
626 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
627 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
632 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
635 /* reduce the size of this table */
637 bin_int_table [STACK_MAX] [STACK_MAX] = {
638 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
639 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
640 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
641 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
642 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
643 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
644 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
645 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
649 bin_comp_table [STACK_MAX] [STACK_MAX] = {
650 /* Inv i L p F & O vt */
652 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
653 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
654 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
655 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
656 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
657 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
658 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
661 /* reduce the size of this table */
663 shift_table [STACK_MAX] [STACK_MAX] = {
664 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
665 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
666 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
667 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
668 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
669 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
670 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
671 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
675 * Tables to map from the non-specific opcode to the matching
676 * type-specific opcode.
678 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
680 binops_op_map [STACK_MAX] = {
681 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
684 /* handles from CEE_NEG to CEE_CONV_U8 */
686 unops_op_map [STACK_MAX] = {
687 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
690 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
692 ovfops_op_map [STACK_MAX] = {
693 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
696 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
698 ovf2ops_op_map [STACK_MAX] = {
699 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
702 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
704 ovf3ops_op_map [STACK_MAX] = {
705 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
708 /* handles from CEE_BEQ to CEE_BLT_UN */
710 beqops_op_map [STACK_MAX] = {
711 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
714 /* handles from CEE_CEQ to CEE_CLT_UN */
716 ceqops_op_map [STACK_MAX] = {
717 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
/*
 * type_from_op:
 *
 *   Sets ins->type (the type on the eval stack) according to the
 * type of the opcode and the arguments to it, and specializes the
 * generic opcode to the I/L/P/F variant via the *_op_map tables.
 * Invalid IL code is marked by setting ins->type to the invalid value
 * STACK_INV.
 *
 * FIXME: this function sets ins->type unconditionally in some cases,
 * but it should set it to invalid for some types (a conv.x on an object).
 * NOTE(review): most case labels and break statements appear to have
 * been elided from this excerpt; comments mark the apparent groups.
 */
type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
switch (ins->opcode) {
/* arithmetic binops */
/* FIXME: check unverifiable args for STACK_MP */
ins->type = bin_num_table [src1->type] [src2->type];
ins->opcode += binops_op_map [ins->type];
/* integer-only binops (and/or/xor/...) */
ins->type = bin_int_table [src1->type] [src2->type];
ins->opcode += binops_op_map [ins->type];
/* shifts: result type follows the value being shifted */
ins->type = shift_table [src1->type] [src2->type];
ins->opcode += binops_op_map [ins->type];
/* compare: pick L/F/I variant from the operand stack type */
ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
if ((src1->type == STACK_I8) || ((sizeof (gpointer) == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
ins->opcode = OP_LCOMPARE;
else if (src1->type == STACK_R8)
ins->opcode = OP_FCOMPARE;
ins->opcode = OP_ICOMPARE;
case OP_ICOMPARE_IMM:
ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
if ((src1->type == STACK_I8) || ((sizeof (gpointer) == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
ins->opcode = OP_LCOMPARE_IMM;
/* conditional branches (beq..blt.un) */
ins->opcode += beqops_op_map [src1->type];
ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
ins->opcode += ceqops_op_map [src1->type];
/* ceq/cgt/clt family: bit 1 of the comp table gates validity */
ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
ins->opcode += ceqops_op_map [src1->type];
/* neg */
ins->type = neg_table [src1->type];
ins->opcode += unops_op_map [ins->type];
/* not: only valid on integer-ish stack types */
if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
ins->type = src1->type;
ins->type = STACK_INV;
ins->opcode += unops_op_map [ins->type];
/* conv to small int */
ins->type = STACK_I4;
ins->opcode += unops_op_map [src1->type];
/* conv.r.un */
ins->type = STACK_R8;
switch (src1->type) {
ins->opcode = OP_ICONV_TO_R_UN;
ins->opcode = OP_LCONV_TO_R_UN;
case CEE_CONV_OVF_I1:
case CEE_CONV_OVF_U1:
case CEE_CONV_OVF_I2:
case CEE_CONV_OVF_U2:
case CEE_CONV_OVF_I4:
case CEE_CONV_OVF_U4:
ins->type = STACK_I4;
ins->opcode += ovf3ops_op_map [src1->type];
case CEE_CONV_OVF_I_UN:
case CEE_CONV_OVF_U_UN:
ins->type = STACK_PTR;
ins->opcode += ovf2ops_op_map [src1->type];
case CEE_CONV_OVF_I1_UN:
case CEE_CONV_OVF_I2_UN:
case CEE_CONV_OVF_I4_UN:
case CEE_CONV_OVF_U1_UN:
case CEE_CONV_OVF_U2_UN:
case CEE_CONV_OVF_U4_UN:
ins->type = STACK_I4;
ins->opcode += ovf2ops_op_map [src1->type];
/* conv.u: native-unsigned conversion, width depends on the target */
ins->type = STACK_PTR;
switch (src1->type) {
ins->opcode = OP_ICONV_TO_U;
#if SIZEOF_VOID_P == 8
ins->opcode = OP_LCONV_TO_U;
ins->opcode = OP_MOVE;
ins->opcode = OP_LCONV_TO_U;
ins->opcode = OP_FCONV_TO_U;
/* conv to 64-bit */
ins->type = STACK_I8;
ins->opcode += unops_op_map [src1->type];
case CEE_CONV_OVF_I8:
case CEE_CONV_OVF_U8:
ins->type = STACK_I8;
ins->opcode += ovf3ops_op_map [src1->type];
case CEE_CONV_OVF_U8_UN:
case CEE_CONV_OVF_I8_UN:
ins->type = STACK_I8;
ins->opcode += ovf2ops_op_map [src1->type];
/* conv to floating point */
ins->type = STACK_R8;
ins->opcode += unops_op_map [src1->type];
ins->type = STACK_R8;
ins->type = STACK_I4;
ins->opcode += ovfops_op_map [src1->type];
ins->type = STACK_PTR;
ins->opcode += ovfops_op_map [src1->type];
/* overflow-checked arithmetic: no R8 variant exists */
ins->type = bin_num_table [src1->type] [src2->type];
ins->opcode += ovfops_op_map [src1->type];
if (ins->type == STACK_R8)
ins->type = STACK_INV;
case OP_LOAD_MEMBASE:
ins->type = STACK_PTR;
case OP_LOADI1_MEMBASE:
case OP_LOADU1_MEMBASE:
case OP_LOADI2_MEMBASE:
case OP_LOADU2_MEMBASE:
case OP_LOADI4_MEMBASE:
case OP_LOADU4_MEMBASE:
ins->type = STACK_PTR;
case OP_LOADI8_MEMBASE:
ins->type = STACK_I8;
case OP_LOADR4_MEMBASE:
case OP_LOADR8_MEMBASE:
ins->type = STACK_R8;
g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
/* managed pointers get a generic klass */
if (ins->type == STACK_MP)
ins->klass = mono_defaults.object_class;
945 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
951 param_table [STACK_MAX] [STACK_MAX] = {
/*
 * check_values_to_signature:
 *
 *   Validate that the eval-stack types in ARGS are compatible with SIG
 * (and THIS for instance calls), argument by argument.
 * NOTE(review): the return statements and several case labels appear to
 * have been elided from this excerpt.
 */
check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
switch (args->type) {
for (i = 0; i < sig->param_count; ++i) {
switch (args [i].type) {
/* a managed pointer on the stack requires a byref parameter */
if (!sig->params [i]->byref)
if (sig->params [i]->byref)
switch (sig->params [i]->type) {
case MONO_TYPE_CLASS:
case MONO_TYPE_STRING:
case MONO_TYPE_OBJECT:
case MONO_TYPE_SZARRAY:
case MONO_TYPE_ARRAY:
if (sig->params [i]->byref)
/* an R8 on the stack must match a floating-point parameter */
if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
/*if (!param_table [args [i].type] [sig->params [i]->type])
*/
1012 * When we need a pointer to the current domain many times in a method, we
1013 * call mono_domain_get() once and we store the result in a local variable.
1014 * This function returns the variable that represents the MonoDomain*.
1016 inline static MonoInst *
1017 mono_get_domainvar (MonoCompile *cfg)
1019 if (!cfg->domainvar)
1020 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1021 return cfg->domainvar;
/*
 * mono_get_got_var:
 *
 *   The got_var contains the address of the Global Offset Table when AOT
 * compiling.  Return it, creating it on first use; only meaningful on
 * architectures defining MONO_ARCH_NEED_GOT_VAR and when compiling AOT.
 * NOTE(review): the non-AOT early return and the #else/#endif branch
 * appear to have been elided from this excerpt.
 */
inline static MonoInst *
mono_get_got_var (MonoCompile *cfg)
#ifdef MONO_ARCH_NEED_GOT_VAR
if (!cfg->compile_aot)
if (!cfg->got_var) {
cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
return cfg->got_var;
1044 mono_get_vtable_var (MonoCompile *cfg)
1046 g_assert (cfg->generic_sharing_context);
1048 if (!cfg->rgctx_var) {
1049 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1050 /* force the var to be stack allocated */
1051 cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
1054 return cfg->rgctx_var;
/*
 * type_from_stack_type:
 *
 *   Map an instruction's eval-stack type back to a MonoType*.
 * NOTE(review): the STACK_MP case label appears to have been elided
 * before the `this_arg` return in this excerpt.
 */
type_from_stack_type (MonoInst *ins) {
switch (ins->type) {
case STACK_I4: return &mono_defaults.int32_class->byval_arg;
case STACK_I8: return &mono_defaults.int64_class->byval_arg;
case STACK_PTR: return &mono_defaults.int_class->byval_arg;
case STACK_R8: return &mono_defaults.double_class->byval_arg;
/* presumably STACK_MP: a managed pointer to ins->klass */
return &ins->klass->this_arg;
case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
case STACK_VTYPE: return &ins->klass->byval_arg;
g_error ("stack type %d to monotype not handled\n", ins->type);
/*
 * type_to_stack_type:
 *
 *   Map a MonoType to its eval-stack type constant (STACK_*), after
 * stripping enum wrappers via mono_type_get_underlying_type ().
 * NOTE(review): most return statements and several case labels appear
 * to have been elided from this excerpt.
 */
static G_GNUC_UNUSED int
type_to_stack_type (MonoType *t)
switch (mono_type_get_underlying_type (t)->type) {
case MONO_TYPE_BOOLEAN:
case MONO_TYPE_CHAR:
case MONO_TYPE_FNPTR:
case MONO_TYPE_CLASS:
case MONO_TYPE_STRING:
case MONO_TYPE_OBJECT:
case MONO_TYPE_SZARRAY:
case MONO_TYPE_ARRAY:
case MONO_TYPE_VALUETYPE:
case MONO_TYPE_TYPEDBYREF:
case MONO_TYPE_GENERICINST:
/* generic insts over value types behave as vtypes, otherwise objects */
if (mono_type_generic_inst_is_valuetype (t))
g_assert_not_reached ();
/*
 * array_access_to_klass:
 *
 *   Map a CIL ldelem/stelem opcode to the element MonoClass it accesses.
 * NOTE(review): the case labels for most opcodes appear to have been
 * elided from this excerpt; only the returned classes are visible.
 */
array_access_to_klass (int opcode)
return mono_defaults.byte_class;
return mono_defaults.uint16_class;
return mono_defaults.int_class;
return mono_defaults.sbyte_class;
return mono_defaults.int16_class;
return mono_defaults.int32_class;
return mono_defaults.uint32_class;
return mono_defaults.int64_class;
return mono_defaults.single_class;
return mono_defaults.double_class;
case CEE_LDELEM_REF:
case CEE_STELEM_REF:
return mono_defaults.object_class;
g_assert_not_reached ();
/*
 * mono_compile_get_interface_var:
 *
 *   Return a variable used to pass stack slot SLOT of type INS->type
 * across basic-block boundaries.  We try to share variables when
 * possible: integral stack types reuse a cached var from cfg->intvars,
 * keyed by (type, slot).
 */
mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
/* inlining can result in deeper stacks */
if (slot >= mono_method_get_header (cfg->method)->max_stack)
return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* cache key: stack type x slot */
pos = ins->type - 1 + slot * STACK_MAX;
switch (ins->type) {
if ((vnum = cfg->intvars [pos]))
return cfg->varinfo [vnum];
res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
cfg->intvars [pos] = res->inst_c0;
/* non-cacheable types always get a fresh variable */
res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/*
 * mono_save_token_info:
 *
 *   Remember the (image, token) pair under KEY in cfg->token_info_hash
 * so the AOT compiler can later resolve KEY back to metadata.
 */
mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
/*
 * Don't use this if a generic_context is set, since that means AOT can't
 * look up the method using just the image+token.
 * table == 0 means this is a reference made from a wrapper.
 */
if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
jump_info_token->image = image;
jump_info_token->token = token;
g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
/*
 * handle_stack_args:
 *
 *   This function is called to handle items that are left on the
 * evaluation stack at basic block boundaries. What happens is that we
 * save the values to local variables and we reload them later when
 * first entering the target basic block (with the
 * handle_loaded_temps () function).
 *   A single join point will use the same variables (stored in the
 * array bb->out_stack or bb->in_stack, if the basic block is before or
 * after the join point).
 *   This function needs to be called _before_ emitting the last
 * instruction of the bb (i.e. before emitting a branch).
 * If the stack merge fails at a join point, cfg->unverifiable is set.
 */
handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
MonoBasicBlock *bb = cfg->cbb;
MonoBasicBlock *outb;
MonoInst *inst, **locals;
if (cfg->verbose_level > 3)
printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
if (!bb->out_scount) {
bb->out_scount = count;
//printf ("bblock %d has out:", bb->block_num);
/* try to adopt the in_stack of a successor that already has one */
for (i = 0; i < bb->out_count; ++i) {
outb = bb->out_bb [i];
/* exception handlers are linked, but they should not be considered for stack args */
if (outb->flags & BB_EXCEPTION_HANDLER)
//printf (" %d", outb->block_num);
if (outb->in_stack) {
bb->out_stack = outb->in_stack;
bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
for (i = 0; i < count; ++i) {
/*
 * try to reuse temps already allocated for this purpouse, if they occupy the same
 * stack slot and if they are of the same type.
 * This won't cause conflicts since if 'local' is used to
 * store one of the values in the in_stack of a bblock, then
 * the same variable will be used for the same outgoing stack
 * slot as well.
 * This doesn't work when inlining methods, since the bblocks
 * in the inlined methods do not inherit their in_stack from
 * the bblock they are inlined to. See bug #58863 for an
 * example.
 */
if (cfg->inlined_method)
bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* propagate our out_stack as the in_stack of every successor */
for (i = 0; i < bb->out_count; ++i) {
outb = bb->out_bb [i];
/* exception handlers are linked, but they should not be considered for stack args */
if (outb->flags & BB_EXCEPTION_HANDLER)
if (outb->in_scount) {
if (outb->in_scount != bb->out_scount) {
/* stack-depth mismatch at a join point: unverifiable IL */
cfg->unverifiable = TRUE;
continue; /* check they are the same locals */
outb->in_scount = count;
outb->in_stack = bb->out_stack;
locals = bb->out_stack;
/* spill the current stack values into the shared temps */
for (i = 0; i < count; ++i) {
EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
inst->cil_code = sp [i]->cil_code;
sp [i] = locals [i];
if (cfg->verbose_level > 3)
printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
/*
 * It is possible that the out bblocks already have in_stack assigned, and
 * the in_stacks differ. In this case, we will store to all the different
 * in_stacks.
 */
/* Find a bblock which has a different in_stack */
while (bindex < bb->out_count) {
outb = bb->out_bb [bindex];
/* exception handlers are linked, but they should not be considered for stack args */
if (outb->flags & BB_EXCEPTION_HANDLER) {
if (outb->in_stack != locals) {
for (i = 0; i < count; ++i) {
EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
inst->cil_code = sp [i]->cil_code;
sp [i] = locals [i];
if (cfg->verbose_level > 3)
printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
locals = outb->in_stack;
/*
 * mini_emit_load_intf_reg_vtable:
 *
 *   Emit code which loads interface_offsets [klass->interface_id]
 * into INTF_REG.  The array is stored in memory before the vtable
 * (negative offsets from VTABLE_REG).  Under AOT the interface id is
 * not known at compile time, so it is loaded via an ADJUSTED_IID
 * patch-info constant instead of being folded into the displacement.
 */
mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
if (cfg->compile_aot) {
int ioffset_reg = alloc_preg (cfg);
int iid_reg = alloc_preg (cfg);
MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
/* JIT case: the slot offset is a compile-time constant */
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
1355 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1356 * stored in "klass_reg" implements the interface "klass".
1359 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1361 int ibitmap_reg = alloc_preg (cfg);
1362 int ibitmap_byte_reg = alloc_preg (cfg);
/* Load the per-class interface bitmap pointer. */
1364 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap));
1366 if (cfg->compile_aot) {
/* AOT: iid is a load-time constant, so compute byte index (iid >> 3) and
 * bit mask (1 << (iid & 7)) with emitted IR instead of JIT-time constants. */
1367 int iid_reg = alloc_preg (cfg);
1368 int shifted_iid_reg = alloc_preg (cfg);
1369 int ibitmap_byte_address_reg = alloc_preg (cfg);
1370 int masked_iid_reg = alloc_preg (cfg);
1371 int iid_one_bit_reg = alloc_preg (cfg);
1372 int iid_bit_reg = alloc_preg (cfg);
1373 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1374 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1375 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1376 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1377 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1378 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1379 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1380 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* Non-AOT: byte index and bit mask fold into immediates. */
1382 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1383 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1388 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1389 * stored in "vtable_reg" implements the interface "klass".
1392 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1394 int ibitmap_reg = alloc_preg (cfg);
1395 int ibitmap_byte_reg = alloc_preg (cfg);
/* Same bitmap test as mini_emit_load_intf_bit_reg_class, but the bitmap
 * pointer is read from the vtable rather than the class. */
1397 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap));
1399 if (cfg->compile_aot) {
/* AOT: compute byte index (iid >> 3) and bit mask (1 << (iid & 7)) in IR,
 * since the iid is only patched in at load time. */
1400 int iid_reg = alloc_preg (cfg);
1401 int shifted_iid_reg = alloc_preg (cfg);
1402 int ibitmap_byte_address_reg = alloc_preg (cfg);
1403 int masked_iid_reg = alloc_preg (cfg);
1404 int iid_one_bit_reg = alloc_preg (cfg);
1405 int iid_bit_reg = alloc_preg (cfg);
1406 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1407 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, shifted_iid_reg, iid_reg, 3);
1408 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1409 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1410 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, masked_iid_reg, iid_reg, 7);
1411 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1412 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1413 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* Non-AOT: immediates computed at JIT time. */
1415 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1416 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1421 * Emit code which checks whenever the interface id of @klass is smaller than
1422 * than the value given by max_iid_reg.
1425 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1426 MonoBasicBlock *false_target)
1428 if (cfg->compile_aot) {
/* AOT: the iid must be loaded via a patchable constant. */
1429 int iid_reg = alloc_preg (cfg);
1430 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1431 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1434 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
/* On failure: branch to false_target when one is supplied, otherwise throw.
 * NOTE(review): the selecting condition between the two lines below is
 * elided in this excerpt — confirm against the full file. */
1436 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1438 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1441 /* Same as above, but obtains max_iid from a vtable */
1443 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1444 MonoBasicBlock *false_target)
1446 int max_iid_reg = alloc_preg (cfg);
/* Load MonoVTable.max_interface_id (unsigned 16-bit) and delegate the check. */
1448 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1449 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1452 /* Same as above, but obtains max_iid from a klass */
1454 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1455 MonoBasicBlock *false_target)
1457 int max_iid_reg = alloc_preg (cfg);
/* Load MonoClass.max_interface_id (unsigned 16-bit) and delegate the check. */
1459 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1460 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * mini_emit_isninst_cast:
 *
 * Emit an isinst-style subclass test against the supertypes table of the
 * class in klass_reg: jumps to true_target when supertypes [klass->idepth - 1]
 * equals @klass, to false_target when the inheritance depth is too small.
 */
1464 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1466 int idepth_reg = alloc_preg (cfg);
1467 int stypes_reg = alloc_preg (cfg);
1468 int stype = alloc_preg (cfg);
/* Only classes deeper than the default supertable need an explicit depth
 * check; shallower entries are always present in the table. */
1470 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1471 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1472 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1473 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
/* Load supertypes [klass->idepth - 1] and compare it against @klass. */
1475 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1476 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1477 if (cfg->compile_aot) {
1478 int const_reg = alloc_preg (cfg);
1479 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1480 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1482 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1484 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/*
 * mini_emit_iface_cast:
 *
 * Emit an interface cast check on the vtable in vtable_reg: first a
 * max-interface-id range check, then the interface-bitmap bit test.
 * On success branches to true_target; on failure throws
 * InvalidCastException (the branch/throw selection is partially elided
 * in this excerpt — confirm against the full file).
 */
1488 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1490 int intf_reg = alloc_preg (cfg);
1492 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1493 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1494 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1496 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1498 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1502 * Variant of the above that takes a register to the class, not the vtable.
1505 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1507 int intf_bit_reg = alloc_preg (cfg);
/* Range-check the interface id, then test the class's interface bitmap. */
1509 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1510 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1511 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
/* Nonzero bit -> implements the interface; zero -> cast failure. */
1513 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1515 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * mini_emit_class_check:
 *
 * Emit code which throws InvalidCastException unless the class pointer in
 * klass_reg is exactly @klass (identity comparison, no subtype walk).
 */
1519 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1521 if (cfg->compile_aot) {
/* AOT: compare against a patchable class constant. */
1522 int const_reg = alloc_preg (cfg);
1523 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1524 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1526 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1528 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/*
 * mini_emit_class_check_branch:
 *
 * Like mini_emit_class_check, but instead of throwing, branches to @target
 * using the caller-supplied @branch_op on the comparison result.
 */
1532 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1534 if (cfg->compile_aot) {
1535 int const_reg = alloc_preg (cfg);
1536 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1537 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1539 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1541 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
/*
 * mini_emit_castclass:
 *
 * Emit a castclass check of the object in obj_reg (class in klass_reg)
 * against @klass, throwing InvalidCastException on failure. Handles the
 * array case (rank + element-class check, recursing for arrays of arrays;
 * obj_reg == -1 skips the vector check) and the plain-class case
 * (supertypes-table check). Branch structure between the cases is partially
 * elided in this excerpt.
 */
1545 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1548 int rank_reg = alloc_preg (cfg);
1549 int eclass_reg = alloc_preg (cfg);
/* Array case: the rank must match exactly. */
1551 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
1552 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1553 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1554 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1555 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
/* Element-class check; enums need special handling since an enum[] is
 * castable per its underlying type. */
1556 if (klass->cast_class == mono_defaults.object_class) {
1557 int parent_reg = alloc_preg (cfg);
1558 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
1559 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1560 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1561 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1562 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1563 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1564 } else if (klass->cast_class == mono_defaults.enum_class) {
1565 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1566 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1567 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1569 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1570 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1573 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1574 /* Check that the object is a vector too */
/* A vector (SZARRAY) has a NULL bounds pointer; a multi-dim array does not. */
1575 int bounds_reg = alloc_preg (cfg);
1576 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
1577 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1578 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-array case: check supertypes [klass->idepth - 1] == klass, throwing
 * on an inheritance depth that is too small. */
1581 int idepth_reg = alloc_preg (cfg);
1582 int stypes_reg = alloc_preg (cfg);
1583 int stype = alloc_preg (cfg);
1585 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1586 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1587 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1588 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1590 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1591 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1592 mini_emit_class_check (cfg, stype, klass);
/*
 * mini_emit_memset:
 *
 * Emit inline code which sets @size bytes at destreg+offset to @val
 * (only val == 0 is supported, per the assert). Small aligned sizes use a
 * single store-immediate; larger sizes store a zero register in descending
 * chunk sizes (8/4/2/1 bytes, subject to alignment). Loop/size bookkeeping
 * between the visible stores is elided in this excerpt.
 */
1597 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1601 g_assert (val == 0);
/* Fast path: a size that fits in one aligned store-immediate. */
1606 if ((size <= 4) && (size <= align)) {
1609 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1612 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1615 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1617 #if SIZEOF_VOID_P == 8
1619 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* General path: materialize val in a register sized to the native pointer. */
1625 val_reg = alloc_preg (cfg);
1627 if (sizeof (gpointer) == 8)
1628 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1630 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
/* Unaligned destination: fall back to byte stores. */
1633 /* This could be optimized further if neccesary */
1635 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1642 #if !NO_UNALIGNED_ACCESS
1643 if (sizeof (gpointer) == 8) {
1645 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1650 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
/* Remaining tail: 4-, 2-, then 1-byte stores. */
1658 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1663 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1668 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1674 #endif /* DISABLE_JIT */
/*
 * mini_emit_memcpy:
 *
 * Emit inline code which copies @size bytes from srcreg+soffset to
 * destreg+doffset through a temporary register, in 8/4/2/1-byte chunks
 * (subject to alignment and NO_UNALIGNED_ACCESS). Loop/size bookkeeping
 * between the visible load/store pairs is elided in this excerpt.
 */
1677 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
/* Unaligned case: byte-by-byte copy. */
1685 /* This could be optimized further if neccesary */
1687 cur_reg = alloc_preg (cfg);
1688 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1689 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1696 #if !NO_UNALIGNED_ACCESS
/* 64-bit targets copy pointer-sized (8-byte) chunks first. */
1697 if (sizeof (gpointer) == 8) {
1699 cur_reg = alloc_preg (cfg);
1700 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1701 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
/* Then 4-byte chunks ... */
1710 cur_reg = alloc_preg (cfg);
1711 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1712 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
/* ... then 2-byte ... */
1718 cur_reg = alloc_preg (cfg);
1719 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1720 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
/* ... and finally the 1-byte tail. */
1726 cur_reg = alloc_preg (cfg);
1727 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1728 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * ret_type_to_call_opcode:
 *
 * Map a method return @type to the IR call opcode family:
 * OP_*CALL / OP_*CALL_REG / OP_*CALLVIRT, selected by @calli / @virt.
 * Enum and generic-instance types are unwrapped and re-dispatched
 * (switch/case labels are partially elided in this excerpt).
 */
1738 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
/* Byref returns are pointer-sized -> plain CALL family. */
1741 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
/* Resolve shared generic params to their basic type first. */
1744 type = mini_get_basic_type_from_generic (gsctx, type);
1745 switch (type->type) {
1746 case MONO_TYPE_VOID:
1747 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
1750 case MONO_TYPE_BOOLEAN:
1753 case MONO_TYPE_CHAR:
1756 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1760 case MONO_TYPE_FNPTR:
1761 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1762 case MONO_TYPE_CLASS:
1763 case MONO_TYPE_STRING:
1764 case MONO_TYPE_OBJECT:
1765 case MONO_TYPE_SZARRAY:
1766 case MONO_TYPE_ARRAY:
1767 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
/* 64-bit integer returns. */
1770 return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
/* Floating-point returns. */
1773 return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
1774 case MONO_TYPE_VALUETYPE:
/* Enums use their underlying basetype; plain valuetypes use VCALL. */
1775 if (type->data.klass->enumtype) {
1776 type = type->data.klass->enum_basetype;
1779 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1780 case MONO_TYPE_TYPEDBYREF:
1781 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1782 case MONO_TYPE_GENERICINST:
/* Re-dispatch on the generic container's open class. */
1783 type = &type->data.generic_class->container_class->byval_arg;
1786 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
1792 * target_type_is_incompatible:
1793 * @cfg: MonoCompile context
1795 * Check that the item @arg on the evaluation stack can be stored
1796 * in the target type (can be a local, or field, etc).
1797 * The cfg arg can be used to check if we need verification or just
1800 * Returns: non-0 value if arg can't be stored on a target.
1803 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1805 MonoType *simple_type;
1808 if (target->byref) {
1809 /* FIXME: check that the pointed to types match */
1810 if (arg->type == STACK_MP)
1811 return arg->klass != mono_class_from_mono_type (target);
1812 if (arg->type == STACK_PTR)
/* Strip custom modifiers / enum wrappers before dispatching on type kind. */
1817 simple_type = mono_type_get_underlying_type (target);
1818 switch (simple_type->type) {
1819 case MONO_TYPE_VOID:
/* 32-bit integral targets accept I4 or native-pointer stack entries. */
1823 case MONO_TYPE_BOOLEAN:
1826 case MONO_TYPE_CHAR:
1829 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1833 /* STACK_MP is needed when setting pinned locals */
1834 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1839 case MONO_TYPE_FNPTR:
1840 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1843 case MONO_TYPE_CLASS:
1844 case MONO_TYPE_STRING:
1845 case MONO_TYPE_OBJECT:
1846 case MONO_TYPE_SZARRAY:
1847 case MONO_TYPE_ARRAY:
/* Reference targets require an object reference on the stack. */
1848 if (arg->type != STACK_OBJ)
1850 /* FIXME: check type compatibility */
1854 if (arg->type != STACK_I8)
1859 if (arg->type != STACK_R8)
1862 case MONO_TYPE_VALUETYPE:
/* Valuetype targets must match the exact MonoClass of the stack entry. */
1863 if (arg->type != STACK_VTYPE)
1865 klass = mono_class_from_mono_type (simple_type);
1866 if (klass != arg->klass)
1869 case MONO_TYPE_TYPEDBYREF:
1870 if (arg->type != STACK_VTYPE)
1872 klass = mono_class_from_mono_type (simple_type);
1873 if (klass != arg->klass)
1876 case MONO_TYPE_GENERICINST:
/* Generic instances: valuetype instantiations behave like valuetypes,
 * reference instantiations like object references. */
1877 if (mono_type_generic_inst_is_valuetype (simple_type)) {
1878 if (arg->type != STACK_VTYPE)
1880 klass = mono_class_from_mono_type (simple_type);
1881 if (klass != arg->klass)
1885 if (arg->type != STACK_OBJ)
1887 /* FIXME: check type compatibility */
1891 case MONO_TYPE_MVAR:
1892 /* FIXME: all the arguments must be references for now,
1893 * later look inside cfg and see if the arg num is
1894 * really a reference
/* Shared generic params are only supported as object references here. */
1896 g_assert (cfg->generic_sharing_context);
1897 if (arg->type != STACK_OBJ)
1901 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
1907 * Prepare arguments for passing to a function call.
1908 * Return a non-zero value if the arguments can't be passed to the given
1910 * The type checks are not yet complete and some conversions may need
1911 * casts on 32 or 64 bit architectures.
1913 * FIXME: implement this using target_type_is_incompatible ()
1916 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
1918 MonoType *simple_type;
/* The implicit 'this' argument must be an object reference or pointer. */
1922 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
/* Check each declared parameter against the stack type of its argument. */
1926 for (i = 0; i < sig->param_count; ++i) {
1927 if (sig->params [i]->byref) {
1928 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
1932 simple_type = sig->params [i];
1933 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
1935 switch (simple_type->type) {
1936 case MONO_TYPE_VOID:
1941 case MONO_TYPE_BOOLEAN:
1944 case MONO_TYPE_CHAR:
1947 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
1953 case MONO_TYPE_FNPTR:
1954 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
1957 case MONO_TYPE_CLASS:
1958 case MONO_TYPE_STRING:
1959 case MONO_TYPE_OBJECT:
1960 case MONO_TYPE_SZARRAY:
1961 case MONO_TYPE_ARRAY:
1962 if (args [i]->type != STACK_OBJ)
1967 if (args [i]->type != STACK_I8)
1972 if (args [i]->type != STACK_R8)
1975 case MONO_TYPE_VALUETYPE:
/* Enums re-dispatch on their underlying basetype. */
1976 if (simple_type->data.klass->enumtype) {
1977 simple_type = simple_type->data.klass->enum_basetype;
1980 if (args [i]->type != STACK_VTYPE)
1983 case MONO_TYPE_TYPEDBYREF:
1984 if (args [i]->type != STACK_VTYPE)
1987 case MONO_TYPE_GENERICINST:
/* Re-dispatch on the generic container's open class. */
1988 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
1992 g_error ("unknown type 0x%02x in check_call_signature",
/*
 * callvirt_to_call:
 *
 * Map an OP_*CALLVIRT opcode to its direct OP_*CALL counterpart; asserts on
 * any other opcode. (The switch cases are elided in this excerpt.)
 */
2000 callvirt_to_call (int opcode)
2005 case OP_VOIDCALLVIRT:
2014 g_assert_not_reached ();
/*
 * callvirt_to_call_membase:
 *
 * Map an OP_*CALLVIRT opcode to the corresponding OP_*CALL_MEMBASE opcode
 * (call through a [basereg + offset] function pointer); asserts on any
 * other opcode. (Some case labels are elided in this excerpt.)
 */
2021 callvirt_to_call_membase (int opcode)
2025 return OP_CALL_MEMBASE;
2026 case OP_VOIDCALLVIRT:
2027 return OP_VOIDCALL_MEMBASE;
2029 return OP_FCALL_MEMBASE;
2031 return OP_LCALL_MEMBASE;
2033 return OP_VCALL_MEMBASE;
2035 g_assert_not_reached ();
2041 #ifdef MONO_ARCH_HAVE_IMT
/*
 * emit_imt_argument:
 *
 * Make the IMT method token available to the callee: on architectures with
 * a dedicated IMT register, load it (from imt_arg, an AOT patch, or an
 * inline method pointer) and attach it to the call; otherwise delegate to
 * the per-architecture hook.
 */
2043 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
2045 #ifdef MONO_ARCH_IMT_REG
2046 int method_reg = alloc_preg (cfg);
/* Explicit imt_arg provided -> just move it into place. */
2049 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2050 } else if (cfg->compile_aot) {
2051 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
/* JIT case: embed the MonoMethod pointer as an inline constant. */
2054 MONO_INST_NEW (cfg, ins, OP_PCONST);
2055 ins->inst_p0 = call->method;
2056 ins->dreg = method_reg;
2057 MONO_ADD_INS (cfg->cbb, ins);
2060 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2062 mono_arch_emit_imt_argument (cfg, call, imt_arg);
/*
 * mono_patch_info_new:
 *
 * Allocate a MonoJumpInfo from @mp and fill in its type/ip/target fields
 * (some field assignments are elided in this excerpt).
 */
2067 static MonoJumpInfo *
2068 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2070 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2074 ji->data.target = target;
2079 inline static MonoInst*
2080 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args);
/*
 * mono_emit_call_args:
 *
 * Create a MonoCallInst for a call with signature @sig and arguments @args.
 * Selects the opcode from the return type (@calli / @virtual), sets up a
 * vtype return temporary via OP_OUTARG_VTRETADDR when needed, converts R4
 * arguments via an icall under MONO_ARCH_SOFT_FLOAT, and lets the backend
 * lay out the outgoing arguments. The call is NOT added to a bblock here.
 */
2082 inline static MonoCallInst *
2083 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2084 MonoInst **args, int calli, int virtual)
2087 #ifdef MONO_ARCH_SOFT_FLOAT
2091 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2094 call->signature = sig;
/* Set the eval-stack type of the call's result from the return type. */
2096 type_to_eval_stack_type ((cfg), sig->ret, &call->inst);
2098 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
/* Struct return: allocate a local to hold the returned value. */
2099 MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
2102 temp->backend.is_pinvoke = sig->pinvoke;
2105 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2106 * address of return value to increase optimization opportunities.
2107 * Before vtype decomposition, the dreg of the call ins itself represents the
2108 * fact the call modifies the return value. After decomposition, the call will
2109 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2110 * will be transformed into an LDADDR.
2112 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2113 loada->dreg = alloc_preg (cfg);
2114 loada->inst_p0 = temp;
2115 /* We reference the call too since call->dreg could change during optimization */
2116 loada->inst_p1 = call;
2117 MONO_ADD_INS (cfg->cbb, loada);
2119 call->inst.dreg = temp->dreg;
2121 call->vret_var = loada;
2122 } else if (!MONO_TYPE_IS_VOID (sig->ret))
2123 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2125 #ifdef MONO_ARCH_SOFT_FLOAT
2127 * If the call has a float argument, we would need to do an r8->r4 conversion using
2128 * an icall, but that cannot be done during the call sequence since it would clobber
2129 * the call registers + the stack. So we do it before emitting the call.
2131 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2133 MonoInst *in = call->args [i];
/* Index 0 may be the implicit 'this'; real params start at sig->hasthis. */
2135 if (i >= sig->hasthis)
2136 t = sig->params [i - sig->hasthis];
2138 t = &mono_defaults.int_class->byval_arg;
2139 t = mono_type_get_underlying_type (t);
2141 if (!t->byref && t->type == MONO_TYPE_R4) {
2142 MonoInst *iargs [1];
/* Convert the r8 stack value to an r4 bit pattern via an icall. */
2146 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2148 /* The result will be in an int vreg */
2149 call->args [i] = conv;
/* Backend-specific outgoing-argument setup (registers/stack slots). */
2154 mono_arch_emit_call (cfg, call);
2156 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2157 cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * mono_emit_calli:
 *
 * Emit an indirect call through the function pointer in @addr with
 * signature @sig, appending it to the current bblock.
 */
2162 inline static MonoInst*
2163 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr)
2165 MonoCallInst *call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE);
/* sreg1 carries the call target for the *CALL_REG opcodes. */
2167 call->inst.sreg1 = addr->dreg;
2169 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2171 return (MonoInst*)call;
/*
 * mono_emit_rgctx_calli:
 *
 * Like mono_emit_calli, but additionally passes @rgctx_arg in the
 * architecture's RGCTX register. Asserts when the architecture has no
 * RGCTX register.
 */
2174 inline static MonoInst*
2175 mono_emit_rgctx_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *rgctx_arg)
2177 #ifdef MONO_ARCH_RGCTX_REG
/* Move the rgctx value into a fresh vreg before emitting the call so it
 * can be bound to the fixed RGCTX register as an out-argument. */
2182 rgctx_reg = mono_alloc_preg (cfg);
2183 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2185 call = (MonoCallInst*)mono_emit_calli (cfg, sig, args, addr);
2187 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2188 cfg->uses_rgctx_reg = TRUE;
2190 return (MonoInst*)call;
2192 g_assert_not_reached ();
/*
 * mono_emit_method_call_full:
 *
 * Emit a call to @method with signature @sig and arguments @args.
 * @this non-NULL selects virtual dispatch; @imt_arg optionally carries an
 * explicit IMT token. Handles string ctor signature rewriting, remoting
 * proxies (marshalbyref), devirtualization of final/sealed methods,
 * delegate Invoke fast paths, and interface (IMT or interface-offset
 * table) vs. plain vtable-slot dispatch. Some branch structure is elided
 * in this excerpt.
 */
2198 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2199 MonoInst **args, MonoInst *this, MonoInst *imt_arg)
2201 gboolean virtual = this != NULL;
2202 gboolean enable_for_aot = TRUE;
2205 if (method->string_ctor) {
2206 /* Create the real signature */
2207 /* FIXME: Cache these */
/* String ctors actually return the string, not void. */
2208 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_full (cfg->mempool, sig);
2209 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2214 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual);
/* Non-virtual calls on possibly-remote objects must go through the
 * remoting-invoke-with-check wrapper. */
2216 if (this && sig->hasthis &&
2217 (method->klass->marshalbyref || method->klass == mono_defaults.object_class) &&
2218 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && !MONO_CHECK_THIS (this)) {
2219 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2221 call->method = method;
2223 call->inst.flags |= MONO_INST_HAS_METHOD;
2224 call->inst.inst_left = this;
/* Virtual-dispatch path. */
2227 int vtable_reg, slot_reg, this_reg;
2229 this_reg = this->dreg;
2231 if ((!cfg->compile_aot || enable_for_aot) &&
2232 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2233 ((method->flags & METHOD_ATTRIBUTE_FINAL) &&
2234 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK))) {
2236 * the method is not virtual, we just need to ensure this is not null
2237 * and then we can call the method directly.
2239 if (method->klass->marshalbyref || method->klass == mono_defaults.object_class) {
2240 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
/* String ctors take a NULL 'this', so skip the null check for them. */
2243 if (!method->string_ctor) {
2244 cfg->flags |= MONO_CFG_HAS_CHECK_THIS;
2245 MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, this_reg);
2246 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, this_reg);
2249 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2251 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2253 return (MonoInst*)call;
2256 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
2257 if ((method->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
2258 /* Make a call to delegate->invoke_impl */
2259 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2260 call->inst.inst_basereg = this_reg;
2261 call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2262 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2264 return (MonoInst*)call;
2268 if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
2269 ((method->flags & METHOD_ATTRIBUTE_FINAL) ||
2270 (method->klass && method->klass->flags & TYPE_ATTRIBUTE_SEALED))) {
2272 * the method is virtual, but we can statically dispatch since either
2273 * it's class or the method itself are sealed.
2274 * But first we need to ensure it's not a null reference.
2276 cfg->flags |= MONO_CFG_HAS_CHECK_THIS;
2277 MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, this_reg);
2278 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, this_reg);
2280 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2281 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2283 return (MonoInst*)call;
/* True virtual dispatch: call through a vtable/IMT slot. */
2286 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2288 vtable_reg = alloc_preg (cfg);
2289 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2290 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2292 #ifdef MONO_ARCH_HAVE_IMT
/* IMT: slots live at negative offsets from the vtable. */
2294 guint32 imt_slot = mono_method_get_imt_slot (method);
2295 emit_imt_argument (cfg, call, imt_arg);
2296 slot_reg = vtable_reg;
2297 call->inst.inst_offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2300 if (slot_reg == -1) {
/* No IMT: compute the slot via the interface-offsets table. */
2301 slot_reg = alloc_preg (cfg);
2302 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2303 call->inst.inst_offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
/* Class method: plain vtable-slot dispatch. */
2306 slot_reg = vtable_reg;
2307 call->inst.inst_offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
2308 (mono_method_get_vtable_index (method) * SIZEOF_VOID_P);
2309 #ifdef MONO_ARCH_HAVE_IMT
2311 g_assert (mono_method_signature (method)->generic_param_count);
2312 emit_imt_argument (cfg, call, imt_arg);
2317 call->inst.sreg1 = slot_reg;
2318 call->virtual = TRUE;
2321 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2323 return (MonoInst*)call;
/*
 * mono_emit_rgctx_method_call_full:
 *
 * Like mono_emit_method_call_full, but additionally passes @vtable_arg in
 * the architecture's RGCTX register (when MONO_ARCH_RGCTX_REG is defined).
 */
2327 mono_emit_rgctx_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2328 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *vtable_arg)
2335 #ifdef MONO_ARCH_RGCTX_REG
/* Copy the vtable/rgctx value into a fresh vreg before the call is built. */
2336 rgctx_reg = mono_alloc_preg (cfg);
2337 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
2342 ins = mono_emit_method_call_full (cfg, method, sig, args, this, imt_arg);
2344 call = (MonoCallInst*)ins;
2346 #ifdef MONO_ARCH_RGCTX_REG
2347 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2348 cfg->uses_rgctx_reg = TRUE;
/* Convenience wrapper: call @method with its own signature and no IMT arg. */
2357 static inline MonoInst*
2358 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2360 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), args, this, NULL);
/*
 * mono_emit_native_call:
 *
 * Emit a direct call to the native function @func with signature @sig,
 * appending it to the current bblock. (The assignment of @func into the
 * call is elided in this excerpt.)
 */
2364 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2371 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE);
2374 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2376 return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 *
 * Emit a call to the registered JIT icall whose address is @func, looked up
 * via mono_find_jit_icall_by_addr and dispatched through its wrapper.
 */
2379 inline static MonoInst*
2380 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2382 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2386 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2390 * mono_emit_abs_call:
2392 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2394 inline static MonoInst*
2395 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2396 MonoMethodSignature *sig, MonoInst **args)
2398 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2402 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
/* Register ji in the per-cfg abs_patches table (keyed by identity) so it
 * can be recognized as a patch during code emission. */
2405 if (cfg->abs_patches == NULL)
2406 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2407 g_hash_table_insert (cfg->abs_patches, ji, ji);
2408 ins = mono_emit_native_call (cfg, ji, sig, args);
2409 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * get_memcpy_method:
 *
 * Return the cached managed String.memcpy(3-arg) helper from corlib;
 * aborts if corlib is too old to provide it.
 */
2414 get_memcpy_method (void)
2416 static MonoMethod *memcpy_method = NULL;
2417 if (!memcpy_method) {
2418 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2420 g_error ("Old corlib found. Install a new one");
2422 return memcpy_method;
2426 * Emit code to copy a valuetype of type @klass whose address is stored in
2427 * @src->dreg to memory whose address is stored at @dest->dreg.
2430 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
2432 MonoInst *iargs [3];
2435 MonoMethod *memcpy_method;
2439 * This check breaks with spilled vars... need to handle it during verification anyway.
2440 * g_assert (klass && klass == src->klass && klass == dest->klass);
/* @native selects the marshalled (native) size instead of the managed one. */
2444 n = mono_class_native_size (klass, &align);
2446 n = mono_class_value_size (klass, &align);
/* Small copies are inlined; larger ones call the managed memcpy helper. */
2448 if ((cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
2449 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
2450 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
2454 EMIT_NEW_ICONST (cfg, iargs [2], n);
2456 memcpy_method = get_memcpy_method ();
2457 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *
 * Return the cached managed String.memset(3-arg) helper from corlib;
 * aborts if corlib is too old to provide it.
 */
2462 get_memset_method (void)
2464 static MonoMethod *memset_method = NULL;
2465 if (!memset_method) {
2466 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
2468 g_error ("Old corlib found. Install a new one");
2470 return memset_method;
/*
 * mini_emit_initobj:
 *
 * Emit code which zero-initializes a valuetype of type @klass at the
 * address in @dest->dreg (CIL initobj): small types are zeroed inline via
 * mini_emit_memset, larger ones call the managed memset helper.
 */
2474 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
2476 MonoInst *iargs [3];
2479 MonoMethod *memset_method;
2481 /* FIXME: Optimize this for the case when dest is an LDADDR */
2483 mono_class_init (klass);
2484 n = mono_class_value_size (klass, &align);
2486 if (n <= sizeof (gpointer) * 5) {
2487 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
/* Large type: memset (dest, 0, n) through the managed helper. */
2490 memset_method = get_memset_method ();
2492 EMIT_NEW_ICONST (cfg, iargs [1], 0);
2493 EMIT_NEW_ICONST (cfg, iargs [2], n);
2494 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 *
 * Emit IR which loads the runtime generic context for @method:
 * - method-inflated context -> the MRGCTX variable;
 * - static method or valuetype -> the vtable variable (possibly loading the
 *   class vtable out of an MRGCTX);
 * - otherwise -> the vtable loaded from the 'this' argument.
 * Some branch structure is elided in this excerpt.
 */
2499 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
2501 MonoInst *this = NULL;
2503 g_assert (cfg->generic_sharing_context);
/* Only load 'this' when it exists and the context does not come from the
 * method itself. */
2505 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
2506 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
2507 !method->klass->valuetype)
2508 EMIT_NEW_ARGLOAD (cfg, this, 0);
2510 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
2511 MonoInst *mrgctx_loc, *mrgctx_var;
2514 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
2516 mrgctx_loc = mono_get_vtable_var (cfg);
2517 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
2520 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
2521 MonoInst *vtable_loc, *vtable_var;
2525 vtable_loc = mono_get_vtable_var (cfg);
2526 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
2528 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
/* The variable actually holds an MRGCTX; dereference to its class vtable. */
2529 MonoInst *mrgctx_var = vtable_var;
2532 vtable_reg = alloc_preg (cfg);
2533 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
2534 vtable_var->type = STACK_PTR;
/* Instance method: read the vtable directly from 'this'. */
2540 int vtable_reg, res_reg;
2542 vtable_reg = alloc_preg (cfg);
2543 res_reg = alloc_preg (cfg);
2544 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 *
 *   Allocate (from mempool @mp) and fill an rgctx lazy-fetch descriptor: which
 * method it belongs to, whether the lookup goes through an MRGCTX, the wrapped
 * patch (@patch_type/@patch_data) and the kind of info to fetch (@info_type).
 */
2549 static MonoJumpInfoRgctxEntry *
2550 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, int info_type)
2552 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
2553 res->method = method;
2554 res->in_mrgctx = in_mrgctx;
2555 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
2556 res->data->type = patch_type;
2557 res->data->data.target = patch_data;
2558 res->info_type = info_type;
/* Emit a call to the rgctx lazy-fetch trampoline which resolves @entry using @rgctx. */
2563 static inline MonoInst*
2564 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
2566 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/* Emit IR which looks up class-related info (@rgctx_type) for @klass in the runtime generic context. */
2570 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
2571 MonoClass *klass, int rgctx_type)
2573 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
2574 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2576 return emit_rgctx_fetch (cfg, rgctx, entry);
/* Emit IR which looks up method-related info (@rgctx_type) for @cmethod in the runtime generic context. */
2580 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
2581 MonoMethod *cmethod, int rgctx_type)
2583 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
2584 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2586 return emit_rgctx_fetch (cfg, rgctx, entry);
/* Emit IR which looks up field-related info (@rgctx_type) for @field in the runtime generic context. */
2590 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
2591 MonoClassField *field, int rgctx_type)
2593 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
2594 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2596 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * mini_emit_check_array_type:
 *
 *   Emit a runtime check that @obj is an instance of @array_class, throwing
 * ArrayTypeMismatchException otherwise. The comparison strategy depends on the
 * compilation mode: class pointers under MONO_OPT_SHARED, an rgctx-fetched
 * vtable under generic sharing, or a (possibly AOT-patched) vtable constant.
 */
2600 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
2602 int vtable_reg = alloc_preg (cfg);
2603 int context_used = 0;
2605 if (cfg->generic_sharing_context)
2606 context_used = mono_class_check_context_used (array_class);
2608 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Shared code cannot bake in a vtable pointer, so compare MonoClass pointers instead. */
2610 if (cfg->opt & MONO_OPT_SHARED) {
2611 int class_reg = alloc_preg (cfg);
2612 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2613 if (cfg->compile_aot) {
2614 int klass_reg = alloc_preg (cfg);
2615 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
2616 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
2618 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
/* Generic sharing: fetch the expected vtable from the rgctx at runtime. */
2620 } else if (context_used) {
2621 MonoInst *vtable_ins;
2623 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
2624 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
/* Plain JIT/AOT: compare against the vtable constant directly. */
2626 if (cfg->compile_aot) {
2627 int vt_reg = alloc_preg (cfg);
2628 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, mono_class_vtable (cfg->domain, array_class));
2629 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
2631 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, mono_class_vtable (cfg->domain, array_class));
2635 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
/*
 * save_cast_details:
 *
 *   When --debug=casts is active, emit code which records the source class of
 * @obj_reg and the target @klass into the thread's MonoJitTlsData, so a failed
 * cast can report "cannot cast from X to Y" details.
 */
2639 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg)
2641 if (mini_get_debug_options ()->better_cast_details) {
2642 int to_klass_reg = alloc_preg (cfg);
2643 int vtable_reg = alloc_preg (cfg);
2644 int klass_reg = alloc_preg (cfg);
2645 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
/* No TLS intrinsic on this platform, the feature cannot work. */
2648 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
2652 MONO_ADD_INS (cfg->cbb, tls_get);
/* obj -> vtable -> klass, then store the from/to classes into the JIT TLS record. */
2653 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2654 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2656 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
2657 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
2658 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
/* Undo save_cast_details () once the cast has succeeded, so stale data is not reported later. */
2663 reset_cast_details (MonoCompile *cfg)
2665 /* Reset the variables holding the cast details */
2666 if (mini_get_debug_options ()->better_cast_details) {
2667 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
2669 MONO_ADD_INS (cfg->cbb, tls_get);
2670 /* It is enough to reset the from field */
2671 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
2676 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
2677 * generic code is generated.
/* Calls the managed Nullable<T>.Unbox helper; under generic sharing its address comes from the rgctx. */
2680 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
2682 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
2685 MonoInst *rgctx, *addr;
2687 /* FIXME: What if the class is shared? We might not
2688 have to get the address of the method from the
2690 addr = emit_get_rgctx_method (cfg, context_used, method,
2691 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
2693 rgctx = emit_get_rgctx (cfg, method, context_used);
/* Indirect call through the rgctx-fetched code address. */
2695 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
/* Non-shared case: a plain direct call suffices. */
2697 return mono_emit_method_call (cfg, method, &val, NULL);
/*
 * handle_unbox:
 *
 *   Emit the unbox sequence for sp [0]: verify at runtime that the boxed
 * object's element class matches @klass (throwing InvalidCastException
 * otherwise), then produce the address of the value payload, which starts
 * right after the MonoObject header.
 */
2702 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
2706 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
2707 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
2708 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
2709 int rank_reg = alloc_dreg (cfg ,STACK_I4);
2711 obj_reg = sp [0]->dreg;
2712 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2713 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
2715 /* FIXME: generics */
2716 g_assert (klass->rank == 0);
/* The object must not be an array (rank 0), otherwise the cast is invalid. */
2719 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
2720 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
2722 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2723 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
/* Shared generic code: compare against the element class fetched from the rgctx. */
2726 MonoInst *element_class;
2728 /* This assertion is from the unboxcast insn */
2729 g_assert (klass->rank == 0);
2731 element_class = emit_get_rgctx_klass (cfg, context_used,
2732 klass->element_class, MONO_RGCTX_INFO_KLASS);
2734 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
2735 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-shared: compare against the known element class, with cast diagnostics. */
2737 save_cast_details (cfg, klass->element_class, obj_reg);
2738 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
2739 reset_cast_details (cfg);
/* Result: pointer just past the object header, i.e. the unboxed value's address. */
2742 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_PTR), obj_reg, sizeof (MonoObject));
2743 MONO_ADD_INS (cfg->cbb, add);
2744 add->type = STACK_MP;
/*
 * handle_alloc:
 *
 *   Emit the allocation of a new object of type @klass (@for_box distinguishes
 * box allocations so the GC can pick a specialized allocator). Chooses between
 * the generic mono_object_new path (MONO_OPT_SHARED), a corlib token helper
 * for out-of-line AOT code, a GC-provided managed allocator, or a JIT icall.
 */
2751 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box)
2753 MonoInst *iargs [2];
2756 if (cfg->opt & MONO_OPT_SHARED) {
2757 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
2758 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
2760 alloc_ftn = mono_object_new;
2761 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib) {
2762 /* This happens often in argument checking code, eg. throw new FooException... */
2763 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
2764 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
2765 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
2767 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2768 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
/* Prefer the GC's managed allocator when available (avoids a native transition). */
2771 if (managed_alloc) {
2772 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
2773 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
2775 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* Some allocation fns also want the instance size in pointer-sized words. */
2777 guint32 lw = vtable->klass->instance_size;
2778 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
2779 EMIT_NEW_ICONST (cfg, iargs [0], lw);
2780 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
2783 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
2787 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/*
 * handle_alloc_from_inst:
 *
 *   Like handle_alloc (), but for shared generic code where the vtable is not
 * known at compile time and arrives at runtime in @data_inst.
 */
2791 handle_alloc_from_inst (MonoCompile *cfg, MonoClass *klass, MonoInst *data_inst,
2794 MonoInst *iargs [2];
2795 MonoMethod *managed_alloc = NULL;
2799 FIXME: we cannot get managed_alloc here because we can't get
2800 the class's vtable (because it's not a closed class)
2802 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2803 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
2806 if (cfg->opt & MONO_OPT_SHARED) {
2807 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
2808 iargs [1] = data_inst;
2809 alloc_ftn = mono_object_new;
/* NOTE(review): managed_alloc is initialized to NULL and the code above is
 * commented out, so this branch appears dead in the visible code — confirm. */
2811 if (managed_alloc) {
2812 iargs [0] = data_inst;
2813 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
/* Runtime-supplied vtable: allocate via mono_object_new_specific. */
2816 iargs [0] = data_inst;
2817 alloc_ftn = mono_object_new_specific;
2820 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/*
 * handle_box:
 *
 *   Emit the box sequence for @val of type @klass: Nullable<T> delegates to the
 * managed Box helper; otherwise allocate the object and store the value right
 * after the MonoObject header.
 */
2824 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass)
2826 MonoInst *alloc, *ins;
2828 if (mono_class_is_nullable (klass)) {
2829 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
2830 return mono_emit_method_call (cfg, method, &val, NULL);
2833 alloc = handle_alloc (cfg, klass, TRUE);
2835 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * handle_box_from_inst:
 *
 *   Shared-generic variant of handle_box (): the Nullable Box helper's address
 * is fetched from the rgctx, and the allocation uses the runtime-supplied
 * vtable in @data_inst.
 */
2841 handle_box_from_inst (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used, MonoInst *data_inst)
2843 MonoInst *alloc, *ins;
2845 if (mono_class_is_nullable (klass)) {
2846 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
2847 /* FIXME: What if the class is shared? We might not
2848 have to get the method address from the RGCTX. */
2849 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
2850 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
2851 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2853 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
2855 alloc = handle_alloc_from_inst (cfg, klass, data_inst, TRUE);
/* Store the value payload just after the object header. */
2857 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * handle_castclass:
 *
 *   Emit the castclass check for @src against @klass. A null reference passes
 * unchecked (branch to is_null_bb). Interfaces go through the interface cast
 * helper; sealed non-array classes compare vtable/class pointers directly;
 * everything else falls back to the generic castclass helper. Throws
 * InvalidCastException on mismatch.
 */
2864 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
2866 MonoBasicBlock *is_null_bb;
2867 int obj_reg = src->dreg;
2868 int vtable_reg = alloc_preg (cfg);
2870 NEW_BBLOCK (cfg, is_null_bb);
/* null always casts successfully — skip all checks. */
2872 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
2873 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
2875 save_cast_details (cfg, klass, obj_reg);
2877 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2878 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2879 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
2881 int klass_reg = alloc_preg (cfg);
2883 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed, non-array, non-AOT: an exact vtable/class pointer compare is enough. */
2885 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
2886 /* the remoting code is broken, access the class for now */
2888 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
2889 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
2891 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2892 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
2894 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* General case: walk the class hierarchy via the castclass helper. */
2896 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2897 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, is_null_bb);
2901 MONO_START_BB (cfg, is_null_bb);
2903 reset_cast_details (cfg);
/*
 * handle_isinst:
 *
 *   Emit the isinst sequence for @src against @klass. The result register is
 * preloaded with the object; on failure (false_bb) it is overwritten with
 * NULL, so the final value is either the object itself or NULL. null input
 * yields null without any type checks.
 */
2909 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
2912 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
2913 int obj_reg = src->dreg;
2914 int vtable_reg = alloc_preg (cfg);
2915 int res_reg = alloc_preg (cfg);
2917 NEW_BBLOCK (cfg, is_null_bb);
2918 NEW_BBLOCK (cfg, false_bb);
2919 NEW_BBLOCK (cfg, end_bb);
2921 /* Do the assignment at the beginning, so the other assignment can be if converted */
2922 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
2923 ins->type = STACK_OBJ;
2926 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
2927 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
2929 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2930 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2931 /* the is_null_bb target simply copies the input register to the output */
2932 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
2934 int klass_reg = alloc_preg (cfg);
2936 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Array target type: check the rank byte first, then the element (cast) class. */
2939 int rank_reg = alloc_preg (cfg);
2940 int eclass_reg = alloc_preg (cfg);
2942 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
2943 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
2944 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
2945 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2946 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
/* The special cases below mirror the runtime's array covariance rules for
 * object[]/enum[]/integral element types — see the checks on cast_class. */
2947 if (klass->cast_class == mono_defaults.object_class) {
2948 int parent_reg = alloc_preg (cfg);
2949 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
2950 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
2951 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
2952 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
2953 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
2954 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
2955 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
2956 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
2957 } else if (klass->cast_class == mono_defaults.enum_class) {
2958 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
2959 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
2960 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
2961 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
2963 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
2964 /* Check that the object is a vector too */
2965 int bounds_reg = alloc_preg (cfg);
2966 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
2967 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
2968 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
2971 /* the is_null_bb target simply copies the input register to the output */
2972 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
2974 } else if (mono_class_is_nullable (klass)) {
2975 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2976 /* the is_null_bb target simply copies the input register to the output */
2977 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
2979 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
2980 /* the remoting code is broken, access the class for now */
2982 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
2983 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
2985 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2986 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
2988 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
2989 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
2991 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2992 /* the is_null_bb target simply copies the input register to the output */
2993 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, is_null_bb);
/* Failure path: produce NULL; success paths fall through with the object. */
2998 MONO_START_BB (cfg, false_bb);
3000 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
3001 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3003 MONO_START_BB (cfg, is_null_bb);
3005 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *
 *   Remoting-aware isinst used for CEE_MONO_CISINST (see the result contract
 * in the comment below). Transparent proxies whose type info cannot be
 * resolved locally yield 2 so the caller can fall back to a remote check.
 */
3011 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3013 /* This opcode takes as input an object reference and a class, and returns:
3014 0) if the object is an instance of the class,
3015 1) if the object is not instance of the class,
3016 2) if the object is a proxy whose type cannot be determined */
3019 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
3020 int obj_reg = src->dreg;
3021 int dreg = alloc_ireg (cfg);
3023 int klass_reg = alloc_preg (cfg);
3025 NEW_BBLOCK (cfg, true_bb);
3026 NEW_BBLOCK (cfg, false_bb);
3027 NEW_BBLOCK (cfg, false2_bb);
3028 NEW_BBLOCK (cfg, end_bb);
3029 NEW_BBLOCK (cfg, no_proxy_bb);
/* null is "not an instance" (result 1). */
3031 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3032 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
3034 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3035 NEW_BBLOCK (cfg, interface_fail_bb);
3037 tmp_reg = alloc_preg (cfg);
3038 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3039 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
/* Interface check failed: if the object is a transparent proxy without local
 * type info, report "undetermined" (2) instead of a plain failure. */
3040 MONO_START_BB (cfg, interface_fail_bb);
3041 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3043 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
3045 tmp_reg = alloc_preg (cfg);
3046 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3047 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3048 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
/* Non-interface target: detect proxies and check against the proxied class. */
3050 tmp_reg = alloc_preg (cfg);
3051 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3052 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3054 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3055 tmp_reg = alloc_preg (cfg);
3056 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3057 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3059 tmp_reg = alloc_preg (cfg);
3060 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3061 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3062 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3064 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
3065 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
3067 MONO_START_BB (cfg, no_proxy_bb);
3069 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
/* Join blocks: materialize the 0/1/2 result in dreg. */
3072 MONO_START_BB (cfg, false_bb);
3074 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3075 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3077 MONO_START_BB (cfg, false2_bb);
3079 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
3080 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3082 MONO_START_BB (cfg, true_bb);
3084 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3086 MONO_START_BB (cfg, end_bb);
3089 MONO_INST_NEW (cfg, ins, OP_ICONST);
3091 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *
 *   Remoting-aware castclass used for CEE_MONO_CCASTCLASS (result contract in
 * the comment below). Unlike handle_cisinst (), a definite type mismatch
 * throws InvalidCastException instead of returning a failure code.
 */
3097 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3099 /* This opcode takes as input an object reference and a class, and returns:
3100 0) if the object is an instance of the class,
3101 1) if the object is a proxy whose type cannot be determined
3102 an InvalidCastException exception is thrown otherwhise*/
3105 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
3106 int obj_reg = src->dreg;
3107 int dreg = alloc_ireg (cfg);
3108 int tmp_reg = alloc_preg (cfg);
3109 int klass_reg = alloc_preg (cfg);
3111 NEW_BBLOCK (cfg, end_bb);
3112 NEW_BBLOCK (cfg, ok_result_bb);
/* null always casts successfully (result 0). */
3114 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3115 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
3117 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3118 NEW_BBLOCK (cfg, interface_fail_bb);
3120 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3121 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
/* Interface miss: only a transparent proxy without local type info survives
 * (result 1); anything else throws. */
3122 MONO_START_BB (cfg, interface_fail_bb);
3123 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3125 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
3127 tmp_reg = alloc_preg (cfg);
3128 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3129 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3130 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
3132 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3133 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Non-interface target: route proxies through the proxied class check. */
3136 NEW_BBLOCK (cfg, no_proxy_bb);
3138 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3139 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3140 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3142 tmp_reg = alloc_preg (cfg);
3143 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3144 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3146 tmp_reg = alloc_preg (cfg);
3147 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3148 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3149 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3151 NEW_BBLOCK (cfg, fail_1_bb);
3153 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
3155 MONO_START_BB (cfg, fail_1_bb);
3157 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3158 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Plain object: a failed castclass here throws inside the helper. */
3160 MONO_START_BB (cfg, no_proxy_bb);
3162 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
3165 MONO_START_BB (cfg, ok_result_bb);
3167 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3169 MONO_START_BB (cfg, end_bb);
3172 MONO_INST_NEW (cfg, ins, OP_ICONST);
3174 ins->type = STACK_I4;
/*
 * handle_delegate_ctor:
 *
 *   Inline the work of mono_delegate_ctor (): allocate the delegate object of
 * type @klass and fill in its target, method, method_code and invoke_impl
 * fields; the actual argument checks are deferred to the delegate trampoline.
 */
3179 static G_GNUC_UNUSED MonoInst*
3180 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method)
3182 gpointer *trampoline;
3183 MonoInst *obj, *method_ins, *tramp_ins;
3187 obj = handle_alloc (cfg, klass, FALSE);
3189 /* Inline the contents of mono_delegate_ctor */
3191 /* Set target field */
3192 /* Optimize away setting of NULL target */
3193 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0))
3194 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
3196 /* Set method field */
3197 EMIT_NEW_METHODCONST (cfg, method_ins, method);
3198 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
3201 * To avoid looking up the compiled code belonging to the target method
3202 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
3203 * store it, and we fill it after the method has been compiled.
3205 if (!cfg->compile_aot && !method->dynamic) {
3206 MonoInst *code_slot_ins;
/* The per-domain method->code-slot hash is created/filled under the domain lock. */
3208 domain = mono_domain_get ();
3209 mono_domain_lock (domain);
3210 if (!domain_jit_info (domain)->method_code_hash)
3211 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
3212 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
3214 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
3215 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
3217 mono_domain_unlock (domain);
3219 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
3220 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
3223 /* Set invoke_impl field */
3224 if (cfg->compile_aot) {
3225 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, klass);
3227 trampoline = mono_create_delegate_trampoline (klass);
3228 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
3230 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
3232 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit the allocation of a multi-dimensional array of rank @rank via the
 * vararg array-new icall; marks the method as containing varargs.
 */
3238 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
3240 MonoJitICallInfo *info;
3242 /* Need to register the icall so it gets an icall wrapper */
3243 info = mono_get_array_new_va_icall (rank);
3245 cfg->flags |= MONO_CFG_HAS_VARARGS;
3247 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
3248 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * mono_emit_load_got_addr:
 *
 *   Ensure the GOT address is loaded into the got_var at the very start of the
 * method (once per compile), and keep the variable alive with a dummy use at
 * the exit block so the register allocator does not eliminate it.
 */
3252 mono_emit_load_got_addr (MonoCompile *cfg)
3254 MonoInst *getaddr, *dummy_use;
3256 if (!cfg->got_var || cfg->got_var_allocated)
3259 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
3260 getaddr->dreg = cfg->got_var->dreg;
3262 /* Add it to the start of the first bblock */
3263 if (cfg->bb_entry->code) {
3264 getaddr->next = cfg->bb_entry->code;
3265 cfg->bb_entry->code = getaddr;
3268 MONO_ADD_INS (cfg->bb_entry, getaddr);
3270 cfg->got_var_allocated = TRUE;
3273 * Add a dummy use to keep the got_var alive, since real uses might
3274 * only be generated by the back ends.
3275 * Add it to end_bblock, so the variable's lifetime covers the whole
3277 * It would be better to make the usage of the got var explicit in all
3278 * cases when the backend needs it (i.e. calls, throw etc.), so this
3279 * wouldn't be needed.
3281 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
3282 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Max IL code size eligible for inlining; initialized lazily in
 * mono_method_check_inlining () from MONO_INLINELIMIT or INLINE_LENGTH_LIMIT. */
3285 static int inline_limit;
3286 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 *   Decide whether @method may be inlined into the method being compiled.
 * Rejects runtime/internal/noinline/synchronized/pinvoke methods, MBR classes,
 * methods with exception clauses, bodies over the inline size limit, methods
 * with declarative security, and (under MONO_ARCH_SOFT_FLOAT) any R4 in the
 * signature. Also refuses when the target class still needs its .cctor run,
 * since inlining would lose the class-init trigger.
 */
3289 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
3291 MonoMethodHeader *header = mono_method_get_header (method);
3293 #ifdef MONO_ARCH_SOFT_FLOAT
3294 MonoMethodSignature *sig = mono_method_signature (method);
/* Generic sharing disables inlining entirely. */
3298 if (cfg->generic_sharing_context)
3301 #ifdef MONO_ARCH_HAVE_LMF_OPS
3302 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3303 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
3304 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
/* Hard blockers: special implementation flags, MBR, or EH clauses. */
3308 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_RUNTIME) ||
3309 (method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3310 (method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
3311 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
3312 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL) ||
3313 (method->klass->marshalbyref) ||
3314 !header || header->num_clauses)
3317 /* also consider num_locals? */
3318 /* Do the size check early to avoid creating vtables */
3319 if (!inline_limit_inited) {
3320 if (getenv ("MONO_INLINELIMIT"))
3321 inline_limit = atoi (getenv ("MONO_INLINELIMIT"))
3323 inline_limit = INLINE_LENGTH_LIMIT;
3324 inline_limit_inited = TRUE;
3326 if (header->code_size >= inline_limit)
3330 * if we can initialize the class of the method right away, we do,
3331 * otherwise we don't allow inlining if the class needs initialization,
3332 * since it would mean inserting a call to mono_runtime_class_init()
3333 * inside the inlined code
3335 if (!(cfg->opt & MONO_OPT_SHARED)) {
3336 if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
3337 if (cfg->run_cctors && method->klass->has_cctor) {
3338 if (!method->klass->runtime_info)
3339 /* No vtable created yet */
3341 vtable = mono_class_vtable (cfg->domain, method->klass);
3344 /* This makes so that inline cannot trigger */
3345 /* .cctors: too many apps depend on them */
3346 /* running with a specific order... */
3347 if (! vtable->initialized)
3349 mono_runtime_class_init (vtable);
3351 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
3352 if (!method->klass->runtime_info)
3353 /* No vtable created yet */
3355 vtable = mono_class_vtable (cfg->domain, method->klass);
3358 if (!vtable->initialized)
3363 * If we're compiling for shared code
3364 * the cctor will need to be run at aot method load time, for example,
3365 * or at the end of the compilation of the inlining method.
3367 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
3372 * CAS - do not inline methods with declarative security
3373 * Note: this has to be before any possible return TRUE;
3375 if (mono_method_has_declsec (method))
3378 #ifdef MONO_ARCH_SOFT_FLOAT
/* Soft-float targets cannot inline methods with R4 in the signature. */
3380 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
3382 for (i = 0; i < sig->param_count; ++i)
3383 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Return whether a static-field access compiled into @method must be guarded
 * by a class-init call for @vtable's class. Already-initialized classes (non-
 * AOT), BeforeFieldInit classes, and accesses from the class's own non-static
 * methods do not need the guard.
 */
3391 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoVTable *vtable)
3393 if (vtable->initialized && !cfg->compile_aot)
3396 if (vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
3399 if (!mono_class_needs_cctor_run (vtable->klass, method))
3402 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (vtable->klass == method->klass))
3403 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit the address computation for a one-dimensional array element access:
 * bounds check @index against the array's max_length, then compute
 * &arr->vector [index * element_size]. On x86/amd64 power-of-two sizes use a
 * single LEA; otherwise a multiply+add sequence is emitted.
 */
3410 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index)
3414 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
3416 mono_class_init (klass);
3417 size = mono_class_array_element_size (klass);
3419 mult_reg = alloc_preg (cfg);
3420 array_reg = arr->dreg;
3421 index_reg = index->dreg;
3423 #if SIZEOF_VOID_P == 8
3424 /* The array reg is 64 bits but the index reg is only 32 */
3425 index2_reg = alloc_preg (cfg);
3426 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
3428 index2_reg = index_reg;
3431 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
3433 #if defined(__i386__) || defined(__x86_64__)
3434 if (size == 1 || size == 2 || size == 4 || size == 8) {
3435 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
/* LEA folds base + (index << log2(size)) + vector offset into one insn. */
3437 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
3438 ins->type = STACK_PTR;
/* Generic path: addr = array + index * size + offsetof (MonoArray, vector). */
3444 add_reg = alloc_preg (cfg);
3446 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
3447 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
3448 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3449 ins->type = STACK_PTR;
3450 MONO_ADD_INS (cfg->cbb, ins);
3455 #ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit IR computing the address of element [index1, index2] of the
 * two-dimensional array ARR with element type KLASS.  Each dimension's
 * index is adjusted by the dimension's lower bound and range-checked
 * against its length (data read from the MonoArrayBounds records).
 * Only compiled when the architecture does not emulate mul/div.
 */
3457 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
3459 int bounds_reg = alloc_preg (cfg);
3460 int add_reg = alloc_preg (cfg);
3461 int mult_reg = alloc_preg (cfg);
3462 int mult2_reg = alloc_preg (cfg);
3463 int low1_reg = alloc_preg (cfg);
3464 int low2_reg = alloc_preg (cfg);
3465 int high1_reg = alloc_preg (cfg);
3466 int high2_reg = alloc_preg (cfg);
3467 int realidx1_reg = alloc_preg (cfg);
3468 int realidx2_reg = alloc_preg (cfg);
3469 int sum_reg = alloc_preg (cfg);
3474 mono_class_init (klass);
3475 size = mono_class_array_element_size (klass);
3477 index1 = index_ins1->dreg;
3478 index2 = index_ins2->dreg;
3480 /* range checking */
3481 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
3482 arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
/* Dimension 1: realidx1 = index1 - lower_bound; throw unless realidx1 < length. */
3484 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
3485 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3486 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
3487 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
3488 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
3489 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
3490 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Dimension 2: same check using the second MonoArrayBounds record. */
3492 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
3493 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3494 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
3495 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
3496 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
3497 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
3498 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* addr = arr + (realidx1 * length2 + realidx2) * size + offsetof (MonoArray, vector) */
3500 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
3501 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
3502 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
3503 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
3504 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3506 ins->type = STACK_MP;
3508 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 *   Emit IR computing an element address for an ldelema-style access on
 * the array method CMETHOD (an Address/Get/Set accessor).  Dispatches by
 * rank: rank 1 uses the inline 1-D path, rank 2 uses the inline 2-D path
 * when intrinsics are enabled, otherwise a call to the marshalled
 * array-address wrapper is emitted.  IS_SET means one trailing value
 * argument is excluded when computing the rank.
 */
3515 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
3519 MonoMethod *addr_method;
3522 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
3525 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1]);
3527 #ifndef MONO_ARCH_EMULATE_MUL_DIV
3528 /* emit_ldelema_2 depends on OP_LMUL */
3529 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
3530 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
/* General case: call the runtime-generated Address wrapper for this rank/size. */
3534 element_size = mono_class_array_element_size (cmethod->klass->element_class);
3535 addr_method = mono_marshal_get_array_address (rank, element_size);
3536 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/*
 * mini_emit_inst_for_method:
 *
 *   Try to replace a call to CMETHOD (signature FSIG, arguments ARGS) with
 * inlined IR — the JIT's intrinsics table.  Returns the instruction whose
 * dreg holds the call result, or falls through to the arch-specific hook
 * at the end.  Recognized targets include String accessors, Object.GetType,
 * Array getters, RuntimeHelpers.OffsetToStringData, Thread/Monitor helpers,
 * Interlocked operations and a few corlib specials.
 */
3542 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
3544 MonoInst *ins = NULL;
3546 static MonoClass *runtime_helpers_class = NULL;
3547 if (! runtime_helpers_class)
3548 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
3549 "System.Runtime.CompilerServices", "RuntimeHelpers");
/* --- System.String intrinsics --- */
3551 if (cmethod->klass == mono_defaults.string_class) {
/* str[i]: bounds check, then 16-bit load from the chars area. */
3552 if (strcmp (cmethod->name, "get_Chars") == 0) {
3553 int dreg = alloc_ireg (cfg);
3554 int index_reg = alloc_preg (cfg);
3555 int mult_reg = alloc_preg (cfg);
3556 int add_reg = alloc_preg (cfg);
3558 #if SIZEOF_VOID_P == 8
3559 /* The array reg is 64 bits but the index reg is only 32 */
3560 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
3562 index_reg = args [1]->dreg;
3564 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
3566 #if defined(__i386__) || defined(__x86_64__)
3567 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
3568 add_reg = ins->dreg;
3569 /* Avoid a warning */
3571 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
3574 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
3575 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
3576 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
3577 add_reg, G_STRUCT_OFFSET (MonoString, chars));
3579 type_from_op (ins, NULL, NULL);
3581 } else if (strcmp (cmethod->name, "get_Length") == 0) {
3582 int dreg = alloc_ireg (cfg);
3583 /* Decompose later to allow more optimizations */
3584 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
3585 ins->type = STACK_I4;
3586 cfg->cbb->has_array_access = TRUE;
3587 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
3590 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
3591 int mult_reg = alloc_preg (cfg);
3592 int add_reg = alloc_preg (cfg);
3594 /* The corlib functions check for oob already. */
3595 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
3596 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
3597 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
/* --- System.Object intrinsics --- */
3600 } else if (cmethod->klass == mono_defaults.object_class) {
/* obj.GetType (): load the MonoVTable, then its 'type' field. */
3602 if (strcmp (cmethod->name, "GetType") == 0) {
3603 int dreg = alloc_preg (cfg);
3604 int vt_reg = alloc_preg (cfg);
3605 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3606 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
3607 type_from_op (ins, NULL, NULL);
/* Address-based hash: only valid with a non-moving GC. */
3610 #if !defined(MONO_ARCH_EMULATE_MUL_DIV) && !defined(HAVE_MOVING_COLLECTOR)
3611 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0) {
3612 int dreg = alloc_ireg (cfg);
3613 int t1 = alloc_ireg (cfg);
/* Knuth-style multiplicative hash of the object address. */
3615 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
3616 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
3617 ins->type = STACK_I4;
/* Object..ctor is empty: replace the call with a nop. */
3621 } else if (strcmp (cmethod->name, ".ctor") == 0) {
3622 MONO_INST_NEW (cfg, ins, OP_NOP);
3623 MONO_ADD_INS (cfg->cbb, ins);
/* --- System.Array intrinsics (getters only) --- */
3627 } else if (cmethod->klass == mono_defaults.array_class) {
3628 if (cmethod->name [0] != 'g')
3631 if (strcmp (cmethod->name, "get_Rank") == 0) {
3632 int dreg = alloc_ireg (cfg);
3633 int vtable_reg = alloc_preg (cfg);
3634 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, vtable_reg,
3635 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3636 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
3637 vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3638 type_from_op (ins, NULL, NULL);
3641 } else if (strcmp (cmethod->name, "get_Length") == 0) {
3642 int dreg = alloc_ireg (cfg);
3644 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
3645 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
3646 type_from_op (ins, NULL, NULL);
/* --- RuntimeHelpers --- */
3651 } else if (cmethod->klass == runtime_helpers_class) {
3653 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
3654 EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
/* --- System.Threading.Thread --- */
3658 } else if (cmethod->klass == mono_defaults.thread_class) {
3659 if (strcmp (cmethod->name, "get_CurrentThread") == 0 && (ins = mono_arch_get_thread_intrinsic (cfg))) {
3660 ins->dreg = alloc_preg (cfg);
3661 ins->type = STACK_OBJ;
3662 MONO_ADD_INS (cfg->cbb, ins);
3664 } else if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
3665 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
3666 MONO_ADD_INS (cfg->cbb, ins);
3668 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
3669 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
3670 MONO_ADD_INS (cfg->cbb, ins);
/* --- System.Threading.Monitor: arch-specific fast paths --- */
3673 } else if (cmethod->klass == mono_defaults.monitor_class) {
3674 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
/* Call the monitor enter/exit trampoline, passing the object in a fixed reg. */
3675 if (strcmp (cmethod->name, "Enter") == 0) {
3678 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
3679 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
3680 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
3681 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
3683 return (MonoInst*)call;
3684 } else if (strcmp (cmethod->name, "Exit") == 0) {
3687 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
3688 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
3689 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
3690 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
3692 return (MonoInst*)call;
3694 #elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
3695 MonoMethod *fast_method = NULL;
3697 /* Avoid infinite recursion */
3698 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
3699 (strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
3700 strcmp (cfg->method->name, "FastMonitorExit") == 0))
3703 if (strcmp (cmethod->name, "Enter") == 0 ||
3704 strcmp (cmethod->name, "Exit") == 0)
3705 fast_method = mono_monitor_get_fast_path (cmethod);
3709 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
/* --- Array.GetGenericValueImpl: copy one element via ldelema + load/store --- */
3711 } else if (mini_class_is_system_array (cmethod->klass) &&
3712 strcmp (cmethod->name, "GetGenericValueImpl") == 0) {
3713 MonoInst *addr, *store, *load;
3714 MonoClass *eklass = mono_class_from_mono_type (fsig->params [1]);
3716 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1]);
3717 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
3718 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/* --- System.Threading.Interlocked --- */
3720 } else if (cmethod->klass->image == mono_defaults.corlib &&
3721 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
3722 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
3725 #if SIZEOF_VOID_P == 8
3726 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
3727 /* 64 bit reads are already atomic */
3728 MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
3729 ins->dreg = mono_alloc_preg (cfg);
3730 ins->inst_basereg = args [0]->dreg;
3731 ins->inst_offset = 0;
3732 MONO_ADD_INS (cfg->cbb, ins);
3736 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
/* Increment: atomic add of the constant 1 (I4 or I8 by parameter type). */
3737 if (strcmp (cmethod->name, "Increment") == 0) {
3738 MonoInst *ins_iconst;
3741 if (fsig->params [0]->type == MONO_TYPE_I4)
3742 opcode = OP_ATOMIC_ADD_NEW_I4;
3743 #if SIZEOF_VOID_P == 8
3744 else if (fsig->params [0]->type == MONO_TYPE_I8)
3745 opcode = OP_ATOMIC_ADD_NEW_I8;
3748 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
3749 ins_iconst->inst_c0 = 1;
3750 ins_iconst->dreg = mono_alloc_ireg (cfg);
3751 MONO_ADD_INS (cfg->cbb, ins_iconst);
3753 MONO_INST_NEW (cfg, ins, opcode);
3754 ins->dreg = mono_alloc_ireg (cfg);
3755 ins->inst_basereg = args [0]->dreg;
3756 ins->inst_offset = 0;
3757 ins->sreg2 = ins_iconst->dreg;
3758 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
3759 MONO_ADD_INS (cfg->cbb, ins);
/* Decrement: atomic add of the constant -1. */
3761 } else if (strcmp (cmethod->name, "Decrement") == 0) {
3762 MonoInst *ins_iconst;
3765 if (fsig->params [0]->type == MONO_TYPE_I4)
3766 opcode = OP_ATOMIC_ADD_NEW_I4;
3767 #if SIZEOF_VOID_P == 8
3768 else if (fsig->params [0]->type == MONO_TYPE_I8)
3769 opcode = OP_ATOMIC_ADD_NEW_I8;
3772 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
3773 ins_iconst->inst_c0 = -1;
3774 ins_iconst->dreg = mono_alloc_ireg (cfg);
3775 MONO_ADD_INS (cfg->cbb, ins_iconst);
3777 MONO_INST_NEW (cfg, ins, opcode);
3778 ins->dreg = mono_alloc_ireg (cfg);
3779 ins->inst_basereg = args [0]->dreg;
3780 ins->inst_offset = 0;
3781 ins->sreg2 = ins_iconst->dreg;
3782 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
3783 MONO_ADD_INS (cfg->cbb, ins);
/* Add: atomic add of the caller-supplied second argument. */
3785 } else if (strcmp (cmethod->name, "Add") == 0) {
3788 if (fsig->params [0]->type == MONO_TYPE_I4)
3789 opcode = OP_ATOMIC_ADD_NEW_I4;
3790 #if SIZEOF_VOID_P == 8
3791 else if (fsig->params [0]->type == MONO_TYPE_I8)
3792 opcode = OP_ATOMIC_ADD_NEW_I8;
3796 MONO_INST_NEW (cfg, ins, opcode);
3797 ins->dreg = mono_alloc_ireg (cfg);
3798 ins->inst_basereg = args [0]->dreg;
3799 ins->inst_offset = 0;
3800 ins->sreg2 = args [1]->dreg;
3801 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
3802 MONO_ADD_INS (cfg->cbb, ins);
3805 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
3807 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
/* Exchange: pick the exchange opcode by the pointee type and word size. */
3808 if (strcmp (cmethod->name, "Exchange") == 0) {
3811 if (fsig->params [0]->type == MONO_TYPE_I4)
3812 opcode = OP_ATOMIC_EXCHANGE_I4;
3813 #if SIZEOF_VOID_P == 8
3814 else if ((fsig->params [0]->type == MONO_TYPE_I8) ||
3815 (fsig->params [0]->type == MONO_TYPE_I) ||
3816 (fsig->params [0]->type == MONO_TYPE_OBJECT))
3817 opcode = OP_ATOMIC_EXCHANGE_I8;
3819 else if ((fsig->params [0]->type == MONO_TYPE_I) ||
3820 (fsig->params [0]->type == MONO_TYPE_OBJECT))
3821 opcode = OP_ATOMIC_EXCHANGE_I4;
3826 MONO_INST_NEW (cfg, ins, opcode);
3827 ins->dreg = mono_alloc_ireg (cfg);
3828 ins->inst_basereg = args [0]->dreg;
3829 ins->inst_offset = 0;
3830 ins->sreg2 = args [1]->dreg;
3831 MONO_ADD_INS (cfg->cbb, ins);
3833 switch (fsig->params [0]->type) {
3835 ins->type = STACK_I4;
3839 ins->type = STACK_I8;
3841 case MONO_TYPE_OBJECT:
3842 ins->type = STACK_OBJ;
3845 g_assert_not_reached ();
3848 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
3850 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS_IMM
3852 * Can't implement CompareExchange methods this way since they have
3853 * three arguments. We can implement one of the common cases, where the new
3854 * value is a constant.
3856 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
3857 if ((fsig->params [1]->type == MONO_TYPE_I4 ||
3858 (sizeof (gpointer) == 4 && fsig->params [1]->type == MONO_TYPE_I))
3859 && args [2]->opcode == OP_ICONST) {
3860 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_IMM_I4);
3861 ins->dreg = alloc_ireg (cfg);
3862 ins->sreg1 = args [0]->dreg;
3863 ins->sreg2 = args [1]->dreg;
3864 ins->backend.data = GINT_TO_POINTER (args [2]->inst_c0);
3865 ins->type = STACK_I4;
3866 MONO_ADD_INS (cfg->cbb, ins);
3868 /* The I8 case is hard to detect, since the arg might be a conv.i8 (iconst) tree */
3870 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS_IMM */
/* --- Other corlib specials: Debugger.Break, Environment.get_IsRunningOnWindows --- */
3874 } else if (cmethod->klass->image == mono_defaults.corlib) {
3875 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
3876 && strcmp (cmethod->klass->name, "Debugger") == 0) {
3877 MONO_INST_NEW (cfg, ins, OP_BREAK);
3878 MONO_ADD_INS (cfg->cbb, ins);
3881 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
3882 && strcmp (cmethod->klass->name, "Environment") == 0) {
3883 #ifdef PLATFORM_WIN32
3884 EMIT_NEW_ICONST (cfg, ins, 1);
3886 EMIT_NEW_ICONST (cfg, ins, 0);
3890 } else if (cmethod->klass == mono_defaults.math_class) {
3892 * There is general branches code for Min/Max, but it does not work for
3894 * http://everything2.com/?node_id=1051618
3898 #ifdef MONO_ARCH_SIMD_INTRINSICS
3899 if (cfg->opt & MONO_OPT_SIMD) {
3900 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
/* Nothing matched: give the architecture backend a chance. */
3906 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
3910 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *
 *   Redirect certain runtime-internal calls to specialized implementations.
 * Currently only String.InternalAllocateStr is handled: when a managed GC
 * allocator exists for the string vtable, the call is replaced by a call to
 * that allocator with (vtable, length) arguments.
 */
3913 inline static MonoInst*
3914 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
3915 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
3917 if (method->klass == mono_defaults.string_class) {
3918 /* managed string allocation support */
3919 if (strcmp (method->name, "InternalAllocateStr") == 0) {
3920 MonoInst *iargs [2];
3921 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3922 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, FALSE);
3925 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3926 iargs [1] = args [0];
3927 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
/*
 * mono_save_args:
 *
 *   For an inlined call, create an OP_LOCAL variable for every argument
 * (including the implicit 'this') and emit a store of the corresponding
 * stack value SP [i] into it, recording these vars in cfg->args.
 */
3934 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
3936 MonoInst *store, *temp;
3939 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
/* Slot 0 of an instance method is 'this'; its type comes from the stack entry. */
3940 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
3943 * FIXME: We should use *args++ = sp [0], but that would mean the arg
3944 * would be different than the MonoInst's used to represent arguments, and
3945 * the ldelema implementation can't deal with that.
3946 * Solution: When ldelema is used on an inline argument, create a var for
3947 * it, emit ldelema on that var, and emit the saving code below in
3948 * inline_method () if needed.
3950 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
3951 cfg->args [i] = temp;
3952 /* This uses cfg->args [i] which is set by the preceding line */
3953 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
3954 store->cil_code = sp [0]->cil_code;
/*
 * Debugging aids: when enabled, inlining can be restricted by method-name
 * prefix via the MONO_INLINE_{CALLED,CALLER}_METHOD_NAME_LIMIT env vars.
 */
3959 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
3960 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
3962 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *
 *   Return whether CALLED_METHOD may be inlined, based on the
 * MONO_INLINE_CALLED_METHOD_NAME_LIMIT environment variable.  The limit
 * string is read once and cached; an empty/unset limit allows everything.
 */
3964 check_inline_called_method_name_limit (MonoMethod *called_method)
3967 static char *limit = NULL;
3969 if (limit == NULL) {
3970 char *limit_string = getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
3972 if (limit_string != NULL)
3973 limit = limit_string;
3975 limit = (char *) "";
/* Non-empty limit: allow only methods whose full name starts with it. */
3978 if (limit [0] != '\0') {
3979 char *called_method_name = mono_method_full_name (called_method, TRUE);
3981 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
3982 g_free (called_method_name);
3984 //return (strncmp_result <= 0);
3985 return (strncmp_result == 0);
3992 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *
 *   Return whether inlining is allowed inside CALLER_METHOD, based on the
 * MONO_INLINE_CALLER_METHOD_NAME_LIMIT environment variable.  Mirrors
 * check_inline_called_method_name_limit () but tests the caller's name.
 */
3994 check_inline_caller_method_name_limit (MonoMethod *caller_method)
3997 static char *limit = NULL;
3999 if (limit == NULL) {
4000 char *limit_string = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
4001 if (limit_string != NULL) {
4002 limit = limit_string;
4004 limit = (char *) "";
/* Non-empty limit: allow only callers whose full name starts with it. */
4008 if (limit [0] != '\0') {
4009 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
4011 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
4012 g_free (caller_method_name);
4014 //return (strncmp_result <= 0);
4015 return (strncmp_result == 0);
/*
 * inline_method:
 *
 *   Try to inline CMETHOD at the current point of compilation.  Saves the
 * parts of CFG that mono_method_to_ir () overwrites, converts the callee's
 * IL into new bblocks between SBBLOCK and EBBLOCK, restores CFG, and either
 * links/merges the new blocks in (success) or discards them (abort).
 * The return value (if any) is stored into RVAR and re-loaded for the
 * caller's stack.  INLINE_ALLWAYS forces inlining regardless of cost.
 */
4023 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
4024 guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_allways)
4026 MonoInst *ins, *rvar = NULL;
4027 MonoMethodHeader *cheader;
4028 MonoBasicBlock *ebblock, *sbblock;
4030 MonoMethod *prev_inlined_method;
4031 MonoInst **prev_locals, **prev_args;
4032 MonoType **prev_arg_types;
4033 guint prev_real_offset;
4034 GHashTable *prev_cbb_hash;
4035 MonoBasicBlock **prev_cil_offset_to_bb;
4036 MonoBasicBlock *prev_cbb;
4037 unsigned char* prev_cil_start;
4038 guint32 prev_cil_offset_to_bb_len;
4039 MonoMethod *prev_current_method;
4040 MonoGenericContext *prev_generic_context;
4042 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
/* Optional name-based debugging filters (see the limit helpers above). */
4044 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
4045 if ((! inline_allways) && ! check_inline_called_method_name_limit (cmethod))
4048 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
4049 if ((! inline_allways) && ! check_inline_caller_method_name_limit (cfg->method))
4053 if (cfg->verbose_level > 2)
4054 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4056 if (!cmethod->inline_info) {
4057 mono_jit_stats.inlineable_methods++;
4058 cmethod->inline_info = 1;
4060 /* allocate space to store the return value */
4061 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
4062 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
4065 /* allocate local variables */
4066 cheader = mono_method_get_header (cmethod);
4067 prev_locals = cfg->locals;
4068 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
4069 for (i = 0; i < cheader->num_locals; ++i)
4070 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
4072 /* allocate start and end blocks */
4073 /* This is needed so if the inline is aborted, we can clean up */
4074 NEW_BBLOCK (cfg, sbblock);
4075 sbblock->real_offset = real_offset;
4077 NEW_BBLOCK (cfg, ebblock);
4078 ebblock->block_num = cfg->num_bblocks++;
4079 ebblock->real_offset = real_offset;
/* Save every cfg field that mono_method_to_ir () will repoint at the callee. */
4081 prev_args = cfg->args;
4082 prev_arg_types = cfg->arg_types;
4083 prev_inlined_method = cfg->inlined_method;
4084 cfg->inlined_method = cmethod;
4085 cfg->ret_var_set = FALSE;
4086 prev_real_offset = cfg->real_offset;
4087 prev_cbb_hash = cfg->cbb_hash;
4088 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
4089 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
4090 prev_cil_start = cfg->cil_start;
4091 prev_cbb = cfg->cbb;
4092 prev_current_method = cfg->current_method;
4093 prev_generic_context = cfg->generic_context;
/* Convert the callee's IL; a negative cost means the inline was aborted. */
4095 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, *ip == CEE_CALLVIRT);
/* Restore the caller's compilation state. */
4097 cfg->inlined_method = prev_inlined_method;
4098 cfg->real_offset = prev_real_offset;
4099 cfg->cbb_hash = prev_cbb_hash;
4100 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
4101 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
4102 cfg->cil_start = prev_cil_start;
4103 cfg->locals = prev_locals;
4104 cfg->args = prev_args;
4105 cfg->arg_types = prev_arg_types;
4106 cfg->current_method = prev_current_method;
4107 cfg->generic_context = prev_generic_context;
/* Accept the inline when it is cheap enough (cost < 60) or forced. */
4109 if ((costs >= 0 && costs < 60) || inline_allways) {
4110 if (cfg->verbose_level > 2)
4111 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4113 mono_jit_stats.inlined_methods++;
4115 /* always add some code to avoid block split failures */
4116 MONO_INST_NEW (cfg, ins, OP_NOP);
4117 MONO_ADD_INS (prev_cbb, ins);
4119 prev_cbb->next_bb = sbblock;
4120 link_bblock (cfg, prev_cbb, sbblock);
4123 * Get rid of the begin and end bblocks if possible to aid local
4126 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
4128 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
4129 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
4131 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
4132 MonoBasicBlock *prev = ebblock->in_bb [0];
4133 mono_merge_basic_blocks (cfg, prev, ebblock);
4135 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
4136 mono_merge_basic_blocks (cfg, prev_cbb, prev);
4137 cfg->cbb = prev_cbb;
4145 * If the inlined method contains only a throw, then the ret var is not
4146 * set, so set it to a dummy value.
4148 if (!cfg->ret_var_set) {
4149 static double r8_0 = 0.0;
4151 switch (rvar->type) {
4153 MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
4156 MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
4161 MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
4164 MONO_INST_NEW (cfg, ins, OP_R8CONST);
4165 ins->type = STACK_R8;
4166 ins->inst_p0 = (void*)&r8_0;
4167 ins->dreg = rvar->dreg;
4168 MONO_ADD_INS (cfg->cbb, ins);
4171 MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (fsig->ret));
4174 g_assert_not_reached ();
/* Reload the return value for the caller's evaluation stack. */
4178 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
/* Abort path: reset error state and drop the callee's bblocks. */
4183 if (cfg->verbose_level > 2)
4184 printf ("INLINE ABORTED %s\n", mono_method_full_name (cmethod, TRUE));
4185 cfg->exception_type = MONO_EXCEPTION_NONE;
4186 mono_loader_clear_error ();
4188 /* This gets rid of the newly added bblocks */
4189 cfg->cbb = prev_cbb;
4195 * Some of these comments may well be out-of-date.
4196 * Design decisions: we do a single pass over the IL code (and we do bblock
4197 * splitting/merging in the few cases when it's required: a back jump to an IL
4198 * address that was not already seen as bblock starting point).
4199 * Code is validated as we go (full verification is still better left to metadata/verify.c).
4200 * Complex operations are decomposed in simpler ones right away. We need to let the
4201 * arch-specific code peek and poke inside this process somehow (except when the
4202 * optimizations can take advantage of the full semantic info of coarse opcodes).
4203 * All the opcodes of the form opcode.s are 'normalized' to opcode.
4204 * MonoInst->opcode initially is the IL opcode or some simplification of that
4205 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
4206 * opcode with value bigger than OP_LAST.
4207 * At this point the IR can be handed over to an interpreter, a dumb code generator
4208 * or to the optimizing code generator that will translate it to SSA form.
4210 * Profiling directed optimizations.
4211 * We may compile by default with few or no optimizations and instrument the code
4212 * or the user may indicate what methods to optimize the most either in a config file
4213 * or through repeated runs where the compiler applies offline the optimizations to
4214 * each method and then decides if it was worth it.
/*
 * Lightweight IL-validation helpers used while converting a method body:
 * each one marks the method unverified (UNVERIFIED) or jumps to the
 * type-load error path when its condition fails.
 */
4217 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
4218 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
4219 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
4220 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
4221 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
4222 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
4223 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
4224 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; goto load_error;}
4226 /* offset from br.s -> br like opcodes */
4227 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *
 *   Return whether the IL address IP belongs to basic block BB, i.e. no
 * other bblock starts at that offset in cfg->cil_offset_to_bb.
 */
4230 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
4232 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
4234 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   Scan the IL between START and END and create a basic block (via
 * GET_BBLOCK) at every branch target and at every instruction following a
 * branch/switch.  Also marks the bblock containing a CEE_THROW as
 * out-of-line so it can be moved to cold code.
 */
4238 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
4240 unsigned char *ip = start;
4241 unsigned char *target;
4244 MonoBasicBlock *bblock;
4245 const MonoOpcode *opcode;
4248 cli_addr = ip - start;
4249 i = mono_opcode_value ((const guint8 **)&ip, end);
4252 opcode = &mono_opcodes [i];
/* Advance past the operand; only branch-like operands create bblocks. */
4253 switch (opcode->argument) {
4254 case MonoInlineNone:
4257 case MonoInlineString:
4258 case MonoInlineType:
4259 case MonoInlineField:
4260 case MonoInlineMethod:
4263 case MonoShortInlineR:
4270 case MonoShortInlineVar:
4271 case MonoShortInlineI:
4274 case MonoShortInlineBrTarget:
/* 1-byte signed displacement relative to the next instruction. */
4275 target = start + cli_addr + 2 + (signed char)ip [1];
4276 GET_BBLOCK (cfg, bblock, target);
4279 GET_BBLOCK (cfg, bblock, ip);
4281 case MonoInlineBrTarget:
/* 4-byte signed displacement relative to the next instruction. */
4282 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
4283 GET_BBLOCK (cfg, bblock, target);
4286 GET_BBLOCK (cfg, bblock, ip);
4288 case MonoInlineSwitch: {
4289 guint32 n = read32 (ip + 1);
/* cli_addr now points past the whole switch: opcode + count + n targets. */
4292 cli_addr += 5 + 4 * n;
4293 target = start + cli_addr;
4294 GET_BBLOCK (cfg, bblock, target);
4296 for (j = 0; j < n; ++j) {
4297 target = start + cli_addr + (gint32)read32 (ip);
4298 GET_BBLOCK (cfg, bblock, target);
4308 g_assert_not_reached ();
4311 if (i == CEE_THROW) {
4312 unsigned char *bb_start = ip - 1;
4314 /* Find the start of the bblock containing the throw */
4316 while ((bb_start >= start) && !bblock) {
4317 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
/* Throwing blocks are cold: move them out of the hot code path. */
4321 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *
 *   Resolve TOKEN to a MonoMethod in the context of method M.  Wrapper
 * methods store their targets as wrapper data instead of metadata tokens.
 * "allow_open" — the result may still be an open constructed method;
 * mini_get_method () below adds that check.
 */
4330 static inline MonoMethod *
4331 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4335 if (m->wrapper_type != MONO_WRAPPER_NONE)
4336 return mono_method_get_wrapper_data (m, token);
4338 method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 *
 *   Like mini_get_method_allow_open (), but when not compiling with
 * generic sharing, rejects methods on open constructed types (they cannot
 * be compiled directly).
 */
4343 static inline MonoMethod *
4344 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4346 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
4348 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 *
 *   Resolve TOKEN to a MonoClass in the context of METHOD, reading wrapper
 * data for wrapper methods, and initialize the class before returning it.
 */
4354 static inline MonoClass*
4355 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
4359 if (method->wrapper_type != MONO_WRAPPER_NONE)
4360 klass = mono_method_get_wrapper_data (method, token);
4362 klass = mono_class_get_full (method->klass->image, token, context);
4364 mono_class_init (klass);
4369 * Returns TRUE if the JIT should abort inlining because "callee"
4370 * is influenced by security attributes.
/*
 * check_linkdemand:
 *
 *   Evaluate declarative-security link demands for a call from CALLER to
 * CALLEE.  On an ECMA link demand failure, code throwing a
 * SecurityException is emitted before the call; on other failures the
 * compilation is flagged with MONO_EXCEPTION_SECURITY_LINKDEMAND
 * (unless an earlier exception is already recorded).
 */
4373 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
/* Inlined-call case: the declared caller differs from the compiled method. */
4377 if ((cfg->method != caller) && mono_method_has_declsec (callee)) {
4381 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
4382 if (result == MONO_JIT_SECURITY_OK)
4385 if (result == MONO_JIT_LINKDEMAND_ECMA) {
4386 /* Generate code to throw a SecurityException before the actual call/link */
4387 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4390 NEW_ICONST (cfg, args [0], 4);
4391 NEW_METHODCONST (cfg, args [1], caller);
4392 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
4393 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
4394 /* don't hide previous results */
4395 cfg->exception_type = MONO_EXCEPTION_SECURITY_LINKDEMAND;
4396 cfg->exception_data = result;
/*
 * method_access_exception:
 *
 *   Return (caching on first use) the 2-argument
 * SecurityManager.MethodAccessException thrower method.
 */
4404 method_access_exception (void)
4406 static MonoMethod *method = NULL;
4409 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4410 method = mono_class_get_method_from_name (secman->securitymanager,
4411 "MethodAccessException", 2);
/*
 * emit_throw_method_access_exception:
 *
 *   Emit a call that raises a MethodAccessException for the CALLER ->
 * CALLEE pair at the current compilation point.
 */
4418 emit_throw_method_access_exception (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
4419 MonoBasicBlock *bblock, unsigned char *ip)
4421 MonoMethod *thrower = method_access_exception ();
4424 EMIT_NEW_METHODCONST (cfg, args [0], caller);
4425 EMIT_NEW_METHODCONST (cfg, args [1], callee);
4426 mono_emit_method_call (cfg, thrower, args, NULL);
/*
 * verification_exception:
 *
 *   Return (caching on first use) the 0-argument
 * SecurityManager.VerificationException thrower method.
 */
4430 verification_exception (void)
4432 static MonoMethod *method = NULL;
4435 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4436 method = mono_class_get_method_from_name (secman->securitymanager,
4437 "VerificationException", 0);
/*
 * emit_throw_verification_exception:
 *
 *   Emit a call that raises a VerificationException at the current
 * compilation point.
 */
4444 emit_throw_verification_exception (MonoCompile *cfg, MonoBasicBlock *bblock, unsigned char *ip)
4446 MonoMethod *thrower = verification_exception ();
4448 mono_emit_method_call (cfg, thrower, NULL, NULL);
/*
 * ensure_method_is_allowed_to_call_method:
 *
 *   CoreCLR security check: a caller may invoke a callee when the caller's
 * level is at least the callee's, or when either side is SafeCritical.
 * Otherwise code throwing a MethodAccessException is emitted at the call
 * site.
 */
4452 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
4453 MonoBasicBlock *bblock, unsigned char *ip)
4455 MonoSecurityCoreCLRLevel caller_level = mono_security_core_clr_method_level (caller, TRUE);
4456 MonoSecurityCoreCLRLevel callee_level = mono_security_core_clr_method_level (callee, TRUE);
4457 gboolean is_safe = TRUE;
4459 if (!(caller_level >= callee_level ||
4460 caller_level == MONO_SECURITY_CORE_CLR_SAFE_CRITICAL ||
4461 callee_level == MONO_SECURITY_CORE_CLR_SAFE_CRITICAL)) {
4466 emit_throw_method_access_exception (cfg, caller, callee, bblock, ip);
/*
 * method_is_safe:
 *
 *   Test hook used by the security checks: methods named "unsafeMethod"
 * are treated as unsafe.
 */
4470 method_is_safe (MonoMethod *method)
4473 if (strcmp (method->name, "unsafeMethod") == 0)
4480 * Check that the IL instructions at ip are the array initialization
4481 * sequence and return the pointer to the data and the size.
/*
 * Recognizes the C# compiler's array-initialization idiom
 * (dup; ldtoken <fielddef>; call RuntimeHelpers::InitializeArray) and, when
 * matched, returns a pointer to the field's static RVA data so the JIT can
 * inline the initialization.  Returns NULL when the pattern does not match
 * or the element type cannot be handled.
 */
4484 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
4487 * newarr[System.Int32]
4489 * ldtoken field valuetype ...
4490 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* ip [5] is the token's table byte: 0x04 means a Field token (ldtoken field ...). */
4492 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
/* token: target of the following `call`; field_token: operand of `ldtoken`. */
4493 guint32 token = read32 (ip + 7);
4494 guint32 field_token = read32 (ip + 2);
4495 guint32 field_index = field_token & 0xffffff;
4497 const char *data_ptr;
4499 MonoMethod *cmethod;
4500 MonoClass *dummy_class;
4501 MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
4507 *out_field_token = field_token;
4509 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* Only the corlib RuntimeHelpers.InitializeArray call qualifies. */
4512 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
/* Element-type filter: byte-sized types are always safe; wider types only on little-endian. */
4514 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
4515 case MONO_TYPE_BOOLEAN:
4519 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
4520 #if G_BYTE_ORDER == G_LITTLE_ENDIAN
4521 case MONO_TYPE_CHAR:
4531 return NULL; /* stupid ARM FP swapped format */
/* Bail out if the requested size exceeds the field's actual data size. */
4541 if (size > mono_type_size (field->type, &dummy_align))
4544 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
/* Non-dynamic (on-disk) images: map the field's RVA directly. */
4545 if (!method->klass->image->dynamic) {
4546 field_index = read32 (ip + 2) & 0xffffff;
4547 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
4548 data_ptr = mono_image_rva_map (method->klass->image, rva);
4549 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
4550 /* for aot code we do the lookup on load */
4551 if (aot && data_ptr)
4552 return GUINT_TO_POINTER (rva);
4554 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
/* Dynamic (Reflection.Emit) images: fetch the data through the field object. */
4556 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *
 *   Record an InvalidProgramException on CFG, with a message naming the
 * method and disassembling the offending IL instruction at IP (or noting
 * that the method body is empty).  Both temporary strings are freed here.
 */
4564 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
4566 char *method_fname = mono_method_full_name (method, TRUE);
/* An empty body cannot be disassembled; report it specially. */
4569 if (mono_method_get_header (method)->code_size == 0)
4570 method_code = g_strdup ("method body is empty.");
4572 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
4573 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
4574 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
4575 g_free (method_fname);
4576 g_free (method_code);
/*
 * set_exception_object:
 *
 *   Record a pre-built managed exception object on CFG so compilation aborts
 * with it.  cfg->exception_ptr is registered as a GC root before the managed
 * object is stored into it, so the collector never sees an unrooted store.
 */
4580 set_exception_object (MonoCompile *cfg, MonoException *exception)
4582 cfg->exception_type = MONO_EXCEPTION_OBJECT_SUPPLIED;
4583 MONO_GC_REGISTER_ROOT (cfg->exception_ptr);
4584 cfg->exception_ptr = exception;
/*
 * generic_class_is_reference_type:
 *
 *   Return TRUE if KLASS is a reference type, resolving type variables
 * through the generic sharing context when one is active (so a shared
 * instantiation answers for the basic type, not the open variable).
 */
4588 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
4592 if (cfg->generic_sharing_context)
4593 type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, &klass->byval_arg);
4595 type = &klass->byval_arg;
4596 return MONO_TYPE_IS_REFERENCE (type);
4600 * mono_decompose_array_access_opts:
4602 * Decompose array access opcodes.
4603 * This should be in decompose.c, but it emits calls so it has to stay here until
4604 * the old JIT is gone.
4607 mono_decompose_array_access_opts (MonoCompile *cfg)
4609 MonoBasicBlock *bb, *first_bb;
4612 * Unlike decompose_long_opts, this pass does not alter the CFG of the method so it
4613 * can be executed anytime. It should be run before decompose_long
4617 * Create a dummy bblock and emit code into it so we can use the normal
4618 * code generation macros.
4620 cfg->cbb = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoBasicBlock));
4621 first_bb = cfg->cbb;
/* Walk every basic block; blocks without array-access opcodes are skipped. */
4623 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
4625 MonoInst *prev = NULL;
4627 MonoInst *iargs [3];
4630 if (!bb->has_array_access)
4633 if (cfg->verbose_level > 3) mono_print_bb (bg, "BEFORE DECOMPOSE-ARRAY-ACCESS-OPTS ");
4635 cfg->cbb->code = cfg->cbb->last_ins = NULL;
4641 for (ins = bb->code; ins; ins = ins->next) {
4642 switch (ins->opcode) {
/* Array length: a plain i4 load from MonoArray.max_length. */
4644 NEW_LOAD_MEMBASE (cfg, dest, OP_LOADI4_MEMBASE, ins->dreg, ins->sreg1,
4645 G_STRUCT_OFFSET (MonoArray, max_length));
4646 MONO_ADD_INS (cfg->cbb, dest);
4648 case OP_BOUNDS_CHECK:
/* Delegate bounds checking to the architecture-specific macro. */
4649 MONO_ARCH_EMIT_BOUNDS_CHECK (cfg, ins->sreg1, ins->inst_imm, ins->sreg2);
/* newarr: shared code goes through the generic mono_array_new icall
 * (domain + class + length)... */
4652 if (cfg->opt & MONO_OPT_SHARED) {
4653 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4654 EMIT_NEW_CLASSCONST (cfg, iargs [1], ins->inst_newa_class);
4655 MONO_INST_NEW (cfg, iargs [2], OP_MOVE);
4656 iargs [2]->dreg = ins->sreg1;
4658 dest = mono_emit_jit_icall (cfg, mono_array_new, iargs);
4659 dest->dreg = ins->dreg;
/* ...otherwise use the faster vtable-specific mono_array_new_specific. */
4661 MonoVTable *vtable = mono_class_vtable (cfg->domain, mono_array_class_get (ins->inst_newa_class, 1));
4664 NEW_VTABLECONST (cfg, iargs [0], vtable);
4665 MONO_ADD_INS (cfg->cbb, iargs [0]);
4666 MONO_INST_NEW (cfg, iargs [1], OP_MOVE);
4667 iargs [1]->dreg = ins->sreg1;
4669 dest = mono_emit_jit_icall (cfg, mono_array_new_specific, iargs);
4670 dest->dreg = ins->dreg;
/* String length: i4 load from MonoString.length. */
4674 NEW_LOAD_MEMBASE (cfg, dest, OP_LOADI4_MEMBASE, ins->dreg,
4675 ins->sreg1, G_STRUCT_OFFSET (MonoString, length));
4676 MONO_ADD_INS (cfg->cbb, dest);
4682 g_assert (cfg->cbb == first_bb);
/* If the case above emitted anything, splice it in place of the original ins. */
4684 if (cfg->cbb->code || (cfg->cbb != first_bb)) {
4685 /* Replace the original instruction with the new code sequence */
4687 mono_replace_ins (cfg, bb, ins, &prev, first_bb, cfg->cbb);
/* Reset the scratch bblock for the next instruction. */
4688 first_bb->code = first_bb->last_ins = NULL;
4689 first_bb->in_count = first_bb->out_count = 0;
4690 cfg->cbb = first_bb;
4697 if (cfg->verbose_level > 3) mono_print_bb (bb, "AFTER DECOMPOSE-ARRAY-ACCESS-OPTS ");
4707 #ifdef MONO_ARCH_SOFT_FLOAT
4710 * mono_decompose_soft_float:
4712 * Soft float support on ARM. We store each double value in a pair of integer vregs,
4713 * similar to long support on 32 bit platforms. 32 bit float values require special
4714 * handling when used as locals, arguments, and in calls.
4715 * One big problem with soft-float is that there are few r4 test cases in our test suite.
4718 mono_decompose_soft_float (MonoCompile *cfg)
4720 MonoBasicBlock *bb, *first_bb;
4723 * This pass creates long opcodes, so it should be run before decompose_long_opts ().
4727 * Create a dummy bblock and emit code into it so we can use the normal
4728 * code generation macros.
4730 cfg->cbb = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoBasicBlock));
4731 first_bb = cfg->cbb;
4733 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
4735 MonoInst *prev = NULL;
4738 if (cfg->verbose_level > 3) mono_print_bb (bb, "BEFORE HANDLE-SOFT-FLOAT ");
4740 cfg->cbb->code = cfg->cbb->last_ins = NULL;
4746 for (ins = bb->code; ins; ins = ins->next) {
4747 const char *spec = INS_INFO (ins->opcode);
4749 /* Most fp operations are handled automatically by opcode emulation */
4751 switch (ins->opcode) {
/* r8 constant: reinterpret the double's bits as an i8 constant. */
4754 d.vald = *(double*)ins->inst_p0;
4755 MONO_EMIT_NEW_I8CONST (cfg, ins->dreg, d.vall);
4760 /* We load the r8 value */
4761 d.vald = *(float*)ins->inst_p0;
4762 MONO_EMIT_NEW_I8CONST (cfg, ins->dreg, d.vall);
/* fp-reg moves become long/int moves over the integer vreg pair. */
4766 ins->opcode = OP_LMOVE;
4769 ins->opcode = OP_MOVE;
4770 ins->sreg1 = ins->sreg1 + 1;
4773 ins->opcode = OP_MOVE;
4774 ins->sreg1 = ins->sreg1 + 2;
/* fp return: set the long return-value register pair instead. */
4777 int reg = ins->sreg1;
4779 ins->opcode = OP_SETLRET;
4781 ins->sreg1 = reg + 1;
4782 ins->sreg2 = reg + 2;
/* r8 loads/stores are just 8-byte integer loads/stores. */
4785 case OP_LOADR8_MEMBASE:
4786 ins->opcode = OP_LOADI8_MEMBASE;
4788 case OP_STORER8_MEMBASE_REG:
4789 ins->opcode = OP_STOREI8_MEMBASE_REG;
/* r4 store: no integer equivalent — route through the mono_fstore_r4 icall. */
4791 case OP_STORER4_MEMBASE_REG: {
4792 MonoInst *iargs [2];
4795 /* Arg 1 is the double value */
4796 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
4797 iargs [0]->dreg = ins->sreg1;
4799 /* Arg 2 is the address to store to */
4800 addr_reg = mono_alloc_preg (cfg);
4801 EMIT_NEW_BIALU_IMM (cfg, iargs [1], OP_PADD_IMM, addr_reg, ins->inst_destbasereg, ins->inst_offset);
4802 mono_emit_jit_icall (cfg, mono_fstore_r4, iargs);
/* r4 load: mono_fload_r4 widens to r8 into the destination vreg. */
4806 case OP_LOADR4_MEMBASE: {
4807 MonoInst *iargs [1];
4811 addr_reg = mono_alloc_preg (cfg);
4812 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, addr_reg, ins->inst_basereg, ins->inst_offset);
4813 conv = mono_emit_jit_icall (cfg, mono_fload_r4, iargs);
4814 conv->dreg = ins->dreg;
/* Calls returning r4 need special handling: the raw bits come back in an
 * integer register and must be converted to r8. */
4819 case OP_FCALL_MEMBASE: {
4820 MonoCallInst *call = (MonoCallInst*)ins;
4821 if (call->signature->ret->type == MONO_TYPE_R4) {
4822 MonoCallInst *call2;
4823 MonoInst *iargs [1];
4826 /* Convert the call into a call returning an int */
4827 MONO_INST_NEW_CALL (cfg, call2, OP_CALL);
4828 memcpy (call2, call, sizeof (MonoCallInst));
4829 switch (ins->opcode) {
4831 call2->inst.opcode = OP_CALL;
4834 call2->inst.opcode = OP_CALL_REG;
4836 case OP_FCALL_MEMBASE:
4837 call2->inst.opcode = OP_CALL_MEMBASE;
4840 g_assert_not_reached ();
4842 call2->inst.dreg = mono_alloc_ireg (cfg);
4843 MONO_ADD_INS (cfg->cbb, (MonoInst*)call2);
4845 /* FIXME: Optimize this */
4847 /* Emit an r4->r8 conversion */
4848 EMIT_NEW_VARLOADA_VREG (cfg, iargs [0], call2->inst.dreg, &mono_defaults.int32_class->byval_arg);
4849 conv = mono_emit_jit_icall (cfg, mono_fload_r4, iargs);
4850 conv->dreg = ins->dreg;
/* r8-returning calls simply become long calls. */
4852 switch (ins->opcode) {
4854 ins->opcode = OP_LCALL;
4857 ins->opcode = OP_LCALL_REG;
4859 case OP_FCALL_MEMBASE:
4860 ins->opcode = OP_LCALL_MEMBASE;
4863 g_assert_not_reached ();
/* fcompare followed by an fp branch: replaced with an emulation icall,
 * an integer compare against the icall result, and an int branch. */
4869 MonoJitICallInfo *info;
4870 MonoInst *iargs [2];
4871 MonoInst *call, *cmp, *br;
4873 /* Convert fcompare+fbcc to icall+icompare+beq */
4875 info = mono_find_jit_opcode_emulation (ins->next->opcode);
4878 /* Create dummy MonoInst's for the arguments */
4879 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
4880 iargs [0]->dreg = ins->sreg1;
4881 MONO_INST_NEW (cfg, iargs [1], OP_ARG);
4882 iargs [1]->dreg = ins->sreg2;
4884 call = mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, iargs);
4886 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE_IMM);
4887 cmp->sreg1 = call->dreg;
4889 MONO_ADD_INS (cfg->cbb, cmp);
/* The branch keeps the original true/false targets. */
4891 MONO_INST_NEW (cfg, br, OP_IBNE_UN);
4892 br->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * 2);
4893 br->inst_true_bb = ins->next->inst_true_bb;
4894 br->inst_false_bb = ins->next->inst_false_bb;
4895 MONO_ADD_INS (cfg->cbb, br);
4897 /* The call sequence might include fp ins */
4900 /* Skip fbcc or fccc */
4901 NULLIFY_INS (ins->next);
/* fp compare-and-set (fceq etc.): icall + integer compare + OP_ICEQ. */
4909 MonoJitICallInfo *info;
4910 MonoInst *iargs [2];
4913 /* Convert fccc to icall+icompare+iceq */
4915 info = mono_find_jit_opcode_emulation (ins->opcode);
4918 /* Create dummy MonoInst's for the arguments */
4919 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
4920 iargs [0]->dreg = ins->sreg1;
4921 MONO_INST_NEW (cfg, iargs [1], OP_ARG);
4922 iargs [1]->dreg = ins->sreg2;
4924 call = mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, iargs);
4926 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, call->dreg, 1);
4927 MONO_EMIT_NEW_UNALU (cfg, OP_ICEQ, ins->dreg, -1);
4929 /* The call sequence might include fp ins */
/* Safety net: by now no instruction may still reference fp vregs. */
4934 if (spec [MONO_INST_SRC1] == 'f' || spec [MONO_INST_SRC2] == 'f' || spec [MONO_INST_DEST] == 'f') {
4935 mono_print_ins (ins);
4936 g_assert_not_reached ();
4941 g_assert (cfg->cbb == first_bb);
/* Splice any emitted replacement sequence in place of the original ins. */
4943 if (cfg->cbb->code || (cfg->cbb != first_bb)) {
4944 /* Replace the original instruction with the new code sequence */
4946 mono_replace_ins (cfg, bb, ins, &prev, first_bb, cfg->cbb);
4947 first_bb->code = first_bb->last_ins = NULL;
4948 first_bb->in_count = first_bb->out_count = 0;
4949 cfg->cbb = first_bb;
4956 if (cfg->verbose_level > 3) mono_print_bb (bb, "AFTER HANDLE-SOFT-FLOAT ");
/* This pass introduced long opcodes, so decompose them now. */
4959 mono_decompose_long_opts (cfg);
/*
 * emit_stloc_ir:
 *
 *   Emit the store of the value on top of the stack (*sp) into local N.
 * When the store would be a plain reg-reg move and the source is a constant
 * instruction, the move is optimized away by retargeting the constant's
 * dreg directly at the local's vreg; otherwise a normal LOCSTORE is emitted.
 */
4965 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
4968 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
/* Only ICONST/I8CONST sources are safe to retarget (see comment below). */
4969 if ((opcode == OP_MOVE) && ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
4970 /* Optimize reg-reg moves away */
4972 * Can't optimize other opcodes, since sp[0] might point to
4973 * the last ins of a decomposed opcode.
4975 sp [0]->dreg = (cfg)->locals [n]->dreg;
4977 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
4982 * ldloca inhibits many optimizations so try to get rid of it in common
/*
 * Peephole for `ldloca` followed by `initobj`: instead of taking the local's
 * address, directly zero the local (PCONST NULL for reference types, VZERO
 * for value types).  Returns the IP past the consumed sequence on success;
 * the SIZE argument distinguishes the short/fat encodings of ldloca
 * (presumably affecting operand width — confirm against the full source).
 */
4985 static inline unsigned char *
4986 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
4995 local = read16 (ip + 2);
/* Match 0xFE 0x15 (initobj) and require it to stay within the current bblock. */
4999 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
5000 gboolean skip = FALSE;
5002 /* From the INITOBJ case */
5003 token = read32 (ip + 2);
5004 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
5005 CHECK_TYPELOAD (klass);
/* Reference types (including shared-generic ones) init to NULL... */
5006 if (generic_class_is_reference_type (cfg, klass)) {
5007 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5008 } else if (MONO_TYPE_IS_REFERENCE (&klass->byval_arg)) {
5009 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
/* ...value types are zeroed wholesale. */
5010 } else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
5011 MONO_EMIT_NEW_VZERO (cfg, cfg->locals [local]->dreg, klass);
5024 * mono_method_to_ir:
5026 * Translate the .net IL into linear IR.
5029 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
5030 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
5031 guint inline_offset, gboolean is_virtual_call)
5033 MonoInst *ins, **sp, **stack_start;
5034 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
5035 MonoMethod *cmethod, *method_definition;
5036 MonoInst **arg_array;
5037 MonoMethodHeader *header;
5039 guint32 token, ins_flag;
5041 MonoClass *constrained_call = NULL;
5042 unsigned char *ip, *end, *target, *err_pos;
5043 static double r8_0 = 0.0;
5044 MonoMethodSignature *sig;
5045 MonoGenericContext *generic_context = NULL;
5046 MonoGenericContainer *generic_container = NULL;
5047 MonoType **param_types;
5048 int i, n, start_new_bblock, dreg;
5049 int num_calls = 0, inline_costs = 0;
5050 int breakpoint_id = 0;
5052 MonoBoolean security, pinvoke;
5053 MonoSecurityManager* secman = NULL;
5054 MonoDeclSecurityActions actions;
5055 GSList *class_inits = NULL;
5056 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
5059 /* serialization and xdomain stuff may need access to private fields and methods */
5060 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
5061 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
5062 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
5063 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
5064 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
5065 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
5067 dont_verify |= mono_security_get_mode () == MONO_SECURITY_MODE_SMCS_HACK;
5069 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
5070 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
5071 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
5072 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
5074 image = method->klass->image;
5075 header = mono_method_get_header (method);
5076 generic_container = mono_method_get_generic_container (method);
5077 sig = mono_method_signature (method);
5078 num_args = sig->hasthis + sig->param_count;
5079 ip = (unsigned char*)header->code;
5080 cfg->cil_start = ip;
5081 end = ip + header->code_size;
5082 mono_jit_stats.cil_code_size += header->code_size;
5084 method_definition = method;
5085 while (method_definition->is_inflated) {
5086 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
5087 method_definition = imethod->declaring;
5090 /* SkipVerification is not allowed if core-clr is enabled */
5091 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
5093 dont_verify_stloc = TRUE;
5096 if (!dont_verify && mini_method_verify (cfg, method_definition))
5097 goto exception_exit;
5099 if (mono_debug_using_mono_debugger ())
5100 cfg->keep_cil_nops = TRUE;
5102 if (sig->is_inflated)
5103 generic_context = mono_method_get_context (method);
5104 else if (generic_container)
5105 generic_context = &generic_container->context;
5106 cfg->generic_context = generic_context;
5108 if (!cfg->generic_sharing_context)
5109 g_assert (!sig->has_type_parameters);
5111 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
5112 g_assert (method->is_inflated);
5113 g_assert (mono_method_get_context (method)->method_inst);
5115 if (method->is_inflated && mono_method_get_context (method)->method_inst)
5116 g_assert (sig->generic_param_count);
5118 if (cfg->method == method) {
5119 cfg->real_offset = 0;
5121 cfg->real_offset = inline_offset;
5124 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
5125 cfg->cil_offset_to_bb_len = header->code_size;
5127 cfg->current_method = method;
5129 if (cfg->verbose_level > 2)
5130 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
5132 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
5134 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
5135 for (n = 0; n < sig->param_count; ++n)
5136 param_types [n + sig->hasthis] = sig->params [n];
5137 cfg->arg_types = param_types;
5139 dont_inline = g_list_prepend (dont_inline, method);
5140 if (cfg->method == method) {
5142 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
5143 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
5146 NEW_BBLOCK (cfg, start_bblock);
5147 cfg->bb_entry = start_bblock;
5148 start_bblock->cil_code = NULL;
5149 start_bblock->cil_length = 0;
5152 NEW_BBLOCK (cfg, end_bblock);
5153 cfg->bb_exit = end_bblock;
5154 end_bblock->cil_code = NULL;
5155 end_bblock->cil_length = 0;
5156 g_assert (cfg->num_bblocks == 2);
5158 arg_array = cfg->args;
5160 if (header->num_clauses) {
5161 cfg->spvars = g_hash_table_new (NULL, NULL);
5162 cfg->exvars = g_hash_table_new (NULL, NULL);
5164 /* handle exception clauses */
5165 for (i = 0; i < header->num_clauses; ++i) {
5166 MonoBasicBlock *try_bb;
5167 MonoExceptionClause *clause = &header->clauses [i];
5168 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
5169 try_bb->real_offset = clause->try_offset;
5170 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
5171 tblock->real_offset = clause->handler_offset;
5172 tblock->flags |= BB_EXCEPTION_HANDLER;
5174 link_bblock (cfg, try_bb, tblock);
5176 if (*(ip + clause->handler_offset) == CEE_POP)
5177 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
5179 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
5180 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
5181 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
5182 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5183 MONO_ADD_INS (tblock, ins);
5185 /* todo: is a fault block unsafe to optimize? */
5186 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
5187 tblock->flags |= BB_EXCEPTION_UNSAFE;
5191 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
5193 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
5195 /* catch and filter blocks get the exception object on the stack */
5196 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
5197 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5198 MonoInst *dummy_use;
5200 /* mostly like handle_stack_args (), but just sets the input args */
5201 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
5202 tblock->in_scount = 1;
5203 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5204 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5207 * Add a dummy use for the exvar so its liveness info will be
5211 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
5213 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5214 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
5215 tblock->flags |= BB_EXCEPTION_HANDLER;
5216 tblock->real_offset = clause->data.filter_offset;
5217 tblock->in_scount = 1;
5218 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5219 /* The filter block shares the exvar with the handler block */
5220 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5221 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5222 MONO_ADD_INS (tblock, ins);
5226 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
5227 clause->data.catch_class &&
5228 cfg->generic_sharing_context &&
5229 mono_class_check_context_used (clause->data.catch_class)) {
5230 if (mono_method_get_context (method)->method_inst)
5231 GENERIC_SHARING_FAILURE (CEE_NOP);
5234 * In shared generic code with catch
5235 * clauses containing type variables
5236 * the exception handling code has to
5237 * be able to get to the rgctx.
5238 * Therefore we have to make sure that
5239 * the vtable/mrgctx argument (for
5240 * static or generic methods) or the
5241 * "this" argument (for non-static
5242 * methods) are live.
5244 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5245 mini_method_get_context (method)->method_inst ||
5246 method->klass->valuetype) {
5247 mono_get_vtable_var (cfg);
5249 MonoInst *dummy_use;
5251 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
5256 arg_array = alloca (sizeof (MonoInst *) * num_args);
5257 cfg->cbb = start_bblock;
5258 cfg->args = arg_array;
5259 mono_save_args (cfg, sig, inline_args);
5262 /* FIRST CODE BLOCK */
5263 NEW_BBLOCK (cfg, bblock);
5264 bblock->cil_code = ip;
5268 ADD_BBLOCK (cfg, bblock);
5270 if (cfg->method == method) {
5271 breakpoint_id = mono_debugger_method_has_breakpoint (method);
5272 if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
5273 MONO_INST_NEW (cfg, ins, OP_BREAK);
5274 MONO_ADD_INS (bblock, ins);
5278 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5279 secman = mono_security_manager_get_methods ();
5281 security = (secman && mono_method_has_declsec (method));
5282 /* at this point having security doesn't mean we have any code to generate */
5283 if (security && (cfg->method == method)) {
5284 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
5285 * And we do not want to enter the next section (with allocation) if we
5286 * have nothing to generate */
5287 security = mono_declsec_get_demands (method, &actions);
5290 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
5291 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
5293 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5294 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5295 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
5297 /* unless the method or it's class has the [SuppressUnmanagedCodeSecurity] attribute */
5298 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5302 mono_custom_attrs_free (custom);
5305 custom = mono_custom_attrs_from_class (wrapped->klass);
5306 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5310 mono_custom_attrs_free (custom);
5313 /* not a P/Invoke after all */
5318 if ((header->init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
5319 /* we use a separate basic block for the initialization code */
5320 NEW_BBLOCK (cfg, init_localsbb);
5321 cfg->bb_init = init_localsbb;
5322 init_localsbb->real_offset = cfg->real_offset;
5323 start_bblock->next_bb = init_localsbb;
5324 init_localsbb->next_bb = bblock;
5325 link_bblock (cfg, start_bblock, init_localsbb);
5326 link_bblock (cfg, init_localsbb, bblock);
5328 cfg->cbb = init_localsbb;
5330 start_bblock->next_bb = bblock;
5331 link_bblock (cfg, start_bblock, bblock);
5334 /* at this point we know, if security is TRUE, that some code needs to be generated */
5335 if (security && (cfg->method == method)) {
5338 mono_jit_stats.cas_demand_generation++;
5340 if (actions.demand.blob) {
5341 /* Add code for SecurityAction.Demand */
5342 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
5343 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
5344 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5345 mono_emit_method_call (cfg, secman->demand, args, NULL);
5347 if (actions.noncasdemand.blob) {
5348 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
5349 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
5350 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
5351 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
5352 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5353 mono_emit_method_call (cfg, secman->demand, args, NULL);
5355 if (actions.demandchoice.blob) {
5356 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
5357 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
5358 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
5359 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
5360 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
5364 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
5366 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
5369 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
5370 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
5371 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5372 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5373 if (!(method->klass && method->klass->image &&
5374 mono_security_core_clr_is_platform_image (method->klass->image))) {
5375 emit_throw_method_access_exception (cfg, method, wrapped, bblock, ip);
5379 if (!method_is_safe (method))
5380 emit_throw_verification_exception (cfg, bblock, ip);
5383 if (header->code_size == 0)
5386 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
5391 if (cfg->method == method)
5392 mono_debug_init_method (cfg, bblock, breakpoint_id);
5394 for (n = 0; n < header->num_locals; ++n) {
5395 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
5400 /* add a check for this != NULL to inlined methods */
5401 if (is_virtual_call) {
5404 NEW_ARGLOAD (cfg, arg_ins, 0);
5405 MONO_ADD_INS (cfg->cbb, arg_ins);
5406 cfg->flags |= MONO_CFG_HAS_CHECK_THIS;
5407 MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, arg_ins->dreg);
5408 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, arg_ins->dreg);
5411 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
5412 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
5415 start_new_bblock = 0;
5419 if (cfg->method == method)
5420 cfg->real_offset = ip - header->code;
5422 cfg->real_offset = inline_offset;
5427 if (start_new_bblock) {
5428 bblock->cil_length = ip - bblock->cil_code;
5429 if (start_new_bblock == 2) {
5430 g_assert (ip == tblock->cil_code);
5432 GET_BBLOCK (cfg, tblock, ip);
5434 bblock->next_bb = tblock;
5437 start_new_bblock = 0;
5438 for (i = 0; i < bblock->in_scount; ++i) {
5439 if (cfg->verbose_level > 3)
5440 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5441 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5445 g_slist_free (class_inits);
5448 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
5449 link_bblock (cfg, bblock, tblock);
5450 if (sp != stack_start) {
5451 handle_stack_args (cfg, stack_start, sp - stack_start);
5453 CHECK_UNVERIFIABLE (cfg);
5455 bblock->next_bb = tblock;
5458 for (i = 0; i < bblock->in_scount; ++i) {
5459 if (cfg->verbose_level > 3)
5460 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5461 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5464 g_slist_free (class_inits);
5469 bblock->real_offset = cfg->real_offset;
5471 if ((cfg->method == method) && cfg->coverage_info) {
5472 guint32 cil_offset = ip - header->code;
5473 cfg->coverage_info->data [cil_offset].cil_code = ip;
5475 /* TODO: Use an increment here */
5476 #if defined(__i386__)
5477 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
5478 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
5480 MONO_ADD_INS (cfg->cbb, ins);
5482 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
5483 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
5487 if (cfg->verbose_level > 3)
5488 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
5492 if (cfg->keep_cil_nops)
5493 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
5495 MONO_INST_NEW (cfg, ins, OP_NOP);
5497 MONO_ADD_INS (bblock, ins);
5500 MONO_INST_NEW (cfg, ins, OP_BREAK);
5502 MONO_ADD_INS (bblock, ins);
5508 CHECK_STACK_OVF (1);
5509 n = (*ip)-CEE_LDARG_0;
5511 EMIT_NEW_ARGLOAD (cfg, ins, n);
5519 CHECK_STACK_OVF (1);
5520 n = (*ip)-CEE_LDLOC_0;
5522 EMIT_NEW_LOCLOAD (cfg, ins, n);
5531 n = (*ip)-CEE_STLOC_0;
5534 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
5536 emit_stloc_ir (cfg, sp, header, n);
5543 CHECK_STACK_OVF (1);
5546 EMIT_NEW_ARGLOAD (cfg, ins, n);
5552 CHECK_STACK_OVF (1);
5555 NEW_ARGLOADA (cfg, ins, n);
5556 MONO_ADD_INS (cfg->cbb, ins);
5566 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
5568 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
5573 CHECK_STACK_OVF (1);
5576 EMIT_NEW_LOCLOAD (cfg, ins, n);
5580 case CEE_LDLOCA_S: {
5581 unsigned char *tmp_ip;
5583 CHECK_STACK_OVF (1);
5584 CHECK_LOCAL (ip [1]);
5586 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
5592 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
5601 CHECK_LOCAL (ip [1]);
5602 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
5604 emit_stloc_ir (cfg, sp, header, ip [1]);
5609 CHECK_STACK_OVF (1);
5610 EMIT_NEW_PCONST (cfg, ins, NULL);
5611 ins->type = STACK_OBJ;
5616 CHECK_STACK_OVF (1);
5617 EMIT_NEW_ICONST (cfg, ins, -1);
5630 CHECK_STACK_OVF (1);
5631 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
5637 CHECK_STACK_OVF (1);
5639 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
5645 CHECK_STACK_OVF (1);
5646 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
5652 CHECK_STACK_OVF (1);
5653 MONO_INST_NEW (cfg, ins, OP_I8CONST);
5654 ins->type = STACK_I8;
5655 ins->dreg = alloc_dreg (cfg, STACK_I8);
5657 ins->inst_l = (gint64)read64 (ip);
5658 MONO_ADD_INS (bblock, ins);
5664 /* FIXME: we should really allocate this only late in the compilation process */
5665 mono_domain_lock (cfg->domain);
5666 f = mono_domain_alloc (cfg->domain, sizeof (float));
5667 mono_domain_unlock (cfg->domain);
5669 CHECK_STACK_OVF (1);
5670 MONO_INST_NEW (cfg, ins, OP_R4CONST);
5671 ins->type = STACK_R8;
5672 ins->dreg = alloc_dreg (cfg, STACK_R8);
5676 MONO_ADD_INS (bblock, ins);
5684 /* FIXME: we should really allocate this only late in the compilation process */
5685 mono_domain_lock (cfg->domain);
5686 d = mono_domain_alloc (cfg->domain, sizeof (double));
5687 mono_domain_unlock (cfg->domain);
5689 CHECK_STACK_OVF (1);
5690 MONO_INST_NEW (cfg, ins, OP_R8CONST);
5691 ins->type = STACK_R8;
5692 ins->dreg = alloc_dreg (cfg, STACK_R8);
5696 MONO_ADD_INS (bblock, ins);
5703 MonoInst *temp, *store;
5705 CHECK_STACK_OVF (1);
5709 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
5710 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
5712 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
5715 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
5728 if (sp [0]->type == STACK_R8)
5729 /* we need to pop the value from the x86 FP stack */
5730 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
5737 if (stack_start != sp)
5739 token = read32 (ip + 1);
5740 /* FIXME: check the signature matches */
5741 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
5746 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
5747 GENERIC_SHARING_FAILURE (CEE_JMP);
5749 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
5750 if (check_linkdemand (cfg, method, cmethod))
5752 CHECK_CFG_EXCEPTION;
5757 MonoMethodSignature *fsig = mono_method_signature (cmethod);
5760 /* Handle tail calls similarly to calls */
5761 n = fsig->param_count + fsig->hasthis;
5763 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
5764 call->method = cmethod;
5765 call->tail_call = TRUE;
5766 call->signature = mono_method_signature (cmethod);
5767 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
5768 call->inst.inst_p0 = cmethod;
5769 for (i = 0; i < n; ++i)
5770 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
5772 mono_arch_emit_call (cfg, call);
5773 MONO_ADD_INS (bblock, (MonoInst*)call);
5776 for (i = 0; i < num_args; ++i)
5777 /* Prevent arguments from being optimized away */
5778 arg_array [i]->flags |= MONO_INST_VOLATILE;
5780 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
5781 ins = (MonoInst*)call;
5782 ins->inst_p0 = cmethod;
5783 MONO_ADD_INS (bblock, ins);
5787 start_new_bblock = 1;
5792 case CEE_CALLVIRT: {
5793 MonoInst *addr = NULL;
5794 MonoMethodSignature *fsig = NULL;
5796 int virtual = *ip == CEE_CALLVIRT;
5797 int calli = *ip == CEE_CALLI;
5798 gboolean pass_imt_from_rgctx = FALSE;
5799 MonoInst *imt_arg = NULL;
5800 gboolean pass_vtable = FALSE;
5801 gboolean pass_mrgctx = FALSE;
5802 MonoInst *vtable_arg = NULL;
5803 gboolean check_this = FALSE;
5806 token = read32 (ip + 1);
5813 if (method->wrapper_type != MONO_WRAPPER_NONE)
5814 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
5816 fsig = mono_metadata_parse_signature (image, token);
5818 n = fsig->param_count + fsig->hasthis;
5820 MonoMethod *cil_method;
5822 if (method->wrapper_type != MONO_WRAPPER_NONE) {
5823 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
5824 cil_method = cmethod;
5825 } else if (constrained_call) {
5826 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
5828 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
5829 cil_method = cmethod;
5834 if (!dont_verify && !cfg->skip_visibility) {
5835 MonoMethod *target_method = cil_method;
5836 if (method->is_inflated) {
5837 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
5839 if (!mono_method_can_access_method (method_definition, target_method) &&
5840 !mono_method_can_access_method (method, cil_method))
5841 METHOD_ACCESS_FAILURE;
5844 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
5845 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
5847 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
5848 /* MS.NET seems to silently convert this to a callvirt */
5851 if (!cmethod->klass->inited)
5852 if (!mono_class_init (cmethod->klass))
5855 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
5856 mini_class_is_system_array (cmethod->klass)) {
5857 array_rank = cmethod->klass->rank;
5858 fsig = mono_method_signature (cmethod);
5860 if (mono_method_signature (cmethod)->pinvoke) {
5861 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
5862 check_for_pending_exc, FALSE);
5863 fsig = mono_method_signature (wrapper);
5864 } else if (constrained_call) {
5865 fsig = mono_method_signature (cmethod);
5867 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
5871 mono_save_token_info (cfg, image, token, cil_method);
5873 n = fsig->param_count + fsig->hasthis;
5875 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
5876 if (check_linkdemand (cfg, method, cmethod))
5878 CHECK_CFG_EXCEPTION;
5881 if (cmethod->string_ctor)
5882 g_assert_not_reached ();
5885 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
5888 if (!cfg->generic_sharing_context && cmethod)
5889 g_assert (!mono_method_check_context_used (cmethod));
5893 //g_assert (!virtual || fsig->hasthis);
5897 if (constrained_call) {
5899 * We have the `constrained.' prefix opcode.
5901 if (constrained_call->valuetype && !cmethod->klass->valuetype) {
5905 * The type parameter is instantiated as a valuetype,
5906 * but that type doesn't override the method we're
5907 * calling, so we need to box `this'.
5909 dreg = alloc_dreg (cfg, STACK_VTYPE);
5910 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADV_MEMBASE, dreg, sp [0]->dreg, 0);
5911 ins->klass = constrained_call;
5912 sp [0] = handle_box (cfg, ins, constrained_call);
5913 } else if (!constrained_call->valuetype) {
5914 int dreg = alloc_preg (cfg);
5917 * The type parameter is instantiated as a reference
5918 * type. We have a managed pointer on the stack, so
5919 * we need to dereference it here.
5921 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
5922 ins->type = STACK_OBJ;
5924 } else if (cmethod->klass->valuetype)
5926 constrained_call = NULL;
5929 if (*ip != CEE_CALLI && check_call_signature (cfg, fsig, sp))
5933 if (cmethod && ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
5934 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
5935 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
5936 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
5937 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
5940 * Pass vtable iff target method might
5941 * be shared, which means that sharing
5942 * is enabled for its class and its
5943 * context is sharable (and it's not a
5946 if (sharing_enabled && context_sharable &&
5947 !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
5951 if (cmethod && mini_method_get_context (cmethod) &&
5952 mini_method_get_context (cmethod)->method_inst) {
5953 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
5954 MonoGenericContext *context = mini_method_get_context (cmethod);
5955 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
5957 g_assert (!pass_vtable);
5959 if (sharing_enabled && context_sharable)
5963 if (cfg->generic_sharing_context && cmethod) {
5964 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
5966 context_used = mono_method_check_context_used (cmethod);
5968 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
5969 /* Generic method interface
5970 calls are resolved via a
5971 helper function and don't
5973 if (!cmethod_context || !cmethod_context->method_inst)
5974 pass_imt_from_rgctx = TRUE;
5978 * If a shared method calls another
5979 * shared method then the caller must
5980 * have a generic sharing context
5981 * because the magic trampoline
5982 * requires it. FIXME: We shouldn't
5983 * have to force the vtable/mrgctx
5984 * variable here. Instead there
5985 * should be a flag in the cfg to
5986 * request a generic sharing context.
5989 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
5990 mono_get_vtable_var (cfg);
5995 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
5997 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
5999 CHECK_TYPELOAD (cmethod->klass);
6000 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
6005 g_assert (!vtable_arg);
6008 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
6010 EMIT_NEW_METHOD_RGCTX_CONST (cfg, vtable_arg, cmethod);
6013 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
6014 (cmethod->flags & METHOD_ATTRIBUTE_FINAL)) {
6021 if (pass_imt_from_rgctx) {
6022 g_assert (!pass_vtable);
6025 imt_arg = emit_get_rgctx_method (cfg, context_used,
6026 cmethod, MONO_RGCTX_INFO_METHOD);
6032 MONO_INST_NEW (cfg, check, OP_CHECK_THIS);
6033 check->sreg1 = sp [0]->dreg;
6034 MONO_ADD_INS (cfg->cbb, check);
6037 /* Calling virtual generic methods */
6038 if (cmethod && virtual &&
6039 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
6040 !((cmethod->flags & METHOD_ATTRIBUTE_FINAL) &&
6041 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
6042 mono_method_signature (cmethod)->generic_param_count) {
6043 MonoInst *this_temp, *this_arg_temp, *store;
6044 MonoInst *iargs [4];
6046 g_assert (mono_method_signature (cmethod)->is_inflated);
6048 /* Prevent inlining of methods that contain indirect calls */
6051 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK
6052 if (!(cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) &&
6053 cmethod->wrapper_type == MONO_WRAPPER_NONE) {
6054 g_assert (!imt_arg);
6056 imt_arg = emit_get_rgctx_method (cfg, context_used,
6057 cmethod, MONO_RGCTX_INFO_METHOD_CONTEXT);
6061 cfg->disable_aot = TRUE;
6062 g_assert (cmethod->is_inflated);
6063 EMIT_NEW_PCONST (cfg, imt_arg,
6064 ((MonoMethodInflated*)cmethod)->context.method_inst);
6066 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, sp [0], imt_arg);
6070 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
6071 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
6072 MONO_ADD_INS (bblock, store);
6074 /* FIXME: This should be a managed pointer */
6075 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
6077 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
6079 iargs [1] = emit_get_rgctx_method (cfg, context_used,
6080 cmethod, MONO_RGCTX_INFO_METHOD);
6081 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
6082 addr = mono_emit_jit_icall (cfg,
6083 mono_helper_compile_generic_method, iargs);
6085 EMIT_NEW_METHODCONST (cfg, iargs [1], cmethod);
6086 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
6087 addr = mono_emit_jit_icall (cfg, mono_helper_compile_generic_method, iargs);
6090 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
6092 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6095 if (!MONO_TYPE_IS_VOID (fsig->ret))
6104 /* FIXME: runtime generic context pointer for jumps? */
6105 /* FIXME: handle this for generic sharing eventually */
6106 if ((ins_flag & MONO_INST_TAILCALL) && !cfg->generic_sharing_context && !vtable_arg && cmethod && (*ip == CEE_CALL) &&
6107 (mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)))) {
6110 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6113 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6114 call->tail_call = TRUE;
6115 call->method = cmethod;
6116 call->signature = mono_method_signature (cmethod);
6119 /* Handle tail calls similarly to calls */
6120 call->inst.opcode = OP_TAILCALL;
6122 mono_arch_emit_call (cfg, call);
6125 * We implement tail calls by storing the actual arguments into the
6126 * argument variables, then emitting a CEE_JMP.
6128 for (i = 0; i < n; ++i) {
6129 /* Prevent argument from being register allocated */
6130 arg_array [i]->flags |= MONO_INST_VOLATILE;
6131 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6135 ins = (MonoInst*)call;
6136 ins->inst_p0 = cmethod;
6137 ins->inst_p1 = arg_array [0];
6138 MONO_ADD_INS (bblock, ins);
6139 link_bblock (cfg, bblock, end_bblock);
6140 start_new_bblock = 1;
6141 /* skip CEE_RET as well */
6147 /* Conversion to a JIT intrinsic */
6148 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
6149 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6150 type_to_eval_stack_type ((cfg), fsig->ret, ins);
6161 if ((cfg->opt & MONO_OPT_INLINE) && cmethod &&
6162 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || (cmethod->flags & METHOD_ATTRIBUTE_FINAL)) &&
6163 mono_method_check_inlining (cfg, cmethod) &&
6164 !g_list_find (dont_inline, cmethod)) {
6166 gboolean allways = FALSE;
6168 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
6169 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
6170 /* Prevent inlining of methods that call wrappers */
6172 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
6176 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, allways))) {
6178 cfg->real_offset += 5;
6181 if (!MONO_TYPE_IS_VOID (fsig->ret))
6182 /* *sp is already set by inline_method */
6185 inline_costs += costs;
6191 inline_costs += 10 * num_calls++;
6193 /* Tail recursion elimination */
6194 if ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
6195 gboolean has_vtargs = FALSE;
6198 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6201 /* keep it simple */
6202 for (i = fsig->param_count - 1; i >= 0; i--) {
6203 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
6208 for (i = 0; i < n; ++i)
6209 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6210 MONO_INST_NEW (cfg, ins, OP_BR);
6211 MONO_ADD_INS (bblock, ins);
6212 tblock = start_bblock->out_bb [0];
6213 link_bblock (cfg, bblock, tblock);
6214 ins->inst_target_bb = tblock;
6215 start_new_bblock = 1;
6217 /* skip the CEE_RET, too */
6218 if (ip_in_bb (cfg, bblock, ip + 5))
6228 /* Generic sharing */
6229 /* FIXME: only do this for generic methods if
6230 they are not shared! */
6231 if (context_used && !imt_arg && !array_rank &&
6232 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
6233 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
6234 (!virtual || cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
6235 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
6238 g_assert (cfg->generic_sharing_context && cmethod);
6242 * We are compiling a call to a
6243 * generic method from shared code,
6244 * which means that we have to look up
6245 * the method in the rgctx and do an
6248 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
6251 /* Indirect calls */
6253 g_assert (!imt_arg);
6255 if (*ip == CEE_CALL)
6256 g_assert (context_used);
6257 else if (*ip == CEE_CALLI)
6258 g_assert (!vtable_arg);
6260 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
6261 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
6263 /* Prevent inlining of methods with indirect calls */
6267 #ifdef MONO_ARCH_RGCTX_REG
6269 int rgctx_reg = mono_alloc_preg (cfg);
6271 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
6272 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6273 call = (MonoCallInst*)ins;
6274 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
6275 cfg->uses_rgctx_reg = TRUE;
6280 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6282 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6283 if (fsig->pinvoke && !fsig->ret->byref) {
6287 * Native code might return non register sized integers
6288 * without initializing the upper bits.
6290 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
6291 case OP_LOADI1_MEMBASE:
6292 widen_op = OP_ICONV_TO_I1;
6294 case OP_LOADU1_MEMBASE:
6295 widen_op = OP_ICONV_TO_U1;
6297 case OP_LOADI2_MEMBASE:
6298 widen_op = OP_ICONV_TO_I2;
6300 case OP_LOADU2_MEMBASE:
6301 widen_op = OP_ICONV_TO_U2;
6307 if (widen_op != -1) {
6308 int dreg = alloc_preg (cfg);
6311 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
6312 widen->type = ins->type;
6329 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
6330 if (sp [fsig->param_count]->type == STACK_OBJ) {
6331 MonoInst *iargs [2];
6334 iargs [1] = sp [fsig->param_count];
6336 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
6339 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
6340 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, sp [fsig->param_count]->dreg);
6341 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
6342 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6344 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
6347 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
6348 if (!cmethod->klass->element_class->valuetype && !readonly)
6349 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
6352 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6355 g_assert_not_reached ();
6363 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
6365 if (!MONO_TYPE_IS_VOID (fsig->ret))
6376 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL,
6378 } else if (imt_arg) {
6379 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, imt_arg);
6381 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, NULL);
6384 if (!MONO_TYPE_IS_VOID (fsig->ret))
6392 if (cfg->method != method) {
6393 /* return from inlined method */
6395 * If in_count == 0, that means the ret is unreachable due to
6396 * being preceeded by a throw. In that case, inline_method () will
6397 * handle setting the return value
6398 * (test case: test_0_inline_throw ()).
6400 if (return_var && cfg->cbb->in_count) {
6404 //g_assert (returnvar != -1);
6405 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
6406 cfg->ret_var_set = TRUE;
6410 MonoType *ret_type = mono_method_signature (method)->ret;
6412 g_assert (!return_var);
6415 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
6418 if (!cfg->vret_addr) {
6421 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
6423 EMIT_NEW_RETLOADA (cfg, ret_addr);
6425 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
6426 ins->klass = mono_class_from_mono_type (ret_type);
6429 #ifdef MONO_ARCH_SOFT_FLOAT
6430 if (!ret_type->byref && ret_type->type == MONO_TYPE_R4) {
6431 MonoInst *iargs [1];
6435 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
6436 mono_arch_emit_setret (cfg, method, conv);
6438 mono_arch_emit_setret (cfg, method, *sp);
6441 mono_arch_emit_setret (cfg, method, *sp);
6446 if (sp != stack_start)
6448 MONO_INST_NEW (cfg, ins, OP_BR);
6450 ins->inst_target_bb = end_bblock;
6451 MONO_ADD_INS (bblock, ins);
6452 link_bblock (cfg, bblock, end_bblock);
6453 start_new_bblock = 1;
6457 MONO_INST_NEW (cfg, ins, OP_BR);
6459 target = ip + 1 + (signed char)(*ip);
6461 GET_BBLOCK (cfg, tblock, target);
6462 link_bblock (cfg, bblock, tblock);
6463 ins->inst_target_bb = tblock;
6464 if (sp != stack_start) {
6465 handle_stack_args (cfg, stack_start, sp - stack_start);
6467 CHECK_UNVERIFIABLE (cfg);
6469 MONO_ADD_INS (bblock, ins);
6470 start_new_bblock = 1;
6471 inline_costs += BRANCH_COST;
6485 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
6487 target = ip + 1 + *(signed char*)ip;
6493 inline_costs += BRANCH_COST;
6497 MONO_INST_NEW (cfg, ins, OP_BR);
6500 target = ip + 4 + (gint32)read32(ip);
6502 GET_BBLOCK (cfg, tblock, target);
6503 link_bblock (cfg, bblock, tblock);
6504 ins->inst_target_bb = tblock;
6505 if (sp != stack_start) {
6506 handle_stack_args (cfg, stack_start, sp - stack_start);
6508 CHECK_UNVERIFIABLE (cfg);
6511 MONO_ADD_INS (bblock, ins);
6513 start_new_bblock = 1;
6514 inline_costs += BRANCH_COST;
6521 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
6522 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
6523 guint32 opsize = is_short ? 1 : 4;
6525 CHECK_OPSIZE (opsize);
6527 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
6530 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
6535 GET_BBLOCK (cfg, tblock, target);
6536 link_bblock (cfg, bblock, tblock);
6537 GET_BBLOCK (cfg, tblock, ip);
6538 link_bblock (cfg, bblock, tblock);
6540 if (sp != stack_start) {
6541 handle_stack_args (cfg, stack_start, sp - stack_start);
6542 CHECK_UNVERIFIABLE (cfg);
6545 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
6546 cmp->sreg1 = sp [0]->dreg;
6547 type_from_op (cmp, sp [0], NULL);
6550 #if SIZEOF_VOID_P == 4
6551 if (cmp->opcode == OP_LCOMPARE_IMM) {
6552 /* Convert it to OP_LCOMPARE */
6553 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6554 ins->type = STACK_I8;
6555 ins->dreg = alloc_dreg (cfg, STACK_I8);
6557 MONO_ADD_INS (bblock, ins);
6558 cmp->opcode = OP_LCOMPARE;
6559 cmp->sreg2 = ins->dreg;
6562 MONO_ADD_INS (bblock, cmp);
6564 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
6565 type_from_op (ins, sp [0], NULL);
6566 MONO_ADD_INS (bblock, ins);
6567 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
6568 GET_BBLOCK (cfg, tblock, target);
6569 ins->inst_true_bb = tblock;
6570 GET_BBLOCK (cfg, tblock, ip);
6571 ins->inst_false_bb = tblock;
6572 start_new_bblock = 2;
6575 inline_costs += BRANCH_COST;
6590 MONO_INST_NEW (cfg, ins, *ip);
6592 target = ip + 4 + (gint32)read32(ip);
6598 inline_costs += BRANCH_COST;
6602 MonoBasicBlock **targets;
6603 MonoBasicBlock *default_bblock;
6604 MonoJumpInfoBBTable *table;
6605 int offset_reg = alloc_preg (cfg);
6606 int target_reg = alloc_preg (cfg);
6607 int table_reg = alloc_preg (cfg);
6608 int sum_reg = alloc_preg (cfg);
6609 gboolean use_op_switch;
6613 n = read32 (ip + 1);
6616 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
6620 CHECK_OPSIZE (n * sizeof (guint32));
6621 target = ip + n * sizeof (guint32);
6623 GET_BBLOCK (cfg, default_bblock, target);
6625 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
6626 for (i = 0; i < n; ++i) {
6627 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
6628 targets [i] = tblock;
6632 if (sp != stack_start) {
6634 * Link the current bb with the targets as well, so handle_stack_args
6635 * will set their in_stack correctly.
6637 link_bblock (cfg, bblock, default_bblock);
6638 for (i = 0; i < n; ++i)
6639 link_bblock (cfg, bblock, targets [i]);
6641 handle_stack_args (cfg, stack_start, sp - stack_start);
6643 CHECK_UNVERIFIABLE (cfg);
6646 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
6647 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
6650 for (i = 0; i < n; ++i)
6651 link_bblock (cfg, bblock, targets [i]);
6653 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
6654 table->table = targets;
6655 table->table_size = n;
6657 use_op_switch = FALSE;
6659 /* ARM implements SWITCH statements differently */
6660 /* FIXME: Make it use the generic implementation */
6661 if (!cfg->compile_aot)
6662 use_op_switch = TRUE;
6665 if (use_op_switch) {
6666 MONO_INST_NEW (cfg, ins, OP_SWITCH);
6667 ins->sreg1 = src1->dreg;
6668 ins->inst_p0 = table;
6669 ins->inst_many_bb = targets;
6670 ins->klass = GUINT_TO_POINTER (n);
6671 MONO_ADD_INS (cfg->cbb, ins);
6673 if (sizeof (gpointer) == 8)
6674 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
6676 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
6678 #if SIZEOF_VOID_P == 8
6679 /* The upper word might not be zero, and we add it to a 64 bit address later */
6680 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
6683 if (cfg->compile_aot) {
6684 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
6686 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
6687 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
6688 ins->inst_p0 = table;
6689 ins->dreg = table_reg;
6690 MONO_ADD_INS (cfg->cbb, ins);
6693 /* FIXME: Use load_memindex */
6694 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
6695 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
6696 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
6698 start_new_bblock = 1;
6699 inline_costs += (BRANCH_COST * 2);
6719 dreg = alloc_freg (cfg);
6722 dreg = alloc_lreg (cfg);
6725 dreg = alloc_preg (cfg);
6728 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
6729 ins->type = ldind_type [*ip - CEE_LDIND_I1];
6730 ins->flags |= ins_flag;
6732 MONO_ADD_INS (bblock, ins);
6747 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
6748 ins->flags |= ins_flag;
6750 MONO_ADD_INS (bblock, ins);
6758 MONO_INST_NEW (cfg, ins, (*ip));
6760 ins->sreg1 = sp [0]->dreg;
6761 ins->sreg2 = sp [1]->dreg;
6762 type_from_op (ins, sp [0], sp [1]);
6764 ins->dreg = alloc_dreg ((cfg), (ins)->type);
6766 /* Use the immediate opcodes if possible */
6767 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
6768 int imm_opcode = mono_op_to_op_imm (ins->opcode);
6769 if (imm_opcode != -1) {
6770 ins->opcode = imm_opcode;
6771 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
6774 sp [1]->opcode = OP_NOP;
6778 MONO_ADD_INS ((cfg)->cbb, (ins));
6781 mono_decompose_opcode (cfg, ins);
6798 MONO_INST_NEW (cfg, ins, (*ip));
6800 ins->sreg1 = sp [0]->dreg;
6801 ins->sreg2 = sp [1]->dreg;
6802 type_from_op (ins, sp [0], sp [1]);
6804 ADD_WIDEN_OP (ins, sp [0], sp [1]);
6805 ins->dreg = alloc_dreg ((cfg), (ins)->type);
6807 /* FIXME: Pass opcode to is_inst_imm */
6809 /* Use the immediate opcodes if possible */
6810 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
6813 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
6814 if (imm_opcode != -1) {
6815 ins->opcode = imm_opcode;
6816 if (sp [1]->opcode == OP_I8CONST) {
6817 #if SIZEOF_VOID_P == 8
6818 ins->inst_imm = sp [1]->inst_l;
6820 ins->inst_ls_word = sp [1]->inst_ls_word;
6821 ins->inst_ms_word = sp [1]->inst_ms_word;
6825 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
6828 /* Might be followed by an instruction added by ADD_WIDEN_OP */
6829 if (sp [1]->next == NULL)
6830 sp [1]->opcode = OP_NOP;
6833 MONO_ADD_INS ((cfg)->cbb, (ins));
6836 mono_decompose_opcode (cfg, ins);
6849 case CEE_CONV_OVF_I8:
6850 case CEE_CONV_OVF_U8:
6854 /* Special case this earlier so we have long constants in the IR */
6855 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
6856 int data = sp [-1]->inst_c0;
6857 sp [-1]->opcode = OP_I8CONST;
6858 sp [-1]->type = STACK_I8;
6859 #if SIZEOF_VOID_P == 8
6860 if ((*ip) == CEE_CONV_U8)
6861 sp [-1]->inst_c0 = (guint32)data;
6863 sp [-1]->inst_c0 = data;
6865 sp [-1]->inst_ls_word = data;
6866 if ((*ip) == CEE_CONV_U8)
6867 sp [-1]->inst_ms_word = 0;
6869 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
6871 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
6878 case CEE_CONV_OVF_I4:
6879 case CEE_CONV_OVF_I1:
6880 case CEE_CONV_OVF_I2:
6881 case CEE_CONV_OVF_I:
6882 case CEE_CONV_OVF_U:
6885 if (sp [-1]->type == STACK_R8) {
6886 ADD_UNOP (CEE_CONV_OVF_I8);
6893 case CEE_CONV_OVF_U1:
6894 case CEE_CONV_OVF_U2:
6895 case CEE_CONV_OVF_U4:
6898 if (sp [-1]->type == STACK_R8) {
6899 ADD_UNOP (CEE_CONV_OVF_U8);
6906 case CEE_CONV_OVF_I1_UN:
6907 case CEE_CONV_OVF_I2_UN:
6908 case CEE_CONV_OVF_I4_UN:
6909 case CEE_CONV_OVF_I8_UN:
6910 case CEE_CONV_OVF_U1_UN:
6911 case CEE_CONV_OVF_U2_UN:
6912 case CEE_CONV_OVF_U4_UN:
6913 case CEE_CONV_OVF_U8_UN:
6914 case CEE_CONV_OVF_I_UN:
6915 case CEE_CONV_OVF_U_UN:
6925 case CEE_ADD_OVF_UN:
6927 case CEE_MUL_OVF_UN:
6929 case CEE_SUB_OVF_UN:
6937 token = read32 (ip + 1);
6938 klass = mini_get_class (method, token, generic_context);
6939 CHECK_TYPELOAD (klass);
6941 if (generic_class_is_reference_type (cfg, klass)) {
6942 MonoInst *store, *load;
6943 int dreg = alloc_preg (cfg);
6945 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
6946 load->flags |= ins_flag;
6947 MONO_ADD_INS (cfg->cbb, load);
6949 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
6950 store->flags |= ins_flag;
6951 MONO_ADD_INS (cfg->cbb, store);
6953 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
6965 token = read32 (ip + 1);
6966 klass = mini_get_class (method, token, generic_context);
6967 CHECK_TYPELOAD (klass);
6969 /* Optimize the common ldobj+stloc combination */
6979 loc_index = ip [5] - CEE_STLOC_0;
6986 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
6987 CHECK_LOCAL (loc_index);
6989 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
6990 ins->dreg = cfg->locals [loc_index]->dreg;
6996 /* Optimize the ldobj+stobj combination */
6997 /* The reference case ends up being a load+store anyway */
6998 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 9) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
7003 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7010 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7019 CHECK_STACK_OVF (1);
7021 n = read32 (ip + 1);
7023 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
7024 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
7025 ins->type = STACK_OBJ;
7028 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
7029 MonoInst *iargs [1];
7031 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
7032 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
7034 if (cfg->opt & MONO_OPT_SHARED) {
7035 MonoInst *iargs [3];
7037 if (cfg->compile_aot) {
7038 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
7040 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7041 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
7042 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
7043 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
7044 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7046 if (bblock->out_of_line) {
7047 MonoInst *iargs [2];
7049 if (cfg->method->klass->image == mono_defaults.corlib) {
7051 * Avoid relocations in AOT and save some space by using a
7052 * version of helper_ldstr specialized to mscorlib.
7054 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
7055 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
7057 /* Avoid creating the string object */
7058 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
7059 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
7060 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
7064 if (cfg->compile_aot) {
7065 NEW_LDSTRCONST (cfg, ins, image, n);
7067 MONO_ADD_INS (bblock, ins);
7070 NEW_PCONST (cfg, ins, NULL);
7071 ins->type = STACK_OBJ;
7072 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7074 MONO_ADD_INS (bblock, ins);
7083 MonoInst *iargs [2];
7084 MonoMethodSignature *fsig;
7087 MonoInst *vtable_arg = NULL;
7090 token = read32 (ip + 1);
7091 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7094 fsig = mono_method_get_signature (cmethod, image, token);
7096 mono_save_token_info (cfg, image, token, cmethod);
7098 if (!mono_class_init (cmethod->klass))
7101 if (cfg->generic_sharing_context)
7102 context_used = mono_method_check_context_used (cmethod);
7104 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
7105 if (check_linkdemand (cfg, method, cmethod))
7107 CHECK_CFG_EXCEPTION;
7108 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
7109 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
7112 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7113 mono_method_is_generic_sharable_impl (cmethod, TRUE)) {
7114 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7116 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7117 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7119 EMIT_NEW_METHOD_RGCTX_CONST (cfg, vtable_arg, cmethod);
7123 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
7124 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7126 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7128 CHECK_TYPELOAD (cmethod->klass);
7129 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7134 n = fsig->param_count;
7138 * Generate smaller code for the common newobj <exception> instruction in
7139 * argument checking code.
7141 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib && n <= 2 &&
7142 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
7143 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
7144 MonoInst *iargs [3];
7146 g_assert (!vtable_arg);
7150 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
7153 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
7157 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
7162 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
7165 g_assert_not_reached ();
7173 /* move the args to allow room for 'this' in the first position */
7179 /* check_call_signature () requires sp[0] to be set */
7180 this_ins.type = STACK_OBJ;
7182 if (check_call_signature (cfg, fsig, sp))
7187 if (mini_class_is_system_array (cmethod->klass)) {
7189 GENERIC_SHARING_FAILURE (*ip);
7190 g_assert (!context_used);
7191 g_assert (!vtable_arg);
7192 EMIT_NEW_METHODCONST (cfg, *sp, cmethod);
7194 /* Avoid varargs in the common case */
7195 if (fsig->param_count == 1)
7196 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
7197 else if (fsig->param_count == 2)
7198 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
7200 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
7201 } else if (cmethod->string_ctor) {
7202 g_assert (!context_used);
7203 g_assert (!vtable_arg);
7204 /* we simply pass a null pointer */
7205 EMIT_NEW_PCONST (cfg, *sp, NULL);
7206 /* now call the string ctor */
7207 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, sp, NULL, NULL);
7209 MonoInst* callvirt_this_arg = NULL;
7211 if (cmethod->klass->valuetype) {
7212 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
7213 MONO_EMIT_NEW_VZERO (cfg, iargs [0]->dreg, cmethod->klass);
7214 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
7219 * The code generated by mini_emit_virtual_call () expects
7220 * iargs [0] to be a boxed instance, but luckily the vcall
7221 * will be transformed into a normal call there.
7223 } else if (context_used) {
7227 if (cfg->opt & MONO_OPT_SHARED)
7228 rgctx_info = MONO_RGCTX_INFO_KLASS;
7230 rgctx_info = MONO_RGCTX_INFO_VTABLE;
7231 data = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, rgctx_info);
7233 alloc = handle_alloc_from_inst (cfg, cmethod->klass, data, FALSE);
7236 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7238 CHECK_TYPELOAD (cmethod->klass);
7241 * TypeInitializationExceptions thrown from the mono_runtime_class_init
7242 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
7243 * As a workaround, we call class cctors before allocating objects.
7245 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
7246 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
7247 if (cfg->verbose_level > 2)
7248 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
7249 class_inits = g_slist_prepend (class_inits, vtable);
7252 alloc = handle_alloc (cfg, cmethod->klass, FALSE);
7257 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
7259 /* Now call the actual ctor */
7260 /* Avoid virtual calls to ctors if possible */
7261 if (cmethod->klass->marshalbyref)
7262 callvirt_this_arg = sp [0];
7264 if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7265 mono_method_check_inlining (cfg, cmethod) &&
7266 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
7267 !g_list_find (dont_inline, cmethod)) {
7270 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
7271 cfg->real_offset += 5;
7274 inline_costs += costs - 5;
7277 mono_emit_method_call_full (cfg, cmethod, fsig, sp, callvirt_this_arg, NULL);
7279 } else if (context_used &&
7280 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
7281 !mono_class_generic_sharing_enabled (cmethod->klass))) {
7282 MonoInst *cmethod_addr;
7284 g_assert (!callvirt_this_arg);
7286 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7287 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7289 mono_emit_rgctx_calli (cfg, fsig, sp, cmethod_addr, vtable_arg);
7292 mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp,
7293 callvirt_this_arg, NULL, vtable_arg);
7297 if (alloc == NULL) {
7299 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
7300 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
7314 token = read32 (ip + 1);
7315 klass = mini_get_class (method, token, generic_context);
7316 CHECK_TYPELOAD (klass);
7317 if (sp [0]->type != STACK_OBJ)
7320 if (cfg->generic_sharing_context)
7321 context_used = mono_class_check_context_used (klass);
7330 args [1] = emit_get_rgctx_klass (cfg, context_used,
7331 klass, MONO_RGCTX_INFO_KLASS);
7333 ins = mono_emit_jit_icall (cfg, mono_object_castclass, args);
7337 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7338 MonoMethod *mono_castclass;
7339 MonoInst *iargs [1];
7342 mono_castclass = mono_marshal_get_castclass (klass);
7345 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7346 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7347 g_assert (costs > 0);
7350 cfg->real_offset += 5;
7355 inline_costs += costs;
7358 ins = handle_castclass (cfg, klass, *sp);
7368 token = read32 (ip + 1);
7369 klass = mini_get_class (method, token, generic_context);
7370 CHECK_TYPELOAD (klass);
7371 if (sp [0]->type != STACK_OBJ)
7374 if (cfg->generic_sharing_context)
7375 context_used = mono_class_check_context_used (klass);
7384 args [1] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
7386 *sp = mono_emit_jit_icall (cfg, mono_object_isinst, args);
7390 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7391 MonoMethod *mono_isinst;
7392 MonoInst *iargs [1];
7395 mono_isinst = mono_marshal_get_isinst (klass);
7398 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
7399 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7400 g_assert (costs > 0);
7403 cfg->real_offset += 5;
7408 inline_costs += costs;
7411 ins = handle_isinst (cfg, klass, *sp);
7418 case CEE_UNBOX_ANY: {
7422 token = read32 (ip + 1);
7423 klass = mini_get_class (method, token, generic_context);
7424 CHECK_TYPELOAD (klass);
7426 mono_save_token_info (cfg, image, token, klass);
7428 if (cfg->generic_sharing_context)
7429 context_used = mono_class_check_context_used (klass);
7431 if (generic_class_is_reference_type (cfg, klass)) {
7434 MonoInst *iargs [2];
7439 iargs [1] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
7440 ins = mono_emit_jit_icall (cfg, mono_object_castclass, iargs);
7444 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7445 MonoMethod *mono_castclass;
7446 MonoInst *iargs [1];
7449 mono_castclass = mono_marshal_get_castclass (klass);
7452 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7453 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7455 g_assert (costs > 0);
7458 cfg->real_offset += 5;
7462 inline_costs += costs;
7464 ins = handle_castclass (cfg, klass, *sp);
7472 if (mono_class_is_nullable (klass)) {
7473 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
7480 ins = handle_unbox (cfg, klass, sp, context_used);
7486 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7499 token = read32 (ip + 1);
7500 klass = mini_get_class (method, token, generic_context);
7501 CHECK_TYPELOAD (klass);
7503 mono_save_token_info (cfg, image, token, klass);
7505 if (cfg->generic_sharing_context)
7506 context_used = mono_class_check_context_used (klass);
7508 if (generic_class_is_reference_type (cfg, klass)) {
7514 if (klass == mono_defaults.void_class)
7516 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
7518 /* frequent check in generic code: box (struct), brtrue */
7519 if (!mono_class_is_nullable (klass) &&
7520 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) && (ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S)) {
7521 /*printf ("box-brtrue opt at 0x%04x in %s\n", real_offset, method->name);*/
7523 MONO_INST_NEW (cfg, ins, OP_BR);
7524 if (*ip == CEE_BRTRUE_S) {
7527 target = ip + 1 + (signed char)(*ip);
7532 target = ip + 4 + (gint)(read32 (ip));
7535 GET_BBLOCK (cfg, tblock, target);
7536 link_bblock (cfg, bblock, tblock);
7537 ins->inst_target_bb = tblock;
7538 GET_BBLOCK (cfg, tblock, ip);
7540 * This leads to some inconsistency, since the two bblocks are
7541 * not really connected, but it is needed for handling stack
7542 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
7543 * FIXME: This should only be needed if sp != stack_start, but that
7544 * doesn't work for some reason (test failure in mcs/tests on x86).
7546 link_bblock (cfg, bblock, tblock);
7547 if (sp != stack_start) {
7548 handle_stack_args (cfg, stack_start, sp - stack_start);
7550 CHECK_UNVERIFIABLE (cfg);
7552 MONO_ADD_INS (bblock, ins);
7553 start_new_bblock = 1;
7561 if (cfg->opt & MONO_OPT_SHARED)
7562 rgctx_info = MONO_RGCTX_INFO_KLASS;
7564 rgctx_info = MONO_RGCTX_INFO_VTABLE;
7565 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
7566 *sp++ = handle_box_from_inst (cfg, val, klass, context_used, data);
7568 *sp++ = handle_box (cfg, val, klass);
7579 token = read32 (ip + 1);
7580 klass = mini_get_class (method, token, generic_context);
7581 CHECK_TYPELOAD (klass);
7583 mono_save_token_info (cfg, image, token, klass);
7585 if (cfg->generic_sharing_context)
7586 context_used = mono_class_check_context_used (klass);
7588 if (mono_class_is_nullable (klass)) {
7591 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
7592 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
7596 ins = handle_unbox (cfg, klass, sp, context_used);
7606 MonoClassField *field;
7610 if (*ip == CEE_STFLD) {
7617 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
7619 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
7622 token = read32 (ip + 1);
7623 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7624 field = mono_method_get_wrapper_data (method, token);
7625 klass = field->parent;
7628 field = mono_field_from_token (image, token, &klass, generic_context);
7632 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
7633 FIELD_ACCESS_FAILURE;
7634 mono_class_init (klass);
7636 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
7637 if (*ip == CEE_STFLD) {
7638 if (target_type_is_incompatible (cfg, field->type, sp [1]))
7640 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
7641 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
7642 MonoInst *iargs [5];
7645 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
7646 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
7647 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
7651 if (cfg->opt & MONO_OPT_INLINE) {
7652 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
7653 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7654 g_assert (costs > 0);
7656 cfg->real_offset += 5;
7659 inline_costs += costs;
7661 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
7666 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
7668 store->flags |= ins_flag;
7675 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
7676 MonoMethod *wrapper = (*ip == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
7677 MonoInst *iargs [4];
7680 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
7681 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
7682 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
7683 if ((cfg->opt & MONO_OPT_INLINE) && !MONO_TYPE_ISSTRUCT (mono_method_signature (wrapper)->ret)) {
7684 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
7685 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7687 g_assert (costs > 0);
7689 cfg->real_offset += 5;
7693 inline_costs += costs;
7695 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
7699 if (sp [0]->type == STACK_VTYPE) {
7702 /* Have to compute the address of the variable */
7704 var = get_vreg_to_inst (cfg, sp [0]->dreg);
7706 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
7708 g_assert (var->klass == klass);
7710 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
7714 if (*ip == CEE_LDFLDA) {
7715 dreg = alloc_preg (cfg);
7717 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
7718 ins->klass = mono_class_from_mono_type (field->type);
7719 ins->type = STACK_MP;
7724 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
7725 load->flags |= ins_flag;
7736 MonoClassField *field;
7737 gpointer addr = NULL;
7738 gboolean is_special_static;
7741 token = read32 (ip + 1);
7743 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7744 field = mono_method_get_wrapper_data (method, token);
7745 klass = field->parent;
7748 field = mono_field_from_token (image, token, &klass, generic_context);
7751 mono_class_init (klass);
7752 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
7753 FIELD_ACCESS_FAILURE;
7756 * We can only support shared generic static
7757 * field access on architectures where the
7758 * trampoline code has been extended to handle
7759 * the generic class init.
7761 #ifndef MONO_ARCH_VTABLE_REG
7762 GENERIC_SHARING_FAILURE (*ip);
7765 if (cfg->generic_sharing_context)
7766 context_used = mono_class_check_context_used (klass);
7768 g_assert (!(field->type->attrs & FIELD_ATTRIBUTE_LITERAL));
7770 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
7771 * to be called here.
7773 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
7774 mono_class_vtable (cfg->domain, klass);
7775 CHECK_TYPELOAD (klass);
7777 mono_domain_lock (cfg->domain);
7778 if (cfg->domain->special_static_fields)
7779 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
7780 mono_domain_unlock (cfg->domain);
7782 is_special_static = mono_class_field_is_special_static (field);
7784 /* Generate IR to compute the field address */
7786 if ((cfg->opt & MONO_OPT_SHARED) ||
7787 (cfg->compile_aot && is_special_static) ||
7788 (context_used && is_special_static)) {
7789 MonoInst *iargs [2];
7791 g_assert (field->parent);
7792 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7794 iargs [1] = emit_get_rgctx_field (cfg, context_used,
7795 field, MONO_RGCTX_INFO_CLASS_FIELD);
7797 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
7799 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
7800 } else if (context_used) {
7801 MonoInst *static_data;
7804 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
7805 method->klass->name_space, method->klass->name, method->name,
7806 depth, field->offset);
7809 if (mono_class_needs_cctor_run (klass, method)) {
7813 vtable = emit_get_rgctx_klass (cfg, context_used,
7814 klass, MONO_RGCTX_INFO_VTABLE);
7816 // FIXME: This doesn't work since it tries to pass the argument
7817 // in the normal way, instead of using MONO_ARCH_VTABLE_REG
7819 * The vtable pointer is always passed in a register regardless of
7820 * the calling convention, so assign it manually, and make a call
7821 * using a signature without parameters.
7823 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable);
7824 #ifdef MONO_ARCH_VTABLE_REG
7825 mono_call_inst_add_outarg_reg (cfg, call, vtable->dreg, MONO_ARCH_VTABLE_REG, FALSE);
7826 cfg->uses_vtable_reg = TRUE;
7833 * The pointer we're computing here is
7835 * super_info.static_data + field->offset
7837 static_data = emit_get_rgctx_klass (cfg, context_used,
7838 klass, MONO_RGCTX_INFO_STATIC_DATA);
7840 if (field->offset == 0) {
7843 int addr_reg = mono_alloc_preg (cfg);
7844 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
7846 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
7847 MonoInst *iargs [2];
7849 g_assert (field->parent);
7850 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7851 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
7852 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
7854 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
7856 CHECK_TYPELOAD (klass);
7858 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
7859 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
7860 if (cfg->verbose_level > 2)
7861 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
7862 class_inits = g_slist_prepend (class_inits, vtable);
7864 if (cfg->run_cctors) {
7866 /* This makes so that inline cannot trigger */
7867 /* .cctors: too many apps depend on them */
7868 /* running with a specific order... */
7869 if (! vtable->initialized)
7871 ex = mono_runtime_class_init_full (vtable, FALSE);
7873 set_exception_object (cfg, ex);
7874 goto exception_exit;
7878 addr = (char*)vtable->data + field->offset;
7880 if (cfg->compile_aot)
7881 EMIT_NEW_SFLDACONST (cfg, ins, field);
7883 EMIT_NEW_PCONST (cfg, ins, addr);
7886 * insert call to mono_threads_get_static_data (GPOINTER_TO_UINT (addr))
7887 * This could be later optimized to do just a couple of
7888 * memory dereferences with constant offsets.
7890 MonoInst *iargs [1];
7891 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
7892 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
7896 /* Generate IR to do the actual load/store operation */
7898 if (*ip == CEE_LDSFLDA) {
7899 ins->klass = mono_class_from_mono_type (field->type);
7900 ins->type = STACK_PTR;
7902 } else if (*ip == CEE_STSFLD) {
7907 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, ins->dreg, 0, sp [0]->dreg);
7908 store->flags |= ins_flag;
7910 gboolean is_const = FALSE;
7911 MonoVTable *vtable = NULL;
7913 if (!context_used) {
7914 vtable = mono_class_vtable (cfg->domain, klass);
7915 CHECK_TYPELOAD (klass);
7917 if (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) &&
7918 vtable->initialized && (field->type->attrs & FIELD_ATTRIBUTE_INIT_ONLY)) {
7919 gpointer addr = (char*)vtable->data + field->offset;
7920 int ro_type = field->type->type;
7921 if (ro_type == MONO_TYPE_VALUETYPE && field->type->data.klass->enumtype) {
7922 ro_type = field->type->data.klass->enum_basetype->type;
7924 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
7927 case MONO_TYPE_BOOLEAN:
7929 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
7933 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
7936 case MONO_TYPE_CHAR:
7938 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
7942 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
7947 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
7951 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
7956 case MONO_TYPE_STRING:
7957 case MONO_TYPE_OBJECT:
7958 case MONO_TYPE_CLASS:
7959 case MONO_TYPE_SZARRAY:
7961 case MONO_TYPE_FNPTR:
7962 case MONO_TYPE_ARRAY:
7963 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
7964 type_to_eval_stack_type ((cfg), field->type, *sp);
7969 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
7974 case MONO_TYPE_VALUETYPE:
7984 CHECK_STACK_OVF (1);
7986 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
7987 load->flags |= ins_flag;
8000 token = read32 (ip + 1);
8001 klass = mini_get_class (method, token, generic_context);
8002 CHECK_TYPELOAD (klass);
8003 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
8004 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
8015 const char *data_ptr;
8017 guint32 field_token;
8023 token = read32 (ip + 1);
8025 klass = mini_get_class (method, token, generic_context);
8026 CHECK_TYPELOAD (klass);
8028 if (cfg->generic_sharing_context)
8029 context_used = mono_class_check_context_used (klass);
8034 /* FIXME: Decompose later to help abcrem */
8037 args [0] = emit_get_rgctx_klass (cfg, context_used,
8038 mono_array_class_get (klass, 1), MONO_RGCTX_INFO_VTABLE);
8043 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
8045 if (cfg->opt & MONO_OPT_SHARED) {
8046 /* Decompose now to avoid problems with references to the domainvar */
8047 MonoInst *iargs [3];
8049 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8050 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8053 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
8055 /* Decompose later since it is needed by abcrem */
8056 MONO_INST_NEW (cfg, ins, OP_NEWARR);
8057 ins->dreg = alloc_preg (cfg);
8058 ins->sreg1 = sp [0]->dreg;
8059 ins->inst_newa_class = klass;
8060 ins->type = STACK_OBJ;
8062 MONO_ADD_INS (cfg->cbb, ins);
8063 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8064 cfg->cbb->has_array_access = TRUE;
8066 /* Needed so mono_emit_load_get_addr () gets called */
8067 mono_get_got_var (cfg);
8077 * we inline/optimize the initialization sequence if possible.
8078 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
8079 * for small sizes open code the memcpy
8080 * ensure the rva field is big enough
8082 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
8083 MonoMethod *memcpy_method = get_memcpy_method ();
8084 MonoInst *iargs [3];
8085 int add_reg = alloc_preg (cfg);
8087 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
8088 if (cfg->compile_aot) {
8089 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
8091 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
8093 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
8094 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
8103 if (sp [0]->type != STACK_OBJ)
8106 dreg = alloc_preg (cfg);
8107 MONO_INST_NEW (cfg, ins, OP_LDLEN);
8108 ins->dreg = alloc_preg (cfg);
8109 ins->sreg1 = sp [0]->dreg;
8110 ins->type = STACK_I4;
8111 MONO_ADD_INS (cfg->cbb, ins);
8112 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8113 cfg->cbb->has_array_access = TRUE;
8121 if (sp [0]->type != STACK_OBJ)
8124 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8126 klass = mini_get_class (method, read32 (ip + 1), generic_context);
8127 CHECK_TYPELOAD (klass);
8128 /* we need to make sure that this array is exactly the type it needs
8129 * to be for correctness. the wrappers are lax with their usage
8130 * so we need to ignore them here
8132 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly)
8133 mini_emit_check_array_type (cfg, sp [0], mono_array_class_get (klass, 1));
8136 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8140 case CEE_LDELEM_ANY:
8151 case CEE_LDELEM_REF: {
8157 if (*ip == CEE_LDELEM_ANY) {
8159 token = read32 (ip + 1);
8160 klass = mini_get_class (method, token, generic_context);
8161 CHECK_TYPELOAD (klass);
8162 mono_class_init (klass);
8165 klass = array_access_to_klass (*ip);
8167 if (sp [0]->type != STACK_OBJ)
8170 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8172 if (sp [1]->opcode == OP_ICONST) {
8173 int array_reg = sp [0]->dreg;
8174 int index_reg = sp [1]->dreg;
8175 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8177 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8178 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
8180 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8181 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
8184 if (*ip == CEE_LDELEM_ANY)
8197 case CEE_STELEM_REF:
8198 case CEE_STELEM_ANY: {
8204 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8206 if (*ip == CEE_STELEM_ANY) {
8208 token = read32 (ip + 1);
8209 klass = mini_get_class (method, token, generic_context);
8210 CHECK_TYPELOAD (klass);
8211 mono_class_init (klass);
8214 klass = array_access_to_klass (*ip);
8216 if (sp [0]->type != STACK_OBJ)
8219 /* storing a NULL doesn't need any of the complex checks in stelemref */
8220 if (generic_class_is_reference_type (cfg, klass) &&
8221 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
8222 MonoMethod* helper = mono_marshal_get_stelemref ();
8223 MonoInst *iargs [3];
8225 if (sp [0]->type != STACK_OBJ)
8227 if (sp [2]->type != STACK_OBJ)
8234 mono_emit_method_call (cfg, helper, iargs, NULL);
8236 if (sp [1]->opcode == OP_ICONST) {
8237 int array_reg = sp [0]->dreg;
8238 int index_reg = sp [1]->dreg;
8239 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8241 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8242 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
8244 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8245 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
8249 if (*ip == CEE_STELEM_ANY)
8256 case CEE_CKFINITE: {
8260 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
8261 ins->sreg1 = sp [0]->dreg;
8262 ins->dreg = alloc_freg (cfg);
8263 ins->type = STACK_R8;
8264 MONO_ADD_INS (bblock, ins);
8267 mono_decompose_opcode (cfg, ins);
8272 case CEE_REFANYVAL: {
8273 MonoInst *src_var, *src;
8275 int klass_reg = alloc_preg (cfg);
8276 int dreg = alloc_preg (cfg);
8279 MONO_INST_NEW (cfg, ins, *ip);
8282 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8283 CHECK_TYPELOAD (klass);
8284 mono_class_init (klass);
8286 if (cfg->generic_sharing_context)
8287 context_used = mono_class_check_context_used (klass);
8290 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
8292 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
8293 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
8294 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
8297 MonoInst *klass_ins;
8299 klass_ins = emit_get_rgctx_klass (cfg, context_used,
8300 klass, MONO_RGCTX_INFO_KLASS);
8303 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
8304 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
8306 mini_emit_class_check (cfg, klass_reg, klass);
8308 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
8309 ins->type = STACK_MP;
8314 case CEE_MKREFANY: {
8315 MonoInst *loc, *addr;
8318 MONO_INST_NEW (cfg, ins, *ip);
8321 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8322 CHECK_TYPELOAD (klass);
8323 mono_class_init (klass);
8325 if (cfg->generic_sharing_context)
8326 context_used = mono_class_check_context_used (klass);
8328 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
8329 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
8332 MonoInst *const_ins;
8333 int type_reg = alloc_preg (cfg);
8335 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
8336 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
8337 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8338 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8339 } else if (cfg->compile_aot) {
8340 int const_reg = alloc_preg (cfg);
8341 int type_reg = alloc_preg (cfg);
8343 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
8344 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
8345 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8346 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8348 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
8349 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
8351 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
8353 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
8354 ins->type = STACK_VTYPE;
8355 ins->klass = mono_defaults.typed_reference_class;
8362 MonoClass *handle_class;
8364 CHECK_STACK_OVF (1);
8367 n = read32 (ip + 1);
8369 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
8370 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
8371 handle = mono_method_get_wrapper_data (method, n);
8372 handle_class = mono_method_get_wrapper_data (method, n + 1);
8373 if (handle_class == mono_defaults.typehandle_class)
8374 handle = &((MonoClass*)handle)->byval_arg;
8377 handle = mono_ldtoken (image, n, &handle_class, generic_context);
8381 mono_class_init (handle_class);
8382 if (cfg->generic_sharing_context) {
8383 if (handle_class == mono_defaults.typehandle_class) {
8384 /* If we get a MONO_TYPE_CLASS
8385 then we need to provide the
8387 instantiation of it. */
8388 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
8391 context_used = mono_class_check_context_used (mono_class_from_mono_type (handle));
8392 } else if (handle_class == mono_defaults.fieldhandle_class)
8393 context_used = mono_class_check_context_used (((MonoClassField*)handle)->parent);
8394 else if (handle_class == mono_defaults.methodhandle_class)
8395 context_used = mono_method_check_context_used (handle);
8397 g_assert_not_reached ();
8400 if ((cfg->opt & MONO_OPT_SHARED) &&
8401 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
8402 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
8403 MonoInst *addr, *vtvar, *iargs [3];
8404 int method_context_used;
8406 if (cfg->generic_sharing_context)
8407 method_context_used = mono_method_check_context_used (method);
8409 method_context_used = 0;
8411 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8413 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
8414 EMIT_NEW_ICONST (cfg, iargs [1], n);
8415 if (method_context_used) {
8416 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
8417 method, MONO_RGCTX_INFO_METHOD);
8418 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
8420 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
8421 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
8423 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8425 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8427 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8429 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
8430 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
8431 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
8432 (cmethod->klass == mono_defaults.monotype_class->parent) &&
8433 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
8434 MonoClass *tclass = mono_class_from_mono_type (handle);
8436 mono_class_init (tclass);
8438 ins = emit_get_rgctx_klass (cfg, context_used,
8439 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
8440 } else if (cfg->compile_aot) {
8441 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
8443 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
8445 ins->type = STACK_OBJ;
8446 ins->klass = cmethod->klass;
8449 MonoInst *addr, *vtvar;
8451 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8454 if (handle_class == mono_defaults.typehandle_class) {
8455 ins = emit_get_rgctx_klass (cfg, context_used,
8456 mono_class_from_mono_type (handle),
8457 MONO_RGCTX_INFO_TYPE);
8458 } else if (handle_class == mono_defaults.methodhandle_class) {
8459 ins = emit_get_rgctx_method (cfg, context_used,
8460 handle, MONO_RGCTX_INFO_METHOD);
8461 } else if (handle_class == mono_defaults.fieldhandle_class) {
8462 ins = emit_get_rgctx_field (cfg, context_used,
8463 handle, MONO_RGCTX_INFO_CLASS_FIELD);
8465 g_assert_not_reached ();
8467 } else if (cfg->compile_aot) {
8468 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n);
8470 EMIT_NEW_PCONST (cfg, ins, handle);
8472 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8473 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8474 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8484 MONO_INST_NEW (cfg, ins, OP_THROW);
8486 ins->sreg1 = sp [0]->dreg;
8488 bblock->out_of_line = TRUE;
8489 MONO_ADD_INS (bblock, ins);
8490 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
8491 MONO_ADD_INS (bblock, ins);
8494 link_bblock (cfg, bblock, end_bblock);
8495 start_new_bblock = 1;
8497 case CEE_ENDFINALLY:
8498 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
8499 MONO_ADD_INS (bblock, ins);
8501 start_new_bblock = 1;
8504 * Control will leave the method so empty the stack, otherwise
8505 * the next basic block will start with a nonempty stack.
8507 while (sp != stack_start) {
8515 if (*ip == CEE_LEAVE) {
8517 target = ip + 5 + (gint32)read32(ip + 1);
8520 target = ip + 2 + (signed char)(ip [1]);
8523 /* empty the stack */
8524 while (sp != stack_start) {
8529 * If this leave statement is in a catch block, check for a
8530 * pending exception, and rethrow it if necessary.
8532 for (i = 0; i < header->num_clauses; ++i) {
8533 MonoExceptionClause *clause = &header->clauses [i];
8536 * Use <= in the final comparison to handle clauses with multiple
8537 * leave statements, like in bug #78024.
8538 * The ordering of the exception clauses guarantees that we find the
8541 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len)) {
8543 MonoBasicBlock *dont_throw;
8548 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
8551 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
8553 NEW_BBLOCK (cfg, dont_throw);
8556 * Currently, we always rethrow the abort exception, despite the
8557 * fact that this is not correct. See thread6.cs for an example.
8558 * But propagating the abort exception is more important than
8559 * getting the semantics right.
8561 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
8562 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
8563 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
8565 MONO_START_BB (cfg, dont_throw);
8570 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
8572 for (tmp = handlers; tmp; tmp = tmp->next) {
8574 link_bblock (cfg, bblock, tblock);
8575 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
8576 ins->inst_target_bb = tblock;
8577 MONO_ADD_INS (bblock, ins);
8579 g_list_free (handlers);
8582 MONO_INST_NEW (cfg, ins, OP_BR);
8583 MONO_ADD_INS (bblock, ins);
8584 GET_BBLOCK (cfg, tblock, target);
8585 link_bblock (cfg, bblock, tblock);
8586 ins->inst_target_bb = tblock;
8587 start_new_bblock = 1;
8589 if (*ip == CEE_LEAVE)
8598 * Mono specific opcodes
8600 case MONO_CUSTOM_PREFIX: {
8602 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
8606 case CEE_MONO_ICALL: {
8608 MonoJitICallInfo *info;
8610 token = read32 (ip + 2);
8611 func = mono_method_get_wrapper_data (method, token);
8612 info = mono_find_jit_icall_by_addr (func);
8615 CHECK_STACK (info->sig->param_count);
8616 sp -= info->sig->param_count;
8618 ins = mono_emit_jit_icall (cfg, info->func, sp);
8619 if (!MONO_TYPE_IS_VOID (info->sig->ret))
8623 inline_costs += 10 * num_calls++;
8627 case CEE_MONO_LDPTR: {
8630 CHECK_STACK_OVF (1);
8632 token = read32 (ip + 2);
8634 ptr = mono_method_get_wrapper_data (method, token);
8635 if (cfg->compile_aot && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (strstr (method->name, "__icall_wrapper_") == method->name)) {
8636 MonoJitICallInfo *callinfo;
8637 const char *icall_name;
8639 icall_name = method->name + strlen ("__icall_wrapper_");
8640 g_assert (icall_name);
8641 callinfo = mono_find_jit_icall_by_name (icall_name);
8642 g_assert (callinfo);
8644 if (ptr == callinfo->func) {
8645 /* Will be transformed into an AOTCONST later */
8646 EMIT_NEW_PCONST (cfg, ins, ptr);
8652 /* FIXME: Generalize this */
8653 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
8654 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
8659 EMIT_NEW_PCONST (cfg, ins, ptr);
8662 inline_costs += 10 * num_calls++;
8663 /* Can't embed random pointers into AOT code */
8664 cfg->disable_aot = 1;
8667 case CEE_MONO_ICALL_ADDR: {
8668 MonoMethod *cmethod;
8671 CHECK_STACK_OVF (1);
8673 token = read32 (ip + 2);
8675 cmethod = mono_method_get_wrapper_data (method, token);
8677 if (cfg->compile_aot) {
8678 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
8680 ptr = mono_lookup_internal_call (cmethod);
8682 EMIT_NEW_PCONST (cfg, ins, ptr);
8688 case CEE_MONO_VTADDR: {
8689 MonoInst *src_var, *src;
8695 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
8696 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
8701 case CEE_MONO_NEWOBJ: {
8702 MonoInst *iargs [2];
8704 CHECK_STACK_OVF (1);
8706 token = read32 (ip + 2);
8707 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
8708 mono_class_init (klass);
8709 NEW_DOMAINCONST (cfg, iargs [0]);
8710 MONO_ADD_INS (cfg->cbb, iargs [0]);
8711 NEW_CLASSCONST (cfg, iargs [1], klass);
8712 MONO_ADD_INS (cfg->cbb, iargs [1]);
8713 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
8715 inline_costs += 10 * num_calls++;
8718 case CEE_MONO_OBJADDR:
8721 MONO_INST_NEW (cfg, ins, OP_MOVE);
8722 ins->dreg = alloc_preg (cfg);
8723 ins->sreg1 = sp [0]->dreg;
8724 ins->type = STACK_MP;
8725 MONO_ADD_INS (cfg->cbb, ins);
8729 case CEE_MONO_LDNATIVEOBJ:
8731 * Similar to LDOBJ, but instead load the unmanaged
8732 * representation of the vtype to the stack.
8737 token = read32 (ip + 2);
8738 klass = mono_method_get_wrapper_data (method, token);
8739 g_assert (klass->valuetype);
8740 mono_class_init (klass);
8743 MonoInst *src, *dest, *temp;
8746 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
8747 temp->backend.is_pinvoke = 1;
8748 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
8749 mini_emit_stobj (cfg, dest, src, klass, TRUE);
8751 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
8752 dest->type = STACK_VTYPE;
8753 dest->klass = klass;
8759 case CEE_MONO_RETOBJ: {
8761 * Same as RET, but return the native representation of a vtype
8764 g_assert (cfg->ret);
8765 g_assert (mono_method_signature (method)->pinvoke);
8770 token = read32 (ip + 2);
8771 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
8773 if (!cfg->vret_addr) {
8774 g_assert (cfg->ret_var_is_local);
8776 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
8778 EMIT_NEW_RETLOADA (cfg, ins);
8780 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
8782 if (sp != stack_start)
8785 MONO_INST_NEW (cfg, ins, OP_BR);
8786 ins->inst_target_bb = end_bblock;
8787 MONO_ADD_INS (bblock, ins);
8788 link_bblock (cfg, bblock, end_bblock);
8789 start_new_bblock = 1;
8793 case CEE_MONO_CISINST:
8794 case CEE_MONO_CCASTCLASS: {
8799 token = read32 (ip + 2);
8800 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
8801 if (ip [1] == CEE_MONO_CISINST)
8802 ins = handle_cisinst (cfg, klass, sp [0]);
8804 ins = handle_ccastclass (cfg, klass, sp [0]);
8810 case CEE_MONO_SAVE_LMF:
8811 case CEE_MONO_RESTORE_LMF:
8812 #ifdef MONO_ARCH_HAVE_LMF_OPS
8813 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
8814 MONO_ADD_INS (bblock, ins);
8815 cfg->need_lmf_area = TRUE;
8819 case CEE_MONO_CLASSCONST:
8820 CHECK_STACK_OVF (1);
8822 token = read32 (ip + 2);
8823 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
8826 inline_costs += 10 * num_calls++;
8828 case CEE_MONO_NOT_TAKEN:
8829 bblock->out_of_line = TRUE;
8833 CHECK_STACK_OVF (1);
8835 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
8836 ins->dreg = alloc_preg (cfg);
8837 ins->inst_offset = (gint32)read32 (ip + 2);
8838 ins->type = STACK_PTR;
8839 MONO_ADD_INS (bblock, ins);
8844 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
8854 /* somewhat similar to LDTOKEN */
8855 MonoInst *addr, *vtvar;
8856 CHECK_STACK_OVF (1);
8857 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
8859 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8860 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
8862 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8863 ins->type = STACK_VTYPE;
8864 ins->klass = mono_defaults.argumenthandle_class;
8877 * The following transforms:
8878 * CEE_CEQ into OP_CEQ
8879 * CEE_CGT into OP_CGT
8880 * CEE_CGT_UN into OP_CGT_UN
8881 * CEE_CLT into OP_CLT
8882 * CEE_CLT_UN into OP_CLT_UN
8884 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
8886 MONO_INST_NEW (cfg, ins, cmp->opcode);
8888 cmp->sreg1 = sp [0]->dreg;
8889 cmp->sreg2 = sp [1]->dreg;
8890 type_from_op (cmp, sp [0], sp [1]);
8892 if ((sp [0]->type == STACK_I8) || ((sizeof (gpointer) == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
8893 cmp->opcode = OP_LCOMPARE;
8894 else if (sp [0]->type == STACK_R8)
8895 cmp->opcode = OP_FCOMPARE;
8897 cmp->opcode = OP_ICOMPARE;
8898 MONO_ADD_INS (bblock, cmp);
8899 ins->type = STACK_I4;
8900 ins->dreg = alloc_dreg (cfg, ins->type);
8901 type_from_op (ins, sp [0], sp [1]);
8903 if (cmp->opcode == OP_FCOMPARE) {
8905 * The backends expect the fceq opcodes to do the
8908 cmp->opcode = OP_NOP;
8909 ins->sreg1 = cmp->sreg1;
8910 ins->sreg2 = cmp->sreg2;
8912 MONO_ADD_INS (bblock, ins);
8919 MonoMethod *cil_method, *ctor_method;
8920 gboolean needs_static_rgctx_invoke;
8922 CHECK_STACK_OVF (1);
8924 n = read32 (ip + 2);
8925 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
8928 mono_class_init (cmethod->klass);
8930 mono_save_token_info (cfg, image, n, cmethod);
8932 if (cfg->generic_sharing_context)
8933 context_used = mono_method_check_context_used (cmethod);
8935 needs_static_rgctx_invoke = mono_method_needs_static_rgctx_invoke (cmethod, TRUE);
8937 cil_method = cmethod;
8938 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
8939 METHOD_ACCESS_FAILURE;
8941 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
8942 if (check_linkdemand (cfg, method, cmethod))
8944 CHECK_CFG_EXCEPTION;
8945 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
8946 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
8950 * Optimize the common case of ldftn+delegate creation
8952 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE) && !defined(HAVE_WRITE_BARRIERS)
8953 /* FIXME: SGEN support */
8954 /* FIXME: handle shared static generic methods */
8955 /* FIXME: handle this in shared code */
8956 if (!needs_static_rgctx_invoke && !context_used && (sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ) && (ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context)) && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
8957 MonoInst *target_ins;
8960 if (cfg->verbose_level > 3)
8961 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
8962 target_ins = sp [-1];
8964 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod);
8972 if (needs_static_rgctx_invoke)
8973 cmethod = mono_marshal_get_static_rgctx_invoke (cmethod);
8975 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
8976 } else if (needs_static_rgctx_invoke) {
8977 EMIT_NEW_METHODCONST (cfg, argconst, mono_marshal_get_static_rgctx_invoke (cmethod));
8979 EMIT_NEW_METHODCONST (cfg, argconst, cmethod);
8981 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
8985 inline_costs += 10 * num_calls++;
8988 case CEE_LDVIRTFTN: {
8993 n = read32 (ip + 2);
8994 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
8997 mono_class_init (cmethod->klass);
8999 if (cfg->generic_sharing_context)
9000 context_used = mono_method_check_context_used (cmethod);
9002 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9003 if (check_linkdemand (cfg, method, cmethod))
9005 CHECK_CFG_EXCEPTION;
9006 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9007 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9014 args [1] = emit_get_rgctx_method (cfg, context_used,
9015 cmethod, MONO_RGCTX_INFO_METHOD);
9016 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
9018 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
9019 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
9023 inline_costs += 10 * num_calls++;
9027 CHECK_STACK_OVF (1);
9029 n = read16 (ip + 2);
9031 EMIT_NEW_ARGLOAD (cfg, ins, n);
9036 CHECK_STACK_OVF (1);
9038 n = read16 (ip + 2);
9040 NEW_ARGLOADA (cfg, ins, n);
9041 MONO_ADD_INS (cfg->cbb, ins);
9049 n = read16 (ip + 2);
9051 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
9053 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
9057 CHECK_STACK_OVF (1);
9059 n = read16 (ip + 2);
9061 EMIT_NEW_LOCLOAD (cfg, ins, n);
9066 unsigned char *tmp_ip;
9067 CHECK_STACK_OVF (1);
9069 n = read16 (ip + 2);
9072 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
9078 EMIT_NEW_LOCLOADA (cfg, ins, n);
9087 n = read16 (ip + 2);
9089 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
9091 emit_stloc_ir (cfg, sp, header, n);
9098 if (sp != stack_start)
9100 if (cfg->method != method)
9102 * Inlining this into a loop in a parent could lead to
9103 * stack overflows which is different behavior than the
9104 * non-inlined case, thus disable inlining in this case.
9106 goto inline_failure;
9108 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
9109 ins->dreg = alloc_preg (cfg);
9110 ins->sreg1 = sp [0]->dreg;
9111 ins->type = STACK_PTR;
9112 MONO_ADD_INS (cfg->cbb, ins);
9114 cfg->flags |= MONO_CFG_HAS_ALLOCA;
9115 if (header->init_locals)
9116 ins->flags |= MONO_INST_INIT;
9121 case CEE_ENDFILTER: {
9122 MonoExceptionClause *clause, *nearest;
9123 int cc, nearest_num;
9127 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
9129 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
9130 ins->sreg1 = (*sp)->dreg;
9131 MONO_ADD_INS (bblock, ins);
9132 start_new_bblock = 1;
9137 for (cc = 0; cc < header->num_clauses; ++cc) {
9138 clause = &header->clauses [cc];
9139 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
9140 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
9141 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
9147 if ((ip - header->code) != nearest->handler_offset)
9152 case CEE_UNALIGNED_:
9153 ins_flag |= MONO_INST_UNALIGNED;
9154 /* FIXME: record alignment? we can assume 1 for now */
9159 ins_flag |= MONO_INST_VOLATILE;
9163 ins_flag |= MONO_INST_TAILCALL;
9164 cfg->flags |= MONO_CFG_HAS_TAIL;
9165 /* Can't inline tail calls at this time */
9166 inline_costs += 100000;
9173 token = read32 (ip + 2);
9174 klass = mini_get_class (method, token, generic_context);
9175 CHECK_TYPELOAD (klass);
9176 if (generic_class_is_reference_type (cfg, klass))
9177 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
9179 mini_emit_initobj (cfg, *sp, NULL, klass);
9183 case CEE_CONSTRAINED_:
9185 token = read32 (ip + 2);
9186 constrained_call = mono_class_get_full (image, token, generic_context);
9187 CHECK_TYPELOAD (constrained_call);
9192 MonoInst *iargs [3];
9196 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
9197 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
9198 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
9199 /* emit_memset only works when val == 0 */
9200 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
9205 if (ip [1] == CEE_CPBLK) {
9206 MonoMethod *memcpy_method = get_memcpy_method ();
9207 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
9209 MonoMethod *memset_method = get_memset_method ();
9210 mono_emit_method_call (cfg, memset_method, iargs, NULL);
9220 ins_flag |= MONO_INST_NOTYPECHECK;
9222 ins_flag |= MONO_INST_NORANGECHECK;
9223 /* we ignore the no-nullcheck for now since we
9224 * really do it explicitly only when doing callvirt->call
9230 int handler_offset = -1;
9232 for (i = 0; i < header->num_clauses; ++i) {
9233 MonoExceptionClause *clause = &header->clauses [i];
9234 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
9235 handler_offset = clause->handler_offset;
9240 bblock->flags |= BB_EXCEPTION_UNSAFE;
9242 g_assert (handler_offset != -1);
9244 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
9245 MONO_INST_NEW (cfg, ins, OP_RETHROW);
9246 ins->sreg1 = load->dreg;
9247 MONO_ADD_INS (bblock, ins);
9249 link_bblock (cfg, bblock, end_bblock);
9250 start_new_bblock = 1;
9258 CHECK_STACK_OVF (1);
9260 token = read32 (ip + 2);
9261 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC) {
9262 MonoType *type = mono_type_create_from_typespec (image, token);
9263 token = mono_type_size (type, &ialign);
9265 MonoClass *klass = mono_class_get_full (image, token, generic_context);
9266 CHECK_TYPELOAD (klass);
9267 mono_class_init (klass);
9268 token = mono_class_value_size (klass, &align);
9270 EMIT_NEW_ICONST (cfg, ins, token);
9275 case CEE_REFANYTYPE: {
9276 MonoInst *src_var, *src;
9282 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9284 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
9285 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
9286 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
9296 g_error ("opcode 0xfe 0x%02x not handled", ip [1]);
9301 g_error ("opcode 0x%02x not handled", *ip);
9304 if (start_new_bblock != 1)
9307 bblock->cil_length = ip - bblock->cil_code;
9308 bblock->next_bb = end_bblock;
9310 if (cfg->method == method && cfg->domainvar) {
9312 MonoInst *get_domain;
9314 cfg->cbb = init_localsbb;
9316 if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
9317 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
9320 get_domain->dreg = alloc_preg (cfg);
9321 MONO_ADD_INS (cfg->cbb, get_domain);
9323 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
9324 MONO_ADD_INS (cfg->cbb, store);
9327 if (cfg->method == method && cfg->got_var)
9328 mono_emit_load_got_addr (cfg);
9330 if (header->init_locals) {
9333 cfg->cbb = init_localsbb;
9335 for (i = 0; i < header->num_locals; ++i) {
9336 MonoType *ptype = header->locals [i];
9337 int t = ptype->type;
9338 dreg = cfg->locals [i]->dreg;
9340 if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
9341 t = ptype->data.klass->enum_basetype->type;
9343 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9344 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
9345 MONO_EMIT_NEW_ICONST (cfg, cfg->locals [i]->dreg, 0);
9346 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
9347 MONO_EMIT_NEW_I8CONST (cfg, cfg->locals [i]->dreg, 0);
9348 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
9349 MONO_INST_NEW (cfg, ins, OP_R8CONST);
9350 ins->type = STACK_R8;
9351 ins->inst_p0 = (void*)&r8_0;
9352 ins->dreg = alloc_dreg (cfg, STACK_R8);
9353 MONO_ADD_INS (init_localsbb, ins);
9354 EMIT_NEW_LOCSTORE (cfg, store, i, ins);
9355 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
9356 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
9357 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
9359 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9366 if (cfg->method == method) {
9368 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
9369 bb->region = mono_find_block_region (cfg, bb->real_offset);
9371 mono_create_spvar_for_region (cfg, bb->region);
9372 if (cfg->verbose_level > 2)
9373 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
9377 g_slist_free (class_inits);
9378 dont_inline = g_list_remove (dont_inline, method);
9380 if (inline_costs < 0) {
9383 /* Method is too large */
9384 mname = mono_method_full_name (method, TRUE);
9385 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
9386 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
9391 if ((cfg->verbose_level > 2) && (cfg->method == method))
9392 mono_print_code (cfg, "AFTER METHOD-TO-IR");
9394 return inline_costs;
9397 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
9398 g_slist_free (class_inits);
9399 dont_inline = g_list_remove (dont_inline, method);
9403 g_slist_free (class_inits);
9404 dont_inline = g_list_remove (dont_inline, method);
9408 g_slist_free (class_inits);
9409 dont_inline = g_list_remove (dont_inline, method);
9410 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
9414 g_slist_free (class_inits);
9415 dont_inline = g_list_remove (dont_inline, method);
9416 set_exception_type_from_invalid_il (cfg, method, ip);
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Map a register-source memory store opcode (OP_STORE*_MEMBASE_REG) to its
 * immediate-source counterpart (OP_STORE*_MEMBASE_IMM), so a constant operand
 * can be folded directly into the store instruction.  Any opcode without an
 * immediate variant hits g_assert_not_reached().
 *
 * NOTE(review): the function's `static int` header, braces and `switch`
 * scaffolding appear to have been lost in an automated extraction of this
 * file — restore from upstream before compiling.
 */
9421 store_membase_reg_to_store_membase_imm (int opcode)
9424 case OP_STORE_MEMBASE_REG:
9425 return OP_STORE_MEMBASE_IMM;
9426 case OP_STOREI1_MEMBASE_REG:
9427 return OP_STOREI1_MEMBASE_IMM;
9428 case OP_STOREI2_MEMBASE_REG:
9429 return OP_STOREI2_MEMBASE_IMM;
9430 case OP_STOREI4_MEMBASE_REG:
9431 return OP_STOREI4_MEMBASE_IMM;
9432 case OP_STOREI8_MEMBASE_REG:
9433 return OP_STOREI8_MEMBASE_IMM;
/* No immediate form exists for the remaining store opcodes. */
9435 g_assert_not_reached ();
9441 #endif /* DISABLE_JIT */
/*
 * mono_op_to_op_imm:
 *
 *   Map a two-register opcode to the equivalent opcode taking an immediate
 * second operand (the ..._IMM variant), e.g. OP_ICOMPARE -> OP_ICOMPARE_IMM,
 * OP_STORE_MEMBASE_REG -> OP_STORE_MEMBASE_IMM.  Used by the JIT to fold a
 * known-constant source directly into the instruction.
 *
 * NOTE(review): many of the `case` labels pairing with the `return`
 * statements below (e.g. for OP_IDIV_UN_IMM, OP_IREM_UN_IMM) have been lost
 * in an automated extraction of this file — restore from upstream.
 */
9444 mono_op_to_op_imm (int opcode)
9454 return OP_IDIV_UN_IMM;
9458 return OP_IREM_UN_IMM;
9472 return OP_ISHR_UN_IMM;
9489 return OP_LSHR_UN_IMM;
9492 return OP_COMPARE_IMM;
9494 return OP_ICOMPARE_IMM;
9496 return OP_LCOMPARE_IMM;
9498 case OP_STORE_MEMBASE_REG:
9499 return OP_STORE_MEMBASE_IMM;
9500 case OP_STOREI1_MEMBASE_REG:
9501 return OP_STOREI1_MEMBASE_IMM;
9502 case OP_STOREI2_MEMBASE_REG:
9503 return OP_STOREI2_MEMBASE_IMM;
9504 case OP_STOREI4_MEMBASE_REG:
9505 return OP_STOREI4_MEMBASE_IMM;
/* x86/amd64-specific opcodes that also have immediate forms. */
9507 #if defined(__i386__) || defined (__x86_64__)
9509 return OP_X86_PUSH_IMM;
9510 case OP_X86_COMPARE_MEMBASE_REG:
9511 return OP_X86_COMPARE_MEMBASE_IMM;
9513 #if defined(__x86_64__)
9514 case OP_AMD64_ICOMPARE_MEMBASE_REG:
9515 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
9517 case OP_VOIDCALL_REG:
9526 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Map a CIL indirect-load opcode (CEE_LDIND_*) to the JIT's typed
 * memory-load opcode (OP_LOAD*_MEMBASE) — presumably one case per LDIND
 * element type; the `case` labels themselves were lost in an automated
 * extraction of this file, so only the returns remain visible.  Unknown
 * opcodes hit g_assert_not_reached().
 */
9533 ldind_to_load_membase (int opcode)
9537 return OP_LOADI1_MEMBASE;
9539 return OP_LOADU1_MEMBASE;
9541 return OP_LOADI2_MEMBASE;
9543 return OP_LOADU2_MEMBASE;
9545 return OP_LOADI4_MEMBASE;
9547 return OP_LOADU4_MEMBASE;
/* Pointer-sized loads collapse onto the generic OP_LOAD_MEMBASE. */
9549 return OP_LOAD_MEMBASE;
9551 return OP_LOAD_MEMBASE;
9553 return OP_LOADI8_MEMBASE;
9555 return OP_LOADR4_MEMBASE;
9557 return OP_LOADR8_MEMBASE;
9559 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Map a CIL indirect-store opcode (CEE_STIND_*) to the JIT's typed
 * memory-store opcode (OP_STORE*_MEMBASE_REG).  As with
 * ldind_to_load_membase, the `case` labels were lost in an automated
 * extraction of this file — restore from upstream.  Unknown opcodes hit
 * g_assert_not_reached().
 */
9566 stind_to_store_membase (int opcode)
9570 return OP_STOREI1_MEMBASE_REG;
9572 return OP_STOREI2_MEMBASE_REG;
9574 return OP_STOREI4_MEMBASE_REG;
/* Pointer-sized stores use the generic OP_STORE_MEMBASE_REG. */
9577 return OP_STORE_MEMBASE_REG;
9579 return OP_STOREI8_MEMBASE_REG;
9581 return OP_STORER4_MEMBASE_REG;
9583 return OP_STORER8_MEMBASE_REG;
9585 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map a base+offset memory load (OP_LOAD*_MEMBASE) to the corresponding
 * absolute-address load (OP_LOAD*_MEM), available only on x86/amd64 (hence
 * the #ifdef guard and the FIXME asking for a MONO_ARCH_HAVE_LOAD_MEM
 * macro).  64-bit loads are mapped only when pointers are 8 bytes.
 */
9592 mono_load_membase_to_load_mem (int opcode)
9594 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
9595 #if defined(__i386__) || defined(__x86_64__)
9597 case OP_LOAD_MEMBASE:
9599 case OP_LOADU1_MEMBASE:
9600 return OP_LOADU1_MEM;
9601 case OP_LOADU2_MEMBASE:
9602 return OP_LOADU2_MEM;
9603 case OP_LOADI4_MEMBASE:
9604 return OP_LOADI4_MEM;
9605 case OP_LOADU4_MEMBASE:
9606 return OP_LOADU4_MEM;
9607 #if SIZEOF_VOID_P == 8
9608 case OP_LOADI8_MEMBASE:
9609 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   Fuse an ALU operation with the following store: given the ALU `opcode`
 * and the `store_opcode` that consumes its result, return the x86/amd64
 * read-modify-write opcode that operates directly on the memory destination
 * (e.g. add -> OP_X86_ADD_MEMBASE_REG / _IMM).  The guards at the top of
 * each arch section bail out unless the store is a plain pointer-sized or
 * 4/8-byte integer store, since only those match the fused forms.
 *
 * NOTE(review): the `case` labels and the fallthrough return for
 * non-fusable opcodes are not visible here (lost in extraction) — confirm
 * the sentinel value against upstream.
 */
9618 op_to_op_dest_membase (int store_opcode, int opcode)
9620 #if defined(__i386__)
9621 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
9626 return OP_X86_ADD_MEMBASE_REG;
9628 return OP_X86_SUB_MEMBASE_REG;
9630 return OP_X86_AND_MEMBASE_REG;
9632 return OP_X86_OR_MEMBASE_REG;
9634 return OP_X86_XOR_MEMBASE_REG;
9637 return OP_X86_ADD_MEMBASE_IMM;
9640 return OP_X86_SUB_MEMBASE_IMM;
9643 return OP_X86_AND_MEMBASE_IMM;
9646 return OP_X86_OR_MEMBASE_IMM;
9649 return OP_X86_XOR_MEMBASE_IMM;
/* amd64 additionally accepts 8-byte stores and has 64-bit (AMD64_*) forms. */
9655 #if defined(__x86_64__)
9656 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
9661 return OP_X86_ADD_MEMBASE_REG;
9663 return OP_X86_SUB_MEMBASE_REG;
9665 return OP_X86_AND_MEMBASE_REG;
9667 return OP_X86_OR_MEMBASE_REG;
9669 return OP_X86_XOR_MEMBASE_REG;
9671 return OP_X86_ADD_MEMBASE_IMM;
9673 return OP_X86_SUB_MEMBASE_IMM;
9675 return OP_X86_AND_MEMBASE_IMM;
9677 return OP_X86_OR_MEMBASE_IMM;
9679 return OP_X86_XOR_MEMBASE_IMM;
9681 return OP_AMD64_ADD_MEMBASE_REG;
9683 return OP_AMD64_SUB_MEMBASE_REG;
9685 return OP_AMD64_AND_MEMBASE_REG;
9687 return OP_AMD64_OR_MEMBASE_REG;
9689 return OP_AMD64_XOR_MEMBASE_REG;
9692 return OP_AMD64_ADD_MEMBASE_IMM;
9695 return OP_AMD64_SUB_MEMBASE_IMM;
9698 return OP_AMD64_AND_MEMBASE_IMM;
9701 return OP_AMD64_OR_MEMBASE_IMM;
9704 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Fuse a condition-materializing opcode with the byte store that follows
 * it: on x86/amd64, when the result is stored with OP_STOREI1_MEMBASE_REG,
 * return the SETcc-to-memory form (OP_X86_SETEQ_MEMBASE /
 * OP_X86_SETNE_MEMBASE).  The `case` labels selecting between the two
 * branches are not visible here (lost in extraction) — confirm against
 * upstream.
 */
9714 op_to_op_store_membase (int store_opcode, int opcode)
9716 #if defined(__i386__) || defined(__x86_64__)
9719 if (store_opcode == OP_STOREI1_MEMBASE_REG)
9720 return OP_X86_SETEQ_MEMBASE;
9722 if (store_opcode == OP_STOREI1_MEMBASE_REG)
9723 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Fuse a memory load feeding the FIRST source operand of `opcode` into a
 * single memory-operand instruction: given the `load_opcode` that produced
 * src1, return the x86/amd64 form that reads that operand straight from
 * memory (push/compare *_MEMBASE variants).  Guards restrict fusion to
 * loads whose width matches the operation; the FIXME notes unresolved
 * sign-extension issues with the byte-compare special case.
 */
9731 op_to_op_src1_membase (int load_opcode, int opcode)
9734 /* FIXME: This has sign extension issues */
9736 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
9737 return OP_X86_COMPARE_MEMBASE8_IMM;
/* Only pointer-sized / 32-bit loads can feed the 32-bit memory forms. */
9740 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
9745 return OP_X86_PUSH_MEMBASE;
9746 case OP_COMPARE_IMM:
9747 case OP_ICOMPARE_IMM:
9748 return OP_X86_COMPARE_MEMBASE_IMM;
9751 return OP_X86_COMPARE_MEMBASE_REG;
/* amd64 section — same idea with 64-bit-aware guards. */
9756 /* FIXME: This has sign extension issues */
9758 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
9759 return OP_X86_COMPARE_MEMBASE8_IMM;
9764 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
9765 return OP_X86_PUSH_MEMBASE;
/* The immediate-compare fusions below are disabled (32-bit-imm limit). */
9767 /* FIXME: This only works for 32 bit immediates
9768 case OP_COMPARE_IMM:
9769 case OP_LCOMPARE_IMM:
9770 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
9771 return OP_AMD64_COMPARE_MEMBASE_IMM;
9773 case OP_ICOMPARE_IMM:
9774 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9775 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
9779 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
9780 return OP_AMD64_COMPARE_MEMBASE_REG;
9783 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9784 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Fuse a memory load feeding the SECOND source operand of `opcode` into a
 * single reg-op-memory instruction (the x86/amd64 *_REG_MEMBASE forms, e.g.
 * add reg, [mem]).  Each fusion is gated on the load width matching the
 * operation: 4-byte loads feed the 32-bit (X86_*) forms, 8-byte or
 * pointer-sized loads feed the 64-bit (AMD64_*) forms.
 *
 * NOTE(review): the `case` labels pairing with several returns were lost in
 * an automated extraction of this file — restore from upstream.
 */
9793 op_to_op_src2_membase (int load_opcode, int opcode)
9796 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
9802 return OP_X86_COMPARE_REG_MEMBASE;
9804 return OP_X86_ADD_REG_MEMBASE;
9806 return OP_X86_SUB_REG_MEMBASE;
9808 return OP_X86_AND_REG_MEMBASE;
9810 return OP_X86_OR_REG_MEMBASE;
9812 return OP_X86_XOR_REG_MEMBASE;
/* amd64 section — width of the load selects 32-bit vs 64-bit fused form. */
9819 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9820 return OP_AMD64_ICOMPARE_REG_MEMBASE;
9824 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9825 return OP_AMD64_COMPARE_REG_MEMBASE;
9828 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9829 return OP_X86_ADD_REG_MEMBASE;
9831 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9832 return OP_X86_SUB_REG_MEMBASE;
9834 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9835 return OP_X86_AND_REG_MEMBASE;
9837 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9838 return OP_X86_OR_REG_MEMBASE;
9840 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9841 return OP_X86_XOR_REG_MEMBASE;
9843 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9844 return OP_AMD64_ADD_REG_MEMBASE;
9846 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9847 return OP_AMD64_SUB_REG_MEMBASE;
9849 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9850 return OP_AMD64_AND_REG_MEMBASE;
9852 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9853 return OP_AMD64_OR_REG_MEMBASE;
9855 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9856 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm(), but the #if-gated sections first filter out
 * opcodes that are software-emulated on this architecture (long shifts on
 * 32-bit targets without native long-shift ops; mul/div when emulation is
 * enabled) — presumably those return the opcode unchanged so no immediate
 * variant is created for them (the gated case bodies were lost in
 * extraction; confirm against upstream).  Everything else delegates to
 * mono_op_to_op_imm().
 */
9864 mono_op_to_op_imm_noemul (int opcode)
9867 #if SIZEOF_VOID_P == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
9872 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
9880 return mono_op_to_op_imm (opcode);
9887 * mono_handle_global_vregs:
9889 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
9893 mono_handle_global_vregs (MonoCompile *cfg)
/*
 * Promote vregs referenced from more than one basic block to 'global'
 * variables (MonoInst vars visible to the global register allocator), then
 * convert variables used in only one bblock back into local vregs and
 * compress the varinfo/vars tables.
 * NOTE(review): this listing is elided (original line numbers jump), so the
 * comments below describe only the visible code.
 */
9899 	vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
9901 #ifdef MONO_ARCH_SIMD_INTRINSICS
9902 	if (cfg->uses_simd_intrinsics)
9903 		mono_simd_simplify_indirection (cfg);
9906 	/* Find local vregs used in more than one bb */
9907 	for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
9908 		MonoInst *ins = bb->code;
9909 		int block_num = bb->block_num;
9911 		if (cfg->verbose_level > 2)
9912 			printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
9915 		for (; ins; ins = ins->next) {
9916 			const char *spec = INS_INFO (ins->opcode);
9917 			int regtype, regindex;
9920 			if (G_UNLIKELY (cfg->verbose_level > 2))
9921 				mono_print_ins (ins);
9923 			g_assert (ins->opcode >= MONO_CEE_LAST);
/* Visit dreg (index 0), sreg1 (index 1) and sreg2 (index 2) of each ins. */
9925 			for (regindex = 0; regindex < 3; regindex ++) {
9928 				if (regindex == 0) {
9929 					regtype = spec [MONO_INST_DEST];
9933 				} else if (regindex == 1) {
9934 					regtype = spec [MONO_INST_SRC1];
9939 					regtype = spec [MONO_INST_SRC2];
9945 #if SIZEOF_VOID_P == 4
9946 				if (regtype == 'l') {
9948 					 * Since some instructions reference the original long vreg,
9949 					 * and some reference the two component vregs, it is quite hard
9950 					 * to determine when it needs to be global. So be conservative.
9952 					if (!get_vreg_to_inst (cfg, vreg)) {
9953 						mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
9955 						if (cfg->verbose_level > 2)
9956 							printf ("LONG VREG R%d made global.\n", vreg);
9960 					 * Make the component vregs volatile since the optimizations can
9961 					 * get confused otherwise.
9963 					get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
9964 					get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
9968 				g_assert (vreg != -1);
9970 				prev_bb = vreg_to_bb [vreg];
9972 					/* 0 is a valid block num */
9973 					vreg_to_bb [vreg] = block_num + 1;
9974 				} else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
9975 					if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
9978 					if (!get_vreg_to_inst (cfg, vreg)) {
9979 						if (G_UNLIKELY (cfg->verbose_level > 2))
9980 							printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
9984 							mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
9987 							mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
9990 							mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
9993 							g_assert_not_reached ();
9997 					/* Flag as having been used in more than one bb */
9998 					vreg_to_bb [vreg] = -1;
10004 	/* If a variable is used in only one bblock, convert it into a local vreg */
10005 	for (i = 0; i < cfg->num_varinfo; i++) {
10006 		MonoInst *var = cfg->varinfo [i];
10007 		MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
10009 		switch (var->type) {
10015 #if SIZEOF_VOID_P == 8
10018 #if !defined(__i386__) && !defined(MONO_ARCH_SOFT_FLOAT)
10019 			/* Enabling this screws up the fp stack on x86 */
10022 			/* Arguments are implicitly global */
10023 			/* Putting R4 vars into registers doesn't work currently */
10024 			if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg) {
10026 				 * Make sure that the variable's liveness interval doesn't contain a call, since
10027 				 * that would cause the lvreg to be spilled, making the whole optimization
10030 				/* This is too slow for JIT compilation */
/* NOTE(review): vreg_to_bb is stored as block_num + 1 / -1 above but
 * dereferenced as a bblock pointer here; the declaration is elided from this
 * listing — verify the type against the full source. */
10032 				if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
10034 					int def_index, call_index, ins_index;
10035 					gboolean spilled = FALSE;
10040 					for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
10041 						const char *spec = INS_INFO (ins->opcode);
10043 						if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
10044 							def_index = ins_index;
10046 						if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
/* FIX: the second arm previously re-tested SRC1/sreg1 (a copy-paste
 * duplicate), so uses of the variable as the second source operand were
 * never seen and a variable live across a call could be wrongly converted
 * to an lvreg. Test SRC2/sreg2 instead. */
10047 							((spec [MONO_INST_SRC2] != ' ') && (ins->sreg2 == var->dreg))) {
10048 							if (call_index > def_index) {
10054 						if (MONO_IS_CALL (ins))
10055 							call_index = ins_index;
10065 				if (G_UNLIKELY (cfg->verbose_level > 2))
10066 					printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
10067 				var->flags |= MONO_INST_IS_DEAD;
10068 				cfg->vreg_to_inst [var->dreg] = NULL;
10075 	 * Compress the varinfo and vars tables so the liveness computation is faster and
10076 	 * takes up less space.
10079 	for (i = 0; i < cfg->num_varinfo; ++i) {
10080 		MonoInst *var = cfg->varinfo [i];
10081 		if (pos < i && cfg->locals_start == i)
10082 			cfg->locals_start = pos;
10083 		if (!(var->flags & MONO_INST_IS_DEAD)) {
10085 				cfg->varinfo [pos] = cfg->varinfo [i];
10086 				cfg->varinfo [pos]->inst_c0 = pos;
10087 				memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
10088 				cfg->vars [pos].idx = pos;
10089 #if SIZEOF_VOID_P == 4
10090 				if (cfg->varinfo [pos]->type == STACK_I8) {
10091 					/* Modify the two component vars too */
10094 					var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
10095 					var1->inst_c0 = pos;
10096 					var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
10097 					var1->inst_c0 = pos;
10104 	cfg->num_varinfo = pos;
10105 	if (cfg->locals_start > cfg->num_varinfo)
10106 		cfg->locals_start = cfg->num_varinfo;
10110 * mono_spill_global_vars:
10112 * Generate spill code for variables which are not allocated to registers,
10113 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
10114 * code is generated which could be optimized by the local optimization passes.
10117 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
/*
 * Generate spill loads/stores for global variables not allocated to hardware
 * registers: each instruction's dreg/sregs referring to such a variable are
 * rewritten to fresh local vregs and matching LOAD/STORE_MEMBASE ops are
 * inserted around the instruction. Sets *need_local_opts to TRUE when code
 * is emitted (e.g. the LDADDR lowering) which the local optimization passes
 * could clean up further.
 * NOTE(review): this listing is elided (original line numbers jump), so the
 * comments below describe only the visible code.
 */
10119 	MonoBasicBlock *bb;
10121 	int orig_next_vreg;
10122 	guint32 *vreg_to_lvreg;
10124 	guint32 i, lvregs_len;
10125 	gboolean dest_has_lvreg = FALSE;
10126 	guint32 stacktypes [128];
10128 	*need_local_opts = FALSE;
10130 	memset (spec2, 0, sizeof (spec2));
10132 	/* FIXME: Move this function to mini.c */
/* Map INS_INFO spec characters to stack types for alloc_dreg (). */
10133 	stacktypes ['i'] = STACK_PTR;
10134 	stacktypes ['l'] = STACK_I8;
10135 	stacktypes ['f'] = STACK_R8;
10136 #ifdef MONO_ARCH_SIMD_INTRINSICS
10137 	stacktypes ['x'] = STACK_VTYPE;
10140 #if SIZEOF_VOID_P == 4
10141 	/* Create MonoInsts for longs */
10142 	for (i = 0; i < cfg->num_varinfo; i++) {
10143 		MonoInst *ins = cfg->varinfo [i];
10145 		if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
10146 			switch (ins->type) {
10147 #ifdef MONO_ARCH_SOFT_FLOAT
/* Give the two component vars of a stack-allocated long their own
 * REGOFFSET slots inside the parent variable's stack slot. */
10153 					g_assert (ins->opcode == OP_REGOFFSET);
10155 					tree = get_vreg_to_inst (cfg, ins->dreg + 1);
10157 					tree->opcode = OP_REGOFFSET;
10158 					tree->inst_basereg = ins->inst_basereg;
10159 					tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
10161 					tree = get_vreg_to_inst (cfg, ins->dreg + 2);
10163 					tree->opcode = OP_REGOFFSET;
10164 					tree->inst_basereg = ins->inst_basereg;
10165 					tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
10175 	/* FIXME: widening and truncation */
10178 	 * As an optimization, when a variable allocated to the stack is first loaded into
10179 	 * an lvreg, we will remember the lvreg and use it the next time instead of loading
10180 	 * the variable again.
10182 	orig_next_vreg = cfg->next_vreg;
10183 	vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
/* Fixed-size cache of active lvreg entries; each addition below asserts
 * lvregs_len < 1024. */
10184 	lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
10187 	/* Add spill loads/stores */
10188 	for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10191 		if (cfg->verbose_level > 2)
10192 			printf ("\nSPILL BLOCK %d:\n", bb->block_num);
10194 		/* Clear vreg_to_lvreg array */
10195 		for (i = 0; i < lvregs_len; i++)
10196 			vreg_to_lvreg [lvregs [i]] = 0;
10200 		MONO_BB_FOR_EACH_INS (bb, ins) {
10201 			const char *spec = INS_INFO (ins->opcode);
10202 			int regtype, srcindex, sreg, tmp_reg, prev_dreg;
10203 			gboolean store, no_lvreg;
10205 			if (G_UNLIKELY (cfg->verbose_level > 2))
10206 				mono_print_ins (ins);
10208 			if (ins->opcode == OP_NOP)
10212 			 * We handle LDADDR here as well, since it can only be decomposed
10213 			 * when variable addresses are known.
10215 			if (ins->opcode == OP_LDADDR) {
10216 				MonoInst *var = ins->inst_p0;
10218 				if (var->opcode == OP_VTARG_ADDR) {
10219 					/* Happens on SPARC/S390 where vtypes are passed by reference */
10220 					MonoInst *vtaddr = var->inst_left;
10221 					if (vtaddr->opcode == OP_REGVAR) {
10222 						ins->opcode = OP_MOVE;
10223 						ins->sreg1 = vtaddr->dreg;
10225 					else if (var->inst_left->opcode == OP_REGOFFSET) {
10226 						ins->opcode = OP_LOAD_MEMBASE;
10227 						ins->inst_basereg = vtaddr->inst_basereg;
10228 						ins->inst_offset = vtaddr->inst_offset;
10232 					g_assert (var->opcode == OP_REGOFFSET);
/* Lower LDADDR to base-register arithmetic now that frame offsets are
 * known. */
10234 					ins->opcode = OP_ADD_IMM;
10235 					ins->sreg1 = var->inst_basereg;
10236 					ins->inst_imm = var->inst_offset;
10239 				*need_local_opts = TRUE;
10240 				spec = INS_INFO (ins->opcode);
/* Only lowered (non-CIL) opcodes may reach this pass. */
10243 			if (ins->opcode < MONO_CEE_LAST) {
10244 				mono_print_ins (ins);
10245 				g_assert_not_reached ();
10249 			 * Store opcodes have destbasereg in the dreg, but in reality, it is an
/* Temporarily swap dreg/sreg2 of STORE_MEMBASE ops and build a patched
 * spec (spec2) so the code below treats the base register as a source. */
10253 			if (MONO_IS_STORE_MEMBASE (ins)) {
10254 				tmp_reg = ins->dreg;
10255 				ins->dreg = ins->sreg2;
10256 				ins->sreg2 = tmp_reg;
10259 				spec2 [MONO_INST_DEST] = ' ';
10260 				spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10261 				spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10263 			} else if (MONO_IS_STORE_MEMINDEX (ins))
10264 				g_assert_not_reached ();
10269 			if (G_UNLIKELY (cfg->verbose_level > 2))
10270 				printf ("\t %.3s %d %d %d\n", spec, ins->dreg, ins->sreg1, ins->sreg2);
/* Rewrite the destination register. */
10275 			regtype = spec [MONO_INST_DEST];
10276 			g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
10279 			if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
10280 				MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
10281 				MonoInst *store_ins;
10284 				store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
10286 				if (var->opcode == OP_REGVAR) {
10287 					ins->dreg = var->dreg;
10288 				} else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
10290 					 * Instead of emitting a load+store, use a _membase opcode.
10292 					g_assert (var->opcode == OP_REGOFFSET);
10293 					if (ins->opcode == OP_MOVE) {
10296 						ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
10297 						ins->inst_basereg = var->inst_basereg;
10298 						ins->inst_offset = var->inst_offset;
10301 					spec = INS_INFO (ins->opcode);
10305 					g_assert (var->opcode == OP_REGOFFSET);
10307 					prev_dreg = ins->dreg;
10309 					/* Invalidate any previous lvreg for this vreg */
10310 					vreg_to_lvreg [ins->dreg] = 0;
10314 #ifdef MONO_ARCH_SOFT_FLOAT
10315 					if (store_opcode == OP_STORER8_MEMBASE_REG) {
10317 						store_opcode = OP_STOREI8_MEMBASE_REG;
/* Redirect the result to a fresh local vreg; a store back to the
 * variable's stack slot is inserted after the instruction below. */
10321 					ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
10323 					if (regtype == 'l') {
10324 						NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
10325 						mono_bblock_insert_after_ins (bb, ins, store_ins);
10326 						NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
10327 						mono_bblock_insert_after_ins (bb, ins, store_ins);
10330 						g_assert (store_opcode != OP_STOREV_MEMBASE);
10332 						/* Try to fuse the store into the instruction itself */
10333 						/* FIXME: Add more instructions */
10334 						if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
10335 							ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
10336 							ins->inst_imm = ins->inst_c0;
10337 							ins->inst_destbasereg = var->inst_basereg;
10338 							ins->inst_offset = var->inst_offset;
10339 						} else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
10340 							ins->opcode = store_opcode;
10341 							ins->inst_destbasereg = var->inst_basereg;
10342 							ins->inst_offset = var->inst_offset;
/* The instruction became a store: swap regs and patch spec2 as above. */
10346 							tmp_reg = ins->dreg;
10347 							ins->dreg = ins->sreg2;
10348 							ins->sreg2 = tmp_reg;
10351 							spec2 [MONO_INST_DEST] = ' ';
10352 							spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10353 							spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10355 						} else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
10356 							// FIXME: The backends expect the base reg to be in inst_basereg
10357 							ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
10359 							ins->inst_basereg = var->inst_basereg;
10360 							ins->inst_offset = var->inst_offset;
10361 							spec = INS_INFO (ins->opcode);
10363 							/* printf ("INS: "); mono_print_ins (ins); */
10364 							/* Create a store instruction */
10365 							NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
10367 							/* Insert it after the instruction */
10368 							mono_bblock_insert_after_ins (bb, ins, store_ins);
10371 							 * We can't assign ins->dreg to var->dreg here, since the
10372 							 * sregs could use it. So set a flag, and do it after
10375 							if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
10376 								dest_has_lvreg = TRUE;
/* Rewrite the two source registers. */
10385 			for (srcindex = 0; srcindex < 2; ++srcindex) {
10386 				regtype = spec [(srcindex == 0) ? MONO_INST_SRC1 : MONO_INST_SRC2];
10387 				sreg = srcindex == 0 ? ins->sreg1 : ins->sreg2;
10389 				g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
10390 				if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
10391 					MonoInst *var = get_vreg_to_inst (cfg, sreg);
10392 					MonoInst *load_ins;
10393 					guint32 load_opcode;
10395 					if (var->opcode == OP_REGVAR) {
10397 							ins->sreg1 = var->dreg;
10399 							ins->sreg2 = var->dreg;
10403 					g_assert (var->opcode == OP_REGOFFSET);
10405 					load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
10407 					g_assert (load_opcode != OP_LOADV_MEMBASE);
10409 					if (vreg_to_lvreg [sreg]) {
10410 						/* The variable is already loaded to an lvreg */
10411 						if (G_UNLIKELY (cfg->verbose_level > 2))
10412 							printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
10414 							ins->sreg1 = vreg_to_lvreg [sreg];
10416 							ins->sreg2 = vreg_to_lvreg [sreg];
10420 						/* Try to fuse the load into the instruction */
10421 						if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
10422 							ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
10423 							ins->inst_basereg = var->inst_basereg;
10424 							ins->inst_offset = var->inst_offset;
10425 						} else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
10426 							ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
10427 							ins->sreg2 = var->inst_basereg;
10428 							ins->inst_offset = var->inst_offset;
10430 							if (MONO_IS_REAL_MOVE (ins)) {
10431 								ins->opcode = OP_NOP;
10434 								//printf ("%d ", srcindex); mono_print_ins (ins);
10436 								sreg = alloc_dreg (cfg, stacktypes [regtype]);
/* Remember the lvreg so later uses in this bblock skip the reload. */
10438 								if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
10439 									if (var->dreg == prev_dreg) {
10441 										 * sreg refers to the value loaded by the load
10442 										 * emitted below, but we need to use ins->dreg
10443 										 * since it refers to the store emitted earlier.
10447 									vreg_to_lvreg [var->dreg] = sreg;
10448 									g_assert (lvregs_len < 1024);
10449 									lvregs [lvregs_len ++] = var->dreg;
10458 								if (regtype == 'l') {
10459 									NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
10460 									mono_bblock_insert_before_ins (bb, ins, load_ins);
10461 									NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
10462 									mono_bblock_insert_before_ins (bb, ins, load_ins);
10465 #if SIZEOF_VOID_P == 4
10466 									g_assert (load_opcode != OP_LOADI8_MEMBASE);
10468 									NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
10469 									mono_bblock_insert_before_ins (bb, ins, load_ins);
/* The sregs are processed now, so it is safe to publish the dreg lvreg. */
10475 			if (dest_has_lvreg) {
10476 				vreg_to_lvreg [prev_dreg] = ins->dreg;
10477 				g_assert (lvregs_len < 1024);
10478 				lvregs [lvregs_len ++] = prev_dreg;
10479 				dest_has_lvreg = FALSE;
/* Undo the earlier dreg/sreg2 swap for store opcodes. */
10483 				tmp_reg = ins->dreg;
10484 				ins->dreg = ins->sreg2;
10485 				ins->sreg2 = tmp_reg;
10488 			if (MONO_IS_CALL (ins)) {
10489 				/* Clear vreg_to_lvreg array */
/* Calls clobber the lvregs, so drop all cached mappings. */
10490 				for (i = 0; i < lvregs_len; i++)
10491 					vreg_to_lvreg [lvregs [i]] = 0;
10495 			if (cfg->verbose_level > 2)
10496 				mono_print_ins_index (1, ins);
10503 * - use 'iadd' instead of 'int_add'
10504 * - handling ovf opcodes: decompose in method_to_ir.
10505 * - unify iregs/fregs
10506 * -> partly done, the missing parts are:
10507 * - a more complete unification would involve unifying the hregs as well, so
10508 * code wouldn't need if (fp) all over the place. but that would mean the hregs
10509 * would no longer map to the machine hregs, so the code generators would need to
10510 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
10511 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
10512 * fp/non-fp branches speeds it up by about 15%.
10513 * - use sext/zext opcodes instead of shifts
10515 * - get rid of TEMPLOADs if possible and use vregs instead
10516 * - clean up usage of OP_P/OP_ opcodes
10517 * - cleanup usage of DUMMY_USE
10518 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
10520 * - set the stack type and allocate a dreg in the EMIT_NEW macros
10521 * - get rid of all the <foo>2 stuff when the new JIT is ready.
10522 * - make sure handle_stack_args () is called before the branch is emitted
10523 * - when the new IR is done, get rid of all unused stuff
10524 * - COMPARE/BEQ as separate instructions or unify them ?
10525 * - keeping them separate allows specialized compare instructions like
10526 * compare_imm, compare_membase
10527 * - most back ends unify fp compare+branch, fp compare+ceq
10528 * - integrate mono_save_args into inline_method
10529 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRACH_BLOCK2
10530 * - handle long shift opts on 32 bit platforms somehow: they require
10531 * 3 sregs (2 for arg1 and 1 for arg2)
10532 * - make byref a 'normal' type.
10533 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
10534 * variable if needed.
10535 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
10536 * like inline_method.
10537 * - remove inlining restrictions
10538 * - fix LNEG and enable cfold of INEG
10539 * - generalize x86 optimizations like ldelema as a peephole optimization
10540 * - add store_mem_imm for amd64
10541 * - optimize the loading of the interruption flag in the managed->native wrappers
10542 * - avoid special handling of OP_NOP in passes
10543 * - move code inserting instructions into one function/macro.
10544 * - try a coalescing phase after liveness analysis
10545 * - add float -> vreg conversion + local optimizations on !x86
10546 * - figure out how to handle decomposed branches during optimizations, ie.
10547 * compare+branch, op_jump_table+op_br etc.
10548 * - promote RuntimeXHandles to vregs
10549 * - vtype cleanups:
10550 * - add a NEW_VARLOADA_VREG macro
10551 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
10552 * accessing vtype fields.
10553 * - get rid of I8CONST on 64 bit platforms
10554 * - dealing with the increase in code size due to branches created during opcode
10556 * - use extended basic blocks
10557 * - all parts of the JIT
10558 * - handle_global_vregs () && local regalloc
10559 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
10560 * - sources of increase in code size:
10563 * - isinst and castclass
10564 * - lvregs not allocated to global registers even if used multiple times
10565 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
10567 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
10568 * - add all micro optimizations from the old JIT
10569 * - put tree optimizations into the deadce pass
10570 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
10571 * specific function.
10572 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
10573 * fcompare + branchCC.
10574 * - create a helper function for allocating a stack slot, taking into account
10575 * MONO_CFG_HAS_SPILLUP.
10576 * - merge new GC changes in mini.c.
10578 * - merge the ia64 switch changes.
10579 * - merge the mips conditional changes.
10580 * - remove unused opcodes from mini-ops.h, remove "op_" from the opcode names,
10581 * - make the cpu_ tables smaller when the usage of the cee_ opcodes is removed.
10582 * - optimize mono_regstate2_alloc_int/float.
10583 * - fix the pessimistic handling of variables accessed in exception handler blocks.
10584 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
10585 * parts of the tree could be separated by other instructions, killing the tree
10586 * arguments, or stores killing loads etc. Also, should we fold loads into other
10587 * instructions if the result of the load is used multiple times ?
10588 * - make the REM_IMM optimization in mini-x86.c arch-independent.
10589 * - LAST MERGE: 108395.
10590 * - when returning vtypes in registers, generate IR and append it to the end of the
10591 * last bb instead of doing it in the epilog.
10592 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
10600 - When to decompose opcodes:
10601 - earlier: this makes some optimizations hard to implement, since the low level IR
10602 no longer contains the necessary information. But it is easier to do.
10603 - later: harder to implement, enables more optimizations.
10604 - Branches inside bblocks:
10605 - created when decomposing complex opcodes.
10606 - branches to another bblock: harmless, but not tracked by the branch
10607 optimizations, so need to branch to a label at the start of the bblock.
10608 - branches to inside the same bblock: very problematic, trips up the local
10609 reg allocator. Can be fixed by splitting the current bblock, but that is a
10610 complex operation, since some local vregs can become global vregs etc.
10611 - Local/global vregs:
10612 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
10613 local register allocator.
10614 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
10615 structure, created by mono_create_var (). Assigned to hregs or the stack by
10616 the global register allocator.
10617 - When to do optimizations like alu->alu_imm:
10618 - earlier -> saves work later on since the IR will be smaller/simpler
10619 - later -> can work on more instructions
10620 - Handling of valuetypes:
10621 - When a vtype is pushed on the stack, a new temporary is created, an
10622 instruction computing its address (LDADDR) is emitted and pushed on
10623 the stack. Need to optimize cases when the vtype is used immediately as in
10624 argument passing, stloc etc.
10625 - Instead of the to_end stuff in the old JIT, simply call the function handling
10626 the values on the stack before emitting the last instruction of the bb.
10629 #endif /* DISABLE_JIT */