2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
22 #ifdef HAVE_SYS_TIME_H
26 #ifdef HAVE_VALGRIND_MEMCHECK_H
27 #include <valgrind/memcheck.h>
30 #include <mono/metadata/assembly.h>
31 #include <mono/metadata/loader.h>
32 #include <mono/metadata/tabledefs.h>
33 #include <mono/metadata/class.h>
34 #include <mono/metadata/object.h>
35 #include <mono/metadata/exception.h>
36 #include <mono/metadata/opcodes.h>
37 #include <mono/metadata/mono-endian.h>
38 #include <mono/metadata/tokentype.h>
39 #include <mono/metadata/tabledefs.h>
40 #include <mono/metadata/marshal.h>
41 #include <mono/metadata/debug-helpers.h>
42 #include <mono/metadata/mono-debug.h>
43 #include <mono/metadata/gc-internal.h>
44 #include <mono/metadata/security-manager.h>
45 #include <mono/metadata/threads-types.h>
46 #include <mono/metadata/security-core-clr.h>
47 #include <mono/metadata/monitor.h>
48 #include <mono/utils/mono-compiler.h>
55 #include "jit-icalls.h"
57 #define BRANCH_COST 100
58 #define INLINE_LENGTH_LIMIT 20
59 #define INLINE_FAILURE do {\
60 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE))\
63 #define CHECK_CFG_EXCEPTION do {\
64 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
67 #define METHOD_ACCESS_FAILURE do { \
68 char *method_fname = mono_method_full_name (method, TRUE); \
69 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
70 cfg->exception_type = MONO_EXCEPTION_METHOD_ACCESS; \
71 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
72 g_free (method_fname); \
73 g_free (cil_method_fname); \
74 goto exception_exit; \
76 #define FIELD_ACCESS_FAILURE do { \
77 char *method_fname = mono_method_full_name (method, TRUE); \
78 char *field_fname = mono_field_full_name (field); \
79 cfg->exception_type = MONO_EXCEPTION_FIELD_ACCESS; \
80 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
81 g_free (method_fname); \
82 g_free (field_fname); \
83 goto exception_exit; \
85 #define GENERIC_SHARING_FAILURE(opcode) do { \
86 if (cfg->generic_sharing_context) { \
87 if (cfg->verbose_level > 2) \
88 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
89 cfg->exception_type = MONO_EXCEPTION_GENERIC_SHARING_FAILED; \
90 goto exception_exit; \
94 /* Determine whether 'ins' represents a load of the 'this' argument */
95 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
97 static int ldind_to_load_membase (int opcode);
98 static int stind_to_store_membase (int opcode);
100 int mono_op_to_op_imm (int opcode);
101 int mono_op_to_op_imm_noemul (int opcode);
103 MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
104 void mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native);
105 void mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass);
107 /* helper methods signature */
108 extern MonoMethodSignature *helper_sig_class_init_trampoline;
109 extern MonoMethodSignature *helper_sig_domain_get;
110 extern MonoMethodSignature *helper_sig_generic_class_init_trampoline;
111 extern MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
112 extern MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline;
115 * Instruction metadata
120 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2,
126 #if SIZEOF_VOID_P == 8
131 /* keep in sync with the enum in mini.h */
134 #include "mini-ops.h"
138 extern GHashTable *jit_icall_name_hash;
140 #define MONO_INIT_VARINFO(vi,id) do { \
141 (vi)->range.first_use.pos.bid = 0xffff; \
147 mono_alloc_ireg (MonoCompile *cfg)
149 return alloc_ireg (cfg);
153 mono_alloc_freg (MonoCompile *cfg)
155 return alloc_freg (cfg);
159 mono_alloc_preg (MonoCompile *cfg)
161 return alloc_preg (cfg);
165 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
167 return alloc_dreg (cfg, stack_type);
171 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
177 switch (type->type) {
180 case MONO_TYPE_BOOLEAN:
192 case MONO_TYPE_FNPTR:
194 case MONO_TYPE_CLASS:
195 case MONO_TYPE_STRING:
196 case MONO_TYPE_OBJECT:
197 case MONO_TYPE_SZARRAY:
198 case MONO_TYPE_ARRAY:
202 #if SIZEOF_VOID_P == 8
211 case MONO_TYPE_VALUETYPE:
212 if (type->data.klass->enumtype) {
213 type = type->data.klass->enum_basetype;
216 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
219 case MONO_TYPE_TYPEDBYREF:
221 case MONO_TYPE_GENERICINST:
222 type = &type->data.generic_class->container_class->byval_arg;
226 g_assert (cfg->generic_sharing_context);
229 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
235 mono_print_bb (MonoBasicBlock *bb, const char *msg)
240 printf ("\n%s %d: [IN: ", msg, bb->block_num);
241 for (i = 0; i < bb->in_count; ++i)
242 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
244 for (i = 0; i < bb->out_count; ++i)
245 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
247 for (tree = bb->code; tree; tree = tree->next)
248 mono_print_ins_index (-1, tree);
252 * Can't put this at the beginning, since other files reference stuff from this
257 #define UNVERIFIED do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto unverified; } while (0)
259 #define GET_BBLOCK(cfg,tblock,ip) do { \
260 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
262 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
263 NEW_BBLOCK (cfg, (tblock)); \
264 (tblock)->cil_code = (ip); \
265 ADD_BBLOCK (cfg, (tblock)); \
269 #if defined(__i386__) || defined(__x86_64__)
270 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
271 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
272 (dest)->dreg = alloc_preg ((cfg)); \
273 (dest)->sreg1 = (sr1); \
274 (dest)->sreg2 = (sr2); \
275 (dest)->inst_imm = (imm); \
276 (dest)->backend.shift_amount = (shift); \
277 MONO_ADD_INS ((cfg)->cbb, (dest)); \
281 #if SIZEOF_VOID_P == 8
282 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
283 /* FIXME: Need to add many more cases */ \
284 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
286 int dr = alloc_preg (cfg); \
287 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
288 (ins)->sreg2 = widen->dreg; \
292 #define ADD_WIDEN_OP(ins, arg1, arg2)
295 #define ADD_BINOP(op) do { \
296 MONO_INST_NEW (cfg, ins, (op)); \
298 ins->sreg1 = sp [0]->dreg; \
299 ins->sreg2 = sp [1]->dreg; \
300 type_from_op (ins, sp [0], sp [1]); \
302 /* Have to insert a widening op */ \
303 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
304 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
305 MONO_ADD_INS ((cfg)->cbb, (ins)); \
307 mono_decompose_opcode ((cfg), (ins)); \
310 #define ADD_UNOP(op) do { \
311 MONO_INST_NEW (cfg, ins, (op)); \
313 ins->sreg1 = sp [0]->dreg; \
314 type_from_op (ins, sp [0], NULL); \
316 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
317 MONO_ADD_INS ((cfg)->cbb, (ins)); \
319 mono_decompose_opcode (cfg, ins); \
322 #define ADD_BINCOND(next_block) do { \
325 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
326 cmp->sreg1 = sp [0]->dreg; \
327 cmp->sreg2 = sp [1]->dreg; \
328 type_from_op (cmp, sp [0], sp [1]); \
330 type_from_op (ins, sp [0], sp [1]); \
331 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
332 GET_BBLOCK (cfg, tblock, target); \
333 link_bblock (cfg, bblock, tblock); \
334 ins->inst_true_bb = tblock; \
335 if ((next_block)) { \
336 link_bblock (cfg, bblock, (next_block)); \
337 ins->inst_false_bb = (next_block); \
338 start_new_bblock = 1; \
340 GET_BBLOCK (cfg, tblock, ip); \
341 link_bblock (cfg, bblock, tblock); \
342 ins->inst_false_bb = tblock; \
343 start_new_bblock = 2; \
345 if (sp != stack_start) { \
346 handle_stack_args (cfg, stack_start, sp - stack_start); \
347 CHECK_UNVERIFIABLE (cfg); \
349 MONO_ADD_INS (bblock, cmp); \
350 MONO_ADD_INS (bblock, ins); \
354 * link_bblock: Links two basic blocks
356 * Links two basic blocks in the control flow graph: the 'from'
357 * argument is the starting block and the 'to' argument is the block
358 * that control flow reaches after leaving 'from'.
/*
 * Adds the edge from->to to the CFG, keeping both the out_bb array of 'from'
 * and the in_bb array of 'to' in sync, and skipping duplicates.
 * NOTE(review): this chunk is a sampled view of the file; closing braces and
 * some statements are not visible here, so comments describe visible lines only.
 */
361 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
363 MonoBasicBlock **newa;
/* Debug tracing of the new edge; entry/exit bblocks have no cil_code, hence the alternatives. */
367 if (from->cil_code) {
369 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
371 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
374 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
376 printf ("edge from entry to exit\n");
/* If 'to' is already a successor of 'from', there is nothing to add. */
381 for (i = 0; i < from->out_count; ++i) {
382 if (to == from->out_bb [i]) {
/* Grow the successor array by one slot; arrays live in the cfg mempool, so the old one is simply abandoned. */
388 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
389 for (i = 0; i < from->out_count; ++i) {
390 newa [i] = from->out_bb [i];
/* Same de-duplicate-then-grow sequence for the predecessor list of 'to'. */
398 for (i = 0; i < to->in_count; ++i) {
399 if (from == to->in_bb [i]) {
405 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
406 for (i = 0; i < to->in_count; ++i) {
407 newa [i] = to->in_bb [i];
416 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
418 link_bblock (cfg, from, to);
422 * mono_find_block_region:
424 * We mark each basic block with a region ID. We use that to avoid BB
425 * optimizations when blocks are in different regions.
428 * A region token that encodes where this region is, and information
429 * about the clause owner for this block.
431 * The region encodes the try/catch/filter clause that owns this block
432 * as well as the type. -1 is a special value that represents a block
433 * that is in none of try/catch/filter.
/*
 * Maps a CIL 'offset' to a region token of the form
 * ((clause_index + 1) << 8) | region_type | clause->flags.
 * Handlers and filters are scanned before try ranges, so the handler role wins
 * when an offset is inside both.
 * NOTE(review): sampled view — loop-closing braces and the final fall-through
 * return are not visible in this chunk.
 */
436 mono_find_block_region (MonoCompile *cfg, int offset)
438 MonoMethod *method = cfg->method;
439 MonoMethodHeader *header = mono_method_get_header (method);
440 MonoExceptionClause *clause;
443 /* first search for handlers and filters */
444 for (i = 0; i < header->num_clauses; ++i) {
445 clause = &header->clauses [i];
/* A filter's code occupies [filter_offset, handler_offset). */
446 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
447 (offset < (clause->handler_offset)))
448 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
450 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
451 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
452 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
453 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
454 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
/* Neither finally nor fault: the handler is treated as a catch region. */
456 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
460 /* search the try blocks */
461 for (i = 0; i < header->num_clauses; ++i) {
462 clause = &header->clauses [i];
463 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
464 return ((i + 1) << 8) | clause->flags;
/*
 * Collects into the list 'res' the handler bblocks of clauses with flags equal
 * to 'type' whose protected range contains 'ip' but not 'target' — i.e. the
 * clauses that a branch from ip to target would exit.
 * NOTE(review): sampled view — the declarations of 'res'/'i', closing braces
 * and the return statement are not visible in this chunk.
 */
471 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
473 MonoMethod *method = cfg->method;
474 MonoMethodHeader *header = mono_method_get_header (method);
475 MonoExceptionClause *clause;
476 MonoBasicBlock *handler;
480 for (i = 0; i < header->num_clauses; ++i) {
481 clause = &header->clauses [i];
/* The clause is being left: ip is inside its try range, target is outside. */
482 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
483 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
484 if (clause->flags == type) {
485 handler = cfg->cil_offset_to_bb [clause->handler_offset];
487 res = g_list_append (res, handler);
/*
 * Returns (creating and caching on first use) the per-region variable stored
 * in cfg->spvars, keyed by the region token.
 */
495 mono_create_spvar_for_region (MonoCompile *cfg, int region)
499 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
/* Cache miss: create an int-typed local for this region. */
503 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
504 /* prevent it from being register allocated */
505 var->flags |= MONO_INST_INDIRECT;
507 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/* Looks up the exception variable cached for a CIL offset; NULL if none was created yet. */
511 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
513 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/*
 * Returns (creating and caching on first use) the object-typed local that
 * holds the exception object for the handler starting at 'offset'; the cache
 * lives in cfg->exvars.
 */
517 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
521 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
525 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
526 /* prevent it from being register allocated */
527 var->flags |= MONO_INST_INDIRECT;
529 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
535 * Returns the type used in the eval stack when @type is loaded.
536 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
539 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
543 inst->klass = klass = mono_class_from_mono_type (type);
545 inst->type = STACK_MP;
550 switch (type->type) {
552 inst->type = STACK_INV;
556 case MONO_TYPE_BOOLEAN:
562 inst->type = STACK_I4;
567 case MONO_TYPE_FNPTR:
568 inst->type = STACK_PTR;
570 case MONO_TYPE_CLASS:
571 case MONO_TYPE_STRING:
572 case MONO_TYPE_OBJECT:
573 case MONO_TYPE_SZARRAY:
574 case MONO_TYPE_ARRAY:
575 inst->type = STACK_OBJ;
579 inst->type = STACK_I8;
583 inst->type = STACK_R8;
585 case MONO_TYPE_VALUETYPE:
586 if (type->data.klass->enumtype) {
587 type = type->data.klass->enum_basetype;
591 inst->type = STACK_VTYPE;
594 case MONO_TYPE_TYPEDBYREF:
595 inst->klass = mono_defaults.typed_reference_class;
596 inst->type = STACK_VTYPE;
598 case MONO_TYPE_GENERICINST:
599 type = &type->data.generic_class->container_class->byval_arg;
602 case MONO_TYPE_MVAR :
603 /* FIXME: all the arguments must be references for now,
604 * later look inside cfg and see if the arg num is
607 g_assert (cfg->generic_sharing_context);
608 inst->type = STACK_OBJ;
611 g_error ("unknown type 0x%02x in eval stack type", type->type);
616 * The following tables are used to quickly validate the IL code in type_from_op ().
619 bin_num_table [STACK_MAX] [STACK_MAX] = {
620 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
621 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
622 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
623 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
624 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
625 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
626 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
627 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
632 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
635 /* reduce the size of this table */
637 bin_int_table [STACK_MAX] [STACK_MAX] = {
638 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
639 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
640 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
641 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
642 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
643 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
644 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
645 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
649 bin_comp_table [STACK_MAX] [STACK_MAX] = {
650 /* Inv i L p F & O vt */
652 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
653 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
654 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
655 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
656 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
657 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
658 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
661 /* reduce the size of this table */
663 shift_table [STACK_MAX] [STACK_MAX] = {
664 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
665 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
666 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
667 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
668 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
669 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
670 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
671 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
675 * Tables to map from the non-specific opcode to the matching
676 * type-specific opcode.
678 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
680 binops_op_map [STACK_MAX] = {
681 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
684 /* handles from CEE_NEG to CEE_CONV_U8 */
686 unops_op_map [STACK_MAX] = {
687 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
690 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
692 ovfops_op_map [STACK_MAX] = {
693 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
696 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
698 ovf2ops_op_map [STACK_MAX] = {
699 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
702 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
704 ovf3ops_op_map [STACK_MAX] = {
705 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
708 /* handles from CEE_BEQ to CEE_BLT_UN */
710 beqops_op_map [STACK_MAX] = {
711 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
714 /* handles from CEE_CEQ to CEE_CLT_UN */
716 ceqops_op_map [STACK_MAX] = {
717 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
721 * Sets ins->type (the type on the eval stack) according to the
722 * type of the opcode and the arguments to it.
723 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
725 * FIXME: this function sets ins->type unconditionally in some cases, but
726 * it should set it to invalid for some types (a conv.x on an object)
729 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
731 switch (ins->opcode) {
738 /* FIXME: check unverifiable args for STACK_MP */
739 ins->type = bin_num_table [src1->type] [src2->type];
740 ins->opcode += binops_op_map [ins->type];
747 ins->type = bin_int_table [src1->type] [src2->type];
748 ins->opcode += binops_op_map [ins->type];
753 ins->type = shift_table [src1->type] [src2->type];
754 ins->opcode += binops_op_map [ins->type];
759 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
760 if ((src1->type == STACK_I8) || ((sizeof (gpointer) == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
761 ins->opcode = OP_LCOMPARE;
762 else if (src1->type == STACK_R8)
763 ins->opcode = OP_FCOMPARE;
765 ins->opcode = OP_ICOMPARE;
767 case OP_ICOMPARE_IMM:
768 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
769 if ((src1->type == STACK_I8) || ((sizeof (gpointer) == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
770 ins->opcode = OP_LCOMPARE_IMM;
782 ins->opcode += beqops_op_map [src1->type];
785 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
786 ins->opcode += ceqops_op_map [src1->type];
792 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
793 ins->opcode += ceqops_op_map [src1->type];
797 ins->type = neg_table [src1->type];
798 ins->opcode += unops_op_map [ins->type];
801 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
802 ins->type = src1->type;
804 ins->type = STACK_INV;
805 ins->opcode += unops_op_map [ins->type];
811 ins->type = STACK_I4;
812 ins->opcode += unops_op_map [src1->type];
815 ins->type = STACK_R8;
816 switch (src1->type) {
819 ins->opcode = OP_ICONV_TO_R_UN;
822 ins->opcode = OP_LCONV_TO_R_UN;
826 case CEE_CONV_OVF_I1:
827 case CEE_CONV_OVF_U1:
828 case CEE_CONV_OVF_I2:
829 case CEE_CONV_OVF_U2:
830 case CEE_CONV_OVF_I4:
831 case CEE_CONV_OVF_U4:
832 ins->type = STACK_I4;
833 ins->opcode += ovf3ops_op_map [src1->type];
835 case CEE_CONV_OVF_I_UN:
836 case CEE_CONV_OVF_U_UN:
837 ins->type = STACK_PTR;
838 ins->opcode += ovf2ops_op_map [src1->type];
840 case CEE_CONV_OVF_I1_UN:
841 case CEE_CONV_OVF_I2_UN:
842 case CEE_CONV_OVF_I4_UN:
843 case CEE_CONV_OVF_U1_UN:
844 case CEE_CONV_OVF_U2_UN:
845 case CEE_CONV_OVF_U4_UN:
846 ins->type = STACK_I4;
847 ins->opcode += ovf2ops_op_map [src1->type];
850 ins->type = STACK_PTR;
851 switch (src1->type) {
853 ins->opcode = OP_ICONV_TO_U;
857 #if SIZEOF_VOID_P == 8
858 ins->opcode = OP_LCONV_TO_U;
860 ins->opcode = OP_MOVE;
864 ins->opcode = OP_LCONV_TO_U;
867 ins->opcode = OP_FCONV_TO_U;
873 ins->type = STACK_I8;
874 ins->opcode += unops_op_map [src1->type];
876 case CEE_CONV_OVF_I8:
877 case CEE_CONV_OVF_U8:
878 ins->type = STACK_I8;
879 ins->opcode += ovf3ops_op_map [src1->type];
881 case CEE_CONV_OVF_U8_UN:
882 case CEE_CONV_OVF_I8_UN:
883 ins->type = STACK_I8;
884 ins->opcode += ovf2ops_op_map [src1->type];
888 ins->type = STACK_R8;
889 ins->opcode += unops_op_map [src1->type];
892 ins->type = STACK_R8;
896 ins->type = STACK_I4;
897 ins->opcode += ovfops_op_map [src1->type];
902 ins->type = STACK_PTR;
903 ins->opcode += ovfops_op_map [src1->type];
911 ins->type = bin_num_table [src1->type] [src2->type];
912 ins->opcode += ovfops_op_map [src1->type];
913 if (ins->type == STACK_R8)
914 ins->type = STACK_INV;
916 case OP_LOAD_MEMBASE:
917 ins->type = STACK_PTR;
919 case OP_LOADI1_MEMBASE:
920 case OP_LOADU1_MEMBASE:
921 case OP_LOADI2_MEMBASE:
922 case OP_LOADU2_MEMBASE:
923 case OP_LOADI4_MEMBASE:
924 case OP_LOADU4_MEMBASE:
925 ins->type = STACK_PTR;
927 case OP_LOADI8_MEMBASE:
928 ins->type = STACK_I8;
930 case OP_LOADR4_MEMBASE:
931 case OP_LOADR8_MEMBASE:
932 ins->type = STACK_R8;
935 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
939 if (ins->type == STACK_MP)
940 ins->klass = mono_defaults.object_class;
945 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
951 param_table [STACK_MAX] [STACK_MAX] = {
956 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
960 switch (args->type) {
970 for (i = 0; i < sig->param_count; ++i) {
971 switch (args [i].type) {
975 if (!sig->params [i]->byref)
979 if (sig->params [i]->byref)
981 switch (sig->params [i]->type) {
982 case MONO_TYPE_CLASS:
983 case MONO_TYPE_STRING:
984 case MONO_TYPE_OBJECT:
985 case MONO_TYPE_SZARRAY:
986 case MONO_TYPE_ARRAY:
993 if (sig->params [i]->byref)
995 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1004 /*if (!param_table [args [i].type] [sig->params [i]->type])
1012 * When we need a pointer to the current domain many times in a method, we
1013 * call mono_domain_get() once and we store the result in a local variable.
1014 * This function returns the variable that represents the MonoDomain*.
1016 inline static MonoInst *
1017 mono_get_domainvar (MonoCompile *cfg)
1019 if (!cfg->domainvar)
1020 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1021 return cfg->domainvar;
1025 * The got_var contains the address of the Global Offset Table when AOT
1028 inline static MonoInst *
1029 mono_get_got_var (MonoCompile *cfg)
1031 #ifdef MONO_ARCH_NEED_GOT_VAR
1032 if (!cfg->compile_aot)
1034 if (!cfg->got_var) {
1035 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1037 return cfg->got_var;
1044 mono_get_vtable_var (MonoCompile *cfg)
1046 g_assert (cfg->generic_sharing_context);
1048 if (!cfg->rgctx_var) {
1049 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1050 /* force the var to be stack allocated */
1051 cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
1054 return cfg->rgctx_var;
1058 type_from_stack_type (MonoInst *ins) {
1059 switch (ins->type) {
1060 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1061 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1062 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1063 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1065 return &ins->klass->this_arg;
1066 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1067 case STACK_VTYPE: return &ins->klass->byval_arg;
1069 g_error ("stack type %d to monotype not handled\n", ins->type);
1074 static G_GNUC_UNUSED int
1075 type_to_stack_type (MonoType *t)
1077 switch (mono_type_get_underlying_type (t)->type) {
1080 case MONO_TYPE_BOOLEAN:
1083 case MONO_TYPE_CHAR:
1090 case MONO_TYPE_FNPTR:
1092 case MONO_TYPE_CLASS:
1093 case MONO_TYPE_STRING:
1094 case MONO_TYPE_OBJECT:
1095 case MONO_TYPE_SZARRAY:
1096 case MONO_TYPE_ARRAY:
1104 case MONO_TYPE_VALUETYPE:
1105 case MONO_TYPE_TYPEDBYREF:
1107 case MONO_TYPE_GENERICINST:
1108 if (mono_type_generic_inst_is_valuetype (t))
1114 g_assert_not_reached ();
1121 array_access_to_klass (int opcode)
1125 return mono_defaults.byte_class;
1127 return mono_defaults.uint16_class;
1130 return mono_defaults.int_class;
1133 return mono_defaults.sbyte_class;
1136 return mono_defaults.int16_class;
1139 return mono_defaults.int32_class;
1141 return mono_defaults.uint32_class;
1144 return mono_defaults.int64_class;
1147 return mono_defaults.single_class;
1150 return mono_defaults.double_class;
1151 case CEE_LDELEM_REF:
1152 case CEE_STELEM_REF:
1153 return mono_defaults.object_class;
1155 g_assert_not_reached ();
1161 * We try to share variables when possible
1164 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1169 /* inlining can result in deeper stacks */
1170 if (slot >= mono_method_get_header (cfg->method)->max_stack)
1171 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1173 pos = ins->type - 1 + slot * STACK_MAX;
1175 switch (ins->type) {
1182 if ((vnum = cfg->intvars [pos]))
1183 return cfg->varinfo [vnum];
1184 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1185 cfg->intvars [pos] = res->inst_c0;
1188 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/*
 * Records an image+token pair for 'key' in cfg->token_info_hash so that AOT
 * compilation can later resolve the item from metadata alone.
 */
1194 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1197 * Don't use this if a generic_context is set, since that means AOT can't
1198 * look up the method using just the image+token.
1199 * table == 0 means this is a reference made from a wrapper.
1201 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
/* mempool-allocated, zero-initialized; freed together with the cfg mempool. */
1202 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1203 jump_info_token->image = image;
1204 jump_info_token->token = token;
1205 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1210 * This function is called to handle items that are left on the evaluation stack
1211 * at basic block boundaries. What happens is that we save the values to local variables
1212 * and we reload them later when first entering the target basic block (with the
1213 * handle_loaded_temps () function).
1214 * A single joint point will use the same variables (stored in the array bb->out_stack or
1215 * bb->in_stack, if the basic block is before or after the joint point).
1217 * This function needs to be called _before_ emitting the last instruction of
1218 * the bb (i.e. before emitting a branch).
1219 * If the stack merge fails at a join point, cfg->unverifiable is set.
/*
 * Spills the 'count' evaluation-stack entries in 'sp' into shared locals at a
 * basic-block boundary; the chosen locals become the in_stack of successor
 * bblocks so values can be reloaded there. Sets cfg->unverifiable when the
 * stack depth disagrees at a join point.
 * NOTE(review): sampled view — braces and some break/continue statements
 * between the visible lines are missing; comments describe visible lines only.
 */
1222 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1225 MonoBasicBlock *bb = cfg->cbb;
1226 MonoBasicBlock *outb;
1227 MonoInst *inst, **locals;
1232 if (cfg->verbose_level > 3)
1233 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* First exit with values on the stack: decide which locals form out_stack. */
1234 if (!bb->out_scount) {
1235 bb->out_scount = count;
1236 //printf ("bblock %d has out:", bb->block_num);
/* Prefer reusing an in_stack a successor already has, so both sides agree on the variables. */
1238 for (i = 0; i < bb->out_count; ++i) {
1239 outb = bb->out_bb [i];
1240 /* exception handlers are linked, but they should not be considered for stack args */
1241 if (outb->flags & BB_EXCEPTION_HANDLER)
1243 //printf (" %d", outb->block_num);
1244 if (outb->in_stack) {
1246 bb->out_stack = outb->in_stack;
/* No successor had an in_stack yet: allocate fresh variables for each slot. */
1252 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1253 for (i = 0; i < count; ++i) {
1255 * try to reuse temps already allocated for this purpose, if they occupy the same
1256 * stack slot and if they are of the same type.
1257 * This won't cause conflicts since if 'local' is used to
1258 * store one of the values in the in_stack of a bblock, then
1259 * the same variable will be used for the same outgoing stack
1261 * This doesn't work when inlining methods, since the bblocks
1262 * in the inlined methods do not inherit their in_stack from
1263 * the bblock they are inlined to. See bug #58863 for an
1266 if (cfg->inlined_method)
1267 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1269 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate out_stack to every successor that does not yet have an in_stack. */
1274 for (i = 0; i < bb->out_count; ++i) {
1275 outb = bb->out_bb [i];
1276 /* exception handlers are linked, but they should not be considered for stack args */
1277 if (outb->flags & BB_EXCEPTION_HANDLER)
1279 if (outb->in_scount) {
1280 if (outb->in_scount != bb->out_scount) {
/* Join point reached with different stack depths: the IL is unverifiable. */
1281 cfg->unverifiable = TRUE;
1284 continue; /* check they are the same locals */
1286 outb->in_scount = count;
1287 outb->in_stack = bb->out_stack;
1290 locals = bb->out_stack;
/* Emit the spills and replace each stack entry with its shared local. */
1292 for (i = 0; i < count; ++i) {
1293 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1294 inst->cil_code = sp [i]->cil_code;
1295 sp [i] = locals [i];
1296 if (cfg->verbose_level > 3)
1297 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1301 * It is possible that the out bblocks already have in_stack assigned, and
1302 * the in_stacks differ. In this case, we will store to all the different
1309 /* Find a bblock which has a different in_stack */
1311 while (bindex < bb->out_count) {
1312 outb = bb->out_bb [bindex];
1313 /* exception handlers are linked, but they should not be considered for stack args */
1314 if (outb->flags & BB_EXCEPTION_HANDLER) {
/* A successor with a different in_stack: store the same values to its variables too. */
1318 if (outb->in_stack != locals) {
1319 for (i = 0; i < count; ++i) {
1320 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1321 inst->cil_code = sp [i]->cil_code;
1322 sp [i] = locals [i];
1323 if (cfg->verbose_level > 3)
1324 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1326 locals = outb->in_stack;
1335 /* Emit code which loads interface_offsets [klass->interface_id]
1336 * The array is stored in memory before vtable.
/*
 * Emits IR that loads interface_offsets [klass->interface_id] for the vtable
 * in 'vtable_reg' into 'intf_reg'. Per the comment above, the offsets array is
 * stored in memory just before the vtable, hence the negative membase offset
 * in the non-AOT path.
 */
1339 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1341 if (cfg->compile_aot) {
1342 int ioffset_reg = alloc_preg (cfg);
1343 int iid_reg = alloc_preg (cfg);
/* Under AOT the interface id is not known at compile time: load it as a patched constant. */
1345 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1346 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1347 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
/* JIT path: interface_id is a compile-time constant, fold it into the load offset. */
1350 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
/*
 * mini_emit_load_intf_bit_reg_class:
 *   Emit IR that tests klass's interface bitmap: leaves a nonzero value
 *   in intf_bit_reg iff the MonoClass in klass_reg implements @klass.
 *   The bitmap is a byte array; bit (iid & 7) of byte (iid >> 3).
 *   NOTE(review): extraction gaps — the else between AOT/JIT paths and
 *   the closing braces are missing from this excerpt.
 */
1355 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1356 * stored in "klass_reg" implements the interface "klass".
1359 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1361 int ibitmap_reg = alloc_preg (cfg);
1362 int ibitmap_byte_reg = alloc_preg (cfg);
1364 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap));
1366 if (cfg->compile_aot) {
/* AOT: the interface id is a patch constant, so both the byte index
 * (iid >> 3) and the bit mask (1 << (iid & 7)) are computed at runtime. */
1367 int iid_reg = alloc_preg (cfg);
1368 int shifted_iid_reg = alloc_preg (cfg);
1369 int ibitmap_byte_address_reg = alloc_preg (cfg);
1370 int masked_iid_reg = alloc_preg (cfg);
1371 int iid_one_bit_reg = alloc_preg (cfg);
1372 int iid_bit_reg = alloc_preg (cfg);
1373 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1374 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1375 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1376 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1377 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1378 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1379 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1380 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT: interface_id is a compile-time constant; fold index and mask. */
1382 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1383 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
/*
 * mini_emit_load_intf_bit_reg_vtable:
 *   Same bitmap test as mini_emit_load_intf_bit_reg_class, but reads
 *   the interface_bitmap through a MonoVTable (vtable_reg) instead of a
 *   MonoClass.  Result: nonzero in intf_bit_reg iff implemented.
 *   NOTE(review): extraction gaps — else/closing braces missing here.
 */
1388 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1389 * stored in "vtable_reg" implements the interface "klass".
1392 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1394 int ibitmap_reg = alloc_preg (cfg);
1395 int ibitmap_byte_reg = alloc_preg (cfg);
1397 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap));
1399 if (cfg->compile_aot) {
/* AOT: compute byte index and bit mask from the patched iid at runtime. */
1400 int iid_reg = alloc_preg (cfg);
1401 int shifted_iid_reg = alloc_preg (cfg);
1402 int ibitmap_byte_address_reg = alloc_preg (cfg);
1403 int masked_iid_reg = alloc_preg (cfg);
1404 int iid_one_bit_reg = alloc_preg (cfg);
1405 int iid_bit_reg = alloc_preg (cfg);
1406 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1407 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, shifted_iid_reg, iid_reg, 3);
1408 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1409 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1410 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, masked_iid_reg, iid_reg, 7);
1411 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1412 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1413 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT: constant interface_id; byte index and mask folded at compile time. */
1415 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1416 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
/*
 * mini_emit_max_iid_check:
 *   Emit IR that verifies klass->interface_id <= max_iid_reg before a
 *   bitmap lookup is safe.  On failure: branch to false_target when one
 *   is supplied, otherwise (per the COND_EXC at 1438) throw
 *   InvalidCastException.  NOTE(review): the if (false_target)/else
 *   lines selecting between the two failure modes were dropped by
 *   extraction — confirm against upstream mono.
 */
1421 * Emit code which checks whenever the interface id of @klass is smaller than
1422 * than the value given by max_iid_reg.
1425 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1426 MonoBasicBlock *false_target)
1428 if (cfg->compile_aot) {
/* AOT: iid is not a compile-time constant, compare via a patched reg. */
1429 int iid_reg = alloc_preg (cfg);
1430 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1431 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1434 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1436 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1438 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
/* Same check, fetching max_interface_id from a vtable pointer. */
1441 /* Same as above, but obtains max_iid from a vtable */
1443 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1444 MonoBasicBlock *false_target)
1446 int max_iid_reg = alloc_preg (cfg);
1448 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1449 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/* Same check, fetching max_interface_id from a MonoClass pointer. */
1452 /* Same as above, but obtains max_iid from a klass */
1454 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1455 MonoBasicBlock *false_target)
1457 int max_iid_reg = alloc_preg (cfg);
1459 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1460 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * mini_emit_isninst_cast:
 *   Emit an "isinst"-style subclass test: check whether the MonoClass
 *   in klass_reg derives from @klass by comparing
 *   supertypes [klass->idepth - 1] against klass.  Branches to
 *   true_target on a match; to false_target when the candidate's
 *   inheritance depth is too small (only checked when idepth exceeds
 *   the statically-sized supertable).  Falls through on a plain
 *   mismatch.  NOTE(review): excerpt has gaps (closing braces missing).
 */
1464 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1466 int idepth_reg = alloc_preg (cfg);
1467 int stypes_reg = alloc_preg (cfg);
1468 int stype = alloc_preg (cfg);
/* Depth check is only needed when the supertypes table was grown past
 * its default inline size. */
1470 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1471 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1472 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1473 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1475 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1476 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1477 if (cfg->compile_aot) {
/* AOT: klass pointer is not embeddable, compare against a patch const. */
1478 int const_reg = alloc_preg (cfg);
1479 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1480 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1482 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1484 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/*
 * mini_emit_iface_cast:
 *   Emit an interface cast check against a vtable: bounds-check the
 *   interface id, load the bitmap bit, then either branch to
 *   true_target (nonzero bit) or — per the COND_EXC at 1498 — throw
 *   InvalidCastException.  NOTE(review): the if (true_target)/else
 *   selector lines are missing from this excerpt.
 */
1488 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1490 int intf_reg = alloc_preg (cfg);
1492 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1493 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1494 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1496 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1498 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/* Same check, but starting from a MonoClass pointer rather than a vtable. */
1502 * Variant of the above that takes a register to the class, not the vtable.
1505 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1507 int intf_bit_reg = alloc_preg (cfg);
1509 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1510 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1511 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1513 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1515 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * mini_emit_class_check:
 *   Emit IR comparing the MonoClass in klass_reg against @klass and
 *   throwing InvalidCastException when they differ.  Under AOT the
 *   class pointer comes from a CLASSCONST patch; under JIT it is an
 *   immediate.  NOTE(review): else/closing-brace lines dropped by
 *   extraction.
 */
1519 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1521 if (cfg->compile_aot) {
1522 int const_reg = alloc_preg (cfg);
1523 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1524 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1526 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1528 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Same comparison, but branching to @target on @branch_op instead of
 * throwing. */
1532 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1534 if (cfg->compile_aot) {
1535 int const_reg = alloc_preg (cfg);
1536 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1537 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1539 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1541 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
/*
 * mini_emit_castclass:
 *   Emit a full "castclass" check for the class in klass_reg (object in
 *   obj_reg, or -1 when there is no object, e.g. the recursive
 *   element-class check for arrays of arrays).  The visible code covers
 *   the array path — rank check, element-class check with special cases
 *   for object/Enum/enum element types, vector (SZARRAY) bounds check —
 *   and the non-array supertypes check at the bottom.
 *   NOTE(review): extraction dropped the outer if/else structure lines
 *   (e.g. the branch separating the array and non-array paths), so the
 *   nesting here must be read against upstream mono.
 */
1545 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1548 int rank_reg = alloc_preg (cfg);
1549 int eclass_reg = alloc_preg (cfg);
/* Ranks must match exactly or the cast fails. */
1551 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
1552 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1553 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1554 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1555 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
/* Element-class special cases: object, System.Enum's parent, and enum
 * element types need extra checks because enums and their underlying
 * integral types are cast-compatible as array elements. */
1556 if (klass->cast_class == mono_defaults.object_class) {
1557 int parent_reg = alloc_preg (cfg);
1558 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
1559 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1560 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1561 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1562 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1563 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1564 } else if (klass->cast_class == mono_defaults.enum_class) {
1565 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1566 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1567 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
/* General element type: recurse on the element class. */
1569 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1570 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
/* SZARRAY requires a vector (bounds == NULL); a multi-dim array of the
 * same rank-1 shape must not pass. */
1573 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1574 /* Check that the object is a vector too */
1575 int bounds_reg = alloc_preg (cfg);
1576 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
1577 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1578 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-array path: supertypes[idepth-1] must equal klass (throwing
 * variant of the isinst check above). */
1581 int idepth_reg = alloc_preg (cfg);
1582 int stypes_reg = alloc_preg (cfg);
1583 int stype = alloc_preg (cfg);
1585 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1586 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1587 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1588 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1590 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1591 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1592 mini_emit_class_check (cfg, stype, klass);
/*
 * mini_emit_memset:
 *   Emit inline IR that fills @size bytes at destreg+offset with @val
 *   (only val == 0 is supported, per the assert).  Small aligned sizes
 *   use a single store-immediate; otherwise a constant is materialized
 *   and stored out in descending granularity (8/4/2/1 bytes), with the
 *   wide stores gated on !NO_UNALIGNED_ACCESS.
 *   NOTE(review): the switch/case and loop head lines were dropped by
 *   extraction — the control structure around these stores is not
 *   visible here.
 */
1597 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1601 g_assert (val == 0);
/* Fast path: the whole fill fits in one aligned store-immediate. */
1606 if ((size <= 4) && (size <= align)) {
1609 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1612 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1615 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1617 #if SIZEOF_VOID_P == 8
1619 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* Slow path: load val into a register once, then store it repeatedly. */
1625 val_reg = alloc_preg (cfg);
1627 if (sizeof (gpointer) == 8)
1628 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1630 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
/* Unaligned destination: fall back to byte stores. */
1633 /* This could be optimized further if neccesary */
1635 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1642 #if !NO_UNALIGNED_ACCESS
1643 if (sizeof (gpointer) == 8) {
1645 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1650 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
1658 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1663 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1668 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1674 #endif /* DISABLE_JIT */
/*
 * mini_emit_memcpy:
 *   Emit inline IR copying @size bytes from srcreg+soffset to
 *   destreg+doffset via a temp register, widest-first (8/4/2/1 bytes),
 *   with wide copies gated on !NO_UNALIGNED_ACCESS; an unaligned copy
 *   degrades to byte loads/stores.
 *   NOTE(review): the surrounding loop/size-decrement lines were
 *   dropped by extraction; only the load/store pairs are visible.
 */
1677 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
/* Unaligned: copy one byte at a time. */
1685 /* This could be optimized further if neccesary */
1687 cur_reg = alloc_preg (cfg);
1688 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1689 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1696 #if !NO_UNALIGNED_ACCESS
1697 if (sizeof (gpointer) == 8) {
/* 64-bit: prefer 8-byte chunks. */
1699 cur_reg = alloc_preg (cfg);
1700 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1701 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
/* Then 4-, 2- and 1-byte remainders. */
1710 cur_reg = alloc_preg (cfg);
1711 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1712 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1718 cur_reg = alloc_preg (cfg);
1719 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1720 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1726 cur_reg = alloc_preg (cfg);
1727 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1728 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * ret_type_to_call_opcode:
 *   Map a method's return type to the IR call opcode family:
 *   plain CALL (int/ptr/object), VOIDCALL, LCALL (i8), FCALL (float),
 *   VCALL (valuetypes/typedbyref), each in _REG (calli) and
 *   CALLVIRT/virt variants.  Enums and generic instances re-dispatch on
 *   their underlying/container type.
 *   NOTE(review): case labels and the "goto handle_enum"-style
 *   re-dispatch lines are missing from this excerpt; the enum and
 *   GENERICINST assignments presumably loop back to the switch — TODO
 *   confirm against upstream.
 */
1738 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
/* byref returns are pointer-sized, so plain CALL (line 1741). */
1741 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1744 type = mini_get_basic_type_from_generic (gsctx, type);
1745 switch (type->type) {
1746 case MONO_TYPE_VOID:
1747 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
1750 case MONO_TYPE_BOOLEAN:
1753 case MONO_TYPE_CHAR:
1756 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1760 case MONO_TYPE_FNPTR:
1761 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1762 case MONO_TYPE_CLASS:
1763 case MONO_TYPE_STRING:
1764 case MONO_TYPE_OBJECT:
1765 case MONO_TYPE_SZARRAY:
1766 case MONO_TYPE_ARRAY:
1767 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1770 return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
1773 return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
1774 case MONO_TYPE_VALUETYPE:
/* Enums resolve to their underlying integral type. */
1775 if (type->data.klass->enumtype) {
1776 type = type->data.klass->enum_basetype;
1779 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1780 case MONO_TYPE_TYPEDBYREF:
1781 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1782 case MONO_TYPE_GENERICINST:
1783 type = &type->data.generic_class->container_class->byval_arg;
1786 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
/*
 * Verifier helper used by the IR builder: checks an evaluation-stack
 * entry against the static type of the store target.  Returns non-zero
 * on incompatibility.  NOTE(review): return statements, case labels and
 * some braces are missing from this excerpt (gaps in the embedded line
 * numbering).
 */
1792 * target_type_is_incompatible:
1793 * @cfg: MonoCompile context
1795 * Check that the item @arg on the evaluation stack can be stored
1796 * in the target type (can be a local, or field, etc).
1797 * The cfg arg can be used to check if we need verification or just
1800 * Returns: non-0 value if arg can't be stored on a target.
1803 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1805 MonoType *simple_type;
1808 if (target->byref) {
1809 /* FIXME: check that the pointed to types match */
/* Managed pointer on the stack: classes must agree with the target. */
1810 if (arg->type == STACK_MP)
1811 return arg->klass != mono_class_from_mono_type (target);
1812 if (arg->type == STACK_PTR)
1817 simple_type = mono_type_get_underlying_type (target);
1818 switch (simple_type->type) {
1819 case MONO_TYPE_VOID:
/* Integral targets accept I4 or native-int stack entries. */
1823 case MONO_TYPE_BOOLEAN:
1826 case MONO_TYPE_CHAR:
1829 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1833 /* STACK_MP is needed when setting pinned locals */
1834 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1839 case MONO_TYPE_FNPTR:
1840 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1843 case MONO_TYPE_CLASS:
1844 case MONO_TYPE_STRING:
1845 case MONO_TYPE_OBJECT:
1846 case MONO_TYPE_SZARRAY:
1847 case MONO_TYPE_ARRAY:
1848 if (arg->type != STACK_OBJ)
1850 /* FIXME: check type compatibility */
1854 if (arg->type != STACK_I8)
1859 if (arg->type != STACK_R8)
/* Value types: the stack entry's class must match exactly. */
1862 case MONO_TYPE_VALUETYPE:
1863 if (arg->type != STACK_VTYPE)
1865 klass = mono_class_from_mono_type (simple_type);
1866 if (klass != arg->klass)
1869 case MONO_TYPE_TYPEDBYREF:
1870 if (arg->type != STACK_VTYPE)
1872 klass = mono_class_from_mono_type (simple_type);
1873 if (klass != arg->klass)
1876 case MONO_TYPE_GENERICINST:
1877 if (mono_type_generic_inst_is_valuetype (simple_type)) {
1878 if (arg->type != STACK_VTYPE)
1880 klass = mono_class_from_mono_type (simple_type);
1881 if (klass != arg->klass)
1885 if (arg->type != STACK_OBJ)
1887 /* FIXME: check type compatibility */
/* Generic sharing: type variables are treated as references for now. */
1891 case MONO_TYPE_MVAR:
1892 /* FIXME: all the arguments must be references for now,
1893 * later look inside cfg and see if the arg num is
1894 * really a reference
1896 g_assert (cfg->generic_sharing_context);
1897 if (arg->type != STACK_OBJ)
1901 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
/*
 * check_call_signature:
 *   Validate the evaluation-stack types in @args against @sig before
 *   emitting a call; returns non-zero when they cannot be passed.
 *   The hasthis receiver must be an object/managed pointer/native int.
 *   NOTE(review): return statements, some case labels and the
 *   "goto handle_enum" style re-dispatch lines are missing from this
 *   excerpt.
 */
1907 * Prepare arguments for passing to a function call.
1908 * Return a non-zero value if the arguments can't be passed to the given
1910 * The type checks are not yet complete and some conversions may need
1911 * casts on 32 or 64 bit architectures.
1913 * FIXME: implement this using target_type_is_incompatible ()
1916 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
1918 MonoType *simple_type;
1922 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
1926 for (i = 0; i < sig->param_count; ++i) {
1927 if (sig->params [i]->byref) {
1928 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
1932 simple_type = sig->params [i];
/* Resolve shared-generic type variables before switching on the type. */
1933 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
1935 switch (simple_type->type) {
1936 case MONO_TYPE_VOID:
1941 case MONO_TYPE_BOOLEAN:
1944 case MONO_TYPE_CHAR:
1947 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
1953 case MONO_TYPE_FNPTR:
1954 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
1957 case MONO_TYPE_CLASS:
1958 case MONO_TYPE_STRING:
1959 case MONO_TYPE_OBJECT:
1960 case MONO_TYPE_SZARRAY:
1961 case MONO_TYPE_ARRAY:
1962 if (args [i]->type != STACK_OBJ)
1967 if (args [i]->type != STACK_I8)
1972 if (args [i]->type != STACK_R8)
1975 case MONO_TYPE_VALUETYPE:
/* Enums check as their underlying integral type. */
1976 if (simple_type->data.klass->enumtype) {
1977 simple_type = simple_type->data.klass->enum_basetype;
1980 if (args [i]->type != STACK_VTYPE)
1983 case MONO_TYPE_TYPEDBYREF:
1984 if (args [i]->type != STACK_VTYPE)
1987 case MONO_TYPE_GENERICINST:
1988 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
1992 g_error ("unknown type 0x%02x in check_call_signature",
/*
 * callvirt_to_call:
 *   Convert a *CALLVIRT opcode to its direct-call counterpart (used
 *   when the callee can be statically dispatched).
 *   NOTE(review): most case/return lines were dropped by extraction;
 *   only the VOIDCALLVIRT label and the unreachable default survive.
 */
2000 callvirt_to_call (int opcode)
2005 case OP_VOIDCALLVIRT:
2014 g_assert_not_reached ();
/* Convert a *CALLVIRT opcode to the *CALL_MEMBASE form used for
 * vtable/IMT-slot indirect calls. */
2021 callvirt_to_call_membase (int opcode)
2025 return OP_CALL_MEMBASE;
2026 case OP_VOIDCALLVIRT:
2027 return OP_VOIDCALL_MEMBASE;
2029 return OP_FCALL_MEMBASE;
2031 return OP_LCALL_MEMBASE;
2033 return OP_VCALL_MEMBASE;
2035 g_assert_not_reached ();
/*
 * emit_imt_argument:
 *   Pass the IMT discriminator for an interface call.  When the
 *   architecture has a dedicated IMT register, materialize the method
 *   (from imt_arg, an AOT patch, or a PCONST) and bind it to
 *   MONO_ARCH_IMT_REG as a call out-arg; otherwise defer to the
 *   arch-specific hook.  NOTE(review): else/brace lines missing in this
 *   excerpt.
 */
2041 #ifdef MONO_ARCH_HAVE_IMT
2043 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
2045 #ifdef MONO_ARCH_IMT_REG
2046 int method_reg = alloc_preg (cfg);
2049 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2050 } else if (cfg->compile_aot) {
2051 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
2054 MONO_INST_NEW (cfg, ins, OP_PCONST);
2055 ins->inst_p0 = call->method;
2056 ins->dreg = method_reg;
2057 MONO_ADD_INS (cfg->cbb, ins);
2060 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2062 mono_arch_emit_imt_argument (cfg, call, imt_arg);
/*
 * mono_patch_info_new:
 *   Allocate a MonoJumpInfo from @mp and fill in its target; the ip and
 *   type assignments fall in lines dropped by extraction.
 */
2067 static MonoJumpInfo *
2068 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2070 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2074 ji->data.target = target;
2079 inline static MonoInst*
2080 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args);
/*
 * mono_emit_call_args:
 *   Build a MonoCallInst for @sig/@args: pick the call opcode from the
 *   return type, allocate the return vreg (or an OP_OUTARG_VTRETADDR
 *   temp for struct returns), soft-float-convert R4 arguments, and let
 *   the backend lay out the out-args.  The caller adds the instruction
 *   to a bblock.  NOTE(review): excerpt has gaps (declarations,
 *   braces, iargs setup at 2146 missing context).
 */
2082 inline static MonoCallInst *
2083 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2084 MonoInst **args, int calli, int virtual)
2087 #ifdef MONO_ARCH_SOFT_FLOAT
2091 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2094 call->signature = sig;
2096 type_to_eval_stack_type ((cfg), sig->ret, &call->inst);
/* Struct return: route the result through a local and a VTRETADDR
 * pseudo-op (see comment below) instead of a plain dreg. */
2098 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2099 MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
2102 temp->backend.is_pinvoke = sig->pinvoke;
2105 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2106 * address of return value to increase optimization opportunities.
2107 * Before vtype decomposition, the dreg of the call ins itself represents the
2108 * fact the call modifies the return value. After decomposition, the call will
2109 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2110 * will be transformed into an LDADDR.
2112 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2113 loada->dreg = alloc_preg (cfg);
2114 loada->inst_p0 = temp;
2115 /* We reference the call too since call->dreg could change during optimization */
2116 loada->inst_p1 = call;
2117 MONO_ADD_INS (cfg->cbb, loada);
2119 call->inst.dreg = temp->dreg;
2121 call->vret_var = loada;
2122 } else if (!MONO_TYPE_IS_VOID (sig->ret))
2123 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2125 #ifdef MONO_ARCH_SOFT_FLOAT
2127 * If the call has a float argument, we would need to do an r8->r4 conversion using
2128 * an icall, but that cannot be done during the call sequence since it would clobber
2129 * the call registers + the stack. So we do it before emitting the call.
2131 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2133 MonoInst *in = call->args [i];
2135 if (i >= sig->hasthis)
2136 t = sig->params [i - sig->hasthis];
2138 t = &mono_defaults.int_class->byval_arg;
2139 t = mono_type_get_underlying_type (t);
2141 if (!t->byref && t->type == MONO_TYPE_R4) {
2142 MonoInst *iargs [1];
2146 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2148 /* The result will be in an int vreg */
2149 call->args [i] = conv;
/* Backend out-arg layout; record stack usage and that the cfg calls. */
2154 mono_arch_emit_call (cfg, call);
2156 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2157 cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * mono_emit_calli:
 *   Emit an indirect call through @addr (calli): the address vreg
 *   becomes sreg1 of a *CALL_REG instruction.
 */
2162 inline static MonoInst*
2163 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr)
2165 MonoCallInst *call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE);
2167 call->inst.sreg1 = addr->dreg;
2169 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2171 return (MonoInst*)call;
/* Indirect call that additionally passes an rgctx argument in the
 * dedicated MONO_ARCH_RGCTX_REG; asserts on architectures without one. */
2174 inline static MonoInst*
2175 mono_emit_rgctx_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *rgctx_arg)
2177 #ifdef MONO_ARCH_RGCTX_REG
/* Copy the rgctx into a fresh vreg so the reg-allocator can bind it to
 * the fixed hardware register at the call site. */
2182 rgctx_reg = mono_alloc_preg (cfg);
2183 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2185 call = (MonoCallInst*)mono_emit_calli (cfg, sig, args, addr);
2187 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2188 cfg->uses_rgctx_reg = TRUE;
2190 return (MonoInst*)call;
2192 g_assert_not_reached ();
/*
 * mono_emit_method_call_full:
 *   Central call-emission path.  Chooses between:
 *     - direct call (non-virtual, or FINAL/sealed virtual after a
 *       null check), with remoting-invoke-with-check wrappers for
 *       MarshalByRef/object receivers;
 *     - delegate Invoke via delegate->invoke_impl (CALL_MEMBASE);
 *     - interface dispatch through the IMT slot or the
 *       interface-offsets table before the vtable;
 *     - plain vtable-slot dispatch.
 *   @this non-NULL selects the virtual path; @imt_arg carries the IMT
 *   discriminator for generic-virtual/interface calls.
 *   NOTE(review): numerous lines are missing from this excerpt
 *   (brace/else lines, the slot_reg initialization, #endif lines);
 *   read structure against upstream mono before editing.
 */
2198 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2199 MonoInst **args, MonoInst *this, MonoInst *imt_arg)
2201 gboolean virtual = this != NULL;
2202 gboolean enable_for_aot = TRUE;
/* String ctors are called like normal methods but actually return the
 * string, so call them with a fixed-up signature. */
2205 if (method->string_ctor) {
2206 /* Create the real signature */
2207 /* FIXME: Cache these */
2208 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_full (cfg->mempool, sig);
2209 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2214 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual);
/* Transparent-proxy safety: route non-virtual calls on MBR/object
 * receivers through the remoting check wrapper. */
2216 if (this && sig->hasthis &&
2217 (method->klass->marshalbyref || method->klass == mono_defaults.object_class) &&
2218 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && !MONO_CHECK_THIS (this)) {
2219 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2221 call->method = method;
2223 call->inst.flags |= MONO_INST_HAS_METHOD;
2224 call->inst.inst_left = this;
2227 int vtable_reg, slot_reg, this_reg;
2229 this_reg = this->dreg;
/* Devirtualize when the target is statically known. */
2231 if ((!cfg->compile_aot || enable_for_aot) &&
2232 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2233 ((method->flags & METHOD_ATTRIBUTE_FINAL) &&
2234 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK))) {
2236 * the method is not virtual, we just need to ensure this is not null
2237 * and then we can call the method directly.
2239 if (method->klass->marshalbyref || method->klass == mono_defaults.object_class) {
2240 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2243 if (!method->string_ctor) {
2244 cfg->flags |= MONO_CFG_HAS_CHECK_THIS;
2245 MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, this_reg);
2246 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, this_reg);
2249 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2251 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2253 return (MonoInst*)call;
2256 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
2257 if ((method->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
2258 /* Make a call to delegate->invoke_impl */
2259 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2260 call->inst.inst_basereg = this_reg;
2261 call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2262 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2264 return (MonoInst*)call;
/* Virtual but effectively sealed: null-check then call directly. */
2268 if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
2269 ((method->flags & METHOD_ATTRIBUTE_FINAL) ||
2270 (method->klass && method->klass->flags & TYPE_ATTRIBUTE_SEALED))) {
2272 * the method is virtual, but we can statically dispatch since either
2273 * it's class or the method itself are sealed.
2274 * But first we need to ensure it's not a null reference.
2276 cfg->flags |= MONO_CFG_HAS_CHECK_THIS;
2277 MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, this_reg);
2278 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, this_reg);
2280 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2281 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2283 return (MonoInst*)call;
/* True virtual dispatch: load the vtable and index a slot. */
2286 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2288 vtable_reg = alloc_preg (cfg);
2289 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2290 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2292 #ifdef MONO_ARCH_HAVE_IMT
/* Interface call via IMT: slots sit before the vtable. */
2294 guint32 imt_slot = mono_method_get_imt_slot (method);
2295 emit_imt_argument (cfg, call, imt_arg);
2296 slot_reg = vtable_reg;
2297 call->inst.inst_offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
/* No IMT: go through the per-interface offset table. */
2300 if (slot_reg == -1) {
2301 slot_reg = alloc_preg (cfg);
2302 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2303 call->inst.inst_offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
/* Class virtual call: ordinary vtable slot. */
2306 slot_reg = vtable_reg;
2307 call->inst.inst_offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
2308 (mono_method_get_vtable_index (method) * SIZEOF_VOID_P);
/* imt_arg on a non-interface call implies a generic virtual method. */
2310 g_assert (mono_method_signature (method)->generic_param_count);
2311 emit_imt_argument (cfg, call, imt_arg);
2315 call->inst.sreg1 = slot_reg;
2316 call->virtual = TRUE;
2319 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2321 return (MonoInst*)call;
/*
 * mono_emit_rgctx_method_call_full:
 *   Like mono_emit_method_call_full, but additionally binds
 *   @vtable_arg to the dedicated rgctx register for generic-shared
 *   callees.  NOTE(review): the guard around the rgctx block and the
 *   #else path are missing from this excerpt.
 */
2325 mono_emit_rgctx_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2326 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *vtable_arg)
2333 #ifdef MONO_ARCH_RGCTX_REG
2334 rgctx_reg = mono_alloc_preg (cfg);
2335 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
2340 ins = mono_emit_method_call_full (cfg, method, sig, args, this, imt_arg);
2342 call = (MonoCallInst*)ins;
2344 #ifdef MONO_ARCH_RGCTX_REG
2345 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2346 cfg->uses_rgctx_reg = TRUE;
/* Convenience wrapper: call @method with its own signature, no IMT arg. */
2355 static inline MonoInst*
2356 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2358 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), args, this, NULL);
/* Emit a direct call to a native function @func (address set on lines
 * dropped by extraction). */
2362 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2369 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE);
2372 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2374 return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 *   Emit a call to a registered JIT icall identified by its C function
 *   address, going through the icall's wrapper.
 */
2377 inline static MonoInst*
2378 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2380 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2384 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2388 * mono_emit_abs_call:
2390 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2392 inline static MonoInst*
2393 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2394 MonoMethodSignature *sig, MonoInst **args)
2396 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
/* The patch-info pointer itself is used as the call address; the ABS
 * patch resolver recognizes it via the abs_patches table. */
2400 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
2403 if (cfg->abs_patches == NULL)
2404 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2405 g_hash_table_insert (cfg->abs_patches, ji, ji);
2406 ins = mono_emit_native_call (cfg, ji, sig, args);
2407 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * get_memcpy_method:
 *   Lazily resolve and cache corlib's internal String.memcpy helper;
 *   aborts if running against an old corlib that lacks it.
 */
2412 get_memcpy_method (void)
2414 static MonoMethod *memcpy_method = NULL;
2415 if (!memcpy_method) {
2416 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2418 g_error ("Old corlib found. Install a new one");
2420 return memcpy_method;
/*
 * mini_emit_stobj:
 *   Copy a valuetype of @klass from *src->dreg to *dest->dreg.  Small
 *   copies (<= 5 pointers, with MONO_OPT_INTRINS) are inlined via
 *   mini_emit_memcpy; larger ones call the managed memcpy helper.
 *   @native selects native (marshalled) vs managed size/alignment.
 *   NOTE(review): iargs[0]/iargs[1] setup lines are missing from this
 *   excerpt.
 */
2424 * Emit code to copy a valuetype of type @klass whose address is stored in
2425 * @src->dreg to memory whose address is stored at @dest->dreg.
2428 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
2430 MonoInst *iargs [3];
2433 MonoMethod *memcpy_method;
2437 * This check breaks with spilled vars... need to handle it during verification anyway.
2438 * g_assert (klass && klass == src->klass && klass == dest->klass);
2442 n = mono_class_native_size (klass, &align);
2444 n = mono_class_value_size (klass, &align);
2446 if ((cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
2447 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
2448 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
2452 EMIT_NEW_ICONST (cfg, iargs [2], n);
2454 memcpy_method = get_memcpy_method ();
2455 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *   Lazily resolve and cache corlib's internal String.memset helper;
 *   aborts on an old corlib.
 */
2460 get_memset_method (void)
2462 static MonoMethod *memset_method = NULL;
2463 if (!memset_method) {
2464 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
2466 g_error ("Old corlib found. Install a new one");
2468 return memset_method;
/*
 * mini_emit_initobj:
 *   Zero-initialize a valuetype of @klass at *dest->dreg (CIL initobj).
 *   Small sizes (<= 5 pointers) are inlined via mini_emit_memset; larger
 *   ones call the managed memset helper with (dest, 0, n).
 *   NOTE(review): iargs[0] setup and early return lines are missing
 *   from this excerpt.
 */
2472 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
2474 MonoInst *iargs [3];
2477 MonoMethod *memset_method;
2479 /* FIXME: Optimize this for the case when dest is an LDADDR */
2481 mono_class_init (klass);
2482 n = mono_class_value_size (klass, &align);
2484 if (n <= sizeof (gpointer) * 5) {
2485 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
2488 memset_method = get_memset_method ();
2490 EMIT_NEW_ICONST (cfg, iargs [1], 0);
2491 EMIT_NEW_ICONST (cfg, iargs [2], n);
2492 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 *   Produce an instruction yielding the runtime generic context for a
 *   shared method.  Sources, by case:
 *     - method context used: load the MRGCTX from the hidden
 *       vtable-var local;
 *     - static or valuetype method: load the vtable var (and deref to
 *       class_vtable when the method has a method inst);
 *     - otherwise: read the vtable from `this` (MonoObject->vtable).
 *   NOTE(review): return statements and several assignments fall in
 *   lines dropped by extraction.
 */
2497 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
2499 MonoInst *this = NULL;
2501 g_assert (cfg->generic_sharing_context);
2503 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
2504 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
2505 !method->klass->valuetype)
2506 EMIT_NEW_ARGLOAD (cfg, this, 0);
2508 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
2509 MonoInst *mrgctx_loc, *mrgctx_var;
2512 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
2514 mrgctx_loc = mono_get_vtable_var (cfg);
2515 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
2518 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
2519 MonoInst *vtable_loc, *vtable_var;
2523 vtable_loc = mono_get_vtable_var (cfg);
2524 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
/* The hidden arg holds an MRGCTX here; deref to its class vtable. */
2526 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
2527 MonoInst *mrgctx_var = vtable_var;
2530 vtable_reg = alloc_preg (cfg);
2531 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
2532 vtable_var->type = STACK_PTR;
/* Instance method: read the vtable straight off `this`. */
2538 int vtable_reg, res_reg;
2540 vtable_reg = alloc_preg (cfg);
2541 res_reg = alloc_preg (cfg);
2542 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 *
 * Allocate (from @mp) and fill a MonoJumpInfoRgctxEntry describing an RGCTX
 * slot: the requesting @method, whether the lookup goes through the MRGCTX,
 * the patch type/data identifying the target, and the kind of info wanted.
 * NOTE(review): the 'return res;' line is elided from this listing.
 */
2547 static MonoJumpInfoRgctxEntry *
2548 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, int info_type)
2550 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
2551 res->method = method;
2552 res->in_mrgctx = in_mrgctx;
2553 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
2554 res->data->type = patch_type;
2555 res->data->data.target = patch_data;
2556 res->info_type = info_type;
/*
 * emit_rgctx_fetch:
 *
 * Emit a call to the lazy RGCTX fetch trampoline which resolves @entry
 * using the runtime generic context in @rgctx.
 */
2561 static inline MonoInst*
2562 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
2564 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 *
 * Emit IR that fetches, through the RGCTX, the @rgctx_type info (e.g. the
 * vtable) for @klass in the current shared-code context.
 */
2568 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
2569 MonoClass *klass, int rgctx_type)
2571 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
2572 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2574 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_method:
 *
 * Same as emit_get_rgctx_klass (), but the RGCTX entry describes method
 * @cmethod (MONO_PATCH_INFO_METHODCONST).
 */
2578 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
2579 MonoMethod *cmethod, int rgctx_type)
2581 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
2582 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2584 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *
 * Same as emit_get_rgctx_klass (), but the RGCTX entry describes field
 * @field (MONO_PATCH_INFO_FIELD).
 */
2588 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
2589 MonoClassField *field, int rgctx_type)
2591 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
2592 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2594 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * mini_emit_check_array_type:
 *
 * Emit a runtime check that @obj is exactly an instance of @array_class,
 * throwing ArrayTypeMismatchException otherwise (used for stelem checks).
 * The comparison strategy depends on the compilation mode:
 *   - MONO_OPT_SHARED: compare MonoVTable->klass against the class;
 *   - shared generic code: compare the vtable against one fetched via RGCTX;
 *   - otherwise: compare the vtable pointer against the known vtable
 *     (as an AOT constant or an immediate).
 */
2598 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
2600 int vtable_reg = alloc_preg (cfg);
2601 int context_used = 0;
2603 if (cfg->generic_sharing_context)
2604 context_used = mono_class_check_context_used (array_class);
2606 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
2608 if (cfg->opt & MONO_OPT_SHARED) {
2609 int class_reg = alloc_preg (cfg);
2610 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2611 if (cfg->compile_aot) {
/* AOT cannot embed the class pointer directly; load it via a patch. */
2612 int klass_reg = alloc_preg (cfg);
2613 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
2614 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
2616 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
2618 } else if (context_used) {
2619 MonoInst *vtable_ins;
2621 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
2622 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
2624 if (cfg->compile_aot) {
2625 int vt_reg = alloc_preg (cfg);
2626 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, mono_class_vtable (cfg->domain, array_class));
2627 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
2629 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, mono_class_vtable (cfg->domain, array_class));
2633 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
/*
 * save_cast_details:
 *
 * When --debug=casts is enabled, record the source class (from the object's
 * vtable) and the target @klass in the JIT TLS area so a failing cast can
 * produce a detailed error message.  No-op otherwise.
 */
2637 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg)
2639 if (mini_get_debug_options ()->better_cast_details) {
2640 int to_klass_reg = alloc_preg (cfg);
2641 int vtable_reg = alloc_preg (cfg);
2642 int klass_reg = alloc_preg (cfg);
2643 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
2646 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
2650 MONO_ADD_INS (cfg->cbb, tls_get);
/* class_cast_from = obj->vtable->klass; class_cast_to = klass */
2651 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2652 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2654 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
2655 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
2656 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
/*
 * reset_cast_details:
 *
 * Clear the cast-details slot saved by save_cast_details () once the cast
 * has succeeded (clearing the 'from' field is sufficient).
 */
2661 reset_cast_details (MonoCompile *cfg)
2663 /* Reset the variables holding the cast details */
2664 if (mini_get_debug_options ()->better_cast_details) {
2665 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
2667 MONO_ADD_INS (cfg->cbb, tls_get);
2668 /* It is enough to reset the from field */
2669 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
/*
 * handle_unbox_nullable:
 *
 * Unbox @val as Nullable<T> by calling the type's Unbox method.  Under
 * generic sharing (@context_used != 0) the method address is fetched from
 * the RGCTX and called indirectly; otherwise a direct call is emitted.
 */
2674 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
2675 * generic code is generated.
2678 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
2680 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
2683 MonoInst *rgctx, *addr;
2685 /* FIXME: What if the class is shared? We might not
2686 have to get the address of the method from the
2688 addr = emit_get_rgctx_method (cfg, context_used, method,
2689 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
2691 rgctx = emit_get_rgctx (cfg, method, context_used);
2693 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
2695 return mono_emit_method_call (cfg, method, &val, NULL);
/*
 * handle_unbox:
 *
 * Emit the CIL 'unbox' check for sp[0] against @klass: verify the object's
 * vtable rank is 0 and its element class matches klass->element_class
 * (via RGCTX under generic sharing, or a direct class check otherwise),
 * then compute the address of the boxed payload (obj + sizeof (MonoObject)).
 * NOTE(review): listing is elided; some branch structure is missing.
 */
2700 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
2704 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
2705 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
2706 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
2707 int rank_reg = alloc_dreg (cfg ,STACK_I4);
2709 obj_reg = sp [0]->dreg;
2710 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2711 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
2713 /* FIXME: generics */
2714 g_assert (klass->rank == 0);
/* Arrays can never be unboxed: rank must be 0. */
2717 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
2718 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
2720 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2721 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
/* Shared generic code: compare against the element class from the RGCTX. */
2724 MonoInst *element_class;
2726 /* This assertion is from the unboxcast insn */
2727 g_assert (klass->rank == 0);
2729 element_class = emit_get_rgctx_klass (cfg, context_used,
2730 klass->element_class, MONO_RGCTX_INFO_KLASS);
2732 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
2733 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-shared path: direct class check with cast-details bookkeeping. */
2735 save_cast_details (cfg, klass->element_class, obj_reg);
2736 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
2737 reset_cast_details (cfg);
/* Result: pointer to the unboxed data, just past the object header. */
2740 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_PTR), obj_reg, sizeof (MonoObject));
2741 MONO_ADD_INS (cfg->cbb, add);
2742 add->type = STACK_MP;
/*
 * handle_alloc:
 *
 * Emit IR to allocate an object of @klass (@for_box distinguishes box
 * allocations for the GC's managed allocator).  Chooses between:
 *   - mono_object_new (domain, klass) under MONO_OPT_SHARED;
 *   - a corlib-token helper for out-of-line AOT code;
 *   - the GC's managed allocator method, when available;
 *   - an allocation ftn from mono_class_get_allocation_ftn (), which may
 *     take the instance size in words (pass_lw) or just the vtable.
 * NOTE(review): listing is elided; some declarations/branches are missing.
 */
2749 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box)
2751 MonoInst *iargs [2];
2754 if (cfg->opt & MONO_OPT_SHARED) {
2755 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
2756 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
2758 alloc_ftn = mono_object_new;
2759 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib) {
2760 /* This happens often in argument checking code, eg. throw new FooException... */
2761 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
2762 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
2763 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
2765 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2766 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
2769 if (managed_alloc) {
2770 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
2771 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
2773 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* Some allocators want the rounded-up instance size in pointer words. */
2775 guint32 lw = vtable->klass->instance_size;
2776 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
2777 EMIT_NEW_ICONST (cfg, iargs [0], lw);
2778 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
2781 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
2785 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/*
 * handle_alloc_from_inst:
 *
 * Like handle_alloc (), but for shared generic code where the vtable is not
 * known at compile time: @data_inst supplies it at run time.  Uses
 * mono_object_new under MONO_OPT_SHARED, otherwise
 * mono_object_new_specific (vtable).  The managed allocator cannot be used
 * here (see the FIXME below), so managed_alloc stays NULL.
 * NOTE(review): listing is elided; some lines are missing.
 */
2789 handle_alloc_from_inst (MonoCompile *cfg, MonoClass *klass, MonoInst *data_inst,
2792 MonoInst *iargs [2];
2793 MonoMethod *managed_alloc = NULL;
2797 FIXME: we cannot get managed_alloc here because we can't get
2798 the class's vtable (because it's not a closed class)
2800 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2801 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
2804 if (cfg->opt & MONO_OPT_SHARED) {
2805 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
2806 iargs [1] = data_inst;
2807 alloc_ftn = mono_object_new;
2809 if (managed_alloc) {
2810 iargs [0] = data_inst;
2811 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
2814 iargs [0] = data_inst;
2815 alloc_ftn = mono_object_new_specific;
2818 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/*
 * handle_box:
 *
 * Emit the CIL 'box' of @val as @klass: Nullable<T> delegates to the
 * type's Box method; otherwise allocate the object and store the value
 * into its payload (offset sizeof (MonoObject)).
 * NOTE(review): the trailing 'return alloc;' is elided from this listing.
 */
2822 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass)
2824 MonoInst *alloc, *ins;
2826 if (mono_class_is_nullable (klass)) {
2827 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
2828 return mono_emit_method_call (cfg, method, &val, NULL);
2831 alloc = handle_alloc (cfg, klass, TRUE);
2833 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * handle_box_from_inst:
 *
 * Shared-generic-code variant of handle_box (): the Nullable<T> Box method
 * is reached indirectly through the RGCTX, and allocation goes through
 * handle_alloc_from_inst () with the runtime-supplied vtable @data_inst.
 */
2839 handle_box_from_inst (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used, MonoInst *data_inst)
2841 MonoInst *alloc, *ins;
2843 if (mono_class_is_nullable (klass)) {
2844 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
2845 /* FIXME: What if the class is shared? We might not
2846 have to get the method address from the RGCTX. */
2847 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
2848 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
2849 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2851 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
2853 alloc = handle_alloc_from_inst (cfg, klass, data_inst, TRUE);
2855 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * handle_castclass:
 *
 * Emit the CIL 'castclass' of @src to @klass, throwing
 * InvalidCastException on mismatch.  NULL objects pass.  Interfaces use
 * mini_emit_iface_cast (); sealed non-array classes compare the vtable (or
 * class) pointer directly; the general case goes through
 * mini_emit_castclass ().  Cast details are recorded for --debug=casts.
 */
2862 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
2864 MonoBasicBlock *is_null_bb;
2865 int obj_reg = src->dreg;
2866 int vtable_reg = alloc_preg (cfg);
2868 NEW_BBLOCK (cfg, is_null_bb);
/* null always casts successfully */
2870 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
2871 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
2873 save_cast_details (cfg, klass, obj_reg);
2875 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2876 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2877 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
2879 int klass_reg = alloc_preg (cfg);
2881 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed class fast path: an exact vtable/class pointer compare suffices. */
2883 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
2884 /* the remoting code is broken, access the class for now */
2886 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
2887 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
2889 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2890 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
2892 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* General case: hierarchy walk in mini_emit_castclass (). */
2894 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2895 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, is_null_bb);
2899 MONO_START_BB (cfg, is_null_bb);
2901 reset_cast_details (cfg);
/*
 * handle_isinst:
 *
 * Emit the CIL 'isinst' test of @src against @klass.  The result register
 * is pre-loaded with the object (so the success paths can fall through to
 * is_null_bb), and set to 0 in false_bb on failure.  Handles interfaces,
 * arrays (rank + element-class checks, with special cases for
 * object/enum-related element types), Nullable, sealed classes (exact
 * vtable/class compare) and the general hierarchy walk.
 * NOTE(review): listing is elided; some branch structure is missing.
 */
2907 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
2910 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
2911 int obj_reg = src->dreg;
2912 int vtable_reg = alloc_preg (cfg);
2913 int res_reg = alloc_preg (cfg);
2915 NEW_BBLOCK (cfg, is_null_bb);
2916 NEW_BBLOCK (cfg, false_bb);
2917 NEW_BBLOCK (cfg, end_bb);
2919 /* Do the assignment at the beginning, so the other assignment can be if converted */
2920 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
2921 ins->type = STACK_OBJ;
/* null is never an instance of anything: result stays the object (null). */
2924 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
2925 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
2927 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2928 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2929 /* the is_null_bb target simply copies the input register to the output */
2930 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
2932 int klass_reg = alloc_preg (cfg);
2934 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Array case: the rank must match, then the element class is compared. */
2937 int rank_reg = alloc_preg (cfg);
2938 int eclass_reg = alloc_preg (cfg);
2940 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
2941 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
2942 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
2943 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2944 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
/* Special element-class cases mirror the runtime's array variance rules. */
2945 if (klass->cast_class == mono_defaults.object_class) {
2946 int parent_reg = alloc_preg (cfg);
2947 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
2948 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
2949 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
2950 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
2951 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
2952 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
2953 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
2954 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
2955 } else if (klass->cast_class == mono_defaults.enum_class) {
2956 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
2957 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
2958 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
2959 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
2961 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
2962 /* Check that the object is a vector too */
2963 int bounds_reg = alloc_preg (cfg);
2964 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
2965 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
2966 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
2969 /* the is_null_bb target simply copies the input register to the output */
2970 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
2972 } else if (mono_class_is_nullable (klass)) {
2973 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2974 /* the is_null_bb target simply copies the input register to the output */
2975 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
/* Sealed class fast path: exact vtable/class pointer compare. */
2977 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
2978 /* the remoting code is broken, access the class for now */
2980 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
2981 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
2983 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2984 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
2986 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
2987 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
2989 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2990 /* the is_null_bb target simply copies the input register to the output */
2991 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, is_null_bb);
/* false_bb: not an instance -> result is 0; success paths keep the object. */
2996 MONO_START_BB (cfg, false_bb);
2998 MONO_EMIT_NEW_ICONST (cfg, res_reg, 0);
2999 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3001 MONO_START_BB (cfg, is_null_bb);
3003 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *
 * Remoting-aware isinst (used for transparent proxies).  Returns an I4:
 * 0 = instance of @klass, 1 = not an instance, 2 = proxy whose type cannot
 * be determined at this point.  Checks custom_type_info / remote_class on
 * MonoTransparentProxy objects to decide.
 * NOTE(review): listing is elided; some branch structure is missing.
 */
3009 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3011 /* This opcode takes as input an object reference and a class, and returns:
3012 0) if the object is an instance of the class,
3013 1) if the object is not instance of the class,
3014 2) if the object is a proxy whose type cannot be determined */
3017 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
3018 int obj_reg = src->dreg;
3019 int dreg = alloc_ireg (cfg);
3021 int klass_reg = alloc_preg (cfg);
3023 NEW_BBLOCK (cfg, true_bb);
3024 NEW_BBLOCK (cfg, false_bb);
3025 NEW_BBLOCK (cfg, false2_bb);
3026 NEW_BBLOCK (cfg, end_bb);
3027 NEW_BBLOCK (cfg, no_proxy_bb);
/* null -> not an instance (result 1). */
3029 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3030 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
3032 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3033 NEW_BBLOCK (cfg, interface_fail_bb);
3035 tmp_reg = alloc_preg (cfg);
3036 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3037 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
/* Interface test failed: may still be an undetermined proxy (result 2). */
3038 MONO_START_BB (cfg, interface_fail_bb);
3039 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3041 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
3043 tmp_reg = alloc_preg (cfg);
3044 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3045 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3046 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
/* Non-interface case: detect transparent proxies before the class walk. */
3048 tmp_reg = alloc_preg (cfg);
3049 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3050 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3052 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3053 tmp_reg = alloc_preg (cfg);
3054 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3055 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3057 tmp_reg = alloc_preg (cfg);
3058 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3059 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3060 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3062 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
3063 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
3065 MONO_START_BB (cfg, no_proxy_bb);
3067 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
/* Merge blocks: materialize 1, 2 or 0 in dreg. */
3070 MONO_START_BB (cfg, false_bb);
3072 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3073 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3075 MONO_START_BB (cfg, false2_bb);
3077 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
3078 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3080 MONO_START_BB (cfg, true_bb);
3082 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3084 MONO_START_BB (cfg, end_bb);
/* Wrap the result register into an I4 instruction for the stack. */
3087 MONO_INST_NEW (cfg, ins, OP_ICONST);
3089 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *
 * Remoting-aware castclass.  Returns an I4: 0 = instance of @klass,
 * 1 = proxy whose type cannot be determined; otherwise an
 * InvalidCastException is thrown.  Mirrors handle_cisinst () but fails
 * hard instead of returning "not an instance".
 * NOTE(review): listing is elided; some branch structure is missing.
 */
3095 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3097 /* This opcode takes as input an object reference and a class, and returns:
3098 0) if the object is an instance of the class,
3099 1) if the object is a proxy whose type cannot be determined
3100 an InvalidCastException exception is thrown otherwhise*/
3103 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
3104 int obj_reg = src->dreg;
3105 int dreg = alloc_ireg (cfg);
3106 int tmp_reg = alloc_preg (cfg);
3107 int klass_reg = alloc_preg (cfg);
3109 NEW_BBLOCK (cfg, end_bb);
3110 NEW_BBLOCK (cfg, ok_result_bb);
/* null casts successfully (result 0). */
3112 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3113 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
3115 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3116 NEW_BBLOCK (cfg, interface_fail_bb);
3118 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3119 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
/* Interface test failed: only an undetermined proxy may survive (result 1). */
3120 MONO_START_BB (cfg, interface_fail_bb);
3121 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3123 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
3125 tmp_reg = alloc_preg (cfg);
3126 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3127 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3128 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
3130 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3131 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Non-interface case: detect transparent proxies before the class walk. */
3134 NEW_BBLOCK (cfg, no_proxy_bb);
3136 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3137 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3138 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3140 tmp_reg = alloc_preg (cfg);
3141 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3142 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3144 tmp_reg = alloc_preg (cfg);
3145 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3146 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3147 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3149 NEW_BBLOCK (cfg, fail_1_bb);
3151 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
3153 MONO_START_BB (cfg, fail_1_bb);
3155 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3156 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3158 MONO_START_BB (cfg, no_proxy_bb);
/* Not a proxy: a normal castclass which throws on failure. */
3160 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
3163 MONO_START_BB (cfg, ok_result_bb);
3165 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3167 MONO_START_BB (cfg, end_bb);
/* Wrap the result register into an I4 instruction for the stack. */
3170 MONO_INST_NEW (cfg, ins, OP_ICONST);
3172 ins->type = STACK_I4;
/*
 * handle_delegate_ctor:
 *
 * Emit an inlined delegate constructor for delegate class @klass bound to
 * @method with target object @target: allocate the delegate, store the
 * target (skipped when provably NULL), the method, an optional per-domain
 * compiled-code slot (JIT only, non-dynamic methods), and the invoke
 * trampoline.  The checks normally done by mono_delegate_ctor () are left
 * to the delegate trampoline.
 * NOTE(review): listing is elided; 'return obj;' and some decls are missing.
 */
3177 static G_GNUC_UNUSED MonoInst*
3178 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method)
3180 gpointer *trampoline;
3181 MonoInst *obj, *method_ins, *tramp_ins;
3185 obj = handle_alloc (cfg, klass, FALSE);
3187 /* Inline the contents of mono_delegate_ctor */
3189 /* Set target field */
3190 /* Optimize away setting of NULL target */
3191 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0))
3192 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
3194 /* Set method field */
3195 EMIT_NEW_METHODCONST (cfg, method_ins, method);
3196 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
3199 * To avoid looking up the compiled code belonging to the target method
3200 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
3201 * store it, and we fill it after the method has been compiled.
3203 if (!cfg->compile_aot && !method->dynamic) {
3204 MonoInst *code_slot_ins;
/* The method_code_hash maps method -> code slot; created under the
 * domain lock on first use. */
3206 domain = mono_domain_get ();
3207 mono_domain_lock (domain);
3208 if (!domain_jit_info (domain)->method_code_hash)
3209 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
3210 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
3212 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
3213 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
3215 mono_domain_unlock (domain);
3217 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
3218 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
3221 /* Set invoke_impl field */
3222 if (cfg->compile_aot) {
3223 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, klass);
3225 trampoline = mono_create_delegate_trampoline (klass);
3226 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
3228 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
3230 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 * Emit the allocation of a multi-dimensional array of the given @rank via
 * the vararg array-new icall wrapper; marks the method as having varargs.
 */
3236 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
3238 MonoJitICallInfo *info;
3240 /* Need to register the icall so it gets an icall wrapper */
3241 info = mono_get_array_new_va_icall (rank);
3243 cfg->flags |= MONO_CFG_HAS_VARARGS;
3245 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
3246 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * mono_emit_load_got_addr:
 *
 * Emit an OP_LOAD_GOTADDR into the entry basic block to initialize the GOT
 * variable, plus a dummy use in the exit block so liveness analysis keeps
 * the variable alive for the whole method.  No-op if there is no GOT var
 * or it was already allocated.
 */
3250 mono_emit_load_got_addr (MonoCompile *cfg)
3252 MonoInst *getaddr, *dummy_use;
3254 if (!cfg->got_var || cfg->got_var_allocated)
3257 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
3258 getaddr->dreg = cfg->got_var->dreg;
3260 /* Add it to the start of the first bblock */
3261 if (cfg->bb_entry->code) {
3262 getaddr->next = cfg->bb_entry->code;
3263 cfg->bb_entry->code = getaddr;
3266 MONO_ADD_INS (cfg->bb_entry, getaddr);
3268 cfg->got_var_allocated = TRUE;
3271 * Add a dummy use to keep the got_var alive, since real uses might
3272 * only be generated by the back ends.
3273 * Add it to end_bblock, so the variable's lifetime covers the whole
3275 * It would be better to make the usage of the got var explicit in all
3276 * cases when the backend needs it (i.e. calls, throw etc.), so this
3277 * wouldn't be needed.
3279 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
3280 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Cached inline size limit, read once from MONO_INLINELIMIT (default
 * INLINE_LENGTH_LIMIT). */
3283 static int inline_limit;
3284 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 * Decide whether @method may be inlined into the method being compiled.
 * Rejects: generic sharing, runtime/icall/noinline/synchronized/pinvoke
 * methods, MarshalByRef classes, methods with exception clauses or without
 * a header, bodies over the inline limit, classes whose cctor would have to
 * run (unless it can be run right away), methods with declarative security,
 * and (soft-float) methods taking or returning R4.
 * NOTE(review): listing is elided; return statements/braces are missing.
 */
3287 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
3289 MonoMethodHeader *header = mono_method_get_header (method);
3291 #ifdef MONO_ARCH_SOFT_FLOAT
3292 MonoMethodSignature *sig = mono_method_signature (method);
3296 if (cfg->generic_sharing_context)
3299 #ifdef MONO_ARCH_HAVE_LMF_OPS
/* With LMF ops, some icalls/pinvokes can still be inlined cheaply. */
3300 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3301 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
3302 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
3306 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_RUNTIME) ||
3307 (method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3308 (method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
3309 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
3310 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL) ||
3311 (method->klass->marshalbyref) ||
3312 !header || header->num_clauses)
3315 /* also consider num_locals? */
3316 /* Do the size check early to avoid creating vtables */
3317 if (!inline_limit_inited) {
3318 if (getenv ("MONO_INLINELIMIT"))
3319 inline_limit = atoi (getenv ("MONO_INLINELIMIT"));
3321 inline_limit = INLINE_LENGTH_LIMIT;
3322 inline_limit_inited = TRUE;
3324 if (header->code_size >= inline_limit)
3328 * if we can initialize the class of the method right away, we do,
3329 * otherwise we don't allow inlining if the class needs initialization,
3330 * since it would mean inserting a call to mono_runtime_class_init()
3331 * inside the inlined code
3333 if (!(cfg->opt & MONO_OPT_SHARED)) {
3334 if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
3335 if (cfg->run_cctors && method->klass->has_cctor) {
3336 if (!method->klass->runtime_info)
3337 /* No vtable created yet */
3339 vtable = mono_class_vtable (cfg->domain, method->klass);
3342 /* This makes so that inline cannot trigger */
3343 /* .cctors: too many apps depend on them */
3344 /* running with a specific order... */
3345 if (! vtable->initialized)
3347 mono_runtime_class_init (vtable);
3349 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
3350 if (!method->klass->runtime_info)
3351 /* No vtable created yet */
3353 vtable = mono_class_vtable (cfg->domain, method->klass);
3356 if (!vtable->initialized)
3361 * If we're compiling for shared code
3362 * the cctor will need to be run at aot method load time, for example,
3363 * or at the end of the compilation of the inlining method.
3365 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
3370 * CAS - do not inline methods with declarative security
3371 * Note: this has to be before any possible return TRUE;
3373 if (mono_method_has_declsec (method))
3376 #ifdef MONO_ARCH_SOFT_FLOAT
/* Soft-float cannot inline methods using R4 in the signature. */
3378 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
3380 for (i = 0; i < sig->param_count; ++i)
3381 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/*
 * mini_field_access_needs_cctor_run:
 *
 * Return whether a static field access from @method requires emitting a
 * class-initialization check for @vtable's class.  Not needed when the
 * vtable is already initialized (JIT only), the class is BeforeFieldInit,
 * no cctor run is needed, or the access is from an instance method of the
 * same class (the cctor already ran before the call).
 * NOTE(review): the return statements are elided from this listing.
 */
3389 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoVTable *vtable)
3391 if (vtable->initialized && !cfg->compile_aot)
3394 if (vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
3397 if (!mono_class_needs_cctor_run (vtable->klass, method))
3400 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (vtable->klass == method->klass))
3401 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 * Emit the address of element @index of single-dimensional array @arr of
 * element class @klass: bounds-check, then compute
 * arr + index * elem_size + offsetof (MonoArray, vector).  On x86/amd64 a
 * single LEA is used for power-of-two element sizes; on 64-bit the 32-bit
 * index is sign-extended first.
 */
3408 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index)
3412 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
3414 mono_class_init (klass);
3415 size = mono_class_array_element_size (klass);
3417 mult_reg = alloc_preg (cfg);
3418 array_reg = arr->dreg;
3419 index_reg = index->dreg;
3421 #if SIZEOF_VOID_P == 8
3422 /* The array reg is 64 bits but the index reg is only 32 */
3423 index2_reg = alloc_preg (cfg);
3424 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
3426 index2_reg = index_reg;
3429 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
3431 #if defined(__i386__) || defined(__x86_64__)
/* Fast path: base + index << log2(size) + vector offset in one LEA. */
3432 if (size == 1 || size == 2 || size == 4 || size == 8) {
3433 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
3435 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
3436 ins->type = STACK_PTR;
/* Generic path: explicit multiply and adds. */
3442 add_reg = alloc_preg (cfg);
3444 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
3445 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
3446 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3447 ins->type = STACK_PTR;
3448 MONO_ADD_INS (cfg->cbb, ins);
3453 #ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *   Emit IR computing the address of element [index1, index2] of a
 *   two-dimensional array, with a per-dimension bounds check against the
 *   MonoArrayBounds records.  Only compiled when the architecture has a
 *   native multiply (guard above), since the address math needs OP_PMUL.
 */
3455 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
3457 int bounds_reg = alloc_preg (cfg);
3458 int add_reg = alloc_preg (cfg);
3459 int mult_reg = alloc_preg (cfg);
3460 int mult2_reg = alloc_preg (cfg);
3461 int low1_reg = alloc_preg (cfg);
3462 int low2_reg = alloc_preg (cfg);
3463 int high1_reg = alloc_preg (cfg);
3464 int high2_reg = alloc_preg (cfg);
3465 int realidx1_reg = alloc_preg (cfg);
3466 int realidx2_reg = alloc_preg (cfg);
3467 int sum_reg = alloc_preg (cfg);
3472 mono_class_init (klass);
3473 size = mono_class_array_element_size (klass);
3475 index1 = index_ins1->dreg;
3476 index2 = index_ins2->dreg;
3478 /* range checking */
3479 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
3480 arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
/* Dimension 1: realidx1 = index1 - lower_bound; throw unless
 * realidx1 < length (unsigned compare handles negative realidx too). */
3482 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
3483 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3484 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
3485 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
3486 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
3487 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
3488 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Dimension 2: the second MonoArrayBounds record sits sizeof(MonoArrayBounds)
 * past the first, hence the offset arithmetic below. */
3490 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
3491 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3492 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
3493 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
3494 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
3495 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
3496 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* addr = arr + (realidx1 * len2 + realidx2) * size + offsetof (vector). */
3498 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
3499 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
3500 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
3501 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
3502 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3504 ins->type = STACK_MP;
3506 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *   Dispatch element-address computation for an array accessor method
 *   `cmethod`: rank 1 and (when intrinsics are enabled and the arch has a
 *   native multiply) rank 2 get inline fast paths; everything else falls
 *   back to a call to the marshalled Address() helper.  `is_set` means the
 *   last signature parameter is the value being stored, not an index.
 */
3513 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
3517 MonoMethod *addr_method;
/* For setters the trailing value argument is not an index dimension. */
3520 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
3523 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1]);
3525 #ifndef MONO_ARCH_EMULATE_MUL_DIV
3526 /* emit_ldelema_2 depends on OP_LMUL */
3527 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
3528 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
/* Slow path: call the generated Address(rank, element_size) wrapper. */
3532 element_size = mono_class_array_element_size (cmethod->klass->element_class);
3533 addr_method = mono_marshal_get_array_address (rank, element_size);
3534 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/*
 * mini_emit_inst_for_method:
 *   Intrinsic expansion: if `cmethod` is one of a known set of BCL methods
 *   (String, Object, Array, RuntimeHelpers, Thread, Monitor, Interlocked,
 *   Debugger, Environment, Math, SIMD), emit equivalent inline IR and return
 *   it; otherwise defer to the arch-specific hook at the end.  A NULL-ish
 *   result (not visible here) means "no intrinsic, emit a normal call".
 */
3540 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
3542 MonoInst *ins = NULL;
/* Lazily cached lookup of System.Runtime.CompilerServices.RuntimeHelpers. */
3544 static MonoClass *runtime_helpers_class = NULL;
3545 if (! runtime_helpers_class)
3546 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
3547 "System.Runtime.CompilerServices", "RuntimeHelpers");
/* ---- System.String intrinsics ---- */
3549 if (cmethod->klass == mono_defaults.string_class) {
3550 if (strcmp (cmethod->name, "get_Chars") == 0) {
3551 int dreg = alloc_ireg (cfg);
3552 int index_reg = alloc_preg (cfg);
3553 int mult_reg = alloc_preg (cfg);
3554 int add_reg = alloc_preg (cfg);
3556 #if SIZEOF_VOID_P == 8
3557 /* The array reg is 64 bits but the index reg is only 32 */
3558 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
3560 index_reg = args [1]->dreg;
/* Bounds check against MonoString.length, then load the UTF-16 unit. */
3562 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
3564 #if defined(__i386__) || defined(__x86_64__)
/* x86: LEA with scale 2 (shift 1) folds the index scaling. */
3565 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
3566 add_reg = ins->dreg;
3567 /* Avoid a warning */
3569 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
/* Generic: addr = str + index*2 + offsetof (MonoString, chars). */
3572 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
3573 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
3574 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
3575 add_reg, G_STRUCT_OFFSET (MonoString, chars));
3577 type_from_op (ins, NULL, NULL);
3579 } else if (strcmp (cmethod->name, "get_Length") == 0) {
3580 int dreg = alloc_ireg (cfg);
3581 /* Decompose later to allow more optimizations */
3582 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
3583 ins->type = STACK_I4;
3584 cfg->cbb->has_array_access = TRUE;
3585 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
3588 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
3589 int mult_reg = alloc_preg (cfg);
3590 int add_reg = alloc_preg (cfg);
3592 /* The corlib functions check for oob already. */
3593 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
3594 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
3595 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
/* ---- System.Object intrinsics ---- */
3598 } else if (cmethod->klass == mono_defaults.object_class) {
/* GetType: load obj->vtable->type directly. */
3600 if (strcmp (cmethod->name, "GetType") == 0) {
3601 int dreg = alloc_preg (cfg);
3602 int vt_reg = alloc_preg (cfg);
3603 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3604 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
3605 type_from_op (ins, NULL, NULL);
/* Hash of the object address; only valid with a non-moving collector. */
3608 #if !defined(MONO_ARCH_EMULATE_MUL_DIV) && !defined(HAVE_MOVING_COLLECTOR)
3609 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0) {
3610 int dreg = alloc_ireg (cfg);
3611 int t1 = alloc_ireg (cfg);
/* (obj << 3) * 2654435761: 2654435761 is the 32-bit golden-ratio
 * constant used for Knuth multiplicative hashing. */
3613 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
3614 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
3615 ins->type = STACK_I4;
/* Object..ctor is empty: reduce it to a NOP. */
3619 } else if (strcmp (cmethod->name, ".ctor") == 0) {
3620 MONO_INST_NEW (cfg, ins, OP_NOP);
3621 MONO_ADD_INS (cfg->cbb, ins);
/* ---- System.Array intrinsics ---- */
3625 } else if (cmethod->klass == mono_defaults.array_class) {
/* Cheap first-character filter before the strcmp calls below. */
3626 if (cmethod->name [0] != 'g')
3629 if (strcmp (cmethod->name, "get_Rank") == 0) {
3630 int dreg = alloc_ireg (cfg);
3631 int vtable_reg = alloc_preg (cfg);
3632 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, vtable_reg,
3633 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3634 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
3635 vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3636 type_from_op (ins, NULL, NULL);
3639 } else if (strcmp (cmethod->name, "get_Length") == 0) {
3640 int dreg = alloc_ireg (cfg);
3642 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
3643 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
3644 type_from_op (ins, NULL, NULL);
/* ---- RuntimeHelpers ---- */
3649 } else if (cmethod->klass == runtime_helpers_class) {
3651 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
3652 EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
/* ---- System.Threading.Thread ---- */
3656 } else if (cmethod->klass == mono_defaults.thread_class) {
3657 if (strcmp (cmethod->name, "get_CurrentThread") == 0 && (ins = mono_arch_get_thread_intrinsic (cfg))) {
3658 ins->dreg = alloc_preg (cfg);
3659 ins->type = STACK_OBJ;
3660 MONO_ADD_INS (cfg->cbb, ins);
3662 } else if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
3663 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
3664 MONO_ADD_INS (cfg->cbb, ins);
3666 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
3667 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
3668 MONO_ADD_INS (cfg->cbb, ins);
/* ---- System.Threading.Monitor: per-arch fast paths ---- */
3671 } else if (cmethod->klass == mono_defaults.monitor_class) {
3672 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
/* Arch provides monitor trampolines taking the object in a fixed reg. */
3673 if (strcmp (cmethod->name, "Enter") == 0) {
3676 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
3677 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
3678 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
3679 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
3681 return (MonoInst*)call;
3682 } else if (strcmp (cmethod->name, "Exit") == 0) {
3685 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
3686 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
3687 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
3688 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
3690 return (MonoInst*)call;
3692 #elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
/* IL-level fast path: call a generated FastMonitorEnter/Exit wrapper. */
3693 MonoMethod *fast_method = NULL;
3695 /* Avoid infinite recursion */
3696 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
3697 (strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
3698 strcmp (cfg->method->name, "FastMonitorExit") == 0))
3701 if (strcmp (cmethod->name, "Enter") == 0 ||
3702 strcmp (cmethod->name, "Exit") == 0)
3703 fast_method = mono_monitor_get_fast_path (cmethod);
3707 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
/* ---- System.Array::GetGenericValueImpl ---- */
3709 } else if (mini_class_is_system_array (cmethod->klass) &&
3710 strcmp (cmethod->name, "GetGenericValueImpl") == 0) {
3711 MonoInst *addr, *store, *load;
3712 MonoClass *eklass = mono_class_from_mono_type (fsig->params [1]);
/* Load the element in place and store it through the byref argument. */
3714 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1]);
3715 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
3716 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/* ---- System.Threading.Interlocked ---- */
3718 } else if (cmethod->klass->image == mono_defaults.corlib &&
3719 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
3720 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
3723 #if SIZEOF_VOID_P == 8
3724 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
3725 /* 64 bit reads are already atomic */
3726 MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
3727 ins->dreg = mono_alloc_preg (cfg);
3728 ins->inst_basereg = args [0]->dreg;
3729 ins->inst_offset = 0;
3730 MONO_ADD_INS (cfg->cbb, ins);
3734 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
/* Increment/Decrement are expanded as atomic-add of a +/-1 constant. */
3735 if (strcmp (cmethod->name, "Increment") == 0) {
3736 MonoInst *ins_iconst;
3739 if (fsig->params [0]->type == MONO_TYPE_I4)
3740 opcode = OP_ATOMIC_ADD_NEW_I4;
3741 #if SIZEOF_VOID_P == 8
3742 else if (fsig->params [0]->type == MONO_TYPE_I8)
3743 opcode = OP_ATOMIC_ADD_NEW_I8;
3746 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
3747 ins_iconst->inst_c0 = 1;
3748 ins_iconst->dreg = mono_alloc_ireg (cfg);
3749 MONO_ADD_INS (cfg->cbb, ins_iconst);
3751 MONO_INST_NEW (cfg, ins, opcode);
3752 ins->dreg = mono_alloc_ireg (cfg);
3753 ins->inst_basereg = args [0]->dreg;
3754 ins->inst_offset = 0;
3755 ins->sreg2 = ins_iconst->dreg;
3756 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
3757 MONO_ADD_INS (cfg->cbb, ins);
3759 } else if (strcmp (cmethod->name, "Decrement") == 0) {
3760 MonoInst *ins_iconst;
3763 if (fsig->params [0]->type == MONO_TYPE_I4)
3764 opcode = OP_ATOMIC_ADD_NEW_I4;
3765 #if SIZEOF_VOID_P == 8
3766 else if (fsig->params [0]->type == MONO_TYPE_I8)
3767 opcode = OP_ATOMIC_ADD_NEW_I8;
3770 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
3771 ins_iconst->inst_c0 = -1;
3772 ins_iconst->dreg = mono_alloc_ireg (cfg);
3773 MONO_ADD_INS (cfg->cbb, ins_iconst);
3775 MONO_INST_NEW (cfg, ins, opcode);
3776 ins->dreg = mono_alloc_ireg (cfg);
3777 ins->inst_basereg = args [0]->dreg;
3778 ins->inst_offset = 0;
3779 ins->sreg2 = ins_iconst->dreg;
3780 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
3781 MONO_ADD_INS (cfg->cbb, ins);
3783 } else if (strcmp (cmethod->name, "Add") == 0) {
3786 if (fsig->params [0]->type == MONO_TYPE_I4)
3787 opcode = OP_ATOMIC_ADD_NEW_I4;
3788 #if SIZEOF_VOID_P == 8
3789 else if (fsig->params [0]->type == MONO_TYPE_I8)
3790 opcode = OP_ATOMIC_ADD_NEW_I8;
3794 MONO_INST_NEW (cfg, ins, opcode);
3795 ins->dreg = mono_alloc_ireg (cfg);
3796 ins->inst_basereg = args [0]->dreg;
3797 ins->inst_offset = 0;
3798 ins->sreg2 = args [1]->dreg;
/* NOTE(review): this compares against OP_ATOMIC_ADD_I4, but opcode was
 * set to OP_ATOMIC_ADD_NEW_I4 above (as in Increment/Decrement), so the
 * comparison can never be true and ins->type is always STACK_I8 — looks
 * like a bug; should be OP_ATOMIC_ADD_NEW_I4. */
3799 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
3800 MONO_ADD_INS (cfg->cbb, ins);
3803 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
3805 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
3806 if (strcmp (cmethod->name, "Exchange") == 0) {
3809 if (fsig->params [0]->type == MONO_TYPE_I4)
3810 opcode = OP_ATOMIC_EXCHANGE_I4;
3811 #if SIZEOF_VOID_P == 8
/* On 64-bit, native int and object references use the I8 exchange. */
3812 else if ((fsig->params [0]->type == MONO_TYPE_I8) ||
3813 (fsig->params [0]->type == MONO_TYPE_I) ||
3814 (fsig->params [0]->type == MONO_TYPE_OBJECT))
3815 opcode = OP_ATOMIC_EXCHANGE_I8;
/* On 32-bit, native int and object references fit the I4 exchange. */
3817 else if ((fsig->params [0]->type == MONO_TYPE_I) ||
3818 (fsig->params [0]->type == MONO_TYPE_OBJECT))
3819 opcode = OP_ATOMIC_EXCHANGE_I4;
3824 MONO_INST_NEW (cfg, ins, opcode);
3825 ins->dreg = mono_alloc_ireg (cfg);
3826 ins->inst_basereg = args [0]->dreg;
3827 ins->inst_offset = 0;
3828 ins->sreg2 = args [1]->dreg;
3829 MONO_ADD_INS (cfg->cbb, ins);
/* Stack type follows the parameter type (case labels for I4/I8 were
 * dropped by the extraction). */
3831 switch (fsig->params [0]->type) {
3833 ins->type = STACK_I4;
3837 ins->type = STACK_I8;
3839 case MONO_TYPE_OBJECT:
3840 ins->type = STACK_OBJ;
3843 g_assert_not_reached ();
3846 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
3848 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS_IMM
3850 * Can't implement CompareExchange methods this way since they have
3851 * three arguments. We can implement one of the common cases, where the new
3852 * value is a constant.
3854 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
3855 if ((fsig->params [1]->type == MONO_TYPE_I4 ||
3856 (sizeof (gpointer) == 4 && fsig->params [1]->type == MONO_TYPE_I))
3857 && args [2]->opcode == OP_ICONST) {
3858 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_IMM_I4);
3859 ins->dreg = alloc_ireg (cfg);
3860 ins->sreg1 = args [0]->dreg;
3861 ins->sreg2 = args [1]->dreg;
3862 ins->backend.data = GINT_TO_POINTER (args [2]->inst_c0);
3863 ins->type = STACK_I4;
3864 MONO_ADD_INS (cfg->cbb, ins);
3866 /* The I8 case is hard to detect, since the arg might be a conv.i8 (iconst) tree */
3868 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS_IMM */
/* ---- Miscellaneous corlib methods ---- */
3872 } else if (cmethod->klass->image == mono_defaults.corlib) {
3873 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
3874 && strcmp (cmethod->klass->name, "Debugger") == 0) {
3875 MONO_INST_NEW (cfg, ins, OP_BREAK);
3876 MONO_ADD_INS (cfg->cbb, ins);
/* Environment.IsRunningOnWindows folds to a compile-time constant. */
3879 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
3880 && strcmp (cmethod->klass->name, "Environment") == 0) {
3881 #ifdef PLATFORM_WIN32
3882 EMIT_NEW_ICONST (cfg, ins, 1);
3884 EMIT_NEW_ICONST (cfg, ins, 0);
3888 } else if (cmethod->klass == mono_defaults.math_class) {
3890 * There is general branches code for Min/Max, but it does not work for
3892 * http://everything2.com/?node_id=1051618
3896 #ifdef MONO_ARCH_SIMD_INTRINSICS
3897 if (cfg->opt & MONO_OPT_SIMD) {
3898 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
/* No portable intrinsic matched: let the backend have a try. */
3904 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
3908 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *   Redirect selected internal calls to managed implementations; currently
 *   only String.InternalAllocateStr, which is rewritten to a call to the
 *   GC's managed string allocator when one is available.
 */
3911 inline static MonoInst*
3912 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
3913 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
3915 if (method->klass == mono_defaults.string_class) {
3916 /* managed string allocation support */
3917 if (strcmp (method->name, "InternalAllocateStr") == 0) {
3918 MonoInst *iargs [2];
3919 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3920 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, FALSE);
/* Call managed_alloc (string_vtable, length) in place of the icall.
 * NOTE(review): the NULL check on managed_alloc was dropped by the
 * extraction — confirm against full source. */
3923 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3924 iargs [1] = args [0];
3925 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
/*
 * mono_save_args:
 *   During inlining, copy the caller's argument values from the evaluation
 *   stack `sp` into freshly created local variables, installing those locals
 *   as cfg->args so the inlined body reads stable copies.
 */
3932 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
3934 MonoInst *store, *temp;
3937 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
/* Slot 0 is `this` when present; its type comes from the stack entry. */
3938 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
3941 * FIXME: We should use *args++ = sp [0], but that would mean the arg
3942 * would be different than the MonoInst's used to represent arguments, and
3943 * the ldelema implementation can't deal with that.
3944 * Solution: When ldelema is used on an inline argument, create a var for
3945 * it, emit ldelema on that var, and emit the saving code below in
3946 * inline_method () if needed.
3948 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
3949 cfg->args [i] = temp;
3950 /* This uses cfg->args [i] which is set by the preceding line */
3951 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
3952 store->cil_code = sp [0]->cil_code;
3957 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
3958 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
3960 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *   Debug aid: restrict inlining to callees whose full name starts with the
 *   prefix in $MONO_INLINE_CALLED_METHOD_NAME_LIMIT.  The env var is read
 *   once and cached; an empty/unset limit permits everything.
 */
3962 check_inline_called_method_name_limit (MonoMethod *called_method)
3965 static char *limit = NULL;
3967 if (limit == NULL) {
3968 char *limit_string = getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
3970 if (limit_string != NULL)
3971 limit = limit_string;
/* Cache "" so getenv is not retried on every call. */
3973 limit = (char *) "";
3976 if (limit [0] != '\0') {
3977 char *called_method_name = mono_method_full_name (called_method, TRUE);
/* Prefix comparison: only the first strlen(limit) chars must match. */
3979 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
3980 g_free (called_method_name);
3982 //return (strncmp_result <= 0);
3983 return (strncmp_result == 0);
3990 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *   Debug aid mirroring check_inline_called_method_name_limit, but keyed on
 *   the *caller* via $MONO_INLINE_CALLER_METHOD_NAME_LIMIT.
 */
3992 check_inline_caller_method_name_limit (MonoMethod *caller_method)
3995 static char *limit = NULL;
3997 if (limit == NULL) {
3998 char *limit_string = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
3999 if (limit_string != NULL) {
4000 limit = limit_string;
/* Cache "" so getenv is not retried on every call. */
4002 limit = (char *) "";
4006 if (limit [0] != '\0') {
4007 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
/* Prefix comparison: only the first strlen(limit) chars must match. */
4009 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
4010 g_free (caller_method_name);
4012 //return (strncmp_result <= 0);
4013 return (strncmp_result == 0);
/*
 * inline_method:
 *   Try to inline `cmethod` at the current point of compilation.  Saves the
 *   relevant cfg state, converts the callee's IL via mono_method_to_ir into
 *   freshly created start/end bblocks, and either stitches the result into
 *   the caller's CFG (returning the cost / loading the return value) or
 *   rolls everything back when the callee turned out too expensive or
 *   failed to convert.  `inline_allways` (sic) forces acceptance regardless
 *   of cost.
 */
4021 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
4022 guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_allways)
4024 MonoInst *ins, *rvar = NULL;
4025 MonoMethodHeader *cheader;
4026 MonoBasicBlock *ebblock, *sbblock;
4028 MonoMethod *prev_inlined_method;
4029 MonoInst **prev_locals, **prev_args;
4030 MonoType **prev_arg_types;
4031 guint prev_real_offset;
4032 GHashTable *prev_cbb_hash;
4033 MonoBasicBlock **prev_cil_offset_to_bb;
4034 MonoBasicBlock *prev_cbb;
4035 unsigned char* prev_cil_start;
4036 guint32 prev_cil_offset_to_bb_len;
4037 MonoMethod *prev_current_method;
4038 MonoGenericContext *prev_generic_context;
4040 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
/* Optional env-var-driven filters used when debugging the inliner. */
4042 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
4043 if ((! inline_allways) && ! check_inline_called_method_name_limit (cmethod))
4046 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
4047 if ((! inline_allways) && ! check_inline_caller_method_name_limit (cfg->method))
4051 if (cfg->verbose_level > 2)
4052 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4054 if (!cmethod->inline_info) {
4055 mono_jit_stats.inlineable_methods++;
4056 cmethod->inline_info = 1;
4058 /* allocate space to store the return value */
4059 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
4060 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
4063 /* allocate local variables */
4064 cheader = mono_method_get_header (cmethod);
4065 prev_locals = cfg->locals;
4066 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
4067 for (i = 0; i < cheader->num_locals; ++i)
4068 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
4070 /* allocate start and end blocks */
4071 /* This is needed so if the inline is aborted, we can clean up */
4072 NEW_BBLOCK (cfg, sbblock);
4073 sbblock->real_offset = real_offset;
4075 NEW_BBLOCK (cfg, ebblock);
4076 ebblock->block_num = cfg->num_bblocks++;
4077 ebblock->real_offset = real_offset;
/* Save caller compilation state so it can be restored after conversion. */
4079 prev_args = cfg->args;
4080 prev_arg_types = cfg->arg_types;
4081 prev_inlined_method = cfg->inlined_method;
4082 cfg->inlined_method = cmethod;
4083 cfg->ret_var_set = FALSE;
4084 prev_real_offset = cfg->real_offset;
4085 prev_cbb_hash = cfg->cbb_hash;
4086 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
4087 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
4088 prev_cil_start = cfg->cil_start;
4089 prev_cbb = cfg->cbb;
4090 prev_current_method = cfg->current_method;
4091 prev_generic_context = cfg->generic_context;
/* Convert the callee's IL; returns the inline cost, negative on failure. */
4093 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, *ip == CEE_CALLVIRT);
4095 cfg->inlined_method = prev_inlined_method;
4096 cfg->real_offset = prev_real_offset;
4097 cfg->cbb_hash = prev_cbb_hash;
4098 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
4099 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
4100 cfg->cil_start = prev_cil_start;
4101 cfg->locals = prev_locals;
4102 cfg->args = prev_args;
4103 cfg->arg_types = prev_arg_types;
4104 cfg->current_method = prev_current_method;
4105 cfg->generic_context = prev_generic_context;
/* Accept the inline when conversion succeeded and was cheap enough (the
 * cost ceiling 60 is a tuning constant), or when forced. */
4107 if ((costs >= 0 && costs < 60) || inline_allways) {
4108 if (cfg->verbose_level > 2)
4109 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4111 mono_jit_stats.inlined_methods++;
4113 /* always add some code to avoid block split failures */
4114 MONO_INST_NEW (cfg, ins, OP_NOP);
4115 MONO_ADD_INS (prev_cbb, ins);
4117 prev_cbb->next_bb = sbblock;
4118 link_bblock (cfg, prev_cbb, sbblock);
4121 * Get rid of the begin and end bblocks if possible to aid local
4124 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
4126 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
4127 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
4129 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
4130 MonoBasicBlock *prev = ebblock->in_bb [0];
4131 mono_merge_basic_blocks (cfg, prev, ebblock);
4133 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
4134 mono_merge_basic_blocks (cfg, prev_cbb, prev);
4135 cfg->cbb = prev_cbb;
4143 * If the inlined method contains only a throw, then the ret var is not
4144 * set, so set it to a dummy value.
4146 if (!cfg->ret_var_set) {
4147 static double r8_0 = 0.0;
/* Case labels for the stack types were dropped by the extraction; each
 * branch below materializes an appropriately-typed zero into rvar. */
4149 switch (rvar->type) {
4151 MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
4154 MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
4159 MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
4162 MONO_INST_NEW (cfg, ins, OP_R8CONST);
4163 ins->type = STACK_R8;
4164 ins->inst_p0 = (void*)&r8_0;
4165 ins->dreg = rvar->dreg;
4166 MONO_ADD_INS (cfg->cbb, ins);
4169 MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (fsig->ret));
4172 g_assert_not_reached ();
/* Push the callee's return value for the caller's evaluation stack. */
4176 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
/* Failure path: undo and fall back to a real call. */
4181 if (cfg->verbose_level > 2)
4182 printf ("INLINE ABORTED %s\n", mono_method_full_name (cmethod, TRUE));
4183 cfg->exception_type = MONO_EXCEPTION_NONE;
4184 mono_loader_clear_error ();
4186 /* This gets rid of the newly added bblocks */
4187 cfg->cbb = prev_cbb;
4193 * Some of these comments may well be out-of-date.
4194 * Design decisions: we do a single pass over the IL code (and we do bblock
4195 * splitting/merging in the few cases when it's required: a back jump to an IL
4196 * address that was not already seen as bblock starting point).
4197 * Code is validated as we go (full verification is still better left to metadata/verify.c).
4198 * Complex operations are decomposed in simpler ones right away. We need to let the
4199 * arch-specific code peek and poke inside this process somehow (except when the
4200 * optimizations can take advantage of the full semantic info of coarse opcodes).
4201 * All the opcodes of the form opcode.s are 'normalized' to opcode.
4202 * MonoInst->opcode initially is the IL opcode or some simplification of that
4203 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
4204 * opcode with value bigger than OP_LAST.
4205 * At this point the IR can be handed over to an interpreter, a dumb code generator
4206 * or to the optimizing code generator that will translate it to SSA form.
4208 * Profiling directed optimizations.
4209 * We may compile by default with few or no optimizations and instrument the code
4210 * or the user may indicate what methods to optimize the most either in a config file
4211 * or through repeated runs where the compiler applies offline the optimizations to
4212 * each method and then decides if it was worth it.
4215 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
4216 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
4217 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
4218 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
4219 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
4220 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
4221 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
4222 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; goto load_error;}
4224 /* offset from br.s -> br like opcodes */
4225 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *   Return TRUE when the IL address `ip` belongs to basic block `bb`:
 *   either no block starts at that offset (so control stayed in `bb`) or
 *   the block starting there is `bb` itself.
 */
4228 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
4230 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
4232 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *   First pass over the IL in [start, end): decode each opcode, and create
 *   basic blocks (via GET_BBLOCK) at every branch target and at every
 *   fall-through point after a branch/switch.  Also marks the block holding
 *   a `throw` as out-of-line so it gets laid out cold.
 */
4236 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
4238 unsigned char *ip = start;
4239 unsigned char *target;
4242 MonoBasicBlock *bblock;
4243 const MonoOpcode *opcode;
4246 cli_addr = ip - start;
4247 i = mono_opcode_value ((const guint8 **)&ip, end);
4250 opcode = &mono_opcodes [i];
/* Advance ip by the operand size; only branch forms create blocks. */
4251 switch (opcode->argument) {
4252 case MonoInlineNone:
4255 case MonoInlineString:
4256 case MonoInlineType:
4257 case MonoInlineField:
4258 case MonoInlineMethod:
4261 case MonoShortInlineR:
4268 case MonoShortInlineVar:
4269 case MonoShortInlineI:
4272 case MonoShortInlineBrTarget:
/* 1-byte signed displacement, relative to the next instruction. */
4273 target = start + cli_addr + 2 + (signed char)ip [1];
4274 GET_BBLOCK (cfg, bblock, target);
4277 GET_BBLOCK (cfg, bblock, ip);
4279 case MonoInlineBrTarget:
/* 4-byte signed displacement, relative to the next instruction. */
4280 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
4281 GET_BBLOCK (cfg, bblock, target);
4284 GET_BBLOCK (cfg, bblock, ip);
4286 case MonoInlineSwitch: {
4287 guint32 n = read32 (ip + 1);
/* Switch targets are relative to the end of the whole n-entry table. */
4290 cli_addr += 5 + 4 * n;
4291 target = start + cli_addr;
4292 GET_BBLOCK (cfg, bblock, target);
4294 for (j = 0; j < n; ++j) {
4295 target = start + cli_addr + (gint32)read32 (ip);
4296 GET_BBLOCK (cfg, bblock, target);
4306 g_assert_not_reached ();
4309 if (i == CEE_THROW) {
4310 unsigned char *bb_start = ip - 1;
4312 /* Find the start of the bblock containing the throw */
4314 while ((bb_start >= start) && !bblock) {
4315 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
/* Throwing blocks are cold: move them out of the hot code path. */
4319 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *   Resolve a method token in the context of method `m`.  For wrapper
 *   methods the token indexes the wrapper's own data table; otherwise it is
 *   resolved through the metadata, allowing open constructed types.
 */
4328 static inline MonoMethod *
4329 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4333 if (m->wrapper_type != MONO_WRAPPER_NONE)
4334 return mono_method_get_wrapper_data (m, token);
4336 method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 *   Like mini_get_method_allow_open, but when not compiling with generic
 *   sharing it additionally screens out methods on open constructed types
 *   (the dropped branch presumably returns NULL / an error for those).
 */
4341 static inline MonoMethod *
4342 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4344 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
4346 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 *   Resolve a type token in the context of `method`: wrapper methods look
 *   the class up in their own data table, everything else goes through the
 *   metadata with the given generic context.  Initializes the class if one
 *   was found.
 */
4352 static inline MonoClass*
4353 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
4357 if (method->wrapper_type != MONO_WRAPPER_NONE)
4358 klass = mono_method_get_wrapper_data (method, token);
4360 klass = mono_class_get_full (method->klass->image, token, context);
4362 mono_class_init (klass);
4367 * Returns TRUE if the JIT should abort inlining because "callee"
4368 * is influenced by security attributes.
/*
 * check_linkdemand:
 *   Evaluate CAS link demands for a caller→callee edge.  On an ECMA "public
 *   key" demand, code throwing a SecurityException is emitted inline; other
 *   failures are recorded on the cfg so compilation surfaces them later.
 */
4371 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
/* Only evaluated when inlining (cfg->method != caller) and the callee
 * actually carries declarative security. */
4375 if ((cfg->method != caller) && mono_method_has_declsec (callee)) {
4379 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
4380 if (result == MONO_JIT_SECURITY_OK)
4383 if (result == MONO_JIT_LINKDEMAND_ECMA) {
4384 /* Generate code to throw a SecurityException before the actual call/link */
4385 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4388 NEW_ICONST (cfg, args [0], 4);
4389 NEW_METHODCONST (cfg, args [1], caller);
4390 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
4391 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
4392 /* don't hide previous results */
4393 cfg->exception_type = MONO_EXCEPTION_SECURITY_LINKDEMAND;
4394 cfg->exception_data = result;
/*
 * method_access_exception:
 *   Return (lazily resolving and caching) the SecurityManager method that
 *   throws a MethodAccessException, taking 2 arguments (caller, callee).
 */
4402 method_access_exception (void)
4404 static MonoMethod *method = NULL;
4407 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4408 method = mono_class_get_method_from_name (secman->securitymanager,
4409 "MethodAccessException", 2);
/*
 * emit_throw_method_access_exception:
 *   Emit a call to SecurityManager.MethodAccessException (caller, callee)
 *   at the current point, i.e. code that throws at runtime instead of
 *   failing the compilation.
 */
4416 emit_throw_method_access_exception (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
4417 MonoBasicBlock *bblock, unsigned char *ip)
4419 MonoMethod *thrower = method_access_exception ();
4422 EMIT_NEW_METHODCONST (cfg, args [0], caller);
4423 EMIT_NEW_METHODCONST (cfg, args [1], callee);
4424 mono_emit_method_call (cfg, thrower, args, NULL);
/*
 * verification_exception:
 *   Return (lazily resolving and caching) the SecurityManager method that
 *   throws a VerificationException (no arguments).
 */
4428 verification_exception (void)
4430 static MonoMethod *method = NULL;
4433 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4434 method = mono_class_get_method_from_name (secman->securitymanager,
4435 "VerificationException", 0);
/*
 * emit_throw_verification_exception:
 *   Emit a call that raises a VerificationException at runtime at the
 *   current compilation point.
 */
4442 emit_throw_verification_exception (MonoCompile *cfg, MonoBasicBlock *bblock, unsigned char *ip)
4444 MonoMethod *thrower = verification_exception ();
4446 mono_emit_method_call (cfg, thrower, NULL, NULL);
/*
 * ensure_method_is_allowed_to_call_method:
 *   CoreCLR security: compare the caller's and callee's security levels and,
 *   when the call is not permitted, emit code throwing a
 *   MethodAccessException instead of the call.
 */
4450 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
4451 MonoBasicBlock *bblock, unsigned char *ip)
4453 MonoSecurityCoreCLRLevel caller_level = mono_security_core_clr_method_level (caller, TRUE);
4454 MonoSecurityCoreCLRLevel callee_level = mono_security_core_clr_method_level (callee, TRUE);
4455 gboolean is_safe = TRUE;
/* Permitted when caller is at least as privileged, or either side is
 * SafeCritical; otherwise fall into the throw below. */
4457 if (!(caller_level >= callee_level ||
4458 caller_level == MONO_SECURITY_CORE_CLR_SAFE_CRITICAL ||
4459 callee_level == MONO_SECURITY_CORE_CLR_SAFE_CRITICAL)) {
4464 emit_throw_method_access_exception (cfg, caller, callee, bblock, ip);
/*
 * method_is_safe:
 *   Test helper: a method literally named "unsafeMethod" is flagged (the
 *   return statements were dropped by the extraction).
 */
4468 method_is_safe (MonoMethod *method)
4471 if (strcmp (method->name, "unsafeMethod") == 0)
4478 * Check that the IL instructions at ip are the array initialization
4479 * sequence and return the pointer to the data and the size.
/*
 * Parameters (review notes, elided extract):
 *   method/aot     - method being compiled; aot selects the RVA-based lookup.
 *   ip             - IL position right after the newarr.
 *   klass          - array element class (drives the element-size switch).
 *   len            - not used in the visible lines; presumably the array
 *                    length from the newarr — confirm against full source.
 *   out_size       - receives the data size (set in elided lines).
 *   out_field_token- receives the ldtoken field token.
 * Returns the static field's data pointer (or, for AOT, the RVA boxed
 * with GUINT_TO_POINTER), or NULL when the pattern does not match.
 */
4482 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
4485 * newarr[System.Int32]
4487 * ldtoken field valuetype ...
4488 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* Match dup; ldtoken <field> (ip[5] == 0x4 checks the Field table tag); call. */
4490 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
4491 guint32 token = read32 (ip + 7);
4492 guint32 field_token = read32 (ip + 2);
4493 guint32 field_index = field_token & 0xffffff;
4495 const char *data_ptr;
4497 MonoMethod *cmethod;
4498 MonoClass *dummy_class;
4499 MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
4505 *out_field_token = field_token;
4507 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* The call target must be corlib's RuntimeHelpers::InitializeArray. */
4510 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
4512 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
4513 case MONO_TYPE_BOOLEAN:
4517 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
4518 #if G_BYTE_ORDER == G_LITTLE_ENDIAN
4519 case MONO_TYPE_CHAR:
4529 return NULL; /* stupid ARM FP swapped format */
/* Sanity check: the computed blob size must fit inside the field's type. */
4539 if (size > mono_type_size (field->type, &dummy_align))
4542 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
/* Non-SRE images: resolve the field's RVA and map it to a data pointer. */
4543 if (!method->klass->image->dynamic) {
4544 field_index = read32 (ip + 2) & 0xffffff;
4545 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
4546 data_ptr = mono_image_rva_map (method->klass->image, rva);
4547 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
4548 /* for aot code we do the lookup on load */
4549 if (aot && data_ptr)
4550 return GUINT_TO_POINTER (rva);
4552 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
/* Dynamic (Reflection.Emit) images keep the data in the field itself. */
4554 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *
 * Record an InvalidProgramException on CFG, with a message naming METHOD
 * and either disassembling the offending instruction at IP or noting
 * that the method body is empty. Frees the temporary strings it builds.
 */
4562 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
4564 char *method_fname = mono_method_full_name (method, TRUE);
/* An empty body cannot be disassembled — use a fixed description. */
4567 if (mono_method_get_header (method)->code_size == 0)
4568 method_code = g_strdup ("method body is empty.");
4570 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
4571 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
4572 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
4573 g_free (method_fname);
4574 g_free (method_code);
/*
 * set_exception_object:
 *
 * Record a pre-built managed exception object on CFG. The exception
 * pointer is registered as a GC root so the object stays alive while
 * the compile aborts.
 */
4578 set_exception_object (MonoCompile *cfg, MonoException *exception)
4580 cfg->exception_type = MONO_EXCEPTION_OBJECT_SUPPLIED;
4581 MONO_GC_REGISTER_ROOT (cfg->exception_ptr);
4582 cfg->exception_ptr = exception;
/*
 * generic_class_is_reference_type:
 *
 * Return whether KLASS is a reference type, resolving type variables
 * through the generic sharing context when the method is compiled
 * shared (so a shared T is classified by its basic type).
 */
4586 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
4590 if (cfg->generic_sharing_context)
4591 type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, &klass->byval_arg);
4593 type = &klass->byval_arg;
4594 return MONO_TYPE_IS_REFERENCE (type);
4598 * mono_decompose_array_access_opts:
4600 * Decompose array access opcodes.
4601 * This should be in decompose.c, but it emits calls so it has to stay here until
4602 * the old JIT is gone.
4605 mono_decompose_array_access_opts (MonoCompile *cfg)
4607 MonoBasicBlock *bb, *first_bb;
4610 * Unlike decompose_long_opts, this pass does not alter the CFG of the method so it
4611 * can be executed anytime. It should be run before decompose_long
4615 * Create a dummy bblock and emit code into it so we can use the normal
4616 * code generation macros.
4618 cfg->cbb = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoBasicBlock));
4619 first_bb = cfg->cbb;
/* Walk every basic block; only those flagged as containing array access ops. */
4621 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
4623 MonoInst *prev = NULL;
4625 MonoInst *iargs [3];
4628 if (!bb->has_array_access)
4631 if (cfg->verbose_level > 3) mono_print_bb (bb, "BEFORE DECOMPOSE-ARRAY-ACCESS-OPTS ");
4633 cfg->cbb->code = cfg->cbb->last_ins = NULL;
4639 for (ins = bb->code; ins; ins = ins->next) {
4640 switch (ins->opcode) {
/* Array length: plain i4 load from MonoArray.max_length. */
4642 NEW_LOAD_MEMBASE (cfg, dest, OP_LOADI4_MEMBASE, ins->dreg, ins->sreg1,
4643 G_STRUCT_OFFSET (MonoArray, max_length));
4644 MONO_ADD_INS (cfg->cbb, dest);
4646 case OP_BOUNDS_CHECK:
/* Delegated to the backend so each arch can emit its best sequence. */
4647 MONO_ARCH_EMIT_BOUNDS_CHECK (cfg, ins->sreg1, ins->inst_imm, ins->sreg2);
/* newarr: shared (appdomain-agnostic) code goes through mono_array_new
 * with an explicit domain; otherwise use the faster vtable-specific icall. */
4650 if (cfg->opt & MONO_OPT_SHARED) {
4651 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4652 EMIT_NEW_CLASSCONST (cfg, iargs [1], ins->inst_newa_class);
4653 MONO_INST_NEW (cfg, iargs [2], OP_MOVE);
4654 iargs [2]->dreg = ins->sreg1;
4656 dest = mono_emit_jit_icall (cfg, mono_array_new, iargs);
4657 dest->dreg = ins->dreg;
4659 MonoVTable *vtable = mono_class_vtable (cfg->domain, mono_array_class_get (ins->inst_newa_class, 1));
4662 NEW_VTABLECONST (cfg, iargs [0], vtable);
4663 MONO_ADD_INS (cfg->cbb, iargs [0]);
4664 MONO_INST_NEW (cfg, iargs [1], OP_MOVE);
4665 iargs [1]->dreg = ins->sreg1;
4667 dest = mono_emit_jit_icall (cfg, mono_array_new_specific, iargs);
4668 dest->dreg = ins->dreg;
/* String length: i4 load from MonoString.length. */
4672 NEW_LOAD_MEMBASE (cfg, dest, OP_LOADI4_MEMBASE, ins->dreg,
4673 ins->sreg1, G_STRUCT_OFFSET (MonoString, length));
4674 MONO_ADD_INS (cfg->cbb, dest);
4680 g_assert (cfg->cbb == first_bb);
4682 if (cfg->cbb->code || (cfg->cbb != first_bb)) {
4683 /* Replace the original instruction with the new code sequence */
4685 mono_replace_ins (cfg, bb, ins, &prev, first_bb, cfg->cbb);
/* Reset the dummy bblock so it can be reused for the next instruction. */
4686 first_bb->code = first_bb->last_ins = NULL;
4687 first_bb->in_count = first_bb->out_count = 0;
4688 cfg->cbb = first_bb;
4695 if (cfg->verbose_level > 3) mono_print_bb (bb, "AFTER DECOMPOSE-ARRAY-ACCESS-OPTS ");
4705 #ifdef MONO_ARCH_SOFT_FLOAT
4708 * mono_decompose_soft_float:
4710 * Soft float support on ARM. We store each double value in a pair of integer vregs,
4711 * similar to long support on 32 bit platforms. 32 bit float values require special
4712 * handling when used as locals, arguments, and in calls.
4713 * One big problem with soft-float is that there are few r4 test cases in our test suite.
4716 mono_decompose_soft_float (MonoCompile *cfg)
4718 MonoBasicBlock *bb, *first_bb;
4721 * This pass creates long opcodes, so it should be run before decompose_long_opts ().
4725 * Create a dummy bblock and emit code into it so we can use the normal
4726 * code generation macros.
4728 cfg->cbb = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoBasicBlock));
4729 first_bb = cfg->cbb;
4731 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
4733 MonoInst *prev = NULL;
4736 if (cfg->verbose_level > 3) mono_print_bb (bb, "BEFORE HANDLE-SOFT-FLOAT ");
4738 cfg->cbb->code = cfg->cbb->last_ins = NULL;
4744 for (ins = bb->code; ins; ins = ins->next) {
4745 const char *spec = INS_INFO (ins->opcode);
4747 /* Most fp operations are handled automatically by opcode emulation */
4749 switch (ins->opcode) {
/* r8 constants become i8 constants by reinterpreting the bits via a union. */
4752 d.vald = *(double*)ins->inst_p0;
4753 MONO_EMIT_NEW_I8CONST (cfg, ins->dreg, d.vall);
4758 /* We load the r8 value */
4759 d.vald = *(float*)ins->inst_p0;
4760 MONO_EMIT_NEW_I8CONST (cfg, ins->dreg, d.vall);
/* fp register moves become long moves over the integer vreg pair. */
4764 ins->opcode = OP_LMOVE;
/* NOTE(review): +1/+2 select the low/high halves of the vreg pair —
 * consistent with the reg+1/reg+2 use in the SETLRET case below. */
4767 ins->opcode = OP_MOVE;
4768 ins->sreg1 = ins->sreg1 + 1;
4771 ins->opcode = OP_MOVE;
4772 ins->sreg1 = ins->sreg1 + 2;
4775 int reg = ins->sreg1;
4777 ins->opcode = OP_SETLRET;
4779 ins->sreg1 = reg + 1;
4780 ins->sreg2 = reg + 2;
/* r8 loads/stores become plain i8 loads/stores. */
4783 case OP_LOADR8_MEMBASE:
4784 ins->opcode = OP_LOADI8_MEMBASE;
4786 case OP_STORER8_MEMBASE_REG:
4787 ins->opcode = OP_STOREI8_MEMBASE_REG;
/* r4 stores must narrow r8->r4, done through the mono_fstore_r4 icall. */
4789 case OP_STORER4_MEMBASE_REG: {
4790 MonoInst *iargs [2];
4793 /* Arg 1 is the double value */
4794 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
4795 iargs [0]->dreg = ins->sreg1;
4797 /* Arg 2 is the address to store to */
4798 addr_reg = mono_alloc_preg (cfg);
4799 EMIT_NEW_BIALU_IMM (cfg, iargs [1], OP_PADD_IMM, addr_reg, ins->inst_destbasereg, ins->inst_offset);
4800 mono_emit_jit_icall (cfg, mono_fstore_r4, iargs);
/* r4 loads widen r4->r8 through the mono_fload_r4 icall. */
4804 case OP_LOADR4_MEMBASE: {
4805 MonoInst *iargs [1];
4809 addr_reg = mono_alloc_preg (cfg);
4810 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, addr_reg, ins->inst_basereg, ins->inst_offset);
4811 conv = mono_emit_jit_icall (cfg, mono_fload_r4, iargs);
4812 conv->dreg = ins->dreg;
4817 case OP_FCALL_MEMBASE: {
4818 MonoCallInst *call = (MonoCallInst*)ins;
/* Calls returning r4: re-emit as an int-returning call, then convert
 * the raw r4 bits to r8 via mono_fload_r4 on the result's address. */
4819 if (call->signature->ret->type == MONO_TYPE_R4) {
4820 MonoCallInst *call2;
4821 MonoInst *iargs [1];
4824 /* Convert the call into a call returning an int */
4825 MONO_INST_NEW_CALL (cfg, call2, OP_CALL);
4826 memcpy (call2, call, sizeof (MonoCallInst));
4827 switch (ins->opcode) {
4829 call2->inst.opcode = OP_CALL;
4832 call2->inst.opcode = OP_CALL_REG;
4834 case OP_FCALL_MEMBASE:
4835 call2->inst.opcode = OP_CALL_MEMBASE;
4838 g_assert_not_reached ();
4840 call2->inst.dreg = mono_alloc_ireg (cfg);
4841 MONO_ADD_INS (cfg->cbb, (MonoInst*)call2);
4843 /* FIXME: Optimize this */
4845 /* Emit an r4->r8 conversion */
4846 EMIT_NEW_VARLOADA_VREG (cfg, iargs [0], call2->inst.dreg, &mono_defaults.int32_class->byval_arg);
4847 conv = mono_emit_jit_icall (cfg, mono_fload_r4, iargs);
4848 conv->dreg = ins->dreg;
/* Calls returning r8 become long-returning calls. */
4850 switch (ins->opcode) {
4852 ins->opcode = OP_LCALL;
4855 ins->opcode = OP_LCALL_REG;
4857 case OP_FCALL_MEMBASE:
4858 ins->opcode = OP_LCALL_MEMBASE;
4861 g_assert_not_reached ();
4867 MonoJitICallInfo *info;
4868 MonoInst *iargs [2];
4869 MonoInst *call, *cmp, *br;
4871 /* Convert fcompare+fbcc to icall+icompare+beq */
/* The emulation icall is chosen from the *branch* opcode following the compare. */
4873 info = mono_find_jit_opcode_emulation (ins->next->opcode);
4876 /* Create dummy MonoInst's for the arguments */
4877 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
4878 iargs [0]->dreg = ins->sreg1;
4879 MONO_INST_NEW (cfg, iargs [1], OP_ARG);
4880 iargs [1]->dreg = ins->sreg2;
4882 call = mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, iargs);
4884 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE_IMM);
4885 cmp->sreg1 = call->dreg;
4887 MONO_ADD_INS (cfg->cbb, cmp);
4889 MONO_INST_NEW (cfg, br, OP_IBNE_UN);
4890 br->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * 2);
4891 br->inst_true_bb = ins->next->inst_true_bb;
4892 br->inst_false_bb = ins->next->inst_false_bb;
4893 MONO_ADD_INS (cfg->cbb, br);
4895 /* The call sequence might include fp ins */
4898 /* Skip fbcc or fccc */
4899 NULLIFY_INS (ins->next);
4907 MonoJitICallInfo *info;
4908 MonoInst *iargs [2];
4911 /* Convert fccc to icall+icompare+iceq */
4913 info = mono_find_jit_opcode_emulation (ins->opcode);
4916 /* Create dummy MonoInst's for the arguments */
4917 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
4918 iargs [0]->dreg = ins->sreg1;
4919 MONO_INST_NEW (cfg, iargs [1], OP_ARG);
4920 iargs [1]->dreg = ins->sreg2;
4922 call = mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, iargs);
/* The emulation icall returns 1 on "true"; materialize that as 0/1 in dreg. */
4924 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, call->dreg, 1);
4925 MONO_EMIT_NEW_UNALU (cfg, OP_ICEQ, ins->dreg, -1);
4927 /* The call sequence might include fp ins */
/* Safety net: no fp vregs may survive this pass. */
4932 if (spec [MONO_INST_SRC1] == 'f' || spec [MONO_INST_SRC2] == 'f' || spec [MONO_INST_DEST] == 'f') {
4933 mono_print_ins (ins);
4934 g_assert_not_reached ();
4939 g_assert (cfg->cbb == first_bb);
4941 if (cfg->cbb->code || (cfg->cbb != first_bb)) {
4942 /* Replace the original instruction with the new code sequence */
4944 mono_replace_ins (cfg, bb, ins, &prev, first_bb, cfg->cbb);
4945 first_bb->code = first_bb->last_ins = NULL;
4946 first_bb->in_count = first_bb->out_count = 0;
4947 cfg->cbb = first_bb;
4954 if (cfg->verbose_level > 3) mono_print_bb (bb, "AFTER HANDLE-SOFT-FLOAT ");
/* This pass produced long opcodes; decompose them now (see note above). */
4957 mono_decompose_long_opts (cfg);
/*
 * emit_stloc_ir:
 *
 * Emit the store of the top-of-stack value *sp into local N. When the
 * store would be a plain reg-reg move and the source is a constant
 * (ICONST/I8CONST), retarget the constant's dreg to the local instead
 * of emitting a separate move.
 */
4963 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
4966 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
4967 if ((opcode == OP_MOVE) && ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
4968 /* Optimize reg-reg moves away */
4970 * Can't optimize other opcodes, since sp[0] might point to
4971 * the last ins of a decomposed opcode.
4973 sp [0]->dreg = (cfg)->locals [n]->dreg;
4975 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
4980 * ldloca inhibits many optimizations so try to get rid of it in common
/*
 * emit_optimized_ldloca_ir:
 *
 * Peephole for ldloca followed by initobj in the same basic block:
 * instead of taking the local's address, initialize the local directly —
 * NULL store for reference types, VZERO for value types.
 * NOTE(review): elided extract — the return paths (advanced ip on match,
 * presumably NULL otherwise) and the use of SIZE to pick the short/long
 * ldloca encoding are outside the visible lines.
 */
4983 static inline unsigned char *
4984 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
4993 local = read16 (ip + 2);
/* Only fold when the next instruction is initobj and still in this bblock. */
4997 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
4998 gboolean skip = FALSE;
5000 /* From the INITOBJ case */
5001 token = read32 (ip + 2);
5002 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
5003 CHECK_TYPELOAD (klass);
5004 if (generic_class_is_reference_type (cfg, klass)) {
5005 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5006 } else if (MONO_TYPE_IS_REFERENCE (&klass->byval_arg)) {
5007 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5008 } else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
5009 MONO_EMIT_NEW_VZERO (cfg, cfg->locals [local]->dreg, klass);
5022 * mono_method_to_ir:
5024 * Translate the .net IL into linear IR.
5027 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
5028 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
5029 guint inline_offset, gboolean is_virtual_call)
5031 MonoInst *ins, **sp, **stack_start;
5032 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
5033 MonoMethod *cmethod, *method_definition;
5034 MonoInst **arg_array;
5035 MonoMethodHeader *header;
5037 guint32 token, ins_flag;
5039 MonoClass *constrained_call = NULL;
5040 unsigned char *ip, *end, *target, *err_pos;
5041 static double r8_0 = 0.0;
5042 MonoMethodSignature *sig;
5043 MonoGenericContext *generic_context = NULL;
5044 MonoGenericContainer *generic_container = NULL;
5045 MonoType **param_types;
5046 int i, n, start_new_bblock, dreg;
5047 int num_calls = 0, inline_costs = 0;
5048 int breakpoint_id = 0;
5050 MonoBoolean security, pinvoke;
5051 MonoSecurityManager* secman = NULL;
5052 MonoDeclSecurityActions actions;
5053 GSList *class_inits = NULL;
5054 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
5057 /* serialization and xdomain stuff may need access to private fields and methods */
5058 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
5059 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
5060 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
5061 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
5062 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
5063 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
5065 dont_verify |= mono_security_get_mode () == MONO_SECURITY_MODE_SMCS_HACK;
5067 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
5068 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
5069 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
5070 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
5072 image = method->klass->image;
5073 header = mono_method_get_header (method);
5074 generic_container = mono_method_get_generic_container (method);
5075 sig = mono_method_signature (method);
5076 num_args = sig->hasthis + sig->param_count;
5077 ip = (unsigned char*)header->code;
5078 cfg->cil_start = ip;
5079 end = ip + header->code_size;
5080 mono_jit_stats.cil_code_size += header->code_size;
5082 method_definition = method;
5083 while (method_definition->is_inflated) {
5084 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
5085 method_definition = imethod->declaring;
5088 /* SkipVerification is not allowed if core-clr is enabled */
5089 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
5091 dont_verify_stloc = TRUE;
5094 if (!dont_verify && mini_method_verify (cfg, method_definition))
5095 goto exception_exit;
5097 if (mono_debug_using_mono_debugger ())
5098 cfg->keep_cil_nops = TRUE;
5100 if (sig->is_inflated)
5101 generic_context = mono_method_get_context (method);
5102 else if (generic_container)
5103 generic_context = &generic_container->context;
5104 cfg->generic_context = generic_context;
5106 if (!cfg->generic_sharing_context)
5107 g_assert (!sig->has_type_parameters);
5109 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
5110 g_assert (method->is_inflated);
5111 g_assert (mono_method_get_context (method)->method_inst);
5113 if (method->is_inflated && mono_method_get_context (method)->method_inst)
5114 g_assert (sig->generic_param_count);
5116 if (cfg->method == method) {
5117 cfg->real_offset = 0;
5119 cfg->real_offset = inline_offset;
5122 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
5123 cfg->cil_offset_to_bb_len = header->code_size;
5125 cfg->current_method = method;
5127 if (cfg->verbose_level > 2)
5128 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
5130 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
5132 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
5133 for (n = 0; n < sig->param_count; ++n)
5134 param_types [n + sig->hasthis] = sig->params [n];
5135 cfg->arg_types = param_types;
5137 dont_inline = g_list_prepend (dont_inline, method);
5138 if (cfg->method == method) {
5140 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
5141 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
5144 NEW_BBLOCK (cfg, start_bblock);
5145 cfg->bb_entry = start_bblock;
5146 start_bblock->cil_code = NULL;
5147 start_bblock->cil_length = 0;
5150 NEW_BBLOCK (cfg, end_bblock);
5151 cfg->bb_exit = end_bblock;
5152 end_bblock->cil_code = NULL;
5153 end_bblock->cil_length = 0;
5154 g_assert (cfg->num_bblocks == 2);
5156 arg_array = cfg->args;
5158 if (header->num_clauses) {
5159 cfg->spvars = g_hash_table_new (NULL, NULL);
5160 cfg->exvars = g_hash_table_new (NULL, NULL);
5162 /* handle exception clauses */
5163 for (i = 0; i < header->num_clauses; ++i) {
5164 MonoBasicBlock *try_bb;
5165 MonoExceptionClause *clause = &header->clauses [i];
5166 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
5167 try_bb->real_offset = clause->try_offset;
5168 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
5169 tblock->real_offset = clause->handler_offset;
5170 tblock->flags |= BB_EXCEPTION_HANDLER;
5172 link_bblock (cfg, try_bb, tblock);
5174 if (*(ip + clause->handler_offset) == CEE_POP)
5175 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
5177 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
5178 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
5179 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
5180 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5181 MONO_ADD_INS (tblock, ins);
5183 /* todo: is a fault block unsafe to optimize? */
5184 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
5185 tblock->flags |= BB_EXCEPTION_UNSAFE;
5189 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
5191 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
5193 /* catch and filter blocks get the exception object on the stack */
5194 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
5195 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5196 MonoInst *dummy_use;
5198 /* mostly like handle_stack_args (), but just sets the input args */
5199 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
5200 tblock->in_scount = 1;
5201 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5202 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5205 * Add a dummy use for the exvar so its liveness info will be
5209 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
5211 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5212 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
5213 tblock->flags |= BB_EXCEPTION_HANDLER;
5214 tblock->real_offset = clause->data.filter_offset;
5215 tblock->in_scount = 1;
5216 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5217 /* The filter block shares the exvar with the handler block */
5218 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5219 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5220 MONO_ADD_INS (tblock, ins);
5224 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
5225 clause->data.catch_class &&
5226 cfg->generic_sharing_context &&
5227 mono_class_check_context_used (clause->data.catch_class)) {
5228 if (mono_method_get_context (method)->method_inst)
5229 GENERIC_SHARING_FAILURE (CEE_NOP);
5232 * In shared generic code with catch
5233 * clauses containing type variables
5234 * the exception handling code has to
5235 * be able to get to the rgctx.
5236 * Therefore we have to make sure that
5237 * the vtable/mrgctx argument (for
5238 * static or generic methods) or the
5239 * "this" argument (for non-static
5240 * methods) are live.
5242 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5243 mini_method_get_context (method)->method_inst ||
5244 method->klass->valuetype) {
5245 mono_get_vtable_var (cfg);
5247 MonoInst *dummy_use;
5249 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
5254 arg_array = alloca (sizeof (MonoInst *) * num_args);
5255 cfg->cbb = start_bblock;
5256 cfg->args = arg_array;
5257 mono_save_args (cfg, sig, inline_args);
5260 /* FIRST CODE BLOCK */
5261 NEW_BBLOCK (cfg, bblock);
5262 bblock->cil_code = ip;
5266 ADD_BBLOCK (cfg, bblock);
5268 if (cfg->method == method) {
5269 breakpoint_id = mono_debugger_method_has_breakpoint (method);
5270 if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
5271 MONO_INST_NEW (cfg, ins, OP_BREAK);
5272 MONO_ADD_INS (bblock, ins);
5276 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5277 secman = mono_security_manager_get_methods ();
5279 security = (secman && mono_method_has_declsec (method));
5280 /* at this point having security doesn't mean we have any code to generate */
5281 if (security && (cfg->method == method)) {
5282 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
5283 * And we do not want to enter the next section (with allocation) if we
5284 * have nothing to generate */
5285 security = mono_declsec_get_demands (method, &actions);
5288 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
5289 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
5291 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5292 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5293 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
5295 /* unless the method or it's class has the [SuppressUnmanagedCodeSecurity] attribute */
5296 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5300 mono_custom_attrs_free (custom);
5303 custom = mono_custom_attrs_from_class (wrapped->klass);
5304 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5308 mono_custom_attrs_free (custom);
5311 /* not a P/Invoke after all */
5316 if ((header->init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
5317 /* we use a separate basic block for the initialization code */
5318 NEW_BBLOCK (cfg, init_localsbb);
5319 cfg->bb_init = init_localsbb;
5320 init_localsbb->real_offset = cfg->real_offset;
5321 start_bblock->next_bb = init_localsbb;
5322 init_localsbb->next_bb = bblock;
5323 link_bblock (cfg, start_bblock, init_localsbb);
5324 link_bblock (cfg, init_localsbb, bblock);
5326 cfg->cbb = init_localsbb;
5328 start_bblock->next_bb = bblock;
5329 link_bblock (cfg, start_bblock, bblock);
5332 /* at this point we know, if security is TRUE, that some code needs to be generated */
5333 if (security && (cfg->method == method)) {
5336 mono_jit_stats.cas_demand_generation++;
5338 if (actions.demand.blob) {
5339 /* Add code for SecurityAction.Demand */
5340 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
5341 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
5342 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5343 mono_emit_method_call (cfg, secman->demand, args, NULL);
5345 if (actions.noncasdemand.blob) {
5346 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
5347 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
5348 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
5349 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
5350 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5351 mono_emit_method_call (cfg, secman->demand, args, NULL);
5353 if (actions.demandchoice.blob) {
5354 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
5355 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
5356 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
5357 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
5358 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
5362 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
5364 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
5367 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
5368 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
5369 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5370 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5371 if (!(method->klass && method->klass->image &&
5372 mono_security_core_clr_is_platform_image (method->klass->image))) {
5373 emit_throw_method_access_exception (cfg, method, wrapped, bblock, ip);
5377 if (!method_is_safe (method))
5378 emit_throw_verification_exception (cfg, bblock, ip);
5381 if (header->code_size == 0)
5384 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
5389 if (cfg->method == method)
5390 mono_debug_init_method (cfg, bblock, breakpoint_id);
5392 for (n = 0; n < header->num_locals; ++n) {
5393 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
5398 /* add a check for this != NULL to inlined methods */
5399 if (is_virtual_call) {
5402 NEW_ARGLOAD (cfg, arg_ins, 0);
5403 MONO_ADD_INS (cfg->cbb, arg_ins);
5404 cfg->flags |= MONO_CFG_HAS_CHECK_THIS;
5405 MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, arg_ins->dreg);
5406 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, arg_ins->dreg);
5409 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
5410 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
5413 start_new_bblock = 0;
5417 if (cfg->method == method)
5418 cfg->real_offset = ip - header->code;
5420 cfg->real_offset = inline_offset;
5425 if (start_new_bblock) {
5426 bblock->cil_length = ip - bblock->cil_code;
5427 if (start_new_bblock == 2) {
5428 g_assert (ip == tblock->cil_code);
5430 GET_BBLOCK (cfg, tblock, ip);
5432 bblock->next_bb = tblock;
5435 start_new_bblock = 0;
5436 for (i = 0; i < bblock->in_scount; ++i) {
5437 if (cfg->verbose_level > 3)
5438 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5439 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5443 g_slist_free (class_inits);
5446 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
5447 link_bblock (cfg, bblock, tblock);
5448 if (sp != stack_start) {
5449 handle_stack_args (cfg, stack_start, sp - stack_start);
5451 CHECK_UNVERIFIABLE (cfg);
5453 bblock->next_bb = tblock;
5456 for (i = 0; i < bblock->in_scount; ++i) {
5457 if (cfg->verbose_level > 3)
5458 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5459 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5462 g_slist_free (class_inits);
5467 bblock->real_offset = cfg->real_offset;
5469 if ((cfg->method == method) && cfg->coverage_info) {
5470 guint32 cil_offset = ip - header->code;
5471 cfg->coverage_info->data [cil_offset].cil_code = ip;
5473 /* TODO: Use an increment here */
5474 #if defined(__i386__)
5475 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
5476 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
5478 MONO_ADD_INS (cfg->cbb, ins);
5480 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
5481 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
5485 if (cfg->verbose_level > 3)
5486 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
5490 if (cfg->keep_cil_nops)
5491 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
5493 MONO_INST_NEW (cfg, ins, OP_NOP);
5495 MONO_ADD_INS (bblock, ins);
5498 MONO_INST_NEW (cfg, ins, OP_BREAK);
5500 MONO_ADD_INS (bblock, ins);
5506 CHECK_STACK_OVF (1);
5507 n = (*ip)-CEE_LDARG_0;
5509 EMIT_NEW_ARGLOAD (cfg, ins, n);
5517 CHECK_STACK_OVF (1);
5518 n = (*ip)-CEE_LDLOC_0;
5520 EMIT_NEW_LOCLOAD (cfg, ins, n);
5529 n = (*ip)-CEE_STLOC_0;
5532 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
5534 emit_stloc_ir (cfg, sp, header, n);
5541 CHECK_STACK_OVF (1);
5544 EMIT_NEW_ARGLOAD (cfg, ins, n);
5550 CHECK_STACK_OVF (1);
5553 NEW_ARGLOADA (cfg, ins, n);
5554 MONO_ADD_INS (cfg->cbb, ins);
5564 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
5566 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
5571 CHECK_STACK_OVF (1);
5574 EMIT_NEW_LOCLOAD (cfg, ins, n);
5578 case CEE_LDLOCA_S: {
5579 unsigned char *tmp_ip;
5581 CHECK_STACK_OVF (1);
5582 CHECK_LOCAL (ip [1]);
5584 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
5590 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
5599 CHECK_LOCAL (ip [1]);
5600 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
5602 emit_stloc_ir (cfg, sp, header, ip [1]);
5607 CHECK_STACK_OVF (1);
5608 EMIT_NEW_PCONST (cfg, ins, NULL);
5609 ins->type = STACK_OBJ;
5614 CHECK_STACK_OVF (1);
5615 EMIT_NEW_ICONST (cfg, ins, -1);
5628 CHECK_STACK_OVF (1);
5629 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
5635 CHECK_STACK_OVF (1);
5637 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
5643 CHECK_STACK_OVF (1);
5644 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
5650 CHECK_STACK_OVF (1);
5651 MONO_INST_NEW (cfg, ins, OP_I8CONST);
5652 ins->type = STACK_I8;
5653 ins->dreg = alloc_dreg (cfg, STACK_I8);
5655 ins->inst_l = (gint64)read64 (ip);
5656 MONO_ADD_INS (bblock, ins);
5662 /* FIXME: we should really allocate this only late in the compilation process */
5663 mono_domain_lock (cfg->domain);
5664 f = mono_domain_alloc (cfg->domain, sizeof (float));
5665 mono_domain_unlock (cfg->domain);
5667 CHECK_STACK_OVF (1);
5668 MONO_INST_NEW (cfg, ins, OP_R4CONST);
5669 ins->type = STACK_R8;
5670 ins->dreg = alloc_dreg (cfg, STACK_R8);
5674 MONO_ADD_INS (bblock, ins);
5682 /* FIXME: we should really allocate this only late in the compilation process */
5683 mono_domain_lock (cfg->domain);
5684 d = mono_domain_alloc (cfg->domain, sizeof (double));
5685 mono_domain_unlock (cfg->domain);
5687 CHECK_STACK_OVF (1);
5688 MONO_INST_NEW (cfg, ins, OP_R8CONST);
5689 ins->type = STACK_R8;
5690 ins->dreg = alloc_dreg (cfg, STACK_R8);
5694 MONO_ADD_INS (bblock, ins);
5701 MonoInst *temp, *store;
5703 CHECK_STACK_OVF (1);
5707 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
5708 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
5710 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
5713 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
5726 if (sp [0]->type == STACK_R8)
5727 /* we need to pop the value from the x86 FP stack */
5728 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
5735 if (stack_start != sp)
5737 token = read32 (ip + 1);
5738 /* FIXME: check the signature matches */
5739 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
5744 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
5745 GENERIC_SHARING_FAILURE (CEE_JMP);
5747 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
5748 if (check_linkdemand (cfg, method, cmethod))
5750 CHECK_CFG_EXCEPTION;
5755 MonoMethodSignature *fsig = mono_method_signature (cmethod);
5758 /* Handle tail calls similarly to calls */
5759 n = fsig->param_count + fsig->hasthis;
5761 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
5762 call->method = cmethod;
5763 call->tail_call = TRUE;
5764 call->signature = mono_method_signature (cmethod);
5765 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
5766 call->inst.inst_p0 = cmethod;
5767 for (i = 0; i < n; ++i)
5768 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
5770 mono_arch_emit_call (cfg, call);
5771 MONO_ADD_INS (bblock, (MonoInst*)call);
5774 for (i = 0; i < num_args; ++i)
5775 /* Prevent arguments from being optimized away */
5776 arg_array [i]->flags |= MONO_INST_VOLATILE;
5778 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
5779 ins = (MonoInst*)call;
5780 ins->inst_p0 = cmethod;
5781 MONO_ADD_INS (bblock, ins);
5785 start_new_bblock = 1;
5790 case CEE_CALLVIRT: {
5791 MonoInst *addr = NULL;
5792 MonoMethodSignature *fsig = NULL;
5794 int virtual = *ip == CEE_CALLVIRT;
5795 int calli = *ip == CEE_CALLI;
5796 gboolean pass_imt_from_rgctx = FALSE;
5797 MonoInst *imt_arg = NULL;
5798 gboolean pass_vtable = FALSE;
5799 gboolean pass_mrgctx = FALSE;
5800 MonoInst *vtable_arg = NULL;
5801 gboolean check_this = FALSE;
5804 token = read32 (ip + 1);
5811 if (method->wrapper_type != MONO_WRAPPER_NONE)
5812 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
5814 fsig = mono_metadata_parse_signature (image, token);
5816 n = fsig->param_count + fsig->hasthis;
5818 MonoMethod *cil_method;
5820 if (method->wrapper_type != MONO_WRAPPER_NONE) {
5821 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
5822 cil_method = cmethod;
5823 } else if (constrained_call) {
5824 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
5826 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
5827 cil_method = cmethod;
5832 if (!dont_verify && !cfg->skip_visibility) {
5833 MonoMethod *target_method = cil_method;
5834 if (method->is_inflated) {
5835 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
5837 if (!mono_method_can_access_method (method_definition, target_method) &&
5838 !mono_method_can_access_method (method, cil_method))
5839 METHOD_ACCESS_FAILURE;
5842 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
5843 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
5845 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
5846 /* MS.NET seems to silently convert this to a callvirt */
5849 if (!cmethod->klass->inited)
5850 if (!mono_class_init (cmethod->klass))
5853 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
5854 mini_class_is_system_array (cmethod->klass)) {
5855 array_rank = cmethod->klass->rank;
5856 fsig = mono_method_signature (cmethod);
5858 if (mono_method_signature (cmethod)->pinvoke) {
5859 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
5860 check_for_pending_exc, FALSE);
5861 fsig = mono_method_signature (wrapper);
5862 } else if (constrained_call) {
5863 fsig = mono_method_signature (cmethod);
5865 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
5869 mono_save_token_info (cfg, image, token, cil_method);
5871 n = fsig->param_count + fsig->hasthis;
5873 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
5874 if (check_linkdemand (cfg, method, cmethod))
5876 CHECK_CFG_EXCEPTION;
5879 if (cmethod->string_ctor)
5880 g_assert_not_reached ();
5883 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
5886 if (!cfg->generic_sharing_context && cmethod)
5887 g_assert (!mono_method_check_context_used (cmethod));
5891 //g_assert (!virtual || fsig->hasthis);
5895 if (constrained_call) {
5897 * We have the `constrained.' prefix opcode.
5899 if (constrained_call->valuetype && !cmethod->klass->valuetype) {
5903 * The type parameter is instantiated as a valuetype,
5904 * but that type doesn't override the method we're
5905 * calling, so we need to box `this'.
5907 dreg = alloc_dreg (cfg, STACK_VTYPE);
5908 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADV_MEMBASE, dreg, sp [0]->dreg, 0);
5909 ins->klass = constrained_call;
5910 sp [0] = handle_box (cfg, ins, constrained_call);
5911 } else if (!constrained_call->valuetype) {
5912 int dreg = alloc_preg (cfg);
5915 * The type parameter is instantiated as a reference
5916 * type. We have a managed pointer on the stack, so
5917 * we need to dereference it here.
5919 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
5920 ins->type = STACK_OBJ;
5922 } else if (cmethod->klass->valuetype)
5924 constrained_call = NULL;
5927 if (*ip != CEE_CALLI && check_call_signature (cfg, fsig, sp))
5931 if (cmethod && ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
5932 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
5933 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
5934 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
5935 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
5938 * Pass vtable iff target method might
5939 * be shared, which means that sharing
5940 * is enabled for its class and its
5941 * context is sharable (and it's not a
5944 if (sharing_enabled && context_sharable &&
5945 !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
5949 if (cmethod && mini_method_get_context (cmethod) &&
5950 mini_method_get_context (cmethod)->method_inst) {
5951 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
5952 MonoGenericContext *context = mini_method_get_context (cmethod);
5953 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
5955 g_assert (!pass_vtable);
5957 if (sharing_enabled && context_sharable)
5961 if (cfg->generic_sharing_context && cmethod) {
5962 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
5964 context_used = mono_method_check_context_used (cmethod);
5966 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
5967 /* Generic method interface
5968 calls are resolved via a
5969 helper function and don't
5971 if (!cmethod_context || !cmethod_context->method_inst)
5972 pass_imt_from_rgctx = TRUE;
5976 * If a shared method calls another
5977 * shared method then the caller must
5978 * have a generic sharing context
5979 * because the magic trampoline
5980 * requires it. FIXME: We shouldn't
5981 * have to force the vtable/mrgctx
5982 * variable here. Instead there
5983 * should be a flag in the cfg to
5984 * request a generic sharing context.
5987 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
5988 mono_get_vtable_var (cfg);
5993 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
5995 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
5997 CHECK_TYPELOAD (cmethod->klass);
5998 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
6003 g_assert (!vtable_arg);
6006 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
6008 EMIT_NEW_METHOD_RGCTX_CONST (cfg, vtable_arg, cmethod);
6011 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
6012 (cmethod->flags & METHOD_ATTRIBUTE_FINAL)) {
6019 if (pass_imt_from_rgctx) {
6020 g_assert (!pass_vtable);
6023 imt_arg = emit_get_rgctx_method (cfg, context_used,
6024 cmethod, MONO_RGCTX_INFO_METHOD);
6030 MONO_INST_NEW (cfg, check, OP_CHECK_THIS);
6031 check->sreg1 = sp [0]->dreg;
6032 MONO_ADD_INS (cfg->cbb, check);
6035 /* Calling virtual generic methods */
6036 if (cmethod && virtual &&
6037 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
6038 !((cmethod->flags & METHOD_ATTRIBUTE_FINAL) &&
6039 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
6040 mono_method_signature (cmethod)->generic_param_count) {
6041 MonoInst *this_temp, *this_arg_temp, *store;
6042 MonoInst *iargs [4];
6044 g_assert (mono_method_signature (cmethod)->is_inflated);
6046 /* Prevent inlining of methods that contain indirect calls */
6049 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK
6050 if (!(cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) &&
6051 cmethod->wrapper_type == MONO_WRAPPER_NONE) {
6052 g_assert (!imt_arg);
6054 imt_arg = emit_get_rgctx_method (cfg, context_used,
6055 cmethod, MONO_RGCTX_INFO_METHOD_CONTEXT);
6059 cfg->disable_aot = TRUE;
6060 g_assert (cmethod->is_inflated);
6061 EMIT_NEW_PCONST (cfg, imt_arg,
6062 ((MonoMethodInflated*)cmethod)->context.method_inst);
6064 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, sp [0], imt_arg);
6068 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
6069 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
6070 MONO_ADD_INS (bblock, store);
6072 /* FIXME: This should be a managed pointer */
6073 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
6075 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
6077 iargs [1] = emit_get_rgctx_method (cfg, context_used,
6078 cmethod, MONO_RGCTX_INFO_METHOD);
6079 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
6080 addr = mono_emit_jit_icall (cfg,
6081 mono_helper_compile_generic_method, iargs);
6083 EMIT_NEW_METHODCONST (cfg, iargs [1], cmethod);
6084 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
6085 addr = mono_emit_jit_icall (cfg, mono_helper_compile_generic_method, iargs);
6088 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
6090 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6093 if (!MONO_TYPE_IS_VOID (fsig->ret))
6102 /* FIXME: runtime generic context pointer for jumps? */
6103 /* FIXME: handle this for generic sharing eventually */
6104 if ((ins_flag & MONO_INST_TAILCALL) && !cfg->generic_sharing_context && !vtable_arg && cmethod && (*ip == CEE_CALL) &&
6105 (mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)))) {
6108 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6111 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6112 call->tail_call = TRUE;
6113 call->method = cmethod;
6114 call->signature = mono_method_signature (cmethod);
6117 /* Handle tail calls similarly to calls */
6118 call->inst.opcode = OP_TAILCALL;
6120 mono_arch_emit_call (cfg, call);
6123 * We implement tail calls by storing the actual arguments into the
6124 * argument variables, then emitting a CEE_JMP.
6126 for (i = 0; i < n; ++i) {
6127 /* Prevent argument from being register allocated */
6128 arg_array [i]->flags |= MONO_INST_VOLATILE;
6129 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6133 ins = (MonoInst*)call;
6134 ins->inst_p0 = cmethod;
6135 ins->inst_p1 = arg_array [0];
6136 MONO_ADD_INS (bblock, ins);
6137 link_bblock (cfg, bblock, end_bblock);
6138 start_new_bblock = 1;
6139 /* skip CEE_RET as well */
6145 /* Conversion to a JIT intrinsic */
6146 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
6147 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6148 type_to_eval_stack_type ((cfg), fsig->ret, ins);
6159 if ((cfg->opt & MONO_OPT_INLINE) && cmethod &&
6160 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || (cmethod->flags & METHOD_ATTRIBUTE_FINAL)) &&
6161 mono_method_check_inlining (cfg, cmethod) &&
6162 !g_list_find (dont_inline, cmethod)) {
6164 gboolean allways = FALSE;
6166 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
6167 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
6168 /* Prevent inlining of methods that call wrappers */
6170 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
6174 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, allways))) {
6176 cfg->real_offset += 5;
6179 if (!MONO_TYPE_IS_VOID (fsig->ret))
6180 /* *sp is already set by inline_method */
6183 inline_costs += costs;
6189 inline_costs += 10 * num_calls++;
6191 /* Tail recursion elimination */
6192 if ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
6193 gboolean has_vtargs = FALSE;
6196 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6199 /* keep it simple */
6200 for (i = fsig->param_count - 1; i >= 0; i--) {
6201 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
6206 for (i = 0; i < n; ++i)
6207 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6208 MONO_INST_NEW (cfg, ins, OP_BR);
6209 MONO_ADD_INS (bblock, ins);
6210 tblock = start_bblock->out_bb [0];
6211 link_bblock (cfg, bblock, tblock);
6212 ins->inst_target_bb = tblock;
6213 start_new_bblock = 1;
6215 /* skip the CEE_RET, too */
6216 if (ip_in_bb (cfg, bblock, ip + 5))
6226 /* Generic sharing */
6227 /* FIXME: only do this for generic methods if
6228 they are not shared! */
6229 if (context_used && !imt_arg && !array_rank &&
6230 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
6231 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
6232 (!virtual || cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
6233 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
6236 g_assert (cfg->generic_sharing_context && cmethod);
6240 * We are compiling a call to a
6241 * generic method from shared code,
6242 * which means that we have to look up
6243 * the method in the rgctx and do an
6246 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
6249 /* Indirect calls */
6251 g_assert (!imt_arg);
6253 if (*ip == CEE_CALL)
6254 g_assert (context_used);
6255 else if (*ip == CEE_CALLI)
6256 g_assert (!vtable_arg);
6258 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
6259 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
6261 /* Prevent inlining of methods with indirect calls */
6265 #ifdef MONO_ARCH_RGCTX_REG
6267 int rgctx_reg = mono_alloc_preg (cfg);
6269 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
6270 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6271 call = (MonoCallInst*)ins;
6272 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
6273 cfg->uses_rgctx_reg = TRUE;
6278 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6280 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6281 if (fsig->pinvoke && !fsig->ret->byref) {
6285 * Native code might return non register sized integers
6286 * without initializing the upper bits.
6288 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
6289 case OP_LOADI1_MEMBASE:
6290 widen_op = OP_ICONV_TO_I1;
6292 case OP_LOADU1_MEMBASE:
6293 widen_op = OP_ICONV_TO_U1;
6295 case OP_LOADI2_MEMBASE:
6296 widen_op = OP_ICONV_TO_I2;
6298 case OP_LOADU2_MEMBASE:
6299 widen_op = OP_ICONV_TO_U2;
6305 if (widen_op != -1) {
6306 int dreg = alloc_preg (cfg);
6309 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
6310 widen->type = ins->type;
6327 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
6328 if (sp [fsig->param_count]->type == STACK_OBJ) {
6329 MonoInst *iargs [2];
6332 iargs [1] = sp [fsig->param_count];
6334 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
6337 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
6338 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, sp [fsig->param_count]->dreg);
6339 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
6340 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6342 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
6345 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
6346 if (!cmethod->klass->element_class->valuetype && !readonly)
6347 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
6350 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6353 g_assert_not_reached ();
6361 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
6363 if (!MONO_TYPE_IS_VOID (fsig->ret))
6374 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL,
6376 } else if (imt_arg) {
6377 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, imt_arg);
6379 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, NULL);
6382 if (!MONO_TYPE_IS_VOID (fsig->ret))
6390 if (cfg->method != method) {
6391 /* return from inlined method */
6393 * If in_count == 0, that means the ret is unreachable due to
6394 * being preceeded by a throw. In that case, inline_method () will
6395 * handle setting the return value
6396 * (test case: test_0_inline_throw ()).
6398 if (return_var && cfg->cbb->in_count) {
6402 //g_assert (returnvar != -1);
6403 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
6404 cfg->ret_var_set = TRUE;
6408 MonoType *ret_type = mono_method_signature (method)->ret;
6410 g_assert (!return_var);
6413 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
6416 if (!cfg->vret_addr) {
6419 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
6421 EMIT_NEW_RETLOADA (cfg, ret_addr);
6423 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
6424 ins->klass = mono_class_from_mono_type (ret_type);
6427 #ifdef MONO_ARCH_SOFT_FLOAT
6428 if (!ret_type->byref && ret_type->type == MONO_TYPE_R4) {
6429 MonoInst *iargs [1];
6433 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
6434 mono_arch_emit_setret (cfg, method, conv);
6436 mono_arch_emit_setret (cfg, method, *sp);
6439 mono_arch_emit_setret (cfg, method, *sp);
6444 if (sp != stack_start)
6446 MONO_INST_NEW (cfg, ins, OP_BR);
6448 ins->inst_target_bb = end_bblock;
6449 MONO_ADD_INS (bblock, ins);
6450 link_bblock (cfg, bblock, end_bblock);
6451 start_new_bblock = 1;
6455 MONO_INST_NEW (cfg, ins, OP_BR);
6457 target = ip + 1 + (signed char)(*ip);
6459 GET_BBLOCK (cfg, tblock, target);
6460 link_bblock (cfg, bblock, tblock);
6461 ins->inst_target_bb = tblock;
6462 if (sp != stack_start) {
6463 handle_stack_args (cfg, stack_start, sp - stack_start);
6465 CHECK_UNVERIFIABLE (cfg);
6467 MONO_ADD_INS (bblock, ins);
6468 start_new_bblock = 1;
6469 inline_costs += BRANCH_COST;
6483 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
6485 target = ip + 1 + *(signed char*)ip;
6491 inline_costs += BRANCH_COST;
6495 MONO_INST_NEW (cfg, ins, OP_BR);
6498 target = ip + 4 + (gint32)read32(ip);
6500 GET_BBLOCK (cfg, tblock, target);
6501 link_bblock (cfg, bblock, tblock);
6502 ins->inst_target_bb = tblock;
6503 if (sp != stack_start) {
6504 handle_stack_args (cfg, stack_start, sp - stack_start);
6506 CHECK_UNVERIFIABLE (cfg);
6509 MONO_ADD_INS (bblock, ins);
6511 start_new_bblock = 1;
6512 inline_costs += BRANCH_COST;
6519 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
6520 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
6521 guint32 opsize = is_short ? 1 : 4;
6523 CHECK_OPSIZE (opsize);
6525 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
6528 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
6533 GET_BBLOCK (cfg, tblock, target);
6534 link_bblock (cfg, bblock, tblock);
6535 GET_BBLOCK (cfg, tblock, ip);
6536 link_bblock (cfg, bblock, tblock);
6538 if (sp != stack_start) {
6539 handle_stack_args (cfg, stack_start, sp - stack_start);
6540 CHECK_UNVERIFIABLE (cfg);
6543 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
6544 cmp->sreg1 = sp [0]->dreg;
6545 type_from_op (cmp, sp [0], NULL);
6548 #if SIZEOF_VOID_P == 4
6549 if (cmp->opcode == OP_LCOMPARE_IMM) {
6550 /* Convert it to OP_LCOMPARE */
6551 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6552 ins->type = STACK_I8;
6553 ins->dreg = alloc_dreg (cfg, STACK_I8);
6555 MONO_ADD_INS (bblock, ins);
6556 cmp->opcode = OP_LCOMPARE;
6557 cmp->sreg2 = ins->dreg;
6560 MONO_ADD_INS (bblock, cmp);
6562 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
6563 type_from_op (ins, sp [0], NULL);
6564 MONO_ADD_INS (bblock, ins);
6565 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
6566 GET_BBLOCK (cfg, tblock, target);
6567 ins->inst_true_bb = tblock;
6568 GET_BBLOCK (cfg, tblock, ip);
6569 ins->inst_false_bb = tblock;
6570 start_new_bblock = 2;
6573 inline_costs += BRANCH_COST;
6588 MONO_INST_NEW (cfg, ins, *ip);
6590 target = ip + 4 + (gint32)read32(ip);
6596 inline_costs += BRANCH_COST;
6600 MonoBasicBlock **targets;
6601 MonoBasicBlock *default_bblock;
6602 MonoJumpInfoBBTable *table;
6603 int offset_reg = alloc_preg (cfg);
6604 int target_reg = alloc_preg (cfg);
6605 int table_reg = alloc_preg (cfg);
6606 int sum_reg = alloc_preg (cfg);
6607 gboolean use_op_switch;
6611 n = read32 (ip + 1);
6614 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
6618 CHECK_OPSIZE (n * sizeof (guint32));
6619 target = ip + n * sizeof (guint32);
6621 GET_BBLOCK (cfg, default_bblock, target);
6623 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
6624 for (i = 0; i < n; ++i) {
6625 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
6626 targets [i] = tblock;
6630 if (sp != stack_start) {
6632 * Link the current bb with the targets as well, so handle_stack_args
6633 * will set their in_stack correctly.
6635 link_bblock (cfg, bblock, default_bblock);
6636 for (i = 0; i < n; ++i)
6637 link_bblock (cfg, bblock, targets [i]);
6639 handle_stack_args (cfg, stack_start, sp - stack_start);
6641 CHECK_UNVERIFIABLE (cfg);
6644 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
6645 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
6648 for (i = 0; i < n; ++i)
6649 link_bblock (cfg, bblock, targets [i]);
6651 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
6652 table->table = targets;
6653 table->table_size = n;
6655 use_op_switch = FALSE;
6657 /* ARM implements SWITCH statements differently */
6658 /* FIXME: Make it use the generic implementation */
6659 if (!cfg->compile_aot)
6660 use_op_switch = TRUE;
6663 if (use_op_switch) {
6664 MONO_INST_NEW (cfg, ins, OP_SWITCH);
6665 ins->sreg1 = src1->dreg;
6666 ins->inst_p0 = table;
6667 ins->inst_many_bb = targets;
6668 ins->klass = GUINT_TO_POINTER (n);
6669 MONO_ADD_INS (cfg->cbb, ins);
6671 if (sizeof (gpointer) == 8)
6672 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
6674 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
6676 #if SIZEOF_VOID_P == 8
6677 /* The upper word might not be zero, and we add it to a 64 bit address later */
6678 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
6681 if (cfg->compile_aot) {
6682 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
6684 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
6685 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
6686 ins->inst_p0 = table;
6687 ins->dreg = table_reg;
6688 MONO_ADD_INS (cfg->cbb, ins);
6691 /* FIXME: Use load_memindex */
6692 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
6693 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
6694 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
6696 start_new_bblock = 1;
6697 inline_costs += (BRANCH_COST * 2);
6717 dreg = alloc_freg (cfg);
6720 dreg = alloc_lreg (cfg);
6723 dreg = alloc_preg (cfg);
6726 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
6727 ins->type = ldind_type [*ip - CEE_LDIND_I1];
6728 ins->flags |= ins_flag;
6730 MONO_ADD_INS (bblock, ins);
6745 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
6746 ins->flags |= ins_flag;
6748 MONO_ADD_INS (bblock, ins);
6756 MONO_INST_NEW (cfg, ins, (*ip));
6758 ins->sreg1 = sp [0]->dreg;
6759 ins->sreg2 = sp [1]->dreg;
6760 type_from_op (ins, sp [0], sp [1]);
6762 ins->dreg = alloc_dreg ((cfg), (ins)->type);
6764 /* Use the immediate opcodes if possible */
6765 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
6766 int imm_opcode = mono_op_to_op_imm (ins->opcode);
6767 if (imm_opcode != -1) {
6768 ins->opcode = imm_opcode;
6769 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
6772 sp [1]->opcode = OP_NOP;
6776 MONO_ADD_INS ((cfg)->cbb, (ins));
6779 mono_decompose_opcode (cfg, ins);
6796 MONO_INST_NEW (cfg, ins, (*ip));
6798 ins->sreg1 = sp [0]->dreg;
6799 ins->sreg2 = sp [1]->dreg;
6800 type_from_op (ins, sp [0], sp [1]);
6802 ADD_WIDEN_OP (ins, sp [0], sp [1]);
6803 ins->dreg = alloc_dreg ((cfg), (ins)->type);
6805 /* FIXME: Pass opcode to is_inst_imm */
6807 /* Use the immediate opcodes if possible */
6808 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
6811 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
6812 if (imm_opcode != -1) {
6813 ins->opcode = imm_opcode;
6814 if (sp [1]->opcode == OP_I8CONST) {
6815 #if SIZEOF_VOID_P == 8
6816 ins->inst_imm = sp [1]->inst_l;
6818 ins->inst_ls_word = sp [1]->inst_ls_word;
6819 ins->inst_ms_word = sp [1]->inst_ms_word;
6823 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
6826 sp [1]->opcode = OP_NOP;
6829 MONO_ADD_INS ((cfg)->cbb, (ins));
6832 mono_decompose_opcode (cfg, ins);
6845 case CEE_CONV_OVF_I8:
6846 case CEE_CONV_OVF_U8:
6850 /* Special case this earlier so we have long constants in the IR */
6851 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
6852 int data = sp [-1]->inst_c0;
6853 sp [-1]->opcode = OP_I8CONST;
6854 sp [-1]->type = STACK_I8;
6855 #if SIZEOF_VOID_P == 8
6856 if ((*ip) == CEE_CONV_U8)
6857 sp [-1]->inst_c0 = (guint32)data;
6859 sp [-1]->inst_c0 = data;
6861 sp [-1]->inst_ls_word = data;
6862 if ((*ip) == CEE_CONV_U8)
6863 sp [-1]->inst_ms_word = 0;
6865 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
6867 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
6874 case CEE_CONV_OVF_I4:
6875 case CEE_CONV_OVF_I1:
6876 case CEE_CONV_OVF_I2:
6877 case CEE_CONV_OVF_I:
6878 case CEE_CONV_OVF_U:
6881 if (sp [-1]->type == STACK_R8) {
6882 ADD_UNOP (CEE_CONV_OVF_I8);
6889 case CEE_CONV_OVF_U1:
6890 case CEE_CONV_OVF_U2:
6891 case CEE_CONV_OVF_U4:
6894 if (sp [-1]->type == STACK_R8) {
6895 ADD_UNOP (CEE_CONV_OVF_U8);
6902 case CEE_CONV_OVF_I1_UN:
6903 case CEE_CONV_OVF_I2_UN:
6904 case CEE_CONV_OVF_I4_UN:
6905 case CEE_CONV_OVF_I8_UN:
6906 case CEE_CONV_OVF_U1_UN:
6907 case CEE_CONV_OVF_U2_UN:
6908 case CEE_CONV_OVF_U4_UN:
6909 case CEE_CONV_OVF_U8_UN:
6910 case CEE_CONV_OVF_I_UN:
6911 case CEE_CONV_OVF_U_UN:
6921 case CEE_ADD_OVF_UN:
6923 case CEE_MUL_OVF_UN:
6925 case CEE_SUB_OVF_UN:
6933 token = read32 (ip + 1);
6934 klass = mini_get_class (method, token, generic_context);
6935 CHECK_TYPELOAD (klass);
6937 if (generic_class_is_reference_type (cfg, klass)) {
6938 MonoInst *store, *load;
6939 int dreg = alloc_preg (cfg);
6941 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
6942 load->flags |= ins_flag;
6943 MONO_ADD_INS (cfg->cbb, load);
6945 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
6946 store->flags |= ins_flag;
6947 MONO_ADD_INS (cfg->cbb, store);
6949 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
6961 token = read32 (ip + 1);
6962 klass = mini_get_class (method, token, generic_context);
6963 CHECK_TYPELOAD (klass);
6965 /* Optimize the common ldobj+stloc combination */
6975 loc_index = ip [5] - CEE_STLOC_0;
6982 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
6983 CHECK_LOCAL (loc_index);
6985 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
6986 ins->dreg = cfg->locals [loc_index]->dreg;
6992 /* Optimize the ldobj+stobj combination */
6993 /* The reference case ends up being a load+store anyway */
6994 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 9) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
6999 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7006 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7015 CHECK_STACK_OVF (1);
7017 n = read32 (ip + 1);
7019 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
7020 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
7021 ins->type = STACK_OBJ;
7024 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
7025 MonoInst *iargs [1];
7027 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
7028 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
7030 if (cfg->opt & MONO_OPT_SHARED) {
7031 MonoInst *iargs [3];
7033 if (cfg->compile_aot) {
7034 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
7036 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7037 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
7038 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
7039 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
7040 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7042 if (bblock->out_of_line) {
7043 MonoInst *iargs [2];
7045 if (cfg->method->klass->image == mono_defaults.corlib) {
7047 * Avoid relocations in AOT and save some space by using a
7048 * version of helper_ldstr specialized to mscorlib.
7050 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
7051 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
7053 /* Avoid creating the string object */
7054 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
7055 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
7056 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
7060 if (cfg->compile_aot) {
7061 NEW_LDSTRCONST (cfg, ins, image, n);
7063 MONO_ADD_INS (bblock, ins);
7066 NEW_PCONST (cfg, ins, NULL);
7067 ins->type = STACK_OBJ;
7068 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7070 MONO_ADD_INS (bblock, ins);
7079 MonoInst *iargs [2];
7080 MonoMethodSignature *fsig;
7083 MonoInst *vtable_arg = NULL;
7086 token = read32 (ip + 1);
7087 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7090 fsig = mono_method_get_signature (cmethod, image, token);
7092 mono_save_token_info (cfg, image, token, cmethod);
7094 if (!mono_class_init (cmethod->klass))
7097 if (cfg->generic_sharing_context)
7098 context_used = mono_method_check_context_used (cmethod);
7100 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
7101 if (check_linkdemand (cfg, method, cmethod))
7103 CHECK_CFG_EXCEPTION;
7104 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
7105 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
7108 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7109 mono_method_is_generic_sharable_impl (cmethod, TRUE)) {
7110 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7112 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7113 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7115 EMIT_NEW_METHOD_RGCTX_CONST (cfg, vtable_arg, cmethod);
7119 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
7120 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7122 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7124 CHECK_TYPELOAD (cmethod->klass);
7125 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7130 n = fsig->param_count;
7134 * Generate smaller code for the common newobj <exception> instruction in
7135 * argument checking code.
7137 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib && n <= 2 &&
7138 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
7139 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
7140 MonoInst *iargs [3];
7142 g_assert (!vtable_arg);
7146 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
7149 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
7153 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
7158 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
7161 g_assert_not_reached ();
7169 /* move the args to allow room for 'this' in the first position */
7175 /* check_call_signature () requires sp[0] to be set */
7176 this_ins.type = STACK_OBJ;
7178 if (check_call_signature (cfg, fsig, sp))
7183 if (mini_class_is_system_array (cmethod->klass)) {
7185 GENERIC_SHARING_FAILURE (*ip);
7186 g_assert (!context_used);
7187 g_assert (!vtable_arg);
7188 EMIT_NEW_METHODCONST (cfg, *sp, cmethod);
7190 /* Avoid varargs in the common case */
7191 if (fsig->param_count == 1)
7192 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
7193 else if (fsig->param_count == 2)
7194 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
7196 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
7197 } else if (cmethod->string_ctor) {
7198 g_assert (!context_used);
7199 g_assert (!vtable_arg);
7200 /* we simply pass a null pointer */
7201 EMIT_NEW_PCONST (cfg, *sp, NULL);
7202 /* now call the string ctor */
7203 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, sp, NULL, NULL);
7205 MonoInst* callvirt_this_arg = NULL;
7207 if (cmethod->klass->valuetype) {
7208 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
7209 MONO_EMIT_NEW_VZERO (cfg, iargs [0]->dreg, cmethod->klass);
7210 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
7215 * The code generated by mini_emit_virtual_call () expects
7216 * iargs [0] to be a boxed instance, but luckily the vcall
7217 * will be transformed into a normal call there.
7219 } else if (context_used) {
7223 if (cfg->opt & MONO_OPT_SHARED)
7224 rgctx_info = MONO_RGCTX_INFO_KLASS;
7226 rgctx_info = MONO_RGCTX_INFO_VTABLE;
7227 data = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, rgctx_info);
7229 alloc = handle_alloc_from_inst (cfg, cmethod->klass, data, FALSE);
7232 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7234 CHECK_TYPELOAD (cmethod->klass);
7237 * TypeInitializationExceptions thrown from the mono_runtime_class_init
7238 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
7239 * As a workaround, we call class cctors before allocating objects.
7241 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
7242 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
7243 if (cfg->verbose_level > 2)
7244 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
7245 class_inits = g_slist_prepend (class_inits, vtable);
7248 alloc = handle_alloc (cfg, cmethod->klass, FALSE);
7253 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
7255 /* Now call the actual ctor */
7256 /* Avoid virtual calls to ctors if possible */
7257 if (cmethod->klass->marshalbyref)
7258 callvirt_this_arg = sp [0];
7260 if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7261 mono_method_check_inlining (cfg, cmethod) &&
7262 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
7263 !g_list_find (dont_inline, cmethod)) {
7266 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
7267 cfg->real_offset += 5;
7270 inline_costs += costs - 5;
7273 mono_emit_method_call_full (cfg, cmethod, fsig, sp, callvirt_this_arg, NULL);
7275 } else if (context_used &&
7276 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
7277 !mono_class_generic_sharing_enabled (cmethod->klass))) {
7278 MonoInst *cmethod_addr;
7280 g_assert (!callvirt_this_arg);
7282 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7283 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7285 mono_emit_rgctx_calli (cfg, fsig, sp, cmethod_addr, vtable_arg);
7288 mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp,
7289 callvirt_this_arg, NULL, vtable_arg);
7293 if (alloc == NULL) {
7295 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
7296 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
7310 token = read32 (ip + 1);
7311 klass = mini_get_class (method, token, generic_context);
7312 CHECK_TYPELOAD (klass);
7313 if (sp [0]->type != STACK_OBJ)
7316 if (cfg->generic_sharing_context)
7317 context_used = mono_class_check_context_used (klass);
7326 args [1] = emit_get_rgctx_klass (cfg, context_used,
7327 klass, MONO_RGCTX_INFO_KLASS);
7329 ins = mono_emit_jit_icall (cfg, mono_object_castclass, args);
7333 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7334 MonoMethod *mono_castclass;
7335 MonoInst *iargs [1];
7338 mono_castclass = mono_marshal_get_castclass (klass);
7341 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7342 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7343 g_assert (costs > 0);
7346 cfg->real_offset += 5;
7351 inline_costs += costs;
7354 ins = handle_castclass (cfg, klass, *sp);
7364 token = read32 (ip + 1);
7365 klass = mini_get_class (method, token, generic_context);
7366 CHECK_TYPELOAD (klass);
7367 if (sp [0]->type != STACK_OBJ)
7370 if (cfg->generic_sharing_context)
7371 context_used = mono_class_check_context_used (klass);
7380 args [1] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
7382 *sp = mono_emit_jit_icall (cfg, mono_object_isinst, args);
7386 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7387 MonoMethod *mono_isinst;
7388 MonoInst *iargs [1];
7391 mono_isinst = mono_marshal_get_isinst (klass);
7394 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
7395 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7396 g_assert (costs > 0);
7399 cfg->real_offset += 5;
7404 inline_costs += costs;
7407 ins = handle_isinst (cfg, klass, *sp);
7414 case CEE_UNBOX_ANY: {
7418 token = read32 (ip + 1);
7419 klass = mini_get_class (method, token, generic_context);
7420 CHECK_TYPELOAD (klass);
7422 mono_save_token_info (cfg, image, token, klass);
7424 if (cfg->generic_sharing_context)
7425 context_used = mono_class_check_context_used (klass);
7427 if (generic_class_is_reference_type (cfg, klass)) {
7430 MonoInst *iargs [2];
7435 iargs [1] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
7436 ins = mono_emit_jit_icall (cfg, mono_object_castclass, iargs);
7440 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7441 MonoMethod *mono_castclass;
7442 MonoInst *iargs [1];
7445 mono_castclass = mono_marshal_get_castclass (klass);
7448 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7449 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7451 g_assert (costs > 0);
7454 cfg->real_offset += 5;
7458 inline_costs += costs;
7460 ins = handle_castclass (cfg, klass, *sp);
7468 if (mono_class_is_nullable (klass)) {
7469 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
7476 ins = handle_unbox (cfg, klass, sp, context_used);
7482 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7495 token = read32 (ip + 1);
7496 klass = mini_get_class (method, token, generic_context);
7497 CHECK_TYPELOAD (klass);
7499 mono_save_token_info (cfg, image, token, klass);
7501 if (cfg->generic_sharing_context)
7502 context_used = mono_class_check_context_used (klass);
7504 if (generic_class_is_reference_type (cfg, klass)) {
7510 if (klass == mono_defaults.void_class)
7512 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
7514 /* frequent check in generic code: box (struct), brtrue */
7515 if (!mono_class_is_nullable (klass) &&
7516 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) && (ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S)) {
7517 /*printf ("box-brtrue opt at 0x%04x in %s\n", real_offset, method->name);*/
7519 MONO_INST_NEW (cfg, ins, OP_BR);
7520 if (*ip == CEE_BRTRUE_S) {
7523 target = ip + 1 + (signed char)(*ip);
7528 target = ip + 4 + (gint)(read32 (ip));
7531 GET_BBLOCK (cfg, tblock, target);
7532 link_bblock (cfg, bblock, tblock);
7533 ins->inst_target_bb = tblock;
7534 GET_BBLOCK (cfg, tblock, ip);
7536 * This leads to some inconsistency, since the two bblocks are
7537 * not really connected, but it is needed for handling stack
7538 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
7539 * FIXME: This should only be needed if sp != stack_start, but that
7540 * doesn't work for some reason (test failure in mcs/tests on x86).
7542 link_bblock (cfg, bblock, tblock);
7543 if (sp != stack_start) {
7544 handle_stack_args (cfg, stack_start, sp - stack_start);
7546 CHECK_UNVERIFIABLE (cfg);
7548 MONO_ADD_INS (bblock, ins);
7549 start_new_bblock = 1;
7557 if (cfg->opt & MONO_OPT_SHARED)
7558 rgctx_info = MONO_RGCTX_INFO_KLASS;
7560 rgctx_info = MONO_RGCTX_INFO_VTABLE;
7561 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
7562 *sp++ = handle_box_from_inst (cfg, val, klass, context_used, data);
7564 *sp++ = handle_box (cfg, val, klass);
7575 token = read32 (ip + 1);
7576 klass = mini_get_class (method, token, generic_context);
7577 CHECK_TYPELOAD (klass);
7579 mono_save_token_info (cfg, image, token, klass);
7581 if (cfg->generic_sharing_context)
7582 context_used = mono_class_check_context_used (klass);
7584 if (mono_class_is_nullable (klass)) {
7587 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
7588 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
7592 ins = handle_unbox (cfg, klass, sp, context_used);
7602 MonoClassField *field;
7606 if (*ip == CEE_STFLD) {
7613 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
7615 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
7618 token = read32 (ip + 1);
7619 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7620 field = mono_method_get_wrapper_data (method, token);
7621 klass = field->parent;
7624 field = mono_field_from_token (image, token, &klass, generic_context);
7628 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
7629 FIELD_ACCESS_FAILURE;
7630 mono_class_init (klass);
7632 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
7633 if (*ip == CEE_STFLD) {
7634 if (target_type_is_incompatible (cfg, field->type, sp [1]))
7636 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
7637 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
7638 MonoInst *iargs [5];
7641 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
7642 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
7643 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
7647 if (cfg->opt & MONO_OPT_INLINE) {
7648 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
7649 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7650 g_assert (costs > 0);
7652 cfg->real_offset += 5;
7655 inline_costs += costs;
7657 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
7662 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
7664 store->flags |= ins_flag;
7671 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
7672 MonoMethod *wrapper = (*ip == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
7673 MonoInst *iargs [4];
7676 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
7677 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
7678 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
7679 if ((cfg->opt & MONO_OPT_INLINE) && !MONO_TYPE_ISSTRUCT (mono_method_signature (wrapper)->ret)) {
7680 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
7681 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7683 g_assert (costs > 0);
7685 cfg->real_offset += 5;
7689 inline_costs += costs;
7691 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
7695 if (sp [0]->type == STACK_VTYPE) {
7698 /* Have to compute the address of the variable */
7700 var = get_vreg_to_inst (cfg, sp [0]->dreg);
7702 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
7704 g_assert (var->klass == klass);
7706 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
7710 if (*ip == CEE_LDFLDA) {
7711 dreg = alloc_preg (cfg);
7713 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
7714 ins->klass = mono_class_from_mono_type (field->type);
7715 ins->type = STACK_MP;
7720 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
7721 load->flags |= ins_flag;
7732 MonoClassField *field;
7733 gpointer addr = NULL;
7734 gboolean is_special_static;
7737 token = read32 (ip + 1);
7739 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7740 field = mono_method_get_wrapper_data (method, token);
7741 klass = field->parent;
7744 field = mono_field_from_token (image, token, &klass, generic_context);
7747 mono_class_init (klass);
7748 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
7749 FIELD_ACCESS_FAILURE;
7752 * We can only support shared generic static
7753 * field access on architectures where the
7754 * trampoline code has been extended to handle
7755 * the generic class init.
7757 #ifndef MONO_ARCH_VTABLE_REG
7758 GENERIC_SHARING_FAILURE (*ip);
7761 if (cfg->generic_sharing_context)
7762 context_used = mono_class_check_context_used (klass);
7764 g_assert (!(field->type->attrs & FIELD_ATTRIBUTE_LITERAL));
7766 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
7767 * to be called here.
7769 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
7770 mono_class_vtable (cfg->domain, klass);
7771 CHECK_TYPELOAD (klass);
7773 mono_domain_lock (cfg->domain);
7774 if (cfg->domain->special_static_fields)
7775 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
7776 mono_domain_unlock (cfg->domain);
7778 is_special_static = mono_class_field_is_special_static (field);
7780 /* Generate IR to compute the field address */
7782 if ((cfg->opt & MONO_OPT_SHARED) ||
7783 (cfg->compile_aot && is_special_static) ||
7784 (context_used && is_special_static)) {
7785 MonoInst *iargs [2];
7787 g_assert (field->parent);
7788 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7790 iargs [1] = emit_get_rgctx_field (cfg, context_used,
7791 field, MONO_RGCTX_INFO_CLASS_FIELD);
7793 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
7795 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
7796 } else if (context_used) {
7797 MonoInst *static_data;
7800 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
7801 method->klass->name_space, method->klass->name, method->name,
7802 depth, field->offset);
7805 if (mono_class_needs_cctor_run (klass, method)) {
7809 vtable = emit_get_rgctx_klass (cfg, context_used,
7810 klass, MONO_RGCTX_INFO_VTABLE);
7812 // FIXME: This doesn't work since it tries to pass the argument
7813 // in the normal way, instead of using MONO_ARCH_VTABLE_REG
7815 * The vtable pointer is always passed in a register regardless of
7816 * the calling convention, so assign it manually, and make a call
7817 * using a signature without parameters.
7819 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable);
7820 #ifdef MONO_ARCH_VTABLE_REG
7821 mono_call_inst_add_outarg_reg (cfg, call, vtable->dreg, MONO_ARCH_VTABLE_REG, FALSE);
7822 cfg->uses_vtable_reg = TRUE;
7829 * The pointer we're computing here is
7831 * super_info.static_data + field->offset
7833 static_data = emit_get_rgctx_klass (cfg, context_used,
7834 klass, MONO_RGCTX_INFO_STATIC_DATA);
7836 if (field->offset == 0) {
7839 int addr_reg = mono_alloc_preg (cfg);
7840 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
7842 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
7843 MonoInst *iargs [2];
7845 g_assert (field->parent);
7846 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7847 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
7848 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
7850 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
7852 CHECK_TYPELOAD (klass);
7854 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
7855 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
7856 if (cfg->verbose_level > 2)
7857 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
7858 class_inits = g_slist_prepend (class_inits, vtable);
7860 if (cfg->run_cctors) {
7862 /* This makes so that inline cannot trigger */
7863 /* .cctors: too many apps depend on them */
7864 /* running with a specific order... */
7865 if (! vtable->initialized)
7867 ex = mono_runtime_class_init_full (vtable, FALSE);
7869 set_exception_object (cfg, ex);
7870 goto exception_exit;
7874 addr = (char*)vtable->data + field->offset;
7876 if (cfg->compile_aot)
7877 EMIT_NEW_SFLDACONST (cfg, ins, field);
7879 EMIT_NEW_PCONST (cfg, ins, addr);
7882 * insert call to mono_threads_get_static_data (GPOINTER_TO_UINT (addr))
7883 * This could be later optimized to do just a couple of
7884 * memory dereferences with constant offsets.
7886 MonoInst *iargs [1];
7887 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
7888 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
7892 /* Generate IR to do the actual load/store operation */
7894 if (*ip == CEE_LDSFLDA) {
7895 ins->klass = mono_class_from_mono_type (field->type);
7897 } else if (*ip == CEE_STSFLD) {
7902 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, ins->dreg, 0, sp [0]->dreg);
7903 store->flags |= ins_flag;
7905 gboolean is_const = FALSE;
7906 MonoVTable *vtable = NULL;
7908 if (!context_used) {
7909 vtable = mono_class_vtable (cfg->domain, klass);
7910 CHECK_TYPELOAD (klass);
7912 if (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) &&
7913 vtable->initialized && (field->type->attrs & FIELD_ATTRIBUTE_INIT_ONLY)) {
7914 gpointer addr = (char*)vtable->data + field->offset;
7915 int ro_type = field->type->type;
7916 if (ro_type == MONO_TYPE_VALUETYPE && field->type->data.klass->enumtype) {
7917 ro_type = field->type->data.klass->enum_basetype->type;
7919 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
7922 case MONO_TYPE_BOOLEAN:
7924 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
7928 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
7931 case MONO_TYPE_CHAR:
7933 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
7937 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
7942 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
7946 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
7951 case MONO_TYPE_STRING:
7952 case MONO_TYPE_OBJECT:
7953 case MONO_TYPE_CLASS:
7954 case MONO_TYPE_SZARRAY:
7956 case MONO_TYPE_FNPTR:
7957 case MONO_TYPE_ARRAY:
7958 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
7959 type_to_eval_stack_type ((cfg), field->type, *sp);
7964 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
7969 case MONO_TYPE_VALUETYPE:
7979 CHECK_STACK_OVF (1);
7981 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
7982 load->flags |= ins_flag;
7995 token = read32 (ip + 1);
7996 klass = mini_get_class (method, token, generic_context);
7997 CHECK_TYPELOAD (klass);
7998 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
7999 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
8010 const char *data_ptr;
8012 guint32 field_token;
8018 token = read32 (ip + 1);
8020 klass = mini_get_class (method, token, generic_context);
8021 CHECK_TYPELOAD (klass);
8023 if (cfg->generic_sharing_context)
8024 context_used = mono_class_check_context_used (klass);
8029 /* FIXME: Decompose later to help abcrem */
8032 args [0] = emit_get_rgctx_klass (cfg, context_used,
8033 mono_array_class_get (klass, 1), MONO_RGCTX_INFO_VTABLE);
8038 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
8040 if (cfg->opt & MONO_OPT_SHARED) {
8041 /* Decompose now to avoid problems with references to the domainvar */
8042 MonoInst *iargs [3];
8044 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8045 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8048 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
8050 /* Decompose later since it is needed by abcrem */
8051 MONO_INST_NEW (cfg, ins, OP_NEWARR);
8052 ins->dreg = alloc_preg (cfg);
8053 ins->sreg1 = sp [0]->dreg;
8054 ins->inst_newa_class = klass;
8055 ins->type = STACK_OBJ;
8057 MONO_ADD_INS (cfg->cbb, ins);
8058 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8059 cfg->cbb->has_array_access = TRUE;
8061 /* Needed so mono_emit_load_get_addr () gets called */
8062 mono_get_got_var (cfg);
8072 * we inline/optimize the initialization sequence if possible.
8073 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
8074 * for small sizes open code the memcpy
8075 * ensure the rva field is big enough
8077 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
8078 MonoMethod *memcpy_method = get_memcpy_method ();
8079 MonoInst *iargs [3];
8080 int add_reg = alloc_preg (cfg);
8082 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
8083 if (cfg->compile_aot) {
8084 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
8086 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
8088 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
8089 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
8098 if (sp [0]->type != STACK_OBJ)
8101 dreg = alloc_preg (cfg);
8102 MONO_INST_NEW (cfg, ins, OP_LDLEN);
8103 ins->dreg = alloc_preg (cfg);
8104 ins->sreg1 = sp [0]->dreg;
8105 ins->type = STACK_I4;
8106 MONO_ADD_INS (cfg->cbb, ins);
8107 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8108 cfg->cbb->has_array_access = TRUE;
8116 if (sp [0]->type != STACK_OBJ)
8119 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8121 klass = mini_get_class (method, read32 (ip + 1), generic_context);
8122 CHECK_TYPELOAD (klass);
8123 /* we need to make sure that this array is exactly the type it needs
8124 * to be for correctness. the wrappers are lax with their usage
8125 * so we need to ignore them here
8127 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly)
8128 mini_emit_check_array_type (cfg, sp [0], mono_array_class_get (klass, 1));
8131 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8135 case CEE_LDELEM_ANY:
8146 case CEE_LDELEM_REF: {
8152 if (*ip == CEE_LDELEM_ANY) {
8154 token = read32 (ip + 1);
8155 klass = mini_get_class (method, token, generic_context);
8156 CHECK_TYPELOAD (klass);
8157 mono_class_init (klass);
8160 klass = array_access_to_klass (*ip);
8162 if (sp [0]->type != STACK_OBJ)
8165 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8167 if (sp [1]->opcode == OP_ICONST) {
8168 int array_reg = sp [0]->dreg;
8169 int index_reg = sp [1]->dreg;
8170 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8172 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8173 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
8175 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8176 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
8179 if (*ip == CEE_LDELEM_ANY)
8192 case CEE_STELEM_REF:
8193 case CEE_STELEM_ANY: {
8199 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8201 if (*ip == CEE_STELEM_ANY) {
8203 token = read32 (ip + 1);
8204 klass = mini_get_class (method, token, generic_context);
8205 CHECK_TYPELOAD (klass);
8206 mono_class_init (klass);
8209 klass = array_access_to_klass (*ip);
8211 if (sp [0]->type != STACK_OBJ)
8214 /* storing a NULL doesn't need any of the complex checks in stelemref */
8215 if (generic_class_is_reference_type (cfg, klass) &&
8216 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
8217 MonoMethod* helper = mono_marshal_get_stelemref ();
8218 MonoInst *iargs [3];
8220 if (sp [0]->type != STACK_OBJ)
8222 if (sp [2]->type != STACK_OBJ)
8229 mono_emit_method_call (cfg, helper, iargs, NULL);
8231 if (sp [1]->opcode == OP_ICONST) {
8232 int array_reg = sp [0]->dreg;
8233 int index_reg = sp [1]->dreg;
8234 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8236 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8237 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
8239 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8240 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
8244 if (*ip == CEE_STELEM_ANY)
8251 case CEE_CKFINITE: {
8255 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
8256 ins->sreg1 = sp [0]->dreg;
8257 ins->dreg = alloc_freg (cfg);
8258 ins->type = STACK_R8;
8259 MONO_ADD_INS (bblock, ins);
8262 mono_decompose_opcode (cfg, ins);
8267 case CEE_REFANYVAL: {
8268 MonoInst *src_var, *src;
8270 int klass_reg = alloc_preg (cfg);
8271 int dreg = alloc_preg (cfg);
8274 MONO_INST_NEW (cfg, ins, *ip);
8277 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8278 CHECK_TYPELOAD (klass);
8279 mono_class_init (klass);
8281 if (cfg->generic_sharing_context)
8282 context_used = mono_class_check_context_used (klass);
8285 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
8287 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
8288 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
8289 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
8292 MonoInst *klass_ins;
8294 klass_ins = emit_get_rgctx_klass (cfg, context_used,
8295 klass, MONO_RGCTX_INFO_KLASS);
8298 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
8299 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
8301 mini_emit_class_check (cfg, klass_reg, klass);
8303 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
8304 ins->type = STACK_MP;
8309 case CEE_MKREFANY: {
8310 MonoInst *loc, *addr;
8313 MONO_INST_NEW (cfg, ins, *ip);
8316 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8317 CHECK_TYPELOAD (klass);
8318 mono_class_init (klass);
8320 if (cfg->generic_sharing_context)
8321 context_used = mono_class_check_context_used (klass);
8323 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
8324 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
8327 MonoInst *const_ins;
8328 int type_reg = alloc_preg (cfg);
8330 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
8331 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
8332 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8333 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8334 } else if (cfg->compile_aot) {
8335 int const_reg = alloc_preg (cfg);
8336 int type_reg = alloc_preg (cfg);
8338 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
8339 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
8340 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8341 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8343 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
8344 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
8346 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
8348 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
8349 ins->type = STACK_VTYPE;
8350 ins->klass = mono_defaults.typed_reference_class;
8357 MonoClass *handle_class;
8359 CHECK_STACK_OVF (1);
8362 n = read32 (ip + 1);
8364 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
8365 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
8366 handle = mono_method_get_wrapper_data (method, n);
8367 handle_class = mono_method_get_wrapper_data (method, n + 1);
8368 if (handle_class == mono_defaults.typehandle_class)
8369 handle = &((MonoClass*)handle)->byval_arg;
8372 handle = mono_ldtoken (image, n, &handle_class, generic_context);
8376 mono_class_init (handle_class);
8377 if (cfg->generic_sharing_context) {
8378 if (handle_class == mono_defaults.typehandle_class) {
8379 /* If we get a MONO_TYPE_CLASS
8380 then we need to provide the
8382 instantiation of it. */
8383 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
8386 context_used = mono_class_check_context_used (mono_class_from_mono_type (handle));
8387 } else if (handle_class == mono_defaults.fieldhandle_class)
8388 context_used = mono_class_check_context_used (((MonoClassField*)handle)->parent);
8389 else if (handle_class == mono_defaults.methodhandle_class)
8390 context_used = mono_method_check_context_used (handle);
8392 g_assert_not_reached ();
8395 if ((cfg->opt & MONO_OPT_SHARED) &&
8396 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
8397 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
8398 MonoInst *addr, *vtvar, *iargs [3];
8399 int method_context_used;
8401 if (cfg->generic_sharing_context)
8402 method_context_used = mono_method_check_context_used (method);
8404 method_context_used = 0;
8406 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8408 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
8409 EMIT_NEW_ICONST (cfg, iargs [1], n);
8410 if (method_context_used) {
8411 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
8412 method, MONO_RGCTX_INFO_METHOD);
8413 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
8415 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
8416 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
8418 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8420 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8422 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8424 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
8425 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
8426 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
8427 (cmethod->klass == mono_defaults.monotype_class->parent) &&
8428 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
8429 MonoClass *tclass = mono_class_from_mono_type (handle);
8431 mono_class_init (tclass);
8433 ins = emit_get_rgctx_klass (cfg, context_used,
8434 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
8435 } else if (cfg->compile_aot) {
8436 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
8438 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
8440 ins->type = STACK_OBJ;
8441 ins->klass = cmethod->klass;
8444 MonoInst *addr, *vtvar;
8446 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8449 if (handle_class == mono_defaults.typehandle_class) {
8450 ins = emit_get_rgctx_klass (cfg, context_used,
8451 mono_class_from_mono_type (handle),
8452 MONO_RGCTX_INFO_TYPE);
8453 } else if (handle_class == mono_defaults.methodhandle_class) {
8454 ins = emit_get_rgctx_method (cfg, context_used,
8455 handle, MONO_RGCTX_INFO_METHOD);
8456 } else if (handle_class == mono_defaults.fieldhandle_class) {
8457 ins = emit_get_rgctx_field (cfg, context_used,
8458 handle, MONO_RGCTX_INFO_CLASS_FIELD);
8460 g_assert_not_reached ();
8462 } else if (cfg->compile_aot) {
8463 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n);
8465 EMIT_NEW_PCONST (cfg, ins, handle);
8467 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8468 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8469 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8479 MONO_INST_NEW (cfg, ins, OP_THROW);
8481 ins->sreg1 = sp [0]->dreg;
8483 bblock->out_of_line = TRUE;
8484 MONO_ADD_INS (bblock, ins);
8485 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
8486 MONO_ADD_INS (bblock, ins);
8489 link_bblock (cfg, bblock, end_bblock);
8490 start_new_bblock = 1;
8492 case CEE_ENDFINALLY:
8493 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
8494 MONO_ADD_INS (bblock, ins);
8496 start_new_bblock = 1;
8499 * Control will leave the method so empty the stack, otherwise
8500 * the next basic block will start with a nonempty stack.
8502 while (sp != stack_start) {
8510 if (*ip == CEE_LEAVE) {
8512 target = ip + 5 + (gint32)read32(ip + 1);
8515 target = ip + 2 + (signed char)(ip [1]);
8518 /* empty the stack */
8519 while (sp != stack_start) {
8524 * If this leave statement is in a catch block, check for a
8525 * pending exception, and rethrow it if necessary.
8527 for (i = 0; i < header->num_clauses; ++i) {
8528 MonoExceptionClause *clause = &header->clauses [i];
8531 * Use <= in the final comparison to handle clauses with multiple
8532 * leave statements, like in bug #78024.
8533 * The ordering of the exception clauses guarantees that we find the
8536 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len)) {
8538 MonoBasicBlock *dont_throw;
8543 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
8546 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
8548 NEW_BBLOCK (cfg, dont_throw);
8551 * Currently, we always rethrow the abort exception, despite the
8552 * fact that this is not correct. See thread6.cs for an example.
8553 * But propagating the abort exception is more important than
8554 * getting the semantics right.
8556 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
8557 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
8558 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
8560 MONO_START_BB (cfg, dont_throw);
8565 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
8567 for (tmp = handlers; tmp; tmp = tmp->next) {
8569 link_bblock (cfg, bblock, tblock);
8570 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
8571 ins->inst_target_bb = tblock;
8572 MONO_ADD_INS (bblock, ins);
8574 g_list_free (handlers);
8577 MONO_INST_NEW (cfg, ins, OP_BR);
8578 MONO_ADD_INS (bblock, ins);
8579 GET_BBLOCK (cfg, tblock, target);
8580 link_bblock (cfg, bblock, tblock);
8581 ins->inst_target_bb = tblock;
8582 start_new_bblock = 1;
8584 if (*ip == CEE_LEAVE)
8593 * Mono specific opcodes
8595 case MONO_CUSTOM_PREFIX: {
8597 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
8601 case CEE_MONO_ICALL: {
8603 MonoJitICallInfo *info;
8605 token = read32 (ip + 2);
8606 func = mono_method_get_wrapper_data (method, token);
8607 info = mono_find_jit_icall_by_addr (func);
8610 CHECK_STACK (info->sig->param_count);
8611 sp -= info->sig->param_count;
8613 ins = mono_emit_jit_icall (cfg, info->func, sp);
8614 if (!MONO_TYPE_IS_VOID (info->sig->ret))
8618 inline_costs += 10 * num_calls++;
8622 case CEE_MONO_LDPTR: {
8625 CHECK_STACK_OVF (1);
8627 token = read32 (ip + 2);
8629 ptr = mono_method_get_wrapper_data (method, token);
8630 if (cfg->compile_aot && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (strstr (method->name, "__icall_wrapper_") == method->name)) {
8631 MonoJitICallInfo *callinfo;
8632 const char *icall_name;
8634 icall_name = method->name + strlen ("__icall_wrapper_");
8635 g_assert (icall_name);
8636 callinfo = mono_find_jit_icall_by_name (icall_name);
8637 g_assert (callinfo);
8639 if (ptr == callinfo->func) {
8640 /* Will be transformed into an AOTCONST later */
8641 EMIT_NEW_PCONST (cfg, ins, ptr);
8647 /* FIXME: Generalize this */
8648 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
8649 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
8654 EMIT_NEW_PCONST (cfg, ins, ptr);
8657 inline_costs += 10 * num_calls++;
8658 /* Can't embed random pointers into AOT code */
8659 cfg->disable_aot = 1;
8662 case CEE_MONO_ICALL_ADDR: {
8663 MonoMethod *cmethod;
8666 CHECK_STACK_OVF (1);
8668 token = read32 (ip + 2);
8670 cmethod = mono_method_get_wrapper_data (method, token);
8672 if (cfg->compile_aot) {
8673 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
8675 ptr = mono_lookup_internal_call (cmethod);
8677 EMIT_NEW_PCONST (cfg, ins, ptr);
8683 case CEE_MONO_VTADDR: {
8684 MonoInst *src_var, *src;
8690 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
8691 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
8696 case CEE_MONO_NEWOBJ: {
8697 MonoInst *iargs [2];
8699 CHECK_STACK_OVF (1);
8701 token = read32 (ip + 2);
8702 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
8703 mono_class_init (klass);
8704 NEW_DOMAINCONST (cfg, iargs [0]);
8705 MONO_ADD_INS (cfg->cbb, iargs [0]);
8706 NEW_CLASSCONST (cfg, iargs [1], klass);
8707 MONO_ADD_INS (cfg->cbb, iargs [1]);
8708 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
8710 inline_costs += 10 * num_calls++;
8713 case CEE_MONO_OBJADDR:
8716 MONO_INST_NEW (cfg, ins, OP_MOVE);
8717 ins->dreg = alloc_preg (cfg);
8718 ins->sreg1 = sp [0]->dreg;
8719 ins->type = STACK_MP;
8720 MONO_ADD_INS (cfg->cbb, ins);
8724 case CEE_MONO_LDNATIVEOBJ:
8726 * Similar to LDOBJ, but instead load the unmanaged
8727 * representation of the vtype to the stack.
8732 token = read32 (ip + 2);
8733 klass = mono_method_get_wrapper_data (method, token);
8734 g_assert (klass->valuetype);
8735 mono_class_init (klass);
8738 MonoInst *src, *dest, *temp;
8741 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
8742 temp->backend.is_pinvoke = 1;
8743 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
8744 mini_emit_stobj (cfg, dest, src, klass, TRUE);
8746 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
8747 dest->type = STACK_VTYPE;
8748 dest->klass = klass;
8754 case CEE_MONO_RETOBJ: {
8756 * Same as RET, but return the native representation of a vtype
8759 g_assert (cfg->ret);
8760 g_assert (mono_method_signature (method)->pinvoke);
8765 token = read32 (ip + 2);
8766 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
8768 if (!cfg->vret_addr) {
8769 g_assert (cfg->ret_var_is_local);
8771 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
8773 EMIT_NEW_RETLOADA (cfg, ins);
8775 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
8777 if (sp != stack_start)
8780 MONO_INST_NEW (cfg, ins, OP_BR);
8781 ins->inst_target_bb = end_bblock;
8782 MONO_ADD_INS (bblock, ins);
8783 link_bblock (cfg, bblock, end_bblock);
8784 start_new_bblock = 1;
8788 case CEE_MONO_CISINST:
8789 case CEE_MONO_CCASTCLASS: {
8794 token = read32 (ip + 2);
8795 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
8796 if (ip [1] == CEE_MONO_CISINST)
8797 ins = handle_cisinst (cfg, klass, sp [0]);
8799 ins = handle_ccastclass (cfg, klass, sp [0]);
8805 case CEE_MONO_SAVE_LMF:
8806 case CEE_MONO_RESTORE_LMF:
8807 #ifdef MONO_ARCH_HAVE_LMF_OPS
8808 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
8809 MONO_ADD_INS (bblock, ins);
8810 cfg->need_lmf_area = TRUE;
8814 case CEE_MONO_CLASSCONST:
8815 CHECK_STACK_OVF (1);
8817 token = read32 (ip + 2);
8818 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
8821 inline_costs += 10 * num_calls++;
8823 case CEE_MONO_NOT_TAKEN:
8824 bblock->out_of_line = TRUE;
8828 CHECK_STACK_OVF (1);
8830 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
8831 ins->dreg = alloc_preg (cfg);
8832 ins->inst_offset = (gint32)read32 (ip + 2);
8833 ins->type = STACK_PTR;
8834 MONO_ADD_INS (bblock, ins);
8839 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
8849 /* somewhat similar to LDTOKEN */
8850 MonoInst *addr, *vtvar;
8851 CHECK_STACK_OVF (1);
8852 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
8854 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8855 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
8857 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8858 ins->type = STACK_VTYPE;
8859 ins->klass = mono_defaults.argumenthandle_class;
8872 * The following transforms:
8873 * CEE_CEQ into OP_CEQ
8874 * CEE_CGT into OP_CGT
8875 * CEE_CGT_UN into OP_CGT_UN
8876 * CEE_CLT into OP_CLT
8877 * CEE_CLT_UN into OP_CLT_UN
8879 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
8881 MONO_INST_NEW (cfg, ins, cmp->opcode);
8883 cmp->sreg1 = sp [0]->dreg;
8884 cmp->sreg2 = sp [1]->dreg;
8885 type_from_op (cmp, sp [0], sp [1]);
8887 if ((sp [0]->type == STACK_I8) || ((sizeof (gpointer) == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
8888 cmp->opcode = OP_LCOMPARE;
8889 else if (sp [0]->type == STACK_R8)
8890 cmp->opcode = OP_FCOMPARE;
8892 cmp->opcode = OP_ICOMPARE;
8893 MONO_ADD_INS (bblock, cmp);
8894 ins->type = STACK_I4;
8895 ins->dreg = alloc_dreg (cfg, ins->type);
8896 type_from_op (ins, sp [0], sp [1]);
8898 if (cmp->opcode == OP_FCOMPARE) {
8900 * The backends expect the fceq opcodes to do the
8903 cmp->opcode = OP_NOP;
8904 ins->sreg1 = cmp->sreg1;
8905 ins->sreg2 = cmp->sreg2;
8907 MONO_ADD_INS (bblock, ins);
8914 MonoMethod *cil_method, *ctor_method;
8915 gboolean needs_static_rgctx_invoke;
8917 CHECK_STACK_OVF (1);
8919 n = read32 (ip + 2);
8920 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
8923 mono_class_init (cmethod->klass);
8925 mono_save_token_info (cfg, image, n, cmethod);
8927 if (cfg->generic_sharing_context)
8928 context_used = mono_method_check_context_used (cmethod);
8930 needs_static_rgctx_invoke = mono_method_needs_static_rgctx_invoke (cmethod, TRUE);
8932 cil_method = cmethod;
8933 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
8934 METHOD_ACCESS_FAILURE;
8936 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
8937 if (check_linkdemand (cfg, method, cmethod))
8939 CHECK_CFG_EXCEPTION;
8940 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
8941 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
8945 * Optimize the common case of ldftn+delegate creation
8947 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE) && !defined(HAVE_WRITE_BARRIERS)
8948 /* FIXME: SGEN support */
8949 /* FIXME: handle shared static generic methods */
8950 /* FIXME: handle this in shared code */
8951 if (!needs_static_rgctx_invoke && !context_used && (sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ) && (ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context)) && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
8952 MonoInst *target_ins;
8955 if (cfg->verbose_level > 3)
8956 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
8957 target_ins = sp [-1];
8959 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod);
8967 if (needs_static_rgctx_invoke)
8968 cmethod = mono_marshal_get_static_rgctx_invoke (cmethod);
8970 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
8971 } else if (needs_static_rgctx_invoke) {
8972 EMIT_NEW_METHODCONST (cfg, argconst, mono_marshal_get_static_rgctx_invoke (cmethod));
8974 EMIT_NEW_METHODCONST (cfg, argconst, cmethod);
8976 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
8980 inline_costs += 10 * num_calls++;
8983 case CEE_LDVIRTFTN: {
8988 n = read32 (ip + 2);
8989 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
8992 mono_class_init (cmethod->klass);
8994 if (cfg->generic_sharing_context)
8995 context_used = mono_method_check_context_used (cmethod);
8997 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
8998 if (check_linkdemand (cfg, method, cmethod))
9000 CHECK_CFG_EXCEPTION;
9001 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9002 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9009 args [1] = emit_get_rgctx_method (cfg, context_used,
9010 cmethod, MONO_RGCTX_INFO_METHOD);
9011 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
9013 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
9014 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
9018 inline_costs += 10 * num_calls++;
9022 CHECK_STACK_OVF (1);
9024 n = read16 (ip + 2);
9026 EMIT_NEW_ARGLOAD (cfg, ins, n);
9031 CHECK_STACK_OVF (1);
9033 n = read16 (ip + 2);
9035 NEW_ARGLOADA (cfg, ins, n);
9036 MONO_ADD_INS (cfg->cbb, ins);
9044 n = read16 (ip + 2);
9046 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
9048 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
9052 CHECK_STACK_OVF (1);
9054 n = read16 (ip + 2);
9056 EMIT_NEW_LOCLOAD (cfg, ins, n);
9061 unsigned char *tmp_ip;
9062 CHECK_STACK_OVF (1);
9064 n = read16 (ip + 2);
9067 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
9073 EMIT_NEW_LOCLOADA (cfg, ins, n);
9082 n = read16 (ip + 2);
9084 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
9086 emit_stloc_ir (cfg, sp, header, n);
9093 if (sp != stack_start)
9095 if (cfg->method != method)
9097 * Inlining this into a loop in a parent could lead to
9098 * stack overflows which is different behavior than the
9099 * non-inlined case, thus disable inlining in this case.
9101 goto inline_failure;
9103 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
9104 ins->dreg = alloc_preg (cfg);
9105 ins->sreg1 = sp [0]->dreg;
9106 ins->type = STACK_PTR;
9107 MONO_ADD_INS (cfg->cbb, ins);
9109 cfg->flags |= MONO_CFG_HAS_ALLOCA;
9110 if (header->init_locals)
9111 ins->flags |= MONO_INST_INIT;
9116 case CEE_ENDFILTER: {
9117 MonoExceptionClause *clause, *nearest;
9118 int cc, nearest_num;
9122 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
9124 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
9125 ins->sreg1 = (*sp)->dreg;
9126 MONO_ADD_INS (bblock, ins);
9127 start_new_bblock = 1;
9132 for (cc = 0; cc < header->num_clauses; ++cc) {
9133 clause = &header->clauses [cc];
9134 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
9135 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
9136 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
9142 if ((ip - header->code) != nearest->handler_offset)
9147 case CEE_UNALIGNED_:
9148 ins_flag |= MONO_INST_UNALIGNED;
9149 /* FIXME: record alignment? we can assume 1 for now */
9154 ins_flag |= MONO_INST_VOLATILE;
9158 ins_flag |= MONO_INST_TAILCALL;
9159 cfg->flags |= MONO_CFG_HAS_TAIL;
9160 /* Can't inline tail calls at this time */
9161 inline_costs += 100000;
9168 token = read32 (ip + 2);
9169 klass = mini_get_class (method, token, generic_context);
9170 CHECK_TYPELOAD (klass);
9171 if (generic_class_is_reference_type (cfg, klass))
9172 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
9174 mini_emit_initobj (cfg, *sp, NULL, klass);
9178 case CEE_CONSTRAINED_:
9180 token = read32 (ip + 2);
9181 constrained_call = mono_class_get_full (image, token, generic_context);
9182 CHECK_TYPELOAD (constrained_call);
9187 MonoInst *iargs [3];
9191 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
9192 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
9193 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
9194 /* emit_memset only works when val == 0 */
9195 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
9200 if (ip [1] == CEE_CPBLK) {
9201 MonoMethod *memcpy_method = get_memcpy_method ();
9202 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
9204 MonoMethod *memset_method = get_memset_method ();
9205 mono_emit_method_call (cfg, memset_method, iargs, NULL);
9215 ins_flag |= MONO_INST_NOTYPECHECK;
9217 ins_flag |= MONO_INST_NORANGECHECK;
9218 /* we ignore the no-nullcheck for now since we
9219 * really do it explicitly only when doing callvirt->call
9225 int handler_offset = -1;
9227 for (i = 0; i < header->num_clauses; ++i) {
9228 MonoExceptionClause *clause = &header->clauses [i];
9229 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
9230 handler_offset = clause->handler_offset;
9235 bblock->flags |= BB_EXCEPTION_UNSAFE;
9237 g_assert (handler_offset != -1);
9239 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
9240 MONO_INST_NEW (cfg, ins, OP_RETHROW);
9241 ins->sreg1 = load->dreg;
9242 MONO_ADD_INS (bblock, ins);
9244 link_bblock (cfg, bblock, end_bblock);
9245 start_new_bblock = 1;
9253 CHECK_STACK_OVF (1);
9255 token = read32 (ip + 2);
9256 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC) {
9257 MonoType *type = mono_type_create_from_typespec (image, token);
9258 token = mono_type_size (type, &ialign);
9260 MonoClass *klass = mono_class_get_full (image, token, generic_context);
9261 CHECK_TYPELOAD (klass);
9262 mono_class_init (klass);
9263 token = mono_class_value_size (klass, &align);
9265 EMIT_NEW_ICONST (cfg, ins, token);
9270 case CEE_REFANYTYPE: {
9271 MonoInst *src_var, *src;
9277 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9279 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
9280 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
9281 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
9291 g_error ("opcode 0xfe 0x%02x not handled", ip [1]);
9296 g_error ("opcode 0x%02x not handled", *ip);
9299 if (start_new_bblock != 1)
9302 bblock->cil_length = ip - bblock->cil_code;
9303 bblock->next_bb = end_bblock;
9305 if (cfg->method == method && cfg->domainvar) {
9307 MonoInst *get_domain;
9309 cfg->cbb = init_localsbb;
9311 if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
9312 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
9315 get_domain->dreg = alloc_preg (cfg);
9316 MONO_ADD_INS (cfg->cbb, get_domain);
9318 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
9319 MONO_ADD_INS (cfg->cbb, store);
9322 if (cfg->method == method && cfg->got_var)
9323 mono_emit_load_got_addr (cfg);
9325 if (header->init_locals) {
9328 cfg->cbb = init_localsbb;
9329 cfg->ip = header->code;
9330 for (i = 0; i < header->num_locals; ++i) {
9331 MonoType *ptype = header->locals [i];
9332 int t = ptype->type;
9333 dreg = cfg->locals [i]->dreg;
9335 if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
9336 t = ptype->data.klass->enum_basetype->type;
9338 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9339 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
9340 MONO_EMIT_NEW_ICONST (cfg, cfg->locals [i]->dreg, 0);
9341 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
9342 MONO_EMIT_NEW_I8CONST (cfg, cfg->locals [i]->dreg, 0);
9343 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
9344 MONO_INST_NEW (cfg, ins, OP_R8CONST);
9345 ins->type = STACK_R8;
9346 ins->inst_p0 = (void*)&r8_0;
9347 ins->dreg = alloc_dreg (cfg, STACK_R8);
9348 MONO_ADD_INS (init_localsbb, ins);
9349 EMIT_NEW_LOCSTORE (cfg, store, i, ins);
9350 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
9351 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
9352 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
9354 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9361 if (cfg->method == method) {
9363 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
9364 bb->region = mono_find_block_region (cfg, bb->real_offset);
9366 mono_create_spvar_for_region (cfg, bb->region);
9367 if (cfg->verbose_level > 2)
9368 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
9372 g_slist_free (class_inits);
9373 dont_inline = g_list_remove (dont_inline, method);
9375 if (inline_costs < 0) {
9378 /* Method is too large */
9379 mname = mono_method_full_name (method, TRUE);
9380 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
9381 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
9386 if ((cfg->verbose_level > 2) && (cfg->method == method))
9387 mono_print_code (cfg, "AFTER METHOD-TO-IR");
9389 return inline_costs;
9392 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
9393 g_slist_free (class_inits);
9394 dont_inline = g_list_remove (dont_inline, method);
9398 g_slist_free (class_inits);
9399 dont_inline = g_list_remove (dont_inline, method);
9403 g_slist_free (class_inits);
9404 dont_inline = g_list_remove (dont_inline, method);
9405 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
9409 g_slist_free (class_inits);
9410 dont_inline = g_list_remove (dont_inline, method);
9411 set_exception_type_from_invalid_il (cfg, method, ip);
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Map a *_MEMBASE_REG store opcode (store a register into [base + offset])
 * to its *_MEMBASE_IMM counterpart (store an immediate), preserving the
 * operand width encoded in the opcode name (I1/I2/I4/I8/pointer-sized).
 * NOTE(review): this listing is fragmentary — the return type line and the
 * enclosing switch/brace lines are not visible in this excerpt.
 */
9416 store_membase_reg_to_store_membase_imm (int opcode)
9419 case OP_STORE_MEMBASE_REG:
9420 return OP_STORE_MEMBASE_IMM;
9421 case OP_STOREI1_MEMBASE_REG:
9422 return OP_STOREI1_MEMBASE_IMM;
9423 case OP_STOREI2_MEMBASE_REG:
9424 return OP_STOREI2_MEMBASE_IMM;
9425 case OP_STOREI4_MEMBASE_REG:
9426 return OP_STOREI4_MEMBASE_IMM;
9427 case OP_STOREI8_MEMBASE_REG:
9428 return OP_STOREI8_MEMBASE_IMM;
/* any other opcode is a caller bug — this helper only accepts reg stores */
9430 g_assert_not_reached ();
9436 #endif /* DISABLE_JIT */
/*
 * mono_op_to_op_imm:
 *
 *   Map an opcode taking two register operands to the variant whose second
 * operand is an immediate (e.g. OP_ICOMPARE -> OP_ICOMPARE_IMM,
 * OP_STOREI4_MEMBASE_REG -> OP_STOREI4_MEMBASE_IMM).  Some mappings are
 * architecture-specific and are guarded by __i386__ / __x86_64__ below.
 * NOTE(review): the listing is fragmentary — most case labels and the
 * default handling are not visible in this excerpt.
 */
9439 mono_op_to_op_imm (int opcode)
9449 return OP_IDIV_UN_IMM;
9453 return OP_IREM_UN_IMM;
9467 return OP_ISHR_UN_IMM;
9484 return OP_LSHR_UN_IMM;
9487 return OP_COMPARE_IMM;
9489 return OP_ICOMPARE_IMM;
9491 return OP_LCOMPARE_IMM;
9493 case OP_STORE_MEMBASE_REG:
9494 return OP_STORE_MEMBASE_IMM;
9495 case OP_STOREI1_MEMBASE_REG:
9496 return OP_STOREI1_MEMBASE_IMM;
9497 case OP_STOREI2_MEMBASE_REG:
9498 return OP_STOREI2_MEMBASE_IMM;
9499 case OP_STOREI4_MEMBASE_REG:
9500 return OP_STOREI4_MEMBASE_IMM;
/* x86-family opcodes that accept immediate forms */
9502 #if defined(__i386__) || defined (__x86_64__)
9504 return OP_X86_PUSH_IMM;
9505 case OP_X86_COMPARE_MEMBASE_REG:
9506 return OP_X86_COMPARE_MEMBASE_IMM;
9508 #if defined(__x86_64__)
9509 case OP_AMD64_ICOMPARE_MEMBASE_REG:
9510 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
9512 case OP_VOIDCALL_REG:
9521 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Map a CIL indirect-load opcode (CEE_LDIND_*) to the corresponding
 * typed OP_LOAD*_MEMBASE IR opcode.  Note that two inputs (presumably the
 * I and REF variants — case labels are missing from this excerpt) both
 * map to the pointer-sized OP_LOAD_MEMBASE.
 * NOTE(review): fragmentary listing — the switch header and case labels
 * are not visible here.
 */
9528 ldind_to_load_membase (int opcode)
9532 return OP_LOADI1_MEMBASE;
9534 return OP_LOADU1_MEMBASE;
9536 return OP_LOADI2_MEMBASE;
9538 return OP_LOADU2_MEMBASE;
9540 return OP_LOADI4_MEMBASE;
9542 return OP_LOADU4_MEMBASE;
9544 return OP_LOAD_MEMBASE;
9546 return OP_LOAD_MEMBASE;
9548 return OP_LOADI8_MEMBASE;
9550 return OP_LOADR4_MEMBASE;
9552 return OP_LOADR8_MEMBASE;
/* unhandled ldind variant is a programming error */
9554 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Map a CIL indirect-store opcode (CEE_STIND_*) to the corresponding
 * typed OP_STORE*_MEMBASE_REG IR opcode.
 * NOTE(review): fragmentary listing — the switch header and case labels
 * are not visible in this excerpt.
 */
9561 stind_to_store_membase (int opcode)
9565 return OP_STOREI1_MEMBASE_REG;
9567 return OP_STOREI2_MEMBASE_REG;
9569 return OP_STOREI4_MEMBASE_REG;
9572 return OP_STORE_MEMBASE_REG;
9574 return OP_STOREI8_MEMBASE_REG;
9576 return OP_STORER4_MEMBASE_REG;
9578 return OP_STORER8_MEMBASE_REG;
/* unhandled stind variant is a programming error */
9580 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map an OP_LOAD*_MEMBASE opcode (load from [register + offset]) to the
 * OP_LOAD*_MEM form (load from an absolute address), available only on
 * architectures with suitable addressing modes (currently x86/amd64 —
 * see the FIXME below about generalizing the gate to an arch macro).
 * OP_LOADI8_* is only mapped on 64-bit targets.
 */
9587 mono_load_membase_to_load_mem (int opcode)
9589 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
9590 #if defined(__i386__) || defined(__x86_64__)
9592 case OP_LOAD_MEMBASE:
9594 case OP_LOADU1_MEMBASE:
9595 return OP_LOADU1_MEM;
9596 case OP_LOADU2_MEMBASE:
9597 return OP_LOADU2_MEM;
9598 case OP_LOADI4_MEMBASE:
9599 return OP_LOADI4_MEM;
9600 case OP_LOADU4_MEMBASE:
9601 return OP_LOADU4_MEM;
9602 #if SIZEOF_VOID_P == 8
9603 case OP_LOADI8_MEMBASE:
9604 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   Given an ALU opcode whose result is immediately stored by
 * 'store_opcode', return the x86-family read-modify-write opcode that
 * performs the operation directly on the memory destination
 * (e.g. ADD reg,reg + store  ->  OP_X86_ADD_MEMBASE_REG).
 * Only pointer-sized / I4 (and on amd64 also I8) stores qualify —
 * narrower stores are rejected by the guards below.
 * NOTE(review): fragmentary listing — the case labels and the fallthrough
 * return for non-matching opcodes are not visible in this excerpt.
 */
9613 op_to_op_dest_membase (int store_opcode, int opcode)
9615 #if defined(__i386__)
9616 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
9621 return OP_X86_ADD_MEMBASE_REG;
9623 return OP_X86_SUB_MEMBASE_REG;
9625 return OP_X86_AND_MEMBASE_REG;
9627 return OP_X86_OR_MEMBASE_REG;
9629 return OP_X86_XOR_MEMBASE_REG;
9632 return OP_X86_ADD_MEMBASE_IMM;
9635 return OP_X86_SUB_MEMBASE_IMM;
9638 return OP_X86_AND_MEMBASE_IMM;
9641 return OP_X86_OR_MEMBASE_IMM;
9644 return OP_X86_XOR_MEMBASE_IMM;
9650 #if defined(__x86_64__)
9651 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
9656 return OP_X86_ADD_MEMBASE_REG;
9658 return OP_X86_SUB_MEMBASE_REG;
9660 return OP_X86_AND_MEMBASE_REG;
9662 return OP_X86_OR_MEMBASE_REG;
9664 return OP_X86_XOR_MEMBASE_REG;
9666 return OP_X86_ADD_MEMBASE_IMM;
9668 return OP_X86_SUB_MEMBASE_IMM;
9670 return OP_X86_AND_MEMBASE_IMM;
9672 return OP_X86_OR_MEMBASE_IMM;
9674 return OP_X86_XOR_MEMBASE_IMM;
/* 64-bit (long) variants */
9676 return OP_AMD64_ADD_MEMBASE_REG;
9678 return OP_AMD64_SUB_MEMBASE_REG;
9680 return OP_AMD64_AND_MEMBASE_REG;
9682 return OP_AMD64_OR_MEMBASE_REG;
9684 return OP_AMD64_XOR_MEMBASE_REG;
9687 return OP_AMD64_ADD_MEMBASE_IMM;
9690 return OP_AMD64_SUB_MEMBASE_IMM;
9693 return OP_AMD64_AND_MEMBASE_IMM;
9696 return OP_AMD64_OR_MEMBASE_IMM;
9699 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Fuse a condition-materializing opcode with the byte store that follows
 * it into a single x86 SETcc-to-memory opcode (OP_X86_SETEQ_MEMBASE /
 * OP_X86_SETNE_MEMBASE).  Only a 1-byte store (OP_STOREI1_MEMBASE_REG)
 * qualifies, since SETcc writes a single byte.
 * NOTE(review): fragmentary listing — the case labels being matched are
 * not visible in this excerpt (presumably the CEQ/CNE-style opcodes).
 */
9709 op_to_op_store_membase (int store_opcode, int opcode)
9711 #if defined(__i386__) || defined(__x86_64__)
9714 if (store_opcode == OP_STOREI1_MEMBASE_REG)
9715 return OP_X86_SETEQ_MEMBASE;
9717 if (store_opcode == OP_STOREI1_MEMBASE_REG)
9718 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Fold a load feeding the FIRST source operand of 'opcode' into a memory
 * operand, returning the fused x86/amd64 opcode (e.g. load + compare ->
 * OP_X86_COMPARE_MEMBASE_REG).  'load_opcode' identifies the width of the
 * load being folded; only pointer-sized / I4 (and on amd64 I8) loads are
 * accepted by the guards below.
 * NOTE(review): fragmentary listing — the arch #if structure, some case
 * labels and the fallthrough return are not visible in this excerpt.
 */
9726 op_to_op_src1_membase (int load_opcode, int opcode)
9729 /* FIXME: This has sign extension issues */
9731 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
9732 return OP_X86_COMPARE_MEMBASE8_IMM;
9735 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
9740 return OP_X86_PUSH_MEMBASE;
9741 case OP_COMPARE_IMM:
9742 case OP_ICOMPARE_IMM:
9743 return OP_X86_COMPARE_MEMBASE_IMM;
9746 return OP_X86_COMPARE_MEMBASE_REG;
9751 /* FIXME: This has sign extension issues */
9753 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
9754 return OP_X86_COMPARE_MEMBASE8_IMM;
9759 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
9760 return OP_X86_PUSH_MEMBASE;
/* the IMM compare fusions are disabled below: they only work for
 * 32 bit immediates on amd64 */
9762 /* FIXME: This only works for 32 bit immediates
9763 case OP_COMPARE_IMM:
9764 case OP_LCOMPARE_IMM:
9765 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
9766 return OP_AMD64_COMPARE_MEMBASE_IMM;
9768 case OP_ICOMPARE_IMM:
9769 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9770 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
9774 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
9775 return OP_AMD64_COMPARE_MEMBASE_REG;
9778 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9779 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Fold a load feeding the SECOND source operand of 'opcode' into a
 * memory operand, returning the fused x86/amd64 reg-memory opcode
 * (e.g. add reg, [base + offset] -> OP_X86_ADD_REG_MEMBASE).  Each fusion
 * is gated on the width of 'load_opcode': I4/U4 loads fuse into the
 * 32-bit (X86_*) forms, I8/pointer loads into the 64-bit (AMD64_*) forms.
 * NOTE(review): fragmentary listing — the arch #if structure and the case
 * labels are not visible in this excerpt.
 */
9788 op_to_op_src2_membase (int load_opcode, int opcode)
9791 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
9797 return OP_X86_COMPARE_REG_MEMBASE;
9799 return OP_X86_ADD_REG_MEMBASE;
9801 return OP_X86_SUB_REG_MEMBASE;
9803 return OP_X86_AND_REG_MEMBASE;
9805 return OP_X86_OR_REG_MEMBASE;
9807 return OP_X86_XOR_REG_MEMBASE;
9814 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9815 return OP_AMD64_ICOMPARE_REG_MEMBASE;
9819 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9820 return OP_AMD64_COMPARE_REG_MEMBASE;
9823 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9824 return OP_X86_ADD_REG_MEMBASE;
9826 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9827 return OP_X86_SUB_REG_MEMBASE;
9829 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9830 return OP_X86_AND_REG_MEMBASE;
9832 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9833 return OP_X86_OR_REG_MEMBASE;
9835 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9836 return OP_X86_XOR_REG_MEMBASE;
9838 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9839 return OP_AMD64_ADD_REG_MEMBASE;
9841 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9842 return OP_AMD64_SUB_REG_MEMBASE;
9844 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9845 return OP_AMD64_AND_REG_MEMBASE;
9847 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9848 return OP_AMD64_OR_REG_MEMBASE;
9850 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9851 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but refuse the immediate transformation for
 * opcodes the target architecture only supports via software emulation
 * (long shifts on 32-bit targets, mul/div where the arch emulates them),
 * as indicated by the #if-guarded checks below.
 * NOTE(review): fragmentary listing — the guarded opcode checks and their
 * early returns are not visible in this excerpt.
 */
9859 mono_op_to_op_imm_noemul (int opcode)
9862 #if SIZEOF_VOID_P == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
9867 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
9875 return mono_op_to_op_imm (opcode);
9882 * mono_handle_global_vregs:
9884 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
 * (a MonoInst in cfg->varinfo) for them so later passes treat them as real
 * variables.  Afterwards, variables which turned out to be used in only one
 * bblock are converted back into plain local vregs, and the varinfo/vars
 * tables are compacted.
9888 mono_handle_global_vregs (MonoCompile *cfg)
/* vreg_to_bb [vreg] encoding: 0 = unseen, block_num + 1 = seen in exactly
 * that bblock, -1 = seen in more than one bblock.
 * NOTE(review): the size expression below is suspicious — the '+ 1' is not
 * multiplied by the element size, and sizeof (gint32*) is the size of a
 * POINTER while the array holds gint32 values.  It over-allocates on 64 bit
 * (harmless) but looks unintended; presumably
 * sizeof (gint32) * (cfg->next_vreg + 1) was meant — TODO confirm. */
9894 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
9896 #ifdef MONO_ARCH_SIMD_INTRINSICS
9897 if (cfg->uses_simd_intrinsics)
9898 mono_simd_simplify_indirection (cfg);
9901 /* Find local vregs used in more than one bb */
9902 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
9903 MonoInst *ins = bb->code;
9904 int block_num = bb->block_num;
9906 if (cfg->verbose_level > 2)
9907 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
9910 for (; ins; ins = ins->next) {
9911 const char *spec = INS_INFO (ins->opcode);
9912 int regtype, regindex;
9915 if (G_UNLIKELY (cfg->verbose_level > 2))
9916 mono_print_ins (ins);
/* By this point only low-level (non-CEE) opcodes should remain. */
9918 g_assert (ins->opcode >= MONO_CEE_LAST);
/* Visit the three register slots of the ins: dest, sreg1, sreg2. */
9920 for (regindex = 0; regindex < 3; regindex ++) {
9923 if (regindex == 0) {
9924 regtype = spec [MONO_INST_DEST];
9928 } else if (regindex == 1) {
9929 regtype = spec [MONO_INST_SRC1];
9934 regtype = spec [MONO_INST_SRC2];
9940 #if SIZEOF_VOID_P == 4
9941 if (regtype == 'l') {
9943 * Since some instructions reference the original long vreg,
9944 * and some reference the two component vregs, it is quite hard
9945 * to determine when it needs to be global. So be conservative.
9947 if (!get_vreg_to_inst (cfg, vreg)) {
9948 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
9950 if (cfg->verbose_level > 2)
9951 printf ("LONG VREG R%d made global.\n", vreg);
9955 * Make the component vregs volatile since the optimizations can
9956 * get confused otherwise.
/* On 32 bit, a long vreg's halves live at vreg + 1 (low word) and
 * vreg + 2 (high word). */
9958 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
9959 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
9963 g_assert (vreg != -1);
9965 prev_bb = vreg_to_bb [vreg];
9967 /* 0 is a valid block num */
9968 vreg_to_bb [vreg] = block_num + 1;
/* Seen before, in a different bblock, and not yet marked global. */
9969 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* Hardware registers are global by nature; skip them. */
9970 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
9973 if (!get_vreg_to_inst (cfg, vreg)) {
9974 if (G_UNLIKELY (cfg->verbose_level > 2))
9975 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
/* Variable type is picked from the spec's regtype (int/float/other). */
9979 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
9982 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
9985 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
9988 g_assert_not_reached ();
9992 /* Flag as having been used in more than one bb */
9993 vreg_to_bb [vreg] = -1;
9999 /* If a variable is used in only one bblock, convert it into a local vreg */
10000 for (i = 0; i < cfg->num_varinfo; i++) {
10001 MonoInst *var = cfg->varinfo [i];
10002 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
10004 switch (var->type) {
10010 #if SIZEOF_VOID_P == 8
10013 #if !defined(__i386__) && !defined(MONO_ARCH_SOFT_FLOAT)
10014 /* Enabling this screws up the fp stack on x86 */
10017 /* Arguments are implicitly global */
10018 /* Putting R4 vars into registers doesn't work currently */
10019 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg) {
10021 * Make sure that the variable's liveness interval doesn't contain a call, since
10022 * that would cause the lvreg to be spilled, making the whole optimization
10025 /* This is too slow for JIT compilation */
10027 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
10029 int def_index, call_index, ins_index;
10030 gboolean spilled = FALSE;
/* Scan the single bblock using the var, tracking the last def and the
 * last call; a use after a call that follows the def means a spill. */
10035 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
10036 const char *spec = INS_INFO (ins->opcode);
10038 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
10039 def_index = ins_index;
/* NOTE(review): both arms of this || test SRC1/sreg1 — the second arm
 * was presumably meant to test spec [MONO_INST_SRC2] / ins->sreg2, so
 * uses of the variable as sreg2 never reach the spill check below.
 * TODO: confirm and fix upstream. */
10041 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
10042 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
10043 if (call_index > def_index) {
10049 if (MONO_IS_CALL (ins))
10050 call_index = ins_index;
10060 if (G_UNLIKELY (cfg->verbose_level > 2))
10061 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
/* Demote: mark the variable dead and drop the vreg->var mapping. */
10062 var->flags |= MONO_INST_IS_DEAD;
10063 cfg->vreg_to_inst [var->dreg] = NULL;
10070 * Compress the varinfo and vars tables so the liveness computation is faster and
10071 * takes up less space.
10074 for (i = 0; i < cfg->num_varinfo; ++i) {
10075 MonoInst *var = cfg->varinfo [i];
10076 if (pos < i && cfg->locals_start == i)
10077 cfg->locals_start = pos;
10078 if (!(var->flags & MONO_INST_IS_DEAD)) {
/* Live entry: slide it down to index 'pos' and renumber it. */
10080 cfg->varinfo [pos] = cfg->varinfo [i];
10081 cfg->varinfo [pos]->inst_c0 = pos;
10082 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
10083 cfg->vars [pos].idx = pos;
10084 #if SIZEOF_VOID_P == 4
10085 if (cfg->varinfo [pos]->type == STACK_I8) {
10086 /* Modify the two component vars too */
10089 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
10090 var1->inst_c0 = pos;
10091 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
10092 var1->inst_c0 = pos;
10099 cfg->num_varinfo = pos;
10100 if (cfg->locals_start > cfg->num_varinfo)
10101 cfg->locals_start = cfg->num_varinfo;
10105 * mono_spill_global_vars:
10107 * Generate spill code for variables which are not allocated to registers,
10108 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
10109 * code is generated which could be optimized by the local optimization passes.
 * Stack-allocated (OP_REGOFFSET) variables get explicit load/store
 * instructions inserted around each use/def; where possible the load or
 * store is fused into the instruction itself via the op_to_op_*_membase
 * helpers, and recently-loaded values are cached in lvregs to avoid
 * redundant reloads within a bblock.
10112 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
10114 MonoBasicBlock *bb;
10116 int orig_next_vreg;
/* vreg_to_lvreg [vreg] caches the lvreg currently holding vreg's value
 * (0 = none); lvregs/lvregs_len list the cached entries for bulk reset. */
10117 guint32 *vreg_to_lvreg;
10119 guint32 i, lvregs_len;
10120 gboolean dest_has_lvreg = FALSE;
/* Maps a spec regtype char ('i'/'l'/'f'/'x') to a stack type for alloc_dreg. */
10121 guint32 stacktypes [128];
10123 *need_local_opts = FALSE;
10125 memset (spec2, 0, sizeof (spec2));
10127 /* FIXME: Move this function to mini.c */
10128 stacktypes ['i'] = STACK_PTR;
10129 stacktypes ['l'] = STACK_I8;
10130 stacktypes ['f'] = STACK_R8;
10131 #ifdef MONO_ARCH_SIMD_INTRINSICS
10132 stacktypes ['x'] = STACK_VTYPE;
10135 #if SIZEOF_VOID_P == 4
10136 /* Create MonoInsts for longs */
10137 for (i = 0; i < cfg->num_varinfo; i++) {
10138 MonoInst *ins = cfg->varinfo [i];
10140 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
10141 switch (ins->type) {
10142 #ifdef MONO_ARCH_SOFT_FLOAT
10148 g_assert (ins->opcode == OP_REGOFFSET);
/* Give each 32 bit half of the long (dreg + 1 = low, dreg + 2 = high)
 * its own REGOFFSET entry at the matching word offset. */
10150 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
10152 tree->opcode = OP_REGOFFSET;
10153 tree->inst_basereg = ins->inst_basereg;
10154 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
10156 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
10158 tree->opcode = OP_REGOFFSET;
10159 tree->inst_basereg = ins->inst_basereg;
10160 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
10170 /* FIXME: widening and truncation */
10173 * As an optimization, when a variable allocated to the stack is first loaded into
10174 * an lvreg, we will remember the lvreg and use it the next time instead of loading
10175 * the variable again.
10177 orig_next_vreg = cfg->next_vreg;
10178 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
/* Fixed capacity of 1024 cached entries; guarded by g_asserts below. */
10179 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
10182 /* Add spill loads/stores */
10183 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10186 if (cfg->verbose_level > 2)
10187 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
10189 /* Clear vreg_to_lvreg array */
10190 for (i = 0; i < lvregs_len; i++)
10191 vreg_to_lvreg [lvregs [i]] = 0;
10195 MONO_BB_FOR_EACH_INS (bb, ins) {
10196 const char *spec = INS_INFO (ins->opcode);
10197 int regtype, srcindex, sreg, tmp_reg, prev_dreg;
10198 gboolean store, no_lvreg;
10200 if (G_UNLIKELY (cfg->verbose_level > 2))
10201 mono_print_ins (ins);
10203 if (ins->opcode == OP_NOP)
10207 * We handle LDADDR here as well, since it can only be decomposed
10208 * when variable addresses are known.
10210 if (ins->opcode == OP_LDADDR) {
10211 MonoInst *var = ins->inst_p0;
10213 if (var->opcode == OP_VTARG_ADDR) {
10214 /* Happens on SPARC/S390 where vtypes are passed by reference */
10215 MonoInst *vtaddr = var->inst_left;
10216 if (vtaddr->opcode == OP_REGVAR) {
10217 ins->opcode = OP_MOVE;
10218 ins->sreg1 = vtaddr->dreg;
/* var->inst_left is the same pointer as vtaddr above. */
10220 else if (var->inst_left->opcode == OP_REGOFFSET) {
10221 ins->opcode = OP_LOAD_MEMBASE;
10222 ins->inst_basereg = vtaddr->inst_basereg;
10223 ins->inst_offset = vtaddr->inst_offset;
10227 g_assert (var->opcode == OP_REGOFFSET);
/* Address of a stack slot: base register + offset. */
10229 ins->opcode = OP_ADD_IMM;
10230 ins->sreg1 = var->inst_basereg;
10231 ins->inst_imm = var->inst_offset;
10234 *need_local_opts = TRUE;
10235 spec = INS_INFO (ins->opcode);
/* CEE-level opcodes must all have been lowered by now. */
10238 if (ins->opcode < MONO_CEE_LAST) {
10239 mono_print_ins (ins);
10240 g_assert_not_reached ();
10244 * Store opcodes have destbasereg in the dreg, but in reality, it is an
/* ...input, so temporarily swap dreg/sreg2 and use an adjusted spec
 * (spec2) with no dest, so the generic sreg handling below applies. */
10248 if (MONO_IS_STORE_MEMBASE (ins)) {
10249 tmp_reg = ins->dreg;
10250 ins->dreg = ins->sreg2;
10251 ins->sreg2 = tmp_reg;
10254 spec2 [MONO_INST_DEST] = ' ';
10255 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10256 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10258 } else if (MONO_IS_STORE_MEMINDEX (ins))
10259 g_assert_not_reached ();
10264 if (G_UNLIKELY (cfg->verbose_level > 2))
10265 printf ("\t %.3s %d %d %d\n", spec, ins->dreg, ins->sreg1, ins->sreg2);
10270 regtype = spec [MONO_INST_DEST];
10271 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
/***************/
/* DREG        */
/***************/
10274 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
10275 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
10276 MonoInst *store_ins;
10279 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
10281 if (var->opcode == OP_REGVAR) {
/* Variable lives in a hardware register: just substitute it. */
10282 ins->dreg = var->dreg;
10283 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
10285 * Instead of emitting a load+store, use a _membase opcode.
10287 g_assert (var->opcode == OP_REGOFFSET);
10288 if (ins->opcode == OP_MOVE) {
10291 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
10292 ins->inst_basereg = var->inst_basereg;
10293 ins->inst_offset = var->inst_offset;
10296 spec = INS_INFO (ins->opcode);
10300 g_assert (var->opcode == OP_REGOFFSET);
10302 prev_dreg = ins->dreg;
10304 /* Invalidate any previous lvreg for this vreg */
10305 vreg_to_lvreg [ins->dreg] = 0;
10309 #ifdef MONO_ARCH_SOFT_FLOAT
10310 if (store_opcode == OP_STORER8_MEMBASE_REG) {
/* Soft float: doubles are stored as 64 bit integers. */
10312 store_opcode = OP_STOREI8_MEMBASE_REG;
/* Redirect the def to a fresh lvreg; the store below writes it back. */
10316 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
10318 if (regtype == 'l') {
10319 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
10320 mono_bblock_insert_after_ins (bb, ins, store_ins);
10321 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
10322 mono_bblock_insert_after_ins (bb, ins, store_ins);
10325 g_assert (store_opcode != OP_STOREV_MEMBASE);
10327 /* Try to fuse the store into the instruction itself */
10328 /* FIXME: Add more instructions */
10329 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
/* Constant def: turn it into a store-immediate to the stack slot. */
10330 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
10331 ins->inst_imm = ins->inst_c0;
10332 ins->inst_destbasereg = var->inst_basereg;
10333 ins->inst_offset = var->inst_offset;
10334 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
/* Plain move def: turn it into a direct store of the source reg. */
10335 ins->opcode = store_opcode;
10336 ins->inst_destbasereg = var->inst_basereg;
10337 ins->inst_offset = var->inst_offset;
/* The ins became a store: apply the same dreg/sreg2 swap + spec2
 * override used for store opcodes above. */
10341 tmp_reg = ins->dreg;
10342 ins->dreg = ins->sreg2;
10343 ins->sreg2 = tmp_reg;
10346 spec2 [MONO_INST_DEST] = ' ';
10347 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10348 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10350 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
10351 // FIXME: The backends expect the base reg to be in inst_basereg
10352 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
10354 ins->inst_basereg = var->inst_basereg;
10355 ins->inst_offset = var->inst_offset;
10356 spec = INS_INFO (ins->opcode);
10358 /* printf ("INS: "); mono_print_ins (ins); */
10359 /* Create a store instruction */
10360 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
10362 /* Insert it after the instruction */
10363 mono_bblock_insert_after_ins (bb, ins, store_ins);
10366 * We can't assign ins->dreg to var->dreg here, since the
10367 * sregs could use it. So set a flag, and do it after
10370 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
10371 dest_has_lvreg = TRUE;
/************/
/* SREGS    */
/************/
10380 for (srcindex = 0; srcindex < 2; ++srcindex) {
10381 regtype = spec [(srcindex == 0) ? MONO_INST_SRC1 : MONO_INST_SRC2];
10382 sreg = srcindex == 0 ? ins->sreg1 : ins->sreg2;
10384 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
10385 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
10386 MonoInst *var = get_vreg_to_inst (cfg, sreg);
10387 MonoInst *load_ins;
10388 guint32 load_opcode;
10390 if (var->opcode == OP_REGVAR) {
/* Register-allocated variable: substitute its hreg directly. */
10392 ins->sreg1 = var->dreg;
10394 ins->sreg2 = var->dreg;
10398 g_assert (var->opcode == OP_REGOFFSET);
10400 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
10402 g_assert (load_opcode != OP_LOADV_MEMBASE);
10404 if (vreg_to_lvreg [sreg]) {
10405 /* The variable is already loaded to an lvreg */
10406 if (G_UNLIKELY (cfg->verbose_level > 2))
10407 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
10409 ins->sreg1 = vreg_to_lvreg [sreg];
10411 ins->sreg2 = vreg_to_lvreg [sreg];
10415 /* Try to fuse the load into the instruction */
10416 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
10417 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
10418 ins->inst_basereg = var->inst_basereg;
10419 ins->inst_offset = var->inst_offset;
10420 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
10421 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
10422 ins->sreg2 = var->inst_basereg;
10423 ins->inst_offset = var->inst_offset;
10425 if (MONO_IS_REAL_MOVE (ins)) {
/* A move whose source is reloaded anyway is redundant. */
10426 ins->opcode = OP_NOP;
10429 //printf ("%d ", srcindex); mono_print_ins (ins);
/* Load the variable into a fresh lvreg and remember it. */
10431 sreg = alloc_dreg (cfg, stacktypes [regtype]);
10433 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
10434 if (var->dreg == prev_dreg) {
10436 * sreg refers to the value loaded by the load
10437 * emitted below, but we need to use ins->dreg
10438 * since it refers to the store emitted earlier.
10442 vreg_to_lvreg [var->dreg] = sreg;
10443 g_assert (lvregs_len < 1024);
10444 lvregs [lvregs_len ++] = var->dreg;
10453 if (regtype == 'l') {
/* 32 bit: load the two halves, high word first (inserted before ins). */
10454 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
10455 mono_bblock_insert_before_ins (bb, ins, load_ins);
10456 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
10457 mono_bblock_insert_before_ins (bb, ins, load_ins);
10460 #if SIZEOF_VOID_P == 4
10461 g_assert (load_opcode != OP_LOADI8_MEMBASE);
10463 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
10464 mono_bblock_insert_before_ins (bb, ins, load_ins);
/* Deferred from the DREG handling above: now that the sregs have been
 * processed, the def's lvreg can be cached safely. */
10470 if (dest_has_lvreg) {
10471 vreg_to_lvreg [prev_dreg] = ins->dreg;
10472 g_assert (lvregs_len < 1024);
10473 lvregs [lvregs_len ++] = prev_dreg;
10474 dest_has_lvreg = FALSE;
/* Undo the earlier dreg/sreg2 swap for store opcodes. */
10478 tmp_reg = ins->dreg;
10479 ins->dreg = ins->sreg2;
10480 ins->sreg2 = tmp_reg;
10483 if (MONO_IS_CALL (ins)) {
/* Calls clobber the lvregs, so drop the whole cache. */
10484 /* Clear vreg_to_lvreg array */
10485 for (i = 0; i < lvregs_len; i++)
10486 vreg_to_lvreg [lvregs [i]] = 0;
10490 if (cfg->verbose_level > 2)
10491 mono_print_ins_index (1, ins);
10498 * - use 'iadd' instead of 'int_add'
10499 * - handling ovf opcodes: decompose in method_to_ir.
10500 * - unify iregs/fregs
10501 * -> partly done, the missing parts are:
10502 * - a more complete unification would involve unifying the hregs as well, so
10503 * code wouldn't need if (fp) all over the place. but that would mean the hregs
10504 * would no longer map to the machine hregs, so the code generators would need to
10505 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
10506 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
10507 * fp/non-fp branches speeds it up by about 15%.
10508 * - use sext/zext opcodes instead of shifts
10510 * - get rid of TEMPLOADs if possible and use vregs instead
10511 * - clean up usage of OP_P/OP_ opcodes
10512 * - cleanup usage of DUMMY_USE
10513 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
10515 * - set the stack type and allocate a dreg in the EMIT_NEW macros
10516 * - get rid of all the <foo>2 stuff when the new JIT is ready.
10517 * - make sure handle_stack_args () is called before the branch is emitted
10518 * - when the new IR is done, get rid of all unused stuff
10519 * - COMPARE/BEQ as separate instructions or unify them ?
10520 * - keeping them separate allows specialized compare instructions like
10521 * compare_imm, compare_membase
10522 * - most back ends unify fp compare+branch, fp compare+ceq
10523 * - integrate mono_save_args into inline_method
10524 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
10525 * - handle long shift opts on 32 bit platforms somehow: they require
10526 * 3 sregs (2 for arg1 and 1 for arg2)
10527 * - make byref a 'normal' type.
10528 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
10529 * variable if needed.
10530 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
10531 * like inline_method.
10532 * - remove inlining restrictions
10533 * - fix LNEG and enable cfold of INEG
10534 * - generalize x86 optimizations like ldelema as a peephole optimization
10535 * - add store_mem_imm for amd64
10536 * - optimize the loading of the interruption flag in the managed->native wrappers
10537 * - avoid special handling of OP_NOP in passes
10538 * - move code inserting instructions into one function/macro.
10539 * - try a coalescing phase after liveness analysis
10540 * - add float -> vreg conversion + local optimizations on !x86
10541 * - figure out how to handle decomposed branches during optimizations, ie.
10542 * compare+branch, op_jump_table+op_br etc.
10543 * - promote RuntimeXHandles to vregs
10544 * - vtype cleanups:
10545 * - add a NEW_VARLOADA_VREG macro
10546 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
10547 * accessing vtype fields.
10548 * - get rid of I8CONST on 64 bit platforms
10549 * - dealing with the increase in code size due to branches created during opcode
10551 * - use extended basic blocks
10552 * - all parts of the JIT
10553 * - handle_global_vregs () && local regalloc
10554 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
10555 * - sources of increase in code size:
10558 * - isinst and castclass
10559 * - lvregs not allocated to global registers even if used multiple times
10560 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
10562 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
10563 * - add all micro optimizations from the old JIT
10564 * - put tree optimizations into the deadce pass
10565 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
10566 * specific function.
10567 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
10568 * fcompare + branchCC.
10569 * - create a helper function for allocating a stack slot, taking into account
10570 * MONO_CFG_HAS_SPILLUP.
10571 * - merge new GC changes in mini.c.
10573 * - merge the ia64 switch changes.
10574 * - merge the mips conditional changes.
10575 * - remove unused opcodes from mini-ops.h, remove "op_" from the opcode names,
10576 * - make the cpu_ tables smaller when the usage of the cee_ opcodes is removed.
10577 * - optimize mono_regstate2_alloc_int/float.
10578 * - fix the pessimistic handling of variables accessed in exception handler blocks.
10579 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
10580 * parts of the tree could be separated by other instructions, killing the tree
10581 * arguments, or stores killing loads etc. Also, should we fold loads into other
10582 * instructions if the result of the load is used multiple times ?
10583 * - make the REM_IMM optimization in mini-x86.c arch-independent.
10584 * - LAST MERGE: 108395.
10585 * - when returning vtypes in registers, generate IR and append it to the end of the
10586 * last bb instead of doing it in the epilog.
10587 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
10595 - When to decompose opcodes:
10596 - earlier: this makes some optimizations hard to implement, since the low level IR
10597 no longer contains the necessary information. But it is easier to do.
10598 - later: harder to implement, enables more optimizations.
10599 - Branches inside bblocks:
10600 - created when decomposing complex opcodes.
10601 - branches to another bblock: harmless, but not tracked by the branch
10602 optimizations, so need to branch to a label at the start of the bblock.
10603 - branches to inside the same bblock: very problematic, trips up the local
10604 reg allocator. Can be fixed by splitting the current bblock, but that is a
10605 complex operation, since some local vregs can become global vregs etc.
10606 - Local/global vregs:
10607 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
10608 local register allocator.
10609 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
10610 structure, created by mono_create_var (). Assigned to hregs or the stack by
10611 the global register allocator.
10612 - When to do optimizations like alu->alu_imm:
10613 - earlier -> saves work later on since the IR will be smaller/simpler
10614 - later -> can work on more instructions
10615 - Handling of valuetypes:
10616 - When a vtype is pushed on the stack, a new temporary is created, an
10617 instruction computing its address (LDADDR) is emitted and pushed on
10618 the stack. Need to optimize cases when the vtype is used immediately as in
10619 argument passing, stloc etc.
10620 - Instead of the to_end stuff in the old JIT, simply call the function handling
10621 the values on the stack before emitting the last instruction of the bb.
10624 #endif /* DISABLE_JIT */