2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
22 #ifdef HAVE_SYS_TIME_H
26 #ifdef HAVE_VALGRIND_MEMCHECK_H
27 #include <valgrind/memcheck.h>
30 #include <mono/metadata/assembly.h>
31 #include <mono/metadata/loader.h>
32 #include <mono/metadata/tabledefs.h>
33 #include <mono/metadata/class.h>
34 #include <mono/metadata/object.h>
35 #include <mono/metadata/exception.h>
36 #include <mono/metadata/opcodes.h>
37 #include <mono/metadata/mono-endian.h>
38 #include <mono/metadata/tokentype.h>
39 #include <mono/metadata/tabledefs.h>
40 #include <mono/metadata/marshal.h>
41 #include <mono/metadata/debug-helpers.h>
42 #include <mono/metadata/mono-debug.h>
43 #include <mono/metadata/gc-internal.h>
44 #include <mono/metadata/security-manager.h>
45 #include <mono/metadata/threads-types.h>
46 #include <mono/metadata/security-core-clr.h>
47 #include <mono/metadata/monitor.h>
48 #include <mono/utils/mono-compiler.h>
55 #include "jit-icalls.h"
57 #define BRANCH_COST 100
58 #define INLINE_LENGTH_LIMIT 20
59 #define INLINE_FAILURE do {\
60 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE))\
63 #define CHECK_CFG_EXCEPTION do {\
64 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
67 #define METHOD_ACCESS_FAILURE do { \
68 char *method_fname = mono_method_full_name (method, TRUE); \
69 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
70 cfg->exception_type = MONO_EXCEPTION_METHOD_ACCESS; \
71 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
72 g_free (method_fname); \
73 g_free (cil_method_fname); \
74 goto exception_exit; \
76 #define FIELD_ACCESS_FAILURE do { \
77 char *method_fname = mono_method_full_name (method, TRUE); \
78 char *field_fname = mono_field_full_name (field); \
79 cfg->exception_type = MONO_EXCEPTION_FIELD_ACCESS; \
80 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
81 g_free (method_fname); \
82 g_free (field_fname); \
83 goto exception_exit; \
85 #define GENERIC_SHARING_FAILURE(opcode) do { \
86 if (cfg->generic_sharing_context) { \
87 if (cfg->verbose_level > 2) \
88 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
89 cfg->exception_type = MONO_EXCEPTION_GENERIC_SHARING_FAILED; \
90 goto exception_exit; \
94 /* Determine whether 'ins' represents a load of the 'this' argument */
95 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
97 static int ldind_to_load_membase (int opcode);
98 static int stind_to_store_membase (int opcode);
100 int mono_op_to_op_imm (int opcode);
101 int mono_op_to_op_imm_noemul (int opcode);
103 MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
104 void mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native);
105 void mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass);
107 /* helper methods signature */
108 extern MonoMethodSignature *helper_sig_class_init_trampoline;
109 extern MonoMethodSignature *helper_sig_domain_get;
110 extern MonoMethodSignature *helper_sig_generic_class_init_trampoline;
111 extern MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
112 extern MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline;
115 * Instruction metadata
120 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2,
126 #if SIZEOF_VOID_P == 8
131 /* keep in sync with the enum in mini.h */
134 #include "mini-ops.h"
138 extern GHashTable *jit_icall_name_hash;
140 #define MONO_INIT_VARINFO(vi,id) do { \
141 (vi)->range.first_use.pos.bid = 0xffff; \
147 mono_alloc_ireg (MonoCompile *cfg)
149 return alloc_ireg (cfg);
153 mono_alloc_freg (MonoCompile *cfg)
155 return alloc_freg (cfg);
159 mono_alloc_preg (MonoCompile *cfg)
161 return alloc_preg (cfg);
165 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
167 return alloc_dreg (cfg, stack_type);
171 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
177 switch (type->type) {
180 case MONO_TYPE_BOOLEAN:
192 case MONO_TYPE_FNPTR:
194 case MONO_TYPE_CLASS:
195 case MONO_TYPE_STRING:
196 case MONO_TYPE_OBJECT:
197 case MONO_TYPE_SZARRAY:
198 case MONO_TYPE_ARRAY:
202 #if SIZEOF_VOID_P == 8
211 case MONO_TYPE_VALUETYPE:
212 if (type->data.klass->enumtype) {
213 type = type->data.klass->enum_basetype;
216 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
219 case MONO_TYPE_TYPEDBYREF:
221 case MONO_TYPE_GENERICINST:
222 type = &type->data.generic_class->container_class->byval_arg;
226 g_assert (cfg->generic_sharing_context);
229 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
235 mono_print_bb (MonoBasicBlock *bb, const char *msg)
240 printf ("\n%s %d: [IN: ", msg, bb->block_num);
241 for (i = 0; i < bb->in_count; ++i)
242 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
244 for (i = 0; i < bb->out_count; ++i)
245 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
247 for (tree = bb->code; tree; tree = tree->next)
248 mono_print_ins_index (-1, tree);
252 * Can't put this at the beginning, since other files reference stuff from this
257 #define UNVERIFIED do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto unverified; } while (0)
259 #define GET_BBLOCK(cfg,tblock,ip) do { \
260 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
262 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
263 NEW_BBLOCK (cfg, (tblock)); \
264 (tblock)->cil_code = (ip); \
265 ADD_BBLOCK (cfg, (tblock)); \
269 #if defined(__i386__) || defined(__x86_64__)
270 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
271 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
272 (dest)->dreg = alloc_preg ((cfg)); \
273 (dest)->sreg1 = (sr1); \
274 (dest)->sreg2 = (sr2); \
275 (dest)->inst_imm = (imm); \
276 (dest)->backend.shift_amount = (shift); \
277 MONO_ADD_INS ((cfg)->cbb, (dest)); \
281 #if SIZEOF_VOID_P == 8
282 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
283 /* FIXME: Need to add many more cases */ \
284 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
286 int dr = alloc_preg (cfg); \
287 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
288 (ins)->sreg2 = widen->dreg; \
292 #define ADD_WIDEN_OP(ins, arg1, arg2)
295 #define ADD_BINOP(op) do { \
296 MONO_INST_NEW (cfg, ins, (op)); \
298 ins->sreg1 = sp [0]->dreg; \
299 ins->sreg2 = sp [1]->dreg; \
300 type_from_op (ins, sp [0], sp [1]); \
302 /* Have to insert a widening op */ \
303 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
304 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
305 MONO_ADD_INS ((cfg)->cbb, (ins)); \
307 mono_decompose_opcode ((cfg), (ins)); \
310 #define ADD_UNOP(op) do { \
311 MONO_INST_NEW (cfg, ins, (op)); \
313 ins->sreg1 = sp [0]->dreg; \
314 type_from_op (ins, sp [0], NULL); \
316 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
317 MONO_ADD_INS ((cfg)->cbb, (ins)); \
319 mono_decompose_opcode (cfg, ins); \
322 #define ADD_BINCOND(next_block) do { \
325 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
326 cmp->sreg1 = sp [0]->dreg; \
327 cmp->sreg2 = sp [1]->dreg; \
328 type_from_op (cmp, sp [0], sp [1]); \
330 type_from_op (ins, sp [0], sp [1]); \
331 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
332 GET_BBLOCK (cfg, tblock, target); \
333 link_bblock (cfg, bblock, tblock); \
334 ins->inst_true_bb = tblock; \
335 if ((next_block)) { \
336 link_bblock (cfg, bblock, (next_block)); \
337 ins->inst_false_bb = (next_block); \
338 start_new_bblock = 1; \
340 GET_BBLOCK (cfg, tblock, ip); \
341 link_bblock (cfg, bblock, tblock); \
342 ins->inst_false_bb = tblock; \
343 start_new_bblock = 2; \
345 if (sp != stack_start) { \
346 handle_stack_args (cfg, stack_start, sp - stack_start); \
347 CHECK_UNVERIFIABLE (cfg); \
349 MONO_ADD_INS (bblock, cmp); \
350 MONO_ADD_INS (bblock, ins); \
354 * link_bblock: Links two basic blocks
356 * links two basic blocks in the control flow graph, the 'from'
357 * argument is the starting block and the 'to' argument is the block
358 * that control flow reaches after leaving 'from'.
361 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
363 MonoBasicBlock **newa;
367 if (from->cil_code) {
369 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
371 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
374 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
376 printf ("edge from entry to exit\n");
381 for (i = 0; i < from->out_count; ++i) {
382 if (to == from->out_bb [i]) {
388 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
389 for (i = 0; i < from->out_count; ++i) {
390 newa [i] = from->out_bb [i];
398 for (i = 0; i < to->in_count; ++i) {
399 if (from == to->in_bb [i]) {
405 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
406 for (i = 0; i < to->in_count; ++i) {
407 newa [i] = to->in_bb [i];
416 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
418 link_bblock (cfg, from, to);
422 * mono_find_block_region:
424 * We mark each basic block with a region ID. We use that to avoid BB
425 * optimizations when blocks are in different regions.
428 * A region token that encodes where this region is, and information
429 * about the clause owner for this block.
431 * The region encodes the try/catch/filter clause that owns this block
432 * as well as the type. -1 is a special value that represents a block
433 * that is in none of try/catch/filter.
436 mono_find_block_region (MonoCompile *cfg, int offset)
438 MonoMethod *method = cfg->method;
439 MonoMethodHeader *header = mono_method_get_header (method);
440 MonoExceptionClause *clause;
443 /* first search for handlers and filters */
444 for (i = 0; i < header->num_clauses; ++i) {
445 clause = &header->clauses [i];
446 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
447 (offset < (clause->handler_offset)))
448 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
450 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
451 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
452 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
453 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
454 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
456 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
460 /* search the try blocks */
461 for (i = 0; i < header->num_clauses; ++i) {
462 clause = &header->clauses [i];
463 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
464 return ((i + 1) << 8) | clause->flags;
471 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
473 MonoMethod *method = cfg->method;
474 MonoMethodHeader *header = mono_method_get_header (method);
475 MonoExceptionClause *clause;
476 MonoBasicBlock *handler;
480 for (i = 0; i < header->num_clauses; ++i) {
481 clause = &header->clauses [i];
482 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
483 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
484 if (clause->flags == type) {
485 handler = cfg->cil_offset_to_bb [clause->handler_offset];
487 res = g_list_append (res, handler);
495 mono_create_spvar_for_region (MonoCompile *cfg, int region)
499 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
503 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
504 /* prevent it from being register allocated */
505 var->flags |= MONO_INST_INDIRECT;
507 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
511 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
513 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
517 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
521 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
525 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
526 /* prevent it from being register allocated */
527 var->flags |= MONO_INST_INDIRECT;
529 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
535 * Returns the type used in the eval stack when @type is loaded.
536 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
539 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
543 inst->klass = klass = mono_class_from_mono_type (type);
545 inst->type = STACK_MP;
550 switch (type->type) {
552 inst->type = STACK_INV;
556 case MONO_TYPE_BOOLEAN:
562 inst->type = STACK_I4;
567 case MONO_TYPE_FNPTR:
568 inst->type = STACK_PTR;
570 case MONO_TYPE_CLASS:
571 case MONO_TYPE_STRING:
572 case MONO_TYPE_OBJECT:
573 case MONO_TYPE_SZARRAY:
574 case MONO_TYPE_ARRAY:
575 inst->type = STACK_OBJ;
579 inst->type = STACK_I8;
583 inst->type = STACK_R8;
585 case MONO_TYPE_VALUETYPE:
586 if (type->data.klass->enumtype) {
587 type = type->data.klass->enum_basetype;
591 inst->type = STACK_VTYPE;
594 case MONO_TYPE_TYPEDBYREF:
595 inst->klass = mono_defaults.typed_reference_class;
596 inst->type = STACK_VTYPE;
598 case MONO_TYPE_GENERICINST:
599 type = &type->data.generic_class->container_class->byval_arg;
602 case MONO_TYPE_MVAR :
603 /* FIXME: all the arguments must be references for now,
604 * later look inside cfg and see if the arg num is
607 g_assert (cfg->generic_sharing_context);
608 inst->type = STACK_OBJ;
611 g_error ("unknown type 0x%02x in eval stack type", type->type);
616 * The following tables are used to quickly validate the IL code in type_from_op ().
619 bin_num_table [STACK_MAX] [STACK_MAX] = {
620 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
621 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
622 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
623 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
624 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
625 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
626 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
627 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
632 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
635 /* reduce the size of this table */
637 bin_int_table [STACK_MAX] [STACK_MAX] = {
638 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
639 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
640 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
641 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
642 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
643 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
644 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
645 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
649 bin_comp_table [STACK_MAX] [STACK_MAX] = {
650 /* Inv i L p F & O vt */
652 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
653 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
654 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
655 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
656 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
657 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
658 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
661 /* reduce the size of this table */
663 shift_table [STACK_MAX] [STACK_MAX] = {
664 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
665 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
666 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
667 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
668 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
669 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
670 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
671 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
675 * Tables to map from the non-specific opcode to the matching
676 * type-specific opcode.
678 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
680 binops_op_map [STACK_MAX] = {
681 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
684 /* handles from CEE_NEG to CEE_CONV_U8 */
686 unops_op_map [STACK_MAX] = {
687 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
690 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
692 ovfops_op_map [STACK_MAX] = {
693 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
696 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
698 ovf2ops_op_map [STACK_MAX] = {
699 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
702 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
704 ovf3ops_op_map [STACK_MAX] = {
705 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
708 /* handles from CEE_BEQ to CEE_BLT_UN */
710 beqops_op_map [STACK_MAX] = {
711 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
714 /* handles from CEE_CEQ to CEE_CLT_UN */
716 ceqops_op_map [STACK_MAX] = {
717 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
721 * Sets ins->type (the type on the eval stack) according to the
722 * type of the opcode and the arguments to it.
723 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
725 * FIXME: this function sets ins->type unconditionally in some cases, but
726 * it should set it to invalid for some types (a conv.x on an object)
729 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
731 switch (ins->opcode) {
738 /* FIXME: check unverifiable args for STACK_MP */
739 ins->type = bin_num_table [src1->type] [src2->type];
740 ins->opcode += binops_op_map [ins->type];
747 ins->type = bin_int_table [src1->type] [src2->type];
748 ins->opcode += binops_op_map [ins->type];
753 ins->type = shift_table [src1->type] [src2->type];
754 ins->opcode += binops_op_map [ins->type];
759 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
760 if ((src1->type == STACK_I8) || ((sizeof (gpointer) == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
761 ins->opcode = OP_LCOMPARE;
762 else if (src1->type == STACK_R8)
763 ins->opcode = OP_FCOMPARE;
765 ins->opcode = OP_ICOMPARE;
767 case OP_ICOMPARE_IMM:
768 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
769 if ((src1->type == STACK_I8) || ((sizeof (gpointer) == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
770 ins->opcode = OP_LCOMPARE_IMM;
782 ins->opcode += beqops_op_map [src1->type];
785 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
786 ins->opcode += ceqops_op_map [src1->type];
792 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
793 ins->opcode += ceqops_op_map [src1->type];
797 ins->type = neg_table [src1->type];
798 ins->opcode += unops_op_map [ins->type];
801 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
802 ins->type = src1->type;
804 ins->type = STACK_INV;
805 ins->opcode += unops_op_map [ins->type];
811 ins->type = STACK_I4;
812 ins->opcode += unops_op_map [src1->type];
815 ins->type = STACK_R8;
816 switch (src1->type) {
819 ins->opcode = OP_ICONV_TO_R_UN;
822 ins->opcode = OP_LCONV_TO_R_UN;
826 case CEE_CONV_OVF_I1:
827 case CEE_CONV_OVF_U1:
828 case CEE_CONV_OVF_I2:
829 case CEE_CONV_OVF_U2:
830 case CEE_CONV_OVF_I4:
831 case CEE_CONV_OVF_U4:
832 ins->type = STACK_I4;
833 ins->opcode += ovf3ops_op_map [src1->type];
835 case CEE_CONV_OVF_I_UN:
836 case CEE_CONV_OVF_U_UN:
837 ins->type = STACK_PTR;
838 ins->opcode += ovf2ops_op_map [src1->type];
840 case CEE_CONV_OVF_I1_UN:
841 case CEE_CONV_OVF_I2_UN:
842 case CEE_CONV_OVF_I4_UN:
843 case CEE_CONV_OVF_U1_UN:
844 case CEE_CONV_OVF_U2_UN:
845 case CEE_CONV_OVF_U4_UN:
846 ins->type = STACK_I4;
847 ins->opcode += ovf2ops_op_map [src1->type];
850 ins->type = STACK_PTR;
851 switch (src1->type) {
853 ins->opcode = OP_ICONV_TO_U;
857 #if SIZEOF_VOID_P == 8
858 ins->opcode = OP_LCONV_TO_U;
860 ins->opcode = OP_MOVE;
864 ins->opcode = OP_LCONV_TO_U;
867 ins->opcode = OP_FCONV_TO_U;
873 ins->type = STACK_I8;
874 ins->opcode += unops_op_map [src1->type];
876 case CEE_CONV_OVF_I8:
877 case CEE_CONV_OVF_U8:
878 ins->type = STACK_I8;
879 ins->opcode += ovf3ops_op_map [src1->type];
881 case CEE_CONV_OVF_U8_UN:
882 case CEE_CONV_OVF_I8_UN:
883 ins->type = STACK_I8;
884 ins->opcode += ovf2ops_op_map [src1->type];
888 ins->type = STACK_R8;
889 ins->opcode += unops_op_map [src1->type];
892 ins->type = STACK_R8;
896 ins->type = STACK_I4;
897 ins->opcode += ovfops_op_map [src1->type];
902 ins->type = STACK_PTR;
903 ins->opcode += ovfops_op_map [src1->type];
911 ins->type = bin_num_table [src1->type] [src2->type];
912 ins->opcode += ovfops_op_map [src1->type];
913 if (ins->type == STACK_R8)
914 ins->type = STACK_INV;
916 case OP_LOAD_MEMBASE:
917 ins->type = STACK_PTR;
919 case OP_LOADI1_MEMBASE:
920 case OP_LOADU1_MEMBASE:
921 case OP_LOADI2_MEMBASE:
922 case OP_LOADU2_MEMBASE:
923 case OP_LOADI4_MEMBASE:
924 case OP_LOADU4_MEMBASE:
925 ins->type = STACK_PTR;
927 case OP_LOADI8_MEMBASE:
928 ins->type = STACK_I8;
930 case OP_LOADR4_MEMBASE:
931 case OP_LOADR8_MEMBASE:
932 ins->type = STACK_R8;
935 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
939 if (ins->type == STACK_MP)
940 ins->klass = mono_defaults.object_class;
945 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
951 param_table [STACK_MAX] [STACK_MAX] = {
956 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
960 switch (args->type) {
970 for (i = 0; i < sig->param_count; ++i) {
971 switch (args [i].type) {
975 if (!sig->params [i]->byref)
979 if (sig->params [i]->byref)
981 switch (sig->params [i]->type) {
982 case MONO_TYPE_CLASS:
983 case MONO_TYPE_STRING:
984 case MONO_TYPE_OBJECT:
985 case MONO_TYPE_SZARRAY:
986 case MONO_TYPE_ARRAY:
993 if (sig->params [i]->byref)
995 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1004 /*if (!param_table [args [i].type] [sig->params [i]->type])
1012 * When we need a pointer to the current domain many times in a method, we
1013 * call mono_domain_get() once and we store the result in a local variable.
1014 * This function returns the variable that represents the MonoDomain*.
1016 inline static MonoInst *
1017 mono_get_domainvar (MonoCompile *cfg)
1019 if (!cfg->domainvar)
1020 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1021 return cfg->domainvar;
1025 * The got_var contains the address of the Global Offset Table when AOT
1028 inline static MonoInst *
1029 mono_get_got_var (MonoCompile *cfg)
1031 #ifdef MONO_ARCH_NEED_GOT_VAR
1032 if (!cfg->compile_aot)
1034 if (!cfg->got_var) {
1035 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1037 return cfg->got_var;
1044 mono_get_vtable_var (MonoCompile *cfg)
1046 g_assert (cfg->generic_sharing_context);
1048 if (!cfg->rgctx_var) {
1049 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1050 /* force the var to be stack allocated */
1051 cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
1054 return cfg->rgctx_var;
1058 type_from_stack_type (MonoInst *ins) {
1059 switch (ins->type) {
1060 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1061 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1062 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1063 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1065 return &ins->klass->this_arg;
1066 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1067 case STACK_VTYPE: return &ins->klass->byval_arg;
1069 g_error ("stack type %d to monotype not handled\n", ins->type);
1074 static G_GNUC_UNUSED int
1075 type_to_stack_type (MonoType *t)
1077 switch (mono_type_get_underlying_type (t)->type) {
1080 case MONO_TYPE_BOOLEAN:
1083 case MONO_TYPE_CHAR:
1090 case MONO_TYPE_FNPTR:
1092 case MONO_TYPE_CLASS:
1093 case MONO_TYPE_STRING:
1094 case MONO_TYPE_OBJECT:
1095 case MONO_TYPE_SZARRAY:
1096 case MONO_TYPE_ARRAY:
1104 case MONO_TYPE_VALUETYPE:
1105 case MONO_TYPE_TYPEDBYREF:
1107 case MONO_TYPE_GENERICINST:
1108 if (mono_type_generic_inst_is_valuetype (t))
1114 g_assert_not_reached ();
1121 array_access_to_klass (int opcode)
1125 return mono_defaults.byte_class;
1127 return mono_defaults.uint16_class;
1130 return mono_defaults.int_class;
1133 return mono_defaults.sbyte_class;
1136 return mono_defaults.int16_class;
1139 return mono_defaults.int32_class;
1141 return mono_defaults.uint32_class;
1144 return mono_defaults.int64_class;
1147 return mono_defaults.single_class;
1150 return mono_defaults.double_class;
1151 case CEE_LDELEM_REF:
1152 case CEE_STELEM_REF:
1153 return mono_defaults.object_class;
1155 g_assert_not_reached ();
1161 * We try to share variables when possible
1164 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1169 /* inlining can result in deeper stacks */
1170 if (slot >= mono_method_get_header (cfg->method)->max_stack)
1171 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1173 pos = ins->type - 1 + slot * STACK_MAX;
1175 switch (ins->type) {
1182 if ((vnum = cfg->intvars [pos]))
1183 return cfg->varinfo [vnum];
1184 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1185 cfg->intvars [pos] = res->inst_c0;
1188 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1194 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1197 * Don't use this if a generic_context is set, since that means AOT can't
1198 * look up the method using just the image+token.
1199 * table == 0 means this is a reference made from a wrapper.
1201 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1202 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1203 jump_info_token->image = image;
1204 jump_info_token->token = token;
1205 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1210 * This function is called to handle items that are left on the evaluation stack
1211 * at basic block boundaries. What happens is that we save the values to local variables
1212 * and we reload them later when first entering the target basic block (with the
1213 * handle_loaded_temps () function).
1214 * A single joint point will use the same variables (stored in the array bb->out_stack or
1215 * bb->in_stack, if the basic block is before or after the joint point).
1217 * This function needs to be called _before_ emitting the last instruction of
1218 * the bb (i.e. before emitting a branch).
1219 * If the stack merge fails at a join point, cfg->unverifiable is set.
1222 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1225 MonoBasicBlock *bb = cfg->cbb;
1226 MonoBasicBlock *outb;
1227 MonoInst *inst, **locals;
1232 if (cfg->verbose_level > 3)
1233 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
1234 if (!bb->out_scount) {
1235 bb->out_scount = count;
1236 //printf ("bblock %d has out:", bb->block_num);
1238 for (i = 0; i < bb->out_count; ++i) {
1239 outb = bb->out_bb [i];
1240 /* exception handlers are linked, but they should not be considered for stack args */
1241 if (outb->flags & BB_EXCEPTION_HANDLER)
1243 //printf (" %d", outb->block_num);
1244 if (outb->in_stack) {
1246 bb->out_stack = outb->in_stack;
1252 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1253 for (i = 0; i < count; ++i) {
1255 * try to reuse temps already allocated for this purpose, if they occupy the same
1256 * stack slot and if they are of the same type.
1257 * This won't cause conflicts since if 'local' is used to
1258 * store one of the values in the in_stack of a bblock, then
1259 * the same variable will be used for the same outgoing stack
1261 * This doesn't work when inlining methods, since the bblocks
1262 * in the inlined methods do not inherit their in_stack from
1263 * the bblock they are inlined to. See bug #58863 for an
1266 if (cfg->inlined_method)
1267 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1269 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
1274 for (i = 0; i < bb->out_count; ++i) {
1275 outb = bb->out_bb [i];
1276 /* exception handlers are linked, but they should not be considered for stack args */
1277 if (outb->flags & BB_EXCEPTION_HANDLER)
1279 if (outb->in_scount) {
1280 if (outb->in_scount != bb->out_scount) {
1281 cfg->unverifiable = TRUE;
1284 continue; /* check they are the same locals */
1286 outb->in_scount = count;
1287 outb->in_stack = bb->out_stack;
1290 locals = bb->out_stack;
1292 for (i = 0; i < count; ++i) {
1293 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1294 inst->cil_code = sp [i]->cil_code;
1295 sp [i] = locals [i];
1296 if (cfg->verbose_level > 3)
1297 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1301 * It is possible that the out bblocks already have in_stack assigned, and
1302 * the in_stacks differ. In this case, we will store to all the different
1309 /* Find a bblock which has a different in_stack */
1311 while (bindex < bb->out_count) {
1312 outb = bb->out_bb [bindex];
1313 /* exception handlers are linked, but they should not be considered for stack args */
1314 if (outb->flags & BB_EXCEPTION_HANDLER) {
1318 if (outb->in_stack != locals) {
1319 for (i = 0; i < count; ++i) {
1320 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1321 inst->cil_code = sp [i]->cil_code;
1322 sp [i] = locals [i];
1323 if (cfg->verbose_level > 3)
1324 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1326 locals = outb->in_stack;
1335 /* Emit code which loads interface_offsets [klass->interface_id]
1336 * The array is stored in memory before vtable.
1339 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1341 if (cfg->compile_aot) {
/* AOT: the interface id is only known at load time, so fetch the
 * (adjusted) id through an AOT-constant patch and use it as a byte
 * offset relative to the vtable pointer. */
1342 int ioffset_reg = alloc_preg (cfg);
1343 int iid_reg = alloc_preg (cfg);
1345 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1346 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1347 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
/* JIT (non-AOT) path: interface_id is a compile-time constant, so the
 * slot can be loaded at a fixed negative offset from the vtable. */
1350 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
1355 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1356 * stored in "klass_reg" implements the interface "klass".
1359 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1361 int ibitmap_reg = alloc_preg (cfg);
1362 int ibitmap_byte_reg = alloc_preg (cfg);
/* Load the class' interface bitmap; bit (iid & 7) of byte (iid >> 3)
 * tells whether interface "iid" is implemented. */
1364 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap));
1366 if (cfg->compile_aot) {
/* AOT: compute byte index and bit mask at runtime from the patched
 * interface id. */
1367 int iid_reg = alloc_preg (cfg);
1368 int shifted_iid_reg = alloc_preg (cfg);
1369 int ibitmap_byte_address_reg = alloc_preg (cfg);
1370 int masked_iid_reg = alloc_preg (cfg);
1371 int iid_one_bit_reg = alloc_preg (cfg);
1372 int iid_bit_reg = alloc_preg (cfg);
1373 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1374 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1375 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1376 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1377 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1378 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1379 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1380 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* Non-AOT path: byte index and bit mask are compile-time constants. */
1382 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1383 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1388 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1389 * stored in "vtable_reg" implements the interface "klass".
1392 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1394 int ibitmap_reg = alloc_preg (cfg);
1395 int ibitmap_byte_reg = alloc_preg (cfg);
/* Same bitmap test as mini_emit_load_intf_bit_reg_class, but reading
 * the bitmap pointer from the vtable instead of the class. */
1397 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap));
1399 if (cfg->compile_aot) {
/* AOT: interface id comes from a patch; compute byte (iid >> 3) and
 * bit (iid & 7) at runtime. */
1400 int iid_reg = alloc_preg (cfg);
1401 int shifted_iid_reg = alloc_preg (cfg);
1402 int ibitmap_byte_address_reg = alloc_preg (cfg);
1403 int masked_iid_reg = alloc_preg (cfg);
1404 int iid_one_bit_reg = alloc_preg (cfg);
1405 int iid_bit_reg = alloc_preg (cfg);
1406 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1407 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, shifted_iid_reg, iid_reg, 3);
1408 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1409 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1410 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, masked_iid_reg, iid_reg, 7);
1411 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1412 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1413 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* Non-AOT path: constant byte index and bit mask. */
1415 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1416 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1421 * Emit code which checks whenever the interface id of @klass is smaller than
1422 * than the value given by max_iid_reg.
1425 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1426 MonoBasicBlock *false_target)
1428 if (cfg->compile_aot) {
/* AOT: compare against the patched interface id. */
1429 int iid_reg = alloc_preg (cfg);
1430 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1431 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
/* Non-AOT path: interface_id is an immediate. */
1434 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
/* On failure (max_iid < iid): branch to false_target when one is
 * supplied; otherwise throw InvalidCastException.  (The surrounding
 * if/else for the two lines below is elided in this listing.) */
1436 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1438 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1441 /* Same as above, but obtains max_iid from a vtable */
1443 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1444 MonoBasicBlock *false_target)
1446 int max_iid_reg = alloc_preg (cfg);
/* Read MonoVTable.max_interface_id (16-bit unsigned load), then defer
 * the actual comparison to mini_emit_max_iid_check. */
1448 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1449 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1452 /* Same as above, but obtains max_iid from a klass */
1454 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1455 MonoBasicBlock *false_target)
1457 int max_iid_reg = alloc_preg (cfg);
/* Read MonoClass.max_interface_id, then delegate the comparison. */
1459 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1460 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * Emit an "is instance" test of the class in klass_reg against @klass,
 * using the supertypes table: branch to true_target when
 * supertypes [klass->idepth - 1] equals @klass, and to false_target
 * when the inheritance depth check already rules the cast out.
 */
1464 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1466 int idepth_reg = alloc_preg (cfg);
1467 int stypes_reg = alloc_preg (cfg);
1468 int stype = alloc_preg (cfg);
/* Only classes deeper than the default supertable need an explicit
 * idepth bounds check before indexing the supertypes array. */
1470 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1471 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1472 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1473 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1475 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1476 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1477 if (cfg->compile_aot) {
/* AOT: the class pointer is not known, so materialize it via a
 * class-constant patch before comparing. */
1478 int const_reg = alloc_preg (cfg);
1479 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1480 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
/* Non-AOT path: compare directly against the class pointer as an
 * immediate. */
1482 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1484 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/*
 * Emit an interface cast check for the object whose vtable is in
 * vtable_reg: first validate the interface id range, then test the
 * interface bitmap bit.
 */
1488 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1490 int intf_reg = alloc_preg (cfg);
1492 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1493 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1494 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
/* Bit set: branch to true_target when supplied; otherwise (the elided
 * else path) throw on a clear bit. */
1496 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1498 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1502 * Variant of the above that takes a register to the class, not the vtable.
1505 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1507 int intf_bit_reg = alloc_preg (cfg);
/* Range-check the interface id, then test the class' bitmap bit. */
1509 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1510 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1511 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
/* Branch to true_target when the bit is set; the elided else path
 * throws InvalidCastException when it is clear. */
1513 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1515 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * Emit code which throws InvalidCastException unless the class pointer
 * in klass_reg equals @klass exactly.
 */
1519 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1521 if (cfg->compile_aot) {
/* AOT: load the class pointer through a patchable constant. */
1522 int const_reg = alloc_preg (cfg);
1523 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1524 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
/* Non-AOT path: compare against the class pointer as an immediate. */
1526 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1528 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/*
 * Like mini_emit_class_check, but instead of throwing, branch to
 * @target with the caller-supplied conditional branch opcode.
 */
1532 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1534 if (cfg->compile_aot) {
1535 int const_reg = alloc_preg (cfg);
1536 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1537 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
/* Non-AOT path: immediate pointer comparison. */
1539 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1541 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
/*
 * Emit a castclass check of the class in klass_reg against @klass,
 * throwing InvalidCastException on mismatch.  obj_reg may be -1 to
 * skip the vector (SZARRAY bounds == NULL) check; object_is_null is
 * the block to jump to when the cast trivially succeeds.
 * NOTE(review): this listing is elided — the array/non-array split and
 * several braces are not visible; comments describe only the visible
 * statements.
 */
1545 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1548 int rank_reg = alloc_preg (cfg);
1549 int eclass_reg = alloc_preg (cfg);
/* Array path: the rank must match exactly. */
1551 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
1552 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1553 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1554 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1555 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
/* Element-class check.  The enum_class/object_class special cases below
 * presumably mirror the runtime's array covariance rules for enums —
 * TODO confirm against the full source. */
1556 if (klass->cast_class == mono_defaults.object_class) {
1557 int parent_reg = alloc_preg (cfg);
1558 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
1559 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1560 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1561 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1562 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1563 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1564 } else if (klass->cast_class == mono_defaults.enum_class) {
1565 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1566 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1567 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1569 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1570 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1573 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1574 /* Check that the object is a vector too */
1575 int bounds_reg = alloc_preg (cfg);
1576 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
1577 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1578 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-array path (elided branch): walk the supertypes table, same
 * scheme as mini_emit_isninst_cast but throwing on failure. */
1581 int idepth_reg = alloc_preg (cfg);
1582 int stypes_reg = alloc_preg (cfg);
1583 int stype = alloc_preg (cfg);
1585 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1586 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1587 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1588 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1590 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1591 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1592 mini_emit_class_check (cfg, stype, klass);
/*
 * Emit inline code which stores @size bytes of value @val at
 * destreg + offset.  Currently only val == 0 is supported (asserted
 * below).  NOTE(review): several branches/loops of this function are
 * elided from this listing; comments describe visible statements only.
 */
1597 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1601 g_assert (val == 0);
/* Small, aligned sizes: emit a single store-immediate of the right
 * width (the switch over size is elided). */
1606 if ((size <= 4) && (size <= align)) {
1609 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1612 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1615 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1617 #if SIZEOF_VOID_P == 8
1619 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* General case: materialize val in a register once, then store it
 * repeatedly in decreasing widths. */
1625 val_reg = alloc_preg (cfg);
1627 if (sizeof (gpointer) == 8)
1628 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1630 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
/* Unaligned destination: fall back to byte stores. */
1633 /* This could be optimized further if neccesary */
1635 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1642 #if !NO_UNALIGNED_ACCESS
/* Wide stores first (8 bytes on 64-bit), then 4/2/1-byte tails; the
 * loop structure around each store is elided in this listing. */
1643 if (sizeof (gpointer) == 8) {
1645 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1650 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
1658 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1663 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1668 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1674 #endif /* DISABLE_JIT */
/*
 * Emit inline code copying @size bytes from srcreg + soffset to
 * destreg + doffset, honouring @align.  NOTE(review): the loop/branch
 * structure around the load/store pairs is elided in this listing.
 */
1677 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
/* Unaligned case: copy byte by byte through a scratch register. */
1685 /* This could be optimized further if neccesary */
1687 cur_reg = alloc_preg (cfg);
1688 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1689 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1696 #if !NO_UNALIGNED_ACCESS
/* Aligned case: 8-byte copies on 64-bit, then 4/2/1-byte tails. */
1697 if (sizeof (gpointer) == 8) {
1699 cur_reg = alloc_preg (cfg);
1700 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1701 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
1710 cur_reg = alloc_preg (cfg);
1711 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1712 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1718 cur_reg = alloc_preg (cfg);
1719 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1720 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1726 cur_reg = alloc_preg (cfg);
1727 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1728 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * Map a managed return type to the matching call opcode, selecting the
 * indirect (calli) or virtual variant as requested.  Generic type
 * variables are first resolved through the sharing context.
 * NOTE(review): some case labels/braces are elided in this listing.
 */
1738 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
/* (Elided byref check) byref returns are pointer-sized. */
1741 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1744 type = mini_get_basic_type_from_generic (gsctx, type);
1745 switch (type->type) {
1746 case MONO_TYPE_VOID:
1747 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
1750 case MONO_TYPE_BOOLEAN:
1753 case MONO_TYPE_CHAR:
/* Small integer types all use the plain integer call opcodes. */
1756 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1760 case MONO_TYPE_FNPTR:
1761 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1762 case MONO_TYPE_CLASS:
1763 case MONO_TYPE_STRING:
1764 case MONO_TYPE_OBJECT:
1765 case MONO_TYPE_SZARRAY:
1766 case MONO_TYPE_ARRAY:
1767 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
/* 64-bit integer returns. */
1770 return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
/* Floating point returns. */
1773 return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
1774 case MONO_TYPE_VALUETYPE:
1775 if (type->data.klass->enumtype) {
/* Enums are handled as their underlying type (elided goto/retry). */
1776 type = type->data.klass->enum_basetype;
1779 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1780 case MONO_TYPE_TYPEDBYREF:
1781 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1782 case MONO_TYPE_GENERICINST:
/* Re-dispatch on the generic container's underlying type. */
1783 type = &type->data.generic_class->container_class->byval_arg;
1786 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
1792 * target_type_is_incompatible:
1793 * @cfg: MonoCompile context
1795 * Check that the item @arg on the evaluation stack can be stored
1796 * in the target type (can be a local, or field, etc).
1797 * The cfg arg can be used to check if we need verification or just
1800 * Returns: non-0 value if arg can't be stored on a target.
1803 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1805 MonoType *simple_type;
1808 if (target->byref) {
1809 /* FIXME: check that the pointed to types match */
1810 if (arg->type == STACK_MP)
1811 return arg->klass != mono_class_from_mono_type (target);
1812 if (arg->type == STACK_PTR)
/* (Return statements for the byref sub-cases are elided here.) */
1817 simple_type = mono_type_get_underlying_type (target);
1818 switch (simple_type->type) {
1819 case MONO_TYPE_VOID:
1823 case MONO_TYPE_BOOLEAN:
1826 case MONO_TYPE_CHAR:
/* 32-bit integer targets accept I4 and native-int stack entries. */
1829 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1833 /* STACK_MP is needed when setting pinned locals */
1834 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1839 case MONO_TYPE_FNPTR:
1840 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1843 case MONO_TYPE_CLASS:
1844 case MONO_TYPE_STRING:
1845 case MONO_TYPE_OBJECT:
1846 case MONO_TYPE_SZARRAY:
1847 case MONO_TYPE_ARRAY:
1848 if (arg->type != STACK_OBJ)
1850 /* FIXME: check type compatibility */
/* 64-bit integer targets require an I8 stack entry. */
1854 if (arg->type != STACK_I8)
/* Floating-point targets require an R8 stack entry. */
1859 if (arg->type != STACK_R8)
1862 case MONO_TYPE_VALUETYPE:
1863 if (arg->type != STACK_VTYPE)
/* Value types additionally require the exact same MonoClass. */
1865 klass = mono_class_from_mono_type (simple_type);
1866 if (klass != arg->klass)
1869 case MONO_TYPE_TYPEDBYREF:
1870 if (arg->type != STACK_VTYPE)
1872 klass = mono_class_from_mono_type (simple_type);
1873 if (klass != arg->klass)
1876 case MONO_TYPE_GENERICINST:
1877 if (mono_type_generic_inst_is_valuetype (simple_type)) {
1878 if (arg->type != STACK_VTYPE)
1880 klass = mono_class_from_mono_type (simple_type);
1881 if (klass != arg->klass)
/* Reference generic instances behave like object references. */
1885 if (arg->type != STACK_OBJ)
1887 /* FIXME: check type compatibility */
1891 case MONO_TYPE_MVAR:
1892 /* FIXME: all the arguments must be references for now,
1893 * later look inside cfg and see if the arg num is
1894 * really a reference
1896 g_assert (cfg->generic_sharing_context);
1897 if (arg->type != STACK_OBJ)
1901 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
1907 * Prepare arguments for passing to a function call.
1908 * Return a non-zero value if the arguments can't be passed to the given
1910 * The type checks are not yet complete and some conversions may need
1911 * casts on 32 or 64 bit architectures.
1913 * FIXME: implement this using target_type_is_incompatible ()
1916 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
1918 MonoType *simple_type;
/* (Elided hasthis check) the "this" argument must be an object,
 * managed pointer, or native pointer. */
1922 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
1926 for (i = 0; i < sig->param_count; ++i) {
1927 if (sig->params [i]->byref) {
1928 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
/* (Elided return/continue of the byref sub-case.) */
1932 simple_type = sig->params [i];
1933 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
1935 switch (simple_type->type) {
1936 case MONO_TYPE_VOID:
1941 case MONO_TYPE_BOOLEAN:
1944 case MONO_TYPE_CHAR:
1947 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
1953 case MONO_TYPE_FNPTR:
/* Pointer-sized parameters are lenient: also accept MP and OBJ. */
1954 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
1957 case MONO_TYPE_CLASS:
1958 case MONO_TYPE_STRING:
1959 case MONO_TYPE_OBJECT:
1960 case MONO_TYPE_SZARRAY:
1961 case MONO_TYPE_ARRAY:
1962 if (args [i]->type != STACK_OBJ)
1967 if (args [i]->type != STACK_I8)
1972 if (args [i]->type != STACK_R8)
1975 case MONO_TYPE_VALUETYPE:
1976 if (simple_type->data.klass->enumtype) {
/* Enums re-dispatch on their underlying type (elided goto). */
1977 simple_type = simple_type->data.klass->enum_basetype;
1980 if (args [i]->type != STACK_VTYPE)
1983 case MONO_TYPE_TYPEDBYREF:
1984 if (args [i]->type != STACK_VTYPE)
1987 case MONO_TYPE_GENERICINST:
1988 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
1992 g_error ("unknown type 0x%02x in check_call_signature",
/*
 * Map a CALLVIRT-family opcode to its direct-call equivalent.
 * (Most case labels are elided in this listing.)
 */
2000 callvirt_to_call (int opcode)
2005 case OP_VOIDCALLVIRT:
/* Unknown opcode: hard failure. */
2014 g_assert_not_reached ();
/*
 * Map a CALLVIRT-family opcode to its *_CALL_MEMBASE equivalent, used
 * when the call target is loaded from memory (vtable/IMT slot).
 */
2021 callvirt_to_call_membase (int opcode)
2025 return OP_CALL_MEMBASE;
2026 case OP_VOIDCALLVIRT:
2027 return OP_VOIDCALL_MEMBASE;
2029 return OP_FCALL_MEMBASE;
2031 return OP_LCALL_MEMBASE;
2033 return OP_VCALL_MEMBASE;
/* Unknown opcode: hard failure. */
2035 g_assert_not_reached ();
2041 #ifdef MONO_ARCH_HAVE_IMT
/*
 * Pass the IMT key (usually the target MonoMethod) to an interface
 * call, either via the architecture's dedicated IMT register or via
 * the arch-specific fallback hook.
 */
2043 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
2045 #ifdef MONO_ARCH_IMT_REG
2046 int method_reg = alloc_preg (cfg);
/* Caller supplied an explicit IMT argument: just move it. */
2049 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2050 } else if (cfg->compile_aot) {
2051 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
/* Non-AOT path: embed the method pointer directly. */
2054 MONO_INST_NEW (cfg, ins, OP_PCONST);
2055 ins->inst_p0 = call->method;
2056 ins->dreg = method_reg;
2057 MONO_ADD_INS (cfg->cbb, ins);
2060 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/* No IMT register on this architecture: delegate to the backend. */
2062 mono_arch_emit_imt_argument (cfg, call, imt_arg);
/*
 * Allocate a MonoJumpInfo patch record from @mp and fill in its target.
 * (Assignment of ip/type fields is elided in this listing.)
 */
2067 static MonoJumpInfo *
2068 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2070 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2074 ji->data.target = target;
2079 inline static MonoInst*
2080 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args);
/*
 * Create and populate a MonoCallInst for a call with signature @sig and
 * arguments @args.  Handles vtype returns (via OP_OUTARG_VTRETADDR),
 * soft-float r4 arguments, and lets the backend lay out the out-args.
 * Does NOT add the call instruction to the current bblock.
 */
2082 inline static MonoCallInst *
2083 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2084 MonoInst **args, int calli, int virtual)
2087 #ifdef MONO_ARCH_SOFT_FLOAT
2091 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2094 call->signature = sig;
2096 type_to_eval_stack_type ((cfg), sig->ret, &call->inst);
2098 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
/* Value-type return: allocate a local to hold the result and record
 * its address through a dedicated opcode (see comment below). */
2099 MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
2102 temp->backend.is_pinvoke = sig->pinvoke;
2105 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2106 * address of return value to increase optimization opportunities.
2107 * Before vtype decomposition, the dreg of the call ins itself represents the
2108 * fact the call modifies the return value. After decomposition, the call will
2109 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2110 * will be transformed into an LDADDR.
2112 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2113 loada->dreg = alloc_preg (cfg);
2114 loada->inst_p0 = temp;
2115 /* We reference the call too since call->dreg could change during optimization */
2116 loada->inst_p1 = call;
2117 MONO_ADD_INS (cfg->cbb, loada);
2119 call->inst.dreg = temp->dreg;
2121 call->vret_var = loada;
2122 } else if (!MONO_TYPE_IS_VOID (sig->ret))
2123 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2125 #ifdef MONO_ARCH_SOFT_FLOAT
2127 * If the call has a float argument, we would need to do an r8->r4 conversion using
2128 * an icall, but that cannot be done during the call sequence since it would clobber
2129 * the call registers + the stack. So we do it before emitting the call.
2131 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2133 MonoInst *in = call->args [i];
2135 if (i >= sig->hasthis)
2136 t = sig->params [i - sig->hasthis];
2138 t = &mono_defaults.int_class->byval_arg;
2139 t = mono_type_get_underlying_type (t);
2141 if (!t->byref && t->type == MONO_TYPE_R4) {
2142 MonoInst *iargs [1];
/* Convert the r8 on the eval stack to an r4 in an int vreg via the
 * mono_fload_r4_arg icall. */
2146 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2148 /* The result will be in an int vreg */
2149 call->args [i] = conv;
2154 mono_arch_emit_call (cfg, call);
/* Track the maximum out-arg area and remember that this method makes
 * calls at all. */
2156 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2157 cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * Emit an indirect call through the address in @addr.
 */
2162 inline static MonoInst*
2163 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr)
2165 MonoCallInst *call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE);
/* The *_CALL_REG opcodes take the target address in sreg1. */
2167 call->inst.sreg1 = addr->dreg;
2169 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2171 return (MonoInst*)call;
/*
 * Like mono_emit_calli, but also passes @rgctx_arg in the dedicated
 * RGCTX register.  Only valid on architectures with MONO_ARCH_RGCTX_REG.
 */
2174 inline static MonoInst*
2175 mono_emit_rgctx_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *rgctx_arg)
2177 #ifdef MONO_ARCH_RGCTX_REG
2182 rgctx_reg = mono_alloc_preg (cfg);
2183 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2185 call = (MonoCallInst*)mono_emit_calli (cfg, sig, args, addr);
2187 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2188 cfg->uses_rgctx_reg = TRUE;
2190 return (MonoInst*)call;
/* No RGCTX register on this architecture: unsupported. */
2192 g_assert_not_reached ();
/*
 * Emit a call to @method with arguments @args.  A non-NULL @this makes
 * the call virtual; @imt_arg, when set, is passed as the IMT key for
 * interface dispatch.  Handles remoting wrappers, devirtualization of
 * non-virtual/final/sealed targets, delegate Invoke fast paths, and
 * vtable/IMT-slot dispatch.  NOTE(review): this listing is elided;
 * several braces and else branches are not visible.
 */
2198 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2199 MonoInst **args, MonoInst *this, MonoInst *imt_arg)
2201 gboolean virtual = this != NULL;
2202 gboolean enable_for_aot = TRUE;
2205 if (method->string_ctor) {
2206 /* Create the real signature */
2207 /* FIXME: Cache these */
/* String ctors are declared void but actually return the string. */
2208 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_full (cfg->mempool, sig);
2209 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2214 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual);
/* Remoting: route calls on MarshalByRef (or object) receivers through
 * the invoke-with-check wrapper unless "this" is known-local. */
2216 if (this && sig->hasthis &&
2217 (method->klass->marshalbyref || method->klass == mono_defaults.object_class) &&
2218 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && !MONO_CHECK_THIS (this)) {
2219 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2221 call->method = method;
2223 call->inst.flags |= MONO_INST_HAS_METHOD;
2224 call->inst.inst_left = this;
/* Virtual-call handling starts here (the enclosing if is elided). */
2227 int vtable_reg, slot_reg, this_reg;
2229 this_reg = this->dreg;
2231 if ((!cfg->compile_aot || enable_for_aot) &&
2232 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2233 ((method->flags & METHOD_ATTRIBUTE_FINAL) &&
2234 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK))) {
2236 * the method is not virtual, we just need to ensure this is not null
2237 * and then we can call the method directly.
2239 if (method->klass->marshalbyref || method->klass == mono_defaults.object_class) {
2240 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
/* Null-check "this" explicitly since the direct call won't fault. */
2243 if (!method->string_ctor) {
2244 cfg->flags |= MONO_CFG_HAS_CHECK_THIS;
2245 MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, this_reg);
2246 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, this_reg);
2249 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2251 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2253 return (MonoInst*)call;
2256 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
2257 if ((method->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
2258 /* Make a call to delegate->invoke_impl */
2259 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2260 call->inst.inst_basereg = this_reg;
2261 call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2262 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2264 return (MonoInst*)call;
2268 if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
2269 ((method->flags & METHOD_ATTRIBUTE_FINAL) ||
2270 (method->klass && method->klass->flags & TYPE_ATTRIBUTE_SEALED))) {
2272 * the method is virtual, but we can statically dispatch since either
2273 * it's class or the method itself are sealed.
2274 * But first we need to ensure it's not a null reference.
2276 cfg->flags |= MONO_CFG_HAS_CHECK_THIS;
2277 MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, this_reg);
2278 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, this_reg);
2280 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2281 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2283 return (MonoInst*)call;
/* True virtual dispatch: load the slot from the vtable (or IMT). */
2286 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2288 vtable_reg = alloc_preg (cfg);
2289 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2290 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2292 #ifdef MONO_ARCH_HAVE_IMT
/* Interface dispatch via IMT: slot sits at a negative offset before
 * the vtable. */
2294 guint32 imt_slot = mono_method_get_imt_slot (method);
2295 emit_imt_argument (cfg, call, imt_arg);
2296 slot_reg = vtable_reg;
2297 call->inst.inst_offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
/* Fallback interface dispatch through the interface-offsets table. */
2300 if (slot_reg == -1) {
2301 slot_reg = alloc_preg (cfg);
2302 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2303 call->inst.inst_offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
/* Ordinary class virtual dispatch through the vtable slot array. */
2306 slot_reg = vtable_reg;
2307 call->inst.inst_offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
2308 (mono_method_get_vtable_index (method) * SIZEOF_VOID_P);
2309 #ifdef MONO_ARCH_HAVE_IMT
/* Generic virtual methods also pass an IMT argument. */
2311 g_assert (mono_method_signature (method)->generic_param_count);
2312 emit_imt_argument (cfg, call, imt_arg);
2317 call->inst.sreg1 = slot_reg;
2318 call->virtual = TRUE;
2321 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2323 return (MonoInst*)call;
/*
 * Like mono_emit_method_call_full, but additionally passes @vtable_arg
 * in the RGCTX register (requires MONO_ARCH_RGCTX_REG).
 */
2327 mono_emit_rgctx_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2328 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *vtable_arg)
2335 #ifdef MONO_ARCH_RGCTX_REG
/* Move the rgctx value into a fresh vreg before emitting the call so
 * it can be attached as an out-arg register below. */
2336 rgctx_reg = mono_alloc_preg (cfg);
2337 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
2342 ins = mono_emit_method_call_full (cfg, method, sig, args, this, imt_arg);
2344 call = (MonoCallInst*)ins;
2346 #ifdef MONO_ARCH_RGCTX_REG
2347 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2348 cfg->uses_rgctx_reg = TRUE;
/* Convenience wrapper: call @method with its own signature and no IMT
 * argument. */
2357 static inline MonoInst*
2358 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2360 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), args, this, NULL);
/*
 * Emit a direct call to the native function @func with signature @sig.
 * (Assignment of func to the call and the declarations are elided.)
 */
2364 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2371 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE);
2374 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2376 return (MonoInst*)call;
/*
 * Emit a call to the JIT icall registered for address @func, going
 * through its wrapper so the signature/marshalling is correct.
 */
2379 inline static MonoInst*
2380 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2382 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2386 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2390 * mono_emit_abs_call:
2392 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2394 inline static MonoInst*
2395 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2396 MonoMethodSignature *sig, MonoInst **args)
2398 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2402 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
/* Register ji in cfg->abs_patches so the resolver can find it later. */
2405 if (cfg->abs_patches == NULL)
2406 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2407 g_hash_table_insert (cfg->abs_patches, ji, ji);
2408 ins = mono_emit_native_call (cfg, ji, sig, args);
/* Mark the fptr as a patch, not a real address. */
2409 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * Lazily look up and cache the managed String.memcpy(3) helper from
 * corlib.  Aborts if corlib is too old to have it.
 */
2414 get_memcpy_method (void)
2416 static MonoMethod *memcpy_method = NULL;
2417 if (!memcpy_method) {
2418 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2420 g_error ("Old corlib found. Install a new one");
2422 return memcpy_method;
2426 * Emit code to copy a valuetype of type @klass whose address is stored in
2427 * @src->dreg to memory whose address is stored at @dest->dreg.
2430 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
2432 MonoInst *iargs [3];
2435 MonoMethod *memcpy_method;
2439 * This check breaks with spilled vars... need to handle it during verification anyway.
2440 * g_assert (klass && klass == src->klass && klass == dest->klass);
/* Choose native (marshalled) or managed layout size as requested. */
2444 n = mono_class_native_size (klass, &align);
2446 n = mono_class_value_size (klass, &align);
/* Small copies are inlined; larger ones call the managed memcpy. */
2448 if ((cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
2449 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
2450 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
/* Fallback: memcpy (dest, src, n) via the corlib helper.  (Loading
 * iargs [0]/[1] is elided in this listing.) */
2454 EMIT_NEW_ICONST (cfg, iargs [2], n);
2456 memcpy_method = get_memcpy_method ();
2457 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * Lazily look up and cache the managed String.memset(3) helper from
 * corlib.  Aborts if corlib is too old to have it.
 */
2462 get_memset_method (void)
2464 static MonoMethod *memset_method = NULL;
2465 if (!memset_method) {
2466 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
2468 g_error ("Old corlib found. Install a new one");
2470 return memset_method;
/*
 * Emit code which zero-initializes the valuetype @klass at the address
 * in @dest->dreg (CIL initobj).
 */
2474 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
2476 MonoInst *iargs [3];
2479 MonoMethod *memset_method;
2481 /* FIXME: Optimize this for the case when dest is an LDADDR */
2483 mono_class_init (klass);
2484 n = mono_class_value_size (klass, &align);
/* Small types: inline stores; larger: call managed memset (dest, 0, n).
 * (Loading iargs [0] is elided in this listing.) */
2486 if (n <= sizeof (gpointer) * 5) {
2487 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
2490 memset_method = get_memset_method ();
2492 EMIT_NEW_ICONST (cfg, iargs [1], 0);
2493 EMIT_NEW_ICONST (cfg, iargs [2], n);
2494 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * Emit code which loads the runtime generic context for the current
 * method.  Depending on @context_used and the method's kind, it comes
 * from the MRGCTX variable, the vtable variable, or "this"'s vtable.
 * NOTE(review): braces/returns between the branches are elided in this
 * listing.
 */
2499 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
2501 MonoInst *this = NULL;
2503 g_assert (cfg->generic_sharing_context);
/* Instance methods on reference types that don't use per-method
 * context can get the context from "this". */
2505 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
2506 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
2507 !method->klass->valuetype)
2508 EMIT_NEW_ARGLOAD (cfg, this, 0);
2510 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
/* Per-method generic context: read the MRGCTX local. */
2511 MonoInst *mrgctx_loc, *mrgctx_var;
2514 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
2516 mrgctx_loc = mono_get_vtable_var (cfg);
2517 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
2520 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
/* Static or valuetype methods: read the vtable local. */
2521 MonoInst *vtable_loc, *vtable_var;
2525 vtable_loc = mono_get_vtable_var (cfg);
2526 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
2528 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
/* The local actually holds an MRGCTX; dereference to its vtable. */
2529 MonoInst *mrgctx_var = vtable_var;
2532 vtable_reg = alloc_preg (cfg);
2533 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
2534 vtable_var->type = STACK_PTR;
/* Default: load the vtable from "this". */
2540 int vtable_reg, res_reg;
2542 vtable_reg = alloc_preg (cfg);
2543 res_reg = alloc_preg (cfg);
2544 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 *
 *   Allocate (from @mp) and fill an RGCTX-entry patch descriptor wrapping a
 * MonoJumpInfo of @patch_type/@patch_data. The returned entry is consumed by
 * emit_rgctx_fetch ()'s lazy-fetch trampoline call.
 */
2549 static MonoJumpInfoRgctxEntry *
2550 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, int info_type)
2552 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
2553 res->method = method;
2554 res->in_mrgctx = in_mrgctx;
2555 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
2556 res->data->type = patch_type;
2557 res->data->data.target = patch_data;
2558 res->info_type = info_type;
/* Emit a call to the lazy RGCTX fetch trampoline which resolves @entry using @rgctx. */
2563 static inline MonoInst*
2564 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
2566 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/* Emit IR that fetches the @rgctx_type slot for @klass from the current method's RGCTX. */
2570 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
2571 MonoClass *klass, int rgctx_type)
2573 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
2574 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2576 return emit_rgctx_fetch (cfg, rgctx, entry);
/* Emit IR that fetches the @rgctx_type slot for @cmethod from the current method's RGCTX. */
2580 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
2581 MonoMethod *cmethod, int rgctx_type)
2583 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
2584 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2586 return emit_rgctx_fetch (cfg, rgctx, entry);
/* Emit IR that fetches the @rgctx_type slot for @field from the current method's RGCTX. */
2590 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
2591 MonoClassField *field, int rgctx_type)
2593 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
2594 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2596 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * mini_emit_check_array_type:
 *
 *   Emit a type check that @obj's vtable (or class, under MONO_OPT_SHARED)
 * matches @array_class, throwing ArrayTypeMismatchException otherwise.
 * Handles the shared, generic-shared, AOT and plain-JIT variants.
 */
2600 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
2602 int vtable_reg = alloc_preg (cfg);
2603 int context_used = 0;
2605 if (cfg->generic_sharing_context)
2606 context_used = mono_class_check_context_used (array_class);
2608 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Shared code cannot compare vtables directly (they are per-domain), so compare classes. */
2610 if (cfg->opt & MONO_OPT_SHARED) {
2611 int class_reg = alloc_preg (cfg);
2612 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2613 if (cfg->compile_aot) {
2614 int klass_reg = alloc_preg (cfg);
2615 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
2616 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
2618 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
/* Generic sharing: the expected vtable comes from the RGCTX at run time. */
2620 } else if (context_used) {
2621 MonoInst *vtable_ins;
2623 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
2624 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
2626 if (cfg->compile_aot) {
2627 int vt_reg = alloc_preg (cfg);
2628 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, mono_class_vtable (cfg->domain, array_class));
2629 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
2631 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, mono_class_vtable (cfg->domain, array_class));
2635 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
/*
 * save_cast_details:
 *
 *   When --debug=casts is active, emit code recording the source class of
 * @obj_reg and the target @klass into the JIT TLS area
 * (class_cast_from/class_cast_to), so a failing cast can report both types.
 * No-op otherwise.
 */
2639 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg)
2641 if (mini_get_debug_options ()->better_cast_details) {
2642 int to_klass_reg = alloc_preg (cfg);
2643 int vtable_reg = alloc_preg (cfg);
2644 int klass_reg = alloc_preg (cfg);
2645 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
/* The TLS intrinsic may be unavailable on some platforms; warn instead of emitting. */
2648 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
2652 MONO_ADD_INS (cfg->cbb, tls_get);
2653 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2654 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2656 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
2657 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
2658 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
/* Clear the cast-details TLS slot written by save_cast_details () after a successful cast. */
2663 reset_cast_details (MonoCompile *cfg)
2665 /* Reset the variables holding the cast details */
2666 if (mini_get_debug_options ()->better_cast_details) {
2667 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
2669 MONO_ADD_INS (cfg->cbb, tls_get);
2670 /* It is enough to reset the from field */
2671 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
2676 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
2677 * generic code is generated.
2680 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
/* The Nullable<T>.Unbox helper takes the boxed object as its single argument. */
2682 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
2685 MonoInst *rgctx, *addr;
2687 /* FIXME: What if the class is shared? We might not
2688 have to get the address of the method from the
/* Shared case: resolve the helper's code address and rgctx through the RGCTX, then calli. */
2690 addr = emit_get_rgctx_method (cfg, context_used, method,
2691 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
2693 rgctx = emit_get_rgctx (cfg, method, context_used);
2695 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
2697 return mono_emit_method_call (cfg, method, &val, NULL);
/*
 * handle_unbox:
 *
 *   Emit an unbox of sp [0] to valuetype @klass: verify the object's
 * rank/element class (throwing InvalidCastException on mismatch), then
 * return the address of the value, i.e. the object pointer plus the
 * MonoObject header size.
 */
2702 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
2706 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
2707 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
2708 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
2709 int rank_reg = alloc_dreg (cfg ,STACK_I4);
2711 obj_reg = sp [0]->dreg;
2712 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2713 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
2715 /* FIXME: generics */
2716 g_assert (klass->rank == 0);
/* An array object can never unbox to a non-array valuetype. */
2719 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
2720 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
2722 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2723 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
/* Shared generic code: the expected element class comes from the RGCTX. */
2726 MonoInst *element_class;
2728 /* This assertion is from the unboxcast insn */
2729 g_assert (klass->rank == 0);
2731 element_class = emit_get_rgctx_klass (cfg, context_used,
2732 klass->element_class, MONO_RGCTX_INFO_KLASS);
2734 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
2735 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
2737 save_cast_details (cfg, klass->element_class, obj_reg);
2738 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
2739 reset_cast_details (cfg);
/* Result: address of the unboxed value = object + sizeof (MonoObject). */
2742 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_PTR), obj_reg, sizeof (MonoObject));
2743 MONO_ADD_INS (cfg->cbb, add);
2744 add->type = STACK_MP;
/*
 * handle_alloc:
 *
 *   Emit an object allocation for @klass, choosing between the shared
 * mono_object_new path, an AOT corlib helper, a GC managed allocator, or the
 * class-specific allocation function. @for_box is passed through to the GC
 * allocator selection.
 */
2751 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box)
2753 MonoInst *iargs [2];
2756 if (cfg->opt & MONO_OPT_SHARED) {
/* Shared code allocates through mono_object_new (domain, class). */
2757 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
2758 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
2760 alloc_ftn = mono_object_new;
2761 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib) {
2762 /* This happens often in argument checking code, eg. throw new FooException... */
2763 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
2764 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
2765 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
2767 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2768 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
/* Prefer the GC's managed allocator when one exists for this vtable. */
2771 if (managed_alloc) {
2772 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
2773 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
2775 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* Some allocation functions also take the instance size in pointer-sized words. */
2777 guint32 lw = vtable->klass->instance_size;
2778 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
2779 EMIT_NEW_ICONST (cfg, iargs [0], lw);
2780 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
2783 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
2787 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/*
 * handle_alloc_from_inst:
 *
 *   Like handle_alloc (), but the class/vtable is only known at run time and
 * is supplied by @data_inst (e.g. fetched from the RGCTX), so allocation goes
 * through mono_object_new or mono_object_new_specific.
 */
2791 handle_alloc_from_inst (MonoCompile *cfg, MonoClass *klass, MonoInst *data_inst,
2794 MonoInst *iargs [2];
2795 MonoMethod *managed_alloc = NULL;
2799 FIXME: we cannot get managed_alloc here because we can't get
2800 the class's vtable (because it's not a closed class)
2802 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2803 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
2806 if (cfg->opt & MONO_OPT_SHARED) {
2807 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
2808 iargs [1] = data_inst;
2809 alloc_ftn = mono_object_new;
/* managed_alloc stays NULL here (see FIXME above), so this branch is currently dead. */
2811 if (managed_alloc) {
2812 iargs [0] = data_inst;
2813 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
2816 iargs [0] = data_inst;
2817 alloc_ftn = mono_object_new_specific;
2820 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/*
 * handle_box:
 *
 *   Emit a box of @val to @klass: Nullable<T> boxes through the managed
 * Nullable Box helper; otherwise allocate the object and store the value
 * right after the MonoObject header.
 */
2824 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass)
2826 MonoInst *alloc, *ins;
2828 if (mono_class_is_nullable (klass)) {
2829 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
2830 return mono_emit_method_call (cfg, method, &val, NULL);
2833 alloc = handle_alloc (cfg, klass, TRUE);
2835 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * handle_box_from_inst:
 *
 *   Shared-generic variant of handle_box (): the Nullable Box helper is
 * invoked indirectly through an RGCTX-resolved address, and the allocation
 * uses the run-time class data in @data_inst.
 */
2841 handle_box_from_inst (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used, MonoInst *data_inst)
2843 MonoInst *alloc, *ins;
2845 if (mono_class_is_nullable (klass)) {
2846 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
2847 /* FIXME: What if the class is shared? We might not
2848 have to get the method address from the RGCTX. */
2849 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
2850 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
2851 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2853 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
2855 alloc = handle_alloc_from_inst (cfg, klass, data_inst, TRUE);
/* Store the value just past the MonoObject header of the new box. */
2857 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * handle_castclass:
 *
 *   Emit a castclass of @src to @klass. NULL passes through unchanged;
 * otherwise the object's vtable/class is checked (interface check, direct
 * vtable/class compare for sealed non-array classes, or the generic
 * mini_emit_castclass path), throwing InvalidCastException on failure.
 */
2864 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
2866 MonoBasicBlock *is_null_bb;
2867 int obj_reg = src->dreg;
2868 int vtable_reg = alloc_preg (cfg);
2870 NEW_BBLOCK (cfg, is_null_bb);
/* A null reference always casts successfully. */
2872 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
2873 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
2875 save_cast_details (cfg, klass, obj_reg);
2877 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2878 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2879 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
2881 int klass_reg = alloc_preg (cfg);
2883 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed, non-array class in JIT mode: an exact vtable/class compare suffices. */
2885 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
2886 /* the remoting code is broken, access the class for now */
2888 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
2889 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
2891 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2892 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
2894 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
2896 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2897 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, is_null_bb);
2901 MONO_START_BB (cfg, is_null_bb);
2903 reset_cast_details (cfg);
/*
 * handle_isinst:
 *
 *   Emit an isinst test of @src against @klass. The result register starts
 * as a copy of the object and is zeroed on the false path, so the IR yields
 * the object itself on success and NULL on failure (null input yields null).
 */
2909 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
2912 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
2913 int obj_reg = src->dreg;
2914 int vtable_reg = alloc_preg (cfg);
2915 int res_reg = alloc_preg (cfg);
2917 NEW_BBLOCK (cfg, is_null_bb);
2918 NEW_BBLOCK (cfg, false_bb);
2919 NEW_BBLOCK (cfg, end_bb);
2921 /* Do the assignment at the beginning, so the other assignment can be if converted */
2922 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
2923 ins->type = STACK_OBJ;
2926 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
2927 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
2929 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2930 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2931 /* the is_null_bb target simply copies the input register to the output */
2932 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
2934 int klass_reg = alloc_preg (cfg);
2936 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Array case: ranks must match, then the element (cast) class is compared. */
2939 int rank_reg = alloc_preg (cfg);
2940 int eclass_reg = alloc_preg (cfg);
2942 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
2943 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
2944 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
2945 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2946 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
/* Special-case element classes with non-obvious assignability (object, Enum and its parent). */
2947 if (klass->cast_class == mono_defaults.object_class) {
2948 int parent_reg = alloc_preg (cfg);
2949 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
2950 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
2951 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
2952 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
2953 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
2954 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
2955 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
2956 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
2957 } else if (klass->cast_class == mono_defaults.enum_class) {
2958 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
2959 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
2960 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
2961 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
2963 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
2964 /* Check that the object is a vector too */
2965 int bounds_reg = alloc_preg (cfg);
2966 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
2967 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
2968 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
2971 /* the is_null_bb target simply copies the input register to the output */
2972 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
2974 } else if (mono_class_is_nullable (klass)) {
2975 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2976 /* the is_null_bb target simply copies the input register to the output */
2977 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
/* Sealed, non-shared, non-AOT class: exact vtable/class comparison. */
2979 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
2980 /* the remoting code is broken, access the class for now */
2982 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
2983 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
2985 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2986 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
2988 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
2989 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
2991 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2992 /* the is_null_bb target simply copies the input register to the output */
2993 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, is_null_bb);
/* False path: clear the result register; null/true paths keep the copied object. */
2998 MONO_START_BB (cfg, false_bb);
3000 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
3001 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3003 MONO_START_BB (cfg, is_null_bb);
3005 MONO_START_BB (cfg, end_bb);
3011 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3013 /* This opcode takes as input an object reference and a class, and returns:
3014 0) if the object is an instance of the class,
3015 1) if the object is not instance of the class,
3016 2) if the object is a proxy whose type cannot be determined */
3019 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
3020 int obj_reg = src->dreg;
3021 int dreg = alloc_ireg (cfg);
3023 int klass_reg = alloc_preg (cfg);
3025 NEW_BBLOCK (cfg, true_bb);
3026 NEW_BBLOCK (cfg, false_bb);
3027 NEW_BBLOCK (cfg, false2_bb);
3028 NEW_BBLOCK (cfg, end_bb);
3029 NEW_BBLOCK (cfg, no_proxy_bb);
/* A null reference is "not an instance" (result 1). */
3031 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3032 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
3034 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3035 NEW_BBLOCK (cfg, interface_fail_bb);
3037 tmp_reg = alloc_preg (cfg);
3038 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3039 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
3040 MONO_START_BB (cfg, interface_fail_bb);
3041 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Failed interface check: only a transparent proxy can still yield result 2. */
3043 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
3045 tmp_reg = alloc_preg (cfg);
3046 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3047 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3048 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
3050 tmp_reg = alloc_preg (cfg);
3051 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3052 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Non-interface target: a proxy checks against its remote proxy_class instead. */
3054 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3055 tmp_reg = alloc_preg (cfg);
3056 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3057 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3059 tmp_reg = alloc_preg (cfg);
3060 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3061 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3062 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3064 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
3065 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
3067 MONO_START_BB (cfg, no_proxy_bb);
3069 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
/* Merge: load 1 / 2 / 0 into dreg per the contract documented above. */
3072 MONO_START_BB (cfg, false_bb);
3074 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3075 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3077 MONO_START_BB (cfg, false2_bb);
3079 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
3080 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3082 MONO_START_BB (cfg, true_bb);
3084 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3086 MONO_START_BB (cfg, end_bb);
3089 MONO_INST_NEW (cfg, ins, OP_ICONST);
3091 ins->type = STACK_I4;
3097 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3099 /* This opcode takes as input an object reference and a class, and returns:
3100 0) if the object is an instance of the class,
3101 1) if the object is a proxy whose type cannot be determined
3102 an InvalidCastException exception is thrown otherwhise*/
3105 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
3106 int obj_reg = src->dreg;
3107 int dreg = alloc_ireg (cfg);
3108 int tmp_reg = alloc_preg (cfg);
3109 int klass_reg = alloc_preg (cfg);
3111 NEW_BBLOCK (cfg, end_bb);
3112 NEW_BBLOCK (cfg, ok_result_bb);
/* Null casts successfully (result 0). */
3114 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3115 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
3117 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3118 NEW_BBLOCK (cfg, interface_fail_bb);
3120 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3121 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
3122 MONO_START_BB (cfg, interface_fail_bb);
3123 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Not a proxy and not the interface: the class check below throws. */
3125 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
3127 tmp_reg = alloc_preg (cfg);
3128 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3129 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3130 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/* Proxy with custom type info: undetermined, result 1. */
3132 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3133 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3136 NEW_BBLOCK (cfg, no_proxy_bb);
3138 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3139 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3140 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3142 tmp_reg = alloc_preg (cfg);
3143 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3144 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3146 tmp_reg = alloc_preg (cfg);
3147 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3148 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3149 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3151 NEW_BBLOCK (cfg, fail_1_bb);
3153 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
3155 MONO_START_BB (cfg, fail_1_bb);
3157 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3158 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3160 MONO_START_BB (cfg, no_proxy_bb);
/* Plain object: a normal castclass check, which throws on mismatch. */
3162 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
3165 MONO_START_BB (cfg, ok_result_bb);
3167 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3169 MONO_START_BB (cfg, end_bb);
3172 MONO_INST_NEW (cfg, ins, OP_ICONST);
3174 ins->type = STACK_I4;
/*
 * handle_delegate_ctor:
 *
 *   Emit an inlined delegate construction for delegate class @klass over
 * @method with target object @target: allocate the delegate, fill its
 * target/method/method_code/invoke_impl fields, and return the new object.
 */
3179 static G_GNUC_UNUSED MonoInst*
3180 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method)
3182 gpointer *trampoline;
3183 MonoInst *obj, *method_ins, *tramp_ins;
3187 obj = handle_alloc (cfg, klass, FALSE);
3189 /* Inline the contents of mono_delegate_ctor */
3191 /* Set target field */
3192 /* Optimize away setting of NULL target */
3193 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0))
3194 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
3196 /* Set method field */
3197 EMIT_NEW_METHODCONST (cfg, method_ins, method);
3198 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
3201 * To avoid looking up the compiled code belonging to the target method
3202 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
3203 * store it, and we fill it after the method has been compiled.
/* The code slot is shared per (domain, method) via method_code_hash, created lazily under the domain lock. */
3205 if (!cfg->compile_aot && !method->dynamic) {
3206 MonoInst *code_slot_ins;
3208 domain = mono_domain_get ();
3209 mono_domain_lock (domain);
3210 if (!domain_jit_info (domain)->method_code_hash)
3211 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
3212 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
3214 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
3215 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
3217 mono_domain_unlock (domain);
3219 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
3220 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
3223 /* Set invoke_impl field */
3224 if (cfg->compile_aot) {
3225 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, klass);
3227 trampoline = mono_create_delegate_trampoline (klass);
3228 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
3230 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
3232 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit a call to the vararg array-new icall for @rank dimensions with the
 * dimension arguments in @sp. Registers the icall so it gets a wrapper, and
 * marks the method as using varargs.
 */
3238 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
3240 MonoJitICallInfo *info;
3242 /* Need to register the icall so it gets an icall wrapper */
3243 info = mono_get_array_new_va_icall (rank);
3245 cfg->flags |= MONO_CFG_HAS_VARARGS;
3247 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
3248 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * mono_emit_load_got_addr:
 *
 *   If a GOT variable exists and has not been materialized yet, insert an
 * OP_LOAD_GOTADDR at the very start of the entry bblock and a dummy use in
 * the exit bblock so the variable stays live for the whole method.
 */
3252 mono_emit_load_got_addr (MonoCompile *cfg)
3254 MonoInst *getaddr, *dummy_use;
3256 if (!cfg->got_var || cfg->got_var_allocated)
3259 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
3260 getaddr->dreg = cfg->got_var->dreg;
3262 /* Add it to the start of the first bblock */
3263 if (cfg->bb_entry->code) {
3264 getaddr->next = cfg->bb_entry->code;
3265 cfg->bb_entry->code = getaddr;
3268 MONO_ADD_INS (cfg->bb_entry, getaddr);
3270 cfg->got_var_allocated = TRUE;
3273 * Add a dummy use to keep the got_var alive, since real uses might
3274 * only be generated by the back ends.
3275 * Add it to end_bblock, so the variable's lifetime covers the whole
3277 * It would be better to make the usage of the got var explicit in all
3278 * cases when the backend needs it (i.e. calls, throw etc.), so this
3279 * wouldn't be needed.
3281 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
3282 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Inlining size threshold, overridable through the MONO_INLINELIMIT env var;
 * initialized lazily in mono_method_check_inlining (). */
3285 static int inline_limit;
3286 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 *   Decide whether @method may be inlined into the method being compiled.
 * Rejects runtime/icall/pinvoke/synchronized/noinline methods, methods with
 * exception clauses, marshal-by-ref classes, bodies over the inline size
 * limit, methods whose class initializer cannot safely be handled, methods
 * with declarative security, and (under soft-float) R4 signatures.
 */
3289 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
3291 MonoMethodHeader *header = mono_method_get_header (method);
3293 #ifdef MONO_ARCH_SOFT_FLOAT
3294 MonoMethodSignature *sig = mono_method_signature (method);
3298 if (cfg->generic_sharing_context)
3301 #ifdef MONO_ARCH_HAVE_LMF_OPS
3302 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3303 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
3304 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
3308 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_RUNTIME) ||
3309 (method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3310 (method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
3311 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
3312 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL) ||
3313 (method->klass->marshalbyref) ||
3314 !header || header->num_clauses)
3317 /* also consider num_locals? */
3318 /* Do the size check early to avoid creating vtables */
3319 if (!inline_limit_inited) {
3320 if (getenv ("MONO_INLINELIMIT"))
3321 inline_limit = atoi (getenv ("MONO_INLINELIMIT"));
3323 inline_limit = INLINE_LENGTH_LIMIT;
3324 inline_limit_inited = TRUE;
3326 if (header->code_size >= inline_limit)
3330 * if we can initialize the class of the method right away, we do,
3331 * otherwise we don't allow inlining if the class needs initialization,
3332 * since it would mean inserting a call to mono_runtime_class_init()
3333 * inside the inlined code
3335 if (!(cfg->opt & MONO_OPT_SHARED)) {
3336 if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
3337 if (cfg->run_cctors && method->klass->has_cctor) {
3338 if (!method->klass->runtime_info)
3339 /* No vtable created yet */
3341 vtable = mono_class_vtable (cfg->domain, method->klass);
3344 /* This makes so that inline cannot trigger */
3345 /* .cctors: too many apps depend on them */
3346 /* running with a specific order... */
3347 if (! vtable->initialized)
3349 mono_runtime_class_init (vtable);
3351 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
3352 if (!method->klass->runtime_info)
3353 /* No vtable created yet */
3355 vtable = mono_class_vtable (cfg->domain, method->klass);
3358 if (!vtable->initialized)
3363 * If we're compiling for shared code
3364 * the cctor will need to be run at aot method load time, for example,
3365 * or at the end of the compilation of the inlining method.
3367 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
3372 * CAS - do not inline methods with declarative security
3373 * Note: this has to be before any possible return TRUE;
3375 if (mono_method_has_declsec (method))
3378 #ifdef MONO_ARCH_SOFT_FLOAT
/* Soft-float: R4 values need special handling, so don't inline methods with R4 ret/params. */
3380 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
3382 for (i = 0; i < sig->param_count; ++i)
3383 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Decide whether a static field access from @method requires emitting a
 * class-initializer run for @vtable's class. An already-initialized vtable
 * (outside AOT), BeforeFieldInit classes, classes with no pending cctor, and
 * instance methods of the class itself (where init happened before the call)
 * all avoid it.
 */
3391 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoVTable *vtable)
3393 if (vtable->initialized && !cfg->compile_aot)
3396 if (vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
3399 if (!mono_class_needs_cctor_run (vtable->klass, method))
3402 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (vtable->klass == method->klass))
3403 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit the address of element @index of one-dimensional array @arr of
 * element type @klass: bounds-check against max_length, then compute
 * arr + index * element_size + offsetof (MonoArray, vector). Uses an x86
 * LEA when the element size is a small power of two.
 */
3410 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index)
3414 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
3416 mono_class_init (klass);
3417 size = mono_class_array_element_size (klass);
3419 mult_reg = alloc_preg (cfg);
3420 array_reg = arr->dreg;
3421 index_reg = index->dreg;
3423 #if SIZEOF_VOID_P == 8
3424 /* The array reg is 64 bits but the index reg is only 32 */
3425 index2_reg = alloc_preg (cfg);
3426 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
3428 index2_reg = index_reg;
3431 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
3433 #if defined(__i386__) || defined(__x86_64__)
/* Power-of-two element sizes can fold the scale into a single LEA. */
3434 if (size == 1 || size == 2 || size == 4 || size == 8) {
3435 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
3437 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
3438 ins->type = STACK_PTR;
/* Generic path: explicit multiply and adds. */
3444 add_reg = alloc_preg (cfg);
3446 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
3447 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
3448 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3449 ins->type = STACK_PTR;
3450 MONO_ADD_INS (cfg->cbb, ins);
3455 #ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit IR computing the address of element [INDEX_INS1, INDEX_INS2] of a
 * two dimensional array ARR with element class KLASS. Performs per-dimension
 * range checks against the array's MonoArrayBounds, then computes
 * addr = arr + ((realidx1 * length2) + realidx2) * size + offsetof (MonoArray, vector).
 * Only compiled when the architecture does not emulate mul/div (depends on
 * the multiply opcodes emitted below). The result has type STACK_MP.
 */
3457 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
3459 int bounds_reg = alloc_preg (cfg);
3460 int add_reg = alloc_preg (cfg);
3461 int mult_reg = alloc_preg (cfg);
3462 int mult2_reg = alloc_preg (cfg);
3463 int low1_reg = alloc_preg (cfg);
3464 int low2_reg = alloc_preg (cfg);
3465 int high1_reg = alloc_preg (cfg);
3466 int high2_reg = alloc_preg (cfg);
3467 int realidx1_reg = alloc_preg (cfg);
3468 int realidx2_reg = alloc_preg (cfg);
3469 int sum_reg = alloc_preg (cfg);
3474 mono_class_init (klass);
3475 size = mono_class_array_element_size (klass);
3477 index1 = index_ins1->dreg;
3478 index2 = index_ins2->dreg;
3480 /* range checking */
3481 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
3482 arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
/* first dimension: realidx1 = index1 - lower_bound; check against length */
3484 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
3485 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3486 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
3487 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
3488 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
3489 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
3490 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* second dimension: bounds [1] lives sizeof (MonoArrayBounds) past bounds [0] */
3492 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
3493 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3494 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
3495 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
3496 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
3497 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
3498 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* row-major flattening of the two indexes, then scale by element size */
3500 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
3501 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
3502 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
3503 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
3504 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3506 ins->type = STACK_MP;
3508 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 *   Emit IR computing an element address for the Get/Set/Address methods of
 * multi-dimensional arrays. SP holds the array followed by RANK indexes; for
 * a setter the trailing value argument is excluded from the rank count.
 * Rank 1 and (when supported) rank 2 use inline fast paths; otherwise a
 * marshalling wrapper (Array::Address) is called.
 */
3515 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
3519 MonoMethod *addr_method;
3522 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
3525 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1]);
3527 #ifndef MONO_ARCH_EMULATE_MUL_DIV
3528 /* emit_ldelema_2 depends on OP_LMUL */
3529 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
3530 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
/* Slow path: call the generated Array::Address wrapper for this rank/size */
3534 element_size = mono_class_array_element_size (cmethod->klass->element_class);
3535 addr_method = mono_marshal_get_array_address (rank, element_size);
3536 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/*
 * mini_emit_inst_for_method:
 *
 *   Try to replace a call to CMETHOD (signature FSIG, arguments ARGS) with
 * an intrinsic IR sequence. Recognizes selected methods of String, Object,
 * Array, RuntimeHelpers, Thread, Monitor, Interlocked, Debugger,
 * Environment and Math, falling through to SIMD and arch-specific
 * intrinsics. Returns the result instruction, or (per the visible returns)
 * delegates to mono_arch_emit_inst_for_method () at the end; branches that
 * recognize nothing presumably return NULL -- those returns are not visible
 * in this excerpt.
 */
3542 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
3544 MonoInst *ins = NULL;
3546 static MonoClass *runtime_helpers_class = NULL;
3547 if (! runtime_helpers_class)
3548 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
3549 "System.Runtime.CompilerServices", "RuntimeHelpers");
/* --- System.String intrinsics --- */
3551 if (cmethod->klass == mono_defaults.string_class) {
3552 if (strcmp (cmethod->name, "get_Chars") == 0) {
3553 int dreg = alloc_ireg (cfg);
3554 int index_reg = alloc_preg (cfg);
3555 int mult_reg = alloc_preg (cfg);
3556 int add_reg = alloc_preg (cfg);
3558 #if SIZEOF_VOID_P == 8
3559 /* The array reg is 64 bits but the index reg is only 32 */
3560 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
3562 index_reg = args [1]->dreg;
3564 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
3566 #if defined(__i386__) || defined(__x86_64__)
/* x86: one LEA computes str + index*2 + offsetof (MonoString, chars) */
3567 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
3568 add_reg = ins->dreg;
3569 /* Avoid a warning */
3571 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
/* generic path: shift index by 1 (UTF-16 chars are 2 bytes) and add */
3574 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
3575 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
3576 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
3577 add_reg, G_STRUCT_OFFSET (MonoString, chars));
3579 type_from_op (ins, NULL, NULL);
3581 } else if (strcmp (cmethod->name, "get_Length") == 0) {
3582 int dreg = alloc_ireg (cfg);
3583 /* Decompose later to allow more optimizations */
3584 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
3585 ins->type = STACK_I4;
3586 cfg->cbb->has_array_access = TRUE;
3587 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
3590 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
3591 int mult_reg = alloc_preg (cfg);
3592 int add_reg = alloc_preg (cfg);
3594 /* The corlib functions check for oob already. */
3595 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
3596 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
3597 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
/* --- System.Object intrinsics --- */
3600 } else if (cmethod->klass == mono_defaults.object_class) {
3602 if (strcmp (cmethod->name, "GetType") == 0) {
3603 int dreg = alloc_preg (cfg);
3604 int vt_reg = alloc_preg (cfg);
3605 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3606 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
3607 type_from_op (ins, NULL, NULL);
/* hash-from-address is only valid with a non-moving GC */
3610 #if !defined(MONO_ARCH_EMULATE_MUL_DIV) && !defined(HAVE_MOVING_COLLECTOR)
3611 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0) {
3612 int dreg = alloc_ireg (cfg);
3613 int t1 = alloc_ireg (cfg);
/* (obj >> 3) * 2654435761 -- Knuth multiplicative hash of the address */
3615 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
3616 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
3617 ins->type = STACK_I4;
/* Object..ctor is empty: replace the call with a nop */
3621 } else if (strcmp (cmethod->name, ".ctor") == 0) {
3622 MONO_INST_NEW (cfg, ins, OP_NOP);
3623 MONO_ADD_INS (cfg->cbb, ins);
/* --- System.Array intrinsics --- */
3627 } else if (cmethod->klass == mono_defaults.array_class) {
3628 if (cmethod->name [0] != 'g')
3631 if (strcmp (cmethod->name, "get_Rank") == 0) {
3632 int dreg = alloc_ireg (cfg);
3633 int vtable_reg = alloc_preg (cfg);
3634 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, vtable_reg,
3635 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3636 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
3637 vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3638 type_from_op (ins, NULL, NULL);
3641 } else if (strcmp (cmethod->name, "get_Length") == 0) {
3642 int dreg = alloc_ireg (cfg);
3644 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
3645 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
3646 type_from_op (ins, NULL, NULL);
/* --- RuntimeHelpers --- */
3651 } else if (cmethod->klass == runtime_helpers_class) {
3653 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
3654 EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
/* --- System.Threading.Thread --- */
3658 } else if (cmethod->klass == mono_defaults.thread_class) {
3659 if (strcmp (cmethod->name, "get_CurrentThread") == 0 && (ins = mono_arch_get_thread_intrinsic (cfg))) {
3660 ins->dreg = alloc_preg (cfg);
3661 ins->type = STACK_OBJ;
3662 MONO_ADD_INS (cfg->cbb, ins);
3664 } else if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
3665 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
3666 MONO_ADD_INS (cfg->cbb, ins);
3668 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
3669 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
3670 MONO_ADD_INS (cfg->cbb, ins);
/* --- System.Threading.Monitor: fast paths for Enter/Exit --- */
3673 } else if (cmethod->klass == mono_defaults.monitor_class) {
3674 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
3675 if (strcmp (cmethod->name, "Enter") == 0) {
/* call the monitor-enter trampoline with the object in a fixed register */
3678 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
3679 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
3680 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
3681 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
3683 return (MonoInst*)call;
3684 } else if (strcmp (cmethod->name, "Exit") == 0) {
3687 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
3688 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
3689 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
3690 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
3692 return (MonoInst*)call;
3694 #elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
3695 MonoMethod *fast_method = NULL;
3697 /* Avoid infinite recursion */
3698 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
3699 (strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
3700 strcmp (cfg->method->name, "FastMonitorExit") == 0))
3703 if (strcmp (cmethod->name, "Enter") == 0 ||
3704 strcmp (cmethod->name, "Exit") == 0)
3705 fast_method = mono_monitor_get_fast_path (cmethod);
3709 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
/* --- System.Array::GetGenericValueImpl: load/store via ldelema --- */
3711 } else if (mini_class_is_system_array (cmethod->klass) &&
3712 strcmp (cmethod->name, "GetGenericValueImpl") == 0) {
3713 MonoInst *addr, *store, *load;
3714 MonoClass *eklass = mono_class_from_mono_type (fsig->params [1]);
3716 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1]);
3717 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
3718 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/* --- System.Threading.Interlocked --- */
3720 } else if (cmethod->klass->image == mono_defaults.corlib &&
3721 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
3722 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
3725 #if SIZEOF_VOID_P == 8
3726 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
3727 /* 64 bit reads are already atomic */
3728 MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
3729 ins->dreg = mono_alloc_preg (cfg);
3730 ins->inst_basereg = args [0]->dreg;
3731 ins->inst_offset = 0;
3732 MONO_ADD_INS (cfg->cbb, ins);
3736 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
/* Increment/Decrement: atomic add of a constant +1/-1 */
3737 if (strcmp (cmethod->name, "Increment") == 0) {
3738 MonoInst *ins_iconst;
3741 if (fsig->params [0]->type == MONO_TYPE_I4)
3742 opcode = OP_ATOMIC_ADD_NEW_I4;
3743 #if SIZEOF_VOID_P == 8
3744 else if (fsig->params [0]->type == MONO_TYPE_I8)
3745 opcode = OP_ATOMIC_ADD_NEW_I8;
3748 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
3749 ins_iconst->inst_c0 = 1;
3750 ins_iconst->dreg = mono_alloc_ireg (cfg);
3751 MONO_ADD_INS (cfg->cbb, ins_iconst);
3753 MONO_INST_NEW (cfg, ins, opcode);
3754 ins->dreg = mono_alloc_ireg (cfg);
3755 ins->inst_basereg = args [0]->dreg;
3756 ins->inst_offset = 0;
3757 ins->sreg2 = ins_iconst->dreg;
3758 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
3759 MONO_ADD_INS (cfg->cbb, ins);
3761 } else if (strcmp (cmethod->name, "Decrement") == 0) {
3762 MonoInst *ins_iconst;
3765 if (fsig->params [0]->type == MONO_TYPE_I4)
3766 opcode = OP_ATOMIC_ADD_NEW_I4;
3767 #if SIZEOF_VOID_P == 8
3768 else if (fsig->params [0]->type == MONO_TYPE_I8)
3769 opcode = OP_ATOMIC_ADD_NEW_I8;
3772 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
3773 ins_iconst->inst_c0 = -1;
3774 ins_iconst->dreg = mono_alloc_ireg (cfg);
3775 MONO_ADD_INS (cfg->cbb, ins_iconst);
3777 MONO_INST_NEW (cfg, ins, opcode);
3778 ins->dreg = mono_alloc_ireg (cfg);
3779 ins->inst_basereg = args [0]->dreg;
3780 ins->inst_offset = 0;
3781 ins->sreg2 = ins_iconst->dreg;
3782 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
3783 MONO_ADD_INS (cfg->cbb, ins);
3785 } else if (strcmp (cmethod->name, "Add") == 0) {
3788 if (fsig->params [0]->type == MONO_TYPE_I4)
3789 opcode = OP_ATOMIC_ADD_NEW_I4;
3790 #if SIZEOF_VOID_P == 8
3791 else if (fsig->params [0]->type == MONO_TYPE_I8)
3792 opcode = OP_ATOMIC_ADD_NEW_I8;
3796 MONO_INST_NEW (cfg, ins, opcode);
3797 ins->dreg = mono_alloc_ireg (cfg);
3798 ins->inst_basereg = args [0]->dreg;
3799 ins->inst_offset = 0;
3800 ins->sreg2 = args [1]->dreg;
3801 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
3802 MONO_ADD_INS (cfg->cbb, ins);
3805 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
3807 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
3808 if (strcmp (cmethod->name, "Exchange") == 0) {
3811 if (fsig->params [0]->type == MONO_TYPE_I4)
3812 opcode = OP_ATOMIC_EXCHANGE_I4;
3813 #if SIZEOF_VOID_P == 8
3814 else if ((fsig->params [0]->type == MONO_TYPE_I8) ||
3815 (fsig->params [0]->type == MONO_TYPE_I) ||
3816 (fsig->params [0]->type == MONO_TYPE_OBJECT))
3817 opcode = OP_ATOMIC_EXCHANGE_I8;
3819 else if ((fsig->params [0]->type == MONO_TYPE_I) ||
3820 (fsig->params [0]->type == MONO_TYPE_OBJECT))
3821 opcode = OP_ATOMIC_EXCHANGE_I4;
3826 MONO_INST_NEW (cfg, ins, opcode);
3827 ins->dreg = mono_alloc_ireg (cfg);
3828 ins->inst_basereg = args [0]->dreg;
3829 ins->inst_offset = 0;
3830 ins->sreg2 = args [1]->dreg;
3831 MONO_ADD_INS (cfg->cbb, ins);
3833 switch (fsig->params [0]->type) {
3835 ins->type = STACK_I4;
3839 ins->type = STACK_I8;
3841 case MONO_TYPE_OBJECT:
3842 ins->type = STACK_OBJ;
3845 g_assert_not_reached ();
3848 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
3850 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS_IMM
3852 * Can't implement CompareExchange methods this way since they have
3853 * three arguments. We can implement one of the common cases, where the new
3854 * value is a constant.
3856 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
3857 if ((fsig->params [1]->type == MONO_TYPE_I4 ||
3858 (sizeof (gpointer) == 4 && fsig->params [1]->type == MONO_TYPE_I))
3859 && args [2]->opcode == OP_ICONST) {
3860 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_IMM_I4);
3861 ins->dreg = alloc_ireg (cfg);
3862 ins->sreg1 = args [0]->dreg;
3863 ins->sreg2 = args [1]->dreg;
3864 ins->backend.data = GINT_TO_POINTER (args [2]->inst_c0);
3865 ins->type = STACK_I4;
3866 MONO_ADD_INS (cfg->cbb, ins);
3868 /* The I8 case is hard to detect, since the arg might be a conv.i8 (iconst) tree */
3870 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS_IMM */
/* --- misc corlib: Debugger.Break, Environment.get_IsRunningOnWindows --- */
3874 } else if (cmethod->klass->image == mono_defaults.corlib) {
3875 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
3876 && strcmp (cmethod->klass->name, "Debugger") == 0) {
3877 MONO_INST_NEW (cfg, ins, OP_BREAK);
3878 MONO_ADD_INS (cfg->cbb, ins);
3881 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
3882 && strcmp (cmethod->klass->name, "Environment") == 0) {
3883 #ifdef PLATFORM_WIN32
3884 EMIT_NEW_ICONST (cfg, ins, 1);
3886 EMIT_NEW_ICONST (cfg, ins, 0);
3890 } else if (cmethod->klass == mono_defaults.math_class) {
3892 * There is general branches code for Min/Max, but it does not work for
3894 * http://everything2.com/?node_id=1051618
/* SIMD intrinsics take precedence over the arch-specific fallback */
3898 #ifdef MONO_ARCH_SIMD_INTRINSICS
3899 if (cfg->opt & MONO_OPT_SIMD) {
3900 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
3906 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
/*
 * mini_redirect_call:
 *
 *   Redirect calls to selected internal runtime methods to specialized
 * implementations. Currently handles managed string allocation
 * (String::InternalAllocateStr) when a managed allocator is available,
 * replacing the icall with a call to managed_alloc (vtable, length).
 * The fall-through return for unhandled methods is not visible in this
 * excerpt.
 */
3910 * This entry point could be used later for arbitrary method
3913 inline static MonoInst*
3914 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
3915 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
3917 if (method->klass == mono_defaults.string_class) {
3918 /* managed string allocation support */
3919 if (strcmp (method->name, "InternalAllocateStr") == 0) {
3920 MonoInst *iargs [2];
3921 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3922 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, FALSE);
3925 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3926 iargs [1] = args [0];
3927 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
/*
 * mono_save_args:
 *
 *   Store the call arguments on the stack (SP) into newly created local
 * variables which become cfg->args for the method being inlined, so the
 * inlined body can access them as its arguments.
 */
3934 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
3936 MonoInst *store, *temp;
3939 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
/* index 0 is 'this' when hasthis: infer its type from the stack entry */
3940 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
3943 * FIXME: We should use *args++ = sp [0], but that would mean the arg
3944 * would be different than the MonoInst's used to represent arguments, and
3945 * the ldelema implementation can't deal with that.
3946 * Solution: When ldelema is used on an inline argument, create a var for
3947 * it, emit ldelema on that var, and emit the saving code below in
3948 * inline_method () if needed.
3950 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
3951 cfg->args [i] = temp;
3952 /* This uses cfg->args [i] which is set by the preceding line */
3953 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
3954 store->cil_code = sp [0]->cil_code;
/* Debugging aids: when enabled, inlining can be restricted by method name
 * via environment variables (see the two check_* functions below). */
3959 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
3960 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
3962 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *
 *   Returns whether CALLED_METHOD may be inlined, based on the
 * MONO_INLINE_CALLED_METHOD_NAME_LIMIT environment variable: the method's
 * full name must start with the configured prefix. The cached limit and
 * the env var are read once (getenv result intentionally not freed).
 */
3964 check_inline_called_method_name_limit (MonoMethod *called_method)
3967 static char *limit = NULL;
3969 if (limit == NULL) {
3970 char *limit_string = getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
3972 if (limit_string != NULL)
3973 limit = limit_string;
/* no env var: empty limit means "no restriction" */
3975 limit = (char *) "";
3978 if (limit [0] != '\0') {
3979 char *called_method_name = mono_method_full_name (called_method, TRUE);
3981 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
3982 g_free (called_method_name);
3984 //return (strncmp_result <= 0);
3985 return (strncmp_result == 0);
3992 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *
 *   Returns whether inlining is allowed inside CALLER_METHOD, based on the
 * MONO_INLINE_CALLER_METHOD_NAME_LIMIT environment variable: the caller's
 * full name must start with the configured prefix. Mirrors
 * check_inline_called_method_name_limit () above.
 */
3994 check_inline_caller_method_name_limit (MonoMethod *caller_method)
3997 static char *limit = NULL;
3999 if (limit == NULL) {
4000 char *limit_string = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
4001 if (limit_string != NULL) {
4002 limit = limit_string;
/* no env var: empty limit means "no restriction" */
4004 limit = (char *) "";
4008 if (limit [0] != '\0') {
4009 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
4011 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
4012 g_free (caller_method_name);
4014 //return (strncmp_result <= 0);
4015 return (strncmp_result == 0);
/*
 * inline_method:
 *
 *   Inline CMETHOD (signature FSIG, arguments SP) into the method being
 * compiled, by saving the relevant cfg state, calling mono_method_to_ir ()
 * recursively on CMETHOD's IL between fresh start/end basic blocks, and
 * restoring the state afterwards. On success the new bblocks are linked
 * into the caller's CFG; on failure (cost too high or an exception during
 * conversion) the newly added bblocks are discarded by restoring cfg->cbb.
 * INLINE_ALLWAYS forces inlining regardless of cost.
 */
4023 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
4024 guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_allways)
4026 MonoInst *ins, *rvar = NULL;
4027 MonoMethodHeader *cheader;
4028 MonoBasicBlock *ebblock, *sbblock;
4030 MonoMethod *prev_inlined_method;
4031 MonoInst **prev_locals, **prev_args;
4032 MonoType **prev_arg_types;
4033 guint prev_real_offset;
4034 GHashTable *prev_cbb_hash;
4035 MonoBasicBlock **prev_cil_offset_to_bb;
4036 MonoBasicBlock *prev_cbb;
4037 unsigned char* prev_cil_start;
4038 guint32 prev_cil_offset_to_bb_len;
4039 MonoMethod *prev_current_method;
4040 MonoGenericContext *prev_generic_context;
4042 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
/* optional name-based inlining limits (debugging aids, see above) */
4044 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
4045 if ((! inline_allways) && ! check_inline_called_method_name_limit (cmethod))
4048 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
4049 if ((! inline_allways) && ! check_inline_caller_method_name_limit (cfg->method))
4053 if (cfg->verbose_level > 2)
4054 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4056 if (!cmethod->inline_info) {
4057 mono_jit_stats.inlineable_methods++;
4058 cmethod->inline_info = 1;
4060 /* allocate space to store the return value */
4061 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
4062 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
4065 /* allocate local variables */
4066 cheader = mono_method_get_header (cmethod);
4067 prev_locals = cfg->locals;
4068 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
4069 for (i = 0; i < cheader->num_locals; ++i)
4070 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
4072 /* allocate start and end blocks */
4073 /* This is needed so if the inline is aborted, we can clean up */
4074 NEW_BBLOCK (cfg, sbblock);
4075 sbblock->real_offset = real_offset;
4077 NEW_BBLOCK (cfg, ebblock);
4078 ebblock->block_num = cfg->num_bblocks++;
4079 ebblock->real_offset = real_offset;
/* save the parts of cfg that mono_method_to_ir () overwrites */
4081 prev_args = cfg->args;
4082 prev_arg_types = cfg->arg_types;
4083 prev_inlined_method = cfg->inlined_method;
4084 cfg->inlined_method = cmethod;
4085 cfg->ret_var_set = FALSE;
4086 prev_real_offset = cfg->real_offset;
4087 prev_cbb_hash = cfg->cbb_hash;
4088 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
4089 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
4090 prev_cil_start = cfg->cil_start;
4091 prev_cbb = cfg->cbb;
4092 prev_current_method = cfg->current_method;
4093 prev_generic_context = cfg->generic_context;
/* recursively convert the callee's IL; negative cost means failure */
4095 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, *ip == CEE_CALLVIRT);
4097 cfg->inlined_method = prev_inlined_method;
4098 cfg->real_offset = prev_real_offset;
4099 cfg->cbb_hash = prev_cbb_hash;
4100 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
4101 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
4102 cfg->cil_start = prev_cil_start;
4103 cfg->locals = prev_locals;
4104 cfg->args = prev_args;
4105 cfg->arg_types = prev_arg_types;
4106 cfg->current_method = prev_current_method;
4107 cfg->generic_context = prev_generic_context;
/* accept the inline when the cost is below the threshold or it is forced */
4109 if ((costs >= 0 && costs < 60) || inline_allways) {
4110 if (cfg->verbose_level > 2)
4111 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4113 mono_jit_stats.inlined_methods++;
4115 /* always add some code to avoid block split failures */
4116 MONO_INST_NEW (cfg, ins, OP_NOP);
4117 MONO_ADD_INS (prev_cbb, ins);
4119 prev_cbb->next_bb = sbblock;
4120 link_bblock (cfg, prev_cbb, sbblock);
4123 * Get rid of the begin and end bblocks if possible to aid local
4126 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
4128 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
4129 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
4131 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
4132 MonoBasicBlock *prev = ebblock->in_bb [0];
4133 mono_merge_basic_blocks (cfg, prev, ebblock);
4135 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
4136 mono_merge_basic_blocks (cfg, prev_cbb, prev);
4137 cfg->cbb = prev_cbb;
4145 * If the inlined method contains only a throw, then the ret var is not
4146 * set, so set it to a dummy value.
4148 if (!cfg->ret_var_set) {
4149 static double r8_0 = 0.0;
4151 switch (rvar->type) {
4153 MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
4156 MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
4161 MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
4164 MONO_INST_NEW (cfg, ins, OP_R8CONST);
4165 ins->type = STACK_R8;
4166 ins->inst_p0 = (void*)&r8_0;
4167 ins->dreg = rvar->dreg;
4168 MONO_ADD_INS (cfg->cbb, ins);
4171 MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (fsig->ret));
4174 g_assert_not_reached ();
/* push the return value of the inlined method onto the eval stack */
4178 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
/* failure path: undo side effects of the aborted conversion */
4183 if (cfg->verbose_level > 2)
4184 printf ("INLINE ABORTED %s\n", mono_method_full_name (cmethod, TRUE));
4185 cfg->exception_type = MONO_EXCEPTION_NONE;
4186 mono_loader_clear_error ();
4188 /* This gets rid of the newly added bblocks */
4189 cfg->cbb = prev_cbb;
4195 * Some of these comments may well be out-of-date.
4196 * Design decisions: we do a single pass over the IL code (and we do bblock
4197 * splitting/merging in the few cases when it's required: a back jump to an IL
4198 * address that was not already seen as bblock starting point).
4199 * Code is validated as we go (full verification is still better left to metadata/verify.c).
4200 * Complex operations are decomposed in simpler ones right away. We need to let the
4201 * arch-specific code peek and poke inside this process somehow (except when the
4202 * optimizations can take advantage of the full semantic info of coarse opcodes).
4203 * All the opcodes of the form opcode.s are 'normalized' to opcode.
4204 * MonoInst->opcode initially is the IL opcode or some simplification of that
4205 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
4206 * opcode with value bigger than OP_LAST.
4207 * At this point the IR can be handed over to an interpreter, a dumb code generator
4208 * or to the optimizing code generator that will translate it to SSA form.
4210 * Profiling directed optimizations.
4211 * We may compile by default with few or no optimizations and instrument the code
4212 * or the user may indicate what methods to optimize the most either in a config file
4213 * or through repeated runs where the compiler applies offline the optimizations to
4214 * each method and then decides if it was worth it.
/* Verification helpers used while converting IL: each macro validates one
 * property of the current instruction/operand and expands to UNVERIFIED
 * (or jumps to load_error for type-load failures) when the check fails. */
4217 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
4218 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
4219 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
4220 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
4221 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
4222 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
4223 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
4224 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; goto load_error;}
4226 /* offset from br.s -> br like opcodes */
4227 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *
 *   Returns whether the IL address IP belongs to basic block BB, i.e. no
 * other basic block starts at that offset in cfg->cil_offset_to_bb.
 */
4230 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
4232 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
4234 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   Scan the IL stream between START and END and create basic blocks
 * (via GET_BBLOCK) at every branch target and at the instruction following
 * each branch, based on each opcode's operand kind. Also marks the bblock
 * containing a CEE_THROW as out-of-line so it can be moved out of the hot
 * path.
 */
4238 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
4240 unsigned char *ip = start;
4241 unsigned char *target;
4244 MonoBasicBlock *bblock;
4245 const MonoOpcode *opcode;
4248 cli_addr = ip - start;
4249 i = mono_opcode_value ((const guint8 **)&ip, end);
4252 opcode = &mono_opcodes [i];
/* advance ip past the operand; branches also create target bblocks */
4253 switch (opcode->argument) {
4254 case MonoInlineNone:
4257 case MonoInlineString:
4258 case MonoInlineType:
4259 case MonoInlineField:
4260 case MonoInlineMethod:
4263 case MonoShortInlineR:
4270 case MonoShortInlineVar:
4271 case MonoShortInlineI:
4274 case MonoShortInlineBrTarget:
/* 1-byte signed branch displacement, relative to the next instruction */
4275 target = start + cli_addr + 2 + (signed char)ip [1];
4276 GET_BBLOCK (cfg, bblock, target);
4279 GET_BBLOCK (cfg, bblock, ip);
4281 case MonoInlineBrTarget:
/* 4-byte signed branch displacement */
4282 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
4283 GET_BBLOCK (cfg, bblock, target);
4286 GET_BBLOCK (cfg, bblock, ip);
4288 case MonoInlineSwitch: {
4289 guint32 n = read32 (ip + 1);
/* switch: 4-byte count followed by n 4-byte targets, all relative to
 * the end of the instruction */
4292 cli_addr += 5 + 4 * n;
4293 target = start + cli_addr;
4294 GET_BBLOCK (cfg, bblock, target);
4296 for (j = 0; j < n; ++j) {
4297 target = start + cli_addr + (gint32)read32 (ip);
4298 GET_BBLOCK (cfg, bblock, target);
4308 g_assert_not_reached ();
4311 if (i == CEE_THROW) {
4312 unsigned char *bb_start = ip - 1;
4314 /* Find the start of the bblock containing the throw */
4316 while ((bb_start >= start) && !bblock) {
4317 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
4321 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *
 *   Resolve TOKEN in the context of method M to a MonoMethod, allowing
 * open constructed types. Wrapper methods store their "tokens" as wrapper
 * data instead of metadata tokens, so those are looked up directly.
 */
static inline MonoMethod *
4331 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4335 if (m->wrapper_type != MONO_WRAPPER_NONE)
4336 return mono_method_get_wrapper_data (m, token);
4338 method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 *
 *   Like mini_get_method_allow_open (), but when not compiling with generic
 * sharing, rejects methods on open constructed types (the branch body is
 * not visible in this excerpt).
 */
static inline MonoMethod *
4344 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4346 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
4348 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 *
 *   Resolve TOKEN in the context of METHOD to a MonoClass and initialize it.
 * Wrapper methods store their "tokens" as wrapper data instead of metadata
 * tokens.
 */
static inline MonoClass*
4355 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
4359 if (method->wrapper_type != MONO_WRAPPER_NONE)
4360 klass = mono_method_get_wrapper_data (method, token);
4362 klass = mono_class_get_full (method->klass->image, token, context);
4364 mono_class_init (klass);
/*
 * check_linkdemand:
 *
 *   Evaluate CAS link demands when CALLER invokes CALLEE. For an ECMA link
 * demand, IR is emitted to throw a SecurityException before the call; other
 * failures are recorded on the cfg (without clobbering an earlier failure).
 */
4369 * Returns TRUE if the JIT should abort inlining because "callee"
4370 * is influenced by security attributes.
4373 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
/* only check when compiling an inlined call into a different method */
4377 if ((cfg->method != caller) && mono_method_has_declsec (callee)) {
4381 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
4382 if (result == MONO_JIT_SECURITY_OK)
4385 if (result == MONO_JIT_LINKDEMAND_ECMA) {
4386 /* Generate code to throw a SecurityException before the actual call/link */
4387 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4390 NEW_ICONST (cfg, args [0], 4);
4391 NEW_METHODCONST (cfg, args [1], caller);
4392 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
4393 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
4394 /* don't hide previous results */
4395 cfg->exception_type = MONO_EXCEPTION_SECURITY_LINKDEMAND;
4396 cfg->exception_data = result;
/*
 * method_access_exception:
 *
 *   Returns (cached) the SecurityManager.MethodAccessException helper
 * method used to raise method-access violations.
 */
4404 method_access_exception (void)
4406 static MonoMethod *method = NULL;
4409 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4410 method = mono_class_get_method_from_name (secman->securitymanager,
4411 "MethodAccessException", 2);
/*
 * emit_throw_method_access_exception:
 *
 *   Emit IR calling SecurityManager.MethodAccessException (caller, callee),
 * which raises the exception at run time.
 */
4418 emit_throw_method_access_exception (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
4419 MonoBasicBlock *bblock, unsigned char *ip)
4421 MonoMethod *thrower = method_access_exception ();
4424 EMIT_NEW_METHODCONST (cfg, args [0], caller);
4425 EMIT_NEW_METHODCONST (cfg, args [1], callee);
4426 mono_emit_method_call (cfg, thrower, args, NULL);
/*
 * verification_exception:
 *
 *   Returns (cached) the SecurityManager.VerificationException helper
 * method used to raise verification failures at run time.
 */
4430 verification_exception (void)
4432 static MonoMethod *method = NULL;
4435 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4436 method = mono_class_get_method_from_name (secman->securitymanager,
4437 "VerificationException", 0);
/*
 * emit_throw_verification_exception:
 *
 *   Emit IR calling SecurityManager.VerificationException (), which raises
 * the exception at run time.
 */
4444 emit_throw_verification_exception (MonoCompile *cfg, MonoBasicBlock *bblock, unsigned char *ip)
4446 MonoMethod *thrower = verification_exception ();
4448 mono_emit_method_call (cfg, thrower, NULL, NULL);
/*
 * ensure_method_is_allowed_to_call_method:
 *
 *   CoreCLR security: verify that CALLER's security level permits calling
 * CALLEE. A call is allowed when the caller's level is >= the callee's, or
 * either side is SafeCritical; otherwise IR throwing a
 * MethodAccessException is emitted before the call.
 */
4452 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
4453 MonoBasicBlock *bblock, unsigned char *ip)
4455 MonoSecurityCoreCLRLevel caller_level = mono_security_core_clr_method_level (caller, TRUE);
4456 MonoSecurityCoreCLRLevel callee_level = mono_security_core_clr_method_level (callee, TRUE);
4457 gboolean is_safe = TRUE;
4459 if (!(caller_level >= callee_level ||
4460 caller_level == MONO_SECURITY_CORE_CLR_SAFE_CRITICAL ||
4461 callee_level == MONO_SECURITY_CORE_CLR_SAFE_CRITICAL)) {
4466 emit_throw_method_access_exception (cfg, caller, callee, bblock, ip);
/*
 * method_is_safe:
 *
 *   Test hook: methods named "unsafeMethod" are treated specially (the
 * branch body and the default return are not visible in this excerpt).
 */
4470 method_is_safe (MonoMethod *method)
4473 if (strcmp (method->name, "unsafeMethod") == 0)
4480 * Check that the IL instructions at ip are the array initialization
4481 * sequence and return the pointer to the data and the size.
/*
 * initialize_array_data:
 *
 * Pattern-match the IL sequence following a `newarr` that initializes an
 * array from a metadata field blob:
 *   dup; ldtoken <field>; call RuntimeHelpers::InitializeArray (...)
 * On a match, return a pointer to the raw field data (or, when AOT
 * compiling a non-dynamic image, the field's RVA as a packed pointer so
 * the real lookup can happen at load time), and report the total byte
 * size via OUT_SIZE and the field token via OUT_FIELD_TOKEN.
 * Returns NULL when the pattern does not match or the element type
 * cannot be handled (e.g. multi-byte types on big-endian targets).
 * NOTE(review): several lines (declarations, early-outs, element-size
 * cases, final return) are not visible in this view.
 */
4484 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
4487 * newarr[System.Int32]
4489 * ldtoken field valuetype ...
4490 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* ip [5] == 0x4 checks the ldtoken operand's token table (a Field token) */
4492 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
4493 guint32 token = read32 (ip + 7);
4494 guint32 field_token = read32 (ip + 2);
4495 guint32 field_index = field_token & 0xffffff;
4497 const char *data_ptr;
4499 MonoMethod *cmethod;
4500 MonoClass *dummy_class;
4501 MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
4507 *out_field_token = field_token;
4509 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* only accept the real corlib RuntimeHelpers.InitializeArray */
4512 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
4514 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
4515 case MONO_TYPE_BOOLEAN:
4519 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
4520 #if G_BYTE_ORDER == G_LITTLE_ENDIAN
4521 case MONO_TYPE_CHAR:
4531 return NULL; /* stupid ARM FP swapped format */
/* the blob must be large enough for len elements of this size */
4541 if (size > mono_type_size (field->type, &dummy_align))
4544 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
4545 if (!method->klass->image->dynamic) {
4546 field_index = read32 (ip + 2) & 0xffffff;
4547 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
4548 data_ptr = mono_image_rva_map (method->klass->image, rva);
4549 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
4550 /* for aot code we do the lookup on load */
4551 if (aot && data_ptr)
4552 return GUINT_TO_POINTER (rva);
4554 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
4556 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *
 * Record an InvalidProgramException on CFG for invalid IL found in
 * METHOD at IP.  The message embeds the method's full name plus either
 * "method body is empty." (code_size == 0) or a disassembly of the
 * offending instruction.  Both temporary strings are freed here;
 * cfg->exception_message owns its own copy via g_strdup_printf ().
 */
4564 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
4566 char *method_fname = mono_method_full_name (method, TRUE);
4569 if (mono_method_get_header (method)->code_size == 0)
4570 method_code = g_strdup ("method body is empty.");
4572 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
4573 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
4574 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
4575 g_free (method_fname);
4576 g_free (method_code);
/*
 * set_exception_object:
 *
 * Record a pre-built managed exception object on CFG to be thrown when
 * compilation aborts.  cfg->exception_ptr is registered as a GC root
 * first so the collector keeps EXCEPTION alive while the MonoCompile
 * holds the only reference.
 */
4580 set_exception_object (MonoCompile *cfg, MonoException *exception)
4582 cfg->exception_type = MONO_EXCEPTION_OBJECT_SUPPLIED;
4583 MONO_GC_REGISTER_ROOT (cfg->exception_ptr);
4584 cfg->exception_ptr = exception;
/*
 * generic_class_is_reference_type:
 *
 * Return TRUE when KLASS is a reference type, resolving type variables
 * through the generic sharing context when one is active (so a shared
 * type parameter is classified by its basic/underlying type rather than
 * by the unresolved variable).
 */
4588 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
4592 if (cfg->generic_sharing_context)
4593 type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, &klass->byval_arg)
4595 type = &klass->byval_arg;
4596 return MONO_TYPE_IS_REFERENCE (type);
4600 * mono_decompose_array_access_opts:
4602 * Decompose array access opcodes.
4603 * This should be in decompose.c, but it emits calls so it has to stay here until
4604 * the old JIT is gone.
/*
 * mono_decompose_array_access_opts:
 *
 * Lower the high-level array-access opcodes (array/string length loads,
 * bounds checks, newarr) in every basic block flagged has_array_access
 * into plain memory loads, arch-specific bounds-check sequences, or JIT
 * icalls.  New code is emitted into a scratch bblock (first_bb) and then
 * spliced over the original instruction with mono_replace_ins (), so the
 * method's CFG itself is left untouched.
 */
4607 mono_decompose_array_access_opts (MonoCompile *cfg)
4609 MonoBasicBlock *bb, *first_bb;
4612 * Unlike decompose_long_opts, this pass does not alter the CFG of the method so it
4613 * can be executed anytime. It should be run before decompose_long
4617 * Create a dummy bblock and emit code into it so we can use the normal
4618 * code generation macros.
4620 cfg->cbb = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoBasicBlock));
4621 first_bb = cfg->cbb;
4623 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
4625 MonoInst *prev = NULL;
4627 MonoInst *iargs [3];
/* skip blocks with nothing to decompose */
4630 if (!bb->has_array_access)
4633 if (cfg->verbose_level > 3) mono_print_bb (bb, "BEFORE DECOMPOSE-ARRAY-ACCESS-OPTS ");
4635 cfg->cbb->code = cfg->cbb->last_ins = NULL;
4641 for (ins = bb->code; ins; ins = ins->next) {
4642 switch (ins->opcode) {
/* array length -> i4 load of MonoArray::max_length */
4644 NEW_LOAD_MEMBASE (cfg, dest, OP_LOADI4_MEMBASE, ins->dreg, ins->sreg1,
4645 G_STRUCT_OFFSET (MonoArray, max_length));
4646 MONO_ADD_INS (cfg->cbb, dest);
4648 case OP_BOUNDS_CHECK:
4649 MONO_ARCH_EMIT_BOUNDS_CHECK (cfg, ins->sreg1, ins->inst_imm, ins->sreg2);
/* newarr: shared code passes (domain, class, len) to mono_array_new;
 * otherwise the precomputed vtable goes to mono_array_new_specific */
4652 if (cfg->opt & MONO_OPT_SHARED) {
4653 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4654 EMIT_NEW_CLASSCONST (cfg, iargs [1], ins->inst_newa_class);
4655 MONO_INST_NEW (cfg, iargs [2], OP_MOVE);
4656 iargs [2]->dreg = ins->sreg1;
4658 dest = mono_emit_jit_icall (cfg, mono_array_new, iargs);
4659 dest->dreg = ins->dreg;
4661 MonoVTable *vtable = mono_class_vtable (cfg->domain, mono_array_class_get (ins->inst_newa_class, 1));
4664 NEW_VTABLECONST (cfg, iargs [0], vtable);
4665 MONO_ADD_INS (cfg->cbb, iargs [0]);
4666 MONO_INST_NEW (cfg, iargs [1], OP_MOVE);
4667 iargs [1]->dreg = ins->sreg1;
4669 dest = mono_emit_jit_icall (cfg, mono_array_new_specific, iargs);
4670 dest->dreg = ins->dreg;
/* string length -> i4 load of MonoString::length */
4674 NEW_LOAD_MEMBASE (cfg, dest, OP_LOADI4_MEMBASE, ins->dreg,
4675 ins->sreg1, G_STRUCT_OFFSET (MonoString, length));
4676 MONO_ADD_INS (cfg->cbb, dest);
4682 g_assert (cfg->cbb == first_bb);
4684 if (cfg->cbb->code || (cfg->cbb != first_bb)) {
4685 /* Replace the original instruction with the new code sequence */
4687 mono_replace_ins (cfg, bb, ins, &prev, first_bb, cfg->cbb);
/* reset the scratch bblock for the next instruction */
4688 first_bb->code = first_bb->last_ins = NULL;
4689 first_bb->in_count = first_bb->out_count = 0;
4690 cfg->cbb = first_bb;
4697 if (cfg->verbose_level > 3) mono_print_bb (bb, "AFTER DECOMPOSE-ARRAY-ACCESS-OPTS ");
4707 #ifdef MONO_ARCH_SOFT_FLOAT
4710 * mono_decompose_soft_float:
4712 * Soft float support on ARM. We store each double value in a pair of integer vregs,
4713 * similar to long support on 32 bit platforms. 32 bit float values require special
4714 * handling when used as locals, arguments, and in calls.
4715 * One big problem with soft-float is that there are few r4 test cases in our test suite.
/*
 * mono_decompose_soft_float:
 *
 * Lower all floating-point opcodes for soft-float targets (ARM without
 * an FPU): r8 values become integer vreg pairs handled by the long
 * opcodes, r4 loads/stores go through the mono_fload_r4/mono_fstore_r4
 * icalls, fp calls become long calls (or int calls plus an r4->r8
 * conversion), and fp compares/branches become emulation icalls followed
 * by integer compares.  Uses the same scratch-bblock + mono_replace_ins
 * splicing scheme as mono_decompose_array_access_opts () above.
 */
4718 mono_decompose_soft_float (MonoCompile *cfg)
4720 MonoBasicBlock *bb, *first_bb;
4723 * This pass creates long opcodes, so it should be run before decompose_long_opts ().
4727 * Create a dummy bblock and emit code into it so we can use the normal
4728 * code generation macros.
4730 cfg->cbb = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoBasicBlock));
4731 first_bb = cfg->cbb;
4733 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
4735 MonoInst *prev = NULL;
4738 if (cfg->verbose_level > 3) mono_print_bb (bb, "BEFORE HANDLE-SOFT-FLOAT ");
4740 cfg->cbb->code = cfg->cbb->last_ins = NULL;
4746 for (ins = bb->code; ins; ins = ins->next) {
4747 const char *spec = INS_INFO (ins->opcode);
4749 /* Most fp operations are handled automatically by opcode emulation */
4751 switch (ins->opcode) {
/* r8 constant -> reinterpret the double bits as an i8 constant */
4754 d.vald = *(double*)ins->inst_p0;
4755 MONO_EMIT_NEW_I8CONST (cfg, ins->dreg, d.vall);
4760 /* We load the r8 value */
4761 d.vald = *(float*)ins->inst_p0;
4762 MONO_EMIT_NEW_I8CONST (cfg, ins->dreg, d.vall);
/* fp moves become long/int moves on the paired vregs */
4766 ins->opcode = OP_LMOVE;
4769 ins->opcode = OP_MOVE;
4770 ins->sreg1 = ins->sreg1 + 1;
4773 ins->opcode = OP_MOVE;
4774 ins->sreg1 = ins->sreg1 + 2;
/* fp return value -> return the two halves of the vreg pair */
4777 int reg = ins->sreg1;
4779 ins->opcode = OP_SETLRET;
4781 ins->sreg1 = reg + 1;
4782 ins->sreg2 = reg + 2;
4785 case OP_LOADR8_MEMBASE:
4786 ins->opcode = OP_LOADI8_MEMBASE;
4788 case OP_STORER8_MEMBASE_REG:
4789 ins->opcode = OP_STOREI8_MEMBASE_REG;
/* r4 store: call mono_fstore_r4 (value, addr) to narrow r8 -> r4 */
4791 case OP_STORER4_MEMBASE_REG: {
4792 MonoInst *iargs [2];
4795 /* Arg 1 is the double value */
4796 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
4797 iargs [0]->dreg = ins->sreg1;
4799 /* Arg 2 is the address to store to */
4800 addr_reg = mono_alloc_preg (cfg);
4801 EMIT_NEW_BIALU_IMM (cfg, iargs [1], OP_PADD_IMM, addr_reg, ins->inst_destbasereg, ins->inst_offset);
4802 mono_emit_jit_icall (cfg, mono_fstore_r4, iargs);
/* r4 load: call mono_fload_r4 (addr) to widen r4 -> r8 */
4806 case OP_LOADR4_MEMBASE: {
4807 MonoInst *iargs [1];
4811 addr_reg = mono_alloc_preg (cfg);
4812 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, addr_reg, ins->inst_basereg, ins->inst_offset);
4813 conv = mono_emit_jit_icall (cfg, mono_fload_r4, iargs);
4814 conv->dreg = ins->dreg;
4819 case OP_FCALL_MEMBASE: {
4820 MonoCallInst *call = (MonoCallInst*)ins;
/* calls returning r4: rewrite to an int call, then widen r4 -> r8 */
4821 if (call->signature->ret->type == MONO_TYPE_R4) {
4822 MonoCallInst *call2;
4823 MonoInst *iargs [1];
4826 /* Convert the call into a call returning an int */
4827 MONO_INST_NEW_CALL (cfg, call2, OP_CALL);
4828 memcpy (call2, call, sizeof (MonoCallInst));
4829 switch (ins->opcode) {
4831 call2->inst.opcode = OP_CALL;
4834 call2->inst.opcode = OP_CALL_REG;
4836 case OP_FCALL_MEMBASE:
4837 call2->inst.opcode = OP_CALL_MEMBASE;
4840 g_assert_not_reached ();
4842 call2->inst.dreg = mono_alloc_ireg (cfg);
4843 MONO_ADD_INS (cfg->cbb, (MonoInst*)call2);
4845 /* FIXME: Optimize this */
4847 /* Emit an r4->r8 conversion */
4848 EMIT_NEW_VARLOADA_VREG (cfg, iargs [0], call2->inst.dreg, &mono_defaults.int32_class->byval_arg);
4849 conv = mono_emit_jit_icall (cfg, mono_fload_r4, iargs);
4850 conv->dreg = ins->dreg;
/* calls returning r8: just retype the call as a long call */
4852 switch (ins->opcode) {
4854 ins->opcode = OP_LCALL;
4857 ins->opcode = OP_LCALL_REG;
4859 case OP_FCALL_MEMBASE:
4860 ins->opcode = OP_LCALL_MEMBASE;
4863 g_assert_not_reached ();
4869 MonoJitICallInfo *info;
4870 MonoInst *iargs [2];
4871 MonoInst *call, *cmp, *br;
4873 /* Convert fcompare+fbcc to icall+icompare+beq */
/* the emulation icall is chosen from the FOLLOWING branch opcode */
4875 info = mono_find_jit_opcode_emulation (ins->next->opcode);
4878 /* Create dummy MonoInst's for the arguments */
4879 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
4880 iargs [0]->dreg = ins->sreg1;
4881 MONO_INST_NEW (cfg, iargs [1], OP_ARG);
4882 iargs [1]->dreg = ins->sreg2;
4884 call = mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, iargs);
4886 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE_IMM);
4887 cmp->sreg1 = call->dreg;
4889 MONO_ADD_INS (cfg->cbb, cmp);
/* branch on the icall's result, reusing the original branch targets */
4891 MONO_INST_NEW (cfg, br, OP_IBNE_UN);
4892 br->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * 2);
4893 br->inst_true_bb = ins->next->inst_true_bb;
4894 br->inst_false_bb = ins->next->inst_false_bb;
4895 MONO_ADD_INS (cfg->cbb, br);
4897 /* The call sequence might include fp ins */
4900 /* Skip fbcc or fccc */
4901 NULLIFY_INS (ins->next);
4909 MonoJitICallInfo *info;
4910 MonoInst *iargs [2];
4913 /* Convert fccc to icall+icompare+iceq */
4915 info = mono_find_jit_opcode_emulation (ins->opcode);
4918 /* Create dummy MonoInst's for the arguments */
4919 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
4920 iargs [0]->dreg = ins->sreg1;
4921 MONO_INST_NEW (cfg, iargs [1], OP_ARG);
4922 iargs [1]->dreg = ins->sreg2;
4924 call = mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, iargs);
4926 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, call->dreg, 1);
4927 MONO_EMIT_NEW_UNALU (cfg, OP_ICEQ, ins->dreg, -1);
4929 /* The call sequence might include fp ins */
/* any fp opcode not handled above is a bug in this pass */
4934 if (spec [MONO_INST_SRC1] == 'f' || spec [MONO_INST_SRC2] == 'f' || spec [MONO_INST_DEST] == 'f') {
4935 mono_print_ins (ins);
4936 g_assert_not_reached ();
4941 g_assert (cfg->cbb == first_bb);
4943 if (cfg->cbb->code || (cfg->cbb != first_bb)) {
4944 /* Replace the original instruction with the new code sequence */
4946 mono_replace_ins (cfg, bb, ins, &prev, first_bb, cfg->cbb);
4947 first_bb->code = first_bb->last_ins = NULL;
4948 first_bb->in_count = first_bb->out_count = 0;
4949 cfg->cbb = first_bb;
4956 if (cfg->verbose_level > 3) mono_print_bb (bb, "AFTER HANDLE-SOFT-FLOAT ");
/* this pass produced long opcodes; lower them now */
4959 mono_decompose_long_opts (cfg);
4965 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
4968 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
4969 if ((opcode == OP_MOVE) && ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
4970 /* Optimize reg-reg moves away */
4972 * Can't optimize other opcodes, since sp[0] might point to
4973 * the last ins of a decomposed opcode.
4975 sp [0]->dreg = (cfg)->locals [n]->dreg;
4977 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
4982 * ldloca inhibits many optimizations so try to get rid of it in common
/*
 * emit_optimized_ldloca_ir:
 *
 * Peephole for `ldloca` followed by `initobj` within the same bblock:
 * instead of taking the local's address (which would inhibit later
 * optimizations), directly initialize the local -- NULL for reference
 * types, vzero for value types.  On success returns the ip to resume
 * from past the consumed instructions; the not-visible paths presumably
 * return NULL when the pattern does not apply -- confirm against the
 * full source.  SIZE selects the short/long ldloca encoding.
 */
4985 static inline unsigned char *
4986 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
4995 local = read16 (ip + 2);
4999 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
5000 gboolean skip = FALSE;
5002 /* From the INITOBJ case */
5003 token = read32 (ip + 2);
5004 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
5005 CHECK_TYPELOAD (klass);
5006 if (generic_class_is_reference_type (cfg, klass)) {
5007 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5008 } else if (MONO_TYPE_IS_REFERENCE (&klass->byval_arg)) {
5009 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5010 } else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
5011 MONO_EMIT_NEW_VZERO (cfg, cfg->locals [local]->dreg, klass);
5024 * mono_method_to_ir:
5026 * Translate the .net IL into linear IR.
5029 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
5030 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
5031 guint inline_offset, gboolean is_virtual_call)
5033 MonoInst *ins, **sp, **stack_start;
5034 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
5035 MonoMethod *cmethod, *method_definition;
5036 MonoInst **arg_array;
5037 MonoMethodHeader *header;
5039 guint32 token, ins_flag;
5041 MonoClass *constrained_call = NULL;
5042 unsigned char *ip, *end, *target, *err_pos;
5043 static double r8_0 = 0.0;
5044 MonoMethodSignature *sig;
5045 MonoGenericContext *generic_context = NULL;
5046 MonoGenericContainer *generic_container = NULL;
5047 MonoType **param_types;
5048 int i, n, start_new_bblock, dreg;
5049 int num_calls = 0, inline_costs = 0;
5050 int breakpoint_id = 0;
5052 MonoBoolean security, pinvoke;
5053 MonoSecurityManager* secman = NULL;
5054 MonoDeclSecurityActions actions;
5055 GSList *class_inits = NULL;
5056 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
5059 /* serialization and xdomain stuff may need access to private fields and methods */
5060 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
5061 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
5062 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
5063 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
5064 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
5065 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
5067 dont_verify |= mono_security_get_mode () == MONO_SECURITY_MODE_SMCS_HACK;
5069 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
5070 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
5071 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
5072 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
5074 image = method->klass->image;
5075 header = mono_method_get_header (method);
5076 generic_container = mono_method_get_generic_container (method);
5077 sig = mono_method_signature (method);
5078 num_args = sig->hasthis + sig->param_count;
5079 ip = (unsigned char*)header->code;
5080 cfg->cil_start = ip;
5081 end = ip + header->code_size;
5082 mono_jit_stats.cil_code_size += header->code_size;
5084 method_definition = method;
5085 while (method_definition->is_inflated) {
5086 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
5087 method_definition = imethod->declaring;
5090 /* SkipVerification is not allowed if core-clr is enabled */
5091 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
5093 dont_verify_stloc = TRUE;
5096 if (!dont_verify && mini_method_verify (cfg, method_definition))
5097 goto exception_exit;
5099 if (mono_debug_using_mono_debugger ())
5100 cfg->keep_cil_nops = TRUE;
5102 if (sig->is_inflated)
5103 generic_context = mono_method_get_context (method);
5104 else if (generic_container)
5105 generic_context = &generic_container->context;
5106 cfg->generic_context = generic_context;
5108 if (!cfg->generic_sharing_context)
5109 g_assert (!sig->has_type_parameters);
5111 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
5112 g_assert (method->is_inflated);
5113 g_assert (mono_method_get_context (method)->method_inst);
5115 if (method->is_inflated && mono_method_get_context (method)->method_inst)
5116 g_assert (sig->generic_param_count);
5118 if (cfg->method == method) {
5119 cfg->real_offset = 0;
5121 cfg->real_offset = inline_offset;
5124 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
5125 cfg->cil_offset_to_bb_len = header->code_size;
5127 cfg->current_method = method;
5129 if (cfg->verbose_level > 2)
5130 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
5132 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
5134 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
5135 for (n = 0; n < sig->param_count; ++n)
5136 param_types [n + sig->hasthis] = sig->params [n];
5137 cfg->arg_types = param_types;
5139 dont_inline = g_list_prepend (dont_inline, method);
5140 if (cfg->method == method) {
5142 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
5143 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
5146 NEW_BBLOCK (cfg, start_bblock);
5147 cfg->bb_entry = start_bblock;
5148 start_bblock->cil_code = NULL;
5149 start_bblock->cil_length = 0;
5152 NEW_BBLOCK (cfg, end_bblock);
5153 cfg->bb_exit = end_bblock;
5154 end_bblock->cil_code = NULL;
5155 end_bblock->cil_length = 0;
5156 g_assert (cfg->num_bblocks == 2);
5158 arg_array = cfg->args;
5160 if (header->num_clauses) {
5161 cfg->spvars = g_hash_table_new (NULL, NULL);
5162 cfg->exvars = g_hash_table_new (NULL, NULL);
5164 /* handle exception clauses */
5165 for (i = 0; i < header->num_clauses; ++i) {
5166 MonoBasicBlock *try_bb;
5167 MonoExceptionClause *clause = &header->clauses [i];
5168 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
5169 try_bb->real_offset = clause->try_offset;
5170 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
5171 tblock->real_offset = clause->handler_offset;
5172 tblock->flags |= BB_EXCEPTION_HANDLER;
5174 link_bblock (cfg, try_bb, tblock);
5176 if (*(ip + clause->handler_offset) == CEE_POP)
5177 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
5179 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
5180 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
5181 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
5182 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5183 MONO_ADD_INS (tblock, ins);
5185 /* todo: is a fault block unsafe to optimize? */
5186 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
5187 tblock->flags |= BB_EXCEPTION_UNSAFE;
5191 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
5193 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
5195 /* catch and filter blocks get the exception object on the stack */
5196 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
5197 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5198 MonoInst *dummy_use;
5200 /* mostly like handle_stack_args (), but just sets the input args */
5201 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
5202 tblock->in_scount = 1;
5203 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5204 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5207 * Add a dummy use for the exvar so its liveness info will be
5211 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
5213 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5214 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
5215 tblock->flags |= BB_EXCEPTION_HANDLER;
5216 tblock->real_offset = clause->data.filter_offset;
5217 tblock->in_scount = 1;
5218 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5219 /* The filter block shares the exvar with the handler block */
5220 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5221 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5222 MONO_ADD_INS (tblock, ins);
5226 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
5227 clause->data.catch_class &&
5228 cfg->generic_sharing_context &&
5229 mono_class_check_context_used (clause->data.catch_class)) {
5230 if (mono_method_get_context (method)->method_inst)
5231 GENERIC_SHARING_FAILURE (CEE_NOP);
5234 * In shared generic code with catch
5235 * clauses containing type variables
5236 * the exception handling code has to
5237 * be able to get to the rgctx.
5238 * Therefore we have to make sure that
5239 * the vtable/mrgctx argument (for
5240 * static or generic methods) or the
5241 * "this" argument (for non-static
5242 * methods) are live.
5244 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5245 mini_method_get_context (method)->method_inst ||
5246 method->klass->valuetype) {
5247 mono_get_vtable_var (cfg);
5249 MonoInst *dummy_use;
5251 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
5256 arg_array = alloca (sizeof (MonoInst *) * num_args);
5257 cfg->cbb = start_bblock;
5258 cfg->args = arg_array;
5259 mono_save_args (cfg, sig, inline_args);
5262 /* FIRST CODE BLOCK */
5263 NEW_BBLOCK (cfg, bblock);
5264 bblock->cil_code = ip;
5268 ADD_BBLOCK (cfg, bblock);
5270 if (cfg->method == method) {
5271 breakpoint_id = mono_debugger_method_has_breakpoint (method);
5272 if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
5273 MONO_INST_NEW (cfg, ins, OP_BREAK);
5274 MONO_ADD_INS (bblock, ins);
5278 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5279 secman = mono_security_manager_get_methods ();
5281 security = (secman && mono_method_has_declsec (method));
5282 /* at this point having security doesn't mean we have any code to generate */
5283 if (security && (cfg->method == method)) {
5284 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
5285 * And we do not want to enter the next section (with allocation) if we
5286 * have nothing to generate */
5287 security = mono_declsec_get_demands (method, &actions);
5290 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
5291 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
5293 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5294 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5295 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
5297 /* unless the method or it's class has the [SuppressUnmanagedCodeSecurity] attribute */
5298 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5302 mono_custom_attrs_free (custom);
5305 custom = mono_custom_attrs_from_class (wrapped->klass);
5306 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5310 mono_custom_attrs_free (custom);
5313 /* not a P/Invoke after all */
5318 if ((header->init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
5319 /* we use a separate basic block for the initialization code */
5320 NEW_BBLOCK (cfg, init_localsbb);
5321 cfg->bb_init = init_localsbb;
5322 init_localsbb->real_offset = cfg->real_offset;
5323 start_bblock->next_bb = init_localsbb;
5324 init_localsbb->next_bb = bblock;
5325 link_bblock (cfg, start_bblock, init_localsbb);
5326 link_bblock (cfg, init_localsbb, bblock);
5328 cfg->cbb = init_localsbb;
5330 start_bblock->next_bb = bblock;
5331 link_bblock (cfg, start_bblock, bblock);
5334 /* at this point we know, if security is TRUE, that some code needs to be generated */
5335 if (security && (cfg->method == method)) {
5338 mono_jit_stats.cas_demand_generation++;
5340 if (actions.demand.blob) {
5341 /* Add code for SecurityAction.Demand */
5342 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
5343 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
5344 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5345 mono_emit_method_call (cfg, secman->demand, args, NULL);
5347 if (actions.noncasdemand.blob) {
5348 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
5349 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
5350 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
5351 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
5352 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5353 mono_emit_method_call (cfg, secman->demand, args, NULL);
5355 if (actions.demandchoice.blob) {
5356 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
5357 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
5358 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
5359 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
5360 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
5364 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
5366 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
5369 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
5370 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
5371 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5372 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5373 if (!(method->klass && method->klass->image &&
5374 mono_security_core_clr_is_platform_image (method->klass->image))) {
5375 emit_throw_method_access_exception (cfg, method, wrapped, bblock, ip);
5379 if (!method_is_safe (method))
5380 emit_throw_verification_exception (cfg, bblock, ip);
5383 if (header->code_size == 0)
5386 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
5391 if (cfg->method == method)
5392 mono_debug_init_method (cfg, bblock, breakpoint_id);
5394 for (n = 0; n < header->num_locals; ++n) {
5395 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
5400 /* add a check for this != NULL to inlined methods */
5401 if (is_virtual_call) {
5404 NEW_ARGLOAD (cfg, arg_ins, 0);
5405 MONO_ADD_INS (cfg->cbb, arg_ins);
5406 cfg->flags |= MONO_CFG_HAS_CHECK_THIS;
5407 MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, arg_ins->dreg);
5408 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, arg_ins->dreg);
5411 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
5412 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
5415 start_new_bblock = 0;
5419 if (cfg->method == method)
5420 cfg->real_offset = ip - header->code;
5422 cfg->real_offset = inline_offset;
5427 if (start_new_bblock) {
5428 bblock->cil_length = ip - bblock->cil_code;
5429 if (start_new_bblock == 2) {
5430 g_assert (ip == tblock->cil_code);
5432 GET_BBLOCK (cfg, tblock, ip);
5434 bblock->next_bb = tblock;
5437 start_new_bblock = 0;
5438 for (i = 0; i < bblock->in_scount; ++i) {
5439 if (cfg->verbose_level > 3)
5440 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5441 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5445 g_slist_free (class_inits);
5448 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
5449 link_bblock (cfg, bblock, tblock);
5450 if (sp != stack_start) {
5451 handle_stack_args (cfg, stack_start, sp - stack_start);
5453 CHECK_UNVERIFIABLE (cfg);
5455 bblock->next_bb = tblock;
5458 for (i = 0; i < bblock->in_scount; ++i) {
5459 if (cfg->verbose_level > 3)
5460 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5461 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5464 g_slist_free (class_inits);
5469 bblock->real_offset = cfg->real_offset;
5471 if ((cfg->method == method) && cfg->coverage_info) {
5472 guint32 cil_offset = ip - header->code;
5473 cfg->coverage_info->data [cil_offset].cil_code = ip;
5475 /* TODO: Use an increment here */
5476 #if defined(__i386__)
5477 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
5478 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
5480 MONO_ADD_INS (cfg->cbb, ins);
5482 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
5483 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
5487 if (cfg->verbose_level > 3)
5488 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
5492 if (cfg->keep_cil_nops)
5493 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
5495 MONO_INST_NEW (cfg, ins, OP_NOP);
5497 MONO_ADD_INS (bblock, ins);
5500 MONO_INST_NEW (cfg, ins, OP_BREAK);
5502 MONO_ADD_INS (bblock, ins);
5508 CHECK_STACK_OVF (1);
5509 n = (*ip)-CEE_LDARG_0;
5511 EMIT_NEW_ARGLOAD (cfg, ins, n);
5519 CHECK_STACK_OVF (1);
5520 n = (*ip)-CEE_LDLOC_0;
5522 EMIT_NEW_LOCLOAD (cfg, ins, n);
5531 n = (*ip)-CEE_STLOC_0;
5534 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
5536 emit_stloc_ir (cfg, sp, header, n);
5543 CHECK_STACK_OVF (1);
5546 EMIT_NEW_ARGLOAD (cfg, ins, n);
5552 CHECK_STACK_OVF (1);
5555 NEW_ARGLOADA (cfg, ins, n);
5556 MONO_ADD_INS (cfg->cbb, ins);
5566 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
5568 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
5573 CHECK_STACK_OVF (1);
5576 EMIT_NEW_LOCLOAD (cfg, ins, n);
5580 case CEE_LDLOCA_S: {
5581 unsigned char *tmp_ip;
5583 CHECK_STACK_OVF (1);
5584 CHECK_LOCAL (ip [1]);
5586 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
5592 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
5601 CHECK_LOCAL (ip [1]);
5602 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
5604 emit_stloc_ir (cfg, sp, header, ip [1]);
5609 CHECK_STACK_OVF (1);
5610 EMIT_NEW_PCONST (cfg, ins, NULL);
5611 ins->type = STACK_OBJ;
5616 CHECK_STACK_OVF (1);
5617 EMIT_NEW_ICONST (cfg, ins, -1);
5630 CHECK_STACK_OVF (1);
5631 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
5637 CHECK_STACK_OVF (1);
5639 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
5645 CHECK_STACK_OVF (1);
5646 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
5652 CHECK_STACK_OVF (1);
5653 MONO_INST_NEW (cfg, ins, OP_I8CONST);
5654 ins->type = STACK_I8;
5655 ins->dreg = alloc_dreg (cfg, STACK_I8);
5657 ins->inst_l = (gint64)read64 (ip);
5658 MONO_ADD_INS (bblock, ins);
5664 /* FIXME: we should really allocate this only late in the compilation process */
5665 mono_domain_lock (cfg->domain);
5666 f = mono_domain_alloc (cfg->domain, sizeof (float));
5667 mono_domain_unlock (cfg->domain);
5669 CHECK_STACK_OVF (1);
5670 MONO_INST_NEW (cfg, ins, OP_R4CONST);
5671 ins->type = STACK_R8;
5672 ins->dreg = alloc_dreg (cfg, STACK_R8);
5676 MONO_ADD_INS (bblock, ins);
5684 /* FIXME: we should really allocate this only late in the compilation process */
5685 mono_domain_lock (cfg->domain);
5686 d = mono_domain_alloc (cfg->domain, sizeof (double));
5687 mono_domain_unlock (cfg->domain);
5689 CHECK_STACK_OVF (1);
5690 MONO_INST_NEW (cfg, ins, OP_R8CONST);
5691 ins->type = STACK_R8;
5692 ins->dreg = alloc_dreg (cfg, STACK_R8);
5696 MONO_ADD_INS (bblock, ins);
5703 MonoInst *temp, *store;
5705 CHECK_STACK_OVF (1);
5709 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
5710 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
5712 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
5715 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
5728 if (sp [0]->type == STACK_R8)
5729 /* we need to pop the value from the x86 FP stack */
5730 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
5737 if (stack_start != sp)
5739 token = read32 (ip + 1);
5740 /* FIXME: check the signature matches */
5741 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
5746 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
5747 GENERIC_SHARING_FAILURE (CEE_JMP);
5749 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
5750 if (check_linkdemand (cfg, method, cmethod))
5752 CHECK_CFG_EXCEPTION;
5757 MonoMethodSignature *fsig = mono_method_signature (cmethod);
5760 /* Handle tail calls similarly to calls */
5761 n = fsig->param_count + fsig->hasthis;
5763 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
5764 call->method = cmethod;
5765 call->tail_call = TRUE;
5766 call->signature = mono_method_signature (cmethod);
5767 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
5768 call->inst.inst_p0 = cmethod;
5769 for (i = 0; i < n; ++i)
5770 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
5772 mono_arch_emit_call (cfg, call);
5773 MONO_ADD_INS (bblock, (MonoInst*)call);
5776 for (i = 0; i < num_args; ++i)
5777 /* Prevent arguments from being optimized away */
5778 arg_array [i]->flags |= MONO_INST_VOLATILE;
5780 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
5781 ins = (MonoInst*)call;
5782 ins->inst_p0 = cmethod;
5783 MONO_ADD_INS (bblock, ins);
5787 start_new_bblock = 1;
5792 case CEE_CALLVIRT: {
5793 MonoInst *addr = NULL;
5794 MonoMethodSignature *fsig = NULL;
5796 int virtual = *ip == CEE_CALLVIRT;
5797 int calli = *ip == CEE_CALLI;
5798 gboolean pass_imt_from_rgctx = FALSE;
5799 MonoInst *imt_arg = NULL;
5800 gboolean pass_vtable = FALSE;
5801 gboolean pass_mrgctx = FALSE;
5802 MonoInst *vtable_arg = NULL;
5803 gboolean check_this = FALSE;
5806 token = read32 (ip + 1);
5813 if (method->wrapper_type != MONO_WRAPPER_NONE)
5814 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
5816 fsig = mono_metadata_parse_signature (image, token);
5818 n = fsig->param_count + fsig->hasthis;
5820 MonoMethod *cil_method;
5822 if (method->wrapper_type != MONO_WRAPPER_NONE) {
5823 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
5824 cil_method = cmethod;
5825 } else if (constrained_call) {
5826 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
5828 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
5829 cil_method = cmethod;
5834 if (!dont_verify && !cfg->skip_visibility) {
5835 MonoMethod *target_method = cil_method;
5836 if (method->is_inflated) {
5837 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
5839 if (!mono_method_can_access_method (method_definition, target_method) &&
5840 !mono_method_can_access_method (method, cil_method))
5841 METHOD_ACCESS_FAILURE;
5844 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
5845 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
5847 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
5848 /* MS.NET seems to silently convert this to a callvirt */
5851 if (!cmethod->klass->inited)
5852 if (!mono_class_init (cmethod->klass))
5855 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
5856 mini_class_is_system_array (cmethod->klass)) {
5857 array_rank = cmethod->klass->rank;
5858 fsig = mono_method_signature (cmethod);
5860 if (mono_method_signature (cmethod)->pinvoke) {
5861 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
5862 check_for_pending_exc, FALSE);
5863 fsig = mono_method_signature (wrapper);
5864 } else if (constrained_call) {
5865 fsig = mono_method_signature (cmethod);
5867 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
5871 mono_save_token_info (cfg, image, token, cil_method);
5873 n = fsig->param_count + fsig->hasthis;
5875 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
5876 if (check_linkdemand (cfg, method, cmethod))
5878 CHECK_CFG_EXCEPTION;
5881 if (cmethod->string_ctor)
5882 g_assert_not_reached ();
5885 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
5888 if (!cfg->generic_sharing_context && cmethod)
5889 g_assert (!mono_method_check_context_used (cmethod));
5893 //g_assert (!virtual || fsig->hasthis);
5897 if (constrained_call) {
5899 * We have the `constrained.' prefix opcode.
5901 if (constrained_call->valuetype && !cmethod->klass->valuetype) {
5905 * The type parameter is instantiated as a valuetype,
5906 * but that type doesn't override the method we're
5907 * calling, so we need to box `this'.
5909 dreg = alloc_dreg (cfg, STACK_VTYPE);
5910 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADV_MEMBASE, dreg, sp [0]->dreg, 0);
5911 ins->klass = constrained_call;
5912 sp [0] = handle_box (cfg, ins, constrained_call);
5913 } else if (!constrained_call->valuetype) {
5914 int dreg = alloc_preg (cfg);
5917 * The type parameter is instantiated as a reference
5918 * type. We have a managed pointer on the stack, so
5919 * we need to dereference it here.
5921 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
5922 ins->type = STACK_OBJ;
5924 } else if (cmethod->klass->valuetype)
5926 constrained_call = NULL;
5929 if (*ip != CEE_CALLI && check_call_signature (cfg, fsig, sp))
5933 if (cmethod && ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
5934 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
5935 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
5936 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
5937 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
5940 * Pass vtable iff target method might
5941 * be shared, which means that sharing
5942 * is enabled for its class and its
5943 * context is sharable (and it's not a
5946 if (sharing_enabled && context_sharable &&
5947 !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
5951 if (cmethod && mini_method_get_context (cmethod) &&
5952 mini_method_get_context (cmethod)->method_inst) {
5953 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
5954 MonoGenericContext *context = mini_method_get_context (cmethod);
5955 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
5957 g_assert (!pass_vtable);
5959 if (sharing_enabled && context_sharable)
5963 if (cfg->generic_sharing_context && cmethod) {
5964 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
5966 context_used = mono_method_check_context_used (cmethod);
5968 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
5969 /* Generic method interface
5970 calls are resolved via a
5971 helper function and don't
5973 if (!cmethod_context || !cmethod_context->method_inst)
5974 pass_imt_from_rgctx = TRUE;
5978 * If a shared method calls another
5979 * shared method then the caller must
5980 * have a generic sharing context
5981 * because the magic trampoline
5982 * requires it. FIXME: We shouldn't
5983 * have to force the vtable/mrgctx
5984 * variable here. Instead there
5985 * should be a flag in the cfg to
5986 * request a generic sharing context.
5989 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
5990 mono_get_vtable_var (cfg);
5995 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
5997 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
5999 CHECK_TYPELOAD (cmethod->klass);
6000 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
6005 g_assert (!vtable_arg);
6008 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
6010 EMIT_NEW_METHOD_RGCTX_CONST (cfg, vtable_arg, cmethod);
6013 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
6014 (cmethod->flags & METHOD_ATTRIBUTE_FINAL)) {
6021 if (pass_imt_from_rgctx) {
6022 g_assert (!pass_vtable);
6025 imt_arg = emit_get_rgctx_method (cfg, context_used,
6026 cmethod, MONO_RGCTX_INFO_METHOD);
6032 MONO_INST_NEW (cfg, check, OP_CHECK_THIS);
6033 check->sreg1 = sp [0]->dreg;
6034 MONO_ADD_INS (cfg->cbb, check);
6037 /* Calling virtual generic methods */
6038 if (cmethod && virtual &&
6039 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
6040 !((cmethod->flags & METHOD_ATTRIBUTE_FINAL) &&
6041 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
6042 mono_method_signature (cmethod)->generic_param_count) {
6043 MonoInst *this_temp, *this_arg_temp, *store;
6044 MonoInst *iargs [4];
6046 g_assert (mono_method_signature (cmethod)->is_inflated);
6048 /* Prevent inlining of methods that contain indirect calls */
6051 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK
6052 if (!(cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) &&
6053 cmethod->wrapper_type == MONO_WRAPPER_NONE) {
6054 g_assert (!imt_arg);
6056 imt_arg = emit_get_rgctx_method (cfg, context_used,
6057 cmethod, MONO_RGCTX_INFO_METHOD_CONTEXT);
6061 cfg->disable_aot = TRUE;
6062 g_assert (cmethod->is_inflated);
6063 EMIT_NEW_PCONST (cfg, imt_arg,
6064 ((MonoMethodInflated*)cmethod)->context.method_inst);
6066 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, sp [0], imt_arg);
6070 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
6071 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
6072 MONO_ADD_INS (bblock, store);
6074 /* FIXME: This should be a managed pointer */
6075 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
6077 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
6079 iargs [1] = emit_get_rgctx_method (cfg, context_used,
6080 cmethod, MONO_RGCTX_INFO_METHOD);
6081 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
6082 addr = mono_emit_jit_icall (cfg,
6083 mono_helper_compile_generic_method, iargs);
6085 EMIT_NEW_METHODCONST (cfg, iargs [1], cmethod);
6086 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
6087 addr = mono_emit_jit_icall (cfg, mono_helper_compile_generic_method, iargs);
6090 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
6092 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6095 if (!MONO_TYPE_IS_VOID (fsig->ret))
6104 /* FIXME: runtime generic context pointer for jumps? */
6105 /* FIXME: handle this for generic sharing eventually */
6106 if ((ins_flag & MONO_INST_TAILCALL) && !cfg->generic_sharing_context && !vtable_arg && cmethod && (*ip == CEE_CALL) &&
6107 (mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)))) {
6110 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6113 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6114 call->tail_call = TRUE;
6115 call->method = cmethod;
6116 call->signature = mono_method_signature (cmethod);
6119 /* Handle tail calls similarly to calls */
6120 call->inst.opcode = OP_TAILCALL;
6122 mono_arch_emit_call (cfg, call);
6125 * We implement tail calls by storing the actual arguments into the
6126 * argument variables, then emitting a CEE_JMP.
6128 for (i = 0; i < n; ++i) {
6129 /* Prevent argument from being register allocated */
6130 arg_array [i]->flags |= MONO_INST_VOLATILE;
6131 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6135 ins = (MonoInst*)call;
6136 ins->inst_p0 = cmethod;
6137 ins->inst_p1 = arg_array [0];
6138 MONO_ADD_INS (bblock, ins);
6139 link_bblock (cfg, bblock, end_bblock);
6140 start_new_bblock = 1;
6141 /* skip CEE_RET as well */
6147 /* Conversion to a JIT intrinsic */
6148 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
6149 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6150 type_to_eval_stack_type ((cfg), fsig->ret, ins);
6161 if ((cfg->opt & MONO_OPT_INLINE) && cmethod &&
6162 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || (cmethod->flags & METHOD_ATTRIBUTE_FINAL)) &&
6163 mono_method_check_inlining (cfg, cmethod) &&
6164 !g_list_find (dont_inline, cmethod)) {
6166 gboolean allways = FALSE;
6168 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
6169 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
6170 /* Prevent inlining of methods that call wrappers */
6172 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
6176 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, allways))) {
6178 cfg->real_offset += 5;
6181 if (!MONO_TYPE_IS_VOID (fsig->ret))
6182 /* *sp is already set by inline_method */
6185 inline_costs += costs;
6191 inline_costs += 10 * num_calls++;
6193 /* Tail recursion elimination */
6194 if ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
6195 gboolean has_vtargs = FALSE;
6198 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6201 /* keep it simple */
6202 for (i = fsig->param_count - 1; i >= 0; i--) {
6203 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
6208 for (i = 0; i < n; ++i)
6209 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6210 MONO_INST_NEW (cfg, ins, OP_BR);
6211 MONO_ADD_INS (bblock, ins);
6212 tblock = start_bblock->out_bb [0];
6213 link_bblock (cfg, bblock, tblock);
6214 ins->inst_target_bb = tblock;
6215 start_new_bblock = 1;
6217 /* skip the CEE_RET, too */
6218 if (ip_in_bb (cfg, bblock, ip + 5))
6228 /* Generic sharing */
6229 /* FIXME: only do this for generic methods if
6230 they are not shared! */
6231 if (context_used && !imt_arg && !array_rank &&
6232 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
6233 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
6234 (!virtual || cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
6235 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
6238 g_assert (cfg->generic_sharing_context && cmethod);
6242 * We are compiling a call to a
6243 * generic method from shared code,
6244 * which means that we have to look up
6245 * the method in the rgctx and do an
6248 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
6251 /* Indirect calls */
6253 g_assert (!imt_arg);
6255 if (*ip == CEE_CALL)
6256 g_assert (context_used);
6257 else if (*ip == CEE_CALLI)
6258 g_assert (!vtable_arg);
6260 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
6261 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
6263 /* Prevent inlining of methods with indirect calls */
6267 #ifdef MONO_ARCH_RGCTX_REG
6269 int rgctx_reg = mono_alloc_preg (cfg);
6271 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
6272 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6273 call = (MonoCallInst*)ins;
6274 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
6275 cfg->uses_rgctx_reg = TRUE;
6280 if (addr->opcode == OP_AOTCONST && addr->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6282 * Instead of emitting an indirect call, emit a direct call
6283 * with the contents of the aotconst as the patch info.
6285 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_p0, fsig, sp);
6288 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6291 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6292 if (fsig->pinvoke && !fsig->ret->byref) {
6296 * Native code might return non register sized integers
6297 * without initializing the upper bits.
6299 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
6300 case OP_LOADI1_MEMBASE:
6301 widen_op = OP_ICONV_TO_I1;
6303 case OP_LOADU1_MEMBASE:
6304 widen_op = OP_ICONV_TO_U1;
6306 case OP_LOADI2_MEMBASE:
6307 widen_op = OP_ICONV_TO_I2;
6309 case OP_LOADU2_MEMBASE:
6310 widen_op = OP_ICONV_TO_U2;
6316 if (widen_op != -1) {
6317 int dreg = alloc_preg (cfg);
6320 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
6321 widen->type = ins->type;
6338 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
6339 if (sp [fsig->param_count]->type == STACK_OBJ) {
6340 MonoInst *iargs [2];
6343 iargs [1] = sp [fsig->param_count];
6345 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
6348 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
6349 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, sp [fsig->param_count]->dreg);
6350 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
6351 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6353 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
6356 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
6357 if (!cmethod->klass->element_class->valuetype && !readonly)
6358 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
6361 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6364 g_assert_not_reached ();
6372 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
6374 if (!MONO_TYPE_IS_VOID (fsig->ret))
6385 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL,
6387 } else if (imt_arg) {
6388 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, imt_arg);
6390 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, NULL);
6393 if (!MONO_TYPE_IS_VOID (fsig->ret))
6401 if (cfg->method != method) {
6402 /* return from inlined method */
6404 * If in_count == 0, that means the ret is unreachable due to
6405 * being preceeded by a throw. In that case, inline_method () will
6406 * handle setting the return value
6407 * (test case: test_0_inline_throw ()).
6409 if (return_var && cfg->cbb->in_count) {
6413 //g_assert (returnvar != -1);
6414 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
6415 cfg->ret_var_set = TRUE;
6419 MonoType *ret_type = mono_method_signature (method)->ret;
6421 g_assert (!return_var);
6424 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
6427 if (!cfg->vret_addr) {
6430 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
6432 EMIT_NEW_RETLOADA (cfg, ret_addr);
6434 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
6435 ins->klass = mono_class_from_mono_type (ret_type);
6438 #ifdef MONO_ARCH_SOFT_FLOAT
6439 if (!ret_type->byref && ret_type->type == MONO_TYPE_R4) {
6440 MonoInst *iargs [1];
6444 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
6445 mono_arch_emit_setret (cfg, method, conv);
6447 mono_arch_emit_setret (cfg, method, *sp);
6450 mono_arch_emit_setret (cfg, method, *sp);
6455 if (sp != stack_start)
6457 MONO_INST_NEW (cfg, ins, OP_BR);
6459 ins->inst_target_bb = end_bblock;
6460 MONO_ADD_INS (bblock, ins);
6461 link_bblock (cfg, bblock, end_bblock);
6462 start_new_bblock = 1;
6466 MONO_INST_NEW (cfg, ins, OP_BR);
6468 target = ip + 1 + (signed char)(*ip);
6470 GET_BBLOCK (cfg, tblock, target);
6471 link_bblock (cfg, bblock, tblock);
6472 ins->inst_target_bb = tblock;
6473 if (sp != stack_start) {
6474 handle_stack_args (cfg, stack_start, sp - stack_start);
6476 CHECK_UNVERIFIABLE (cfg);
6478 MONO_ADD_INS (bblock, ins);
6479 start_new_bblock = 1;
6480 inline_costs += BRANCH_COST;
6494 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
6496 target = ip + 1 + *(signed char*)ip;
6502 inline_costs += BRANCH_COST;
6506 MONO_INST_NEW (cfg, ins, OP_BR);
6509 target = ip + 4 + (gint32)read32(ip);
6511 GET_BBLOCK (cfg, tblock, target);
6512 link_bblock (cfg, bblock, tblock);
6513 ins->inst_target_bb = tblock;
6514 if (sp != stack_start) {
6515 handle_stack_args (cfg, stack_start, sp - stack_start);
6517 CHECK_UNVERIFIABLE (cfg);
6520 MONO_ADD_INS (bblock, ins);
6522 start_new_bblock = 1;
6523 inline_costs += BRANCH_COST;
6530 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
6531 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
6532 guint32 opsize = is_short ? 1 : 4;
6534 CHECK_OPSIZE (opsize);
6536 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
6539 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
6544 GET_BBLOCK (cfg, tblock, target);
6545 link_bblock (cfg, bblock, tblock);
6546 GET_BBLOCK (cfg, tblock, ip);
6547 link_bblock (cfg, bblock, tblock);
6549 if (sp != stack_start) {
6550 handle_stack_args (cfg, stack_start, sp - stack_start);
6551 CHECK_UNVERIFIABLE (cfg);
6554 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
6555 cmp->sreg1 = sp [0]->dreg;
6556 type_from_op (cmp, sp [0], NULL);
6559 #if SIZEOF_VOID_P == 4
6560 if (cmp->opcode == OP_LCOMPARE_IMM) {
6561 /* Convert it to OP_LCOMPARE */
6562 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6563 ins->type = STACK_I8;
6564 ins->dreg = alloc_dreg (cfg, STACK_I8);
6566 MONO_ADD_INS (bblock, ins);
6567 cmp->opcode = OP_LCOMPARE;
6568 cmp->sreg2 = ins->dreg;
6571 MONO_ADD_INS (bblock, cmp);
6573 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
6574 type_from_op (ins, sp [0], NULL);
6575 MONO_ADD_INS (bblock, ins);
6576 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
6577 GET_BBLOCK (cfg, tblock, target);
6578 ins->inst_true_bb = tblock;
6579 GET_BBLOCK (cfg, tblock, ip);
6580 ins->inst_false_bb = tblock;
6581 start_new_bblock = 2;
6584 inline_costs += BRANCH_COST;
6599 MONO_INST_NEW (cfg, ins, *ip);
6601 target = ip + 4 + (gint32)read32(ip);
6607 inline_costs += BRANCH_COST;
6611 MonoBasicBlock **targets;
6612 MonoBasicBlock *default_bblock;
6613 MonoJumpInfoBBTable *table;
6614 int offset_reg = alloc_preg (cfg);
6615 int target_reg = alloc_preg (cfg);
6616 int table_reg = alloc_preg (cfg);
6617 int sum_reg = alloc_preg (cfg);
6618 gboolean use_op_switch;
6622 n = read32 (ip + 1);
6625 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
6629 CHECK_OPSIZE (n * sizeof (guint32));
6630 target = ip + n * sizeof (guint32);
6632 GET_BBLOCK (cfg, default_bblock, target);
6634 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
6635 for (i = 0; i < n; ++i) {
6636 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
6637 targets [i] = tblock;
6641 if (sp != stack_start) {
6643 * Link the current bb with the targets as well, so handle_stack_args
6644 * will set their in_stack correctly.
6646 link_bblock (cfg, bblock, default_bblock);
6647 for (i = 0; i < n; ++i)
6648 link_bblock (cfg, bblock, targets [i]);
6650 handle_stack_args (cfg, stack_start, sp - stack_start);
6652 CHECK_UNVERIFIABLE (cfg);
6655 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
6656 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
6659 for (i = 0; i < n; ++i)
6660 link_bblock (cfg, bblock, targets [i]);
6662 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
6663 table->table = targets;
6664 table->table_size = n;
6666 use_op_switch = FALSE;
6668 /* ARM implements SWITCH statements differently */
6669 /* FIXME: Make it use the generic implementation */
6670 if (!cfg->compile_aot)
6671 use_op_switch = TRUE;
6674 if (use_op_switch) {
6675 MONO_INST_NEW (cfg, ins, OP_SWITCH);
6676 ins->sreg1 = src1->dreg;
6677 ins->inst_p0 = table;
6678 ins->inst_many_bb = targets;
6679 ins->klass = GUINT_TO_POINTER (n);
6680 MONO_ADD_INS (cfg->cbb, ins);
6682 if (sizeof (gpointer) == 8)
6683 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
6685 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
6687 #if SIZEOF_VOID_P == 8
6688 /* The upper word might not be zero, and we add it to a 64 bit address later */
6689 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
6692 if (cfg->compile_aot) {
6693 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
6695 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
6696 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
6697 ins->inst_p0 = table;
6698 ins->dreg = table_reg;
6699 MONO_ADD_INS (cfg->cbb, ins);
6702 /* FIXME: Use load_memindex */
6703 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
6704 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
6705 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
6707 start_new_bblock = 1;
6708 inline_costs += (BRANCH_COST * 2);
6728 dreg = alloc_freg (cfg);
6731 dreg = alloc_lreg (cfg);
6734 dreg = alloc_preg (cfg);
6737 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
6738 ins->type = ldind_type [*ip - CEE_LDIND_I1];
6739 ins->flags |= ins_flag;
6741 MONO_ADD_INS (bblock, ins);
6756 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
6757 ins->flags |= ins_flag;
6759 MONO_ADD_INS (bblock, ins);
6767 MONO_INST_NEW (cfg, ins, (*ip));
6769 ins->sreg1 = sp [0]->dreg;
6770 ins->sreg2 = sp [1]->dreg;
6771 type_from_op (ins, sp [0], sp [1]);
6773 ins->dreg = alloc_dreg ((cfg), (ins)->type);
6775 /* Use the immediate opcodes if possible */
6776 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
6777 int imm_opcode = mono_op_to_op_imm (ins->opcode);
6778 if (imm_opcode != -1) {
6779 ins->opcode = imm_opcode;
6780 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
6783 sp [1]->opcode = OP_NOP;
6787 MONO_ADD_INS ((cfg)->cbb, (ins));
6790 mono_decompose_opcode (cfg, ins);
6807 MONO_INST_NEW (cfg, ins, (*ip));
6809 ins->sreg1 = sp [0]->dreg;
6810 ins->sreg2 = sp [1]->dreg;
6811 type_from_op (ins, sp [0], sp [1]);
6813 ADD_WIDEN_OP (ins, sp [0], sp [1]);
6814 ins->dreg = alloc_dreg ((cfg), (ins)->type);
6816 /* FIXME: Pass opcode to is_inst_imm */
6818 /* Use the immediate opcodes if possible */
6819 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
6822 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
6823 if (imm_opcode != -1) {
6824 ins->opcode = imm_opcode;
6825 if (sp [1]->opcode == OP_I8CONST) {
6826 #if SIZEOF_VOID_P == 8
6827 ins->inst_imm = sp [1]->inst_l;
6829 ins->inst_ls_word = sp [1]->inst_ls_word;
6830 ins->inst_ms_word = sp [1]->inst_ms_word;
6834 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
6837 /* Might be followed by an instruction added by ADD_WIDEN_OP */
6838 if (sp [1]->next == NULL)
6839 sp [1]->opcode = OP_NOP;
6842 MONO_ADD_INS ((cfg)->cbb, (ins));
6845 mono_decompose_opcode (cfg, ins);
6858 case CEE_CONV_OVF_I8:
6859 case CEE_CONV_OVF_U8:
6863 /* Special case this earlier so we have long constants in the IR */
6864 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
6865 int data = sp [-1]->inst_c0;
6866 sp [-1]->opcode = OP_I8CONST;
6867 sp [-1]->type = STACK_I8;
6868 #if SIZEOF_VOID_P == 8
6869 if ((*ip) == CEE_CONV_U8)
6870 sp [-1]->inst_c0 = (guint32)data;
6872 sp [-1]->inst_c0 = data;
6874 sp [-1]->inst_ls_word = data;
6875 if ((*ip) == CEE_CONV_U8)
6876 sp [-1]->inst_ms_word = 0;
6878 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
6880 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
6887 case CEE_CONV_OVF_I4:
6888 case CEE_CONV_OVF_I1:
6889 case CEE_CONV_OVF_I2:
6890 case CEE_CONV_OVF_I:
6891 case CEE_CONV_OVF_U:
6894 if (sp [-1]->type == STACK_R8) {
6895 ADD_UNOP (CEE_CONV_OVF_I8);
6902 case CEE_CONV_OVF_U1:
6903 case CEE_CONV_OVF_U2:
6904 case CEE_CONV_OVF_U4:
6907 if (sp [-1]->type == STACK_R8) {
6908 ADD_UNOP (CEE_CONV_OVF_U8);
6915 case CEE_CONV_OVF_I1_UN:
6916 case CEE_CONV_OVF_I2_UN:
6917 case CEE_CONV_OVF_I4_UN:
6918 case CEE_CONV_OVF_I8_UN:
6919 case CEE_CONV_OVF_U1_UN:
6920 case CEE_CONV_OVF_U2_UN:
6921 case CEE_CONV_OVF_U4_UN:
6922 case CEE_CONV_OVF_U8_UN:
6923 case CEE_CONV_OVF_I_UN:
6924 case CEE_CONV_OVF_U_UN:
6934 case CEE_ADD_OVF_UN:
6936 case CEE_MUL_OVF_UN:
6938 case CEE_SUB_OVF_UN:
6946 token = read32 (ip + 1);
6947 klass = mini_get_class (method, token, generic_context);
6948 CHECK_TYPELOAD (klass);
6950 if (generic_class_is_reference_type (cfg, klass)) {
6951 MonoInst *store, *load;
6952 int dreg = alloc_preg (cfg);
6954 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
6955 load->flags |= ins_flag;
6956 MONO_ADD_INS (cfg->cbb, load);
6958 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
6959 store->flags |= ins_flag;
6960 MONO_ADD_INS (cfg->cbb, store);
6962 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
6974 token = read32 (ip + 1);
6975 klass = mini_get_class (method, token, generic_context);
6976 CHECK_TYPELOAD (klass);
6978 /* Optimize the common ldobj+stloc combination */
6988 loc_index = ip [5] - CEE_STLOC_0;
6995 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
6996 CHECK_LOCAL (loc_index);
6998 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
6999 ins->dreg = cfg->locals [loc_index]->dreg;
7005 /* Optimize the ldobj+stobj combination */
7006 /* The reference case ends up being a load+store anyway */
7007 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 9) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
7012 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7019 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7028 CHECK_STACK_OVF (1);
7030 n = read32 (ip + 1);
7032 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
7033 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
7034 ins->type = STACK_OBJ;
7037 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
7038 MonoInst *iargs [1];
7040 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
7041 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
7043 if (cfg->opt & MONO_OPT_SHARED) {
7044 MonoInst *iargs [3];
7046 if (cfg->compile_aot) {
7047 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
7049 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7050 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
7051 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
7052 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
7053 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7055 if (bblock->out_of_line) {
7056 MonoInst *iargs [2];
7058 if (cfg->method->klass->image == mono_defaults.corlib) {
7060 * Avoid relocations in AOT and save some space by using a
7061 * version of helper_ldstr specialized to mscorlib.
7063 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
7064 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
7066 /* Avoid creating the string object */
7067 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
7068 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
7069 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
7073 if (cfg->compile_aot) {
7074 NEW_LDSTRCONST (cfg, ins, image, n);
7076 MONO_ADD_INS (bblock, ins);
7079 NEW_PCONST (cfg, ins, NULL);
7080 ins->type = STACK_OBJ;
7081 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7083 MONO_ADD_INS (bblock, ins);
7092 MonoInst *iargs [2];
7093 MonoMethodSignature *fsig;
7096 MonoInst *vtable_arg = NULL;
7099 token = read32 (ip + 1);
7100 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7103 fsig = mono_method_get_signature (cmethod, image, token);
7105 mono_save_token_info (cfg, image, token, cmethod);
7107 if (!mono_class_init (cmethod->klass))
7110 if (cfg->generic_sharing_context)
7111 context_used = mono_method_check_context_used (cmethod);
7113 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
7114 if (check_linkdemand (cfg, method, cmethod))
7116 CHECK_CFG_EXCEPTION;
7117 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
7118 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
7121 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7122 mono_method_is_generic_sharable_impl (cmethod, TRUE)) {
7123 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7125 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7126 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7128 EMIT_NEW_METHOD_RGCTX_CONST (cfg, vtable_arg, cmethod);
7132 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
7133 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7135 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7137 CHECK_TYPELOAD (cmethod->klass);
7138 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7143 n = fsig->param_count;
7147 * Generate smaller code for the common newobj <exception> instruction in
7148 * argument checking code.
7150 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib && n <= 2 &&
7151 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
7152 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
7153 MonoInst *iargs [3];
7155 g_assert (!vtable_arg);
7159 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
7162 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
7166 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
7171 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
7174 g_assert_not_reached ();
7182 /* move the args to allow room for 'this' in the first position */
7188 /* check_call_signature () requires sp[0] to be set */
7189 this_ins.type = STACK_OBJ;
7191 if (check_call_signature (cfg, fsig, sp))
7196 if (mini_class_is_system_array (cmethod->klass)) {
7198 GENERIC_SHARING_FAILURE (*ip);
7199 g_assert (!context_used);
7200 g_assert (!vtable_arg);
7201 EMIT_NEW_METHODCONST (cfg, *sp, cmethod);
7203 /* Avoid varargs in the common case */
7204 if (fsig->param_count == 1)
7205 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
7206 else if (fsig->param_count == 2)
7207 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
7209 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
7210 } else if (cmethod->string_ctor) {
7211 g_assert (!context_used);
7212 g_assert (!vtable_arg);
7213 /* we simply pass a null pointer */
7214 EMIT_NEW_PCONST (cfg, *sp, NULL);
7215 /* now call the string ctor */
7216 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, sp, NULL, NULL);
7218 MonoInst* callvirt_this_arg = NULL;
7220 if (cmethod->klass->valuetype) {
7221 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
7222 MONO_EMIT_NEW_VZERO (cfg, iargs [0]->dreg, cmethod->klass);
7223 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
7228 * The code generated by mini_emit_virtual_call () expects
7229 * iargs [0] to be a boxed instance, but luckily the vcall
7230 * will be transformed into a normal call there.
7232 } else if (context_used) {
7236 if (cfg->opt & MONO_OPT_SHARED)
7237 rgctx_info = MONO_RGCTX_INFO_KLASS;
7239 rgctx_info = MONO_RGCTX_INFO_VTABLE;
7240 data = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, rgctx_info);
7242 alloc = handle_alloc_from_inst (cfg, cmethod->klass, data, FALSE);
7245 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7247 CHECK_TYPELOAD (cmethod->klass);
7250 * TypeInitializationExceptions thrown from the mono_runtime_class_init
7251 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
7252 * As a workaround, we call class cctors before allocating objects.
7254 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
7255 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
7256 if (cfg->verbose_level > 2)
7257 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
7258 class_inits = g_slist_prepend (class_inits, vtable);
7261 alloc = handle_alloc (cfg, cmethod->klass, FALSE);
7266 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
7268 /* Now call the actual ctor */
7269 /* Avoid virtual calls to ctors if possible */
7270 if (cmethod->klass->marshalbyref)
7271 callvirt_this_arg = sp [0];
7273 if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7274 mono_method_check_inlining (cfg, cmethod) &&
7275 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
7276 !g_list_find (dont_inline, cmethod)) {
7279 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
7280 cfg->real_offset += 5;
7283 inline_costs += costs - 5;
7286 mono_emit_method_call_full (cfg, cmethod, fsig, sp, callvirt_this_arg, NULL);
7288 } else if (context_used &&
7289 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
7290 !mono_class_generic_sharing_enabled (cmethod->klass))) {
7291 MonoInst *cmethod_addr;
7293 g_assert (!callvirt_this_arg);
7295 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7296 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7298 mono_emit_rgctx_calli (cfg, fsig, sp, cmethod_addr, vtable_arg);
7301 mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp,
7302 callvirt_this_arg, NULL, vtable_arg);
7306 if (alloc == NULL) {
7308 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
7309 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
7323 token = read32 (ip + 1);
7324 klass = mini_get_class (method, token, generic_context);
7325 CHECK_TYPELOAD (klass);
7326 if (sp [0]->type != STACK_OBJ)
7329 if (cfg->generic_sharing_context)
7330 context_used = mono_class_check_context_used (klass);
7339 args [1] = emit_get_rgctx_klass (cfg, context_used,
7340 klass, MONO_RGCTX_INFO_KLASS);
7342 ins = mono_emit_jit_icall (cfg, mono_object_castclass, args);
7346 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7347 MonoMethod *mono_castclass;
7348 MonoInst *iargs [1];
7351 mono_castclass = mono_marshal_get_castclass (klass);
7354 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7355 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7356 g_assert (costs > 0);
7359 cfg->real_offset += 5;
7364 inline_costs += costs;
7367 ins = handle_castclass (cfg, klass, *sp);
7377 token = read32 (ip + 1);
7378 klass = mini_get_class (method, token, generic_context);
7379 CHECK_TYPELOAD (klass);
7380 if (sp [0]->type != STACK_OBJ)
7383 if (cfg->generic_sharing_context)
7384 context_used = mono_class_check_context_used (klass);
7393 args [1] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
7395 *sp = mono_emit_jit_icall (cfg, mono_object_isinst, args);
7399 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7400 MonoMethod *mono_isinst;
7401 MonoInst *iargs [1];
7404 mono_isinst = mono_marshal_get_isinst (klass);
7407 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
7408 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7409 g_assert (costs > 0);
7412 cfg->real_offset += 5;
7417 inline_costs += costs;
7420 ins = handle_isinst (cfg, klass, *sp);
7427 case CEE_UNBOX_ANY: {
7431 token = read32 (ip + 1);
7432 klass = mini_get_class (method, token, generic_context);
7433 CHECK_TYPELOAD (klass);
7435 mono_save_token_info (cfg, image, token, klass);
7437 if (cfg->generic_sharing_context)
7438 context_used = mono_class_check_context_used (klass);
7440 if (generic_class_is_reference_type (cfg, klass)) {
7443 MonoInst *iargs [2];
7448 iargs [1] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
7449 ins = mono_emit_jit_icall (cfg, mono_object_castclass, iargs);
7453 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7454 MonoMethod *mono_castclass;
7455 MonoInst *iargs [1];
7458 mono_castclass = mono_marshal_get_castclass (klass);
7461 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7462 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7464 g_assert (costs > 0);
7467 cfg->real_offset += 5;
7471 inline_costs += costs;
7473 ins = handle_castclass (cfg, klass, *sp);
7481 if (mono_class_is_nullable (klass)) {
7482 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
7489 ins = handle_unbox (cfg, klass, sp, context_used);
7495 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7508 token = read32 (ip + 1);
7509 klass = mini_get_class (method, token, generic_context);
7510 CHECK_TYPELOAD (klass);
7512 mono_save_token_info (cfg, image, token, klass);
7514 if (cfg->generic_sharing_context)
7515 context_used = mono_class_check_context_used (klass);
7517 if (generic_class_is_reference_type (cfg, klass)) {
7523 if (klass == mono_defaults.void_class)
7525 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
7527 /* frequent check in generic code: box (struct), brtrue */
7528 if (!mono_class_is_nullable (klass) &&
7529 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) && (ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S)) {
7530 /*printf ("box-brtrue opt at 0x%04x in %s\n", real_offset, method->name);*/
7532 MONO_INST_NEW (cfg, ins, OP_BR);
7533 if (*ip == CEE_BRTRUE_S) {
7536 target = ip + 1 + (signed char)(*ip);
7541 target = ip + 4 + (gint)(read32 (ip));
7544 GET_BBLOCK (cfg, tblock, target);
7545 link_bblock (cfg, bblock, tblock);
7546 ins->inst_target_bb = tblock;
7547 GET_BBLOCK (cfg, tblock, ip);
7549 * This leads to some inconsistency, since the two bblocks are
7550 * not really connected, but it is needed for handling stack
7551 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
7552 * FIXME: This should only be needed if sp != stack_start, but that
7553 * doesn't work for some reason (test failure in mcs/tests on x86).
7555 link_bblock (cfg, bblock, tblock);
7556 if (sp != stack_start) {
7557 handle_stack_args (cfg, stack_start, sp - stack_start);
7559 CHECK_UNVERIFIABLE (cfg);
7561 MONO_ADD_INS (bblock, ins);
7562 start_new_bblock = 1;
7570 if (cfg->opt & MONO_OPT_SHARED)
7571 rgctx_info = MONO_RGCTX_INFO_KLASS;
7573 rgctx_info = MONO_RGCTX_INFO_VTABLE;
7574 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
7575 *sp++ = handle_box_from_inst (cfg, val, klass, context_used, data);
7577 *sp++ = handle_box (cfg, val, klass);
7588 token = read32 (ip + 1);
7589 klass = mini_get_class (method, token, generic_context);
7590 CHECK_TYPELOAD (klass);
7592 mono_save_token_info (cfg, image, token, klass);
7594 if (cfg->generic_sharing_context)
7595 context_used = mono_class_check_context_used (klass);
7597 if (mono_class_is_nullable (klass)) {
7600 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
7601 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
7605 ins = handle_unbox (cfg, klass, sp, context_used);
7615 MonoClassField *field;
7619 if (*ip == CEE_STFLD) {
7626 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
7628 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
7631 token = read32 (ip + 1);
7632 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7633 field = mono_method_get_wrapper_data (method, token);
7634 klass = field->parent;
7637 field = mono_field_from_token (image, token, &klass, generic_context);
7641 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
7642 FIELD_ACCESS_FAILURE;
7643 mono_class_init (klass);
7645 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
7646 if (*ip == CEE_STFLD) {
7647 if (target_type_is_incompatible (cfg, field->type, sp [1]))
7649 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
7650 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
7651 MonoInst *iargs [5];
7654 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
7655 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
7656 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
7660 if (cfg->opt & MONO_OPT_INLINE) {
7661 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
7662 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7663 g_assert (costs > 0);
7665 cfg->real_offset += 5;
7668 inline_costs += costs;
7670 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
7675 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
7677 store->flags |= ins_flag;
7684 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
7685 MonoMethod *wrapper = (*ip == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
7686 MonoInst *iargs [4];
7689 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
7690 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
7691 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
7692 if ((cfg->opt & MONO_OPT_INLINE) && !MONO_TYPE_ISSTRUCT (mono_method_signature (wrapper)->ret)) {
7693 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
7694 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7696 g_assert (costs > 0);
7698 cfg->real_offset += 5;
7702 inline_costs += costs;
7704 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
7708 if (sp [0]->type == STACK_VTYPE) {
7711 /* Have to compute the address of the variable */
7713 var = get_vreg_to_inst (cfg, sp [0]->dreg);
7715 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
7717 g_assert (var->klass == klass);
7719 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
7723 if (*ip == CEE_LDFLDA) {
7724 dreg = alloc_preg (cfg);
7726 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
7727 ins->klass = mono_class_from_mono_type (field->type);
7728 ins->type = STACK_MP;
7733 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
7734 load->flags |= ins_flag;
7745 MonoClassField *field;
7746 gpointer addr = NULL;
7747 gboolean is_special_static;
7750 token = read32 (ip + 1);
7752 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7753 field = mono_method_get_wrapper_data (method, token);
7754 klass = field->parent;
7757 field = mono_field_from_token (image, token, &klass, generic_context);
7760 mono_class_init (klass);
7761 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
7762 FIELD_ACCESS_FAILURE;
7765 * We can only support shared generic static
7766 * field access on architectures where the
7767 * trampoline code has been extended to handle
7768 * the generic class init.
7770 #ifndef MONO_ARCH_VTABLE_REG
7771 GENERIC_SHARING_FAILURE (*ip);
7774 if (cfg->generic_sharing_context)
7775 context_used = mono_class_check_context_used (klass);
7777 g_assert (!(field->type->attrs & FIELD_ATTRIBUTE_LITERAL));
7779 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
7780 * to be called here.
7782 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
7783 mono_class_vtable (cfg->domain, klass);
7784 CHECK_TYPELOAD (klass);
7786 mono_domain_lock (cfg->domain);
7787 if (cfg->domain->special_static_fields)
7788 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
7789 mono_domain_unlock (cfg->domain);
7791 is_special_static = mono_class_field_is_special_static (field);
7793 /* Generate IR to compute the field address */
7795 if ((cfg->opt & MONO_OPT_SHARED) ||
7796 (cfg->compile_aot && is_special_static) ||
7797 (context_used && is_special_static)) {
7798 MonoInst *iargs [2];
7800 g_assert (field->parent);
7801 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7803 iargs [1] = emit_get_rgctx_field (cfg, context_used,
7804 field, MONO_RGCTX_INFO_CLASS_FIELD);
7806 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
7808 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
7809 } else if (context_used) {
7810 MonoInst *static_data;
7813 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
7814 method->klass->name_space, method->klass->name, method->name,
7815 depth, field->offset);
7818 if (mono_class_needs_cctor_run (klass, method)) {
7822 vtable = emit_get_rgctx_klass (cfg, context_used,
7823 klass, MONO_RGCTX_INFO_VTABLE);
7825 // FIXME: This doesn't work since it tries to pass the argument
7826 // in the normal way, instead of using MONO_ARCH_VTABLE_REG
7828 * The vtable pointer is always passed in a register regardless of
7829 * the calling convention, so assign it manually, and make a call
7830 * using a signature without parameters.
7832 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable);
7833 #ifdef MONO_ARCH_VTABLE_REG
7834 mono_call_inst_add_outarg_reg (cfg, call, vtable->dreg, MONO_ARCH_VTABLE_REG, FALSE);
7835 cfg->uses_vtable_reg = TRUE;
7842 * The pointer we're computing here is
7844 * super_info.static_data + field->offset
7846 static_data = emit_get_rgctx_klass (cfg, context_used,
7847 klass, MONO_RGCTX_INFO_STATIC_DATA);
7849 if (field->offset == 0) {
7852 int addr_reg = mono_alloc_preg (cfg);
7853 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
7855 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
7856 MonoInst *iargs [2];
7858 g_assert (field->parent);
7859 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7860 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
7861 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
7863 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
7865 CHECK_TYPELOAD (klass);
7867 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
7868 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
7869 if (cfg->verbose_level > 2)
7870 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
7871 class_inits = g_slist_prepend (class_inits, vtable);
7873 if (cfg->run_cctors) {
7875 /* This makes so that inline cannot trigger */
7876 /* .cctors: too many apps depend on them */
7877 /* running with a specific order... */
7878 if (! vtable->initialized)
7880 ex = mono_runtime_class_init_full (vtable, FALSE);
7882 set_exception_object (cfg, ex);
7883 goto exception_exit;
7887 addr = (char*)vtable->data + field->offset;
7889 if (cfg->compile_aot)
7890 EMIT_NEW_SFLDACONST (cfg, ins, field);
7892 EMIT_NEW_PCONST (cfg, ins, addr);
7895 * insert call to mono_threads_get_static_data (GPOINTER_TO_UINT (addr))
7896 * This could be later optimized to do just a couple of
7897 * memory dereferences with constant offsets.
7899 MonoInst *iargs [1];
7900 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
7901 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
7905 /* Generate IR to do the actual load/store operation */
7907 if (*ip == CEE_LDSFLDA) {
7908 ins->klass = mono_class_from_mono_type (field->type);
7909 ins->type = STACK_PTR;
7911 } else if (*ip == CEE_STSFLD) {
7916 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, ins->dreg, 0, sp [0]->dreg);
7917 store->flags |= ins_flag;
7919 gboolean is_const = FALSE;
7920 MonoVTable *vtable = NULL;
7922 if (!context_used) {
7923 vtable = mono_class_vtable (cfg->domain, klass);
7924 CHECK_TYPELOAD (klass);
7926 if (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) &&
7927 vtable->initialized && (field->type->attrs & FIELD_ATTRIBUTE_INIT_ONLY)) {
7928 gpointer addr = (char*)vtable->data + field->offset;
7929 int ro_type = field->type->type;
7930 if (ro_type == MONO_TYPE_VALUETYPE && field->type->data.klass->enumtype) {
7931 ro_type = field->type->data.klass->enum_basetype->type;
7933 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
7936 case MONO_TYPE_BOOLEAN:
7938 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
7942 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
7945 case MONO_TYPE_CHAR:
7947 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
7951 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
7956 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
7960 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
7965 case MONO_TYPE_STRING:
7966 case MONO_TYPE_OBJECT:
7967 case MONO_TYPE_CLASS:
7968 case MONO_TYPE_SZARRAY:
7970 case MONO_TYPE_FNPTR:
7971 case MONO_TYPE_ARRAY:
7972 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
7973 type_to_eval_stack_type ((cfg), field->type, *sp);
7978 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
7983 case MONO_TYPE_VALUETYPE:
7993 CHECK_STACK_OVF (1);
7995 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
7996 load->flags |= ins_flag;
8009 token = read32 (ip + 1);
8010 klass = mini_get_class (method, token, generic_context);
8011 CHECK_TYPELOAD (klass);
8012 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
8013 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
8024 const char *data_ptr;
8026 guint32 field_token;
8032 token = read32 (ip + 1);
8034 klass = mini_get_class (method, token, generic_context);
8035 CHECK_TYPELOAD (klass);
8037 if (cfg->generic_sharing_context)
8038 context_used = mono_class_check_context_used (klass);
8043 /* FIXME: Decompose later to help abcrem */
8046 args [0] = emit_get_rgctx_klass (cfg, context_used,
8047 mono_array_class_get (klass, 1), MONO_RGCTX_INFO_VTABLE);
8052 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
8054 if (cfg->opt & MONO_OPT_SHARED) {
8055 /* Decompose now to avoid problems with references to the domainvar */
8056 MonoInst *iargs [3];
8058 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8059 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8062 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
8064 /* Decompose later since it is needed by abcrem */
8065 MONO_INST_NEW (cfg, ins, OP_NEWARR);
8066 ins->dreg = alloc_preg (cfg);
8067 ins->sreg1 = sp [0]->dreg;
8068 ins->inst_newa_class = klass;
8069 ins->type = STACK_OBJ;
8071 MONO_ADD_INS (cfg->cbb, ins);
8072 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8073 cfg->cbb->has_array_access = TRUE;
8075 /* Needed so mono_emit_load_get_addr () gets called */
8076 mono_get_got_var (cfg);
8086 * we inline/optimize the initialization sequence if possible.
8087 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
8088 * for small sizes open code the memcpy
8089 * ensure the rva field is big enough
8091 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
8092 MonoMethod *memcpy_method = get_memcpy_method ();
8093 MonoInst *iargs [3];
8094 int add_reg = alloc_preg (cfg);
8096 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
8097 if (cfg->compile_aot) {
8098 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
8100 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
8102 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
8103 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
8112 if (sp [0]->type != STACK_OBJ)
8115 dreg = alloc_preg (cfg);
8116 MONO_INST_NEW (cfg, ins, OP_LDLEN);
8117 ins->dreg = alloc_preg (cfg);
8118 ins->sreg1 = sp [0]->dreg;
8119 ins->type = STACK_I4;
8120 MONO_ADD_INS (cfg->cbb, ins);
8121 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8122 cfg->cbb->has_array_access = TRUE;
8130 if (sp [0]->type != STACK_OBJ)
8133 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8135 klass = mini_get_class (method, read32 (ip + 1), generic_context);
8136 CHECK_TYPELOAD (klass);
8137 /* we need to make sure that this array is exactly the type it needs
8138 * to be for correctness. the wrappers are lax with their usage
8139 * so we need to ignore them here
8141 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly)
8142 mini_emit_check_array_type (cfg, sp [0], mono_array_class_get (klass, 1));
8145 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8149 case CEE_LDELEM_ANY:
8160 case CEE_LDELEM_REF: {
8166 if (*ip == CEE_LDELEM_ANY) {
8168 token = read32 (ip + 1);
8169 klass = mini_get_class (method, token, generic_context);
8170 CHECK_TYPELOAD (klass);
8171 mono_class_init (klass);
8174 klass = array_access_to_klass (*ip);
8176 if (sp [0]->type != STACK_OBJ)
8179 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8181 if (sp [1]->opcode == OP_ICONST) {
8182 int array_reg = sp [0]->dreg;
8183 int index_reg = sp [1]->dreg;
8184 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8186 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8187 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
8189 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8190 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
8193 if (*ip == CEE_LDELEM_ANY)
8206 case CEE_STELEM_REF:
8207 case CEE_STELEM_ANY: {
8213 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8215 if (*ip == CEE_STELEM_ANY) {
8217 token = read32 (ip + 1);
8218 klass = mini_get_class (method, token, generic_context);
8219 CHECK_TYPELOAD (klass);
8220 mono_class_init (klass);
8223 klass = array_access_to_klass (*ip);
8225 if (sp [0]->type != STACK_OBJ)
8228 /* storing a NULL doesn't need any of the complex checks in stelemref */
8229 if (generic_class_is_reference_type (cfg, klass) &&
8230 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
8231 MonoMethod* helper = mono_marshal_get_stelemref ();
8232 MonoInst *iargs [3];
8234 if (sp [0]->type != STACK_OBJ)
8236 if (sp [2]->type != STACK_OBJ)
8243 mono_emit_method_call (cfg, helper, iargs, NULL);
8245 if (sp [1]->opcode == OP_ICONST) {
8246 int array_reg = sp [0]->dreg;
8247 int index_reg = sp [1]->dreg;
8248 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8250 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8251 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
8253 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8254 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
8258 if (*ip == CEE_STELEM_ANY)
8265 case CEE_CKFINITE: {
8269 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
8270 ins->sreg1 = sp [0]->dreg;
8271 ins->dreg = alloc_freg (cfg);
8272 ins->type = STACK_R8;
8273 MONO_ADD_INS (bblock, ins);
8276 mono_decompose_opcode (cfg, ins);
8281 case CEE_REFANYVAL: {
8282 MonoInst *src_var, *src;
8284 int klass_reg = alloc_preg (cfg);
8285 int dreg = alloc_preg (cfg);
8288 MONO_INST_NEW (cfg, ins, *ip);
8291 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8292 CHECK_TYPELOAD (klass);
8293 mono_class_init (klass);
8295 if (cfg->generic_sharing_context)
8296 context_used = mono_class_check_context_used (klass);
8299 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
8301 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
8302 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
8303 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
8306 MonoInst *klass_ins;
8308 klass_ins = emit_get_rgctx_klass (cfg, context_used,
8309 klass, MONO_RGCTX_INFO_KLASS);
8312 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
8313 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
8315 mini_emit_class_check (cfg, klass_reg, klass);
8317 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
8318 ins->type = STACK_MP;
8323 case CEE_MKREFANY: {
8324 MonoInst *loc, *addr;
8327 MONO_INST_NEW (cfg, ins, *ip);
8330 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8331 CHECK_TYPELOAD (klass);
8332 mono_class_init (klass);
8334 if (cfg->generic_sharing_context)
8335 context_used = mono_class_check_context_used (klass);
8337 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
8338 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
8341 MonoInst *const_ins;
8342 int type_reg = alloc_preg (cfg);
8344 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
8345 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
8346 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8347 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8348 } else if (cfg->compile_aot) {
8349 int const_reg = alloc_preg (cfg);
8350 int type_reg = alloc_preg (cfg);
8352 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
8353 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
8354 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8355 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8357 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
8358 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
8360 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
8362 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
8363 ins->type = STACK_VTYPE;
8364 ins->klass = mono_defaults.typed_reference_class;
8371 MonoClass *handle_class;
8373 CHECK_STACK_OVF (1);
8376 n = read32 (ip + 1);
8378 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
8379 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
8380 handle = mono_method_get_wrapper_data (method, n);
8381 handle_class = mono_method_get_wrapper_data (method, n + 1);
8382 if (handle_class == mono_defaults.typehandle_class)
8383 handle = &((MonoClass*)handle)->byval_arg;
8386 handle = mono_ldtoken (image, n, &handle_class, generic_context);
8390 mono_class_init (handle_class);
8391 if (cfg->generic_sharing_context) {
8392 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
8393 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
8394 /* This case handles ldtoken
8395 of an open type, like for
8398 } else if (handle_class == mono_defaults.typehandle_class) {
8399 /* If we get a MONO_TYPE_CLASS
8400 then we need to provide the
8402 instantiation of it. */
8403 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
8406 context_used = mono_class_check_context_used (mono_class_from_mono_type (handle));
8407 } else if (handle_class == mono_defaults.fieldhandle_class)
8408 context_used = mono_class_check_context_used (((MonoClassField*)handle)->parent);
8409 else if (handle_class == mono_defaults.methodhandle_class)
8410 context_used = mono_method_check_context_used (handle);
8412 g_assert_not_reached ();
8415 if ((cfg->opt & MONO_OPT_SHARED) &&
8416 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
8417 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
8418 MonoInst *addr, *vtvar, *iargs [3];
8419 int method_context_used;
8421 if (cfg->generic_sharing_context)
8422 method_context_used = mono_method_check_context_used (method);
8424 method_context_used = 0;
8426 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8428 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
8429 EMIT_NEW_ICONST (cfg, iargs [1], n);
8430 if (method_context_used) {
8431 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
8432 method, MONO_RGCTX_INFO_METHOD);
8433 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
8435 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
8436 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
8438 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8440 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8442 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8444 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
8445 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
8446 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
8447 (cmethod->klass == mono_defaults.monotype_class->parent) &&
8448 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
8449 MonoClass *tclass = mono_class_from_mono_type (handle);
8451 mono_class_init (tclass);
8453 ins = emit_get_rgctx_klass (cfg, context_used,
8454 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
8455 } else if (cfg->compile_aot) {
8456 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
8458 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
8460 ins->type = STACK_OBJ;
8461 ins->klass = cmethod->klass;
8464 MonoInst *addr, *vtvar;
8466 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8469 if (handle_class == mono_defaults.typehandle_class) {
8470 ins = emit_get_rgctx_klass (cfg, context_used,
8471 mono_class_from_mono_type (handle),
8472 MONO_RGCTX_INFO_TYPE);
8473 } else if (handle_class == mono_defaults.methodhandle_class) {
8474 ins = emit_get_rgctx_method (cfg, context_used,
8475 handle, MONO_RGCTX_INFO_METHOD);
8476 } else if (handle_class == mono_defaults.fieldhandle_class) {
8477 ins = emit_get_rgctx_field (cfg, context_used,
8478 handle, MONO_RGCTX_INFO_CLASS_FIELD);
8480 g_assert_not_reached ();
8482 } else if (cfg->compile_aot) {
8483 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n);
8485 EMIT_NEW_PCONST (cfg, ins, handle);
8487 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8488 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8489 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8499 MONO_INST_NEW (cfg, ins, OP_THROW);
8501 ins->sreg1 = sp [0]->dreg;
8503 bblock->out_of_line = TRUE;
8504 MONO_ADD_INS (bblock, ins);
8505 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
8506 MONO_ADD_INS (bblock, ins);
8509 link_bblock (cfg, bblock, end_bblock);
8510 start_new_bblock = 1;
8512 case CEE_ENDFINALLY:
8513 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
8514 MONO_ADD_INS (bblock, ins);
8516 start_new_bblock = 1;
8519 * Control will leave the method so empty the stack, otherwise
8520 * the next basic block will start with a nonempty stack.
8522 while (sp != stack_start) {
8530 if (*ip == CEE_LEAVE) {
8532 target = ip + 5 + (gint32)read32(ip + 1);
8535 target = ip + 2 + (signed char)(ip [1]);
8538 /* empty the stack */
8539 while (sp != stack_start) {
8544 * If this leave statement is in a catch block, check for a
8545 * pending exception, and rethrow it if necessary.
8547 for (i = 0; i < header->num_clauses; ++i) {
8548 MonoExceptionClause *clause = &header->clauses [i];
8551 * Use <= in the final comparison to handle clauses with multiple
8552 * leave statements, like in bug #78024.
8553 * The ordering of the exception clauses guarantees that we find the
8556 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len)) {
8558 MonoBasicBlock *dont_throw;
8563 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
8566 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
8568 NEW_BBLOCK (cfg, dont_throw);
8571 * Currently, we allways rethrow the abort exception, despite the
8572 * fact that this is not correct. See thread6.cs for an example.
8573 * But propagating the abort exception is more important than
8574 * getting the sematics right.
8576 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
8577 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
8578 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
8580 MONO_START_BB (cfg, dont_throw);
8585 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
8587 for (tmp = handlers; tmp; tmp = tmp->next) {
8589 link_bblock (cfg, bblock, tblock);
8590 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
8591 ins->inst_target_bb = tblock;
8592 MONO_ADD_INS (bblock, ins);
8594 g_list_free (handlers);
8597 MONO_INST_NEW (cfg, ins, OP_BR);
8598 MONO_ADD_INS (bblock, ins);
8599 GET_BBLOCK (cfg, tblock, target);
8600 link_bblock (cfg, bblock, tblock);
8601 ins->inst_target_bb = tblock;
8602 start_new_bblock = 1;
8604 if (*ip == CEE_LEAVE)
8613 * Mono specific opcodes
8615 case MONO_CUSTOM_PREFIX: {
8617 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
8621 case CEE_MONO_ICALL: {
8623 MonoJitICallInfo *info;
8625 token = read32 (ip + 2);
8626 func = mono_method_get_wrapper_data (method, token);
8627 info = mono_find_jit_icall_by_addr (func);
8630 CHECK_STACK (info->sig->param_count);
8631 sp -= info->sig->param_count;
8633 ins = mono_emit_jit_icall (cfg, info->func, sp);
8634 if (!MONO_TYPE_IS_VOID (info->sig->ret))
8638 inline_costs += 10 * num_calls++;
8642 case CEE_MONO_LDPTR: {
8645 CHECK_STACK_OVF (1);
8647 token = read32 (ip + 2);
8649 ptr = mono_method_get_wrapper_data (method, token);
8650 if (cfg->compile_aot && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (strstr (method->name, "__icall_wrapper_") == method->name)) {
8651 MonoJitICallInfo *callinfo;
8652 const char *icall_name;
8654 icall_name = method->name + strlen ("__icall_wrapper_");
8655 g_assert (icall_name);
8656 callinfo = mono_find_jit_icall_by_name (icall_name);
8657 g_assert (callinfo);
8659 if (ptr == callinfo->func) {
8660 /* Will be transformed into an AOTCONST later */
8661 EMIT_NEW_PCONST (cfg, ins, ptr);
8667 /* FIXME: Generalize this */
8668 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
8669 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
8674 EMIT_NEW_PCONST (cfg, ins, ptr);
8677 inline_costs += 10 * num_calls++;
8678 /* Can't embed random pointers into AOT code */
8679 cfg->disable_aot = 1;
8682 case CEE_MONO_ICALL_ADDR: {
8683 MonoMethod *cmethod;
8686 CHECK_STACK_OVF (1);
8688 token = read32 (ip + 2);
8690 cmethod = mono_method_get_wrapper_data (method, token);
8692 if (cfg->compile_aot) {
8693 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
8695 ptr = mono_lookup_internal_call (cmethod);
8697 EMIT_NEW_PCONST (cfg, ins, ptr);
8703 case CEE_MONO_VTADDR: {
8704 MonoInst *src_var, *src;
8710 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
8711 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
8716 case CEE_MONO_NEWOBJ: {
8717 MonoInst *iargs [2];
8719 CHECK_STACK_OVF (1);
8721 token = read32 (ip + 2);
8722 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
8723 mono_class_init (klass);
8724 NEW_DOMAINCONST (cfg, iargs [0]);
8725 MONO_ADD_INS (cfg->cbb, iargs [0]);
8726 NEW_CLASSCONST (cfg, iargs [1], klass);
8727 MONO_ADD_INS (cfg->cbb, iargs [1]);
8728 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
8730 inline_costs += 10 * num_calls++;
8733 case CEE_MONO_OBJADDR:
8736 MONO_INST_NEW (cfg, ins, OP_MOVE);
8737 ins->dreg = alloc_preg (cfg);
8738 ins->sreg1 = sp [0]->dreg;
8739 ins->type = STACK_MP;
8740 MONO_ADD_INS (cfg->cbb, ins);
8744 case CEE_MONO_LDNATIVEOBJ:
8746 * Similar to LDOBJ, but instead load the unmanaged
8747 * representation of the vtype to the stack.
8752 token = read32 (ip + 2);
8753 klass = mono_method_get_wrapper_data (method, token);
8754 g_assert (klass->valuetype);
8755 mono_class_init (klass);
8758 MonoInst *src, *dest, *temp;
8761 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
8762 temp->backend.is_pinvoke = 1;
8763 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
8764 mini_emit_stobj (cfg, dest, src, klass, TRUE);
8766 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
8767 dest->type = STACK_VTYPE;
8768 dest->klass = klass;
8774 case CEE_MONO_RETOBJ: {
8776 * Same as RET, but return the native representation of a vtype
8779 g_assert (cfg->ret);
8780 g_assert (mono_method_signature (method)->pinvoke);
8785 token = read32 (ip + 2);
8786 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
8788 if (!cfg->vret_addr) {
8789 g_assert (cfg->ret_var_is_local);
8791 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
8793 EMIT_NEW_RETLOADA (cfg, ins);
8795 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
8797 if (sp != stack_start)
8800 MONO_INST_NEW (cfg, ins, OP_BR);
8801 ins->inst_target_bb = end_bblock;
8802 MONO_ADD_INS (bblock, ins);
8803 link_bblock (cfg, bblock, end_bblock);
8804 start_new_bblock = 1;
8808 case CEE_MONO_CISINST:
8809 case CEE_MONO_CCASTCLASS: {
8814 token = read32 (ip + 2);
8815 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
8816 if (ip [1] == CEE_MONO_CISINST)
8817 ins = handle_cisinst (cfg, klass, sp [0]);
8819 ins = handle_ccastclass (cfg, klass, sp [0]);
8825 case CEE_MONO_SAVE_LMF:
8826 case CEE_MONO_RESTORE_LMF:
8827 #ifdef MONO_ARCH_HAVE_LMF_OPS
8828 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
8829 MONO_ADD_INS (bblock, ins);
8830 cfg->need_lmf_area = TRUE;
8834 case CEE_MONO_CLASSCONST:
8835 CHECK_STACK_OVF (1);
8837 token = read32 (ip + 2);
8838 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
8841 inline_costs += 10 * num_calls++;
8843 case CEE_MONO_NOT_TAKEN:
8844 bblock->out_of_line = TRUE;
8848 CHECK_STACK_OVF (1);
8850 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
8851 ins->dreg = alloc_preg (cfg);
8852 ins->inst_offset = (gint32)read32 (ip + 2);
8853 ins->type = STACK_PTR;
8854 MONO_ADD_INS (bblock, ins);
8859 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
8869 /* somewhat similar to LDTOKEN */
8870 MonoInst *addr, *vtvar;
8871 CHECK_STACK_OVF (1);
8872 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
8874 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8875 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
8877 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8878 ins->type = STACK_VTYPE;
8879 ins->klass = mono_defaults.argumenthandle_class;
8892 * The following transforms:
8893 * CEE_CEQ into OP_CEQ
8894 * CEE_CGT into OP_CGT
8895 * CEE_CGT_UN into OP_CGT_UN
8896 * CEE_CLT into OP_CLT
8897 * CEE_CLT_UN into OP_CLT_UN
8899 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
8901 MONO_INST_NEW (cfg, ins, cmp->opcode);
8903 cmp->sreg1 = sp [0]->dreg;
8904 cmp->sreg2 = sp [1]->dreg;
8905 type_from_op (cmp, sp [0], sp [1]);
8907 if ((sp [0]->type == STACK_I8) || ((sizeof (gpointer) == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
8908 cmp->opcode = OP_LCOMPARE;
8909 else if (sp [0]->type == STACK_R8)
8910 cmp->opcode = OP_FCOMPARE;
8912 cmp->opcode = OP_ICOMPARE;
8913 MONO_ADD_INS (bblock, cmp);
8914 ins->type = STACK_I4;
8915 ins->dreg = alloc_dreg (cfg, ins->type);
8916 type_from_op (ins, sp [0], sp [1]);
8918 if (cmp->opcode == OP_FCOMPARE) {
8920 * The backends expect the fceq opcodes to do the
8923 cmp->opcode = OP_NOP;
8924 ins->sreg1 = cmp->sreg1;
8925 ins->sreg2 = cmp->sreg2;
8927 MONO_ADD_INS (bblock, ins);
8934 MonoMethod *cil_method, *ctor_method;
8935 gboolean needs_static_rgctx_invoke;
8937 CHECK_STACK_OVF (1);
8939 n = read32 (ip + 2);
8940 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
8943 mono_class_init (cmethod->klass);
8945 mono_save_token_info (cfg, image, n, cmethod);
8947 if (cfg->generic_sharing_context)
8948 context_used = mono_method_check_context_used (cmethod);
8950 needs_static_rgctx_invoke = mono_method_needs_static_rgctx_invoke (cmethod, TRUE);
8952 cil_method = cmethod;
8953 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
8954 METHOD_ACCESS_FAILURE;
8956 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
8957 if (check_linkdemand (cfg, method, cmethod))
8959 CHECK_CFG_EXCEPTION;
8960 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
8961 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
8965 * Optimize the common case of ldftn+delegate creation
8967 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE) && !defined(HAVE_WRITE_BARRIERS)
8968 /* FIXME: SGEN support */
8969 /* FIXME: handle shared static generic methods */
8970 /* FIXME: handle this in shared code */
8971 if (!needs_static_rgctx_invoke && !context_used && (sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ) && (ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context)) && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
8972 MonoInst *target_ins;
8975 if (cfg->verbose_level > 3)
8976 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
8977 target_ins = sp [-1];
8979 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod);
8987 if (needs_static_rgctx_invoke)
8988 cmethod = mono_marshal_get_static_rgctx_invoke (cmethod);
8990 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
8991 } else if (needs_static_rgctx_invoke) {
8992 EMIT_NEW_METHODCONST (cfg, argconst, mono_marshal_get_static_rgctx_invoke (cmethod));
8994 EMIT_NEW_METHODCONST (cfg, argconst, cmethod);
8996 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
9000 inline_costs += 10 * num_calls++;
9003 case CEE_LDVIRTFTN: {
9008 n = read32 (ip + 2);
9009 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9012 mono_class_init (cmethod->klass);
9014 if (cfg->generic_sharing_context)
9015 context_used = mono_method_check_context_used (cmethod);
9017 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9018 if (check_linkdemand (cfg, method, cmethod))
9020 CHECK_CFG_EXCEPTION;
9021 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9022 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9029 args [1] = emit_get_rgctx_method (cfg, context_used,
9030 cmethod, MONO_RGCTX_INFO_METHOD);
9031 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
9033 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
9034 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
9038 inline_costs += 10 * num_calls++;
9042 CHECK_STACK_OVF (1);
9044 n = read16 (ip + 2);
9046 EMIT_NEW_ARGLOAD (cfg, ins, n);
9051 CHECK_STACK_OVF (1);
9053 n = read16 (ip + 2);
9055 NEW_ARGLOADA (cfg, ins, n);
9056 MONO_ADD_INS (cfg->cbb, ins);
9064 n = read16 (ip + 2);
9066 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
9068 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
9072 CHECK_STACK_OVF (1);
9074 n = read16 (ip + 2);
9076 EMIT_NEW_LOCLOAD (cfg, ins, n);
9081 unsigned char *tmp_ip;
9082 CHECK_STACK_OVF (1);
9084 n = read16 (ip + 2);
9087 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
9093 EMIT_NEW_LOCLOADA (cfg, ins, n);
9102 n = read16 (ip + 2);
9104 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
9106 emit_stloc_ir (cfg, sp, header, n);
9113 if (sp != stack_start)
9115 if (cfg->method != method)
9117 * Inlining this into a loop in a parent could lead to
9118 * stack overflows which is different behavior than the
9119 * non-inlined case, thus disable inlining in this case.
9121 goto inline_failure;
9123 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
9124 ins->dreg = alloc_preg (cfg);
9125 ins->sreg1 = sp [0]->dreg;
9126 ins->type = STACK_PTR;
9127 MONO_ADD_INS (cfg->cbb, ins);
9129 cfg->flags |= MONO_CFG_HAS_ALLOCA;
9130 if (header->init_locals)
9131 ins->flags |= MONO_INST_INIT;
9136 case CEE_ENDFILTER: {
9137 MonoExceptionClause *clause, *nearest;
9138 int cc, nearest_num;
9142 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
9144 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
9145 ins->sreg1 = (*sp)->dreg;
9146 MONO_ADD_INS (bblock, ins);
9147 start_new_bblock = 1;
9152 for (cc = 0; cc < header->num_clauses; ++cc) {
9153 clause = &header->clauses [cc];
9154 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
9155 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
9156 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
9162 if ((ip - header->code) != nearest->handler_offset)
9167 case CEE_UNALIGNED_:
9168 ins_flag |= MONO_INST_UNALIGNED;
9169 /* FIXME: record alignment? we can assume 1 for now */
9174 ins_flag |= MONO_INST_VOLATILE;
9178 ins_flag |= MONO_INST_TAILCALL;
9179 cfg->flags |= MONO_CFG_HAS_TAIL;
9180 /* Can't inline tail calls at this time */
9181 inline_costs += 100000;
9188 token = read32 (ip + 2);
9189 klass = mini_get_class (method, token, generic_context);
9190 CHECK_TYPELOAD (klass);
9191 if (generic_class_is_reference_type (cfg, klass))
9192 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
9194 mini_emit_initobj (cfg, *sp, NULL, klass);
9198 case CEE_CONSTRAINED_:
9200 token = read32 (ip + 2);
9201 constrained_call = mono_class_get_full (image, token, generic_context);
9202 CHECK_TYPELOAD (constrained_call);
9207 MonoInst *iargs [3];
9211 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
9212 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
9213 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
9214 /* emit_memset only works when val == 0 */
9215 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
9220 if (ip [1] == CEE_CPBLK) {
9221 MonoMethod *memcpy_method = get_memcpy_method ();
9222 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
9224 MonoMethod *memset_method = get_memset_method ();
9225 mono_emit_method_call (cfg, memset_method, iargs, NULL);
9235 ins_flag |= MONO_INST_NOTYPECHECK;
9237 ins_flag |= MONO_INST_NORANGECHECK;
9238 /* we ignore the no-nullcheck for now since we
9239 * really do it explicitly only when doing callvirt->call
9245 int handler_offset = -1;
9247 for (i = 0; i < header->num_clauses; ++i) {
9248 MonoExceptionClause *clause = &header->clauses [i];
9249 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
9250 handler_offset = clause->handler_offset;
9255 bblock->flags |= BB_EXCEPTION_UNSAFE;
9257 g_assert (handler_offset != -1);
9259 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
9260 MONO_INST_NEW (cfg, ins, OP_RETHROW);
9261 ins->sreg1 = load->dreg;
9262 MONO_ADD_INS (bblock, ins);
9264 link_bblock (cfg, bblock, end_bblock);
9265 start_new_bblock = 1;
9273 CHECK_STACK_OVF (1);
9275 token = read32 (ip + 2);
9276 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC) {
9277 MonoType *type = mono_type_create_from_typespec (image, token);
9278 token = mono_type_size (type, &ialign);
9280 MonoClass *klass = mono_class_get_full (image, token, generic_context);
9281 CHECK_TYPELOAD (klass);
9282 mono_class_init (klass);
9283 token = mono_class_value_size (klass, &align);
9285 EMIT_NEW_ICONST (cfg, ins, token);
9290 case CEE_REFANYTYPE: {
9291 MonoInst *src_var, *src;
9297 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9299 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
9300 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
9301 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
9311 g_error ("opcode 0xfe 0x%02x not handled", ip [1]);
9316 g_error ("opcode 0x%02x not handled", *ip);
9319 if (start_new_bblock != 1)
9322 bblock->cil_length = ip - bblock->cil_code;
9323 bblock->next_bb = end_bblock;
9325 if (cfg->method == method && cfg->domainvar) {
9327 MonoInst *get_domain;
9329 cfg->cbb = init_localsbb;
9331 if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
9332 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
9335 get_domain->dreg = alloc_preg (cfg);
9336 MONO_ADD_INS (cfg->cbb, get_domain);
9338 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
9339 MONO_ADD_INS (cfg->cbb, store);
9342 if (cfg->method == method && cfg->got_var)
9343 mono_emit_load_got_addr (cfg);
9345 if (header->init_locals) {
9348 cfg->cbb = init_localsbb;
9350 for (i = 0; i < header->num_locals; ++i) {
9351 MonoType *ptype = header->locals [i];
9352 int t = ptype->type;
9353 dreg = cfg->locals [i]->dreg;
9355 if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
9356 t = ptype->data.klass->enum_basetype->type;
9358 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9359 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
9360 MONO_EMIT_NEW_ICONST (cfg, cfg->locals [i]->dreg, 0);
9361 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
9362 MONO_EMIT_NEW_I8CONST (cfg, cfg->locals [i]->dreg, 0);
9363 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
9364 MONO_INST_NEW (cfg, ins, OP_R8CONST);
9365 ins->type = STACK_R8;
9366 ins->inst_p0 = (void*)&r8_0;
9367 ins->dreg = alloc_dreg (cfg, STACK_R8);
9368 MONO_ADD_INS (init_localsbb, ins);
9369 EMIT_NEW_LOCSTORE (cfg, store, i, ins);
9370 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
9371 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
9372 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
9374 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9381 if (cfg->method == method) {
9383 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
9384 bb->region = mono_find_block_region (cfg, bb->real_offset);
9386 mono_create_spvar_for_region (cfg, bb->region);
9387 if (cfg->verbose_level > 2)
9388 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
9392 g_slist_free (class_inits);
9393 dont_inline = g_list_remove (dont_inline, method);
9395 if (inline_costs < 0) {
9398 /* Method is too large */
9399 mname = mono_method_full_name (method, TRUE);
9400 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
9401 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
9406 if ((cfg->verbose_level > 2) && (cfg->method == method))
9407 mono_print_code (cfg, "AFTER METHOD-TO-IR");
9409 return inline_costs;
9412 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
9413 g_slist_free (class_inits);
9414 dont_inline = g_list_remove (dont_inline, method);
9418 g_slist_free (class_inits);
9419 dont_inline = g_list_remove (dont_inline, method);
9423 g_slist_free (class_inits);
9424 dont_inline = g_list_remove (dont_inline, method);
9425 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
9429 g_slist_free (class_inits);
9430 dont_inline = g_list_remove (dont_inline, method);
9431 set_exception_type_from_invalid_il (cfg, method, ip);
9436 store_membase_reg_to_store_membase_imm (int opcode)
9439 case OP_STORE_MEMBASE_REG:
9440 return OP_STORE_MEMBASE_IMM;
9441 case OP_STOREI1_MEMBASE_REG:
9442 return OP_STOREI1_MEMBASE_IMM;
9443 case OP_STOREI2_MEMBASE_REG:
9444 return OP_STOREI2_MEMBASE_IMM;
9445 case OP_STOREI4_MEMBASE_REG:
9446 return OP_STOREI4_MEMBASE_IMM;
9447 case OP_STOREI8_MEMBASE_REG:
9448 return OP_STOREI8_MEMBASE_IMM;
9450 g_assert_not_reached ();
9456 #endif /* DISABLE_JIT */
9459 mono_op_to_op_imm (int opcode)
9469 return OP_IDIV_UN_IMM;
9473 return OP_IREM_UN_IMM;
9487 return OP_ISHR_UN_IMM;
9504 return OP_LSHR_UN_IMM;
9507 return OP_COMPARE_IMM;
9509 return OP_ICOMPARE_IMM;
9511 return OP_LCOMPARE_IMM;
9513 case OP_STORE_MEMBASE_REG:
9514 return OP_STORE_MEMBASE_IMM;
9515 case OP_STOREI1_MEMBASE_REG:
9516 return OP_STOREI1_MEMBASE_IMM;
9517 case OP_STOREI2_MEMBASE_REG:
9518 return OP_STOREI2_MEMBASE_IMM;
9519 case OP_STOREI4_MEMBASE_REG:
9520 return OP_STOREI4_MEMBASE_IMM;
9522 #if defined(__i386__) || defined (__x86_64__)
9524 return OP_X86_PUSH_IMM;
9525 case OP_X86_COMPARE_MEMBASE_REG:
9526 return OP_X86_COMPARE_MEMBASE_IMM;
9528 #if defined(__x86_64__)
9529 case OP_AMD64_ICOMPARE_MEMBASE_REG:
9530 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
9532 case OP_VOIDCALL_REG:
9541 return OP_LOCALLOC_IMM;
9548 ldind_to_load_membase (int opcode)
9552 return OP_LOADI1_MEMBASE;
9554 return OP_LOADU1_MEMBASE;
9556 return OP_LOADI2_MEMBASE;
9558 return OP_LOADU2_MEMBASE;
9560 return OP_LOADI4_MEMBASE;
9562 return OP_LOADU4_MEMBASE;
9564 return OP_LOAD_MEMBASE;
9566 return OP_LOAD_MEMBASE;
9568 return OP_LOADI8_MEMBASE;
9570 return OP_LOADR4_MEMBASE;
9572 return OP_LOADR8_MEMBASE;
9574 g_assert_not_reached ();
9581 stind_to_store_membase (int opcode)
9585 return OP_STOREI1_MEMBASE_REG;
9587 return OP_STOREI2_MEMBASE_REG;
9589 return OP_STOREI4_MEMBASE_REG;
9592 return OP_STORE_MEMBASE_REG;
9594 return OP_STOREI8_MEMBASE_REG;
9596 return OP_STORER4_MEMBASE_REG;
9598 return OP_STORER8_MEMBASE_REG;
9600 g_assert_not_reached ();
9607 mono_load_membase_to_load_mem (int opcode)
9609 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
9610 #if defined(__i386__) || defined(__x86_64__)
9612 case OP_LOAD_MEMBASE:
9614 case OP_LOADU1_MEMBASE:
9615 return OP_LOADU1_MEM;
9616 case OP_LOADU2_MEMBASE:
9617 return OP_LOADU2_MEM;
9618 case OP_LOADI4_MEMBASE:
9619 return OP_LOADI4_MEM;
9620 case OP_LOADU4_MEMBASE:
9621 return OP_LOADU4_MEM;
9622 #if SIZEOF_VOID_P == 8
9623 case OP_LOADI8_MEMBASE:
9624 return OP_LOADI8_MEM;
9633 op_to_op_dest_membase (int store_opcode, int opcode)
9635 #if defined(__i386__)
9636 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
9641 return OP_X86_ADD_MEMBASE_REG;
9643 return OP_X86_SUB_MEMBASE_REG;
9645 return OP_X86_AND_MEMBASE_REG;
9647 return OP_X86_OR_MEMBASE_REG;
9649 return OP_X86_XOR_MEMBASE_REG;
9652 return OP_X86_ADD_MEMBASE_IMM;
9655 return OP_X86_SUB_MEMBASE_IMM;
9658 return OP_X86_AND_MEMBASE_IMM;
9661 return OP_X86_OR_MEMBASE_IMM;
9664 return OP_X86_XOR_MEMBASE_IMM;
9670 #if defined(__x86_64__)
9671 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
9676 return OP_X86_ADD_MEMBASE_REG;
9678 return OP_X86_SUB_MEMBASE_REG;
9680 return OP_X86_AND_MEMBASE_REG;
9682 return OP_X86_OR_MEMBASE_REG;
9684 return OP_X86_XOR_MEMBASE_REG;
9686 return OP_X86_ADD_MEMBASE_IMM;
9688 return OP_X86_SUB_MEMBASE_IMM;
9690 return OP_X86_AND_MEMBASE_IMM;
9692 return OP_X86_OR_MEMBASE_IMM;
9694 return OP_X86_XOR_MEMBASE_IMM;
9696 return OP_AMD64_ADD_MEMBASE_REG;
9698 return OP_AMD64_SUB_MEMBASE_REG;
9700 return OP_AMD64_AND_MEMBASE_REG;
9702 return OP_AMD64_OR_MEMBASE_REG;
9704 return OP_AMD64_XOR_MEMBASE_REG;
9707 return OP_AMD64_ADD_MEMBASE_IMM;
9710 return OP_AMD64_SUB_MEMBASE_IMM;
9713 return OP_AMD64_AND_MEMBASE_IMM;
9716 return OP_AMD64_OR_MEMBASE_IMM;
9719 return OP_AMD64_XOR_MEMBASE_IMM;
9729 op_to_op_store_membase (int store_opcode, int opcode)
9731 #if defined(__i386__) || defined(__x86_64__)
9734 if (store_opcode == OP_STOREI1_MEMBASE_REG)
9735 return OP_X86_SETEQ_MEMBASE;
9737 if (store_opcode == OP_STOREI1_MEMBASE_REG)
9738 return OP_X86_SETNE_MEMBASE;
9746 op_to_op_src1_membase (int load_opcode, int opcode)
9749 /* FIXME: This has sign extension issues */
9751 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
9752 return OP_X86_COMPARE_MEMBASE8_IMM;
9755 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
9760 return OP_X86_PUSH_MEMBASE;
9761 case OP_COMPARE_IMM:
9762 case OP_ICOMPARE_IMM:
9763 return OP_X86_COMPARE_MEMBASE_IMM;
9766 return OP_X86_COMPARE_MEMBASE_REG;
9771 /* FIXME: This has sign extension issues */
9773 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
9774 return OP_X86_COMPARE_MEMBASE8_IMM;
9779 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
9780 return OP_X86_PUSH_MEMBASE;
9782 /* FIXME: This only works for 32 bit immediates
9783 case OP_COMPARE_IMM:
9784 case OP_LCOMPARE_IMM:
9785 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
9786 return OP_AMD64_COMPARE_MEMBASE_IMM;
9788 case OP_ICOMPARE_IMM:
9789 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9790 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
9794 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
9795 return OP_AMD64_COMPARE_MEMBASE_REG;
9798 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9799 return OP_AMD64_ICOMPARE_MEMBASE_REG;
9808 op_to_op_src2_membase (int load_opcode, int opcode)
9811 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
9817 return OP_X86_COMPARE_REG_MEMBASE;
9819 return OP_X86_ADD_REG_MEMBASE;
9821 return OP_X86_SUB_REG_MEMBASE;
9823 return OP_X86_AND_REG_MEMBASE;
9825 return OP_X86_OR_REG_MEMBASE;
9827 return OP_X86_XOR_REG_MEMBASE;
9834 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9835 return OP_AMD64_ICOMPARE_REG_MEMBASE;
9839 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9840 return OP_AMD64_COMPARE_REG_MEMBASE;
9843 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9844 return OP_X86_ADD_REG_MEMBASE;
9846 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9847 return OP_X86_SUB_REG_MEMBASE;
9849 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9850 return OP_X86_AND_REG_MEMBASE;
9852 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9853 return OP_X86_OR_REG_MEMBASE;
9855 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9856 return OP_X86_XOR_REG_MEMBASE;
9858 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9859 return OP_AMD64_ADD_REG_MEMBASE;
9861 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9862 return OP_AMD64_SUB_REG_MEMBASE;
9864 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9865 return OP_AMD64_AND_REG_MEMBASE;
9867 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9868 return OP_AMD64_OR_REG_MEMBASE;
9870 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9871 return OP_AMD64_XOR_REG_MEMBASE;
9879 mono_op_to_op_imm_noemul (int opcode)
9882 #if SIZEOF_VOID_P == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
9887 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
9895 return mono_op_to_op_imm (opcode);
9902 * mono_handle_global_vregs:
9904 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
/*
 * mono_handle_global_vregs:
 *
 *   Pass 1: scan every bblock and mark any vreg referenced from more than one
 * bblock as 'global' by allocating a MonoInst variable for it
 * (mono_compile_create_var_for_vreg). Pass 2: demote variables used in only
 * one bblock back to local vregs (flagging the var MONO_INST_IS_DEAD), then
 * compact cfg->varinfo/cfg->vars so liveness computation is faster.
 *
 * NOTE(review): this listing is elided (gaps in the embedded line numbers);
 * comments below describe only what the visible lines establish.
 */
9908 mono_handle_global_vregs (MonoCompile *cfg)
/* vreg_to_bb [vreg] holds (block_num + 1) of the single bblock using the
 * vreg, or -1 once it has been seen in a second bblock ("0 is a valid block
 * num", see below). */
9914 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
9916 #ifdef MONO_ARCH_SIMD_INTRINSICS
9917 if (cfg->uses_simd_intrinsics)
9918 mono_simd_simplify_indirection (cfg);
9921 /* Find local vregs used in more than one bb */
9922 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
9923 MonoInst *ins = bb->code;
9924 int block_num = bb->block_num;
9926 if (cfg->verbose_level > 2)
9927 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
9930 for (; ins; ins = ins->next) {
9931 const char *spec = INS_INFO (ins->opcode);
9932 int regtype, regindex;
9935 if (G_UNLIKELY (cfg->verbose_level > 2))
9936 mono_print_ins (ins);
/* This pass runs after CIL opcodes were lowered to machine-level IR. */
9938 g_assert (ins->opcode >= MONO_CEE_LAST);
/* regindex 0 = dest, 1 = src1, 2 = src2 */
9940 for (regindex = 0; regindex < 3; regindex ++) {
9943 if (regindex == 0) {
9944 regtype = spec [MONO_INST_DEST];
9948 } else if (regindex == 1) {
9949 regtype = spec [MONO_INST_SRC1];
9954 regtype = spec [MONO_INST_SRC2];
9960 #if SIZEOF_VOID_P == 4
9961 if (regtype == 'l') {
9963 * Since some instructions reference the original long vreg,
9964 * and some reference the two component vregs, it is quite hard
9965 * to determine when it needs to be global. So be conservative.
9967 if (!get_vreg_to_inst (cfg, vreg)) {
9968 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
9970 if (cfg->verbose_level > 2)
9971 printf ("LONG VREG R%d made global.\n", vreg);
9975 * Make the component vregs volatile since the optimizations can
9976 * get confused otherwise.
9978 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
9979 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
9983 g_assert (vreg != -1);
9985 prev_bb = vreg_to_bb [vreg];
9987 /* 0 is a valid block num */
9988 vreg_to_bb [vreg] = block_num + 1;
9989 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* Hard registers are implicitly global; skip them. */
9990 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
9993 if (!get_vreg_to_inst (cfg, vreg)) {
9994 if (G_UNLIKELY (cfg->verbose_level > 2))
9995 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
9999 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
10002 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
10005 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
10008 g_assert_not_reached ();
10012 /* Flag as having been used in more than one bb */
10013 vreg_to_bb [vreg] = -1;
10019 /* If a variable is used in only one bblock, convert it into a local vreg */
10020 for (i = 0; i < cfg->num_varinfo; i++) {
10021 MonoInst *var = cfg->varinfo [i];
10022 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
10024 switch (var->type) {
10030 #if SIZEOF_VOID_P == 8
10033 #if !defined(__i386__) && !defined(MONO_ARCH_SOFT_FLOAT)
10034 /* Enabling this screws up the fp stack on x86 */
10037 /* Arguments are implicitly global */
10038 /* Putting R4 vars into registers doesn't work currently */
10039 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg) {
/* Make sure that the variable's liveness interval doesn't contain a call, since */
10042 * that would cause the lvreg to be spilled, making the whole optimization
10045 /* This is too slow for JIT compilation */
10047 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
10049 int def_index, call_index, ins_index;
10050 gboolean spilled = FALSE;
10055 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
10056 const char *spec = INS_INFO (ins->opcode);
10058 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
10059 def_index = ins_index;
10061 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
/* FIX: the second operand test previously duplicated the SRC1/sreg1 check,
 * so a use of the variable as the *second* source operand was never seen
 * here and the call-crosses-liveness check below could be skipped,
 * wrongly keeping the var as an lvreg. Check SRC2/sreg2 instead. */
10062 ((spec [MONO_INST_SRC2] != ' ') && (ins->sreg2 == var->dreg))) {
10063 if (call_index > def_index) {
10069 if (MONO_IS_CALL (ins))
10070 call_index = ins_index;
10080 if (G_UNLIKELY (cfg->verbose_level > 2))
10081 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
10082 var->flags |= MONO_INST_IS_DEAD;
10083 cfg->vreg_to_inst [var->dreg] = NULL;
10090 * Compress the varinfo and vars tables so the liveness computation is faster and
10091 * takes up less space.
10094 for (i = 0; i < cfg->num_varinfo; ++i) {
10095 MonoInst *var = cfg->varinfo [i];
10096 if (pos < i && cfg->locals_start == i)
10097 cfg->locals_start = pos;
10098 if (!(var->flags & MONO_INST_IS_DEAD)) {
10100 cfg->varinfo [pos] = cfg->varinfo [i];
10101 cfg->varinfo [pos]->inst_c0 = pos;
10102 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
10103 cfg->vars [pos].idx = pos;
10104 #if SIZEOF_VOID_P == 4
10105 if (cfg->varinfo [pos]->type == STACK_I8) {
10106 /* Modify the two component vars too */
10109 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
10110 var1->inst_c0 = pos;
10111 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
10112 var1->inst_c0 = pos;
10119 cfg->num_varinfo = pos;
10120 if (cfg->locals_start > cfg->num_varinfo)
10121 cfg->locals_start = cfg->num_varinfo;
10125 * mono_spill_global_vars:
10127 * Generate spill code for variables which are not allocated to registers,
10128 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
10129 * code is generated which could be optimized by the local optimization passes.
/*
 * NOTE(review): this listing is elided (gaps in the embedded line numbers);
 * several declarations (lvregs, spec2, store, lvreg, no_lvreg, ...) and some
 * else-branches are missing from view. Code is left byte-identical; comments
 * only describe what the visible lines establish.
 */
10132 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
10134 MonoBasicBlock *bb;
10136 int orig_next_vreg;
10137 guint32 *vreg_to_lvreg;
10139 guint32 i, lvregs_len;
10140 gboolean dest_has_lvreg = FALSE;
/* Maps the regtype spec chars ('i', 'l', 'f', 'x') to stack types for
 * alloc_dreg (); indexed directly by the char value. */
10141 guint32 stacktypes [128];
10143 *need_local_opts = FALSE;
10145 memset (spec2, 0, sizeof (spec2));
10147 /* FIXME: Move this function to mini.c */
10148 stacktypes ['i'] = STACK_PTR;
10149 stacktypes ['l'] = STACK_I8;
10150 stacktypes ['f'] = STACK_R8;
10151 #ifdef MONO_ARCH_SIMD_INTRINSICS
10152 stacktypes ['x'] = STACK_VTYPE;
10155 #if SIZEOF_VOID_P == 4
10156 /* Create MonoInsts for longs */
10157 for (i = 0; i < cfg->num_varinfo; i++) {
10158 MonoInst *ins = cfg->varinfo [i];
10160 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
10161 switch (ins->type) {
10162 #ifdef MONO_ARCH_SOFT_FLOAT
/* Give the two 32-bit halves of a stack-allocated long their own
 * REGOFFSET insts at the low/high word offsets of the parent slot. */
10168 g_assert (ins->opcode == OP_REGOFFSET);
10170 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
10172 tree->opcode = OP_REGOFFSET;
10173 tree->inst_basereg = ins->inst_basereg;
10174 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
10176 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
10178 tree->opcode = OP_REGOFFSET;
10179 tree->inst_basereg = ins->inst_basereg;
10180 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
10190 /* FIXME: widening and truncation */
10193 * As an optimization, when a variable allocated to the stack is first loaded into
10194 * an lvreg, we will remember the lvreg and use it the next time instead of loading
10195 * the variable again.
10197 orig_next_vreg = cfg->next_vreg;
10198 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
/* Fixed-capacity list of vregs with a cached lvreg, so the cache can be
 * invalidated cheaply (see the g_assert (lvregs_len < 1024) checks below). */
10199 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
10202 /* Add spill loads/stores */
10203 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10206 if (cfg->verbose_level > 2)
10207 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
10209 /* Clear vreg_to_lvreg array */
10210 for (i = 0; i < lvregs_len; i++)
10211 vreg_to_lvreg [lvregs [i]] = 0;
10215 MONO_BB_FOR_EACH_INS (bb, ins) {
10216 const char *spec = INS_INFO (ins->opcode);
10217 int regtype, srcindex, sreg, tmp_reg, prev_dreg;
10218 gboolean store, no_lvreg;
10220 if (G_UNLIKELY (cfg->verbose_level > 2))
10221 mono_print_ins (ins);
10223 if (ins->opcode == OP_NOP)
10227 * We handle LDADDR here as well, since it can only be decomposed
10228 * when variable addresses are known.
10230 if (ins->opcode == OP_LDADDR) {
10231 MonoInst *var = ins->inst_p0;
10233 if (var->opcode == OP_VTARG_ADDR) {
10234 /* Happens on SPARC/S390 where vtypes are passed by reference */
10235 MonoInst *vtaddr = var->inst_left;
10236 if (vtaddr->opcode == OP_REGVAR) {
10237 ins->opcode = OP_MOVE;
10238 ins->sreg1 = vtaddr->dreg;
10240 else if (var->inst_left->opcode == OP_REGOFFSET) {
10241 ins->opcode = OP_LOAD_MEMBASE;
10242 ins->inst_basereg = vtaddr->inst_basereg;
10243 ins->inst_offset = vtaddr->inst_offset;
10247 g_assert (var->opcode == OP_REGOFFSET);
/* LDADDR of a stack slot becomes basereg + offset. */
10249 ins->opcode = OP_ADD_IMM;
10250 ins->sreg1 = var->inst_basereg;
10251 ins->inst_imm = var->inst_offset;
10254 *need_local_opts = TRUE;
10255 spec = INS_INFO (ins->opcode);
10258 if (ins->opcode < MONO_CEE_LAST) {
10259 mono_print_ins (ins);
10260 g_assert_not_reached ();
10264 * Store opcodes have destbasereg in the dreg, but in reality, it is an
10268 if (MONO_IS_STORE_MEMBASE (ins)) {
/* Temporarily swap dreg <-> sreg2 so the dreg slot holds the stored
 * value; swapped back near the end of the loop (see 10498-10500). */
10269 tmp_reg = ins->dreg;
10270 ins->dreg = ins->sreg2;
10271 ins->sreg2 = tmp_reg;
10274 spec2 [MONO_INST_DEST] = ' ';
10275 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10276 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10278 } else if (MONO_IS_STORE_MEMINDEX (ins))
10279 g_assert_not_reached ();
10284 if (G_UNLIKELY (cfg->verbose_level > 2))
10285 printf ("\t %.3s %d %d %d\n", spec, ins->dreg, ins->sreg1, ins->sreg2);
/***************/
/*    DREG     */
/***************/
10290 regtype = spec [MONO_INST_DEST];
10291 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
10294 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
10295 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
10296 MonoInst *store_ins;
10299 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
10301 if (var->opcode == OP_REGVAR) {
/* Variable lives in a register: just redirect the dreg. */
10302 ins->dreg = var->dreg;
10303 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
10305 * Instead of emitting a load+store, use a _membase opcode.
10307 g_assert (var->opcode == OP_REGOFFSET);
10308 if (ins->opcode == OP_MOVE) {
10311 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
10312 ins->inst_basereg = var->inst_basereg;
10313 ins->inst_offset = var->inst_offset;
10316 spec = INS_INFO (ins->opcode);
10320 g_assert (var->opcode == OP_REGOFFSET);
10322 prev_dreg = ins->dreg;
10324 /* Invalidate any previous lvreg for this vreg */
10325 vreg_to_lvreg [ins->dreg] = 0;
10329 #ifdef MONO_ARCH_SOFT_FLOAT
10330 if (store_opcode == OP_STORER8_MEMBASE_REG) {
10332 store_opcode = OP_STOREI8_MEMBASE_REG;
/* Redirect the def to a fresh lvreg; a spill store follows below. */
10336 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
10338 if (regtype == 'l') {
10339 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
10340 mono_bblock_insert_after_ins (bb, ins, store_ins);
10341 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
10342 mono_bblock_insert_after_ins (bb, ins, store_ins);
10345 g_assert (store_opcode != OP_STOREV_MEMBASE);
10347 /* Try to fuse the store into the instruction itself */
10348 /* FIXME: Add more instructions */
10349 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
10350 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
10351 ins->inst_imm = ins->inst_c0;
10352 ins->inst_destbasereg = var->inst_basereg;
10353 ins->inst_offset = var->inst_offset;
10354 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
10355 ins->opcode = store_opcode;
10356 ins->inst_destbasereg = var->inst_basereg;
10357 ins->inst_offset = var->inst_offset;
10361 tmp_reg = ins->dreg;
10362 ins->dreg = ins->sreg2;
10363 ins->sreg2 = tmp_reg;
10366 spec2 [MONO_INST_DEST] = ' ';
10367 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10368 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10370 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
10371 // FIXME: The backends expect the base reg to be in inst_basereg
10372 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
10374 ins->inst_basereg = var->inst_basereg;
10375 ins->inst_offset = var->inst_offset;
10376 spec = INS_INFO (ins->opcode);
10378 /* printf ("INS: "); mono_print_ins (ins); */
10379 /* Create a store instruction */
10380 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
10382 /* Insert it after the instruction */
10383 mono_bblock_insert_after_ins (bb, ins, store_ins);
10386 * We can't assign ins->dreg to var->dreg here, since the
10387 * sregs could use it. So set a flag, and do it after
10390 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
10391 dest_has_lvreg = TRUE;
/************/
/*  SREGS   */
/************/
10400 for (srcindex = 0; srcindex < 2; ++srcindex) {
10401 regtype = spec [(srcindex == 0) ? MONO_INST_SRC1 : MONO_INST_SRC2];
10402 sreg = srcindex == 0 ? ins->sreg1 : ins->sreg2;
10404 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
10405 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
10406 MonoInst *var = get_vreg_to_inst (cfg, sreg);
10407 MonoInst *load_ins;
10408 guint32 load_opcode;
10410 if (var->opcode == OP_REGVAR) {
10412 ins->sreg1 = var->dreg;
10414 ins->sreg2 = var->dreg;
10418 g_assert (var->opcode == OP_REGOFFSET);
10420 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
10422 g_assert (load_opcode != OP_LOADV_MEMBASE);
10424 if (vreg_to_lvreg [sreg]) {
10425 /* The variable is already loaded to an lvreg */
10426 if (G_UNLIKELY (cfg->verbose_level > 2))
10427 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
10429 ins->sreg1 = vreg_to_lvreg [sreg];
10431 ins->sreg2 = vreg_to_lvreg [sreg];
10435 /* Try to fuse the load into the instruction */
10436 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
10437 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
10438 ins->inst_basereg = var->inst_basereg;
10439 ins->inst_offset = var->inst_offset;
10440 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
10441 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
10442 ins->sreg2 = var->inst_basereg;
10443 ins->inst_offset = var->inst_offset;
10445 if (MONO_IS_REAL_MOVE (ins)) {
/* A plain move to/from the same storage becomes dead. */
10446 ins->opcode = OP_NOP;
10449 //printf ("%d ", srcindex); mono_print_ins (ins);
10451 sreg = alloc_dreg (cfg, stacktypes [regtype]);
10453 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
10454 if (var->dreg == prev_dreg) {
10456 * sreg refers to the value loaded by the load
10457 * emitted below, but we need to use ins->dreg
10458 * since it refers to the store emitted earlier.
10462 vreg_to_lvreg [var->dreg] = sreg;
10463 g_assert (lvregs_len < 1024);
10464 lvregs [lvregs_len ++] = var->dreg;
10473 if (regtype == 'l') {
/* MS word loaded first, then LS word, both inserted before ins. */
10474 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
10475 mono_bblock_insert_before_ins (bb, ins, load_ins);
10476 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
10477 mono_bblock_insert_before_ins (bb, ins, load_ins);
10480 #if SIZEOF_VOID_P == 4
10481 g_assert (load_opcode != OP_LOADI8_MEMBASE);
10483 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
10484 mono_bblock_insert_before_ins (bb, ins, load_ins);
/* Deferred from the DREG section: now that the sregs are processed it is
 * safe to record the dreg as the cached lvreg for prev_dreg. */
10490 if (dest_has_lvreg) {
10491 vreg_to_lvreg [prev_dreg] = ins->dreg;
10492 g_assert (lvregs_len < 1024);
10493 lvregs [lvregs_len ++] = prev_dreg;
10494 dest_has_lvreg = FALSE;
/* Undo the dreg <-> sreg2 swap done for store opcodes above. */
10498 tmp_reg = ins->dreg;
10499 ins->dreg = ins->sreg2;
10500 ins->sreg2 = tmp_reg;
10503 if (MONO_IS_CALL (ins)) {
/* Calls clobber the lvregs, so the cache must be flushed. */
10504 /* Clear vreg_to_lvreg array */
10505 for (i = 0; i < lvregs_len; i++)
10506 vreg_to_lvreg [lvregs [i]] = 0;
10510 if (cfg->verbose_level > 2)
10511 mono_print_ins_index (1, ins);
10518 * - use 'iadd' instead of 'int_add'
10519 * - handling ovf opcodes: decompose in method_to_ir.
10520 * - unify iregs/fregs
10521 * -> partly done, the missing parts are:
10522 * - a more complete unification would involve unifying the hregs as well, so
10523 * code wouldn't need if (fp) all over the place. but that would mean the hregs
10524 * would no longer map to the machine hregs, so the code generators would need to
10525 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
10526 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
10527 * fp/non-fp branches speeds it up by about 15%.
10528 * - use sext/zext opcodes instead of shifts
10530 * - get rid of TEMPLOADs if possible and use vregs instead
10531 * - clean up usage of OP_P/OP_ opcodes
10532 * - cleanup usage of DUMMY_USE
10533 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
10535 * - set the stack type and allocate a dreg in the EMIT_NEW macros
10536 * - get rid of all the <foo>2 stuff when the new JIT is ready.
10537 * - make sure handle_stack_args () is called before the branch is emitted
10538 * - when the new IR is done, get rid of all unused stuff
10539 * - COMPARE/BEQ as separate instructions or unify them ?
10540 * - keeping them separate allows specialized compare instructions like
10541 * compare_imm, compare_membase
10542 * - most back ends unify fp compare+branch, fp compare+ceq
10543 * - integrate mono_save_args into inline_method
10544 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
10545 * - handle long shift opts on 32 bit platforms somehow: they require
10546 * 3 sregs (2 for arg1 and 1 for arg2)
10547 * - make byref a 'normal' type.
10548 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
10549 * variable if needed.
10550 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
10551 * like inline_method.
10552 * - remove inlining restrictions
10553 * - fix LNEG and enable cfold of INEG
10554 * - generalize x86 optimizations like ldelema as a peephole optimization
10555 * - add store_mem_imm for amd64
10556 * - optimize the loading of the interruption flag in the managed->native wrappers
10557 * - avoid special handling of OP_NOP in passes
10558 * - move code inserting instructions into one function/macro.
10559 * - try a coalescing phase after liveness analysis
10560 * - add float -> vreg conversion + local optimizations on !x86
10561 * - figure out how to handle decomposed branches during optimizations, ie.
10562 * compare+branch, op_jump_table+op_br etc.
10563 * - promote RuntimeXHandles to vregs
10564 * - vtype cleanups:
10565 * - add a NEW_VARLOADA_VREG macro
10566 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
10567 * accessing vtype fields.
10568 * - get rid of I8CONST on 64 bit platforms
10569 * - dealing with the increase in code size due to branches created during opcode
10571 * - use extended basic blocks
10572 * - all parts of the JIT
10573 * - handle_global_vregs () && local regalloc
10574 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
10575 * - sources of increase in code size:
10578 * - isinst and castclass
10579 * - lvregs not allocated to global registers even if used multiple times
10580 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
10582 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
10583 * - add all micro optimizations from the old JIT
10584 * - put tree optimizations into the deadce pass
10585 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
10586 * specific function.
10587 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
10588 * fcompare + branchCC.
10589 * - create a helper function for allocating a stack slot, taking into account
10590 * MONO_CFG_HAS_SPILLUP.
10591 * - merge new GC changes in mini.c.
10593 * - merge the ia64 switch changes.
10594 * - merge the mips conditional changes.
10595 * - remove unused opcodes from mini-ops.h, remove "op_" from the opcode names,
10596 * - make the cpu_ tables smaller when the usage of the cee_ opcodes is removed.
10597 * - optimize mono_regstate2_alloc_int/float.
10598 * - fix the pessimistic handling of variables accessed in exception handler blocks.
10599 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
10600 * parts of the tree could be separated by other instructions, killing the tree
10601 * arguments, or stores killing loads etc. Also, should we fold loads into other
10602 * instructions if the result of the load is used multiple times ?
10603 * - make the REM_IMM optimization in mini-x86.c arch-independent.
10604 * - LAST MERGE: 108395.
10605 * - when returning vtypes in registers, generate IR and append it to the end of the
10606 * last bb instead of doing it in the epilog.
10607 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
10615 - When to decompose opcodes:
10616 - earlier: this makes some optimizations hard to implement, since the low level IR
10617 no longer contains the necessary information. But it is easier to do.
10618 - later: harder to implement, enables more optimizations.
10619 - Branches inside bblocks:
10620 - created when decomposing complex opcodes.
10621 - branches to another bblock: harmless, but not tracked by the branch
10622 optimizations, so need to branch to a label at the start of the bblock.
10623 - branches to inside the same bblock: very problematic, trips up the local
10624 reg allocator. Can be fixed by splitting the current bblock, but that is a
10625 complex operation, since some local vregs can become global vregs etc.
10626 - Local/global vregs:
10627 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
10628 local register allocator.
10629 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
10630 structure, created by mono_create_var (). Assigned to hregs or the stack by
10631 the global register allocator.
10632 - When to do optimizations like alu->alu_imm:
10633 - earlier -> saves work later on since the IR will be smaller/simpler
10634 - later -> can work on more instructions
10635 - Handling of valuetypes:
10636 - When a vtype is pushed on the stack, a new temporary is created, an
10637 instruction computing its address (LDADDR) is emitted and pushed on
10638 the stack. Need to optimize cases when the vtype is used immediately as in
10639 argument passing, stloc etc.
10640 - Instead of the to_end stuff in the old JIT, simply call the function handling
10641 the values on the stack before emitting the last instruction of the bb.
10644 #endif /* DISABLE_JIT */