2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
22 #ifdef HAVE_SYS_TIME_H
26 #ifdef HAVE_VALGRIND_MEMCHECK_H
27 #include <valgrind/memcheck.h>
30 #include <mono/metadata/assembly.h>
31 #include <mono/metadata/loader.h>
32 #include <mono/metadata/tabledefs.h>
33 #include <mono/metadata/class.h>
34 #include <mono/metadata/object.h>
35 #include <mono/metadata/exception.h>
36 #include <mono/metadata/opcodes.h>
37 #include <mono/metadata/mono-endian.h>
38 #include <mono/metadata/tokentype.h>
39 #include <mono/metadata/tabledefs.h>
40 #include <mono/metadata/marshal.h>
41 #include <mono/metadata/debug-helpers.h>
42 #include <mono/metadata/mono-debug.h>
43 #include <mono/metadata/gc-internal.h>
44 #include <mono/metadata/security-manager.h>
45 #include <mono/metadata/threads-types.h>
46 #include <mono/metadata/security-core-clr.h>
47 #include <mono/metadata/monitor.h>
48 #include <mono/utils/mono-compiler.h>
55 #include "jit-icalls.h"
/*
 * Inlining tuning knobs and "bail out of compilation" helper macros.
 * The *_FAILURE macros record an exception type/message on the MonoCompile
 * (cfg) and jump to the method-to-ir exception_exit label; they are meant
 * to be used only inside the big IL-decoding loop where 'method',
 * 'cil_method', 'field' and 'cfg' are in scope.
 */
57 #define BRANCH_COST 100
58 #define INLINE_LENGTH_LIMIT 20
59 #define INLINE_FAILURE do {\
60 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE))\
63 #define CHECK_CFG_EXCEPTION do {\
64 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
67 #define METHOD_ACCESS_FAILURE do { \
68 char *method_fname = mono_method_full_name (method, TRUE); \
69 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
70 cfg->exception_type = MONO_EXCEPTION_METHOD_ACCESS; \
71 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
72 g_free (method_fname); \
73 g_free (cil_method_fname); \
74 goto exception_exit; \
76 #define FIELD_ACCESS_FAILURE do { \
77 char *method_fname = mono_method_full_name (method, TRUE); \
78 char *field_fname = mono_field_full_name (field); \
79 cfg->exception_type = MONO_EXCEPTION_FIELD_ACCESS; \
80 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
81 g_free (method_fname); \
82 g_free (field_fname); \
83 goto exception_exit; \
85 #define GENERIC_SHARING_FAILURE(opcode) do { \
86 if (cfg->generic_sharing_context) { \
87 if (cfg->verbose_level > 2) \
88 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
89 cfg->exception_type = MONO_EXCEPTION_GENERIC_SHARING_FAILED; \
90 goto exception_exit; \
94 /* Determine whenever 'ins' represents a load of the 'this' argument */
95 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
97 static int ldind_to_load_membase (int opcode);
98 static int stind_to_store_membase (int opcode);
100 int mono_op_to_op_imm (int opcode);
101 int mono_op_to_op_imm_noemul (int opcode);
103 MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
104 void mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native);
105 void mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass);
107 /* helper methods signature */
108 extern MonoMethodSignature *helper_sig_class_init_trampoline;
109 extern MonoMethodSignature *helper_sig_domain_get;
110 extern MonoMethodSignature *helper_sig_generic_class_init_trampoline;
111 extern MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
112 extern MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline;
115 * Instruction metadata
120 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2,
126 #if SIZEOF_REGISTER == 8
131 /* keep in sync with the enum in mini.h */
134 #include "mini-ops.h"
138 extern GHashTable *jit_icall_name_hash;
/*
 * Initialize a MonoMethodVar-style record: 0xffff in first_use.pos.bid
 * marks the live-range start as "not seen yet".
 */
140 #define MONO_INIT_VARINFO(vi,id) do { \
141 (vi)->range.first_use.pos.bid = 0xffff; \
/* Allocate a fresh integer virtual register for @cfg (public wrapper
 * around the static alloc_ireg helper). */
147 mono_alloc_ireg (MonoCompile *cfg)
149 return alloc_ireg (cfg);
/* Allocate a fresh floating-point virtual register for @cfg. */
153 mono_alloc_freg (MonoCompile *cfg)
155 return alloc_freg (cfg);
/* Allocate a fresh pointer-sized virtual register for @cfg. */
159 mono_alloc_preg (MonoCompile *cfg)
161 return alloc_preg (cfg);
/* Allocate a destination virtual register whose class (int/long/fp/...)
 * is chosen from the eval-stack type @stack_type. */
165 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
167 return alloc_dreg (cfg, stack_type);
/*
 * Map a metadata MonoType to the IR move opcode used to copy a value of
 * that type between registers. Enums are unwrapped to their underlying
 * type, and generic instances fall through via the container class's
 * byval type; unknown types abort with g_error.
 */
171 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
177 switch (type->type) {
180 case MONO_TYPE_BOOLEAN:
192 case MONO_TYPE_FNPTR:
194 case MONO_TYPE_CLASS:
195 case MONO_TYPE_STRING:
196 case MONO_TYPE_OBJECT:
197 case MONO_TYPE_SZARRAY:
198 case MONO_TYPE_ARRAY:
202 #if SIZEOF_REGISTER == 8
211 case MONO_TYPE_VALUETYPE:
/* enums are moved like their underlying integral type */
212 if (type->data.klass->enumtype) {
213 type = type->data.klass->enum_basetype;
/* SIMD-capable value types get their own move handling */
216 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
219 case MONO_TYPE_TYPEDBYREF:
221 case MONO_TYPE_GENERICINST:
222 type = &type->data.generic_class->container_class->byval_arg;
/* type variables are only valid here under generic sharing */
226 g_assert (cfg->generic_sharing_context);
229 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/*
 * Debug helper: print a basic block's number, its incoming and outgoing
 * CFG edges (block number plus depth-first number), and every
 * instruction in its code list, prefixed with @msg.
 */
235 mono_print_bb (MonoBasicBlock *bb, const char *msg)
240 printf ("\n%s %d: [IN: ", msg, bb->block_num);
241 for (i = 0; i < bb->in_count; ++i)
242 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
244 for (i = 0; i < bb->out_count; ++i)
245 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
247 for (tree = bb->code; tree; tree = tree->next)
248 mono_print_ins_index (-1, tree);
252 * Can't put this at the beginning, since other files reference stuff from this
257 #define UNVERIFIED do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto unverified; } while (0)
/*
 * GET_BBLOCK: fetch (or lazily create and register) the basic block
 * starting at IL address @ip, using the cil_offset_to_bb cache.
 * Out-of-range addresses are flagged as unverified IL.
 */
259 #define GET_BBLOCK(cfg,tblock,ip) do { \
260 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
262 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
263 NEW_BBLOCK (cfg, (tblock)); \
264 (tblock)->cil_code = (ip); \
265 ADD_BBLOCK (cfg, (tblock)); \
269 #if defined(__i386__) || defined(__x86_64__)
270 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
271 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
272 (dest)->dreg = alloc_preg ((cfg)); \
273 (dest)->sreg1 = (sr1); \
274 (dest)->sreg2 = (sr2); \
275 (dest)->inst_imm = (imm); \
276 (dest)->backend.shift_amount = (shift); \
277 MONO_ADD_INS ((cfg)->cbb, (dest)); \
281 #if SIZEOF_REGISTER == 8
282 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
283 /* FIXME: Need to add many more cases */ \
284 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
286 int dr = alloc_preg (cfg); \
287 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
288 (ins)->sreg2 = widen->dreg; \
292 #define ADD_WIDEN_OP(ins, arg1, arg2)
/*
 * ADD_BINOP / ADD_UNOP: pop operand(s) from the eval stack 'sp',
 * specialize the generic opcode via type_from_op (), widen mixed
 * I4/PTR operands on 64-bit, allocate a dreg and append the new
 * instruction to the current bblock, decomposing it if needed.
 * ADD_BINCOND additionally emits an OP_COMPARE + conditional branch
 * pair and wires up the true/false CFG edges.
 */
295 #define ADD_BINOP(op) do { \
296 MONO_INST_NEW (cfg, ins, (op)); \
298 ins->sreg1 = sp [0]->dreg; \
299 ins->sreg2 = sp [1]->dreg; \
300 type_from_op (ins, sp [0], sp [1]); \
302 /* Have to insert a widening op */ \
303 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
304 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
305 MONO_ADD_INS ((cfg)->cbb, (ins)); \
307 mono_decompose_opcode ((cfg), (ins)); \
310 #define ADD_UNOP(op) do { \
311 MONO_INST_NEW (cfg, ins, (op)); \
313 ins->sreg1 = sp [0]->dreg; \
314 type_from_op (ins, sp [0], NULL); \
316 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
317 MONO_ADD_INS ((cfg)->cbb, (ins)); \
319 mono_decompose_opcode (cfg, ins); \
322 #define ADD_BINCOND(next_block) do { \
325 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
326 cmp->sreg1 = sp [0]->dreg; \
327 cmp->sreg2 = sp [1]->dreg; \
328 type_from_op (cmp, sp [0], sp [1]); \
330 type_from_op (ins, sp [0], sp [1]); \
331 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
332 GET_BBLOCK (cfg, tblock, target); \
333 link_bblock (cfg, bblock, tblock); \
334 ins->inst_true_bb = tblock; \
335 if ((next_block)) { \
336 link_bblock (cfg, bblock, (next_block)); \
337 ins->inst_false_bb = (next_block); \
338 start_new_bblock = 1; \
340 GET_BBLOCK (cfg, tblock, ip); \
341 link_bblock (cfg, bblock, tblock); \
342 ins->inst_false_bb = tblock; \
343 start_new_bblock = 2; \
345 if (sp != stack_start) { \
346 handle_stack_args (cfg, stack_start, sp - stack_start); \
347 CHECK_UNVERIFIABLE (cfg); \
349 MONO_ADD_INS (bblock, cmp); \
350 MONO_ADD_INS (bblock, ins); \
354 * link_bblock: Links two basic blocks
356 * links two basic blocks in the control flow graph, the 'from'
357 * argument is the starting block and the 'to' argument is the block
358 * the control flow ends to after 'from'.
361 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
363 MonoBasicBlock **newa;
/* Verbose tracing of the edge being added (entry/exit are special-cased). */
367 if (from->cil_code) {
369 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
371 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
374 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
376 printf ("edge from entry to exit\n");
/* Skip duplicate out-edges: the edge may already exist. */
381 for (i = 0; i < from->out_count; ++i) {
382 if (to == from->out_bb [i]) {
/* Grow from->out_bb by one slot; arrays live in the cfg mempool,
 * so the old array is simply abandoned, not freed. */
388 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
389 for (i = 0; i < from->out_count; ++i) {
390 newa [i] = from->out_bb [i];
/* Same dance for the symmetric in-edge on 'to'. */
398 for (i = 0; i < to->in_count; ++i) {
399 if (from == to->in_bb [i]) {
405 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
406 for (i = 0; i < to->in_count; ++i) {
407 newa [i] = to->in_bb [i];
/* Public entry point for link_bblock (). */
416 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
418 link_bblock (cfg, from, to);
422 * mono_find_block_region:
424 * We mark each basic block with a region ID. We use that to avoid BB
425 * optimizations when blocks are in different regions.
428 * A region token that encodes where this region is, and information
429 * about the clause owner for this block.
431 * The region encodes the try/catch/filter clause that owns this block
432 * as well as the type. -1 is a special value that represents a block
433 * that is in none of try/catch/filter.
436 mono_find_block_region (MonoCompile *cfg, int offset)
438 MonoMethod *method = cfg->method;
439 MonoMethodHeader *header = mono_method_get_header (method);
440 MonoExceptionClause *clause;
443 /* first search for handlers and filters */
444 for (i = 0; i < header->num_clauses; ++i) {
445 clause = &header->clauses [i];
/* A filter region runs from filter_offset up to (not including) the
 * handler; region token = (clause index + 1) << 8 | region kind | flags. */
446 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
447 (offset < (clause->handler_offset)))
448 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
450 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
451 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
452 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
453 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
454 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
/* neither finally nor fault: this is a catch handler */
456 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
460 /* search the try blocks */
461 for (i = 0; i < header->num_clauses; ++i) {
462 clause = &header->clauses [i];
463 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
464 return ((i + 1) << 8) | clause->flags;
/*
 * Collect the handler basic blocks (of clause type @type) whose protected
 * region is exited by a branch from @ip to @target: a clause matches when
 * @ip is inside it but @target is not. Matching handlers are appended to
 * a GList (presumably returned to the caller for finally/fault chaining —
 * the return statement is outside this view).
 */
471 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
473 MonoMethod *method = cfg->method;
474 MonoMethodHeader *header = mono_method_get_header (method);
475 MonoExceptionClause *clause;
476 MonoBasicBlock *handler;
480 for (i = 0; i < header->num_clauses; ++i) {
481 clause = &header->clauses [i];
482 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
483 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
484 if (clause->flags == type) {
485 handler = cfg->cil_offset_to_bb [clause->handler_offset];
487 res = g_list_append (res, handler);
/*
 * Return the per-region stack-pointer spill variable, creating and
 * memoizing it in cfg->spvars on first request. MONO_INST_INDIRECT
 * forces it to live on the stack.
 */
495 mono_create_spvar_for_region (MonoCompile *cfg, int region)
499 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
503 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
504 /* prevent it from being register allocated */
505 var->flags |= MONO_INST_INDIRECT;
507 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/* Look up (without creating) the exception-object variable for the
 * handler at IL @offset; NULL if none was registered. */
511 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
513 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/*
 * Return the exception-object variable for the handler at IL @offset,
 * creating and memoizing it in cfg->exvars on first request.
 * Mirrors mono_create_spvar_for_region () but uses object type.
 */
517 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
521 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
525 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
526 /* prevent it from being register allocated */
527 var->flags |= MONO_INST_INDIRECT;
529 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
535 * Returns the type used in the eval stack when @type is loaded.
536 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
539 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
/* Record the MonoClass alongside the stack type on the instruction. */
543 inst->klass = klass = mono_class_from_mono_type (type);
/* byref values are managed pointers on the eval stack */
545 inst->type = STACK_MP;
550 switch (type->type) {
552 inst->type = STACK_INV;
556 case MONO_TYPE_BOOLEAN:
562 inst->type = STACK_I4;
567 case MONO_TYPE_FNPTR:
568 inst->type = STACK_PTR;
570 case MONO_TYPE_CLASS:
571 case MONO_TYPE_STRING:
572 case MONO_TYPE_OBJECT:
573 case MONO_TYPE_SZARRAY:
574 case MONO_TYPE_ARRAY:
575 inst->type = STACK_OBJ;
579 inst->type = STACK_I8;
583 inst->type = STACK_R8;
585 case MONO_TYPE_VALUETYPE:
/* enums use their underlying integral type's stack slot */
586 if (type->data.klass->enumtype) {
587 type = type->data.klass->enum_basetype;
591 inst->type = STACK_VTYPE;
594 case MONO_TYPE_TYPEDBYREF:
595 inst->klass = mono_defaults.typed_reference_class;
596 inst->type = STACK_VTYPE;
598 case MONO_TYPE_GENERICINST:
599 type = &type->data.generic_class->container_class->byval_arg;
602 case MONO_TYPE_MVAR :
603 /* FIXME: all the arguments must be references for now,
604 * later look inside cfg and see if the arg num is
/* type variables are only valid under generic sharing */
607 g_assert (cfg->generic_sharing_context);
608 inst->type = STACK_OBJ;
611 g_error ("unknown type 0x%02x in eval stack type", type->type);
616 * The following tables are used to quickly validate the IL code in type_from_op ().
/* Result stack type of a binary numeric op, indexed by the two operand
 * stack types; STACK_INV marks an illegal combination. Row/column order
 * follows the STACK_* enum (Inv, I4, I8, PTR, R8, MP, OBJ, VTYPE). */
619 bin_num_table [STACK_MAX] [STACK_MAX] = {
620 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
621 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
622 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
623 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
624 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
625 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
626 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
627 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Result type of unary negation per operand stack type. */
632 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
635 /* reduce the size of this table */
/* Result type of binary integer (bitwise) ops; floats/refs are invalid. */
637 bin_int_table [STACK_MAX] [STACK_MAX] = {
638 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
639 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
640 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
641 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
642 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
643 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
644 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
645 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Comparison legality table: 0 = invalid, nonzero = allowed (the exact
 * nonzero values appear to distinguish verifiable sub-cases). */
649 bin_comp_table [STACK_MAX] [STACK_MAX] = {
650 /* Inv i L p F & O vt */
652 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
653 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
654 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
655 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
656 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
657 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
658 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
661 /* reduce the size of this table */
/* Result type of shift ops: shift amount must be I4 or PTR. */
663 shift_table [STACK_MAX] [STACK_MAX] = {
664 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
665 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
666 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
667 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
668 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
669 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
670 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
671 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
675 * Tables to map from the non-specific opcode to the matching
676 * type-specific opcode.
678 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
/* Each entry is the delta added to the generic opcode to obtain the
 * type-specialized one (I/L/P/F variants), indexed by result stack type. */
680 binops_op_map [STACK_MAX] = {
681 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
684 /* handles from CEE_NEG to CEE_CONV_U8 */
686 unops_op_map [STACK_MAX] = {
687 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
690 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
692 ovfops_op_map [STACK_MAX] = {
693 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
696 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
698 ovf2ops_op_map [STACK_MAX] = {
699 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
702 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
704 ovf3ops_op_map [STACK_MAX] = {
705 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
708 /* handles from CEE_BEQ to CEE_BLT_UN */
710 beqops_op_map [STACK_MAX] = {
711 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
714 /* handles from CEE_CEQ to CEE_CLT_UN */
716 ceqops_op_map [STACK_MAX] = {
717 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
721 * Sets ins->type (the type on the eval stack) according to the
722 * type of the opcode and the arguments to it.
723 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
725 * FIXME: this function sets ins->type unconditionally in some cases, but
726 * it should set it to invalid for some types (a conv.x on an object)
729 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
731 switch (ins->opcode) {
/* binary arithmetic: result type from the table, opcode specialized
 * by adding the per-type delta from binops_op_map */
738 /* FIXME: check unverifiable args for STACK_MP */
739 ins->type = bin_num_table [src1->type] [src2->type];
740 ins->opcode += binops_op_map [ins->type];
/* bitwise/int-only binary ops */
747 ins->type = bin_int_table [src1->type] [src2->type];
748 ins->opcode += binops_op_map [ins->type];
/* shifts */
753 ins->type = shift_table [src1->type] [src2->type];
754 ins->opcode += binops_op_map [ins->type];
/* compares: pick L/F/I variant from the first operand's width */
759 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
760 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
761 ins->opcode = OP_LCOMPARE;
762 else if (src1->type == STACK_R8)
763 ins->opcode = OP_FCOMPARE;
765 ins->opcode = OP_ICOMPARE;
767 case OP_ICOMPARE_IMM:
768 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
769 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
770 ins->opcode = OP_LCOMPARE_IMM;
/* conditional branches and set-on-compare opcodes */
782 ins->opcode += beqops_op_map [src1->type];
785 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
786 ins->opcode += ceqops_op_map [src1->type];
/* unsigned compare-sets only accept combinations flagged 1 in the table */
792 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
793 ins->opcode += ceqops_op_map [src1->type];
/* unary ops */
797 ins->type = neg_table [src1->type];
798 ins->opcode += unops_op_map [ins->type];
801 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
802 ins->type = src1->type;
804 ins->type = STACK_INV;
805 ins->opcode += unops_op_map [ins->type];
/* narrowing conversions to I4 */
811 ins->type = STACK_I4;
812 ins->opcode += unops_op_map [src1->type];
/* conv.r.un: pick int- or long-sourced unsigned-to-float variant */
815 ins->type = STACK_R8;
816 switch (src1->type) {
819 ins->opcode = OP_ICONV_TO_R_UN;
822 ins->opcode = OP_LCONV_TO_R_UN;
826 case CEE_CONV_OVF_I1:
827 case CEE_CONV_OVF_U1:
828 case CEE_CONV_OVF_I2:
829 case CEE_CONV_OVF_U2:
830 case CEE_CONV_OVF_I4:
831 case CEE_CONV_OVF_U4:
832 ins->type = STACK_I4;
833 ins->opcode += ovf3ops_op_map [src1->type];
835 case CEE_CONV_OVF_I_UN:
836 case CEE_CONV_OVF_U_UN:
837 ins->type = STACK_PTR;
838 ins->opcode += ovf2ops_op_map [src1->type];
840 case CEE_CONV_OVF_I1_UN:
841 case CEE_CONV_OVF_I2_UN:
842 case CEE_CONV_OVF_I4_UN:
843 case CEE_CONV_OVF_U1_UN:
844 case CEE_CONV_OVF_U2_UN:
845 case CEE_CONV_OVF_U4_UN:
846 ins->type = STACK_I4;
847 ins->opcode += ovf2ops_op_map [src1->type];
/* conv.u: result is native unsigned int; on 64-bit a PTR source is
 * already wide enough and becomes a plain move */
850 ins->type = STACK_PTR;
851 switch (src1->type) {
853 ins->opcode = OP_ICONV_TO_U;
857 #if SIZEOF_REGISTER == 8
858 ins->opcode = OP_LCONV_TO_U;
860 ins->opcode = OP_MOVE;
864 ins->opcode = OP_LCONV_TO_U;
867 ins->opcode = OP_FCONV_TO_U;
/* conversions to I8 */
873 ins->type = STACK_I8;
874 ins->opcode += unops_op_map [src1->type];
876 case CEE_CONV_OVF_I8:
877 case CEE_CONV_OVF_U8:
878 ins->type = STACK_I8;
879 ins->opcode += ovf3ops_op_map [src1->type];
881 case CEE_CONV_OVF_U8_UN:
882 case CEE_CONV_OVF_I8_UN:
883 ins->type = STACK_I8;
884 ins->opcode += ovf2ops_op_map [src1->type];
/* conversions to floating point */
888 ins->type = STACK_R8;
889 ins->opcode += unops_op_map [src1->type];
892 ins->type = STACK_R8;
/* overflow-checked arithmetic families */
896 ins->type = STACK_I4;
897 ins->opcode += ovfops_op_map [src1->type];
902 ins->type = STACK_PTR;
903 ins->opcode += ovfops_op_map [src1->type];
/* ovf add/sub/mul: R8 results are invalid for overflow-checked math */
911 ins->type = bin_num_table [src1->type] [src2->type];
912 ins->opcode += ovfops_op_map [src1->type];
913 if (ins->type == STACK_R8)
914 ins->type = STACK_INV;
916 case OP_LOAD_MEMBASE:
917 ins->type = STACK_PTR;
919 case OP_LOADI1_MEMBASE:
920 case OP_LOADU1_MEMBASE:
921 case OP_LOADI2_MEMBASE:
922 case OP_LOADU2_MEMBASE:
923 case OP_LOADI4_MEMBASE:
924 case OP_LOADU4_MEMBASE:
925 ins->type = STACK_PTR;
927 case OP_LOADI8_MEMBASE:
928 ins->type = STACK_I8;
930 case OP_LOADR4_MEMBASE:
931 case OP_LOADR8_MEMBASE:
932 ins->type = STACK_R8;
935 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
/* managed pointers get a generic object klass by default */
939 if (ins->type == STACK_MP)
940 ins->klass = mono_defaults.object_class;
/* Stack type per low MONO_TYPE_* tag (presumably a MonoType -> STACK_*
 * lookup — TODO confirm against the elided declaration). */
945 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
951 param_table [STACK_MAX] [STACK_MAX] = {
/*
 * Cheap sanity check that the eval-stack types in @args are compatible
 * with @sig's 'this' type and parameter list (byref-ness, reference vs.
 * value, float kinds). Used as a gate before inlining/fast-path calls.
 */
956 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
960 switch (args->type) {
970 for (i = 0; i < sig->param_count; ++i) {
971 switch (args [i].type) {
975 if (!sig->params [i]->byref)
979 if (sig->params [i]->byref)
981 switch (sig->params [i]->type) {
982 case MONO_TYPE_CLASS:
983 case MONO_TYPE_STRING:
984 case MONO_TYPE_OBJECT:
985 case MONO_TYPE_SZARRAY:
986 case MONO_TYPE_ARRAY:
/* float stack slots must match R4/R8 parameters */
993 if (sig->params [i]->byref)
995 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1004 /*if (!param_table [args [i].type] [sig->params [i]->type])
1012 * When we need a pointer to the current domain many times in a method, we
1013 * call mono_domain_get() once and we store the result in a local variable.
1014 * This function returns the variable that represents the MonoDomain*.
1016 inline static MonoInst *
1017 mono_get_domainvar (MonoCompile *cfg)
/* lazily created; cached on the MonoCompile */
1019 if (!cfg->domainvar)
1020 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1021 return cfg->domainvar;
1025 * The got_var contains the address of the Global Offset Table when AOT
/* Only meaningful on architectures that need an explicit GOT variable
 * and when compiling AOT; lazily created and cached like domainvar. */
1028 inline static MonoInst *
1029 mono_get_got_var (MonoCompile *cfg)
1031 #ifdef MONO_ARCH_NEED_GOT_VAR
1032 if (!cfg->compile_aot)
1034 if (!cfg->got_var) {
1035 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1037 return cfg->got_var;
/*
 * Return the cached rgctx/vtable variable used by generic sharing,
 * creating it on first use. Only valid when a generic sharing context
 * is active (asserted).
 */
1044 mono_get_vtable_var (MonoCompile *cfg)
1046 g_assert (cfg->generic_sharing_context);
1048 if (!cfg->rgctx_var) {
1049 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1050 /* force the var to be stack allocated */
1051 cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
1054 return cfg->rgctx_var;
/*
 * Inverse of type_to_eval_stack_type (): map an instruction's eval-stack
 * type back to a canonical MonoType (using ins->klass for MP/VTYPE).
 */
1058 type_from_stack_type (MonoInst *ins) {
1059 switch (ins->type) {
1060 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1061 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1062 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1063 case STACK_R8: return &mono_defaults.double_class->byval_arg;
/* managed pointer: use the pointee class's this_arg (byref) type */
1065 return &ins->klass->this_arg;
1066 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1067 case STACK_VTYPE: return &ins->klass->byval_arg;
1069 g_error ("stack type %d to monotype not handled\n", ins->type);
/*
 * Map a MonoType to its STACK_* eval-stack category, looking through
 * enum wrappers via mono_type_get_underlying_type ().
 */
1074 static G_GNUC_UNUSED int
1075 type_to_stack_type (MonoType *t)
1077 switch (mono_type_get_underlying_type (t)->type) {
1080 case MONO_TYPE_BOOLEAN:
1083 case MONO_TYPE_CHAR:
1090 case MONO_TYPE_FNPTR:
1092 case MONO_TYPE_CLASS:
1093 case MONO_TYPE_STRING:
1094 case MONO_TYPE_OBJECT:
1095 case MONO_TYPE_SZARRAY:
1096 case MONO_TYPE_ARRAY:
1104 case MONO_TYPE_VALUETYPE:
1105 case MONO_TYPE_TYPEDBYREF:
/* generic instances: value types vs. reference types diverge here */
1107 case MONO_TYPE_GENERICINST:
1108 if (mono_type_generic_inst_is_valuetype (t))
1114 g_assert_not_reached ();
/*
 * Map a CIL ldelem.*/stelem.* opcode to the MonoClass of the array
 * element it accesses (e.g. ldelem.i1 -> sbyte, ldelem.ref -> object).
 */
1121 array_access_to_klass (int opcode)
1125 return mono_defaults.byte_class;
1127 return mono_defaults.uint16_class;
1130 return mono_defaults.int_class;
1133 return mono_defaults.sbyte_class;
1136 return mono_defaults.int16_class;
1139 return mono_defaults.int32_class;
1141 return mono_defaults.uint32_class;
1144 return mono_defaults.int64_class;
1147 return mono_defaults.single_class;
1150 return mono_defaults.double_class;
1151 case CEE_LDELEM_REF:
1152 case CEE_STELEM_REF:
1153 return mono_defaults.object_class;
1155 g_assert_not_reached ();
1161 * We try to share variables when possible
/*
 * Return an "interface" local used to carry eval-stack slot @slot of
 * type ins->type across bblock boundaries. Vars are shared per
 * (slot, stack-type) via the cfg->intvars cache; stacks deeper than
 * max_stack (possible with inlining) always get a fresh variable.
 */
1164 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1169 /* inlining can result in deeper stacks */
1170 if (slot >= mono_method_get_header (cfg->method)->max_stack)
1171 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* cache key: one entry per (stack type, slot) pair */
1173 pos = ins->type - 1 + slot * STACK_MAX;
1175 switch (ins->type) {
1182 if ((vnum = cfg->intvars [pos]))
1183 return cfg->varinfo [vnum];
1184 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1185 cfg->intvars [pos] = res->inst_c0;
/* types not covered by the cache fall through to a plain new var */
1188 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/*
 * When compiling AOT, remember which (image, token) pair produced the
 * runtime object @key, so the AOT compiler can re-resolve it later.
 * Skipped when a generic context is active (token alone is ambiguous)
 * and for wrapper-made references (table == 0).
 */
1194 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1197 * Don't use this if a generic_context is set, since that means AOT can't
1198 * look up the method using just the image+token.
1199 * table == 0 means this is a reference made from a wrapper.
1201 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1202 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1203 jump_info_token->image = image;
1204 jump_info_token->token = token;
1205 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1210 * This function is called to handle items that are left on the evaluation stack
1211 * at basic block boundaries. What happens is that we save the values to local variables
1212 * and we reload them later when first entering the target basic block (with the
1213 * handle_loaded_temps () function).
1214 * A single joint point will use the same variables (stored in the array bb->out_stack or
1215 * bb->in_stack, if the basic block is before or after the joint point).
1217 * This function needs to be called _before_ emitting the last instruction of
1218 * the bb (i.e. before emitting a branch).
1219 * If the stack merge fails at a join point, cfg->unverifiable is set.
1222 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1225 MonoBasicBlock *bb = cfg->cbb;
1226 MonoBasicBlock *outb;
1227 MonoInst *inst, **locals;
1232 if (cfg->verbose_level > 3)
1233 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* First time out of this bb: decide which variables carry the stack. */
1234 if (!bb->out_scount) {
1235 bb->out_scount = count;
1236 //printf ("bblock %d has out:", bb->block_num);
/* If a successor already has an in_stack, reuse it as our out_stack. */
1238 for (i = 0; i < bb->out_count; ++i) {
1239 outb = bb->out_bb [i];
1240 /* exception handlers are linked, but they should not be considered for stack args */
1241 if (outb->flags & BB_EXCEPTION_HANDLER)
1243 //printf (" %d", outb->block_num);
1244 if (outb->in_stack) {
1246 bb->out_stack = outb->in_stack;
/* Otherwise allocate a fresh out_stack from the mempool. */
1252 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1253 for (i = 0; i < count; ++i) {
1255 * try to reuse temps already allocated for this purpouse, if they occupy the same
1256 * stack slot and if they are of the same type.
1257 * This won't cause conflicts since if 'local' is used to
1258 * store one of the values in the in_stack of a bblock, then
1259 * the same variable will be used for the same outgoing stack
1261 * This doesn't work when inlining methods, since the bblocks
1262 * in the inlined methods do not inherit their in_stack from
1263 * the bblock they are inlined to. See bug #58863 for an
1266 if (cfg->inlined_method)
1267 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1269 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate our out_stack to successors that have no in_stack yet;
 * depth mismatches at a join point make the method unverifiable. */
1274 for (i = 0; i < bb->out_count; ++i) {
1275 outb = bb->out_bb [i];
1276 /* exception handlers are linked, but they should not be considered for stack args */
1277 if (outb->flags & BB_EXCEPTION_HANDLER)
1279 if (outb->in_scount) {
1280 if (outb->in_scount != bb->out_scount) {
1281 cfg->unverifiable = TRUE;
1284 continue; /* check they are the same locals */
1286 outb->in_scount = count;
1287 outb->in_stack = bb->out_stack;
/* Store each stack entry into its carrier local and replace the
 * stack entry with that local. */
1290 locals = bb->out_stack;
1292 for (i = 0; i < count; ++i) {
1293 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1294 inst->cil_code = sp [i]->cil_code;
1295 sp [i] = locals [i];
1296 if (cfg->verbose_level > 3)
1297 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1301 * It is possible that the out bblocks already have in_stack assigned, and
1302 * the in_stacks differ. In this case, we will store to all the different
1309 /* Find a bblock which has a different in_stack */
1311 while (bindex < bb->out_count) {
1312 outb = bb->out_bb [bindex];
1313 /* exception handlers are linked, but they should not be considered for stack args */
1314 if (outb->flags & BB_EXCEPTION_HANDLER) {
1318 if (outb->in_stack != locals) {
1319 for (i = 0; i < count; ++i) {
1320 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1321 inst->cil_code = sp [i]->cil_code;
1322 sp [i] = locals [i];
1323 if (cfg->verbose_level > 3)
1324 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1326 locals = outb->in_stack;
1335 /* Emit code which loads interface_offsets [klass->interface_id]
1336 * The array is stored in memory before vtable.
1339 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
/* Under AOT the interface id is a patch-time constant, so compute the
 * slot address with an add; otherwise use a fixed negative offset from
 * the vtable pointer. */
1341 if (cfg->compile_aot) {
1342 int ioffset_reg = alloc_preg (cfg);
1343 int iid_reg = alloc_preg (cfg);
1345 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1346 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1347 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
1350 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
1355 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1356 * stored in "klass_reg" implements the interface "klass".
1359 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1361 int ibitmap_reg = alloc_preg (cfg);
1362 int ibitmap_byte_reg = alloc_preg (cfg);
/* The class carries a per-interface bitmap; test the bit for klass->interface_id. */
1364 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap));
1366 if (cfg->compile_aot) {
/* AOT: the interface id is only known at patch time, so compute
 * byte index (iid >> 3) and bit mask (1 << (iid & 7)) at runtime. */
1367 int iid_reg = alloc_preg (cfg);
1368 int shifted_iid_reg = alloc_preg (cfg);
1369 int ibitmap_byte_address_reg = alloc_preg (cfg);
1370 int masked_iid_reg = alloc_preg (cfg);
1371 int iid_one_bit_reg = alloc_preg (cfg);
1372 int iid_bit_reg = alloc_preg (cfg);
1373 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1374 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1375 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1376 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1377 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1378 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1379 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1380 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT: the id is a constant, so byte offset and mask are folded at compile time. */
1382 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1383 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1388 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1389 * stored in "vtable_reg" implements the interface "klass".
/* Same bit-test as mini_emit_load_intf_bit_reg_class, but the bitmap is read off the vtable. */
1392 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1394 int ibitmap_reg = alloc_preg (cfg);
1395 int ibitmap_byte_reg = alloc_preg (cfg);
1397 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap));
1399 if (cfg->compile_aot) {
/* AOT: interface id resolved at patch time; compute byte index and bit mask at runtime. */
1400 int iid_reg = alloc_preg (cfg);
1401 int shifted_iid_reg = alloc_preg (cfg);
1402 int ibitmap_byte_address_reg = alloc_preg (cfg);
1403 int masked_iid_reg = alloc_preg (cfg);
1404 int iid_one_bit_reg = alloc_preg (cfg);
1405 int iid_bit_reg = alloc_preg (cfg);
1406 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1407 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, shifted_iid_reg, iid_reg, 3);
1408 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1409 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1410 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, masked_iid_reg, iid_reg, 7);
1411 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1412 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1413 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT: constant id, byte offset and mask folded at compile time. */
1415 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1416 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1421 * Emit code which checks whether the interface id of @klass fits within
1422 * the maximum interface id given by max_iid_reg; on failure it either
 * branches to false_target (when non-NULL) or throws InvalidCastException.
1425 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1426 MonoBasicBlock *false_target)
1428 if (cfg->compile_aot) {
/* AOT: interface id comes from a patchable constant. */
1429 int iid_reg = alloc_preg (cfg);
1430 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1431 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1434 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
/* Unsigned less-than: fails when max_iid < iid. */
1436 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1438 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1441 /* Same as above, but obtains max_iid from a vtable */
1443 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1444 MonoBasicBlock *false_target)
1446 int max_iid_reg = alloc_preg (cfg);
/* max_interface_id is stored as an unsigned 16-bit field in the vtable. */
1448 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1449 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1452 /* Same as above, but obtains max_iid from a klass */
1454 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1455 MonoBasicBlock *false_target)
1457 int max_iid_reg = alloc_preg (cfg);
/* max_interface_id is stored as an unsigned 16-bit field in MonoClass. */
1459 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1460 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/* Emit an "is instance" subclass test: branches to true_target when the class in
 * klass_reg derives from @klass (via the supertypes table), to false_target otherwise. */
1464 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1466 int idepth_reg = alloc_preg (cfg);
1467 int stypes_reg = alloc_preg (cfg);
1468 int stype = alloc_preg (cfg);
/* The supertypes table only has MONO_DEFAULT_SUPERTABLE_SIZE guaranteed entries;
 * for deeper hierarchies, first check the candidate's inheritance depth. */
1470 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1471 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1472 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1473 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
/* supertypes [klass->idepth - 1] must be @klass itself for a subclass relation. */
1475 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1476 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1477 if (cfg->compile_aot) {
1478 int const_reg = alloc_preg (cfg);
1479 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1480 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
/* Non-AOT: the MonoClass* pointer itself is used as the comparison immediate. */
1482 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1484 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/* Emit an interface cast check against the vtable in vtable_reg: range-check the
 * interface id, then test the interface bitmap bit. With branch targets it acts
 * as an isinst; without, a failed test throws InvalidCastException. */
1488 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1490 int intf_reg = alloc_preg (cfg);
1492 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1493 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1494 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1496 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1498 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1502 * Variant of the above that takes a register to the class, not the vtable.
1505 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1507 int intf_bit_reg = alloc_preg (cfg);
1509 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1510 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
/* Nonzero bitmap bit == interface implemented. */
1511 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1513 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1515 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/* Emit an exact class-identity check: throws InvalidCastException unless the
 * class in klass_reg is exactly @klass. */
1519 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1521 if (cfg->compile_aot) {
/* AOT: the class pointer must come from a patchable constant. */
1522 int const_reg = alloc_preg (cfg);
1523 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1524 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1526 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1528 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Compare the class in klass_reg against @klass and branch to @target using
 * the caller-supplied conditional branch opcode (e.g. OP_PBEQ / OP_PBNE_UN). */
1532 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1534 if (cfg->compile_aot) {
1535 int const_reg = alloc_preg (cfg);
1536 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1537 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1539 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1541 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
/* Emit a castclass check of the object in obj_reg (whose class is in klass_reg)
 * against @klass; throws InvalidCastException on mismatch. Recurses for array
 * element types; obj_reg may be -1 to skip object-level checks (see below). */
1545 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1548 int rank_reg = alloc_preg (cfg);
1549 int eclass_reg = alloc_preg (cfg);
/* Array path (guard condition on an elided line): rank must match exactly. */
1551 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
1552 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1553 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1554 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1555 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
/* Special-case element types involving System.Object / System.Enum / System.ValueType,
 * which have non-trivial array-covariance rules for enums. */
1556 if (klass->cast_class == mono_defaults.object_class) {
1557 int parent_reg = alloc_preg (cfg);
1558 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
1559 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1560 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1561 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1562 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1563 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1564 } else if (klass->cast_class == mono_defaults.enum_class) {
1565 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1566 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1567 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1569 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1570 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
/* An SZARRAY target additionally requires the object to be a vector
 * (no bounds array), not a multi-dimensional array of rank 1. */
1573 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1574 /* Check that the object is a vector too */
1575 int bounds_reg = alloc_preg (cfg);
1576 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
1577 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1578 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-array path: walk is avoided by indexing the supertypes table directly,
 * as in mini_emit_isninst_cast, but failure throws instead of branching. */
1581 int idepth_reg = alloc_preg (cfg);
1582 int stypes_reg = alloc_preg (cfg);
1583 int stype = alloc_preg (cfg);
1585 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1586 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1587 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1588 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1590 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1591 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1592 mini_emit_class_check (cfg, stype, klass);
/* Emit inline code to set @size bytes at destreg+offset to @val (only val == 0
 * is supported), honoring @align when choosing store widths. */
1597 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1601 g_assert (val == 0);
/* Small aligned sizes: a single store-immediate suffices (width selected by
 * an elided switch on size). */
1606 if ((size <= 4) && (size <= align)) {
1609 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1612 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1615 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1617 #if SIZEOF_REGISTER == 8
1619 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* General case: materialize the value in a register once, then emit stores. */
1625 val_reg = alloc_preg (cfg);
1627 if (SIZEOF_REGISTER == 8)
1628 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1630 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
/* Unaligned destination: fall back to byte stores.
1633 This could be optimized further if necessary */
1635 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1642 #if !NO_UNALIGNED_ACCESS
/* Wide stores first (8 then 4 bytes), narrowing for the remaining tail below. */
1643 if (SIZEOF_REGISTER == 8) {
1645 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1650 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
1658 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1663 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1668 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1674 #endif /* DISABLE_JIT */
/* Emit inline code to copy @size bytes from srcreg+soffset to destreg+doffset,
 * honoring @align when choosing load/store widths. */
1677 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
/* Unaligned case: copy byte-by-byte through a fresh register per iteration.
1685 This could be optimized further if necessary */
1687 cur_reg = alloc_preg (cfg);
1688 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1689 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1696 #if !NO_UNALIGNED_ACCESS
/* Aligned case: copy in the widest chunks available, narrowing for the tail. */
1697 if (SIZEOF_REGISTER == 8) {
1699 cur_reg = alloc_preg (cfg);
1700 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1701 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
1710 cur_reg = alloc_preg (cfg);
1711 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1712 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1718 cur_reg = alloc_preg (cfg);
1719 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1720 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1726 cur_reg = alloc_preg (cfg);
1727 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1728 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/* Map a method's return type onto the appropriate call IR opcode, selecting
 * the direct/indirect (calli) and virtual variants. byref returns use the
 * plain pointer-sized call opcodes. */
1738 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
1741 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
/* Resolve generic-sharing type variables to their basic representation first. */
1744 type = mini_get_basic_type_from_generic (gsctx, type);
1745 switch (type->type) {
1746 case MONO_TYPE_VOID:
1747 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
1750 case MONO_TYPE_BOOLEAN:
1753 case MONO_TYPE_CHAR:
1756 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1760 case MONO_TYPE_FNPTR:
1761 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1762 case MONO_TYPE_CLASS:
1763 case MONO_TYPE_STRING:
1764 case MONO_TYPE_OBJECT:
1765 case MONO_TYPE_SZARRAY:
1766 case MONO_TYPE_ARRAY:
1767 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1770 return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
1773 return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
1774 case MONO_TYPE_VALUETYPE:
/* Enums are handled as their underlying integral type (re-dispatch on elided line). */
1775 if (type->data.klass->enumtype) {
1776 type = type->data.klass->enum_basetype;
1779 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1780 case MONO_TYPE_TYPEDBYREF:
1781 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1782 case MONO_TYPE_GENERICINST:
/* Generic instantiations dispatch on their container class's byval type. */
1783 type = &type->data.generic_class->container_class->byval_arg;
1786 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
1792 * target_type_is_incompatible:
1793 * @cfg: MonoCompile context
1795 * Check that the item @arg on the evaluation stack can be stored
1796 * in the target type (can be a local, or field, etc).
1797 * The cfg arg can be used to check if we need verification or just
 * compatibility of the stack slot kinds.
1800 * Returns: non-0 value if arg can't be stored on a target.
1803 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1805 MonoType *simple_type;
1808 if (target->byref) {
1809 /* FIXME: check that the pointed to types match */
1810 if (arg->type == STACK_MP)
1811 return arg->klass != mono_class_from_mono_type (target);
1812 if (arg->type == STACK_PTR)
/* Strip enum wrappers / custom modifiers before dispatching on the element type. */
1817 simple_type = mono_type_get_underlying_type (target);
1818 switch (simple_type->type) {
1819 case MONO_TYPE_VOID:
1823 case MONO_TYPE_BOOLEAN:
1826 case MONO_TYPE_CHAR:
1829 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1833 /* STACK_MP is needed when setting pinned locals */
1834 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1839 case MONO_TYPE_FNPTR:
1840 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1843 case MONO_TYPE_CLASS:
1844 case MONO_TYPE_STRING:
1845 case MONO_TYPE_OBJECT:
1846 case MONO_TYPE_SZARRAY:
1847 case MONO_TYPE_ARRAY:
1848 if (arg->type != STACK_OBJ)
1850 /* FIXME: check type compatibility */
1854 if (arg->type != STACK_I8)
1859 if (arg->type != STACK_R8)
1862 case MONO_TYPE_VALUETYPE:
/* Value types must match by exact class, not just slot kind. */
1863 if (arg->type != STACK_VTYPE)
1865 klass = mono_class_from_mono_type (simple_type);
1866 if (klass != arg->klass)
1869 case MONO_TYPE_TYPEDBYREF:
1870 if (arg->type != STACK_VTYPE)
1872 klass = mono_class_from_mono_type (simple_type);
1873 if (klass != arg->klass)
1876 case MONO_TYPE_GENERICINST:
1877 if (mono_type_generic_inst_is_valuetype (simple_type)) {
1878 if (arg->type != STACK_VTYPE)
1880 klass = mono_class_from_mono_type (simple_type);
1881 if (klass != arg->klass)
1885 if (arg->type != STACK_OBJ)
1887 /* FIXME: check type compatibility */
1891 case MONO_TYPE_MVAR:
1892 /* FIXME: all the arguments must be references for now,
1893 * later look inside cfg and see if the arg num is
1894 * really a reference
1896 g_assert (cfg->generic_sharing_context);
1897 if (arg->type != STACK_OBJ)
1901 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
1907 * Prepare arguments for passing to a function call.
1908 * Return a non-zero value if the arguments can't be passed to the given
 * signature.
1910 * The type checks are not yet complete and some conversions may need
1911 * casts on 32 or 64 bit architectures.
1913 * FIXME: implement this using target_type_is_incompatible ()
1916 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
1918 MonoType *simple_type;
/* The implicit 'this' argument (args [0]) may be an object, managed pointer,
 * or unmanaged pointer (for value-type methods). */
1922 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
1926 for (i = 0; i < sig->param_count; ++i) {
1927 if (sig->params [i]->byref) {
1928 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
1932 simple_type = sig->params [i];
/* Resolve generic-sharing type variables before the kind check. */
1933 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
1935 switch (simple_type->type) {
1936 case MONO_TYPE_VOID:
1941 case MONO_TYPE_BOOLEAN:
1944 case MONO_TYPE_CHAR:
1947 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
1953 case MONO_TYPE_FNPTR:
1954 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
1957 case MONO_TYPE_CLASS:
1958 case MONO_TYPE_STRING:
1959 case MONO_TYPE_OBJECT:
1960 case MONO_TYPE_SZARRAY:
1961 case MONO_TYPE_ARRAY:
1962 if (args [i]->type != STACK_OBJ)
1967 if (args [i]->type != STACK_I8)
1972 if (args [i]->type != STACK_R8)
1975 case MONO_TYPE_VALUETYPE:
/* Enums are checked as their underlying integral type (re-dispatch on elided line). */
1976 if (simple_type->data.klass->enumtype) {
1977 simple_type = simple_type->data.klass->enum_basetype;
1980 if (args [i]->type != STACK_VTYPE)
1983 case MONO_TYPE_TYPEDBYREF:
1984 if (args [i]->type != STACK_VTYPE)
1987 case MONO_TYPE_GENERICINST:
1988 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
1992 g_error ("unknown type 0x%02x in check_call_signature",
/* Map an OP_*CALLVIRT opcode to its direct-call counterpart
 * (used when a virtual call can be statically devirtualized). */
2000 callvirt_to_call (int opcode)
2005 case OP_VOIDCALLVIRT:
/* Unhandled opcodes are a logic error in the caller. */
2014 g_assert_not_reached ();
/* Map an OP_*CALLVIRT opcode to its memory-indirect (membase) counterpart,
 * used for vtable/IMT slot dispatch. */
2021 callvirt_to_call_membase (int opcode)
2025 return OP_CALL_MEMBASE;
2026 case OP_VOIDCALLVIRT:
2027 return OP_VOIDCALL_MEMBASE;
2029 return OP_FCALL_MEMBASE;
2031 return OP_LCALL_MEMBASE;
2033 return OP_VCALL_MEMBASE;
/* Unhandled opcodes are a logic error in the caller. */
2035 g_assert_not_reached ();
2041 #ifdef MONO_ARCH_HAVE_IMT
/* Pass the IMT discriminator (the target MonoMethod*, or an explicit imt_arg)
 * to an interface call, either via a dedicated register (MONO_ARCH_IMT_REG)
 * or an arch-specific mechanism. */
2043 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
2045 #ifdef MONO_ARCH_IMT_REG
2046 int method_reg = alloc_preg (cfg);
2049 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2050 } else if (cfg->compile_aot) {
/* AOT cannot embed the raw method pointer; use a patchable constant. */
2051 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
2054 MONO_INST_NEW (cfg, ins, OP_PCONST);
2055 ins->inst_p0 = call->method;
2056 ins->dreg = method_reg;
2057 MONO_ADD_INS (cfg->cbb, ins);
2060 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2062 mono_arch_emit_imt_argument (cfg, call, imt_arg);
/* Allocate a MonoJumpInfo patch record from the given mempool. */
2067 static MonoJumpInfo *
2068 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2070 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2074 ji->data.target = target;
/* Forward declaration: needed by mono_emit_call_args below (soft-float path). */
2079 inline static MonoInst*
2080 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args);
/* Build a MonoCallInst for @sig and @args: select the call opcode, set up the
 * valuetype return temporary if needed, apply soft-float argument conversion,
 * and let the backend lower the outgoing arguments. The call is NOT yet added
 * to the current bblock; callers do that. */
2082 inline static MonoCallInst *
2083 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2084 MonoInst **args, int calli, int virtual)
2087 #ifdef MONO_ARCH_SOFT_FLOAT
2091 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2094 call->signature = sig;
2096 type_to_eval_stack_type ((cfg), sig->ret, &call->inst);
2098 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
/* Valuetype return: allocate a local to hold the result. */
2099 MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
2102 temp->backend.is_pinvoke = sig->pinvoke;
2105 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2106 * address of return value to increase optimization opportunities.
2107 * Before vtype decomposition, the dreg of the call ins itself represents the
2108 * fact the call modifies the return value. After decomposition, the call will
2109 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2110 * will be transformed into an LDADDR.
2112 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2113 loada->dreg = alloc_preg (cfg);
2114 loada->inst_p0 = temp;
2115 /* We reference the call too since call->dreg could change during optimization */
2116 loada->inst_p1 = call;
2117 MONO_ADD_INS (cfg->cbb, loada);
2119 call->inst.dreg = temp->dreg;
2121 call->vret_var = loada;
2122 } else if (!MONO_TYPE_IS_VOID (sig->ret))
2123 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2125 #ifdef MONO_ARCH_SOFT_FLOAT
2127 * If the call has a float argument, we would need to do an r8->r4 conversion using
2128 * an icall, but that cannot be done during the call sequence since it would clobber
2129 * the call registers + the stack. So we do it before emitting the call.
2131 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2133 MonoInst *in = call->args [i];
2135 if (i >= sig->hasthis)
2136 t = sig->params [i - sig->hasthis];
2138 t = &mono_defaults.int_class->byval_arg;
2139 t = mono_type_get_underlying_type (t);
2141 if (!t->byref && t->type == MONO_TYPE_R4) {
2142 MonoInst *iargs [1];
2146 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2148 /* The result will be in an int vreg */
2149 call->args [i] = conv;
/* Backend lowers the arguments into outgoing registers/stack slots. */
2154 mono_arch_emit_call (cfg, call);
2156 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2157 cfg->flags |= MONO_CFG_HAS_CALLS;
/* Emit an indirect call through the address in @addr (CIL calli). */
2162 inline static MonoInst*
2163 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr)
2165 MonoCallInst *call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE);
/* Target address goes in sreg1 for the *_REG call opcodes. */
2167 call->inst.sreg1 = addr->dreg;
2169 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2171 return (MonoInst*)call;
/* Indirect call that also passes a runtime generic context argument in the
 * dedicated MONO_ARCH_RGCTX_REG; unreachable on archs without that register. */
2174 inline static MonoInst*
2175 mono_emit_rgctx_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *rgctx_arg)
2177 #ifdef MONO_ARCH_RGCTX_REG
2182 rgctx_reg = mono_alloc_preg (cfg);
2183 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2185 call = (MonoCallInst*)mono_emit_calli (cfg, sig, args, addr);
2187 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2188 cfg->uses_rgctx_reg = TRUE;
2190 return (MonoInst*)call;
2192 g_assert_not_reached ();
/* Emit a managed call to @method. @this non-NULL selects virtual dispatch;
 * the call is devirtualized where possible (non-virtual or sealed targets),
 * otherwise dispatched through the vtable, or IMT for interface methods.
 * Remoting proxies get a remoting-invoke-with-check wrapper. */
2198 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2199 MonoInst **args, MonoInst *this, MonoInst *imt_arg)
2201 gboolean virtual = this != NULL;
2202 gboolean enable_for_aot = TRUE;
2205 if (method->string_ctor) {
2206 /* Create the real signature */
2207 /* FIXME: Cache these */
/* String ctors actually return the string, unlike their metadata signature. */
2208 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_full (cfg->mempool, sig);
2209 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2214 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual);
/* Possible transparent proxy: route through the remoting check wrapper. */
2216 if (this && sig->hasthis &&
2217 (method->klass->marshalbyref || method->klass == mono_defaults.object_class) &&
2218 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && !MONO_CHECK_THIS (this)) {
2219 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2221 call->method = method;
2223 call->inst.flags |= MONO_INST_HAS_METHOD;
2224 call->inst.inst_left = this;
2227 int vtable_reg, slot_reg, this_reg;
2229 this_reg = this->dreg;
2231 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
/* Delegate Invoke: call through delegate->invoke_impl directly. */
2232 if ((method->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
2233 /* Make a call to delegate->invoke_impl */
2234 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2235 call->inst.inst_basereg = this_reg;
2236 call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2237 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2239 return (MonoInst*)call;
2243 if ((!cfg->compile_aot || enable_for_aot) &&
2244 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2245 (MONO_METHOD_IS_FINAL (method) &&
2246 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK))) {
2248 * the method is not virtual, we just need to ensure this is not null
2249 * and then we can call the method directly.
2251 if (method->klass->marshalbyref || method->klass == mono_defaults.object_class) {
2252 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2255 if (!method->string_ctor) {
2256 cfg->flags |= MONO_CFG_HAS_CHECK_THIS;
2257 MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, this_reg);
2258 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, this_reg);
2261 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2263 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2265 return (MonoInst*)call;
2268 if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2270 * the method is virtual, but we can statically dispatch since either
2271 * it's class or the method itself are sealed.
2272 * But first we need to ensure it's not a null reference.
2274 cfg->flags |= MONO_CFG_HAS_CHECK_THIS;
2275 MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, this_reg);
2276 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, this_reg);
2278 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2279 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2281 return (MonoInst*)call;
/* True virtual dispatch: indirect call through a vtable (or IMT) slot. */
2284 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2286 vtable_reg = alloc_preg (cfg);
2287 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2288 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2290 #ifdef MONO_ARCH_HAVE_IMT
/* IMT dispatch: slot lives at a negative offset before the vtable. */
2292 guint32 imt_slot = mono_method_get_imt_slot (method);
2293 emit_imt_argument (cfg, call, imt_arg);
2294 slot_reg = vtable_reg;
2295 call->inst.inst_offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
/* Fallback (no IMT): index through the per-interface offsets table. */
2298 if (slot_reg == -1) {
2299 slot_reg = alloc_preg (cfg);
2300 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2301 call->inst.inst_offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
/* Class method: plain vtable slot. */
2304 slot_reg = vtable_reg;
2305 call->inst.inst_offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
2306 (mono_method_get_vtable_index (method) * SIZEOF_VOID_P);
2307 #ifdef MONO_ARCH_HAVE_IMT
2309 g_assert (mono_method_signature (method)->generic_param_count);
2310 emit_imt_argument (cfg, call, imt_arg);
2315 call->inst.sreg1 = slot_reg;
2316 call->virtual = TRUE;
2319 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2321 return (MonoInst*)call;
/* Like mono_emit_method_call_full, but additionally passes a vtable/rgctx
 * argument in MONO_ARCH_RGCTX_REG for generic-shared code. */
2325 mono_emit_rgctx_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2326 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *vtable_arg)
2333 #ifdef MONO_ARCH_RGCTX_REG
2334 rgctx_reg = mono_alloc_preg (cfg);
2335 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
2340 ins = mono_emit_method_call_full (cfg, method, sig, args, this, imt_arg);
2342 call = (MonoCallInst*)ins;
2344 #ifdef MONO_ARCH_RGCTX_REG
2345 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2346 cfg->uses_rgctx_reg = TRUE;
/* Convenience wrapper: call @method with its own metadata signature and no IMT arg. */
2355 static inline MonoInst*
2356 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2358 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), args, this, NULL);
/* Emit a direct call to a native function address @func with signature @sig. */
2362 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2369 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE);
2372 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2374 return (MonoInst*)call;
/* Emit a call to a registered JIT icall identified by its function address;
 * the icall's wrapper and signature are looked up from the icall table. */
2377 inline static MonoInst*
2378 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2380 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2384 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2388 * mono_emit_abs_call:
2390 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2392 inline static MonoInst*
2393 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2394 MonoMethodSignature *sig, MonoInst **args)
2396 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2400 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
 * recognize it and resolve the real target from the abs_patches table.
2403 if (cfg->abs_patches == NULL)
2404 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2405 g_hash_table_insert (cfg->abs_patches, ji, ji);
2406 ins = mono_emit_native_call (cfg, ji, sig, args);
2407 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/* Lazily look up and cache corlib's String.memcpy helper (3 args);
 * its absence indicates an outdated corlib. */
2412 get_memcpy_method (void)
2414 static MonoMethod *memcpy_method = NULL;
2415 if (!memcpy_method) {
2416 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2418 g_error ("Old corlib found. Install a new one");
2420 return memcpy_method;
2424 * Emit code to copy a valuetype of type @klass whose address is stored in
2425 * @src->dreg to memory whose address is stored at @dest->dreg.
2428 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
2430 MonoInst *iargs [3];
2433 MonoMethod *memcpy_method;
2437 * This check breaks with spilled vars... need to handle it during verification anyway.
2438 * g_assert (klass && klass == src->klass && klass == dest->klass);
/* Native (marshalled) layout and managed layout can differ in size/alignment. */
2442 n = mono_class_native_size (klass, &align);
2444 n = mono_class_value_size (klass, &align);
2446 #if HAVE_WRITE_BARRIERS
2447 /* if native is true there should be no references in the struct */
2448 if (klass->has_references && !native) {
2449 /* Avoid barriers when storing to the stack */
2450 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
2451 (dest->opcode == OP_LDADDR))) {
/* GC-aware copy via icall so the collector sees the reference stores. */
2454 EMIT_NEW_PCONST (cfg, iargs [2], klass);
2456 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* Small structs: inline the copy instead of calling the memcpy helper. */
2461 if ((cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
2462 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
2463 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
2467 EMIT_NEW_ICONST (cfg, iargs [2], n);
2469 memcpy_method = get_memcpy_method ();
2470 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/* Lazily look up and cache corlib's String.memset helper (3 args);
 * its absence indicates an outdated corlib. */
2475 get_memset_method (void)
2477 static MonoMethod *memset_method = NULL;
2478 if (!memset_method) {
2479 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
2481 g_error ("Old corlib found. Install a new one");
2483 return memset_method;
/* Emit code to zero-initialize a valuetype of type @klass at the address in
 * @dest->dreg (CIL initobj): inline memset for small types, corlib memset otherwise. */
2487 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
2489 MonoInst *iargs [3];
2492 MonoMethod *memset_method;
2494 /* FIXME: Optimize this for the case when dest is an LDADDR */
2496 mono_class_init (klass);
2497 n = mono_class_value_size (klass, &align);
2499 if (n <= sizeof (gpointer) * 5) {
2500 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
2503 memset_method = get_memset_method ();
2505 EMIT_NEW_ICONST (cfg, iargs [1], 0);
2506 EMIT_NEW_ICONST (cfg, iargs [2], n);
2507 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 *   Emit IR that produces the runtime generic context for shared generic
 *   code.  Three cases:
 *     - method inflated with a method_inst: load the MRGCTX variable;
 *     - static method or valuetype class: load the vtable variable
 *       (possibly indirecting through the MRGCTX's class_vtable);
 *     - otherwise: load the vtable out of the 'this' argument.
 *   Only valid when compiling with a generic sharing context.
 */
2512 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
2514 MonoInst *this = NULL;
2516 g_assert (cfg->generic_sharing_context);
2518 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
2519 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
2520 !method->klass->valuetype)
2521 EMIT_NEW_ARGLOAD (cfg, this, 0);
2523 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
2524 MonoInst *mrgctx_loc, *mrgctx_var;
2527 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
2529 mrgctx_loc = mono_get_vtable_var (cfg);
2530 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
2533 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
2534 MonoInst *vtable_loc, *vtable_var;
2538 vtable_loc = mono_get_vtable_var (cfg);
2539 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
2541 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
/* the variable actually holds an MRGCTX; fetch the vtable out of it */
2542 MonoInst *mrgctx_var = vtable_var;
2545 vtable_reg = alloc_preg (cfg);
2546 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
2547 vtable_var->type = STACK_PTR;
2553 int vtable_reg, res_reg;
2555 vtable_reg = alloc_preg (cfg);
2556 res_reg = alloc_preg (cfg);
/* instance method on a reference type: rgctx comes from this->vtable */
2557 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 *   Allocate (from @mp) and fill a MonoJumpInfoRgctxEntry describing an
 *   RGCTX slot: the method it belongs to, whether an MRGCTX is used, the
 *   patch describing the looked-up item (@patch_type/@patch_data) and the
 *   kind of information requested (@info_type).
 */
2562 static MonoJumpInfoRgctxEntry *
2563 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, int info_type)
2565 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
2566 res->method = method;
2567 res->in_mrgctx = in_mrgctx;
2568 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
2569 res->data->type = patch_type;
2570 res->data->data.target = patch_data;
2571 res->info_type = info_type;
/*
 * emit_rgctx_fetch:
 *   Emit a call to the lazy RGCTX fetch trampoline which resolves @entry
 *   at runtime, passing the rgctx as the single argument.
 */
2576 static inline MonoInst*
2577 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
2579 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 *   Emit code that fetches the @rgctx_type information for @klass from the
 *   runtime generic context of the current method.
 */
2583 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
2584 MonoClass *klass, int rgctx_type)
2586 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
2587 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2589 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_method:
 *   Like emit_get_rgctx_klass, but fetches @rgctx_type information for the
 *   method @cmethod (patched as a METHODCONST).
 */
2593 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
2594 MonoMethod *cmethod, int rgctx_type)
2596 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
2597 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2599 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *   Like emit_get_rgctx_klass, but fetches @rgctx_type information for the
 *   field @field (patched as MONO_PATCH_INFO_FIELD).
 */
2603 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
2604 MonoClassField *field, int rgctx_type)
2606 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
2607 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2609 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * mini_emit_check_array_type:
 *   Emit a runtime check that @obj's exact type is @array_class, throwing
 *   ArrayTypeMismatchException on mismatch (used for covariant array
 *   stores).  The comparison is done against the class (OPT_SHARED), an
 *   RGCTX-fetched vtable (shared generic code), or a constant vtable.
 */
2613 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
2615 int vtable_reg = alloc_preg (cfg);
2616 int context_used = 0;
2618 if (cfg->generic_sharing_context)
2619 context_used = mono_class_check_context_used (array_class);
2621 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
2623 if (cfg->opt & MONO_OPT_SHARED) {
/* shared code cannot bake in a vtable pointer; compare MonoClass instead */
2624 int class_reg = alloc_preg (cfg);
2625 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2626 if (cfg->compile_aot) {
2627 int klass_reg = alloc_preg (cfg);
2628 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
2629 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
2631 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
2633 } else if (context_used) {
2634 MonoInst *vtable_ins;
/* open generic: the expected vtable must come from the rgctx at runtime */
2636 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
2637 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
2639 if (cfg->compile_aot) {
2640 int vt_reg = alloc_preg (cfg);
2641 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, mono_class_vtable (cfg->domain, array_class));
2642 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
2644 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, mono_class_vtable (cfg->domain, array_class));
2648 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
/*
 * save_cast_details:
 *   When running with --debug=casts, record the source class (read from the
 *   object at @obj_reg) and the target @klass into the JIT TLS area so a
 *   failing cast can produce a detailed error message.  No-op otherwise.
 */
2652 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg)
2654 if (mini_get_debug_options ()->better_cast_details) {
2655 int to_klass_reg = alloc_preg (cfg);
2656 int vtable_reg = alloc_preg (cfg);
2657 int klass_reg = alloc_preg (cfg);
2658 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
2661 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
2665 MONO_ADD_INS (cfg->cbb, tls_get);
2666 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2667 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2669 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
2670 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
2671 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
/*
 * reset_cast_details:
 *   Clear the TLS cast-failure bookkeeping written by save_cast_details
 *   once the cast has succeeded.  No-op unless --debug=casts is active.
 */
2676 reset_cast_details (MonoCompile *cfg)
2678 /* Reset the variables holding the cast details */
2679 if (mini_get_debug_options ()->better_cast_details) {
2680 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
2682 MONO_ADD_INS (cfg->cbb, tls_get);
2683 /* It is enough to reset the from field */
2684 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
2689 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
2690 * generic code is generated.
/* Emits a call to Nullable<T>::Unbox; for shared generics the method address
 * is fetched from the RGCTX and an indirect rgctx call is emitted instead. */
2693 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
2695 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
2698 MonoInst *rgctx, *addr;
2700 /* FIXME: What if the class is shared? We might not
2701 have to get the address of the method from the
2703 addr = emit_get_rgctx_method (cfg, context_used, method,
2704 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
2706 rgctx = emit_get_rgctx (cfg, method, context_used);
2708 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
2710 return mono_emit_method_call (cfg, method, &val, NULL);
/*
 * handle_unbox:
 *   Emit the type check for unboxing sp[0] to value type @klass
 *   (rank must be 0, element class must match), then compute and return
 *   the address of the unboxed payload: obj + sizeof(MonoObject).
 *   Throws InvalidCastException at runtime on mismatch.
 */
2715 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
2719 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
2720 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
2721 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
2722 int rank_reg = alloc_dreg (cfg ,STACK_I4);
2724 obj_reg = sp [0]->dreg;
2725 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2726 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
2728 /* FIXME: generics */
2729 g_assert (klass->rank == 0);
/* an array object can never be unboxed to a value type */
2732 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
2733 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
2735 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2736 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
2739 MonoInst *element_class;
2741 /* This assertion is from the unboxcast insn */
2742 g_assert (klass->rank == 0);
/* shared generics: compare against the element class fetched from the rgctx */
2744 element_class = emit_get_rgctx_klass (cfg, context_used,
2745 klass->element_class, MONO_RGCTX_INFO_KLASS);
2747 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
2748 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
2750 save_cast_details (cfg, klass->element_class, obj_reg);
2751 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
2752 reset_cast_details (cfg);
/* the unboxed data starts right after the object header */
2755 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_PTR), obj_reg, sizeof (MonoObject));
2756 MONO_ADD_INS (cfg->cbb, add);
2757 add->type = STACK_MP;
/*
 * handle_alloc:
 *   Emit code allocating a new object of @klass (@for_box is TRUE when the
 *   allocation is for a box operation).  Picks the cheapest available path:
 *   shared-code mono_object_new, a corlib-index AOT helper, a managed GC
 *   allocator method, or a specialized allocation icall.
 */
2764 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box)
2766 MonoInst *iargs [2];
2769 if (cfg->opt & MONO_OPT_SHARED) {
2770 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
2771 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
2773 alloc_ftn = mono_object_new;
2774 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib) {
2775 /* This happens often in argument checking code, eg. throw new FooException... */
2776 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
2777 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
2778 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
2780 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2781 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
2784 if (managed_alloc) {
2785 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
2786 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
2788 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
2790 guint32 lw = vtable->klass->instance_size;
/* round the instance size up to a whole number of pointer words */
2791 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
2792 EMIT_NEW_ICONST (cfg, iargs [0], lw);
2793 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
2796 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
2800 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/*
 * handle_alloc_from_inst:
 *   Variant of handle_alloc for shared generic code: the vtable/class is
 *   not known at compile time but supplied at runtime via @data_inst,
 *   so allocation goes through mono_object_new{,_specific}.
 */
2804 handle_alloc_from_inst (MonoCompile *cfg, MonoClass *klass, MonoInst *data_inst,
2807 MonoInst *iargs [2];
2808 MonoMethod *managed_alloc = NULL;
2812 FIXME: we cannot get managed_alloc here because we can't get
2813 the class's vtable (because it's not a closed class)
2815 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2816 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
2819 if (cfg->opt & MONO_OPT_SHARED) {
2820 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
2821 iargs [1] = data_inst;
2822 alloc_ftn = mono_object_new;
2824 if (managed_alloc) {
/* NOTE: managed_alloc is always NULL here per the FIXME above; dead branch */
2825 iargs [0] = data_inst;
2826 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
2829 iargs [0] = data_inst;
2830 alloc_ftn = mono_object_new_specific;
2833 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/*
 * handle_box:
 *   Box the value @val of type @klass: Nullable<T> uses the managed
 *   Nullable::Box method; otherwise allocate an object and store the value
 *   right after the MonoObject header.
 */
2837 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass)
2839 MonoInst *alloc, *ins;
2841 if (mono_class_is_nullable (klass)) {
2842 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
2843 return mono_emit_method_call (cfg, method, &val, NULL);
2846 alloc = handle_alloc (cfg, klass, TRUE);
2848 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * handle_box_from_inst:
 *   Shared-generic variant of handle_box: the Nullable::Box address is
 *   fetched from the RGCTX, and the allocation uses the runtime-provided
 *   @data_inst instead of a compile-time vtable.
 */
2854 handle_box_from_inst (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used, MonoInst *data_inst)
2856 MonoInst *alloc, *ins;
2858 if (mono_class_is_nullable (klass)) {
2859 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
2860 /* FIXME: What if the class is shared? We might not
2861 have to get the method address from the RGCTX. */
2862 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
2863 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
2864 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2866 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
2868 alloc = handle_alloc_from_inst (cfg, klass, data_inst, TRUE);
2870 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * handle_castclass:
 *   Emit the 'castclass' check of @src against @klass.  NULL passes
 *   straight through; interfaces use the interface-cast helper; sealed
 *   non-array classes use a direct vtable/class comparison; everything
 *   else falls back to the generic class-cast emitter.  Throws
 *   InvalidCastException at runtime on failure.
 */
2877 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
2879 MonoBasicBlock *is_null_bb;
2880 int obj_reg = src->dreg;
2881 int vtable_reg = alloc_preg (cfg);
2883 NEW_BBLOCK (cfg, is_null_bb);
/* null references always satisfy castclass */
2885 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
2886 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
2888 save_cast_details (cfg, klass, obj_reg);
2890 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2891 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2892 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
2894 int klass_reg = alloc_preg (cfg);
2896 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2898 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
2899 /* the remoting code is broken, access the class for now */
2901 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
2902 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
2904 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2905 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
2907 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
2909 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2910 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, is_null_bb);
2914 MONO_START_BB (cfg, is_null_bb);
2916 reset_cast_details (cfg);
/*
 * handle_isinst:
 *   Emit the 'isinst' test of @src against @klass.  Produces the original
 *   object reference in res_reg when the test succeeds, NULL otherwise.
 *   Handles interfaces, arrays (rank + element class, SZARRAY vector
 *   check), Nullable and sealed-class fast paths separately.
 */
2922 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
2925 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
2926 int obj_reg = src->dreg;
2927 int vtable_reg = alloc_preg (cfg);
2928 int res_reg = alloc_preg (cfg);
2930 NEW_BBLOCK (cfg, is_null_bb);
2931 NEW_BBLOCK (cfg, false_bb);
2932 NEW_BBLOCK (cfg, end_bb);
2934 /* Do the assignment at the beginning, so the other assignment can be if converted */
2935 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
2936 ins->type = STACK_OBJ;
/* isinst on a null reference yields null */
2939 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
2940 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
2942 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2943 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2944 /* the is_null_bb target simply copies the input register to the output */
2945 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
2947 int klass_reg = alloc_preg (cfg);
2949 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2952 int rank_reg = alloc_preg (cfg);
2953 int eclass_reg = alloc_preg (cfg);
/* arrays: ranks must match, then compare element (cast) classes */
2955 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
2956 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
2957 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
2958 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2959 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
2960 if (klass->cast_class == mono_defaults.object_class) {
2961 int parent_reg = alloc_preg (cfg);
2962 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
2963 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
2964 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
2965 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
2966 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
2967 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
2968 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
2969 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
2970 } else if (klass->cast_class == mono_defaults.enum_class) {
2971 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
2972 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
2973 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
2974 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
2976 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
2977 /* Check that the object is a vector too */
2978 int bounds_reg = alloc_preg (cfg);
2979 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
2980 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
2981 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
2984 /* the is_null_bb target simply copies the input register to the output */
2985 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
2987 } else if (mono_class_is_nullable (klass)) {
2988 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2989 /* the is_null_bb target simply copies the input register to the output */
2990 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
2992 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
2993 /* the remoting code is broken, access the class for now */
2995 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
2996 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
2998 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2999 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3001 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3002 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
3004 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3005 /* the is_null_bb target simply copies the input register to the output */
3006 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, is_null_bb);
3011 MONO_START_BB (cfg, false_bb);
/* failed test: result is null */
3013 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
3014 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3016 MONO_START_BB (cfg, is_null_bb);
3018 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *   Remoting-aware variant of isinst used by the interpreter of
 *   CEE_MONO_CISINST.  Returns an I4 (see the comment below for the
 *   0/1/2 encoding); transparent proxies with custom type info fall into
 *   the "cannot be determined" (2) bucket.
 */
3024 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3026 /* This opcode takes as input an object reference and a class, and returns:
3027 0) if the object is an instance of the class,
3028 1) if the object is not instance of the class,
3029 2) if the object is a proxy whose type cannot be determined */
3032 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
3033 int obj_reg = src->dreg;
3034 int dreg = alloc_ireg (cfg);
3036 int klass_reg = alloc_preg (cfg);
3038 NEW_BBLOCK (cfg, true_bb);
3039 NEW_BBLOCK (cfg, false_bb);
3040 NEW_BBLOCK (cfg, false2_bb);
3041 NEW_BBLOCK (cfg, end_bb);
3042 NEW_BBLOCK (cfg, no_proxy_bb);
/* null is "not an instance" (result 1) */
3044 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3045 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
3047 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3048 NEW_BBLOCK (cfg, interface_fail_bb);
3050 tmp_reg = alloc_preg (cfg);
3051 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3052 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
3053 MONO_START_BB (cfg, interface_fail_bb);
3054 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* interface check failed: only a transparent proxy can still be undecided */
3056 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
3058 tmp_reg = alloc_preg (cfg);
3059 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3060 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3061 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
3063 tmp_reg = alloc_preg (cfg);
3064 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3065 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3067 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3068 tmp_reg = alloc_preg (cfg);
/* for proxies, test against the remote (proxied) class */
3069 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3070 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3072 tmp_reg = alloc_preg (cfg);
3073 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3074 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3075 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3077 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
3078 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
3080 MONO_START_BB (cfg, no_proxy_bb);
3082 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
3085 MONO_START_BB (cfg, false_bb);
3087 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3088 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3090 MONO_START_BB (cfg, false2_bb);
3092 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
3093 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3095 MONO_START_BB (cfg, true_bb);
3097 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3099 MONO_START_BB (cfg, end_bb);
/* materialize the result as an I4 on the evaluation stack */
3102 MONO_INST_NEW (cfg, ins, OP_ICONST);
3104 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *   Remoting-aware variant of castclass (CEE_MONO_CCASTCLASS).  Returns an
 *   I4 with the 0/1 encoding documented below, throwing
 *   InvalidCastException for definite failures; transparent proxies with
 *   custom type info yield 1 ("cannot be determined").
 */
3110 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3112 /* This opcode takes as input an object reference and a class, and returns:
3113 0) if the object is an instance of the class,
3114 1) if the object is a proxy whose type cannot be determined
3115 an InvalidCastException exception is thrown otherwhise*/
3118 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
3119 int obj_reg = src->dreg;
3120 int dreg = alloc_ireg (cfg);
3121 int tmp_reg = alloc_preg (cfg);
3122 int klass_reg = alloc_preg (cfg);
3124 NEW_BBLOCK (cfg, end_bb);
3125 NEW_BBLOCK (cfg, ok_result_bb);
/* null always casts successfully */
3127 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3128 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
3130 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3131 NEW_BBLOCK (cfg, interface_fail_bb);
3133 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3134 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
3135 MONO_START_BB (cfg, interface_fail_bb);
3136 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* anything but a transparent proxy is a hard cast failure here */
3138 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
3140 tmp_reg = alloc_preg (cfg);
3141 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3142 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3143 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
3145 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3146 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3149 NEW_BBLOCK (cfg, no_proxy_bb);
3151 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3152 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3153 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3155 tmp_reg = alloc_preg (cfg);
/* a proxy is tested against the class it stands in for */
3156 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3157 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3159 tmp_reg = alloc_preg (cfg);
3160 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3161 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3162 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3164 NEW_BBLOCK (cfg, fail_1_bb);
3166 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
3168 MONO_START_BB (cfg, fail_1_bb);
3170 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3171 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3173 MONO_START_BB (cfg, no_proxy_bb);
3175 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
3178 MONO_START_BB (cfg, ok_result_bb);
3180 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3182 MONO_START_BB (cfg, end_bb);
/* materialize the result as an I4 on the evaluation stack */
3185 MONO_INST_NEW (cfg, ins, OP_ICONST);
3187 ins->type = STACK_I4;
/*
 * handle_delegate_ctor:
 *   Inline the construction of a delegate of @klass targeting @method on
 *   @target: allocate the delegate object, fill in its target/method
 *   fields, optionally publish a per-domain code slot so the delegate
 *   trampoline can find the compiled code later, and install the delegate
 *   invoke trampoline.  Returns the new delegate instance.
 */
3192 static G_GNUC_UNUSED MonoInst*
3193 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method)
3195 gpointer *trampoline;
3196 MonoInst *obj, *method_ins, *tramp_ins;
3200 obj = handle_alloc (cfg, klass, FALSE);
3202 /* Inline the contents of mono_delegate_ctor */
3204 /* Set target field */
3205 /* Optimize away setting of NULL target */
3206 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0))
3207 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
3209 /* Set method field */
3210 EMIT_NEW_METHODCONST (cfg, method_ins, method);
3211 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
3214 * To avoid looking up the compiled code belonging to the target method
3215 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
3216 * store it, and we fill it after the method has been compiled.
3218 if (!cfg->compile_aot && !method->dynamic) {
3219 MonoInst *code_slot_ins;
3221 domain = mono_domain_get ();
/* the method_code_hash is shared domain state; create/lookup under the domain lock */
3222 mono_domain_lock (domain);
3223 if (!domain_jit_info (domain)->method_code_hash)
3224 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
3225 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
3227 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
3228 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
3230 mono_domain_unlock (domain);
3232 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
3233 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
3236 /* Set invoke_impl field */
3237 if (cfg->compile_aot) {
3238 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, klass);
3240 trampoline = mono_create_delegate_trampoline (klass);
3241 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
3243 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
3245 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *   Emit the allocation of a multi-dimensional array of rank @rank
 *   ('newobj' on an array ctor at @ip).  Goes through the vararg
 *   array-new icall wrapper, so the method is flagged as having varargs.
 */
3251 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
3253 MonoJitICallInfo *info;
3255 /* Need to register the icall so it gets an icall wrapper */
3256 info = mono_get_array_new_va_icall (rank);
3258 cfg->flags |= MONO_CFG_HAS_VARARGS;
3260 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
3261 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * mono_emit_load_got_addr:
 *   Emit the OP_LOAD_GOTADDR instruction initializing the GOT variable at
 *   the very start of the method, plus a dummy use in the exit block to
 *   keep the variable alive for liveness analysis.  Idempotent: does
 *   nothing when there is no got_var or it was already allocated.
 */
3265 mono_emit_load_got_addr (MonoCompile *cfg)
3267 MonoInst *getaddr, *dummy_use;
3269 if (!cfg->got_var || cfg->got_var_allocated)
3272 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
3273 getaddr->dreg = cfg->got_var->dreg;
3275 /* Add it to the start of the first bblock */
3276 if (cfg->bb_entry->code) {
3277 getaddr->next = cfg->bb_entry->code;
3278 cfg->bb_entry->code = getaddr;
3281 MONO_ADD_INS (cfg->bb_entry, getaddr);
3283 cfg->got_var_allocated = TRUE;
3286 * Add a dummy use to keep the got_var alive, since real uses might
3287 * only be generated by the back ends.
3288 * Add it to end_bblock, so the variable's lifetime covers the whole
3290 * It would be better to make the usage of the got var explicit in all
3291 * cases when the backend needs it (i.e. calls, throw etc.), so this
3292 * wouldn't be needed.
3294 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
3295 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Inlining size limit, read once from MONO_INLINELIMIT (defaults to
 * INLINE_LENGTH_LIMIT). */
3298 static int inline_limit;
3299 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *   Decide whether @method may be inlined into the method being compiled.
 *   Rejects runtime/internal/noinline/synchronized/pinvoke methods,
 *   MarshalByRef classes, methods with exception clauses, bodies over the
 *   inline limit, methods whose class still needs its .cctor run, methods
 *   with declarative security, and (on soft-float targets) R4 signatures.
 */
3302 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
3304 MonoMethodHeader *header = mono_method_get_header (method);
3306 #ifdef MONO_ARCH_SOFT_FLOAT
3307 MonoMethodSignature *sig = mono_method_signature (method);
/* NOTE(review): generic sharing disables inlining here — confirm intended scope */
3311 if (cfg->generic_sharing_context)
3314 #ifdef MONO_ARCH_HAVE_LMF_OPS
3315 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3316 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
3317 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
3321 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_RUNTIME) ||
3322 (method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3323 (method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
3324 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
3325 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL) ||
3326 (method->klass->marshalbyref) ||
3327 !header || header->num_clauses)
3330 /* also consider num_locals? */
3331 /* Do the size check early to avoid creating vtables */
3332 if (!inline_limit_inited) {
3333 if (getenv ("MONO_INLINELIMIT"))
3334 inline_limit = atoi (getenv ("MONO_INLINELIMIT"))
3336 inline_limit = INLINE_LENGTH_LIMIT;
3337 inline_limit_inited = TRUE;
3339 if (header->code_size >= inline_limit)
3343 * if we can initialize the class of the method right away, we do,
3344 * otherwise we don't allow inlining if the class needs initialization,
3345 * since it would mean inserting a call to mono_runtime_class_init()
3346 * inside the inlined code
3348 if (!(cfg->opt & MONO_OPT_SHARED)) {
3349 if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
3350 if (cfg->run_cctors && method->klass->has_cctor) {
3351 if (!method->klass->runtime_info)
3352 /* No vtable created yet */
3354 vtable = mono_class_vtable (cfg->domain, method->klass);
3357 /* This makes so that inline cannot trigger */
3358 /* .cctors: too many apps depend on them */
3359 /* running with a specific order... */
3360 if (! vtable->initialized)
3362 mono_runtime_class_init (vtable);
3364 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
3365 if (!method->klass->runtime_info)
3366 /* No vtable created yet */
3368 vtable = mono_class_vtable (cfg->domain, method->klass);
3371 if (!vtable->initialized)
3376 * If we're compiling for shared code
3377 * the cctor will need to be run at aot method load time, for example,
3378 * or at the end of the compilation of the inlining method.
3380 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
3385 * CAS - do not inline methods with declarative security
3386 * Note: this has to be before any possible return TRUE;
3388 if (mono_method_has_declsec (method))
3391 #ifdef MONO_ARCH_SOFT_FLOAT
/* soft-float cannot inline methods taking or returning R4 */
3393 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
3395 for (i = 0; i < sig->param_count; ++i)
3396 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/*
 * mini_field_access_needs_cctor_run:
 *   Return whether a static field access from @method requires emitting a
 *   class-initialization check for @vtable's class.  Already-initialized
 *   classes (outside AOT), BeforeFieldInit classes, classes without a
 *   pending cctor, and instance methods of the class itself are exempt.
 */
3404 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoVTable *vtable)
3406 if (vtable->initialized && !cfg->compile_aot)
3409 if (vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
3412 if (!mono_class_needs_cctor_run (vtable->klass, method))
3415 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (vtable->klass == method->klass))
3416 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit IR computing the address of element INDEX of the one-dimensional
 * array ARR with element class KLASS (the ldelema fast path).  Performs a
 * bounds check against MonoArray.max_length, then computes
 * &arr->vector [index * element_size].
 */
3423 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index)
3427 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
3429 mono_class_init (klass);
3430 size = mono_class_array_element_size (klass);
3432 mult_reg = alloc_preg (cfg);
3433 array_reg = arr->dreg;
3434 index_reg = index->dreg;
3436 #if SIZEOF_REGISTER == 8
3437 /* The array reg is 64 bits but the index reg is only 32 */
3438 index2_reg = alloc_preg (cfg);
3439 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
3441 index2_reg = index_reg;
3444 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
	/* x86/amd64 fast path: fold the multiply and add into a single LEA
	 * when the element size is a power of two */
3446 #if defined(__i386__) || defined(__x86_64__)
3447 if (size == 1 || size == 2 || size == 4 || size == 8) {
	/* log2 of the element size, indexed by size; only entries 1, 2, 4, 8 are used */
3448 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
3450 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
3451 ins->type = STACK_PTR;
	/* Generic path: explicit multiply + add + vector-offset add */
3457 add_reg = alloc_preg (cfg);
3459 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
3460 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
3461 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3462 ins->type = STACK_PTR;
3463 MONO_ADD_INS (cfg->cbb, ins);
/* Only compiled in when the architecture has a real multiply instruction,
 * since the address computation below relies on pointer-sized multiplies. */
3468 #ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit IR computing the address of element [index1, index2] of the
 * two-dimensional array ARR with element class KLASS.  Each dimension is
 * range-checked against its MonoArrayBounds entry (lower_bound/length),
 * then the flattened offset (realidx1 * length2 + realidx2) * size is
 * added to the vector start.
 */
3470 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
3472 int bounds_reg = alloc_preg (cfg);
3473 int add_reg = alloc_preg (cfg);
3474 int mult_reg = alloc_preg (cfg);
3475 int mult2_reg = alloc_preg (cfg);
3476 int low1_reg = alloc_preg (cfg);
3477 int low2_reg = alloc_preg (cfg);
3478 int high1_reg = alloc_preg (cfg);
3479 int high2_reg = alloc_preg (cfg);
3480 int realidx1_reg = alloc_preg (cfg);
3481 int realidx2_reg = alloc_preg (cfg);
3482 int sum_reg = alloc_preg (cfg);
3487 mono_class_init (klass);
3488 size = mono_class_array_element_size (klass);
3490 index1 = index_ins1->dreg;
3491 index2 = index_ins2->dreg;
3493 /* range checking */
3494 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
3495 arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
	/* First dimension: realidx1 = index1 - lower_bound; throw unless realidx1 < length */
3497 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
3498 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3499 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
3500 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
3501 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
	/* Unsigned compare also catches negative adjusted indexes */
3502 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
3503 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
	/* Second dimension: bounds [1] lives sizeof (MonoArrayBounds) past bounds [0] */
3505 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
3506 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3507 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
3508 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
3509 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
3510 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
3511 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
	/* addr = arr + (realidx1 * length2 + realidx2) * size + offsetof (vector) */
3513 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
3514 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
3515 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
3516 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
3517 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3519 ins->type = STACK_MP;
3521 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 *   Emit the element-address computation for the Get/Set/Address methods of
 * a multi-dimensional array.  Rank 1 and (when intrinsics are enabled and
 * the arch has a real multiply) rank 2 get inline fast paths; everything
 * else calls the generic marshalled Address helper.
 * For a setter, the trailing value argument is not an index, hence the
 * rank adjustment below.
 */
3528 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
3532 MonoMethod *addr_method;
3535 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
3538 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1]);
3540 #ifndef MONO_ARCH_EMULATE_MUL_DIV
3541 /* emit_ldelema_2 depends on OP_LMUL */
3542 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
3543 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
	/* Slow path: call the runtime-generated Address wrapper for this rank/size */
3547 element_size = mono_class_array_element_size (cmethod->klass->element_class);
3548 addr_method = mono_marshal_get_array_address (rank, element_size);
3549 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/*
 * mini_emit_inst_for_method:
 *
 *   Intrinsics dispatcher: if CMETHOD is one of the well-known corlib
 * methods handled here (String.get_Chars, Object.GetType, Interlocked.*,
 * Monitor.Enter/Exit, etc.), emit inline IR implementing it and return the
 * result instruction; otherwise fall through to the arch-specific hook.
 * Returning NULL (paths elided in this extract) means "no intrinsic, emit
 * a normal call".
 */
3555 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
3557 MonoInst *ins = NULL;
	/* Cached lookup of System.Runtime.CompilerServices.RuntimeHelpers */
3559 static MonoClass *runtime_helpers_class = NULL;
3560 if (! runtime_helpers_class)
3561 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
3562 "System.Runtime.CompilerServices", "RuntimeHelpers");
	/* --- System.String intrinsics --- */
3564 if (cmethod->klass == mono_defaults.string_class) {
3565 if (strcmp (cmethod->name, "get_Chars") == 0) {
3566 int dreg = alloc_ireg (cfg);
3567 int index_reg = alloc_preg (cfg);
3568 int mult_reg = alloc_preg (cfg);
3569 int add_reg = alloc_preg (cfg);
3571 #if SIZEOF_REGISTER == 8
3572 /* The array reg is 64 bits but the index reg is only 32 */
3573 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
3575 index_reg = args [1]->dreg;
3577 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
3579 #if defined(__i386__) || defined(__x86_64__)
3580 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
3581 add_reg = ins->dreg;
3582 /* Avoid a warning */
3584 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
	/* Generic path: chars are 2 bytes, hence the shift by 1 */
3587 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
3588 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
3589 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
3590 add_reg, G_STRUCT_OFFSET (MonoString, chars));
3592 type_from_op (ins, NULL, NULL);
3594 } else if (strcmp (cmethod->name, "get_Length") == 0) {
3595 int dreg = alloc_ireg (cfg);
3596 /* Decompose later to allow more optimizations */
3597 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
3598 ins->type = STACK_I4;
3599 cfg->cbb->has_array_access = TRUE;
3600 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
3603 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
3604 int mult_reg = alloc_preg (cfg);
3605 int add_reg = alloc_preg (cfg);
3607 /* The corlib functions check for oob already. */
3608 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
3609 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
3610 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
	/* --- System.Object intrinsics --- */
3613 } else if (cmethod->klass == mono_defaults.object_class) {
3615 if (strcmp (cmethod->name, "GetType") == 0) {
	/* obj->vtable->type, two dependent loads */
3616 int dreg = alloc_preg (cfg);
3617 int vt_reg = alloc_preg (cfg);
3618 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3619 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
3620 type_from_op (ins, NULL, NULL);
	/* Address-based hash is only valid when objects never move */
3623 #if !defined(MONO_ARCH_EMULATE_MUL_DIV) && !defined(HAVE_MOVING_COLLECTOR)
3624 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0) {
3625 int dreg = alloc_ireg (cfg);
3626 int t1 = alloc_ireg (cfg);
	/* Knuth-style multiplicative hash of the object address */
3628 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
3629 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
3630 ins->type = STACK_I4;
	/* Object..ctor is a no-op */
3634 } else if (strcmp (cmethod->name, ".ctor") == 0) {
3635 MONO_INST_NEW (cfg, ins, OP_NOP);
3636 MONO_ADD_INS (cfg->cbb, ins);
	/* --- System.Array intrinsics --- */
3640 } else if (cmethod->klass == mono_defaults.array_class) {
	/* Quick reject: only the get_* accessors are handled */
3641 if (cmethod->name [0] != 'g')
3644 if (strcmp (cmethod->name, "get_Rank") == 0) {
3645 int dreg = alloc_ireg (cfg);
3646 int vtable_reg = alloc_preg (cfg);
3647 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, vtable_reg,
3648 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3649 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
3650 vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3651 type_from_op (ins, NULL, NULL);
3654 } else if (strcmp (cmethod->name, "get_Length") == 0) {
3655 int dreg = alloc_ireg (cfg);
3657 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
3658 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
3659 type_from_op (ins, NULL, NULL);
	/* --- RuntimeHelpers --- */
3664 } else if (cmethod->klass == runtime_helpers_class) {
3666 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
3667 EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
	/* --- System.Threading.Thread --- */
3671 } else if (cmethod->klass == mono_defaults.thread_class) {
3672 if (strcmp (cmethod->name, "get_CurrentThread") == 0 && (ins = mono_arch_get_thread_intrinsic (cfg))) {
3673 ins->dreg = alloc_preg (cfg);
3674 ins->type = STACK_OBJ;
3675 MONO_ADD_INS (cfg->cbb, ins);
3677 } else if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
3678 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
3679 MONO_ADD_INS (cfg->cbb, ins);
3681 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
3682 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
3683 MONO_ADD_INS (cfg->cbb, ins);
	/* --- System.Threading.Monitor fast paths --- */
3686 } else if (cmethod->klass == mono_defaults.monitor_class) {
	/* Variant 1: trampoline taking the object in a fixed register */
3687 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
3688 if (strcmp (cmethod->name, "Enter") == 0) {
3691 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
3692 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
3693 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
3694 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
3696 return (MonoInst*)call;
3697 } else if (strcmp (cmethod->name, "Exit") == 0) {
3700 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
3701 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
3702 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
3703 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
3705 return (MonoInst*)call;
	/* Variant 2: managed IL fast-path wrappers */
3707 #elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
3708 MonoMethod *fast_method = NULL;
3710 /* Avoid infinite recursion */
3711 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
3712 (strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
3713 strcmp (cfg->method->name, "FastMonitorExit") == 0))
3716 if (strcmp (cmethod->name, "Enter") == 0 ||
3717 strcmp (cmethod->name, "Exit") == 0)
3718 fast_method = mono_monitor_get_fast_path (cmethod);
3722 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
	/* --- Array.GetGenericValueImpl: elementwise load + store via ldelema --- */
3724 } else if (mini_class_is_system_array (cmethod->klass) &&
3725 strcmp (cmethod->name, "GetGenericValueImpl") == 0) {
3726 MonoInst *addr, *store, *load;
3727 MonoClass *eklass = mono_class_from_mono_type (fsig->params [1]);
3729 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1]);
3730 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
3731 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
	/* --- System.Threading.Interlocked --- */
3733 } else if (cmethod->klass->image == mono_defaults.corlib &&
3734 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
3735 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
3738 #if SIZEOF_REGISTER == 8
3739 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
3740 /* 64 bit reads are already atomic */
3741 MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
3742 ins->dreg = mono_alloc_preg (cfg);
3743 ins->inst_basereg = args [0]->dreg;
3744 ins->inst_offset = 0;
3745 MONO_ADD_INS (cfg->cbb, ins);
3749 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
	/* Increment/Decrement lower to atomic-add of +/-1 */
3750 if (strcmp (cmethod->name, "Increment") == 0) {
3751 MonoInst *ins_iconst;
3754 if (fsig->params [0]->type == MONO_TYPE_I4)
3755 opcode = OP_ATOMIC_ADD_NEW_I4;
3756 #if SIZEOF_REGISTER == 8
3757 else if (fsig->params [0]->type == MONO_TYPE_I8)
3758 opcode = OP_ATOMIC_ADD_NEW_I8;
3761 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
3762 ins_iconst->inst_c0 = 1;
3763 ins_iconst->dreg = mono_alloc_ireg (cfg);
3764 MONO_ADD_INS (cfg->cbb, ins_iconst);
3766 MONO_INST_NEW (cfg, ins, opcode);
3767 ins->dreg = mono_alloc_ireg (cfg);
3768 ins->inst_basereg = args [0]->dreg;
3769 ins->inst_offset = 0;
3770 ins->sreg2 = ins_iconst->dreg;
3771 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
3772 MONO_ADD_INS (cfg->cbb, ins);
3774 } else if (strcmp (cmethod->name, "Decrement") == 0) {
3775 MonoInst *ins_iconst;
3778 if (fsig->params [0]->type == MONO_TYPE_I4)
3779 opcode = OP_ATOMIC_ADD_NEW_I4;
3780 #if SIZEOF_REGISTER == 8
3781 else if (fsig->params [0]->type == MONO_TYPE_I8)
3782 opcode = OP_ATOMIC_ADD_NEW_I8;
3785 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
3786 ins_iconst->inst_c0 = -1;
3787 ins_iconst->dreg = mono_alloc_ireg (cfg);
3788 MONO_ADD_INS (cfg->cbb, ins_iconst);
3790 MONO_INST_NEW (cfg, ins, opcode);
3791 ins->dreg = mono_alloc_ireg (cfg);
3792 ins->inst_basereg = args [0]->dreg;
3793 ins->inst_offset = 0;
3794 ins->sreg2 = ins_iconst->dreg;
3795 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
3796 MONO_ADD_INS (cfg->cbb, ins);
3798 } else if (strcmp (cmethod->name, "Add") == 0) {
3801 if (fsig->params [0]->type == MONO_TYPE_I4)
3802 opcode = OP_ATOMIC_ADD_NEW_I4;
3803 #if SIZEOF_REGISTER == 8
3804 else if (fsig->params [0]->type == MONO_TYPE_I8)
3805 opcode = OP_ATOMIC_ADD_NEW_I8;
3809 MONO_INST_NEW (cfg, ins, opcode);
3810 ins->dreg = mono_alloc_ireg (cfg);
3811 ins->inst_basereg = args [0]->dreg;
3812 ins->inst_offset = 0;
3813 ins->sreg2 = args [1]->dreg;
3814 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
3815 MONO_ADD_INS (cfg->cbb, ins);
3818 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
3820 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
3821 if (strcmp (cmethod->name, "Exchange") == 0) {
3824 if (fsig->params [0]->type == MONO_TYPE_I4)
3825 opcode = OP_ATOMIC_EXCHANGE_I4;
3826 #if SIZEOF_REGISTER == 8
	/* On 64-bit, native int and object references are 8 bytes wide */
3827 else if ((fsig->params [0]->type == MONO_TYPE_I8) ||
3828 (fsig->params [0]->type == MONO_TYPE_I) ||
3829 (fsig->params [0]->type == MONO_TYPE_OBJECT))
3830 opcode = OP_ATOMIC_EXCHANGE_I8;
3832 else if ((fsig->params [0]->type == MONO_TYPE_I) ||
3833 (fsig->params [0]->type == MONO_TYPE_OBJECT))
3834 opcode = OP_ATOMIC_EXCHANGE_I4;
3839 MONO_INST_NEW (cfg, ins, opcode);
3840 ins->dreg = mono_alloc_ireg (cfg);
3841 ins->inst_basereg = args [0]->dreg;
3842 ins->inst_offset = 0;
3843 ins->sreg2 = args [1]->dreg;
3844 MONO_ADD_INS (cfg->cbb, ins);
3846 switch (fsig->params [0]->type) {
3848 ins->type = STACK_I4;
3852 ins->type = STACK_I8;
3854 case MONO_TYPE_OBJECT:
3855 ins->type = STACK_OBJ;
3858 g_assert_not_reached ();
3861 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
3863 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS_IMM
3865 * Can't implement CompareExchange methods this way since they have
3866 * three arguments. We can implement one of the common cases, where the new
3867 * value is a constant.
3869 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
3870 if ((fsig->params [1]->type == MONO_TYPE_I4 ||
3871 (sizeof (gpointer) == 4 && fsig->params [1]->type == MONO_TYPE_I))
3872 && args [2]->opcode == OP_ICONST) {
3873 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_IMM_I4);
3874 ins->dreg = alloc_ireg (cfg);
3875 ins->sreg1 = args [0]->dreg;
3876 ins->sreg2 = args [1]->dreg;
3877 ins->backend.data = GINT_TO_POINTER (args [2]->inst_c0);
3878 ins->type = STACK_I4;
3879 MONO_ADD_INS (cfg->cbb, ins);
3881 /* The I8 case is hard to detect, since the arg might be a conv.i8 (iconst) tree */
3883 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS_IMM */
	/* --- Miscellaneous corlib methods --- */
3887 } else if (cmethod->klass->image == mono_defaults.corlib) {
3888 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
3889 && strcmp (cmethod->klass->name, "Debugger") == 0) {
3890 MONO_INST_NEW (cfg, ins, OP_BREAK);
3891 MONO_ADD_INS (cfg->cbb, ins);
	/* Environment.IsRunningOnWindows folds to a compile-time constant */
3894 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
3895 && strcmp (cmethod->klass->name, "Environment") == 0) {
3896 #ifdef PLATFORM_WIN32
3897 EMIT_NEW_ICONST (cfg, ins, 1);
3899 EMIT_NEW_ICONST (cfg, ins, 0);
3903 } else if (cmethod->klass == mono_defaults.math_class) {
3905 * There is general branches code for Min/Max, but it does not work for
3907 * http://everything2.com/?node_id=1051618
	/* SIMD intrinsics get a chance before the arch-specific hook */
3911 #ifdef MONO_ARCH_SIMD_INTRINSICS
3912 if (cfg->opt & MONO_OPT_SIMD) {
3913 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
3919 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
3923 * This entry point could be used later for arbitrary method
3923 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *
 *   Redirect certain runtime-internal calls to specialized implementations.
 * Currently only String.InternalAllocateStr is handled: when a managed GC
 * allocator is available, the call is rewritten to invoke it with the
 * string vtable as the first argument.  Paths returning NULL (no redirect)
 * are elided in this extract.
 */
3926 inline static MonoInst*
3927 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
3928 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
3930 if (method->klass == mono_defaults.string_class) {
3931 /* managed string allocation support */
3932 if (strcmp (method->name, "InternalAllocateStr") == 0) {
3933 MonoInst *iargs [2];
3934 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3935 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, FALSE);
3938 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3939 iargs [1] = args [0];
3940 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
/*
 * mono_save_args:
 *
 *   When inlining, copy the caller's argument instructions SP into fresh
 * local variables registered as the inlinee's cfg->args, emitting a store
 * for each.  The implicit 'this' (when sig->hasthis) is typed from the
 * stack entry rather than the signature.
 */
3947 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
3949 MonoInst *store, *temp;
3952 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
3953 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
3956 * FIXME: We should use *args++ = sp [0], but that would mean the arg
3957 * would be different than the MonoInst's used to represent arguments, and
3958 * the ldelema implementation can't deal with that.
3959 * Solution: When ldelema is used on an inline argument, create a var for
3960 * it, emit ldelema on that var, and emit the saving code below in
3961 * inline_method () if needed.
3963 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
3964 cfg->args [i] = temp;
3965 /* This uses cfg->args [i] which is set by the preceding line */
3966 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
3967 store->cil_code = sp [0]->cil_code;
/*
 * Debug switches: when non-zero, inlining can be restricted to methods
 * whose full name matches the MONO_INLINE_{CALLED,CALLER}_METHOD_NAME_LIMIT
 * environment variables (see the check_inline_*_name_limit helpers below).
 * NOTE(review): these are normally disabled debugging aids — confirm that
 * shipping with them set to 1 is intentional.
 */
3972 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
3973 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
3975 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *
 *   Debug helper: only allow inlining of CALLED_METHOD if its full name
 * starts with the prefix given in $MONO_INLINE_CALLED_METHOD_NAME_LIMIT.
 * The prefix is read once and cached; an unset variable means no limit.
 */
3977 check_inline_called_method_name_limit (MonoMethod *called_method)
3980 static char *limit = NULL;
3982 if (limit == NULL) {
3983 char *limit_string = getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
3985 if (limit_string != NULL)
3986 limit = limit_string;
	/* Empty string marks "no limit configured" so getenv runs only once */
3988 limit = (char *) "";
3991 if (limit [0] != '\0') {
3992 char *called_method_name = mono_method_full_name (called_method, TRUE);
3994 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
3995 g_free (called_method_name);
3997 //return (strncmp_result <= 0);
3998 return (strncmp_result == 0);
4005 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *
 *   Debug helper: only allow inlining into CALLER_METHOD if its full name
 * starts with the prefix given in $MONO_INLINE_CALLER_METHOD_NAME_LIMIT.
 * Mirrors check_inline_called_method_name_limit () above.
 */
4007 check_inline_caller_method_name_limit (MonoMethod *caller_method)
4010 static char *limit = NULL;
4012 if (limit == NULL) {
4013 char *limit_string = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
4014 if (limit_string != NULL) {
4015 limit = limit_string;
	/* Empty string marks "no limit configured" so getenv runs only once */
4017 limit = (char *) "";
4021 if (limit [0] != '\0') {
4022 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
4024 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
4025 g_free (caller_method_name);
4027 //return (strncmp_result <= 0);
4028 return (strncmp_result == 0);
/*
 * inline_method:
 *
 *   Inline CMETHOD into the method being compiled.  Saves the pieces of
 * CFG state that mono_method_to_ir () will clobber, converts the inlinee's
 * IL between fresh start/end bblocks, then either stitches the new blocks
 * into the caller's CFG (when the inlining cost is acceptable or
 * INLINE_ALLWAYS is set) or rolls everything back.  Returns the value of
 * the inlined call via RVAR (paths elided in this extract).
 * NOTE(review): the save/restore sequence below is order-sensitive; keep
 * the prev_* pairs matched exactly.
 */
4036 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
4037 guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_allways)
4039 MonoInst *ins, *rvar = NULL;
4040 MonoMethodHeader *cheader;
4041 MonoBasicBlock *ebblock, *sbblock;
4043 MonoMethod *prev_inlined_method;
4044 MonoInst **prev_locals, **prev_args;
4045 MonoType **prev_arg_types;
4046 guint prev_real_offset;
4047 GHashTable *prev_cbb_hash;
4048 MonoBasicBlock **prev_cil_offset_to_bb;
4049 MonoBasicBlock *prev_cbb;
4050 unsigned char* prev_cil_start;
4051 guint32 prev_cil_offset_to_bb_len;
4052 MonoMethod *prev_current_method;
4053 MonoGenericContext *prev_generic_context;
4055 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
	/* Optional debug restrictions on what may be inlined (see defines above) */
4057 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
4058 if ((! inline_allways) && ! check_inline_called_method_name_limit (cmethod))
4061 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
4062 if ((! inline_allways) && ! check_inline_caller_method_name_limit (cfg->method))
4066 if (cfg->verbose_level > 2)
4067 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4069 if (!cmethod->inline_info) {
4070 mono_jit_stats.inlineable_methods++;
4071 cmethod->inline_info = 1;
4073 /* allocate space to store the return value */
4074 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
4075 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
4078 /* allocate local variables */
4079 cheader = mono_method_get_header (cmethod);
4080 prev_locals = cfg->locals;
4081 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
4082 for (i = 0; i < cheader->num_locals; ++i)
4083 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
4085 /* allocate start and end blocks */
4086 /* This is needed so if the inline is aborted, we can clean up */
4087 NEW_BBLOCK (cfg, sbblock);
4088 sbblock->real_offset = real_offset;
4090 NEW_BBLOCK (cfg, ebblock);
4091 ebblock->block_num = cfg->num_bblocks++;
4092 ebblock->real_offset = real_offset;
	/* Save the caller-compilation state that mono_method_to_ir () overwrites */
4094 prev_args = cfg->args;
4095 prev_arg_types = cfg->arg_types;
4096 prev_inlined_method = cfg->inlined_method;
4097 cfg->inlined_method = cmethod;
4098 cfg->ret_var_set = FALSE;
4099 prev_real_offset = cfg->real_offset;
4100 prev_cbb_hash = cfg->cbb_hash;
4101 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
4102 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
4103 prev_cil_start = cfg->cil_start;
4104 prev_cbb = cfg->cbb;
4105 prev_current_method = cfg->current_method;
4106 prev_generic_context = cfg->generic_context;
4108 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, *ip == CEE_CALLVIRT);
	/* Restore the saved state in the same order it was saved */
4110 cfg->inlined_method = prev_inlined_method;
4111 cfg->real_offset = prev_real_offset;
4112 cfg->cbb_hash = prev_cbb_hash;
4113 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
4114 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
4115 cfg->cil_start = prev_cil_start;
4116 cfg->locals = prev_locals;
4117 cfg->args = prev_args;
4118 cfg->arg_types = prev_arg_types;
4119 cfg->current_method = prev_current_method;
4120 cfg->generic_context = prev_generic_context;
	/* Accept the inline if it was cheap enough (cost < 60) or forced */
4122 if ((costs >= 0 && costs < 60) || inline_allways) {
4123 if (cfg->verbose_level > 2)
4124 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4126 mono_jit_stats.inlined_methods++;
4128 /* always add some code to avoid block split failures */
4129 MONO_INST_NEW (cfg, ins, OP_NOP);
4130 MONO_ADD_INS (prev_cbb, ins);
4132 prev_cbb->next_bb = sbblock;
4133 link_bblock (cfg, prev_cbb, sbblock);
4136 * Get rid of the begin and end bblocks if possible to aid local
4139 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
4141 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
4142 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
4144 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
4145 MonoBasicBlock *prev = ebblock->in_bb [0];
4146 mono_merge_basic_blocks (cfg, prev, ebblock);
4148 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
4149 mono_merge_basic_blocks (cfg, prev_cbb, prev);
4150 cfg->cbb = prev_cbb;
4158 * If the inlined method contains only a throw, then the ret var is not
4159 * set, so set it to a dummy value.
4161 if (!cfg->ret_var_set) {
4162 static double r8_0 = 0.0;
4164 switch (rvar->type) {
4166 MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
4169 MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
4174 MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
4177 MONO_INST_NEW (cfg, ins, OP_R8CONST);
4178 ins->type = STACK_R8;
4179 ins->inst_p0 = (void*)&r8_0;
4180 ins->dreg = rvar->dreg;
4181 MONO_ADD_INS (cfg->cbb, ins);
4184 MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (fsig->ret));
4187 g_assert_not_reached ();
4191 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
	/* Rejected inline: discard the partially-built blocks and any exception state */
4196 if (cfg->verbose_level > 2)
4197 printf ("INLINE ABORTED %s\n", mono_method_full_name (cmethod, TRUE));
4198 cfg->exception_type = MONO_EXCEPTION_NONE;
4199 mono_loader_clear_error ();
4201 /* This gets rid of the newly added bblocks */
4202 cfg->cbb = prev_cbb;
4208 * Some of these comments may well be out-of-date.
4209 * Design decisions: we do a single pass over the IL code (and we do bblock
4210 * splitting/merging in the few cases when it's required: a back jump to an IL
4211 * address that was not already seen as bblock starting point).
4212 * Code is validated as we go (full verification is still better left to metadata/verify.c).
4213 * Complex operations are decomposed in simpler ones right away. We need to let the
4214 * arch-specific code peek and poke inside this process somehow (except when the
4215 * optimizations can take advantage of the full semantic info of coarse opcodes).
4216 * All the opcodes of the form opcode.s are 'normalized' to opcode.
4217 * MonoInst->opcode initially is the IL opcode or some simplification of that
4218 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
4219 * opcode with value bigger than OP_LAST.
4220 * At this point the IR can be handed over to an interpreter, a dumb code generator
4221 * or to the optimizing code generator that will translate it to SSA form.
4223 * Profiling directed optimizations.
4224 * We may compile by default with few or no optimizations and instrument the code
4225 * or the user may indicate what methods to optimize the most either in a config file
4226 * or through repeated runs where the compiler applies offline the optimizations to
4227 * each method and then decides if it was worth it.
/*
 * Verification helpers used throughout mono_method_to_ir (): each macro
 * validates one aspect of the IL stream (stack depth, argument/local
 * indexes, opcode size, type loads) and bails out via UNVERIFIED or
 * load_error on failure.
 */
4230 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
4231 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
4232 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
4233 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
4234 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
4235 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
4236 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
4237 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; goto load_error;}
4239 /* offset from br.s -> br like opcodes */
4240 #define BIG_BRANCH_OFFSET 13
4243 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
4245 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
4247 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   Pre-pass over the IL stream [START, END): decode each opcode, create a
 * basic block at every branch target and at the instruction following a
 * branch/switch, and mark blocks ending in CEE_THROW as out-of-line (cold)
 * code.  Error/return paths are elided in this extract.
 */
4251 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
4253 unsigned char *ip = start;
4254 unsigned char *target;
4257 MonoBasicBlock *bblock;
4258 const MonoOpcode *opcode;
4261 cli_addr = ip - start;
4262 i = mono_opcode_value ((const guint8 **)&ip, end);
4265 opcode = &mono_opcodes [i];
	/* Advance ip by the operand size; only branch-like operands create bblocks */
4266 switch (opcode->argument) {
4267 case MonoInlineNone:
4270 case MonoInlineString:
4271 case MonoInlineType:
4272 case MonoInlineField:
4273 case MonoInlineMethod:
4276 case MonoShortInlineR:
4283 case MonoShortInlineVar:
4284 case MonoShortInlineI:
4287 case MonoShortInlineBrTarget:
	/* 1-byte signed displacement, relative to the following instruction */
4288 target = start + cli_addr + 2 + (signed char)ip [1];
4289 GET_BBLOCK (cfg, bblock, target);
	/* Fall-through successor also starts a new block */
4292 GET_BBLOCK (cfg, bblock, ip);
4294 case MonoInlineBrTarget:
	/* 4-byte signed displacement */
4295 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
4296 GET_BBLOCK (cfg, bblock, target);
4299 GET_BBLOCK (cfg, bblock, ip);
4301 case MonoInlineSwitch: {
4302 guint32 n = read32 (ip + 1);
	/* cli_addr now addresses the instruction after the jump table */
4305 cli_addr += 5 + 4 * n;
4306 target = start + cli_addr;
4307 GET_BBLOCK (cfg, bblock, target);
4309 for (j = 0; j < n; ++j) {
4310 target = start + cli_addr + (gint32)read32 (ip);
4311 GET_BBLOCK (cfg, bblock, target);
4321 g_assert_not_reached ();
4324 if (i == CEE_THROW) {
4325 unsigned char *bb_start = ip - 1;
4327 /* Find the start of the bblock containing the throw */
4329 while ((bb_start >= start) && !bblock) {
4330 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
	/* Throwing blocks are moved out of the hot path by the backend */
4334 bblock->out_of_line = 1;
4343 static inline MonoMethod *
4344 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4348 if (m->wrapper_type != MONO_WRAPPER_NONE)
4349 return mono_method_get_wrapper_data (m, token);
4351 method = mono_get_method_full (m->klass->image, token, klass, context);
4356 static inline MonoMethod *
4357 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4359 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
4361 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 *
 *   Resolve TOKEN to a MonoClass in the context of METHOD: wrapper methods
 * keep the class in their wrapper data, otherwise the token is looked up in
 * the method's image.  The class is initialized before being returned
 * (return path elided in this extract).
 */
4367 static inline MonoClass*
4368 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
4372 if (method->wrapper_type != MONO_WRAPPER_NONE)
4373 klass = mono_method_get_wrapper_data (method, token);
4375 klass = mono_class_get_full (method->klass->image, token, context);
4377 mono_class_init (klass);
4382 * Returns TRUE if the JIT should abort inlining because "callee"
4383 * is influenced by security attributes.
/*
 * check_linkdemand:
 *
 *   Evaluate CAS link demands when CALLER invokes CALLEE.  On an ECMA link
 * demand, emit code throwing a SecurityException before the call; on other
 * failures record them on CFG (unless an earlier exception is pending).
 */
4386 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
	/* Only evaluate declarative security when compiling an inlined callee */
4390 if ((cfg->method != caller) && mono_method_has_declsec (callee)) {
4394 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
4395 if (result == MONO_JIT_SECURITY_OK)
4398 if (result == MONO_JIT_LINKDEMAND_ECMA) {
4399 /* Generate code to throw a SecurityException before the actual call/link */
4400 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4403 NEW_ICONST (cfg, args [0], 4);
4404 NEW_METHODCONST (cfg, args [1], caller);
4405 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
4406 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
4407 /* don't hide previous results */
4408 cfg->exception_type = MONO_EXCEPTION_SECURITY_LINKDEMAND;
4409 cfg->exception_data = result;
/*
 * method_access_exception:
 *
 *   Return (and cache in a static) the SecurityManager.MethodAccessException
 * helper used to raise method-access violations at runtime.
 */
4417 method_access_exception (void)
4419 static MonoMethod *method = NULL;
4422 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4423 method = mono_class_get_method_from_name (secman->securitymanager,
4424 "MethodAccessException", 2);
/*
 * emit_throw_method_access_exception:
 *
 *   Emit a call raising MethodAccessException for CALLER accessing CALLEE,
 * using the cached SecurityManager helper.
 */
4431 emit_throw_method_access_exception (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
4432 MonoBasicBlock *bblock, unsigned char *ip)
4434 MonoMethod *thrower = method_access_exception ();
4437 EMIT_NEW_METHODCONST (cfg, args [0], caller);
4438 EMIT_NEW_METHODCONST (cfg, args [1], callee);
4439 mono_emit_method_call (cfg, thrower, args, NULL);
/*
 * verification_exception:
 *
 *   Return (and cache in a static) the SecurityManager.VerificationException
 * helper used to raise verification failures at runtime.
 */
4443 verification_exception (void)
4445 static MonoMethod *method = NULL;
4448 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4449 method = mono_class_get_method_from_name (secman->securitymanager,
4450 "VerificationException", 0);
/*
 * emit_throw_verification_exception:
 *
 *   Emit a zero-argument call raising VerificationException via the cached
 * SecurityManager helper.
 */
4457 emit_throw_verification_exception (MonoCompile *cfg, MonoBasicBlock *bblock, unsigned char *ip)
4459 MonoMethod *thrower = verification_exception ();
4461 mono_emit_method_call (cfg, thrower, NULL, NULL);
/*
 * ensure_method_is_allowed_to_call_method:
 *
 *   CoreCLR security: verify CALLER's security level permits calling
 * CALLEE.  A call is allowed when the caller's level dominates the
 * callee's, or when either side is SafeCritical; otherwise emit code that
 * throws MethodAccessException.
 */
4465 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
4466 MonoBasicBlock *bblock, unsigned char *ip)
4468 MonoSecurityCoreCLRLevel caller_level = mono_security_core_clr_method_level (caller, TRUE);
4469 MonoSecurityCoreCLRLevel callee_level = mono_security_core_clr_method_level (callee, TRUE);
4470 gboolean is_safe = TRUE;
4472 if (!(caller_level >= callee_level ||
4473 caller_level == MONO_SECURITY_CORE_CLR_SAFE_CRITICAL ||
4474 callee_level == MONO_SECURITY_CORE_CLR_SAFE_CRITICAL)) {
4479 emit_throw_method_access_exception (cfg, caller, callee, bblock, ip);
/*
 * method_is_safe:
 *
 *   Predicate used by the CoreCLR verification path; the visible check
 * special-cases a method literally named "unsafeMethod" (presumably a
 * test hook — confirm against the test suite). Most of the body is elided
 * in this view.
 */
4483 method_is_safe (MonoMethod *method)
4486 	if (strcmp (method->name, "unsafeMethod") == 0)
4493  * Check that the IL instructions at ip are the array initialization
4494  * sequence and return the pointer to the data and the size.
/*
 * Recognizes the pattern emitted by csc for array initializers:
 * dup; ldtoken <field>; call RuntimeHelpers::InitializeArray(...).
 * On match, returns a pointer to the field's static data (or, for AOT,
 * the RVA boxed as a pointer) and fills *out_size / *out_field_token;
 * the non-matching return paths are elided in this view.
 */
4497 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
4500 	 * newarr[System.Int32]
4502 	 * ldtoken field valuetype ...
4503 	 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* ip[5] == 0x4 checks the token-table byte of the ldtoken operand */
4505 	if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
4506 		guint32 token = read32 (ip + 7);
4507 		guint32 field_token = read32 (ip + 2);
/* strip the table tag to get the row index */
4508 		guint32 field_index = field_token & 0xffffff;
4510 		const char *data_ptr;
4512 		MonoMethod *cmethod;
4513 		MonoClass *dummy_class;
4514 		MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
4520 		*out_field_token = field_token;
4522 		cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* only optimize the corlib RuntimeHelpers::InitializeArray call */
4525 		if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
4527 		switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
4528 		case MONO_TYPE_BOOLEAN:
4532 		/* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
4533 #if G_BYTE_ORDER == G_LITTLE_ENDIAN
4534 		case MONO_TYPE_CHAR:
4544 			return NULL; /* stupid ARM FP swapped format */
/* initializer blob must not exceed the backing field's size */
4554 		if (size > mono_type_size (field->type, &dummy_align))
4557 		/*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
4558 		if (!method->klass->image->dynamic) {
4559 			field_index = read32 (ip + 2) & 0xffffff;
4560 			mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
4561 			data_ptr = mono_image_rva_map (method->klass->image, rva);
4562 			/*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
4563 			/* for aot code we do the lookup on load */
4564 			if (aot && data_ptr)
4565 				return GUINT_TO_POINTER (rva);
4567 			/*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
/* dynamic (SRE) images keep the data in the field object itself */
4569 			data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *
 *   Record an InvalidProgramException on the compilation, with a message
 * naming the method and disassembling the offending IL at IP (or noting
 * an empty method body when code_size is 0). Transfers ownership of the
 * formatted message to cfg->exception_message.
 */
4577 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
4579 	char *method_fname = mono_method_full_name (method, TRUE);
4582 	if (mono_method_get_header (method)->code_size == 0)
4583 		method_code = g_strdup ("method body is empty.");
4585 		method_code = mono_disasm_code_one (NULL, method, ip, NULL);
4586 	cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
4587 	cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
4588 	g_free (method_fname);
4589 	g_free (method_code);
/*
 * set_exception_object:
 *
 *   Record a pre-built managed exception object on the compilation.
 * The pointer is registered as a GC root first so the object stays
 * alive until the failure is reported.
 */
4593 set_exception_object (MonoCompile *cfg, MonoException *exception)
4595 	cfg->exception_type = MONO_EXCEPTION_OBJECT_SUPPLIED;
4596 	MONO_GC_REGISTER_ROOT (cfg->exception_ptr);
4597 	cfg->exception_ptr = exception;
/*
 * generic_class_is_reference_type:
 *
 *   Return TRUE if KLASS is a reference type, resolving type variables
 * through the generic sharing context when one is active (so a shared
 * T instantiated with a reference type answers correctly).
 */
4601 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
4605 	if (cfg->generic_sharing_context)
4606 		type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, &klass->byval_arg);
4608 		type = &klass->byval_arg;
4609 	return MONO_TYPE_IS_REFERENCE (type);
4613  * mono_decompose_array_access_opts:
4615  *   Decompose array access opcodes.
4616  * This should be in decompose.c, but it emits calls so it has to stay here until
4617  * the old JIT is gone.
/*
 * Walks every bblock flagged has_array_access and lowers the high-level
 * array opcodes (array length, bounds check, newarr, string length) into
 * loads/calls, emitting the replacement sequence into a scratch bblock
 * and splicing it in via mono_replace_ins (). Does not alter the CFG.
 */
4620 mono_decompose_array_access_opts (MonoCompile *cfg)
4622 	MonoBasicBlock *bb, *first_bb;
4625 	 * Unlike decompose_long_opts, this pass does not alter the CFG of the method so it
4626 	 * can be executed anytime. It should be run before decompose_long
4630 	 * Create a dummy bblock and emit code into it so we can use the normal
4631 	 * code generation macros.
4633 	cfg->cbb = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoBasicBlock));
4634 	first_bb = cfg->cbb;
4636 	for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
4638 		MonoInst *prev = NULL;
4640 		MonoInst *iargs [3];
/* skip blocks with nothing to decompose */
4643 		if (!bb->has_array_access)
4646 		if (cfg->verbose_level > 3) mono_print_bb (bb, "BEFORE DECOMPOSE-ARRAY-ACCESS-OPTS ");
4648 		cfg->cbb->code = cfg->cbb->last_ins = NULL;
4654 		for (ins = bb->code; ins; ins = ins->next) {
4655 			switch (ins->opcode) {
/* array length -> load of MonoArray::max_length */
4657 				NEW_LOAD_MEMBASE (cfg, dest, OP_LOADI4_MEMBASE, ins->dreg, ins->sreg1,
4658 								  G_STRUCT_OFFSET (MonoArray, max_length));
4659 				MONO_ADD_INS (cfg->cbb, dest);
4661 			case OP_BOUNDS_CHECK:
4662 				MONO_ARCH_EMIT_BOUNDS_CHECK (cfg, ins->sreg1, ins->inst_imm, ins->sreg2);
/* newarr: shared code must resolve the domain at runtime */
4665 				if (cfg->opt & MONO_OPT_SHARED) {
4666 					EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4667 					EMIT_NEW_CLASSCONST (cfg, iargs [1], ins->inst_newa_class);
4668 					MONO_INST_NEW (cfg, iargs [2], OP_MOVE);
4669 					iargs [2]->dreg = ins->sreg1;
4671 					dest = mono_emit_jit_icall (cfg, mono_array_new, iargs);
4672 					dest->dreg = ins->dreg;
/* non-shared: bake the vtable in and use the faster specific icall */
4674 					MonoVTable *vtable = mono_class_vtable (cfg->domain, mono_array_class_get (ins->inst_newa_class, 1));
4677 					NEW_VTABLECONST (cfg, iargs [0], vtable);
4678 					MONO_ADD_INS (cfg->cbb, iargs [0]);
4679 					MONO_INST_NEW (cfg, iargs [1], OP_MOVE);
4680 					iargs [1]->dreg = ins->sreg1;
4682 					dest = mono_emit_jit_icall (cfg, mono_array_new_specific, iargs);
4683 					dest->dreg = ins->dreg;
/* string length -> load of MonoString::length */
4687 				NEW_LOAD_MEMBASE (cfg, dest, OP_LOADI4_MEMBASE, ins->dreg,
4688 								  ins->sreg1, G_STRUCT_OFFSET (MonoString, length));
4689 				MONO_ADD_INS (cfg->cbb, dest);
4695 			g_assert (cfg->cbb == first_bb);
4697 			if (cfg->cbb->code || (cfg->cbb != first_bb)) {
4698 				/* Replace the original instruction with the new code sequence */
4700 				mono_replace_ins (cfg, bb, ins, &prev, first_bb, cfg->cbb);
/* reset the scratch bblock for the next instruction */
4701 				first_bb->code = first_bb->last_ins = NULL;
4702 				first_bb->in_count = first_bb->out_count = 0;
4703 				cfg->cbb = first_bb;
4710 		if (cfg->verbose_level > 3) mono_print_bb (bb, "AFTER DECOMPOSE-ARRAY-ACCESS-OPTS ");
4720 #ifdef MONO_ARCH_SOFT_FLOAT
4723  * mono_decompose_soft_float:
4725  *   Soft float support on ARM. We store each double value in a pair of integer vregs,
4726  * similar to long support on 32 bit platforms. 32 bit float values require special
4727  * handling when used as locals, arguments, and in calls.
4728  * One big problem with soft-float is that there are few r4 test cases in our test suite.
/*
 * Lowers every fp opcode into integer/long ops or icalls: fp constants
 * become i8 consts, fp moves/loads/stores become long equivalents,
 * r4 loads/stores and fp calls go through mono_fload_r4/mono_fstore_r4
 * icalls, and fp compares+branches/ceqs become emulation icall + integer
 * compare. Runs mono_decompose_long_opts () at the end since it creates
 * long opcodes. Uses the same scratch-bblock/mono_replace_ins scheme as
 * mono_decompose_array_access_opts () above.
 */
4731 mono_decompose_soft_float (MonoCompile *cfg)
4733 	MonoBasicBlock *bb, *first_bb;
4736 	 * This pass creates long opcodes, so it should be run before decompose_long_opts ().
4740 	 * Create a dummy bblock and emit code into it so we can use the normal
4741 	 * code generation macros.
4743 	cfg->cbb = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoBasicBlock));
4744 	first_bb = cfg->cbb;
4746 	for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
4748 		MonoInst *prev = NULL;
4751 		if (cfg->verbose_level > 3) mono_print_bb (bb, "BEFORE HANDLE-SOFT-FLOAT ");
4753 		cfg->cbb->code = cfg->cbb->last_ins = NULL;
4759 		for (ins = bb->code; ins; ins = ins->next) {
4760 			const char *spec = INS_INFO (ins->opcode);
4762 			/* Most fp operations are handled automatically by opcode emulation */
4764 			switch (ins->opcode) {
/* r8 constant: reinterpret the double bits as an i8 constant */
4767 				d.vald = *(double*)ins->inst_p0;
4768 				MONO_EMIT_NEW_I8CONST (cfg, ins->dreg, d.vall);
4773 				/* We load the r8 value */
4774 				d.vald = *(float*)ins->inst_p0;
4775 				MONO_EMIT_NEW_I8CONST (cfg, ins->dreg, d.vall);
/* fp moves become long/int moves over the vreg pair */
4779 				ins->opcode = OP_LMOVE;
4782 				ins->opcode = OP_MOVE;
4783 				ins->sreg1 = ins->sreg1 + 1;
4786 				ins->opcode = OP_MOVE;
4787 				ins->sreg1 = ins->sreg1 + 2;
/* fp return: split into the two halves of the vreg pair */
4790 				int reg = ins->sreg1;
4792 				ins->opcode = OP_SETLRET;
4794 				ins->sreg1 = reg + 1;
4795 				ins->sreg2 = reg + 2;
4798 			case OP_LOADR8_MEMBASE:
4799 				ins->opcode = OP_LOADI8_MEMBASE;
4801 			case OP_STORER8_MEMBASE_REG:
4802 				ins->opcode = OP_STOREI8_MEMBASE_REG;
4804 			case OP_STORER4_MEMBASE_REG: {
4805 				MonoInst *iargs [2];
4808 				/* Arg 1 is the double value */
4809 				MONO_INST_NEW (cfg, iargs [0], OP_ARG);
4810 				iargs [0]->dreg = ins->sreg1;
4812 				/* Arg 2 is the address to store to */
4813 				addr_reg = mono_alloc_preg (cfg);
4814 				EMIT_NEW_BIALU_IMM (cfg, iargs [1], OP_PADD_IMM, addr_reg, ins->inst_destbasereg, ins->inst_offset);
/* icall converts r8 -> r4 and performs the store */
4815 				mono_emit_jit_icall (cfg, mono_fstore_r4, iargs);
4819 			case OP_LOADR4_MEMBASE: {
4820 				MonoInst *iargs [1];
4824 				addr_reg = mono_alloc_preg (cfg);
4825 				EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, addr_reg, ins->inst_basereg, ins->inst_offset);
/* icall loads the r4 and widens it to r8 */
4826 				conv = mono_emit_jit_icall (cfg, mono_fload_r4, iargs);
4827 				conv->dreg = ins->dreg;
4832 			case OP_FCALL_MEMBASE: {
4833 				MonoCallInst *call = (MonoCallInst*)ins;
4834 				if (call->signature->ret->type == MONO_TYPE_R4) {
4835 					MonoCallInst *call2;
4836 					MonoInst *iargs [1];
4839 					/* Convert the call into a call returning an int */
4840 					MONO_INST_NEW_CALL (cfg, call2, OP_CALL);
4841 					memcpy (call2, call, sizeof (MonoCallInst));
4842 					switch (ins->opcode) {
4844 						call2->inst.opcode = OP_CALL;
4847 						call2->inst.opcode = OP_CALL_REG;
4849 					case OP_FCALL_MEMBASE:
4850 						call2->inst.opcode = OP_CALL_MEMBASE;
4853 						g_assert_not_reached ();
4855 					call2->inst.dreg = mono_alloc_ireg (cfg);
4856 					MONO_ADD_INS (cfg->cbb, (MonoInst*)call2);
4858 					/* FIXME: Optimize this */
4860 					/* Emit an r4->r8 conversion */
4861 					EMIT_NEW_VARLOADA_VREG (cfg, iargs [0], call2->inst.dreg, &mono_defaults.int32_class->byval_arg);
4862 					conv = mono_emit_jit_icall (cfg, mono_fload_r4, iargs);
4863 					conv->dreg = ins->dreg;
/* r8-returning calls: just retype to the long-call variants */
4865 					switch (ins->opcode) {
4867 						ins->opcode = OP_LCALL;
4870 						ins->opcode = OP_LCALL_REG;
4872 					case OP_FCALL_MEMBASE:
4873 						ins->opcode = OP_LCALL_MEMBASE;
4876 						g_assert_not_reached ();
4882 				MonoJitICallInfo *info;
4883 				MonoInst *iargs [2];
4884 				MonoInst *call, *cmp, *br;
4886 				/* Convert fcompare+fbcc to icall+icompare+beq */
4888 				info = mono_find_jit_opcode_emulation (ins->next->opcode);
4891 				/* Create dummy MonoInst's for the arguments */
4892 				MONO_INST_NEW (cfg, iargs [0], OP_ARG);
4893 				iargs [0]->dreg = ins->sreg1;
4894 				MONO_INST_NEW (cfg, iargs [1], OP_ARG);
4895 				iargs [1]->dreg = ins->sreg2;
4897 				call = mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, iargs);
4899 				MONO_INST_NEW (cfg, cmp, OP_ICOMPARE_IMM);
4900 				cmp->sreg1 = call->dreg;
4902 				MONO_ADD_INS (cfg->cbb, cmp);
4904 				MONO_INST_NEW (cfg, br, OP_IBNE_UN);
4905 				br->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * 2);
/* preserve the original branch targets from the following fbcc */
4906 				br->inst_true_bb = ins->next->inst_true_bb;
4907 				br->inst_false_bb = ins->next->inst_false_bb;
4908 				MONO_ADD_INS (cfg->cbb, br);
4910 				/* The call sequence might include fp ins */
4913 				/* Skip fbcc or fccc */
4914 				NULLIFY_INS (ins->next);
4922 				MonoJitICallInfo *info;
4923 				MonoInst *iargs [2];
4926 				/* Convert fccc to icall+icompare+iceq */
4928 				info = mono_find_jit_opcode_emulation (ins->opcode);
4931 				/* Create dummy MonoInst's for the arguments */
4932 				MONO_INST_NEW (cfg, iargs [0], OP_ARG);
4933 				iargs [0]->dreg = ins->sreg1;
4934 				MONO_INST_NEW (cfg, iargs [1], OP_ARG);
4935 				iargs [1]->dreg = ins->sreg2;
4937 				call = mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, iargs);
4939 				MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, call->dreg, 1);
4940 				MONO_EMIT_NEW_UNALU (cfg, OP_ICEQ, ins->dreg, -1);
4942 				/* The call sequence might include fp ins */
/* sanity: no fp vregs may survive this pass */
4947 			if (spec [MONO_INST_SRC1] == 'f' || spec [MONO_INST_SRC2] == 'f' || spec [MONO_INST_DEST] == 'f') {
4948 				mono_print_ins (ins);
4949 				g_assert_not_reached ();
4954 			g_assert (cfg->cbb == first_bb);
4956 			if (cfg->cbb->code || (cfg->cbb != first_bb)) {
4957 				/* Replace the original instruction with the new code sequence */
4959 				mono_replace_ins (cfg, bb, ins, &prev, first_bb, cfg->cbb);
4960 				first_bb->code = first_bb->last_ins = NULL;
4961 				first_bb->in_count = first_bb->out_count = 0;
4962 				cfg->cbb = first_bb;
4969 		if (cfg->verbose_level > 3) mono_print_bb (bb, "AFTER HANDLE-SOFT-FLOAT ");
4972 	mono_decompose_long_opts (cfg);
/*
 * emit_stloc_ir:
 *
 *   Emit the store of stack value *SP into local N. When the store would
 * be a plain reg-reg move and the source is a freshly created constant
 * (OP_ICONST/OP_I8CONST), skip the move and retarget the constant's dreg
 * directly at the local's vreg.
 */
4978 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
4981 	guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
4982 	if ((opcode == OP_MOVE) && ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
4983 		/* Optimize reg-reg moves away */
4985 		 * Can't optimize other opcodes, since sp[0] might point to
4986 		 * the last ins of a decomposed opcode.
4988 		sp [0]->dreg = (cfg)->locals [n]->dreg;
4990 		EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
4995  * ldloca inhibits many optimizations so try to get rid of it in common
/*
 * Peephole for "ldloca <n>; initobj <T>": instead of taking the local's
 * address, emit a direct NULL store (reference types) or VZERO (value
 * types) into the local's vreg. Returns the ip past the consumed
 * sequence on success (return paths are elided in this view); SIZE
 * selects the short/long ldloca encoding.
 */
4998 static inline unsigned char *
4999 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
5008 		local = read16 (ip + 2);
/* only safe when the following initobj is in the same bblock */
5012 	if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
5013 		gboolean skip = FALSE;
5015 		/* From the INITOBJ case */
5016 		token = read32 (ip + 2);
5017 		klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
5018 		CHECK_TYPELOAD (klass);
5019 		if (generic_class_is_reference_type (cfg, klass)) {
5020 			MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5021 		} else if (MONO_TYPE_IS_REFERENCE (&klass->byval_arg)) {
5022 			MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5023 		} else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
5024 			MONO_EMIT_NEW_VZERO (cfg, cfg->locals [local]->dreg, klass);
5037 * mono_method_to_ir:
5039 * Translate the .net IL into linear IR.
5042 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
5043 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
5044 guint inline_offset, gboolean is_virtual_call)
5046 MonoInst *ins, **sp, **stack_start;
5047 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
5048 MonoMethod *cmethod, *method_definition;
5049 MonoInst **arg_array;
5050 MonoMethodHeader *header;
5052 guint32 token, ins_flag;
5054 MonoClass *constrained_call = NULL;
5055 unsigned char *ip, *end, *target, *err_pos;
5056 static double r8_0 = 0.0;
5057 MonoMethodSignature *sig;
5058 MonoGenericContext *generic_context = NULL;
5059 MonoGenericContainer *generic_container = NULL;
5060 MonoType **param_types;
5061 int i, n, start_new_bblock, dreg;
5062 int num_calls = 0, inline_costs = 0;
5063 int breakpoint_id = 0;
5065 MonoBoolean security, pinvoke;
5066 MonoSecurityManager* secman = NULL;
5067 MonoDeclSecurityActions actions;
5068 GSList *class_inits = NULL;
5069 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
5072 /* serialization and xdomain stuff may need access to private fields and methods */
5073 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
5074 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
5075 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
5076 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
5077 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
5078 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
5080 dont_verify |= mono_security_get_mode () == MONO_SECURITY_MODE_SMCS_HACK;
5082 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
5083 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
5084 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
5085 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
5087 image = method->klass->image;
5088 header = mono_method_get_header (method);
5089 generic_container = mono_method_get_generic_container (method);
5090 sig = mono_method_signature (method);
5091 num_args = sig->hasthis + sig->param_count;
5092 ip = (unsigned char*)header->code;
5093 cfg->cil_start = ip;
5094 end = ip + header->code_size;
5095 mono_jit_stats.cil_code_size += header->code_size;
5097 method_definition = method;
5098 while (method_definition->is_inflated) {
5099 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
5100 method_definition = imethod->declaring;
5103 /* SkipVerification is not allowed if core-clr is enabled */
5104 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
5106 dont_verify_stloc = TRUE;
5109 if (!dont_verify && mini_method_verify (cfg, method_definition))
5110 goto exception_exit;
5112 if (mono_debug_using_mono_debugger ())
5113 cfg->keep_cil_nops = TRUE;
5115 if (sig->is_inflated)
5116 generic_context = mono_method_get_context (method);
5117 else if (generic_container)
5118 generic_context = &generic_container->context;
5119 cfg->generic_context = generic_context;
5121 if (!cfg->generic_sharing_context)
5122 g_assert (!sig->has_type_parameters);
5124 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
5125 g_assert (method->is_inflated);
5126 g_assert (mono_method_get_context (method)->method_inst);
5128 if (method->is_inflated && mono_method_get_context (method)->method_inst)
5129 g_assert (sig->generic_param_count);
5131 if (cfg->method == method) {
5132 cfg->real_offset = 0;
5134 cfg->real_offset = inline_offset;
5137 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
5138 cfg->cil_offset_to_bb_len = header->code_size;
5140 cfg->current_method = method;
5142 if (cfg->verbose_level > 2)
5143 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
5145 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
5147 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
5148 for (n = 0; n < sig->param_count; ++n)
5149 param_types [n + sig->hasthis] = sig->params [n];
5150 cfg->arg_types = param_types;
5152 dont_inline = g_list_prepend (dont_inline, method);
5153 if (cfg->method == method) {
5155 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
5156 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
5159 NEW_BBLOCK (cfg, start_bblock);
5160 cfg->bb_entry = start_bblock;
5161 start_bblock->cil_code = NULL;
5162 start_bblock->cil_length = 0;
5165 NEW_BBLOCK (cfg, end_bblock);
5166 cfg->bb_exit = end_bblock;
5167 end_bblock->cil_code = NULL;
5168 end_bblock->cil_length = 0;
5169 g_assert (cfg->num_bblocks == 2);
5171 arg_array = cfg->args;
5173 if (header->num_clauses) {
5174 cfg->spvars = g_hash_table_new (NULL, NULL);
5175 cfg->exvars = g_hash_table_new (NULL, NULL);
5177 /* handle exception clauses */
5178 for (i = 0; i < header->num_clauses; ++i) {
5179 MonoBasicBlock *try_bb;
5180 MonoExceptionClause *clause = &header->clauses [i];
5181 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
5182 try_bb->real_offset = clause->try_offset;
5183 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
5184 tblock->real_offset = clause->handler_offset;
5185 tblock->flags |= BB_EXCEPTION_HANDLER;
5187 link_bblock (cfg, try_bb, tblock);
5189 if (*(ip + clause->handler_offset) == CEE_POP)
5190 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
5192 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
5193 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
5194 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
5195 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5196 MONO_ADD_INS (tblock, ins);
5198 /* todo: is a fault block unsafe to optimize? */
5199 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
5200 tblock->flags |= BB_EXCEPTION_UNSAFE;
5204 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
5206 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
5208 /* catch and filter blocks get the exception object on the stack */
5209 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
5210 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5211 MonoInst *dummy_use;
5213 /* mostly like handle_stack_args (), but just sets the input args */
5214 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
5215 tblock->in_scount = 1;
5216 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5217 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5220 * Add a dummy use for the exvar so its liveness info will be
5224 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
5226 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5227 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
5228 tblock->flags |= BB_EXCEPTION_HANDLER;
5229 tblock->real_offset = clause->data.filter_offset;
5230 tblock->in_scount = 1;
5231 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5232 /* The filter block shares the exvar with the handler block */
5233 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5234 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5235 MONO_ADD_INS (tblock, ins);
5239 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
5240 clause->data.catch_class &&
5241 cfg->generic_sharing_context &&
5242 mono_class_check_context_used (clause->data.catch_class)) {
5244 * In shared generic code with catch
5245 * clauses containing type variables
5246 * the exception handling code has to
5247 * be able to get to the rgctx.
5248 * Therefore we have to make sure that
5249 * the vtable/mrgctx argument (for
5250 * static or generic methods) or the
5251 * "this" argument (for non-static
5252 * methods) are live.
5254 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5255 mini_method_get_context (method)->method_inst ||
5256 method->klass->valuetype) {
5257 mono_get_vtable_var (cfg);
5259 MonoInst *dummy_use;
5261 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
5266 arg_array = alloca (sizeof (MonoInst *) * num_args);
5267 cfg->cbb = start_bblock;
5268 cfg->args = arg_array;
5269 mono_save_args (cfg, sig, inline_args);
5272 /* FIRST CODE BLOCK */
5273 NEW_BBLOCK (cfg, bblock);
5274 bblock->cil_code = ip;
5278 ADD_BBLOCK (cfg, bblock);
5280 if (cfg->method == method) {
5281 breakpoint_id = mono_debugger_method_has_breakpoint (method);
5282 if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
5283 MONO_INST_NEW (cfg, ins, OP_BREAK);
5284 MONO_ADD_INS (bblock, ins);
5288 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5289 secman = mono_security_manager_get_methods ();
5291 security = (secman && mono_method_has_declsec (method));
5292 /* at this point having security doesn't mean we have any code to generate */
5293 if (security && (cfg->method == method)) {
5294 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
5295 * And we do not want to enter the next section (with allocation) if we
5296 * have nothing to generate */
5297 security = mono_declsec_get_demands (method, &actions);
5300 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
5301 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
5303 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5304 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5305 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
5307 /* unless the method or it's class has the [SuppressUnmanagedCodeSecurity] attribute */
5308 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5312 mono_custom_attrs_free (custom);
5315 custom = mono_custom_attrs_from_class (wrapped->klass);
5316 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5320 mono_custom_attrs_free (custom);
5323 /* not a P/Invoke after all */
5328 if ((header->init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
5329 /* we use a separate basic block for the initialization code */
5330 NEW_BBLOCK (cfg, init_localsbb);
5331 cfg->bb_init = init_localsbb;
5332 init_localsbb->real_offset = cfg->real_offset;
5333 start_bblock->next_bb = init_localsbb;
5334 init_localsbb->next_bb = bblock;
5335 link_bblock (cfg, start_bblock, init_localsbb);
5336 link_bblock (cfg, init_localsbb, bblock);
5338 cfg->cbb = init_localsbb;
5340 start_bblock->next_bb = bblock;
5341 link_bblock (cfg, start_bblock, bblock);
5344 /* at this point we know, if security is TRUE, that some code needs to be generated */
5345 if (security && (cfg->method == method)) {
5348 mono_jit_stats.cas_demand_generation++;
5350 if (actions.demand.blob) {
5351 /* Add code for SecurityAction.Demand */
5352 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
5353 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
5354 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5355 mono_emit_method_call (cfg, secman->demand, args, NULL);
5357 if (actions.noncasdemand.blob) {
5358 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
5359 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
5360 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
5361 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
5362 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5363 mono_emit_method_call (cfg, secman->demand, args, NULL);
5365 if (actions.demandchoice.blob) {
5366 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
5367 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
5368 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
5369 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
5370 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
5374 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
5376 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
5379 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
5380 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
5381 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5382 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5383 if (!(method->klass && method->klass->image &&
5384 mono_security_core_clr_is_platform_image (method->klass->image))) {
5385 emit_throw_method_access_exception (cfg, method, wrapped, bblock, ip);
5389 if (!method_is_safe (method))
5390 emit_throw_verification_exception (cfg, bblock, ip);
5393 if (header->code_size == 0)
5396 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
5401 if (cfg->method == method)
5402 mono_debug_init_method (cfg, bblock, breakpoint_id);
5404 for (n = 0; n < header->num_locals; ++n) {
5405 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
5410 /* We force the vtable variable here for all shared methods
5411 for the possibility that they might show up in a stack
5412 trace where their exact instantiation is needed. */
5413 if (cfg->generic_sharing_context)
5414 mono_get_vtable_var (cfg);
5416 /* add a check for this != NULL to inlined methods */
5417 if (is_virtual_call) {
5420 NEW_ARGLOAD (cfg, arg_ins, 0);
5421 MONO_ADD_INS (cfg->cbb, arg_ins);
5422 cfg->flags |= MONO_CFG_HAS_CHECK_THIS;
5423 MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, arg_ins->dreg);
5424 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, arg_ins->dreg);
5427 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
5428 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
5431 start_new_bblock = 0;
5435 if (cfg->method == method)
5436 cfg->real_offset = ip - header->code;
5438 cfg->real_offset = inline_offset;
5443 if (start_new_bblock) {
5444 bblock->cil_length = ip - bblock->cil_code;
5445 if (start_new_bblock == 2) {
5446 g_assert (ip == tblock->cil_code);
5448 GET_BBLOCK (cfg, tblock, ip);
5450 bblock->next_bb = tblock;
5453 start_new_bblock = 0;
5454 for (i = 0; i < bblock->in_scount; ++i) {
5455 if (cfg->verbose_level > 3)
5456 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5457 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5461 g_slist_free (class_inits);
5464 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
5465 link_bblock (cfg, bblock, tblock);
5466 if (sp != stack_start) {
5467 handle_stack_args (cfg, stack_start, sp - stack_start);
5469 CHECK_UNVERIFIABLE (cfg);
5471 bblock->next_bb = tblock;
5474 for (i = 0; i < bblock->in_scount; ++i) {
5475 if (cfg->verbose_level > 3)
5476 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5477 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5480 g_slist_free (class_inits);
5485 bblock->real_offset = cfg->real_offset;
5487 if ((cfg->method == method) && cfg->coverage_info) {
5488 guint32 cil_offset = ip - header->code;
5489 cfg->coverage_info->data [cil_offset].cil_code = ip;
5491 /* TODO: Use an increment here */
5492 #if defined(__i386__)
5493 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
5494 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
5496 MONO_ADD_INS (cfg->cbb, ins);
5498 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
5499 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
5503 if (cfg->verbose_level > 3)
5504 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
5508 if (cfg->keep_cil_nops)
5509 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
5511 MONO_INST_NEW (cfg, ins, OP_NOP);
5513 MONO_ADD_INS (bblock, ins);
5516 MONO_INST_NEW (cfg, ins, OP_BREAK);
5518 MONO_ADD_INS (bblock, ins);
5524 CHECK_STACK_OVF (1);
5525 n = (*ip)-CEE_LDARG_0;
5527 EMIT_NEW_ARGLOAD (cfg, ins, n);
5535 CHECK_STACK_OVF (1);
5536 n = (*ip)-CEE_LDLOC_0;
5538 EMIT_NEW_LOCLOAD (cfg, ins, n);
5547 n = (*ip)-CEE_STLOC_0;
5550 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
5552 emit_stloc_ir (cfg, sp, header, n);
5559 CHECK_STACK_OVF (1);
5562 EMIT_NEW_ARGLOAD (cfg, ins, n);
5568 CHECK_STACK_OVF (1);
5571 NEW_ARGLOADA (cfg, ins, n);
5572 MONO_ADD_INS (cfg->cbb, ins);
5582 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
5584 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
5589 CHECK_STACK_OVF (1);
5592 EMIT_NEW_LOCLOAD (cfg, ins, n);
5596 case CEE_LDLOCA_S: {
5597 unsigned char *tmp_ip;
5599 CHECK_STACK_OVF (1);
5600 CHECK_LOCAL (ip [1]);
5602 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
5608 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
5617 CHECK_LOCAL (ip [1]);
5618 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
5620 emit_stloc_ir (cfg, sp, header, ip [1]);
5625 CHECK_STACK_OVF (1);
5626 EMIT_NEW_PCONST (cfg, ins, NULL);
5627 ins->type = STACK_OBJ;
5632 CHECK_STACK_OVF (1);
5633 EMIT_NEW_ICONST (cfg, ins, -1);
5646 CHECK_STACK_OVF (1);
5647 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
5653 CHECK_STACK_OVF (1);
5655 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
5661 CHECK_STACK_OVF (1);
5662 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
5668 CHECK_STACK_OVF (1);
5669 MONO_INST_NEW (cfg, ins, OP_I8CONST);
5670 ins->type = STACK_I8;
5671 ins->dreg = alloc_dreg (cfg, STACK_I8);
5673 ins->inst_l = (gint64)read64 (ip);
5674 MONO_ADD_INS (bblock, ins);
5680 /* FIXME: we should really allocate this only late in the compilation process */
5681 mono_domain_lock (cfg->domain);
5682 f = mono_domain_alloc (cfg->domain, sizeof (float));
5683 mono_domain_unlock (cfg->domain);
5685 CHECK_STACK_OVF (1);
5686 MONO_INST_NEW (cfg, ins, OP_R4CONST);
5687 ins->type = STACK_R8;
5688 ins->dreg = alloc_dreg (cfg, STACK_R8);
5692 MONO_ADD_INS (bblock, ins);
5700 /* FIXME: we should really allocate this only late in the compilation process */
5701 mono_domain_lock (cfg->domain);
5702 d = mono_domain_alloc (cfg->domain, sizeof (double));
5703 mono_domain_unlock (cfg->domain);
5705 CHECK_STACK_OVF (1);
5706 MONO_INST_NEW (cfg, ins, OP_R8CONST);
5707 ins->type = STACK_R8;
5708 ins->dreg = alloc_dreg (cfg, STACK_R8);
5712 MONO_ADD_INS (bblock, ins);
5719 MonoInst *temp, *store;
5721 CHECK_STACK_OVF (1);
5725 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
5726 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
5728 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
5731 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
5744 if (sp [0]->type == STACK_R8)
5745 /* we need to pop the value from the x86 FP stack */
5746 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
5753 if (stack_start != sp)
5755 token = read32 (ip + 1);
5756 /* FIXME: check the signature matches */
5757 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
5762 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
5763 GENERIC_SHARING_FAILURE (CEE_JMP);
5765 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
5766 if (check_linkdemand (cfg, method, cmethod))
5768 CHECK_CFG_EXCEPTION;
5773 MonoMethodSignature *fsig = mono_method_signature (cmethod);
5776 /* Handle tail calls similarly to calls */
5777 n = fsig->param_count + fsig->hasthis;
5779 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
5780 call->method = cmethod;
5781 call->tail_call = TRUE;
5782 call->signature = mono_method_signature (cmethod);
5783 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
5784 call->inst.inst_p0 = cmethod;
5785 for (i = 0; i < n; ++i)
5786 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
5788 mono_arch_emit_call (cfg, call);
5789 MONO_ADD_INS (bblock, (MonoInst*)call);
5792 for (i = 0; i < num_args; ++i)
5793 /* Prevent arguments from being optimized away */
5794 arg_array [i]->flags |= MONO_INST_VOLATILE;
5796 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
5797 ins = (MonoInst*)call;
5798 ins->inst_p0 = cmethod;
5799 MONO_ADD_INS (bblock, ins);
5803 start_new_bblock = 1;
5808 case CEE_CALLVIRT: {
5809 MonoInst *addr = NULL;
5810 MonoMethodSignature *fsig = NULL;
5812 int virtual = *ip == CEE_CALLVIRT;
5813 int calli = *ip == CEE_CALLI;
5814 gboolean pass_imt_from_rgctx = FALSE;
5815 MonoInst *imt_arg = NULL;
5816 gboolean pass_vtable = FALSE;
5817 gboolean pass_mrgctx = FALSE;
5818 MonoInst *vtable_arg = NULL;
5819 gboolean check_this = FALSE;
5822 token = read32 (ip + 1);
5829 if (method->wrapper_type != MONO_WRAPPER_NONE)
5830 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
5832 fsig = mono_metadata_parse_signature (image, token);
5834 n = fsig->param_count + fsig->hasthis;
5836 MonoMethod *cil_method;
5838 if (method->wrapper_type != MONO_WRAPPER_NONE) {
5839 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
5840 cil_method = cmethod;
5841 } else if (constrained_call) {
5842 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
5844 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
5845 cil_method = cmethod;
5850 if (!dont_verify && !cfg->skip_visibility) {
5851 MonoMethod *target_method = cil_method;
5852 if (method->is_inflated) {
5853 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
5855 if (!mono_method_can_access_method (method_definition, target_method) &&
5856 !mono_method_can_access_method (method, cil_method))
5857 METHOD_ACCESS_FAILURE;
5860 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
5861 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
5863 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
5864 /* MS.NET seems to silently convert this to a callvirt */
5867 if (!cmethod->klass->inited)
5868 if (!mono_class_init (cmethod->klass))
5871 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
5872 mini_class_is_system_array (cmethod->klass)) {
5873 array_rank = cmethod->klass->rank;
5874 fsig = mono_method_signature (cmethod);
5876 if (mono_method_signature (cmethod)->pinvoke) {
5877 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
5878 check_for_pending_exc, FALSE);
5879 fsig = mono_method_signature (wrapper);
5880 } else if (constrained_call) {
5881 fsig = mono_method_signature (cmethod);
5883 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
5887 mono_save_token_info (cfg, image, token, cil_method);
5889 n = fsig->param_count + fsig->hasthis;
5891 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
5892 if (check_linkdemand (cfg, method, cmethod))
5894 CHECK_CFG_EXCEPTION;
5897 if (cmethod->string_ctor)
5898 g_assert_not_reached ();
5901 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
5904 if (!cfg->generic_sharing_context && cmethod)
5905 g_assert (!mono_method_check_context_used (cmethod));
5909 //g_assert (!virtual || fsig->hasthis);
5913 if (constrained_call) {
5915 * We have the `constrained.' prefix opcode.
5917 if (constrained_call->valuetype && !cmethod->klass->valuetype) {
5921 * The type parameter is instantiated as a valuetype,
5922 * but that type doesn't override the method we're
5923 * calling, so we need to box `this'.
5925 dreg = alloc_dreg (cfg, STACK_VTYPE);
5926 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADV_MEMBASE, dreg, sp [0]->dreg, 0);
5927 ins->klass = constrained_call;
5928 sp [0] = handle_box (cfg, ins, constrained_call);
5929 } else if (!constrained_call->valuetype) {
5930 int dreg = alloc_preg (cfg);
5933 * The type parameter is instantiated as a reference
5934 * type. We have a managed pointer on the stack, so
5935 * we need to dereference it here.
5937 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
5938 ins->type = STACK_OBJ;
5940 } else if (cmethod->klass->valuetype)
5942 constrained_call = NULL;
5945 if (*ip != CEE_CALLI && check_call_signature (cfg, fsig, sp))
5949 if (cmethod && ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
5950 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
5951 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
5952 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
5953 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
5956 * Pass vtable iff target method might
5957 * be shared, which means that sharing
5958 * is enabled for its class and its
5959 * context is sharable (and it's not a
5962 if (sharing_enabled && context_sharable &&
5963 !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
5967 if (cmethod && mini_method_get_context (cmethod) &&
5968 mini_method_get_context (cmethod)->method_inst) {
5969 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
5970 MonoGenericContext *context = mini_method_get_context (cmethod);
5971 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
5973 g_assert (!pass_vtable);
5975 if (sharing_enabled && context_sharable)
5979 if (cfg->generic_sharing_context && cmethod) {
5980 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
5982 context_used = mono_method_check_context_used (cmethod);
5984 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
5985 /* Generic method interface
5986 calls are resolved via a
5987 helper function and don't
5989 if (!cmethod_context || !cmethod_context->method_inst)
5990 pass_imt_from_rgctx = TRUE;
5994 * If a shared method calls another
5995 * shared method then the caller must
5996 * have a generic sharing context
5997 * because the magic trampoline
5998 * requires it. FIXME: We shouldn't
5999 * have to force the vtable/mrgctx
6000 * variable here. Instead there
6001 * should be a flag in the cfg to
6002 * request a generic sharing context.
6005 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
6006 mono_get_vtable_var (cfg);
6011 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
6013 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
6015 CHECK_TYPELOAD (cmethod->klass);
6016 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
6021 g_assert (!vtable_arg);
6024 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
6026 EMIT_NEW_METHOD_RGCTX_CONST (cfg, vtable_arg, cmethod);
6029 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
6030 MONO_METHOD_IS_FINAL (cmethod)) {
6037 if (pass_imt_from_rgctx) {
6038 g_assert (!pass_vtable);
6041 imt_arg = emit_get_rgctx_method (cfg, context_used,
6042 cmethod, MONO_RGCTX_INFO_METHOD);
6048 MONO_INST_NEW (cfg, check, OP_CHECK_THIS);
6049 check->sreg1 = sp [0]->dreg;
6050 MONO_ADD_INS (cfg->cbb, check);
6053 /* Calling virtual generic methods */
6054 if (cmethod && virtual &&
6055 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
6056 !(MONO_METHOD_IS_FINAL (cmethod) &&
6057 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
6058 mono_method_signature (cmethod)->generic_param_count) {
6059 MonoInst *this_temp, *this_arg_temp, *store;
6060 MonoInst *iargs [4];
6062 g_assert (mono_method_signature (cmethod)->is_inflated);
6064 /* Prevent inlining of methods that contain indirect calls */
6067 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK
6068 if (!(cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) &&
6069 cmethod->wrapper_type == MONO_WRAPPER_NONE) {
6070 g_assert (!imt_arg);
6072 imt_arg = emit_get_rgctx_method (cfg, context_used,
6073 cmethod, MONO_RGCTX_INFO_METHOD_CONTEXT);
6077 cfg->disable_aot = TRUE;
6078 g_assert (cmethod->is_inflated);
6079 EMIT_NEW_PCONST (cfg, imt_arg,
6080 ((MonoMethodInflated*)cmethod)->context.method_inst);
6082 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, sp [0], imt_arg);
6086 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
6087 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
6088 MONO_ADD_INS (bblock, store);
6090 /* FIXME: This should be a managed pointer */
6091 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
6093 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
6095 iargs [1] = emit_get_rgctx_method (cfg, context_used,
6096 cmethod, MONO_RGCTX_INFO_METHOD);
6097 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
6098 addr = mono_emit_jit_icall (cfg,
6099 mono_helper_compile_generic_method, iargs);
6101 EMIT_NEW_METHODCONST (cfg, iargs [1], cmethod);
6102 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
6103 addr = mono_emit_jit_icall (cfg, mono_helper_compile_generic_method, iargs);
6106 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
6108 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6111 if (!MONO_TYPE_IS_VOID (fsig->ret))
6120 /* FIXME: runtime generic context pointer for jumps? */
6121 /* FIXME: handle this for generic sharing eventually */
6122 if ((ins_flag & MONO_INST_TAILCALL) && !cfg->generic_sharing_context && !vtable_arg && cmethod && (*ip == CEE_CALL) &&
6123 (mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)))) {
6126 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6129 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6130 call->tail_call = TRUE;
6131 call->method = cmethod;
6132 call->signature = mono_method_signature (cmethod);
6135 /* Handle tail calls similarly to calls */
6136 call->inst.opcode = OP_TAILCALL;
6138 mono_arch_emit_call (cfg, call);
6141 * We implement tail calls by storing the actual arguments into the
6142 * argument variables, then emitting a CEE_JMP.
6144 for (i = 0; i < n; ++i) {
6145 /* Prevent argument from being register allocated */
6146 arg_array [i]->flags |= MONO_INST_VOLATILE;
6147 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6151 ins = (MonoInst*)call;
6152 ins->inst_p0 = cmethod;
6153 ins->inst_p1 = arg_array [0];
6154 MONO_ADD_INS (bblock, ins);
6155 link_bblock (cfg, bblock, end_bblock);
6156 start_new_bblock = 1;
6157 /* skip CEE_RET as well */
6163 /* Conversion to a JIT intrinsic */
6164 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
6165 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6166 type_to_eval_stack_type ((cfg), fsig->ret, ins);
6177 if ((cfg->opt & MONO_OPT_INLINE) && cmethod &&
6178 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
6179 mono_method_check_inlining (cfg, cmethod) &&
6180 !g_list_find (dont_inline, cmethod)) {
6182 gboolean allways = FALSE;
6184 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
6185 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
6186 /* Prevent inlining of methods that call wrappers */
6188 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
6192 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, allways))) {
6194 cfg->real_offset += 5;
6197 if (!MONO_TYPE_IS_VOID (fsig->ret))
6198 /* *sp is already set by inline_method */
6201 inline_costs += costs;
6207 inline_costs += 10 * num_calls++;
6209 /* Tail recursion elimination */
6210 if ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
6211 gboolean has_vtargs = FALSE;
6214 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6217 /* keep it simple */
6218 for (i = fsig->param_count - 1; i >= 0; i--) {
6219 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
6224 for (i = 0; i < n; ++i)
6225 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6226 MONO_INST_NEW (cfg, ins, OP_BR);
6227 MONO_ADD_INS (bblock, ins);
6228 tblock = start_bblock->out_bb [0];
6229 link_bblock (cfg, bblock, tblock);
6230 ins->inst_target_bb = tblock;
6231 start_new_bblock = 1;
6233 /* skip the CEE_RET, too */
6234 if (ip_in_bb (cfg, bblock, ip + 5))
6244 /* Generic sharing */
6245 /* FIXME: only do this for generic methods if
6246 they are not shared! */
6247 if (context_used && !imt_arg && !array_rank &&
6248 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
6249 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
6250 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
6251 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
6254 g_assert (cfg->generic_sharing_context && cmethod);
6258 * We are compiling a call to a
6259 * generic method from shared code,
6260 * which means that we have to look up
6261 * the method in the rgctx and do an
6264 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
6267 /* Indirect calls */
6269 g_assert (!imt_arg);
6271 if (*ip == CEE_CALL)
6272 g_assert (context_used);
6273 else if (*ip == CEE_CALLI)
6274 g_assert (!vtable_arg);
6276 /* FIXME: what the hell is this??? */
6277 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
6278 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
6280 /* Prevent inlining of methods with indirect calls */
6284 #ifdef MONO_ARCH_RGCTX_REG
6286 int rgctx_reg = mono_alloc_preg (cfg);
6288 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
6289 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6290 call = (MonoCallInst*)ins;
6291 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
6292 cfg->uses_rgctx_reg = TRUE;
6297 if (addr->opcode == OP_AOTCONST && addr->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6299 * Instead of emitting an indirect call, emit a direct call
6300 * with the contents of the aotconst as the patch info.
6302 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_p0, fsig, sp);
6305 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6308 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6309 if (fsig->pinvoke && !fsig->ret->byref) {
6313 * Native code might return non register sized integers
6314 * without initializing the upper bits.
6316 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
6317 case OP_LOADI1_MEMBASE:
6318 widen_op = OP_ICONV_TO_I1;
6320 case OP_LOADU1_MEMBASE:
6321 widen_op = OP_ICONV_TO_U1;
6323 case OP_LOADI2_MEMBASE:
6324 widen_op = OP_ICONV_TO_I2;
6326 case OP_LOADU2_MEMBASE:
6327 widen_op = OP_ICONV_TO_U2;
6333 if (widen_op != -1) {
6334 int dreg = alloc_preg (cfg);
6337 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
6338 widen->type = ins->type;
6355 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
6356 if (sp [fsig->param_count]->type == STACK_OBJ) {
6357 MonoInst *iargs [2];
6360 iargs [1] = sp [fsig->param_count];
6362 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
6365 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
6366 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, sp [fsig->param_count]->dreg);
6367 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
6368 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6370 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
6373 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
6374 if (!cmethod->klass->element_class->valuetype && !readonly)
6375 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
6378 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6381 g_assert_not_reached ();
6389 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
6391 if (!MONO_TYPE_IS_VOID (fsig->ret))
6402 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL,
6404 } else if (imt_arg) {
6405 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, imt_arg);
6407 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, NULL);
6410 if (!MONO_TYPE_IS_VOID (fsig->ret))
6418 if (cfg->method != method) {
6419 /* return from inlined method */
6421 * If in_count == 0, that means the ret is unreachable due to
6422 * being preceeded by a throw. In that case, inline_method () will
6423 * handle setting the return value
6424 * (test case: test_0_inline_throw ()).
6426 if (return_var && cfg->cbb->in_count) {
6430 //g_assert (returnvar != -1);
6431 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
6432 cfg->ret_var_set = TRUE;
6436 MonoType *ret_type = mono_method_signature (method)->ret;
6438 g_assert (!return_var);
6441 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
6444 if (!cfg->vret_addr) {
6447 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
6449 EMIT_NEW_RETLOADA (cfg, ret_addr);
6451 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
6452 ins->klass = mono_class_from_mono_type (ret_type);
6455 #ifdef MONO_ARCH_SOFT_FLOAT
6456 if (!ret_type->byref && ret_type->type == MONO_TYPE_R4) {
6457 MonoInst *iargs [1];
6461 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
6462 mono_arch_emit_setret (cfg, method, conv);
6464 mono_arch_emit_setret (cfg, method, *sp);
6467 mono_arch_emit_setret (cfg, method, *sp);
6472 if (sp != stack_start)
6474 MONO_INST_NEW (cfg, ins, OP_BR);
6476 ins->inst_target_bb = end_bblock;
6477 MONO_ADD_INS (bblock, ins);
6478 link_bblock (cfg, bblock, end_bblock);
6479 start_new_bblock = 1;
6483 MONO_INST_NEW (cfg, ins, OP_BR);
6485 target = ip + 1 + (signed char)(*ip);
6487 GET_BBLOCK (cfg, tblock, target);
6488 link_bblock (cfg, bblock, tblock);
6489 ins->inst_target_bb = tblock;
6490 if (sp != stack_start) {
6491 handle_stack_args (cfg, stack_start, sp - stack_start);
6493 CHECK_UNVERIFIABLE (cfg);
6495 MONO_ADD_INS (bblock, ins);
6496 start_new_bblock = 1;
6497 inline_costs += BRANCH_COST;
6511 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
6513 target = ip + 1 + *(signed char*)ip;
6519 inline_costs += BRANCH_COST;
6523 MONO_INST_NEW (cfg, ins, OP_BR);
6526 target = ip + 4 + (gint32)read32(ip);
6528 GET_BBLOCK (cfg, tblock, target);
6529 link_bblock (cfg, bblock, tblock);
6530 ins->inst_target_bb = tblock;
6531 if (sp != stack_start) {
6532 handle_stack_args (cfg, stack_start, sp - stack_start);
6534 CHECK_UNVERIFIABLE (cfg);
6537 MONO_ADD_INS (bblock, ins);
6539 start_new_bblock = 1;
6540 inline_costs += BRANCH_COST;
6547 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
6548 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
6549 guint32 opsize = is_short ? 1 : 4;
6551 CHECK_OPSIZE (opsize);
6553 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
6556 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
6561 GET_BBLOCK (cfg, tblock, target);
6562 link_bblock (cfg, bblock, tblock);
6563 GET_BBLOCK (cfg, tblock, ip);
6564 link_bblock (cfg, bblock, tblock);
6566 if (sp != stack_start) {
6567 handle_stack_args (cfg, stack_start, sp - stack_start);
6568 CHECK_UNVERIFIABLE (cfg);
6571 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
6572 cmp->sreg1 = sp [0]->dreg;
6573 type_from_op (cmp, sp [0], NULL);
6576 #if SIZEOF_REGISTER == 4
6577 if (cmp->opcode == OP_LCOMPARE_IMM) {
6578 /* Convert it to OP_LCOMPARE */
6579 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6580 ins->type = STACK_I8;
6581 ins->dreg = alloc_dreg (cfg, STACK_I8);
6583 MONO_ADD_INS (bblock, ins);
6584 cmp->opcode = OP_LCOMPARE;
6585 cmp->sreg2 = ins->dreg;
6588 MONO_ADD_INS (bblock, cmp);
6590 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
6591 type_from_op (ins, sp [0], NULL);
6592 MONO_ADD_INS (bblock, ins);
6593 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
6594 GET_BBLOCK (cfg, tblock, target);
6595 ins->inst_true_bb = tblock;
6596 GET_BBLOCK (cfg, tblock, ip);
6597 ins->inst_false_bb = tblock;
6598 start_new_bblock = 2;
6601 inline_costs += BRANCH_COST;
6616 MONO_INST_NEW (cfg, ins, *ip);
6618 target = ip + 4 + (gint32)read32(ip);
6624 inline_costs += BRANCH_COST;
6628 MonoBasicBlock **targets;
6629 MonoBasicBlock *default_bblock;
6630 MonoJumpInfoBBTable *table;
6631 int offset_reg = alloc_preg (cfg);
6632 int target_reg = alloc_preg (cfg);
6633 int table_reg = alloc_preg (cfg);
6634 int sum_reg = alloc_preg (cfg);
6635 gboolean use_op_switch;
6639 n = read32 (ip + 1);
6642 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
6646 CHECK_OPSIZE (n * sizeof (guint32));
6647 target = ip + n * sizeof (guint32);
6649 GET_BBLOCK (cfg, default_bblock, target);
6651 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
6652 for (i = 0; i < n; ++i) {
6653 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
6654 targets [i] = tblock;
6658 if (sp != stack_start) {
6660 * Link the current bb with the targets as well, so handle_stack_args
6661 * will set their in_stack correctly.
6663 link_bblock (cfg, bblock, default_bblock);
6664 for (i = 0; i < n; ++i)
6665 link_bblock (cfg, bblock, targets [i]);
6667 handle_stack_args (cfg, stack_start, sp - stack_start);
6669 CHECK_UNVERIFIABLE (cfg);
6672 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
6673 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
6676 for (i = 0; i < n; ++i)
6677 link_bblock (cfg, bblock, targets [i]);
6679 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
6680 table->table = targets;
6681 table->table_size = n;
6683 use_op_switch = FALSE;
6685 /* ARM implements SWITCH statements differently */
6686 /* FIXME: Make it use the generic implementation */
6687 if (!cfg->compile_aot)
6688 use_op_switch = TRUE;
6691 if (use_op_switch) {
6692 MONO_INST_NEW (cfg, ins, OP_SWITCH);
6693 ins->sreg1 = src1->dreg;
6694 ins->inst_p0 = table;
6695 ins->inst_many_bb = targets;
6696 ins->klass = GUINT_TO_POINTER (n);
6697 MONO_ADD_INS (cfg->cbb, ins);
6699 if (sizeof (gpointer) == 8)
6700 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
6702 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
6704 #if SIZEOF_REGISTER == 8
6705 /* The upper word might not be zero, and we add it to a 64 bit address later */
6706 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
6709 if (cfg->compile_aot) {
6710 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
6712 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
6713 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
6714 ins->inst_p0 = table;
6715 ins->dreg = table_reg;
6716 MONO_ADD_INS (cfg->cbb, ins);
6719 /* FIXME: Use load_memindex */
6720 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
6721 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
6722 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
6724 start_new_bblock = 1;
6725 inline_costs += (BRANCH_COST * 2);
6745 dreg = alloc_freg (cfg);
6748 dreg = alloc_lreg (cfg);
6751 dreg = alloc_preg (cfg);
6754 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
6755 ins->type = ldind_type [*ip - CEE_LDIND_I1];
6756 ins->flags |= ins_flag;
6758 MONO_ADD_INS (bblock, ins);
6773 #if HAVE_WRITE_BARRIERS
6774 if (*ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0))) {
6775 /* insert call to write barrier */
6776 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
6777 mono_emit_method_call (cfg, write_barrier, sp, NULL);
6784 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
6785 ins->flags |= ins_flag;
6787 MONO_ADD_INS (bblock, ins);
6795 MONO_INST_NEW (cfg, ins, (*ip));
6797 ins->sreg1 = sp [0]->dreg;
6798 ins->sreg2 = sp [1]->dreg;
6799 type_from_op (ins, sp [0], sp [1]);
6801 ins->dreg = alloc_dreg ((cfg), (ins)->type);
6803 /* Use the immediate opcodes if possible */
6804 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
6805 int imm_opcode = mono_op_to_op_imm (ins->opcode);
6806 if (imm_opcode != -1) {
6807 ins->opcode = imm_opcode;
6808 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
6811 sp [1]->opcode = OP_NOP;
6815 MONO_ADD_INS ((cfg)->cbb, (ins));
6818 mono_decompose_opcode (cfg, ins);
6835 MONO_INST_NEW (cfg, ins, (*ip));
6837 ins->sreg1 = sp [0]->dreg;
6838 ins->sreg2 = sp [1]->dreg;
6839 type_from_op (ins, sp [0], sp [1]);
6841 ADD_WIDEN_OP (ins, sp [0], sp [1]);
6842 ins->dreg = alloc_dreg ((cfg), (ins)->type);
6844 /* FIXME: Pass opcode to is_inst_imm */
6846 /* Use the immediate opcodes if possible */
6847 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
6850 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
6851 if (imm_opcode != -1) {
6852 ins->opcode = imm_opcode;
6853 if (sp [1]->opcode == OP_I8CONST) {
6854 #if SIZEOF_REGISTER == 8
6855 ins->inst_imm = sp [1]->inst_l;
6857 ins->inst_ls_word = sp [1]->inst_ls_word;
6858 ins->inst_ms_word = sp [1]->inst_ms_word;
6862 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
6865 /* Might be followed by an instruction added by ADD_WIDEN_OP */
6866 if (sp [1]->next == NULL)
6867 sp [1]->opcode = OP_NOP;
6870 MONO_ADD_INS ((cfg)->cbb, (ins));
6873 mono_decompose_opcode (cfg, ins);
6886 case CEE_CONV_OVF_I8:
6887 case CEE_CONV_OVF_U8:
6891 /* Special case this earlier so we have long constants in the IR */
6892 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
6893 int data = sp [-1]->inst_c0;
6894 sp [-1]->opcode = OP_I8CONST;
6895 sp [-1]->type = STACK_I8;
6896 #if SIZEOF_REGISTER == 8
6897 if ((*ip) == CEE_CONV_U8)
6898 sp [-1]->inst_c0 = (guint32)data;
6900 sp [-1]->inst_c0 = data;
6902 sp [-1]->inst_ls_word = data;
6903 if ((*ip) == CEE_CONV_U8)
6904 sp [-1]->inst_ms_word = 0;
6906 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
6908 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
6915 case CEE_CONV_OVF_I4:
6916 case CEE_CONV_OVF_I1:
6917 case CEE_CONV_OVF_I2:
6918 case CEE_CONV_OVF_I:
6919 case CEE_CONV_OVF_U:
6922 if (sp [-1]->type == STACK_R8) {
6923 ADD_UNOP (CEE_CONV_OVF_I8);
6930 case CEE_CONV_OVF_U1:
6931 case CEE_CONV_OVF_U2:
6932 case CEE_CONV_OVF_U4:
6935 if (sp [-1]->type == STACK_R8) {
6936 ADD_UNOP (CEE_CONV_OVF_U8);
6943 case CEE_CONV_OVF_I1_UN:
6944 case CEE_CONV_OVF_I2_UN:
6945 case CEE_CONV_OVF_I4_UN:
6946 case CEE_CONV_OVF_I8_UN:
6947 case CEE_CONV_OVF_U1_UN:
6948 case CEE_CONV_OVF_U2_UN:
6949 case CEE_CONV_OVF_U4_UN:
6950 case CEE_CONV_OVF_U8_UN:
6951 case CEE_CONV_OVF_I_UN:
6952 case CEE_CONV_OVF_U_UN:
6962 case CEE_ADD_OVF_UN:
6964 case CEE_MUL_OVF_UN:
6966 case CEE_SUB_OVF_UN:
6974 token = read32 (ip + 1);
6975 klass = mini_get_class (method, token, generic_context);
6976 CHECK_TYPELOAD (klass);
6978 if (generic_class_is_reference_type (cfg, klass)) {
6979 MonoInst *store, *load;
6980 int dreg = alloc_preg (cfg);
6982 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
6983 load->flags |= ins_flag;
6984 MONO_ADD_INS (cfg->cbb, load);
6986 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
6987 store->flags |= ins_flag;
6988 MONO_ADD_INS (cfg->cbb, store);
6990 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7002 token = read32 (ip + 1);
7003 klass = mini_get_class (method, token, generic_context);
7004 CHECK_TYPELOAD (klass);
7006 /* Optimize the common ldobj+stloc combination */
7016 loc_index = ip [5] - CEE_STLOC_0;
7023 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
7024 CHECK_LOCAL (loc_index);
7026 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7027 ins->dreg = cfg->locals [loc_index]->dreg;
7033 /* Optimize the ldobj+stobj combination */
7034 /* The reference case ends up being a load+store anyway */
7035 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 9) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
7040 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7047 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7056 CHECK_STACK_OVF (1);
7058 n = read32 (ip + 1);
7060 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
7061 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
7062 ins->type = STACK_OBJ;
7065 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
7066 MonoInst *iargs [1];
7068 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
7069 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
7071 if (cfg->opt & MONO_OPT_SHARED) {
7072 MonoInst *iargs [3];
7074 if (cfg->compile_aot) {
7075 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
7077 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7078 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
7079 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
7080 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
7081 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7083 if (bblock->out_of_line) {
7084 MonoInst *iargs [2];
7086 if (image == mono_defaults.corlib) {
7088 * Avoid relocations in AOT and save some space by using a
7089 * version of helper_ldstr specialized to mscorlib.
7091 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
7092 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
7094 /* Avoid creating the string object */
7095 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
7096 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
7097 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
7101 if (cfg->compile_aot) {
7102 NEW_LDSTRCONST (cfg, ins, image, n);
7104 MONO_ADD_INS (bblock, ins);
7107 NEW_PCONST (cfg, ins, NULL);
7108 ins->type = STACK_OBJ;
7109 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7111 MONO_ADD_INS (bblock, ins);
7120 MonoInst *iargs [2];
7121 MonoMethodSignature *fsig;
7124 MonoInst *vtable_arg = NULL;
7127 token = read32 (ip + 1);
7128 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7131 fsig = mono_method_get_signature (cmethod, image, token);
7133 mono_save_token_info (cfg, image, token, cmethod);
7135 if (!mono_class_init (cmethod->klass))
7138 if (cfg->generic_sharing_context)
7139 context_used = mono_method_check_context_used (cmethod);
7141 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
7142 if (check_linkdemand (cfg, method, cmethod))
7144 CHECK_CFG_EXCEPTION;
7145 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
7146 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
7149 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7150 mono_method_is_generic_sharable_impl (cmethod, TRUE)) {
7151 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7153 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7154 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7156 EMIT_NEW_METHOD_RGCTX_CONST (cfg, vtable_arg, cmethod);
7160 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
7161 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7163 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7165 CHECK_TYPELOAD (cmethod->klass);
7166 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7171 n = fsig->param_count;
7175 * Generate smaller code for the common newobj <exception> instruction in
7176 * argument checking code.
7178 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib && n <= 2 &&
7179 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
7180 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
7181 MonoInst *iargs [3];
7183 g_assert (!vtable_arg);
7187 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
7190 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
7194 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
7199 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
7202 g_assert_not_reached ();
7210 /* move the args to allow room for 'this' in the first position */
7216 /* check_call_signature () requires sp[0] to be set */
7217 this_ins.type = STACK_OBJ;
7219 if (check_call_signature (cfg, fsig, sp))
7224 if (mini_class_is_system_array (cmethod->klass)) {
7225 g_assert (!vtable_arg);
7228 *sp = emit_get_rgctx_method (cfg, context_used,
7229 cmethod, MONO_RGCTX_INFO_METHOD);
7231 EMIT_NEW_METHODCONST (cfg, *sp, cmethod);
7234 /* Avoid varargs in the common case */
7235 if (fsig->param_count == 1)
7236 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
7237 else if (fsig->param_count == 2)
7238 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
7240 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
7241 } else if (cmethod->string_ctor) {
7242 g_assert (!context_used);
7243 g_assert (!vtable_arg);
7244 /* we simply pass a null pointer */
7245 EMIT_NEW_PCONST (cfg, *sp, NULL);
7246 /* now call the string ctor */
7247 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, sp, NULL, NULL);
7249 MonoInst* callvirt_this_arg = NULL;
7251 if (cmethod->klass->valuetype) {
7252 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
7253 MONO_EMIT_NEW_VZERO (cfg, iargs [0]->dreg, cmethod->klass);
7254 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
7259 * The code generated by mini_emit_virtual_call () expects
7260 * iargs [0] to be a boxed instance, but luckily the vcall
7261 * will be transformed into a normal call there.
7263 } else if (context_used) {
7267 if (cfg->opt & MONO_OPT_SHARED)
7268 rgctx_info = MONO_RGCTX_INFO_KLASS;
7270 rgctx_info = MONO_RGCTX_INFO_VTABLE;
7271 data = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, rgctx_info);
7273 alloc = handle_alloc_from_inst (cfg, cmethod->klass, data, FALSE);
7276 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7278 CHECK_TYPELOAD (cmethod->klass);
7281 * TypeInitializationExceptions thrown from the mono_runtime_class_init
7282 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
7283 * As a workaround, we call class cctors before allocating objects.
7285 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
7286 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
7287 if (cfg->verbose_level > 2)
7288 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
7289 class_inits = g_slist_prepend (class_inits, vtable);
7292 alloc = handle_alloc (cfg, cmethod->klass, FALSE);
7297 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
7299 /* Now call the actual ctor */
7300 /* Avoid virtual calls to ctors if possible */
7301 if (cmethod->klass->marshalbyref)
7302 callvirt_this_arg = sp [0];
7304 if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7305 mono_method_check_inlining (cfg, cmethod) &&
7306 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
7307 !g_list_find (dont_inline, cmethod)) {
7310 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
7311 cfg->real_offset += 5;
7314 inline_costs += costs - 5;
7317 mono_emit_method_call_full (cfg, cmethod, fsig, sp, callvirt_this_arg, NULL);
7319 } else if (context_used &&
7320 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
7321 !mono_class_generic_sharing_enabled (cmethod->klass))) {
7322 MonoInst *cmethod_addr;
7324 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7325 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7327 mono_emit_rgctx_calli (cfg, fsig, sp, cmethod_addr, vtable_arg);
7330 mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp,
7331 callvirt_this_arg, NULL, vtable_arg);
7335 if (alloc == NULL) {
7337 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
7338 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
7352 token = read32 (ip + 1);
7353 klass = mini_get_class (method, token, generic_context);
7354 CHECK_TYPELOAD (klass);
7355 if (sp [0]->type != STACK_OBJ)
7358 if (cfg->generic_sharing_context)
7359 context_used = mono_class_check_context_used (klass);
7368 args [1] = emit_get_rgctx_klass (cfg, context_used,
7369 klass, MONO_RGCTX_INFO_KLASS);
7371 ins = mono_emit_jit_icall (cfg, mono_object_castclass, args);
7375 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7376 MonoMethod *mono_castclass;
7377 MonoInst *iargs [1];
7380 mono_castclass = mono_marshal_get_castclass (klass);
7383 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7384 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7385 g_assert (costs > 0);
7388 cfg->real_offset += 5;
7393 inline_costs += costs;
7396 ins = handle_castclass (cfg, klass, *sp);
7406 token = read32 (ip + 1);
7407 klass = mini_get_class (method, token, generic_context);
7408 CHECK_TYPELOAD (klass);
7409 if (sp [0]->type != STACK_OBJ)
7412 if (cfg->generic_sharing_context)
7413 context_used = mono_class_check_context_used (klass);
7422 args [1] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
7424 *sp = mono_emit_jit_icall (cfg, mono_object_isinst, args);
7428 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7429 MonoMethod *mono_isinst;
7430 MonoInst *iargs [1];
7433 mono_isinst = mono_marshal_get_isinst (klass);
7436 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
7437 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7438 g_assert (costs > 0);
7441 cfg->real_offset += 5;
7446 inline_costs += costs;
7449 ins = handle_isinst (cfg, klass, *sp);
7456 case CEE_UNBOX_ANY: {
7460 token = read32 (ip + 1);
7461 klass = mini_get_class (method, token, generic_context);
7462 CHECK_TYPELOAD (klass);
7464 mono_save_token_info (cfg, image, token, klass);
7466 if (cfg->generic_sharing_context)
7467 context_used = mono_class_check_context_used (klass);
7469 if (generic_class_is_reference_type (cfg, klass)) {
7472 MonoInst *iargs [2];
7477 iargs [1] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
7478 ins = mono_emit_jit_icall (cfg, mono_object_castclass, iargs);
7482 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7483 MonoMethod *mono_castclass;
7484 MonoInst *iargs [1];
7487 mono_castclass = mono_marshal_get_castclass (klass);
7490 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7491 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7493 g_assert (costs > 0);
7496 cfg->real_offset += 5;
7500 inline_costs += costs;
7502 ins = handle_castclass (cfg, klass, *sp);
7510 if (mono_class_is_nullable (klass)) {
7511 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
7518 ins = handle_unbox (cfg, klass, sp, context_used);
7524 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7537 token = read32 (ip + 1);
7538 klass = mini_get_class (method, token, generic_context);
7539 CHECK_TYPELOAD (klass);
7541 mono_save_token_info (cfg, image, token, klass);
7543 if (cfg->generic_sharing_context)
7544 context_used = mono_class_check_context_used (klass);
7546 if (generic_class_is_reference_type (cfg, klass)) {
7552 if (klass == mono_defaults.void_class)
7554 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
7556 /* frequent check in generic code: box (struct), brtrue */
7557 if (!mono_class_is_nullable (klass) &&
7558 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) && (ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S)) {
7559 /*printf ("box-brtrue opt at 0x%04x in %s\n", real_offset, method->name);*/
7561 MONO_INST_NEW (cfg, ins, OP_BR);
7562 if (*ip == CEE_BRTRUE_S) {
7565 target = ip + 1 + (signed char)(*ip);
7570 target = ip + 4 + (gint)(read32 (ip));
7573 GET_BBLOCK (cfg, tblock, target);
7574 link_bblock (cfg, bblock, tblock);
7575 ins->inst_target_bb = tblock;
7576 GET_BBLOCK (cfg, tblock, ip);
7578 * This leads to some inconsistency, since the two bblocks are
7579 * not really connected, but it is needed for handling stack
7580 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
7581 * FIXME: This should only be needed if sp != stack_start, but that
7582 * doesn't work for some reason (test failure in mcs/tests on x86).
7584 link_bblock (cfg, bblock, tblock);
7585 if (sp != stack_start) {
7586 handle_stack_args (cfg, stack_start, sp - stack_start);
7588 CHECK_UNVERIFIABLE (cfg);
7590 MONO_ADD_INS (bblock, ins);
7591 start_new_bblock = 1;
7599 if (cfg->opt & MONO_OPT_SHARED)
7600 rgctx_info = MONO_RGCTX_INFO_KLASS;
7602 rgctx_info = MONO_RGCTX_INFO_VTABLE;
7603 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
7604 *sp++ = handle_box_from_inst (cfg, val, klass, context_used, data);
7606 *sp++ = handle_box (cfg, val, klass);
7617 token = read32 (ip + 1);
7618 klass = mini_get_class (method, token, generic_context);
7619 CHECK_TYPELOAD (klass);
7621 mono_save_token_info (cfg, image, token, klass);
7623 if (cfg->generic_sharing_context)
7624 context_used = mono_class_check_context_used (klass);
7626 if (mono_class_is_nullable (klass)) {
7629 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
7630 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
7634 ins = handle_unbox (cfg, klass, sp, context_used);
7644 MonoClassField *field;
7648 if (*ip == CEE_STFLD) {
7655 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
7657 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
7660 token = read32 (ip + 1);
7661 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7662 field = mono_method_get_wrapper_data (method, token);
7663 klass = field->parent;
7666 field = mono_field_from_token (image, token, &klass, generic_context);
7670 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
7671 FIELD_ACCESS_FAILURE;
7672 mono_class_init (klass);
7674 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
7675 if (*ip == CEE_STFLD) {
7676 if (target_type_is_incompatible (cfg, field->type, sp [1]))
7678 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
7679 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
7680 MonoInst *iargs [5];
7683 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
7684 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
7685 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
7689 if (cfg->opt & MONO_OPT_INLINE) {
7690 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
7691 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7692 g_assert (costs > 0);
7694 cfg->real_offset += 5;
7697 inline_costs += costs;
7699 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
7704 #if HAVE_WRITE_BARRIERS
7705 if (mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
7706 /* insert call to write barrier */
7707 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
7708 MonoInst *iargs [2];
7711 dreg = alloc_preg (cfg);
7712 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
7714 mono_emit_method_call (cfg, write_barrier, iargs, NULL);
7718 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
7720 store->flags |= ins_flag;
7727 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
7728 MonoMethod *wrapper = (*ip == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
7729 MonoInst *iargs [4];
7732 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
7733 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
7734 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
7735 if ((cfg->opt & MONO_OPT_INLINE) && !MONO_TYPE_ISSTRUCT (mono_method_signature (wrapper)->ret)) {
7736 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
7737 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7739 g_assert (costs > 0);
7741 cfg->real_offset += 5;
7745 inline_costs += costs;
7747 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
7751 if (sp [0]->type == STACK_VTYPE) {
7754 /* Have to compute the address of the variable */
7756 var = get_vreg_to_inst (cfg, sp [0]->dreg);
7758 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
7760 g_assert (var->klass == klass);
7762 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
7766 if (*ip == CEE_LDFLDA) {
7767 dreg = alloc_preg (cfg);
7769 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
7770 ins->klass = mono_class_from_mono_type (field->type);
7771 ins->type = STACK_MP;
7776 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
7777 load->flags |= ins_flag;
7788 MonoClassField *field;
7789 gpointer addr = NULL;
7790 gboolean is_special_static;
7793 token = read32 (ip + 1);
7795 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7796 field = mono_method_get_wrapper_data (method, token);
7797 klass = field->parent;
7800 field = mono_field_from_token (image, token, &klass, generic_context);
7803 mono_class_init (klass);
7804 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
7805 FIELD_ACCESS_FAILURE;
7808 * We can only support shared generic static
7809 * field access on architectures where the
7810 * trampoline code has been extended to handle
7811 * the generic class init.
7813 #ifndef MONO_ARCH_VTABLE_REG
7814 GENERIC_SHARING_FAILURE (*ip);
7817 if (cfg->generic_sharing_context)
7818 context_used = mono_class_check_context_used (klass);
7820 g_assert (!(field->type->attrs & FIELD_ATTRIBUTE_LITERAL));
7822 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
7823 * to be called here.
7825 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
7826 mono_class_vtable (cfg->domain, klass);
7827 CHECK_TYPELOAD (klass);
7829 mono_domain_lock (cfg->domain);
7830 if (cfg->domain->special_static_fields)
7831 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
7832 mono_domain_unlock (cfg->domain);
7834 is_special_static = mono_class_field_is_special_static (field);
7836 /* Generate IR to compute the field address */
7838 if ((cfg->opt & MONO_OPT_SHARED) ||
7839 (cfg->compile_aot && is_special_static) ||
7840 (context_used && is_special_static)) {
7841 MonoInst *iargs [2];
7843 g_assert (field->parent);
7844 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7846 iargs [1] = emit_get_rgctx_field (cfg, context_used,
7847 field, MONO_RGCTX_INFO_CLASS_FIELD);
7849 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
7851 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
7852 } else if (context_used) {
7853 MonoInst *static_data;
7856 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
7857 method->klass->name_space, method->klass->name, method->name,
7858 depth, field->offset);
7861 if (mono_class_needs_cctor_run (klass, method)) {
7865 vtable = emit_get_rgctx_klass (cfg, context_used,
7866 klass, MONO_RGCTX_INFO_VTABLE);
7868 // FIXME: This doesn't work since it tries to pass the argument
7869 // in the normal way, instead of using MONO_ARCH_VTABLE_REG
7871 * The vtable pointer is always passed in a register regardless of
7872 * the calling convention, so assign it manually, and make a call
7873 * using a signature without parameters.
7875 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable);
7876 #ifdef MONO_ARCH_VTABLE_REG
7877 mono_call_inst_add_outarg_reg (cfg, call, vtable->dreg, MONO_ARCH_VTABLE_REG, FALSE);
7878 cfg->uses_vtable_reg = TRUE;
7885 * The pointer we're computing here is
7887 * super_info.static_data + field->offset
7889 static_data = emit_get_rgctx_klass (cfg, context_used,
7890 klass, MONO_RGCTX_INFO_STATIC_DATA);
7892 if (field->offset == 0) {
7895 int addr_reg = mono_alloc_preg (cfg);
7896 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
7898 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
7899 MonoInst *iargs [2];
7901 g_assert (field->parent);
7902 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7903 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
7904 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
7906 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
7908 CHECK_TYPELOAD (klass);
7910 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
7911 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
7912 if (cfg->verbose_level > 2)
7913 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
7914 class_inits = g_slist_prepend (class_inits, vtable);
7916 if (cfg->run_cctors) {
7918 /* This makes so that inline cannot trigger */
7919 /* .cctors: too many apps depend on them */
7920 /* running with a specific order... */
7921 if (! vtable->initialized)
7923 ex = mono_runtime_class_init_full (vtable, FALSE);
7925 set_exception_object (cfg, ex);
7926 goto exception_exit;
7930 addr = (char*)vtable->data + field->offset;
7932 if (cfg->compile_aot)
7933 EMIT_NEW_SFLDACONST (cfg, ins, field);
7935 EMIT_NEW_PCONST (cfg, ins, addr);
7938 * insert call to mono_threads_get_static_data (GPOINTER_TO_UINT (addr))
7939 * This could be later optimized to do just a couple of
7940 * memory dereferences with constant offsets.
7942 MonoInst *iargs [1];
7943 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
7944 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
7948 /* Generate IR to do the actual load/store operation */
7950 if (*ip == CEE_LDSFLDA) {
7951 ins->klass = mono_class_from_mono_type (field->type);
7952 ins->type = STACK_PTR;
7954 } else if (*ip == CEE_STSFLD) {
7959 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, ins->dreg, 0, sp [0]->dreg);
7960 store->flags |= ins_flag;
7962 gboolean is_const = FALSE;
7963 MonoVTable *vtable = NULL;
7965 if (!context_used) {
7966 vtable = mono_class_vtable (cfg->domain, klass);
7967 CHECK_TYPELOAD (klass);
7969 if (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) &&
7970 vtable->initialized && (field->type->attrs & FIELD_ATTRIBUTE_INIT_ONLY)) {
7971 gpointer addr = (char*)vtable->data + field->offset;
7972 int ro_type = field->type->type;
7973 if (ro_type == MONO_TYPE_VALUETYPE && field->type->data.klass->enumtype) {
7974 ro_type = field->type->data.klass->enum_basetype->type;
7976 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
7979 case MONO_TYPE_BOOLEAN:
7981 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
7985 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
7988 case MONO_TYPE_CHAR:
7990 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
7994 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
7999 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
8003 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
8006 #ifndef HAVE_MOVING_COLLECTOR
8009 case MONO_TYPE_STRING:
8010 case MONO_TYPE_OBJECT:
8011 case MONO_TYPE_CLASS:
8012 case MONO_TYPE_SZARRAY:
8014 case MONO_TYPE_FNPTR:
8015 case MONO_TYPE_ARRAY:
8016 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
8017 type_to_eval_stack_type ((cfg), field->type, *sp);
8023 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
8028 case MONO_TYPE_VALUETYPE:
8038 CHECK_STACK_OVF (1);
8040 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
8041 load->flags |= ins_flag;
8054 token = read32 (ip + 1);
8055 klass = mini_get_class (method, token, generic_context);
8056 CHECK_TYPELOAD (klass);
8057 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
8058 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
8069 const char *data_ptr;
8071 guint32 field_token;
8077 token = read32 (ip + 1);
8079 klass = mini_get_class (method, token, generic_context);
8080 CHECK_TYPELOAD (klass);
8082 if (cfg->generic_sharing_context)
8083 context_used = mono_class_check_context_used (klass);
8088 /* FIXME: Decompose later to help abcrem */
8091 args [0] = emit_get_rgctx_klass (cfg, context_used,
8092 mono_array_class_get (klass, 1), MONO_RGCTX_INFO_VTABLE);
8097 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
8099 if (cfg->opt & MONO_OPT_SHARED) {
8100 /* Decompose now to avoid problems with references to the domainvar */
8101 MonoInst *iargs [3];
8103 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8104 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8107 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
8109 /* Decompose later since it is needed by abcrem */
8110 MONO_INST_NEW (cfg, ins, OP_NEWARR);
8111 ins->dreg = alloc_preg (cfg);
8112 ins->sreg1 = sp [0]->dreg;
8113 ins->inst_newa_class = klass;
8114 ins->type = STACK_OBJ;
8116 MONO_ADD_INS (cfg->cbb, ins);
8117 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8118 cfg->cbb->has_array_access = TRUE;
8120 /* Needed so mono_emit_load_get_addr () gets called */
8121 mono_get_got_var (cfg);
8131 * we inline/optimize the initialization sequence if possible.
8132 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
8133 * for small sizes open code the memcpy
8134 * ensure the rva field is big enough
8136 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
8137 MonoMethod *memcpy_method = get_memcpy_method ();
8138 MonoInst *iargs [3];
8139 int add_reg = alloc_preg (cfg);
8141 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
8142 if (cfg->compile_aot) {
8143 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
8145 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
8147 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
8148 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
8157 if (sp [0]->type != STACK_OBJ)
8160 dreg = alloc_preg (cfg);
8161 MONO_INST_NEW (cfg, ins, OP_LDLEN);
8162 ins->dreg = alloc_preg (cfg);
8163 ins->sreg1 = sp [0]->dreg;
8164 ins->type = STACK_I4;
8165 MONO_ADD_INS (cfg->cbb, ins);
8166 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8167 cfg->cbb->has_array_access = TRUE;
8175 if (sp [0]->type != STACK_OBJ)
8178 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8180 klass = mini_get_class (method, read32 (ip + 1), generic_context);
8181 CHECK_TYPELOAD (klass);
8182 /* we need to make sure that this array is exactly the type it needs
8183 * to be for correctness. the wrappers are lax with their usage
8184 * so we need to ignore them here
8186 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly)
8187 mini_emit_check_array_type (cfg, sp [0], mono_array_class_get (klass, 1));
8190 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8194 case CEE_LDELEM_ANY:
8205 case CEE_LDELEM_REF: {
8211 if (*ip == CEE_LDELEM_ANY) {
8213 token = read32 (ip + 1);
8214 klass = mini_get_class (method, token, generic_context);
8215 CHECK_TYPELOAD (klass);
8216 mono_class_init (klass);
8219 klass = array_access_to_klass (*ip);
8221 if (sp [0]->type != STACK_OBJ)
8224 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8226 if (sp [1]->opcode == OP_ICONST) {
8227 int array_reg = sp [0]->dreg;
8228 int index_reg = sp [1]->dreg;
8229 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8231 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8232 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
8234 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8235 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
8238 if (*ip == CEE_LDELEM_ANY)
8251 case CEE_STELEM_REF:
8252 case CEE_STELEM_ANY: {
8258 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8260 if (*ip == CEE_STELEM_ANY) {
8262 token = read32 (ip + 1);
8263 klass = mini_get_class (method, token, generic_context);
8264 CHECK_TYPELOAD (klass);
8265 mono_class_init (klass);
8268 klass = array_access_to_klass (*ip);
8270 if (sp [0]->type != STACK_OBJ)
8273 /* storing a NULL doesn't need any of the complex checks in stelemref */
8274 if (generic_class_is_reference_type (cfg, klass) &&
8275 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
8276 MonoMethod* helper = mono_marshal_get_stelemref ();
8277 MonoInst *iargs [3];
8279 if (sp [0]->type != STACK_OBJ)
8281 if (sp [2]->type != STACK_OBJ)
8288 mono_emit_method_call (cfg, helper, iargs, NULL);
8290 if (sp [1]->opcode == OP_ICONST) {
8291 int array_reg = sp [0]->dreg;
8292 int index_reg = sp [1]->dreg;
8293 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8295 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8296 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
8298 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8299 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
8303 if (*ip == CEE_STELEM_ANY)
8310 case CEE_CKFINITE: {
8314 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
8315 ins->sreg1 = sp [0]->dreg;
8316 ins->dreg = alloc_freg (cfg);
8317 ins->type = STACK_R8;
8318 MONO_ADD_INS (bblock, ins);
8321 mono_decompose_opcode (cfg, ins);
8326 case CEE_REFANYVAL: {
8327 MonoInst *src_var, *src;
8329 int klass_reg = alloc_preg (cfg);
8330 int dreg = alloc_preg (cfg);
8333 MONO_INST_NEW (cfg, ins, *ip);
8336 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8337 CHECK_TYPELOAD (klass);
8338 mono_class_init (klass);
8340 if (cfg->generic_sharing_context)
8341 context_used = mono_class_check_context_used (klass);
8344 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
8346 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
8347 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
8348 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
8351 MonoInst *klass_ins;
8353 klass_ins = emit_get_rgctx_klass (cfg, context_used,
8354 klass, MONO_RGCTX_INFO_KLASS);
8357 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
8358 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
8360 mini_emit_class_check (cfg, klass_reg, klass);
8362 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
8363 ins->type = STACK_MP;
8368 case CEE_MKREFANY: {
8369 MonoInst *loc, *addr;
8372 MONO_INST_NEW (cfg, ins, *ip);
8375 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8376 CHECK_TYPELOAD (klass);
8377 mono_class_init (klass);
8379 if (cfg->generic_sharing_context)
8380 context_used = mono_class_check_context_used (klass);
8382 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
8383 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
8386 MonoInst *const_ins;
8387 int type_reg = alloc_preg (cfg);
8389 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
8390 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
8391 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8392 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8393 } else if (cfg->compile_aot) {
8394 int const_reg = alloc_preg (cfg);
8395 int type_reg = alloc_preg (cfg);
8397 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
8398 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
8399 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8400 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8402 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
8403 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
8405 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
8407 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
8408 ins->type = STACK_VTYPE;
8409 ins->klass = mono_defaults.typed_reference_class;
8416 MonoClass *handle_class;
8418 CHECK_STACK_OVF (1);
8421 n = read32 (ip + 1);
8423 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
8424 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
8425 handle = mono_method_get_wrapper_data (method, n);
8426 handle_class = mono_method_get_wrapper_data (method, n + 1);
8427 if (handle_class == mono_defaults.typehandle_class)
8428 handle = &((MonoClass*)handle)->byval_arg;
8431 handle = mono_ldtoken (image, n, &handle_class, generic_context);
8435 mono_class_init (handle_class);
8436 if (cfg->generic_sharing_context) {
8437 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
8438 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
8439 /* This case handles ldtoken
8440 of an open type, like for
8443 } else if (handle_class == mono_defaults.typehandle_class) {
8444 /* If we get a MONO_TYPE_CLASS
8445 then we need to provide the
8447 instantiation of it. */
8448 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
8451 context_used = mono_class_check_context_used (mono_class_from_mono_type (handle));
8452 } else if (handle_class == mono_defaults.fieldhandle_class)
8453 context_used = mono_class_check_context_used (((MonoClassField*)handle)->parent);
8454 else if (handle_class == mono_defaults.methodhandle_class)
8455 context_used = mono_method_check_context_used (handle);
8457 g_assert_not_reached ();
8460 if ((cfg->opt & MONO_OPT_SHARED) &&
8461 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
8462 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
8463 MonoInst *addr, *vtvar, *iargs [3];
8464 int method_context_used;
8466 if (cfg->generic_sharing_context)
8467 method_context_used = mono_method_check_context_used (method);
8469 method_context_used = 0;
8471 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8473 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
8474 EMIT_NEW_ICONST (cfg, iargs [1], n);
8475 if (method_context_used) {
8476 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
8477 method, MONO_RGCTX_INFO_METHOD);
8478 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
8480 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
8481 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
8483 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8485 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8487 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8489 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
8490 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
8491 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
8492 (cmethod->klass == mono_defaults.monotype_class->parent) &&
8493 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
8494 MonoClass *tclass = mono_class_from_mono_type (handle);
8496 mono_class_init (tclass);
8498 ins = emit_get_rgctx_klass (cfg, context_used,
8499 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
8500 } else if (cfg->compile_aot) {
8501 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
8503 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
8505 ins->type = STACK_OBJ;
8506 ins->klass = cmethod->klass;
8509 MonoInst *addr, *vtvar;
8511 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8514 if (handle_class == mono_defaults.typehandle_class) {
8515 ins = emit_get_rgctx_klass (cfg, context_used,
8516 mono_class_from_mono_type (handle),
8517 MONO_RGCTX_INFO_TYPE);
8518 } else if (handle_class == mono_defaults.methodhandle_class) {
8519 ins = emit_get_rgctx_method (cfg, context_used,
8520 handle, MONO_RGCTX_INFO_METHOD);
8521 } else if (handle_class == mono_defaults.fieldhandle_class) {
8522 ins = emit_get_rgctx_field (cfg, context_used,
8523 handle, MONO_RGCTX_INFO_CLASS_FIELD);
8525 g_assert_not_reached ();
8527 } else if (cfg->compile_aot) {
8528 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n);
8530 EMIT_NEW_PCONST (cfg, ins, handle);
8532 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8533 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8534 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8544 MONO_INST_NEW (cfg, ins, OP_THROW);
8546 ins->sreg1 = sp [0]->dreg;
8548 bblock->out_of_line = TRUE;
8549 MONO_ADD_INS (bblock, ins);
8550 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
8551 MONO_ADD_INS (bblock, ins);
8554 link_bblock (cfg, bblock, end_bblock);
8555 start_new_bblock = 1;
8557 case CEE_ENDFINALLY:
8558 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
8559 MONO_ADD_INS (bblock, ins);
8561 start_new_bblock = 1;
8564 * Control will leave the method so empty the stack, otherwise
8565 * the next basic block will start with a nonempty stack.
8567 while (sp != stack_start) {
8575 if (*ip == CEE_LEAVE) {
8577 target = ip + 5 + (gint32)read32(ip + 1);
8580 target = ip + 2 + (signed char)(ip [1]);
8583 /* empty the stack */
8584 while (sp != stack_start) {
8589 * If this leave statement is in a catch block, check for a
8590 * pending exception, and rethrow it if necessary.
8592 for (i = 0; i < header->num_clauses; ++i) {
8593 MonoExceptionClause *clause = &header->clauses [i];
8596 * Use <= in the final comparison to handle clauses with multiple
8597 * leave statements, like in bug #78024.
8598 * The ordering of the exception clauses guarantees that we find the
8601 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len)) {
8603 MonoBasicBlock *dont_throw;
8608 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
8611 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
8613 NEW_BBLOCK (cfg, dont_throw);
8616 * Currently, we allways rethrow the abort exception, despite the
8617 * fact that this is not correct. See thread6.cs for an example.
8618 * But propagating the abort exception is more important than
8619 * getting the sematics right.
8621 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
8622 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
8623 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
8625 MONO_START_BB (cfg, dont_throw);
8630 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
8632 for (tmp = handlers; tmp; tmp = tmp->next) {
8634 link_bblock (cfg, bblock, tblock);
8635 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
8636 ins->inst_target_bb = tblock;
8637 MONO_ADD_INS (bblock, ins);
8639 g_list_free (handlers);
8642 MONO_INST_NEW (cfg, ins, OP_BR);
8643 MONO_ADD_INS (bblock, ins);
8644 GET_BBLOCK (cfg, tblock, target);
8645 link_bblock (cfg, bblock, tblock);
8646 ins->inst_target_bb = tblock;
8647 start_new_bblock = 1;
8649 if (*ip == CEE_LEAVE)
8658 * Mono specific opcodes
8660 case MONO_CUSTOM_PREFIX: {
8662 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
8666 case CEE_MONO_ICALL: {
8668 MonoJitICallInfo *info;
8670 token = read32 (ip + 2);
8671 func = mono_method_get_wrapper_data (method, token);
8672 info = mono_find_jit_icall_by_addr (func);
8675 CHECK_STACK (info->sig->param_count);
8676 sp -= info->sig->param_count;
8678 ins = mono_emit_jit_icall (cfg, info->func, sp);
8679 if (!MONO_TYPE_IS_VOID (info->sig->ret))
8683 inline_costs += 10 * num_calls++;
8687 case CEE_MONO_LDPTR: {
8690 CHECK_STACK_OVF (1);
8692 token = read32 (ip + 2);
8694 ptr = mono_method_get_wrapper_data (method, token);
8695 if (cfg->compile_aot && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (strstr (method->name, "__icall_wrapper_") == method->name)) {
8696 MonoJitICallInfo *callinfo;
8697 const char *icall_name;
8699 icall_name = method->name + strlen ("__icall_wrapper_");
8700 g_assert (icall_name);
8701 callinfo = mono_find_jit_icall_by_name (icall_name);
8702 g_assert (callinfo);
8704 if (ptr == callinfo->func) {
8705 /* Will be transformed into an AOTCONST later */
8706 EMIT_NEW_PCONST (cfg, ins, ptr);
8712 /* FIXME: Generalize this */
8713 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
8714 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
8719 EMIT_NEW_PCONST (cfg, ins, ptr);
8722 inline_costs += 10 * num_calls++;
8723 /* Can't embed random pointers into AOT code */
8724 cfg->disable_aot = 1;
8727 case CEE_MONO_ICALL_ADDR: {
8728 MonoMethod *cmethod;
8731 CHECK_STACK_OVF (1);
8733 token = read32 (ip + 2);
8735 cmethod = mono_method_get_wrapper_data (method, token);
8737 if (cfg->compile_aot) {
8738 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
8740 ptr = mono_lookup_internal_call (cmethod);
8742 EMIT_NEW_PCONST (cfg, ins, ptr);
8748 case CEE_MONO_VTADDR: {
8749 MonoInst *src_var, *src;
8755 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
8756 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
8761 case CEE_MONO_NEWOBJ: {
8762 MonoInst *iargs [2];
8764 CHECK_STACK_OVF (1);
8766 token = read32 (ip + 2);
8767 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
8768 mono_class_init (klass);
8769 NEW_DOMAINCONST (cfg, iargs [0]);
8770 MONO_ADD_INS (cfg->cbb, iargs [0]);
8771 NEW_CLASSCONST (cfg, iargs [1], klass);
8772 MONO_ADD_INS (cfg->cbb, iargs [1]);
8773 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
8775 inline_costs += 10 * num_calls++;
8778 case CEE_MONO_OBJADDR:
8781 MONO_INST_NEW (cfg, ins, OP_MOVE);
8782 ins->dreg = alloc_preg (cfg);
8783 ins->sreg1 = sp [0]->dreg;
8784 ins->type = STACK_MP;
8785 MONO_ADD_INS (cfg->cbb, ins);
8789 case CEE_MONO_LDNATIVEOBJ:
8791 * Similar to LDOBJ, but instead load the unmanaged
8792 * representation of the vtype to the stack.
8797 token = read32 (ip + 2);
8798 klass = mono_method_get_wrapper_data (method, token);
8799 g_assert (klass->valuetype);
8800 mono_class_init (klass);
8803 MonoInst *src, *dest, *temp;
8806 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
8807 temp->backend.is_pinvoke = 1;
8808 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
8809 mini_emit_stobj (cfg, dest, src, klass, TRUE);
8811 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
8812 dest->type = STACK_VTYPE;
8813 dest->klass = klass;
8819 case CEE_MONO_RETOBJ: {
8821 * Same as RET, but return the native representation of a vtype
8824 g_assert (cfg->ret);
8825 g_assert (mono_method_signature (method)->pinvoke);
8830 token = read32 (ip + 2);
8831 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
8833 if (!cfg->vret_addr) {
8834 g_assert (cfg->ret_var_is_local);
8836 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
8838 EMIT_NEW_RETLOADA (cfg, ins);
8840 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
8842 if (sp != stack_start)
8845 MONO_INST_NEW (cfg, ins, OP_BR);
8846 ins->inst_target_bb = end_bblock;
8847 MONO_ADD_INS (bblock, ins);
8848 link_bblock (cfg, bblock, end_bblock);
8849 start_new_bblock = 1;
8853 case CEE_MONO_CISINST:
8854 case CEE_MONO_CCASTCLASS: {
8859 token = read32 (ip + 2);
8860 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
8861 if (ip [1] == CEE_MONO_CISINST)
8862 ins = handle_cisinst (cfg, klass, sp [0]);
8864 ins = handle_ccastclass (cfg, klass, sp [0]);
8870 case CEE_MONO_SAVE_LMF:
8871 case CEE_MONO_RESTORE_LMF:
8872 #ifdef MONO_ARCH_HAVE_LMF_OPS
8873 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
8874 MONO_ADD_INS (bblock, ins);
8875 cfg->need_lmf_area = TRUE;
8879 case CEE_MONO_CLASSCONST:
8880 CHECK_STACK_OVF (1);
8882 token = read32 (ip + 2);
8883 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
8886 inline_costs += 10 * num_calls++;
8888 case CEE_MONO_NOT_TAKEN:
8889 bblock->out_of_line = TRUE;
8893 CHECK_STACK_OVF (1);
8895 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
8896 ins->dreg = alloc_preg (cfg);
8897 ins->inst_offset = (gint32)read32 (ip + 2);
8898 ins->type = STACK_PTR;
8899 MONO_ADD_INS (bblock, ins);
8904 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
8914 /* somewhat similar to LDTOKEN */
8915 MonoInst *addr, *vtvar;
8916 CHECK_STACK_OVF (1);
8917 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
8919 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8920 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
8922 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8923 ins->type = STACK_VTYPE;
8924 ins->klass = mono_defaults.argumenthandle_class;
8937 * The following transforms:
8938 * CEE_CEQ into OP_CEQ
8939 * CEE_CGT into OP_CGT
8940 * CEE_CGT_UN into OP_CGT_UN
8941 * CEE_CLT into OP_CLT
8942 * CEE_CLT_UN into OP_CLT_UN
8944 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
8946 MONO_INST_NEW (cfg, ins, cmp->opcode);
8948 cmp->sreg1 = sp [0]->dreg;
8949 cmp->sreg2 = sp [1]->dreg;
8950 type_from_op (cmp, sp [0], sp [1]);
8952 if ((sp [0]->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
8953 cmp->opcode = OP_LCOMPARE;
8954 else if (sp [0]->type == STACK_R8)
8955 cmp->opcode = OP_FCOMPARE;
8957 cmp->opcode = OP_ICOMPARE;
8958 MONO_ADD_INS (bblock, cmp);
8959 ins->type = STACK_I4;
8960 ins->dreg = alloc_dreg (cfg, ins->type);
8961 type_from_op (ins, sp [0], sp [1]);
8963 if (cmp->opcode == OP_FCOMPARE) {
8965 * The backends expect the fceq opcodes to do the
8968 cmp->opcode = OP_NOP;
8969 ins->sreg1 = cmp->sreg1;
8970 ins->sreg2 = cmp->sreg2;
8972 MONO_ADD_INS (bblock, ins);
8979 MonoMethod *cil_method;
8980 gboolean needs_static_rgctx_invoke;
8982 CHECK_STACK_OVF (1);
8984 n = read32 (ip + 2);
8985 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
8988 mono_class_init (cmethod->klass);
8990 mono_save_token_info (cfg, image, n, cmethod);
8992 if (cfg->generic_sharing_context)
8993 context_used = mono_method_check_context_used (cmethod);
8995 needs_static_rgctx_invoke = mono_method_needs_static_rgctx_invoke (cmethod, TRUE);
8997 cil_method = cmethod;
8998 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
8999 METHOD_ACCESS_FAILURE;
9001 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9002 if (check_linkdemand (cfg, method, cmethod))
9004 CHECK_CFG_EXCEPTION;
9005 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9006 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9010 * Optimize the common case of ldftn+delegate creation
9012 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE) && !defined(HAVE_WRITE_BARRIERS)
9013 /* FIXME: SGEN support */
9014 /* FIXME: handle shared static generic methods */
9015 /* FIXME: handle this in shared code */
9016 if (!needs_static_rgctx_invoke && !context_used && (sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
9017 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
9018 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
9019 MonoInst *target_ins;
9022 if (cfg->verbose_level > 3)
9023 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
9024 target_ins = sp [-1];
9026 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod);
9035 if (needs_static_rgctx_invoke)
9036 cmethod = mono_marshal_get_static_rgctx_invoke (cmethod);
9038 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
9039 } else if (needs_static_rgctx_invoke) {
9040 EMIT_NEW_METHODCONST (cfg, argconst, mono_marshal_get_static_rgctx_invoke (cmethod));
9042 EMIT_NEW_METHODCONST (cfg, argconst, cmethod);
9044 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
9048 inline_costs += 10 * num_calls++;
9051 case CEE_LDVIRTFTN: {
9056 n = read32 (ip + 2);
9057 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9060 mono_class_init (cmethod->klass);
9062 if (cfg->generic_sharing_context)
9063 context_used = mono_method_check_context_used (cmethod);
9065 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9066 if (check_linkdemand (cfg, method, cmethod))
9068 CHECK_CFG_EXCEPTION;
9069 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9070 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9077 args [1] = emit_get_rgctx_method (cfg, context_used,
9078 cmethod, MONO_RGCTX_INFO_METHOD);
9079 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
9081 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
9082 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
9086 inline_costs += 10 * num_calls++;
9090 CHECK_STACK_OVF (1);
9092 n = read16 (ip + 2);
9094 EMIT_NEW_ARGLOAD (cfg, ins, n);
9099 CHECK_STACK_OVF (1);
9101 n = read16 (ip + 2);
9103 NEW_ARGLOADA (cfg, ins, n);
9104 MONO_ADD_INS (cfg->cbb, ins);
9112 n = read16 (ip + 2);
9114 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
9116 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
9120 CHECK_STACK_OVF (1);
9122 n = read16 (ip + 2);
9124 EMIT_NEW_LOCLOAD (cfg, ins, n);
9129 unsigned char *tmp_ip;
9130 CHECK_STACK_OVF (1);
9132 n = read16 (ip + 2);
9135 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
9141 EMIT_NEW_LOCLOADA (cfg, ins, n);
9150 n = read16 (ip + 2);
9152 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
9154 emit_stloc_ir (cfg, sp, header, n);
9161 if (sp != stack_start)
9163 if (cfg->method != method)
9165 * Inlining this into a loop in a parent could lead to
9166 * stack overflows which is different behavior than the
9167 * non-inlined case, thus disable inlining in this case.
9169 goto inline_failure;
9171 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
9172 ins->dreg = alloc_preg (cfg);
9173 ins->sreg1 = sp [0]->dreg;
9174 ins->type = STACK_PTR;
9175 MONO_ADD_INS (cfg->cbb, ins);
9177 cfg->flags |= MONO_CFG_HAS_ALLOCA;
9178 if (header->init_locals)
9179 ins->flags |= MONO_INST_INIT;
9184 case CEE_ENDFILTER: {
9185 MonoExceptionClause *clause, *nearest;
9186 int cc, nearest_num;
9190 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
9192 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
9193 ins->sreg1 = (*sp)->dreg;
9194 MONO_ADD_INS (bblock, ins);
9195 start_new_bblock = 1;
9200 for (cc = 0; cc < header->num_clauses; ++cc) {
9201 clause = &header->clauses [cc];
9202 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
9203 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
9204 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
9210 if ((ip - header->code) != nearest->handler_offset)
9215 case CEE_UNALIGNED_:
9216 ins_flag |= MONO_INST_UNALIGNED;
9217 /* FIXME: record alignment? we can assume 1 for now */
9222 ins_flag |= MONO_INST_VOLATILE;
9226 ins_flag |= MONO_INST_TAILCALL;
9227 cfg->flags |= MONO_CFG_HAS_TAIL;
9228 /* Can't inline tail calls at this time */
9229 inline_costs += 100000;
9236 token = read32 (ip + 2);
9237 klass = mini_get_class (method, token, generic_context);
9238 CHECK_TYPELOAD (klass);
9239 if (generic_class_is_reference_type (cfg, klass))
9240 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
9242 mini_emit_initobj (cfg, *sp, NULL, klass);
9246 case CEE_CONSTRAINED_:
9248 token = read32 (ip + 2);
9249 constrained_call = mono_class_get_full (image, token, generic_context);
9250 CHECK_TYPELOAD (constrained_call);
9255 MonoInst *iargs [3];
9259 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
9260 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
9261 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
9262 /* emit_memset only works when val == 0 */
9263 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
9268 if (ip [1] == CEE_CPBLK) {
9269 MonoMethod *memcpy_method = get_memcpy_method ();
9270 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
9272 MonoMethod *memset_method = get_memset_method ();
9273 mono_emit_method_call (cfg, memset_method, iargs, NULL);
9283 ins_flag |= MONO_INST_NOTYPECHECK;
9285 ins_flag |= MONO_INST_NORANGECHECK;
9286 /* we ignore the no-nullcheck for now since we
9287 * really do it explicitly only when doing callvirt->call
9293 int handler_offset = -1;
9295 for (i = 0; i < header->num_clauses; ++i) {
9296 MonoExceptionClause *clause = &header->clauses [i];
9297 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
9298 handler_offset = clause->handler_offset;
9303 bblock->flags |= BB_EXCEPTION_UNSAFE;
9305 g_assert (handler_offset != -1);
9307 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
9308 MONO_INST_NEW (cfg, ins, OP_RETHROW);
9309 ins->sreg1 = load->dreg;
9310 MONO_ADD_INS (bblock, ins);
9312 link_bblock (cfg, bblock, end_bblock);
9313 start_new_bblock = 1;
9321 CHECK_STACK_OVF (1);
9323 token = read32 (ip + 2);
9324 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC) {
9325 MonoType *type = mono_type_create_from_typespec (image, token);
9326 token = mono_type_size (type, &ialign);
9328 MonoClass *klass = mono_class_get_full (image, token, generic_context);
9329 CHECK_TYPELOAD (klass);
9330 mono_class_init (klass);
9331 token = mono_class_value_size (klass, &align);
9333 EMIT_NEW_ICONST (cfg, ins, token);
9338 case CEE_REFANYTYPE: {
9339 MonoInst *src_var, *src;
9345 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9347 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
9348 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
9349 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
9359 g_error ("opcode 0xfe 0x%02x not handled", ip [1]);
9364 g_error ("opcode 0x%02x not handled", *ip);
9367 if (start_new_bblock != 1)
9370 bblock->cil_length = ip - bblock->cil_code;
9371 bblock->next_bb = end_bblock;
9373 if (cfg->method == method && cfg->domainvar) {
9375 MonoInst *get_domain;
9377 cfg->cbb = init_localsbb;
9379 if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
9380 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
9383 get_domain->dreg = alloc_preg (cfg);
9384 MONO_ADD_INS (cfg->cbb, get_domain);
9386 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
9387 MONO_ADD_INS (cfg->cbb, store);
9390 if (cfg->method == method && cfg->got_var)
9391 mono_emit_load_got_addr (cfg);
9393 if (header->init_locals) {
9396 cfg->cbb = init_localsbb;
9398 for (i = 0; i < header->num_locals; ++i) {
9399 MonoType *ptype = header->locals [i];
9400 int t = ptype->type;
9401 dreg = cfg->locals [i]->dreg;
9403 if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
9404 t = ptype->data.klass->enum_basetype->type;
9406 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9407 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
9408 MONO_EMIT_NEW_ICONST (cfg, cfg->locals [i]->dreg, 0);
9409 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
9410 MONO_EMIT_NEW_I8CONST (cfg, cfg->locals [i]->dreg, 0);
9411 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
9412 MONO_INST_NEW (cfg, ins, OP_R8CONST);
9413 ins->type = STACK_R8;
9414 ins->inst_p0 = (void*)&r8_0;
9415 ins->dreg = alloc_dreg (cfg, STACK_R8);
9416 MONO_ADD_INS (init_localsbb, ins);
9417 EMIT_NEW_LOCSTORE (cfg, store, i, ins);
9418 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
9419 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
9420 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
9422 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9429 if (cfg->method == method) {
9431 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
9432 bb->region = mono_find_block_region (cfg, bb->real_offset);
9434 mono_create_spvar_for_region (cfg, bb->region);
9435 if (cfg->verbose_level > 2)
9436 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
9440 g_slist_free (class_inits);
9441 dont_inline = g_list_remove (dont_inline, method);
9443 if (inline_costs < 0) {
9446 /* Method is too large */
9447 mname = mono_method_full_name (method, TRUE);
9448 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
9449 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
9454 if ((cfg->verbose_level > 2) && (cfg->method == method))
9455 mono_print_code (cfg, "AFTER METHOD-TO-IR");
9457 return inline_costs;
9460 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
9461 g_slist_free (class_inits);
9462 dont_inline = g_list_remove (dont_inline, method);
9466 g_slist_free (class_inits);
9467 dont_inline = g_list_remove (dont_inline, method);
9471 g_slist_free (class_inits);
9472 dont_inline = g_list_remove (dont_inline, method);
9473 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
9477 g_slist_free (class_inits);
9478 dont_inline = g_list_remove (dont_inline, method);
9479 set_exception_type_from_invalid_il (cfg, method, ip);
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Return the OP_STORE*_MEMBASE_IMM opcode corresponding to the
 * OP_STORE*_MEMBASE_REG opcode OPCODE, preserving the operand size.
 * Presumably used when the stored value is found to be a constant so
 * the register operand can be folded into an immediate — confirm at
 * the call sites.
 */
store_membase_reg_to_store_membase_imm (int opcode)
	case OP_STORE_MEMBASE_REG:
		return OP_STORE_MEMBASE_IMM;
	case OP_STOREI1_MEMBASE_REG:
		return OP_STOREI1_MEMBASE_IMM;
	case OP_STOREI2_MEMBASE_REG:
		return OP_STOREI2_MEMBASE_IMM;
	case OP_STOREI4_MEMBASE_REG:
		return OP_STOREI4_MEMBASE_IMM;
	case OP_STOREI8_MEMBASE_REG:
		return OP_STOREI8_MEMBASE_IMM;
		/* no immediate variant exists for any other store opcode */
		g_assert_not_reached ();
9504 #endif /* DISABLE_JIT */
/*
 * mono_op_to_op_imm:
 *
 *   Return the _IMM variant of OPCODE, i.e. the same operation with a
 * register operand replaced by an inline immediate constant.
 */
mono_op_to_op_imm (int opcode)
		return OP_IDIV_UN_IMM;
		return OP_IREM_UN_IMM;
		return OP_ISHR_UN_IMM;
		return OP_LSHR_UN_IMM;
		return OP_COMPARE_IMM;
		return OP_ICOMPARE_IMM;
		return OP_LCOMPARE_IMM;
	/* stores: the source register becomes an immediate operand */
	case OP_STORE_MEMBASE_REG:
		return OP_STORE_MEMBASE_IMM;
	case OP_STOREI1_MEMBASE_REG:
		return OP_STOREI1_MEMBASE_IMM;
	case OP_STOREI2_MEMBASE_REG:
		return OP_STOREI2_MEMBASE_IMM;
	case OP_STOREI4_MEMBASE_REG:
		return OP_STOREI4_MEMBASE_IMM;
	/* architecture-specific memory-operand opcodes */
#if defined(__i386__) || defined (__x86_64__)
		return OP_X86_PUSH_IMM;
	case OP_X86_COMPARE_MEMBASE_REG:
		return OP_X86_COMPARE_MEMBASE_IMM;
#if defined(__x86_64__)
	case OP_AMD64_ICOMPARE_MEMBASE_REG:
		return OP_AMD64_ICOMPARE_MEMBASE_IMM;
	case OP_VOIDCALL_REG:
		return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Map a CEE_LDIND_* CIL opcode to the equivalent OP_LOAD*_MEMBASE
 * IR opcode with the same operand size and signedness.
 */
ldind_to_load_membase (int opcode)
		return OP_LOADI1_MEMBASE;
		return OP_LOADU1_MEMBASE;
		return OP_LOADI2_MEMBASE;
		return OP_LOADU2_MEMBASE;
		return OP_LOADI4_MEMBASE;
		return OP_LOADU4_MEMBASE;
		/* two pointer-sized loads: presumably LDIND_I and LDIND_REF — confirm */
		return OP_LOAD_MEMBASE;
		return OP_LOAD_MEMBASE;
		return OP_LOADI8_MEMBASE;
		return OP_LOADR4_MEMBASE;
		return OP_LOADR8_MEMBASE;
		/* any other opcode is not an indirect load */
		g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Map a CEE_STIND_* CIL opcode to the equivalent
 * OP_STORE*_MEMBASE_REG IR opcode with the same operand size.
 */
stind_to_store_membase (int opcode)
		return OP_STOREI1_MEMBASE_REG;
		return OP_STOREI2_MEMBASE_REG;
		return OP_STOREI4_MEMBASE_REG;
		return OP_STORE_MEMBASE_REG;
		return OP_STOREI8_MEMBASE_REG;
		return OP_STORER4_MEMBASE_REG;
		return OP_STORER8_MEMBASE_REG;
		/* any other opcode is not an indirect store */
		g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Return the OP_LOAD*_MEM variant of an OP_LOAD*_MEMBASE opcode,
 * i.e. a load from a constant absolute address rather than from
 * basereg + offset, on targets which have such instructions.
 */
mono_load_membase_to_load_mem (int opcode)
	// FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
#if defined(__i386__) || defined(__x86_64__)
	case OP_LOAD_MEMBASE:
	case OP_LOADU1_MEMBASE:
		return OP_LOADU1_MEM;
	case OP_LOADU2_MEMBASE:
		return OP_LOADU2_MEM;
	case OP_LOADI4_MEMBASE:
		return OP_LOADI4_MEM;
	case OP_LOADU4_MEMBASE:
		return OP_LOADU4_MEM;
	/* 64 bit loads only exist when registers are 64 bit wide */
#if SIZEOF_REGISTER == 8
	case OP_LOADI8_MEMBASE:
		return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   Return an x86/amd64 opcode which performs OPCODE directly on a
 * memory destination, given that the result would otherwise be stored
 * by STORE_OPCODE. Presumably used to fuse a load/op/store sequence
 * into a single read-modify-write instruction — confirm at call sites.
 * The guards reject store sizes the target cannot fuse.
 */
op_to_op_dest_membase (int store_opcode, int opcode)
#if defined(__i386__)
	/* x86: only pointer-sized / 32 bit stores can be fused */
	if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
		return OP_X86_ADD_MEMBASE_REG;
		return OP_X86_SUB_MEMBASE_REG;
		return OP_X86_AND_MEMBASE_REG;
		return OP_X86_OR_MEMBASE_REG;
		return OP_X86_XOR_MEMBASE_REG;
		return OP_X86_ADD_MEMBASE_IMM;
		return OP_X86_SUB_MEMBASE_IMM;
		return OP_X86_AND_MEMBASE_IMM;
		return OP_X86_OR_MEMBASE_IMM;
		return OP_X86_XOR_MEMBASE_IMM;
#if defined(__x86_64__)
	/* amd64: 64 bit stores can be fused as well */
	if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
		return OP_X86_ADD_MEMBASE_REG;
		return OP_X86_SUB_MEMBASE_REG;
		return OP_X86_AND_MEMBASE_REG;
		return OP_X86_OR_MEMBASE_REG;
		return OP_X86_XOR_MEMBASE_REG;
		return OP_X86_ADD_MEMBASE_IMM;
		return OP_X86_SUB_MEMBASE_IMM;
		return OP_X86_AND_MEMBASE_IMM;
		return OP_X86_OR_MEMBASE_IMM;
		return OP_X86_XOR_MEMBASE_IMM;
		return OP_AMD64_ADD_MEMBASE_REG;
		return OP_AMD64_SUB_MEMBASE_REG;
		return OP_AMD64_AND_MEMBASE_REG;
		return OP_AMD64_OR_MEMBASE_REG;
		return OP_AMD64_XOR_MEMBASE_REG;
		return OP_AMD64_ADD_MEMBASE_IMM;
		return OP_AMD64_SUB_MEMBASE_IMM;
		return OP_AMD64_AND_MEMBASE_IMM;
		return OP_AMD64_OR_MEMBASE_IMM;
		return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Return an opcode which computes OPCODE and stores the result
 * directly to memory. On x86/amd64 only the byte-sized SETcc forms
 * exist, so only compare results stored through
 * OP_STOREI1_MEMBASE_REG can be fused.
 */
op_to_op_store_membase (int store_opcode, int opcode)
#if defined(__i386__) || defined(__x86_64__)
		if (store_opcode == OP_STOREI1_MEMBASE_REG)
			return OP_X86_SETEQ_MEMBASE;
		if (store_opcode == OP_STOREI1_MEMBASE_REG)
			return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Return a variant of OPCODE which reads its first source operand
 * directly from memory, given that the operand is the result of
 * LOAD_OPCODE. The load size must match what the fused instruction
 * reads, hence the LOAD_OPCODE guards.
 */
op_to_op_src1_membase (int load_opcode, int opcode)
	/* FIXME: This has sign extension issues */
	if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
		return OP_X86_COMPARE_MEMBASE8_IMM;
	if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
		return OP_X86_PUSH_MEMBASE;
	case OP_COMPARE_IMM:
	case OP_ICOMPARE_IMM:
		return OP_X86_COMPARE_MEMBASE_IMM;
		return OP_X86_COMPARE_MEMBASE_REG;
	/* FIXME: This has sign extension issues */
	if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
		return OP_X86_COMPARE_MEMBASE8_IMM;
	if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
		return OP_X86_PUSH_MEMBASE;
	/* FIXME: This only works for 32 bit immediates
	case OP_COMPARE_IMM:
	case OP_LCOMPARE_IMM:
	if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
		return OP_AMD64_COMPARE_MEMBASE_IMM;
	case OP_ICOMPARE_IMM:
	if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
		return OP_AMD64_ICOMPARE_MEMBASE_IMM;
	if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
		return OP_AMD64_COMPARE_MEMBASE_REG;
	if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
		return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Return a variant of OPCODE which reads its second source operand
 * directly from memory, given that the operand is the result of
 * LOAD_OPCODE. As with op_to_op_src1_membase (), the load size must
 * match what the fused instruction reads.
 */
op_to_op_src2_membase (int load_opcode, int opcode)
	/* x86: only pointer-sized / 32 bit loads can be fused */
	if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
		return OP_X86_COMPARE_REG_MEMBASE;
		return OP_X86_ADD_REG_MEMBASE;
		return OP_X86_SUB_REG_MEMBASE;
		return OP_X86_AND_REG_MEMBASE;
		return OP_X86_OR_REG_MEMBASE;
		return OP_X86_XOR_REG_MEMBASE;
	/* amd64: pick the 32 bit or 64 bit form based on the load size */
	if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
		return OP_AMD64_ICOMPARE_REG_MEMBASE;
	if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
		return OP_AMD64_COMPARE_REG_MEMBASE;
	if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
		return OP_X86_ADD_REG_MEMBASE;
	if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
		return OP_X86_SUB_REG_MEMBASE;
	if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
		return OP_X86_AND_REG_MEMBASE;
	if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
		return OP_X86_OR_REG_MEMBASE;
	if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
		return OP_X86_XOR_REG_MEMBASE;
	if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
		return OP_AMD64_ADD_REG_MEMBASE;
	if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
		return OP_AMD64_SUB_REG_MEMBASE;
	if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
		return OP_AMD64_AND_REG_MEMBASE;
	if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
		return OP_AMD64_OR_REG_MEMBASE;
	if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
		return OP_AMD64_XOR_REG_MEMBASE;
9927 mono_op_to_op_imm_noemul (int opcode)
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but refuse to substitute an _IMM form for
 * opcodes which are emulated via helper calls on this configuration
 * (long shifts on 32-bit targets, and mul/div/rem when
 * MONO_ARCH_EMULATE_MUL_DIV / MONO_ARCH_EMULATE_DIV are defined), since the
 * emulation path has no immediate variant.
 * NOTE(review): the switch/case labels between the #if blocks are missing
 * from this excerpt; the above is inferred — confirm against full source.
 */
9930 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
9935 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
9943 	return mono_op_to_op_imm (opcode);
9950 * mono_handle_global_vregs:
9952 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
9956 mono_handle_global_vregs (MonoCompile *cfg)
/*
 * Pass 1 records, per vreg, the single bblock it is used in (block_num + 1,
 * since 0 is a valid block number), or -1 once it is seen in a second block;
 * vregs seen in two blocks get a MonoInst variable created for them.
 * Pass 2 converts variables used in only one bblock back into local vregs,
 * then compresses the varinfo/vars tables.
 * NOTE(review): many interior lines are missing from this excerpt; code is
 * left byte-identical, comments only.
 */
9962 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
9964 #ifdef MONO_ARCH_SIMD_INTRINSICS
9965 if (cfg->uses_simd_intrinsics)
9966 mono_simd_simplify_indirection (cfg);
9969 /* Find local vregs used in more than one bb */
9970 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
9971 MonoInst *ins = bb->code;
9972 int block_num = bb->block_num;
9974 if (cfg->verbose_level > 2)
9975 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
9978 for (; ins; ins = ins->next) {
9979 const char *spec = INS_INFO (ins->opcode);
9980 int regtype, regindex;
9983 if (G_UNLIKELY (cfg->verbose_level > 2))
9984 mono_print_ins (ins);
/* This pass runs on the low-level IR only; CIL-level opcodes must be gone. */
9986 g_assert (ins->opcode >= MONO_CEE_LAST);
/* Visit dreg, sreg1 and sreg2 of the instruction in turn. */
9988 for (regindex = 0; regindex < 3; regindex ++) {
9991 if (regindex == 0) {
9992 regtype = spec [MONO_INST_DEST];
9996 } else if (regindex == 1) {
9997 regtype = spec [MONO_INST_SRC1];
10002 regtype = spec [MONO_INST_SRC2];
10003 if (regtype == ' ')
10008 #if SIZEOF_REGISTER == 4
10009 if (regtype == 'l') {
10011 * Since some instructions reference the original long vreg,
10012 * and some reference the two component vregs, it is quite hard
10013 * to determine when it needs to be global. So be conservative.
10015 if (!get_vreg_to_inst (cfg, vreg)) {
10016 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
10018 if (cfg->verbose_level > 2)
10019 printf ("LONG VREG R%d made global.\n", vreg);
10023 * Make the component vregs volatile since the optimizations can
10024 * get confused otherwise.
10026 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
10027 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
10031 g_assert (vreg != -1);
10033 prev_bb = vreg_to_bb [vreg];
10034 if (prev_bb == 0) {
10035 /* 0 is a valid block num */
10036 vreg_to_bb [vreg] = block_num + 1;
10037 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* Hardware registers are handled by the regalloc, never made global. */
10038 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
10041 if (!get_vreg_to_inst (cfg, vreg)) {
10042 if (G_UNLIKELY (cfg->verbose_level > 2))
10043 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
10047 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
10050 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
10053 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
10056 g_assert_not_reached ();
10060 /* Flag as having been used in more than one bb */
10061 vreg_to_bb [vreg] = -1;
10067 /* If a variable is used in only one bblock, convert it into a local vreg */
10068 for (i = 0; i < cfg->num_varinfo; i++) {
10069 MonoInst *var = cfg->varinfo [i];
10070 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
10072 switch (var->type) {
10078 #if SIZEOF_REGISTER == 8
10081 #if !defined(__i386__) && !defined(MONO_ARCH_SOFT_FLOAT)
10082 /* Enabling this screws up the fp stack on x86 */
10085 /* Arguments are implicitly global */
10086 /* Putting R4 vars into registers doesn't work currently */
10087 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg) {
10089 * Make sure that the variable's liveness interval doesn't contain a call, since
10090 * that would cause the lvreg to be spilled, making the whole optimization
10093 /* This is too slow for JIT compilation */
10095 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
10097 int def_index, call_index, ins_index;
10098 gboolean spilled = FALSE;
10103 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
10104 const char *spec = INS_INFO (ins->opcode);
10106 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
10107 def_index = ins_index;
/*
 * NOTE(review): both arms of this || test SRC1/sreg1 — the second arm
 * is a duplicate.  It was presumably meant to test
 * spec [MONO_INST_SRC2] / ins->sreg2; as written, uses of the variable
 * as the second source operand are never checked against call_index.
 * Not fixed here because surrounding lines are missing from this view.
 */
10109 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
10110 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
10111 if (call_index > def_index) {
10117 if (MONO_IS_CALL (ins))
10118 call_index = ins_index;
10128 if (G_UNLIKELY (cfg->verbose_level > 2))
10129 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
10130 var->flags |= MONO_INST_IS_DEAD;
10131 cfg->vreg_to_inst [var->dreg] = NULL;
10138 * Compress the varinfo and vars tables so the liveness computation is faster and
10139 * takes up less space.
10142 for (i = 0; i < cfg->num_varinfo; ++i) {
10143 MonoInst *var = cfg->varinfo [i];
10144 if (pos < i && cfg->locals_start == i)
10145 cfg->locals_start = pos;
10146 if (!(var->flags & MONO_INST_IS_DEAD)) {
10148 cfg->varinfo [pos] = cfg->varinfo [i];
10149 cfg->varinfo [pos]->inst_c0 = pos;
10150 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
10151 cfg->vars [pos].idx = pos;
10152 #if SIZEOF_REGISTER == 4
10153 if (cfg->varinfo [pos]->type == STACK_I8) {
10154 /* Modify the two component vars too */
10157 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
10158 var1->inst_c0 = pos;
10159 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
10160 var1->inst_c0 = pos;
10167 cfg->num_varinfo = pos;
10168 if (cfg->locals_start > cfg->num_varinfo)
10169 cfg->locals_start = cfg->num_varinfo;
10173 * mono_spill_global_vars:
10175 * Generate spill code for variables which are not allocated to registers,
10176 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
10177 * code is generated which could be optimized by the local optimization passes.
10180 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
/*
 * For each instruction: if the dreg maps to a stack variable, either fuse a
 * _membase store into the instruction or emit an explicit store after it;
 * symmetrically, each sreg mapping to a stack variable gets a fused _membase
 * load or an explicit load inserted before the instruction.  vreg_to_lvreg[]
 * caches the lvreg a variable was last loaded into so repeated reads within
 * a bblock reuse the register; the cache is flushed at bblock boundaries and
 * across calls.
 * NOTE(review): many interior lines are missing from this excerpt; code is
 * left byte-identical, comments only.
 */
10182 MonoBasicBlock *bb;
10184 int orig_next_vreg;
10185 guint32 *vreg_to_lvreg;
10187 guint32 i, lvregs_len;
10188 gboolean dest_has_lvreg = FALSE;
10189 guint32 stacktypes [128];
10191 *need_local_opts = FALSE;
10193 memset (spec2, 0, sizeof (spec2));
10195 /* FIXME: Move this function to mini.c */
/* Map INS_INFO () regtype characters to stack types for alloc_dreg (). */
10196 stacktypes ['i'] = STACK_PTR;
10197 stacktypes ['l'] = STACK_I8;
10198 stacktypes ['f'] = STACK_R8;
10199 #ifdef MONO_ARCH_SIMD_INTRINSICS
10200 stacktypes ['x'] = STACK_VTYPE;
10203 #if SIZEOF_REGISTER == 4
10204 /* Create MonoInsts for longs */
10205 for (i = 0; i < cfg->num_varinfo; i++) {
10206 MonoInst *ins = cfg->varinfo [i];
10208 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
10209 switch (ins->type) {
10210 #ifdef MONO_ARCH_SOFT_FLOAT
10216 g_assert (ins->opcode == OP_REGOFFSET);
/* Give the two 32-bit component vregs stack slots within the 64-bit slot. */
10218 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
10220 tree->opcode = OP_REGOFFSET;
10221 tree->inst_basereg = ins->inst_basereg;
10222 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
10224 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
10226 tree->opcode = OP_REGOFFSET;
10227 tree->inst_basereg = ins->inst_basereg;
10228 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
10238 /* FIXME: widening and truncation */
10241 * As an optimization, when a variable allocated to the stack is first loaded into
10242 * an lvreg, we will remember the lvreg and use it the next time instead of loading
10243 * the variable again.
10245 orig_next_vreg = cfg->next_vreg;
10246 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
10247 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
10250 /* Add spill loads/stores */
10251 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10254 if (cfg->verbose_level > 2)
10255 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
10257 /* Clear vreg_to_lvreg array */
10258 for (i = 0; i < lvregs_len; i++)
10259 vreg_to_lvreg [lvregs [i]] = 0;
10263 MONO_BB_FOR_EACH_INS (bb, ins) {
10264 const char *spec = INS_INFO (ins->opcode);
10265 int regtype, srcindex, sreg, tmp_reg, prev_dreg;
10266 gboolean store, no_lvreg;
10268 if (G_UNLIKELY (cfg->verbose_level > 2))
10269 mono_print_ins (ins);
10271 if (ins->opcode == OP_NOP)
10275 * We handle LDADDR here as well, since it can only be decomposed
10276 * when variable addresses are known.
10278 if (ins->opcode == OP_LDADDR) {
10279 MonoInst *var = ins->inst_p0;
10281 if (var->opcode == OP_VTARG_ADDR) {
10282 /* Happens on SPARC/S390 where vtypes are passed by reference */
10283 MonoInst *vtaddr = var->inst_left;
10284 if (vtaddr->opcode == OP_REGVAR) {
10285 ins->opcode = OP_MOVE;
10286 ins->sreg1 = vtaddr->dreg;
/* NOTE: var->inst_left == vtaddr here; the mixed spelling is equivalent. */
10288 else if (var->inst_left->opcode == OP_REGOFFSET) {
10289 ins->opcode = OP_LOAD_MEMBASE;
10290 ins->inst_basereg = vtaddr->inst_basereg;
10291 ins->inst_offset = vtaddr->inst_offset;
10295 g_assert (var->opcode == OP_REGOFFSET);
/* Address of a stack slot: basereg + offset. */
10297 ins->opcode = OP_ADD_IMM;
10298 ins->sreg1 = var->inst_basereg;
10299 ins->inst_imm = var->inst_offset;
10302 *need_local_opts = TRUE;
10303 spec = INS_INFO (ins->opcode);
/* CIL-level opcodes must have been lowered before this pass. */
10306 if (ins->opcode < MONO_CEE_LAST) {
10307 mono_print_ins (ins);
10308 g_assert_not_reached ();
10312 * Store opcodes have destbasereg in the dreg, but in reality, it is an
10316 if (MONO_IS_STORE_MEMBASE (ins)) {
/* Swap dreg/sreg2 so the base register is processed as a source below. */
10317 tmp_reg = ins->dreg;
10318 ins->dreg = ins->sreg2;
10319 ins->sreg2 = tmp_reg;
10322 spec2 [MONO_INST_DEST] = ' ';
10323 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10324 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10326 } else if (MONO_IS_STORE_MEMINDEX (ins))
10327 g_assert_not_reached ();
10332 if (G_UNLIKELY (cfg->verbose_level > 2))
10333 printf ("\t %.3s %d %d %d\n", spec, ins->dreg, ins->sreg1, ins->sreg2);
/* ---- Handle the destination register ---- */
10338 regtype = spec [MONO_INST_DEST];
10339 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
10342 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
10343 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
10344 MonoInst *store_ins;
10347 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
10349 if (var->opcode == OP_REGVAR) {
10350 ins->dreg = var->dreg;
10351 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
10353 * Instead of emitting a load+store, use a _membase opcode.
10355 g_assert (var->opcode == OP_REGOFFSET);
10356 if (ins->opcode == OP_MOVE) {
10359 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
10360 ins->inst_basereg = var->inst_basereg;
10361 ins->inst_offset = var->inst_offset;
10364 spec = INS_INFO (ins->opcode);
10368 g_assert (var->opcode == OP_REGOFFSET);
10370 prev_dreg = ins->dreg;
10372 /* Invalidate any previous lvreg for this vreg */
10373 vreg_to_lvreg [ins->dreg] = 0;
10377 #ifdef MONO_ARCH_SOFT_FLOAT
10378 if (store_opcode == OP_STORER8_MEMBASE_REG) {
/* Soft float: R8 values live in integer register pairs. */
10380 store_opcode = OP_STOREI8_MEMBASE_REG;
/* Redirect the instruction to a fresh lvreg; the store below spills it. */
10384 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
10386 if (regtype == 'l') {
10387 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
10388 mono_bblock_insert_after_ins (bb, ins, store_ins);
10389 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
10390 mono_bblock_insert_after_ins (bb, ins, store_ins);
10393 g_assert (store_opcode != OP_STOREV_MEMBASE);
10395 /* Try to fuse the store into the instruction itself */
10396 /* FIXME: Add more instructions */
10397 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
10398 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
10399 ins->inst_imm = ins->inst_c0;
10400 ins->inst_destbasereg = var->inst_basereg;
10401 ins->inst_offset = var->inst_offset;
10402 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
10403 ins->opcode = store_opcode;
10404 ins->inst_destbasereg = var->inst_basereg;
10405 ins->inst_offset = var->inst_offset;
/* Became a store: swap dreg/sreg2 and patch the spec, as above. */
10409 tmp_reg = ins->dreg;
10410 ins->dreg = ins->sreg2;
10411 ins->sreg2 = tmp_reg;
10414 spec2 [MONO_INST_DEST] = ' ';
10415 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10416 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10418 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
10419 // FIXME: The backends expect the base reg to be in inst_basereg
10420 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
10422 ins->inst_basereg = var->inst_basereg;
10423 ins->inst_offset = var->inst_offset;
10424 spec = INS_INFO (ins->opcode);
10426 /* printf ("INS: "); mono_print_ins (ins); */
10427 /* Create a store instruction */
10428 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
10430 /* Insert it after the instruction */
10431 mono_bblock_insert_after_ins (bb, ins, store_ins);
10434 * We can't assign ins->dreg to var->dreg here, since the
10435 * sregs could use it. So set a flag, and do it after
10438 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
10439 dest_has_lvreg = TRUE;
/* ---- Handle the two source registers ---- */
10448 for (srcindex = 0; srcindex < 2; ++srcindex) {
10449 regtype = spec [(srcindex == 0) ? MONO_INST_SRC1 : MONO_INST_SRC2];
10450 sreg = srcindex == 0 ? ins->sreg1 : ins->sreg2;
10452 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
10453 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
10454 MonoInst *var = get_vreg_to_inst (cfg, sreg);
10455 MonoInst *load_ins;
10456 guint32 load_opcode;
10458 if (var->opcode == OP_REGVAR) {
10460 ins->sreg1 = var->dreg;
10462 ins->sreg2 = var->dreg;
10466 g_assert (var->opcode == OP_REGOFFSET);
10468 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
10470 g_assert (load_opcode != OP_LOADV_MEMBASE);
10472 if (vreg_to_lvreg [sreg]) {
10473 /* The variable is already loaded to an lvreg */
10474 if (G_UNLIKELY (cfg->verbose_level > 2))
10475 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
10477 ins->sreg1 = vreg_to_lvreg [sreg];
10479 ins->sreg2 = vreg_to_lvreg [sreg];
10483 /* Try to fuse the load into the instruction */
10484 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
10485 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
10486 ins->inst_basereg = var->inst_basereg;
10487 ins->inst_offset = var->inst_offset;
10488 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
10489 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
10490 ins->sreg2 = var->inst_basereg;
10491 ins->inst_offset = var->inst_offset;
10493 if (MONO_IS_REAL_MOVE (ins)) {
10494 ins->opcode = OP_NOP;
10497 //printf ("%d ", srcindex); mono_print_ins (ins);
10499 sreg = alloc_dreg (cfg, stacktypes [regtype]);
10501 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
10502 if (var->dreg == prev_dreg) {
10504 * sreg refers to the value loaded by the load
10505 * emitted below, but we need to use ins->dreg
10506 * since it refers to the store emitted earlier.
10510 vreg_to_lvreg [var->dreg] = sreg;
10511 g_assert (lvregs_len < 1024);
10512 lvregs [lvregs_len ++] = var->dreg;
10521 if (regtype == 'l') {
10522 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
10523 mono_bblock_insert_before_ins (bb, ins, load_ins);
10524 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
10525 mono_bblock_insert_before_ins (bb, ins, load_ins);
10528 #if SIZEOF_REGISTER == 4
10529 g_assert (load_opcode != OP_LOADI8_MEMBASE);
10531 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
10532 mono_bblock_insert_before_ins (bb, ins, load_ins);
/* All sregs processed: now it is safe to record the dreg's lvreg. */
10538 if (dest_has_lvreg) {
10539 vreg_to_lvreg [prev_dreg] = ins->dreg;
10540 g_assert (lvregs_len < 1024);
10541 lvregs [lvregs_len ++] = prev_dreg;
10542 dest_has_lvreg = FALSE;
10546 tmp_reg = ins->dreg;
10547 ins->dreg = ins->sreg2;
10548 ins->sreg2 = tmp_reg;
/* Calls clobber the lvregs, so drop the whole cache. */
10551 if (MONO_IS_CALL (ins)) {
10552 /* Clear vreg_to_lvreg array */
10553 for (i = 0; i < lvregs_len; i++)
10554 vreg_to_lvreg [lvregs [i]] = 0;
10558 if (cfg->verbose_level > 2)
10559 mono_print_ins_index (1, ins);
10566 * - use 'iadd' instead of 'int_add'
10567 * - handling ovf opcodes: decompose in method_to_ir.
10568 * - unify iregs/fregs
10569 * -> partly done, the missing parts are:
10570 * - a more complete unification would involve unifying the hregs as well, so
10571 * code wouldn't need if (fp) all over the place. but that would mean the hregs
10572 * would no longer map to the machine hregs, so the code generators would need to
10573 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
10574 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
10575 * fp/non-fp branches speeds it up by about 15%.
10576 * - use sext/zext opcodes instead of shifts
10578 * - get rid of TEMPLOADs if possible and use vregs instead
10579 * - clean up usage of OP_P/OP_ opcodes
10580 * - cleanup usage of DUMMY_USE
10581 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
10583 * - set the stack type and allocate a dreg in the EMIT_NEW macros
10584 * - get rid of all the <foo>2 stuff when the new JIT is ready.
10585 * - make sure handle_stack_args () is called before the branch is emitted
10586 * - when the new IR is done, get rid of all unused stuff
10587 * - COMPARE/BEQ as separate instructions or unify them ?
10588 * - keeping them separate allows specialized compare instructions like
10589 * compare_imm, compare_membase
10590 * - most back ends unify fp compare+branch, fp compare+ceq
10591 * - integrate mono_save_args into inline_method
10592 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRACH_BLOCK2
10593 * - handle long shift opts on 32 bit platforms somehow: they require
10594 * 3 sregs (2 for arg1 and 1 for arg2)
10595 * - make byref a 'normal' type.
10596 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
10597 * variable if needed.
10598 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
10599 * like inline_method.
10600 * - remove inlining restrictions
10601 * - fix LNEG and enable cfold of INEG
10602 * - generalize x86 optimizations like ldelema as a peephole optimization
10603 * - add store_mem_imm for amd64
10604 * - optimize the loading of the interruption flag in the managed->native wrappers
10605 * - avoid special handling of OP_NOP in passes
10606 * - move code inserting instructions into one function/macro.
10607 * - try a coalescing phase after liveness analysis
10608 * - add float -> vreg conversion + local optimizations on !x86
10609 * - figure out how to handle decomposed branches during optimizations, ie.
10610 * compare+branch, op_jump_table+op_br etc.
10611 * - promote RuntimeXHandles to vregs
10612 * - vtype cleanups:
10613 * - add a NEW_VARLOADA_VREG macro
10614 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
10615 * accessing vtype fields.
10616 * - get rid of I8CONST on 64 bit platforms
10617 * - dealing with the increase in code size due to branches created during opcode
10619 * - use extended basic blocks
10620 * - all parts of the JIT
10621 * - handle_global_vregs () && local regalloc
10622 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
10623 * - sources of increase in code size:
10626 * - isinst and castclass
10627 * - lvregs not allocated to global registers even if used multiple times
10628 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
10630 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
10631 * - add all micro optimizations from the old JIT
10632 * - put tree optimizations into the deadce pass
10633 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
10634 * specific function.
10635 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
10636 * fcompare + branchCC.
10637 * - create a helper function for allocating a stack slot, taking into account
10638 * MONO_CFG_HAS_SPILLUP.
10640 * - merge the ia64 switch changes.
10641 * - optimize mono_regstate2_alloc_int/float.
10642 * - fix the pessimistic handling of variables accessed in exception handler blocks.
10643 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
10644 * parts of the tree could be separated by other instructions, killing the tree
10645 * arguments, or stores killing loads etc. Also, should we fold loads into other
10646 * instructions if the result of the load is used multiple times ?
10647 * - make the REM_IMM optimization in mini-x86.c arch-independent.
10648 * - LAST MERGE: 108395.
10649 * - when returning vtypes in registers, generate IR and append it to the end of the
10650 * last bb instead of doing it in the epilog.
10651 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
10659 - When to decompose opcodes:
10660 - earlier: this makes some optimizations hard to implement, since the low level IR
10661 no longer contains the necessary information. But it is easier to do.
10662 - later: harder to implement, enables more optimizations.
10663 - Branches inside bblocks:
10664 - created when decomposing complex opcodes.
10665 - branches to another bblock: harmless, but not tracked by the branch
10666 optimizations, so need to branch to a label at the start of the bblock.
10667 - branches to inside the same bblock: very problematic, trips up the local
10668 reg allocator. Can be fixed by splitting the current bblock, but that is a
10669 complex operation, since some local vregs can become global vregs etc.
10670 - Local/global vregs:
10671 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
10672 local register allocator.
10673 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
10674 structure, created by mono_create_var (). Assigned to hregs or the stack by
10675 the global register allocator.
10676 - When to do optimizations like alu->alu_imm:
10677 - earlier -> saves work later on since the IR will be smaller/simpler
10678 - later -> can work on more instructions
10679 - Handling of valuetypes:
10680 - When a vtype is pushed on the stack, a new temporary is created, an
10681 instruction computing its address (LDADDR) is emitted and pushed on
10682 the stack. Need to optimize cases when the vtype is used immediately as in
10683 argument passing, stloc etc.
10684 - Instead of the to_end stuff in the old JIT, simply call the function handling
10685 the values on the stack before emitting the last instruction of the bb.
10688 #endif /* DISABLE_JIT */