2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
22 #ifdef HAVE_SYS_TIME_H
26 #ifdef HAVE_VALGRIND_MEMCHECK_H
27 #include <valgrind/memcheck.h>
30 #include <mono/metadata/assembly.h>
31 #include <mono/metadata/loader.h>
32 #include <mono/metadata/tabledefs.h>
33 #include <mono/metadata/class.h>
34 #include <mono/metadata/object.h>
35 #include <mono/metadata/exception.h>
36 #include <mono/metadata/opcodes.h>
37 #include <mono/metadata/mono-endian.h>
38 #include <mono/metadata/tokentype.h>
39 #include <mono/metadata/tabledefs.h>
40 #include <mono/metadata/marshal.h>
41 #include <mono/metadata/debug-helpers.h>
42 #include <mono/metadata/mono-debug.h>
43 #include <mono/metadata/gc-internal.h>
44 #include <mono/metadata/security-manager.h>
45 #include <mono/metadata/threads-types.h>
46 #include <mono/metadata/security-core-clr.h>
47 #include <mono/metadata/monitor.h>
48 #include <mono/utils/mono-compiler.h>
55 #include "jit-icalls.h"
/*
 * NOTE(review): this copy of the file is an elided extract — many original
 * lines (macro tails, closing braces, case labels) are missing, so several
 * macros and functions below appear truncated.  Comments describe only what
 * is visible.
 */
/* Inliner cost/size thresholds. */
57 #define BRANCH_COST 100
58 #define INLINE_LENGTH_LIMIT 20
/* The failure macros below expand inside the IR-building code and rely on
 * the locals 'cfg', 'method', 'cil_method' and 'field' being in scope at
 * the use site; the *_FAILURE ones record an exception on the cfg and jump
 * to the function's 'exception_exit' label. */
59 #define INLINE_FAILURE do {\
60 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE))\
63 #define CHECK_CFG_EXCEPTION do {\
64 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
67 #define METHOD_ACCESS_FAILURE do { \
68 char *method_fname = mono_method_full_name (method, TRUE); \
69 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
70 cfg->exception_type = MONO_EXCEPTION_METHOD_ACCESS; \
71 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
72 g_free (method_fname); \
73 g_free (cil_method_fname); \
74 goto exception_exit; \
76 #define FIELD_ACCESS_FAILURE do { \
77 char *method_fname = mono_method_full_name (method, TRUE); \
78 char *field_fname = mono_field_full_name (field); \
79 cfg->exception_type = MONO_EXCEPTION_FIELD_ACCESS; \
80 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
81 g_free (method_fname); \
82 g_free (field_fname); \
83 goto exception_exit; \
85 #define GENERIC_SHARING_FAILURE(opcode) do { \
86 if (cfg->generic_sharing_context) { \
87 if (cfg->verbose_level > 2) \
88 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
89 cfg->exception_type = MONO_EXCEPTION_GENERIC_SHARING_FAILED; \
90 goto exception_exit; \
94 /* Determine whether 'ins' represents a load of the 'this' argument */
95 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
/* Forward declarations for helpers defined later in this file, and
 * prototypes/externs for helpers shared with other mini files. */
97 static int ldind_to_load_membase (int opcode);
98 static int stind_to_store_membase (int opcode);
100 int mono_op_to_op_imm (int opcode);
101 int mono_op_to_op_imm_noemul (int opcode);
103 MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
104 void mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native);
105 void mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass);
107 /* helper methods signature */
108 extern MonoMethodSignature *helper_sig_class_init_trampoline;
109 extern MonoMethodSignature *helper_sig_domain_get;
110 extern MonoMethodSignature *helper_sig_generic_class_init_trampoline;
111 extern MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
112 extern MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline;
115 * Instruction metadata
/* MINI_OP expands each opcode listed in mini-ops.h into its dest/src1/src2
 * register-class metadata entries (the enclosing array declaration is
 * elided in this extract). */
120 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2,
126 #if SIZEOF_REGISTER == 8
131 /* keep in sync with the enum in mini.h */
134 #include "mini-ops.h"
138 extern GHashTable *jit_icall_name_hash;
/* Initialize the live-range bookkeeping of variable info 'vi' (tail of the
 * macro is elided here). */
140 #define MONO_INIT_VARINFO(vi,id) do { \
141 (vi)->range.first_use.pos.bid = 0xffff; \
/*
 * Public wrappers around the static per-cfg virtual-register allocators
 * (alloc_ireg () etc.): integer, float, pointer-sized, and stack-type-driven
 * destination registers respectively.
 */
147 mono_alloc_ireg (MonoCompile *cfg)
149 return alloc_ireg (cfg);
153 mono_alloc_freg (MonoCompile *cfg)
155 return alloc_freg (cfg);
159 mono_alloc_preg (MonoCompile *cfg)
161 return alloc_preg (cfg);
165 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
167 return alloc_dreg (cfg, stack_type);
/*
 * mono_type_to_regmove:
 *
 *   Pick the opcode used to move a value of @type between vregs.
 * Enums decay to their underlying base type and generic instances to their
 * container class before the decision is made (per the g_error below, this
 * was historically called 'type_to_regstore').
 */
171 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
177 switch (type->type) {
180 case MONO_TYPE_BOOLEAN:
192 case MONO_TYPE_FNPTR:
194 case MONO_TYPE_CLASS:
195 case MONO_TYPE_STRING:
196 case MONO_TYPE_OBJECT:
197 case MONO_TYPE_SZARRAY:
198 case MONO_TYPE_ARRAY:
202 #if SIZEOF_REGISTER == 8
211 case MONO_TYPE_VALUETYPE:
/* enums are moved like their underlying integral type */
212 if (type->data.klass->enumtype) {
213 type = type->data.klass->enum_basetype;
216 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
219 case MONO_TYPE_TYPEDBYREF:
221 case MONO_TYPE_GENERICINST:
222 type = &type->data.generic_class->container_class->byval_arg;
226 g_assert (cfg->generic_sharing_context);
229 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/*
 * mono_print_bb:
 *
 *   Debugging aid: print @bb's predecessor and successor edges (with block
 * numbers and depth-first numbers) followed by every instruction in the
 * block, prefixed by @msg.
 */
235 mono_print_bb (MonoBasicBlock *bb, const char *msg)
240 printf ("\n%s %d: [IN: ", msg, bb->block_num);
241 for (i = 0; i < bb->in_count; ++i)
242 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
244 for (i = 0; i < bb->out_count; ++i)
245 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
247 for (tree = bb->code; tree; tree = tree->next)
248 mono_print_ins_index (-1, tree);
252 * Can't put this at the beginning, since other files reference stuff from this
/* Break into the debugger on unverified IL when the debug option is set,
 * otherwise jump to the enclosing function's 'unverified' label. */
257 #define UNVERIFIED do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto unverified; } while (0)
/* Look up the basic block starting at IL address 'ip', lazily creating and
 * registering it; bounds-checks 'ip' against the method body. */
259 #define GET_BBLOCK(cfg,tblock,ip) do { \
260 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
262 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
263 NEW_BBLOCK (cfg, (tblock)); \
264 (tblock)->cil_code = (ip); \
265 ADD_BBLOCK (cfg, (tblock)); \
269 #if defined(__i386__) || defined(__x86_64__)
270 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
271 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
272 (dest)->dreg = alloc_preg ((cfg)); \
273 (dest)->sreg1 = (sr1); \
274 (dest)->sreg2 = (sr2); \
275 (dest)->inst_imm = (imm); \
276 (dest)->backend.shift_amount = (shift); \
277 MONO_ADD_INS ((cfg)->cbb, (dest)); \
281 #if SIZEOF_REGISTER == 8
282 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
283 /* FIXME: Need to add many more cases */ \
284 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
286 int dr = alloc_preg (cfg); \
287 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
288 (ins)->sreg2 = widen->dreg; \
292 #define ADD_WIDEN_OP(ins, arg1, arg2)
/* Emit a type-checked binary (ADD_BINOP), unary (ADD_UNOP) or conditional
 * branch (ADD_BINCOND) operation on the top eval-stack value(s).  These
 * expand inside the main IL-decoding loop and use its locals: 'ins', 'sp',
 * 'bblock', 'ip', 'target', 'stack_start', 'start_new_bblock'.
 * type_from_op () resolves the stack result type and specializes the
 * generic opcode; ADD_BINCOND additionally wires up the true/false CFG
 * edges and flushes pending stack values via handle_stack_args (). */
295 #define ADD_BINOP(op) do { \
296 MONO_INST_NEW (cfg, ins, (op)); \
298 ins->sreg1 = sp [0]->dreg; \
299 ins->sreg2 = sp [1]->dreg; \
300 type_from_op (ins, sp [0], sp [1]); \
302 /* Have to insert a widening op */ \
303 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
304 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
305 MONO_ADD_INS ((cfg)->cbb, (ins)); \
307 mono_decompose_opcode ((cfg), (ins)); \
310 #define ADD_UNOP(op) do { \
311 MONO_INST_NEW (cfg, ins, (op)); \
313 ins->sreg1 = sp [0]->dreg; \
314 type_from_op (ins, sp [0], NULL); \
316 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
317 MONO_ADD_INS ((cfg)->cbb, (ins)); \
319 mono_decompose_opcode (cfg, ins); \
322 #define ADD_BINCOND(next_block) do { \
325 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
326 cmp->sreg1 = sp [0]->dreg; \
327 cmp->sreg2 = sp [1]->dreg; \
328 type_from_op (cmp, sp [0], sp [1]); \
330 type_from_op (ins, sp [0], sp [1]); \
331 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
332 GET_BBLOCK (cfg, tblock, target); \
333 link_bblock (cfg, bblock, tblock); \
334 ins->inst_true_bb = tblock; \
335 if ((next_block)) { \
336 link_bblock (cfg, bblock, (next_block)); \
337 ins->inst_false_bb = (next_block); \
338 start_new_bblock = 1; \
340 GET_BBLOCK (cfg, tblock, ip); \
341 link_bblock (cfg, bblock, tblock); \
342 ins->inst_false_bb = tblock; \
343 start_new_bblock = 2; \
345 if (sp != stack_start) { \
346 handle_stack_args (cfg, stack_start, sp - stack_start); \
347 CHECK_UNVERIFIABLE (cfg); \
349 MONO_ADD_INS (bblock, cmp); \
350 MONO_ADD_INS (bblock, ins); \
354 * link_bblock: Links two basic blocks
356 * links two basic blocks in the control flow graph, the 'from'
357 * argument is the starting block and the 'to' argument is the block
358 * the control flow ends to after 'from'.
361 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
363 MonoBasicBlock **newa;
/* debug printout of the edge being added (its guard condition is elided
 * in this extract) */
367 if (from->cil_code) {
369 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
371 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
374 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
376 printf ("edge from entry to exit\n");
/* nothing to do if the edge already exists */
381 for (i = 0; i < from->out_count; ++i) {
382 if (to == from->out_bb [i]) {
/* grow from->out_bb by one entry (mempool arrays are copy-on-grow) */
388 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
389 for (i = 0; i < from->out_count; ++i) {
390 newa [i] = from->out_bb [i];
398 for (i = 0; i < to->in_count; ++i) {
399 if (from == to->in_bb [i]) {
/* grow to->in_bb by one entry, mirroring the out_bb handling above */
405 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
406 for (i = 0; i < to->in_count; ++i) {
407 newa [i] = to->in_bb [i];
/* Public wrapper for the static link_bblock (). */
416 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
418 link_bblock (cfg, from, to);
422 * mono_find_block_region:
424 * We mark each basic block with a region ID. We use that to avoid BB
425 * optimizations when blocks are in different regions.
428 * A region token that encodes where this region is, and information
429 * about the clause owner for this block.
431 * The region encodes the try/catch/filter clause that owns this block
432 * as well as the type. -1 is a special value that represents a block
433 * that is in none of try/catch/filter.
436 mono_find_block_region (MonoCompile *cfg, int offset)
438 MonoMethod *method = cfg->method;
439 MonoMethodHeader *header = mono_method_get_header (method);
440 MonoExceptionClause *clause;
443 /* first search for handlers and filters */
444 for (i = 0; i < header->num_clauses; ++i) {
445 clause = &header->clauses [i];
446 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
447 (offset < (clause->handler_offset)))
/* clause index is biased by one in the token, presumably so that 0 can
 * mean "no region" — TODO confirm against the token consumers */
448 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
450 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
451 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
452 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
453 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
454 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
456 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
460 /* search the try blocks */
461 for (i = 0; i < header->num_clauses; ++i) {
462 clause = &header->clauses [i];
463 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
464 return ((i + 1) << 8) | clause->flags;
/*
 * mono_find_final_block:
 *
 *   Collect (as a GList) the handler basic blocks of clauses of kind @type
 * that contain @ip but not @target — i.e. the handlers that must run when
 * control transfers from @ip out of their protected region towards @target.
 */
471 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
473 MonoMethod *method = cfg->method;
474 MonoMethodHeader *header = mono_method_get_header (method);
475 MonoExceptionClause *clause;
476 MonoBasicBlock *handler;
480 for (i = 0; i < header->num_clauses; ++i) {
481 clause = &header->clauses [i];
482 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
483 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
484 if (clause->flags == type) {
485 handler = cfg->cil_offset_to_bb [clause->handler_offset];
487 res = g_list_append (res, handler);
/* Lazily create (and cache in cfg->spvars) the per-region variable used to
 * save the stack pointer. */
495 mono_create_spvar_for_region (MonoCompile *cfg, int region)
499 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
503 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
504 /* prevent it from being register allocated */
505 var->flags |= MONO_INST_INDIRECT;
507 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/* Return the exception-object variable for IL offset @offset, or NULL if
 * none has been created yet. */
511 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
513 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/* Lazily create (and cache in cfg->exvars) the variable holding the
 * exception object for the handler starting at IL offset @offset. */
517 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
521 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
525 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
526 /* prevent it from being register allocated */
527 var->flags |= MONO_INST_INDIRECT;
529 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
535 * Returns the type used in the eval stack when @type is loaded.
536 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
539 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
543 inst->klass = klass = mono_class_from_mono_type (type);
/* byrefs become managed pointers on the eval stack (the guarding byref
 * check is elided in this extract) */
545 inst->type = STACK_MP;
550 switch (type->type) {
552 inst->type = STACK_INV;
556 case MONO_TYPE_BOOLEAN:
562 inst->type = STACK_I4;
567 case MONO_TYPE_FNPTR:
568 inst->type = STACK_PTR;
570 case MONO_TYPE_CLASS:
571 case MONO_TYPE_STRING:
572 case MONO_TYPE_OBJECT:
573 case MONO_TYPE_SZARRAY:
574 case MONO_TYPE_ARRAY:
575 inst->type = STACK_OBJ;
579 inst->type = STACK_I8;
583 inst->type = STACK_R8;
585 case MONO_TYPE_VALUETYPE:
/* enums are loaded as their underlying base type */
586 if (type->data.klass->enumtype) {
587 type = type->data.klass->enum_basetype;
591 inst->type = STACK_VTYPE;
594 case MONO_TYPE_TYPEDBYREF:
595 inst->klass = mono_defaults.typed_reference_class;
596 inst->type = STACK_VTYPE;
598 case MONO_TYPE_GENERICINST:
/* generic instances decay to their container class and re-dispatch */
599 type = &type->data.generic_class->container_class->byval_arg;
602 case MONO_TYPE_MVAR :
603 /* FIXME: all the arguments must be references for now,
604 * later look inside cfg and see if the arg num is
607 g_assert (cfg->generic_sharing_context);
608 inst->type = STACK_OBJ;
611 g_error ("unknown type 0x%02x in eval stack type", type->type);
616 * The following tables are used to quickly validate the IL code in type_from_op ().
/* Result stack type of numeric binops, indexed [src1->type][src2->type];
 * STACK_INV marks invalid IL combinations. */
619 bin_num_table [STACK_MAX] [STACK_MAX] = {
620 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
621 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
622 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
623 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
624 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
625 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
626 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
627 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Result type of neg/not by operand stack type; this row is used as
 * neg_table in type_from_op () — its declaration line is elided here. */
632 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
635 /* reduce the size of this table */
/* Result type of integer-only binops (and/or/xor etc.). */
637 bin_int_table [STACK_MAX] [STACK_MAX] = {
638 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
639 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
640 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
641 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
642 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
643 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
644 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
645 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Comparability of stack-type pairs; nonzero means comparable (the exact
 * nonzero values 1-4 distinguish sub-cases, consumed by type_from_op ()). */
649 bin_comp_table [STACK_MAX] [STACK_MAX] = {
650 /* Inv i L p F & O vt */
652 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
653 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
654 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
655 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
656 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
657 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
658 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
661 /* reduce the size of this table */
/* Result type of shift operations, indexed [value][shift-amount]. */
663 shift_table [STACK_MAX] [STACK_MAX] = {
664 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
665 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
666 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
667 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
668 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
669 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
670 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
671 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
675 * Tables to map from the non-specific opcode to the matching
676 * type-specific opcode.
/* Each entry is the delta added to the generic CEE_/OP_ opcode to obtain
 * the type-specialized opcode for that stack type. */
678 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
680 binops_op_map [STACK_MAX] = {
681 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
684 /* handles from CEE_NEG to CEE_CONV_U8 */
686 unops_op_map [STACK_MAX] = {
687 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
690 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
692 ovfops_op_map [STACK_MAX] = {
693 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
696 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
698 ovf2ops_op_map [STACK_MAX] = {
699 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
702 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
704 ovf3ops_op_map [STACK_MAX] = {
705 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
708 /* handles from CEE_BEQ to CEE_BLT_UN */
710 beqops_op_map [STACK_MAX] = {
711 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
714 /* handles from CEE_CEQ to CEE_CLT_UN */
716 ceqops_op_map [STACK_MAX] = {
717 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
721 * Sets ins->type (the type on the eval stack) according to the
722 * type of the opcode and the arguments to it.
723 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
725 * FIXME: this function sets ins->type unconditionally in some cases, but
726 * it should set it to invalid for some types (a conv.x on an object)
729 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
/* dispatch on the generic opcode; each case computes the result stack type
 * via the validation tables above and specializes ins->opcode with the
 * matching *_op_map delta */
731 switch (ins->opcode) {
738 /* FIXME: check unverifiable args for STACK_MP */
739 ins->type = bin_num_table [src1->type] [src2->type];
740 ins->opcode += binops_op_map [ins->type];
747 ins->type = bin_int_table [src1->type] [src2->type];
748 ins->opcode += binops_op_map [ins->type];
753 ins->type = shift_table [src1->type] [src2->type];
754 ins->opcode += binops_op_map [ins->type];
/* comparisons: pick long/float/int compare based on the operand type */
759 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
760 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
761 ins->opcode = OP_LCOMPARE;
762 else if (src1->type == STACK_R8)
763 ins->opcode = OP_FCOMPARE;
765 ins->opcode = OP_ICOMPARE;
767 case OP_ICOMPARE_IMM:
768 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
769 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
770 ins->opcode = OP_LCOMPARE_IMM;
782 ins->opcode += beqops_op_map [src1->type];
785 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
786 ins->opcode += ceqops_op_map [src1->type];
/* unsigned compares: only table entry value 1 is acceptable here */
792 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
793 ins->opcode += ceqops_op_map [src1->type];
797 ins->type = neg_table [src1->type];
798 ins->opcode += unops_op_map [ins->type];
801 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
802 ins->type = src1->type;
804 ins->type = STACK_INV;
805 ins->opcode += unops_op_map [ins->type];
811 ins->type = STACK_I4;
812 ins->opcode += unops_op_map [src1->type];
815 ins->type = STACK_R8;
816 switch (src1->type) {
819 ins->opcode = OP_ICONV_TO_R_UN;
822 ins->opcode = OP_LCONV_TO_R_UN;
/* checked (overflow-raising) conversions to 32-bit integers */
826 case CEE_CONV_OVF_I1:
827 case CEE_CONV_OVF_U1:
828 case CEE_CONV_OVF_I2:
829 case CEE_CONV_OVF_U2:
830 case CEE_CONV_OVF_I4:
831 case CEE_CONV_OVF_U4:
832 ins->type = STACK_I4;
833 ins->opcode += ovf3ops_op_map [src1->type];
835 case CEE_CONV_OVF_I_UN:
836 case CEE_CONV_OVF_U_UN:
837 ins->type = STACK_PTR;
838 ins->opcode += ovf2ops_op_map [src1->type];
840 case CEE_CONV_OVF_I1_UN:
841 case CEE_CONV_OVF_I2_UN:
842 case CEE_CONV_OVF_I4_UN:
843 case CEE_CONV_OVF_U1_UN:
844 case CEE_CONV_OVF_U2_UN:
845 case CEE_CONV_OVF_U4_UN:
846 ins->type = STACK_I4;
847 ins->opcode += ovf2ops_op_map [src1->type];
850 ins->type = STACK_PTR;
851 switch (src1->type) {
853 ins->opcode = OP_ICONV_TO_U;
857 #if SIZEOF_REGISTER == 8
858 ins->opcode = OP_LCONV_TO_U;
860 ins->opcode = OP_MOVE;
864 ins->opcode = OP_LCONV_TO_U;
867 ins->opcode = OP_FCONV_TO_U;
873 ins->type = STACK_I8;
874 ins->opcode += unops_op_map [src1->type];
876 case CEE_CONV_OVF_I8:
877 case CEE_CONV_OVF_U8:
878 ins->type = STACK_I8;
879 ins->opcode += ovf3ops_op_map [src1->type];
881 case CEE_CONV_OVF_U8_UN:
882 case CEE_CONV_OVF_I8_UN:
883 ins->type = STACK_I8;
884 ins->opcode += ovf2ops_op_map [src1->type];
888 ins->type = STACK_R8;
889 ins->opcode += unops_op_map [src1->type];
892 ins->type = STACK_R8;
896 ins->type = STACK_I4;
897 ins->opcode += ovfops_op_map [src1->type];
902 ins->type = STACK_PTR;
903 ins->opcode += ovfops_op_map [src1->type];
/* overflow-checked arithmetic: R8 results are not allowed */
911 ins->type = bin_num_table [src1->type] [src2->type];
912 ins->opcode += ovfops_op_map [src1->type];
913 if (ins->type == STACK_R8)
914 ins->type = STACK_INV;
916 case OP_LOAD_MEMBASE:
917 ins->type = STACK_PTR;
/* sub-pointer-sized integer loads are typed STACK_PTR here */
919 case OP_LOADI1_MEMBASE:
920 case OP_LOADU1_MEMBASE:
921 case OP_LOADI2_MEMBASE:
922 case OP_LOADU2_MEMBASE:
923 case OP_LOADI4_MEMBASE:
924 case OP_LOADU4_MEMBASE:
925 ins->type = STACK_PTR;
927 case OP_LOADI8_MEMBASE:
928 ins->type = STACK_I8;
930 case OP_LOADR4_MEMBASE:
931 case OP_LOADR8_MEMBASE:
932 ins->type = STACK_R8;
935 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
/* managed pointers get object_class as a default klass */
939 if (ins->type == STACK_MP)
940 ins->klass = mono_defaults.object_class;
/* Eval-stack type produced per load kind; the declaration line of this
 * table is elided in this extract. */
945 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
951 param_table [STACK_MAX] [STACK_MAX] = {
/* Check that the stack values in @args are compatible with signature @sig.
 * NOTE(review): the return statements of this function are elided in this
 * extract — confirm its success/failure convention against the full file. */
956 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
960 switch (args->type) {
970 for (i = 0; i < sig->param_count; ++i) {
971 switch (args [i].type) {
975 if (!sig->params [i]->byref)
979 if (sig->params [i]->byref)
981 switch (sig->params [i]->type) {
982 case MONO_TYPE_CLASS:
983 case MONO_TYPE_STRING:
984 case MONO_TYPE_OBJECT:
985 case MONO_TYPE_SZARRAY:
986 case MONO_TYPE_ARRAY:
993 if (sig->params [i]->byref)
995 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1004 /*if (!param_table [args [i].type] [sig->params [i]->type])
1012 * When we need a pointer to the current domain many times in a method, we
1013 * call mono_domain_get() once and we store the result in a local variable.
1014 * This function returns the variable that represents the MonoDomain*.
1016 inline static MonoInst *
1017 mono_get_domainvar (MonoCompile *cfg)
1019 if (!cfg->domainvar)
1020 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1021 return cfg->domainvar;
1025 * The got_var contains the address of the Global Offset Table when AOT
/* Lazily create the GOT variable; only meaningful on architectures that
 * define MONO_ARCH_NEED_GOT_VAR and when compiling AOT. */
1028 inline static MonoInst *
1029 mono_get_got_var (MonoCompile *cfg)
1031 #ifdef MONO_ARCH_NEED_GOT_VAR
1032 if (!cfg->compile_aot)
1034 if (!cfg->got_var) {
1035 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1037 return cfg->got_var;
/* Lazily create the variable holding the runtime generic context / vtable;
 * only valid when compiling with generic sharing. */
1044 mono_get_vtable_var (MonoCompile *cfg)
1046 g_assert (cfg->generic_sharing_context);
1048 if (!cfg->rgctx_var) {
1049 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1050 /* force the var to be stack allocated */
1051 cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
1054 return cfg->rgctx_var;
/* Map an eval-stack type back to a representative MonoType. */
1058 type_from_stack_type (MonoInst *ins) {
1059 switch (ins->type) {
1060 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1061 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1062 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1063 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1065 return &ins->klass->this_arg;
1066 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1067 case STACK_VTYPE: return &ins->klass->byval_arg;
1069 g_error ("stack type %d to monotype not handled\n", ins->type);
/* Map a MonoType to its eval-stack type (inverse direction of the helper
 * above); enums/underlying types are resolved first. */
1074 static G_GNUC_UNUSED int
1075 type_to_stack_type (MonoType *t)
1077 switch (mono_type_get_underlying_type (t)->type) {
1080 case MONO_TYPE_BOOLEAN:
1083 case MONO_TYPE_CHAR:
1090 case MONO_TYPE_FNPTR:
1092 case MONO_TYPE_CLASS:
1093 case MONO_TYPE_STRING:
1094 case MONO_TYPE_OBJECT:
1095 case MONO_TYPE_SZARRAY:
1096 case MONO_TYPE_ARRAY:
1104 case MONO_TYPE_VALUETYPE:
1105 case MONO_TYPE_TYPEDBYREF:
1107 case MONO_TYPE_GENERICINST:
1108 if (mono_type_generic_inst_is_valuetype (t))
1114 g_assert_not_reached ();
/* Return the element class accessed by a ldelem/stelem-style opcode. */
1121 array_access_to_klass (int opcode)
1125 return mono_defaults.byte_class;
1127 return mono_defaults.uint16_class;
1130 return mono_defaults.int_class;
1133 return mono_defaults.sbyte_class;
1136 return mono_defaults.int16_class;
1139 return mono_defaults.int32_class;
1141 return mono_defaults.uint32_class;
1144 return mono_defaults.int64_class;
1147 return mono_defaults.single_class;
1150 return mono_defaults.double_class;
1151 case CEE_LDELEM_REF:
1152 case CEE_STELEM_REF:
1153 return mono_defaults.object_class;
1155 g_assert_not_reached ();
1161 * We try to share variables when possible
/* Get (or create) the local variable used to carry stack slot @slot across
 * a basic-block boundary; variables are cached per (slot, stack type) in
 * cfg->intvars so they can be shared between join points. */
1164 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1169 /* inlining can result in deeper stacks */
1170 if (slot >= mono_method_get_header (cfg->method)->max_stack)
1171 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1173 pos = ins->type - 1 + slot * STACK_MAX;
1175 switch (ins->type) {
1182 if ((vnum = cfg->intvars [pos]))
1183 return cfg->varinfo [vnum];
1184 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1185 cfg->intvars [pos] = res->inst_c0;
1188 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* Record an image/token pair under @key so AOT compilation can resolve the
 * reference later from just the image and token. */
1194 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1197 * Don't use this if a generic_context is set, since that means AOT can't
1198 * look up the method using just the image+token.
1199 * table == 0 means this is a reference made from a wrapper.
1201 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1202 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1203 jump_info_token->image = image;
1204 jump_info_token->token = token;
1205 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1210 * This function is called to handle items that are left on the evaluation stack
1211 * at basic block boundaries. What happens is that we save the values to local variables
1212 * and we reload them later when first entering the target basic block (with the
1213 * handle_loaded_temps () function).
1214 * A single joint point will use the same variables (stored in the array bb->out_stack or
1215 * bb->in_stack, if the basic block is before or after the joint point).
1217 * This function needs to be called _before_ emitting the last instruction of
1218 * the bb (i.e. before emitting a branch).
1219 * If the stack merge fails at a join point, cfg->unverifiable is set.
1222 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1225 MonoBasicBlock *bb = cfg->cbb;
1226 MonoBasicBlock *outb;
1227 MonoInst *inst, **locals;
1232 if (cfg->verbose_level > 3)
1233 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* first call for this bb: decide which variables carry the stack values */
1234 if (!bb->out_scount) {
1235 bb->out_scount = count;
1236 //printf ("bblock %d has out:", bb->block_num);
1238 for (i = 0; i < bb->out_count; ++i) {
1239 outb = bb->out_bb [i];
1240 /* exception handlers are linked, but they should not be considered for stack args */
1241 if (outb->flags & BB_EXCEPTION_HANDLER)
1243 //printf (" %d", outb->block_num);
/* reuse the in_stack of a successor that already has one */
1244 if (outb->in_stack) {
1246 bb->out_stack = outb->in_stack;
1252 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1253 for (i = 0; i < count; ++i) {
1255 * try to reuse temps already allocated for this purpose, if they occupy the same
1256 * stack slot and if they are of the same type.
1257 * This won't cause conflicts since if 'local' is used to
1258 * store one of the values in the in_stack of a bblock, then
1259 * the same variable will be used for the same outgoing stack
1261 * This doesn't work when inlining methods, since the bblocks
1262 * in the inlined methods do not inherit their in_stack from
1263 * the bblock they are inlined to. See bug #58863 for an
1266 if (cfg->inlined_method)
1267 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1269 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* propagate our out_stack to successors that have no in_stack yet, and
 * flag a verification failure on depth mismatch */
1274 for (i = 0; i < bb->out_count; ++i) {
1275 outb = bb->out_bb [i];
1276 /* exception handlers are linked, but they should not be considered for stack args */
1277 if (outb->flags & BB_EXCEPTION_HANDLER)
1279 if (outb->in_scount) {
1280 if (outb->in_scount != bb->out_scount) {
1281 cfg->unverifiable = TRUE;
1284 continue; /* check they are the same locals */
1286 outb->in_scount = count;
1287 outb->in_stack = bb->out_stack;
1290 locals = bb->out_stack;
/* spill each stack value into its carrier variable and replace the stack
 * entry with the variable */
1292 for (i = 0; i < count; ++i) {
1293 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1294 inst->cil_code = sp [i]->cil_code;
1295 sp [i] = locals [i];
1296 if (cfg->verbose_level > 3)
1297 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1301 * It is possible that the out bblocks already have in_stack assigned, and
1302 * the in_stacks differ. In this case, we will store to all the different
1309 /* Find a bblock which has a different in_stack */
1311 while (bindex < bb->out_count) {
1312 outb = bb->out_bb [bindex];
1313 /* exception handlers are linked, but they should not be considered for stack args */
1314 if (outb->flags & BB_EXCEPTION_HANDLER) {
1318 if (outb->in_stack != locals) {
1319 for (i = 0; i < count; ++i) {
1320 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1321 inst->cil_code = sp [i]->cil_code;
1322 sp [i] = locals [i];
1323 if (cfg->verbose_level > 3)
1324 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1326 locals = outb->in_stack;
1335 /* Emit code which loads interface_offsets [klass->interface_id]
1336 * The array is stored in memory before vtable.
1339 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1341 if (cfg->compile_aot) {
/* under AOT the interface id is not known at compile time, so it is
 * patched in via an AOT constant */
1342 int ioffset_reg = alloc_preg (cfg);
1343 int iid_reg = alloc_preg (cfg);
1345 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1346 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1347 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
/* JIT case: the slot sits at a known negative offset before the vtable */
1350 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
1355 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1356 * stored in "klass_reg" implements the interface "klass".
1359 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1361 int ibitmap_reg = alloc_preg (cfg);
1362 int ibitmap_byte_reg = alloc_preg (cfg);
1364 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap));
1366 if (cfg->compile_aot) {
/* compute bitmap [iid >> 3] & (1 << (iid & 7)) with the interface id
 * supplied by an AOT patch at load time */
1367 int iid_reg = alloc_preg (cfg);
1368 int shifted_iid_reg = alloc_preg (cfg);
1369 int ibitmap_byte_address_reg = alloc_preg (cfg);
1370 int masked_iid_reg = alloc_preg (cfg);
1371 int iid_one_bit_reg = alloc_preg (cfg);
1372 int iid_bit_reg = alloc_preg (cfg);
1373 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1374 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1375 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1376 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1377 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1378 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1379 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1380 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT case: the byte index and bit mask are compile-time constants */
1382 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1383 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1388 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1389 * stored in "vtable_reg" implements the interface "klass".
1392 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1394 int ibitmap_reg = alloc_preg (cfg);
1395 int ibitmap_byte_reg = alloc_preg (cfg);
1397 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap));
1399 if (cfg->compile_aot) {
1400 int iid_reg = alloc_preg (cfg);
1401 int shifted_iid_reg = alloc_preg (cfg);
1402 int ibitmap_byte_address_reg = alloc_preg (cfg);
1403 int masked_iid_reg = alloc_preg (cfg);
1404 int iid_one_bit_reg = alloc_preg (cfg);
1405 int iid_bit_reg = alloc_preg (cfg);
1406 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1407 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, shifted_iid_reg, iid_reg, 3);
1408 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1409 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1410 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, masked_iid_reg, iid_reg, 7);
1411 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1412 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1413 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
1415 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1416 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1421 * Emit code which checks whenever the interface id of @klass is smaller than
1422 * than the value given by max_iid_reg.
1425 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1426 MonoBasicBlock *false_target)
1428 if (cfg->compile_aot) {
1429 int iid_reg = alloc_preg (cfg);
1430 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1431 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1434 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1436 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1438 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1441 /* Same as above, but obtains max_iid from a vtable */
1443 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1444 MonoBasicBlock *false_target)
1446 int max_iid_reg = alloc_preg (cfg);
1448 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1449 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1452 /* Same as above, but obtains max_iid from a klass */
1454 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1455 MonoBasicBlock *false_target)
1457 int max_iid_reg = alloc_preg (cfg);
1459 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1460 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1464 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1466 int idepth_reg = alloc_preg (cfg);
1467 int stypes_reg = alloc_preg (cfg);
1468 int stype = alloc_preg (cfg);
1470 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1471 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1472 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1473 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1475 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1476 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1477 if (cfg->compile_aot) {
1478 int const_reg = alloc_preg (cfg);
1479 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1480 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1482 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1484 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
1488 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1490 int intf_reg = alloc_preg (cfg);
1492 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1493 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1494 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1496 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1498 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1502 * Variant of the above that takes a register to the class, not the vtable.
1505 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1507 int intf_bit_reg = alloc_preg (cfg);
1509 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1510 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1511 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1513 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1515 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1519 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1521 if (cfg->compile_aot) {
1522 int const_reg = alloc_preg (cfg);
1523 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1524 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1526 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1528 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1532 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1534 if (cfg->compile_aot) {
1535 int const_reg = alloc_preg (cfg);
1536 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1537 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1539 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1541 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
/*
 * mini_emit_castclass:
 *
 *   Emit a castclass check of the object in OBJ_REG (class pointer in
 * KLASS_REG) against the compile-time class KLASS, raising
 * InvalidCastException on failure.  OBJECT_IS_NULL is the block to branch to
 * when the cast trivially succeeds.  NOTE(review): this extract is
 * line-sampled; the enclosing rank/non-rank if-else braces are elided here.
 */
1545 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
/* Array case: the ranks must match exactly, then the element classes are
 * checked (recursively for arrays of arrays). */
1548 int rank_reg = alloc_preg (cfg);
1549 int eclass_reg = alloc_preg (cfg);
1551 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
1552 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1553 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1554 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1555 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
/* Special element-class cases mirror the runtime's array covariance rules
 * for object/enum-base/enum element types. */
1556 if (klass->cast_class == mono_defaults.object_class) {
1557 int parent_reg = alloc_preg (cfg);
1558 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
1559 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1560 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1561 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1562 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1563 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1564 } else if (klass->cast_class == mono_defaults.enum_class) {
1565 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1566 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1567 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
// Pass -1 as obj_reg to skip the vector check below for arrays of arrays
1570 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1573 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
/* Casting to a vector (SZARRAY): the object's bounds must be NULL. */
1574 /* Check that the object is a vector too */
1575 int bounds_reg = alloc_preg (cfg);
1576 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
1577 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1578 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-array case: look up KLASS in the supertypes table at its idepth. */
1581 int idepth_reg = alloc_preg (cfg);
1582 int stypes_reg = alloc_preg (cfg);
1583 int stype = alloc_preg (cfg);
1585 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1586 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1587 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1588 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1590 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1591 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1592 mini_emit_class_check (cfg, stype, klass);
/*
 * mini_emit_memset:
 *
 *   Emit inline IR which zeroes SIZE bytes at DESTREG + OFFSET with the given
 * alignment.  Only VAL == 0 is supported (asserted below).  Small aligned
 * sizes use a single immediate store; larger regions use a zeroed register
 * with the widest stores the alignment allows.  NOTE(review): line-sampled
 * extract — the switch/case labels and while-loop scaffolding are elided.
 */
1597 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1601 g_assert (val == 0);
/* Fast path: one immediate store when the whole region fits one aligned slot. */
1606 if ((size <= 4) && (size <= align)) {
1609 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1612 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1615 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1617 #if SIZEOF_REGISTER == 8
1619 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* Slow path: materialize the zero value in a register once. */
1625 val_reg = alloc_preg (cfg);
1627 if (SIZEOF_REGISTER == 8)
1628 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1630 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
/* Under-aligned destination: fall back to byte stores. */
1633 /* This could be optimized further if necessary */
1635 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1642 #if !NO_UNALIGNED_ACCESS
/* 64-bit targets: an initial 4-byte store aligns OFFSET to 8, then 8-byte
 * stores cover the bulk. */
1643 if (SIZEOF_REGISTER == 8) {
1645 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1650 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
/* Remaining tail: 4-, 2-, then 1-byte stores. */
1658 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1663 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1668 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1674 #endif /* DISABLE_JIT */
/*
 * mini_emit_memcpy:
 *
 *   Emit inline IR which copies SIZE bytes from SRCREG + SOFFSET to
 * DESTREG + DOFFSET, using the widest load/store pairs the alignment allows.
 * NOTE(review): line-sampled extract — the while-loop headers and the
 * offset/size bookkeeping lines are elided.
 */
1677 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
/* Under-aligned copy: byte-by-byte load/store pairs. */
1685 /* This could be optimized further if necessary */
1687 cur_reg = alloc_preg (cfg);
1688 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1689 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1696 #if !NO_UNALIGNED_ACCESS
/* 64-bit targets: copy 8 bytes at a time while possible. */
1697 if (SIZEOF_REGISTER == 8) {
1699 cur_reg = alloc_preg (cfg);
1700 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1701 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
/* Remaining tail: 4-, 2-, then 1-byte copies. */
1710 cur_reg = alloc_preg (cfg);
1711 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1712 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1718 cur_reg = alloc_preg (cfg);
1719 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1720 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1726 cur_reg = alloc_preg (cfg);
1727 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1728 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * ret_type_to_call_opcode:
 *
 *   Map a method return TYPE to the proper call opcode, choosing among the
 * register-indirect (CALLI), virtual (VIRT) and direct variants, and among
 * the void/int/long/float/vtype families.  GSCTX resolves generic-sharing
 * type variables first.  NOTE(review): line-sampled extract — many case
 * labels and the goto-based enum/genericinst re-dispatch lines are elided.
 */
1738 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
/* byref returns are pointer-sized, so the plain CALL family applies. */
1741 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1744 type = mini_get_basic_type_from_generic (gsctx, type);
1745 switch (type->type) {
1746 case MONO_TYPE_VOID:
1747 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
/* All integral types up to 32 bits use the int CALL family. */
1750 case MONO_TYPE_BOOLEAN:
1753 case MONO_TYPE_CHAR:
1756 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
/* Native int / pointer / function pointer. */
1760 case MONO_TYPE_FNPTR:
1761 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
/* Reference types are pointer-sized as well. */
1762 case MONO_TYPE_CLASS:
1763 case MONO_TYPE_STRING:
1764 case MONO_TYPE_OBJECT:
1765 case MONO_TYPE_SZARRAY:
1766 case MONO_TYPE_ARRAY:
1767 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
/* 64-bit integers -> LCALL family; floating point -> FCALL family. */
1770 return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
1773 return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
1774 case MONO_TYPE_VALUETYPE:
/* Enums re-dispatch on their underlying base type; real valuetypes use
 * the VCALL family. */
1775 if (type->data.klass->enumtype) {
1776 type = type->data.klass->enum_basetype;
1779 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1780 case MONO_TYPE_TYPEDBYREF:
1781 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1782 case MONO_TYPE_GENERICINST:
/* Re-dispatch on the generic container's open type. */
1783 type = &type->data.generic_class->container_class->byval_arg;
1786 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
/*
 * target_type_is_incompatible:
 * @cfg: MonoCompile context
 * @target: the type of the store destination (local, field, ...)
 * @arg: the evaluation-stack item being stored
 *
 * Check that the item @arg on the evaluation stack can be stored
 * in the target type (can be a local, or field, etc).
 * The cfg arg can be used to check if we need verification or just
 * validity checks.
 *
 * Returns: non-0 value if arg can't be stored on a target.
 *
 * NOTE(review): line-sampled extract — the return-0/return-1 statements and
 * several case labels between the visible lines are elided.
 */
1803 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1805 MonoType *simple_type;
/* Byref targets only accept managed pointers (to a matching class) or
 * unmanaged pointers. */
1808 if (target->byref) {
1809 /* FIXME: check that the pointed to types match */
1810 if (arg->type == STACK_MP)
1811 return arg->klass != mono_class_from_mono_type (target);
1812 if (arg->type == STACK_PTR)
/* Strip enum/generic wrappers before the per-type checks below. */
1817 simple_type = mono_type_get_underlying_type (target);
1818 switch (simple_type->type) {
1819 case MONO_TYPE_VOID:
/* <= 32-bit integral targets accept int32 or native-int stack entries. */
1823 case MONO_TYPE_BOOLEAN:
1826 case MONO_TYPE_CHAR:
1829 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1833 /* STACK_MP is needed when setting pinned locals */
1834 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1839 case MONO_TYPE_FNPTR:
1840 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
/* Reference-type targets require an object reference on the stack. */
1843 case MONO_TYPE_CLASS:
1844 case MONO_TYPE_STRING:
1845 case MONO_TYPE_OBJECT:
1846 case MONO_TYPE_SZARRAY:
1847 case MONO_TYPE_ARRAY:
1848 if (arg->type != STACK_OBJ)
1850 /* FIXME: check type compatibility */
1854 if (arg->type != STACK_I8)
1859 if (arg->type != STACK_R8)
/* Valuetype targets must match the stack item's exact class. */
1862 case MONO_TYPE_VALUETYPE:
1863 if (arg->type != STACK_VTYPE)
1865 klass = mono_class_from_mono_type (simple_type);
1866 if (klass != arg->klass)
1869 case MONO_TYPE_TYPEDBYREF:
1870 if (arg->type != STACK_VTYPE)
1872 klass = mono_class_from_mono_type (simple_type);
1873 if (klass != arg->klass)
/* Generic instantiations: valuetype instantiations behave like
 * valuetypes, reference instantiations like object references. */
1876 case MONO_TYPE_GENERICINST:
1877 if (mono_type_generic_inst_is_valuetype (simple_type)) {
1878 if (arg->type != STACK_VTYPE)
1880 klass = mono_class_from_mono_type (simple_type);
1881 if (klass != arg->klass)
1885 if (arg->type != STACK_OBJ)
1887 /* FIXME: check type compatibility */
/* Type variables only occur under generic sharing and are treated as
 * references for now. */
1891 case MONO_TYPE_MVAR:
1892 /* FIXME: all the arguments must be references for now,
1893 * later look inside cfg and see if the arg num is
1894 * really a reference
1896 g_assert (cfg->generic_sharing_context);
1897 if (arg->type != STACK_OBJ)
1901 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
/*
 * check_call_signature:
 *
 * Prepare arguments for passing to a function call.
 * Return a non-zero value if the arguments can't be passed to the given
 * signature.
 * The type checks are not yet complete and some conversions may need
 * casts on 32 or 64 bit architectures.
 *
 * FIXME: implement this using target_type_is_incompatible ()
 *
 * NOTE(review): line-sampled extract — the return statements and several
 * case labels between the visible lines are elided.
 */
1916 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
1918 MonoType *simple_type;
/* An instance call needs a reference or pointer-like 'this'. */
1922 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
1926 for (i = 0; i < sig->param_count; ++i) {
/* Byref parameters accept managed or unmanaged pointers only. */
1927 if (sig->params [i]->byref) {
1928 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
1932 simple_type = sig->params [i];
1933 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
1935 switch (simple_type->type) {
1936 case MONO_TYPE_VOID:
/* <= 32-bit integral parameters accept int32 or native-int entries. */
1941 case MONO_TYPE_BOOLEAN:
1944 case MONO_TYPE_CHAR:
1947 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
/* Native-int/pointer parameters are permissive: references too. */
1953 case MONO_TYPE_FNPTR:
1954 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
1957 case MONO_TYPE_CLASS:
1958 case MONO_TYPE_STRING:
1959 case MONO_TYPE_OBJECT:
1960 case MONO_TYPE_SZARRAY:
1961 case MONO_TYPE_ARRAY:
1962 if (args [i]->type != STACK_OBJ)
1967 if (args [i]->type != STACK_I8)
1972 if (args [i]->type != STACK_R8)
1975 case MONO_TYPE_VALUETYPE:
/* Enums re-dispatch on their base type; true valuetypes need VTYPE. */
1976 if (simple_type->data.klass->enumtype) {
1977 simple_type = simple_type->data.klass->enum_basetype;
1980 if (args [i]->type != STACK_VTYPE)
1983 case MONO_TYPE_TYPEDBYREF:
1984 if (args [i]->type != STACK_VTYPE)
1987 case MONO_TYPE_GENERICINST:
/* Re-dispatch on the generic container's open type. */
1988 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
1992 g_error ("unknown type 0x%02x in check_call_signature",
2000 callvirt_to_call (int opcode)
2005 case OP_VOIDCALLVIRT:
2014 g_assert_not_reached ();
2021 callvirt_to_call_membase (int opcode)
2025 return OP_CALL_MEMBASE;
2026 case OP_VOIDCALLVIRT:
2027 return OP_VOIDCALL_MEMBASE;
2029 return OP_FCALL_MEMBASE;
2031 return OP_LCALL_MEMBASE;
2033 return OP_VCALL_MEMBASE;
2035 g_assert_not_reached ();
2041 #ifdef MONO_ARCH_HAVE_IMT
2043 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
2045 #ifdef MONO_ARCH_IMT_REG
2046 int method_reg = alloc_preg (cfg);
2049 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2050 } else if (cfg->compile_aot) {
2051 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
2054 MONO_INST_NEW (cfg, ins, OP_PCONST);
2055 ins->inst_p0 = call->method;
2056 ins->dreg = method_reg;
2057 MONO_ADD_INS (cfg->cbb, ins);
2060 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2062 mono_arch_emit_imt_argument (cfg, call, imt_arg);
2067 static MonoJumpInfo *
2068 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2070 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2074 ji->data.target = target;
2079 inline static MonoInst*
2080 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args);
/*
 * mono_emit_call_args:
 *
 *   Create a MonoCallInst for a call with signature SIG and arguments ARGS,
 * pick the proper call opcode (CALLI/VIRTUAL variants), allocate the return
 * vreg (or a vtype return temporary), soft-float-convert R4 arguments, and
 * run the arch-specific outgoing-argument lowering.  NOTE(review): this
 * extract is line-sampled; local declarations and several closing braces are
 * elided between the visible lines.
 */
2082 inline static MonoCallInst *
2083 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2084 MonoInst **args, int calli, int virtual)
2087 #ifdef MONO_ARCH_SOFT_FLOAT
2091 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2094 call->signature = sig;
2096 type_to_eval_stack_type ((cfg), sig->ret, &call->inst);
/* Struct returns go through a hidden return-address temporary. */
2098 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2099 MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
2102 temp->backend.is_pinvoke = sig->pinvoke;
/*
 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
 * address of return value to increase optimization opportunities.
 * Before vtype decomposition, the dreg of the call ins itself represents the
 * fact the call modifies the return value. After decomposition, the call will
 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
 * will be transformed into an LDADDR.
 */
2112 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2113 loada->dreg = alloc_preg (cfg);
2114 loada->inst_p0 = temp;
2115 /* We reference the call too since call->dreg could change during optimization */
2116 loada->inst_p1 = call;
2117 MONO_ADD_INS (cfg->cbb, loada);
2119 call->inst.dreg = temp->dreg;
2121 call->vret_var = loada;
2122 } else if (!MONO_TYPE_IS_VOID (sig->ret))
2123 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2125 #ifdef MONO_ARCH_SOFT_FLOAT
/*
 * If the call has a float argument, we would need to do an r8->r4 conversion using
 * an icall, but that cannot be done during the call sequence since it would clobber
 * the call registers + the stack. So we do it before emitting the call.
 */
2131 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2133 MonoInst *in = call->args [i];
/* Position 0..hasthis-1 is the implicit 'this'; map the rest to params. */
2135 if (i >= sig->hasthis)
2136 t = sig->params [i - sig->hasthis];
2138 t = &mono_defaults.int_class->byval_arg;
2139 t = mono_type_get_underlying_type (t);
2141 if (!t->byref && t->type == MONO_TYPE_R4) {
2142 MonoInst *iargs [1];
2146 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2148 /* The result will be in an int vreg */
2149 call->args [i] = conv;
/* Arch-specific lowering computes stack usage; track the maximum. */
2154 mono_arch_emit_call (cfg, call);
2156 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2157 cfg->flags |= MONO_CFG_HAS_CALLS;
2162 inline static MonoInst*
2163 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr)
2165 MonoCallInst *call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE);
2167 call->inst.sreg1 = addr->dreg;
2169 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2171 return (MonoInst*)call;
/*
 * mono_emit_rgctx_calli:
 *
 *   Like mono_emit_calli (), but additionally passes RGCTX_ARG in the
 * architecture's RGCTX register.  Only usable on architectures defining
 * MONO_ARCH_RGCTX_REG.  NOTE(review): line-sampled extract — the #else/#endif
 * structure and local declarations are elided between the visible lines.
 */
2174 inline static MonoInst*
2175 mono_emit_rgctx_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *rgctx_arg)
2177 #ifdef MONO_ARCH_RGCTX_REG
/* Copy the rgctx value into a fresh vreg before the call sequence. */
2182 rgctx_reg = mono_alloc_preg (cfg);
2183 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2185 call = (MonoCallInst*)mono_emit_calli (cfg, sig, args, addr);
2187 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2188 cfg->uses_rgctx_reg = TRUE;
2190 return (MonoInst*)call;
/* Unsupported architecture: unreachable. */
2192 g_assert_not_reached ();
/*
 * mono_emit_method_call_full:
 *
 *   Emit a call to METHOD with signature SIG and arguments ARGS.  A non-NULL
 * THIS makes the call virtual; the function then devirtualizes when possible
 * (non-virtual or sealed methods, delegate Invoke) and otherwise dispatches
 * through the vtable or, for interfaces, the IMT.  IMT_ARG optionally carries
 * an explicit IMT token.  NOTE(review): line-sampled extract — declarations,
 * closing braces and several conditional arms are elided between the visible
 * lines.
 */
2198 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2199 MonoInst **args, MonoInst *this, MonoInst *imt_arg)
2201 gboolean virtual = this != NULL;
2202 gboolean enable_for_aot = TRUE;
/* String ctors are declared void but actually return the string. */
2205 if (method->string_ctor) {
2206 /* Create the real signature */
2207 /* FIXME: Cache these */
2208 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_full (cfg->mempool, sig);
2209 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2214 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual);
/* Transparent-proxy safety: route possibly-remote non-virtual calls
 * through the remoting-invoke-with-check wrapper. */
2216 if (this && sig->hasthis &&
2217 (method->klass->marshalbyref || method->klass == mono_defaults.object_class) &&
2218 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && !MONO_CHECK_THIS (this)) {
2219 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2221 call->method = method;
2223 call->inst.flags |= MONO_INST_HAS_METHOD;
2224 call->inst.inst_left = this;
2227 int vtable_reg, slot_reg, this_reg;
2229 this_reg = this->dreg;
2231 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
/* Delegate Invoke: call directly through delegate->invoke_impl. */
2232 if ((method->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
2233 /* Make a call to delegate->invoke_impl */
2234 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2235 call->inst.inst_basereg = this_reg;
2236 call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2237 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2239 return (MonoInst*)call;
/* Devirtualize: non-virtual methods only need a null check. */
2243 if ((!cfg->compile_aot || enable_for_aot) &&
2244 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2245 (MONO_METHOD_IS_FINAL (method) &&
2246 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK))) {
/*
 * the method is not virtual, we just need to ensure this is not null
 * and then we can call the method directly.
 */
2251 if (method->klass->marshalbyref || method->klass == mono_defaults.object_class) {
2252 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2255 if (!method->string_ctor) {
2256 cfg->flags |= MONO_CFG_HAS_CHECK_THIS;
2257 MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, this_reg);
2258 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, this_reg);
2261 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2263 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2265 return (MonoInst*)call;
/* Devirtualize: sealed virtual methods can be statically dispatched. */
2268 if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
/*
 * the method is virtual, but we can statically dispatch since either
 * it's class or the method itself are sealed.
 * But first we need to ensure it's not a null reference.
 */
2274 cfg->flags |= MONO_CFG_HAS_CHECK_THIS;
2275 MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, this_reg);
2276 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, this_reg);
2278 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2279 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2281 return (MonoInst*)call;
/* True virtual dispatch: load the vtable, then the slot. */
2284 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2286 vtable_reg = alloc_preg (cfg);
2287 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2288 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2290 #ifdef MONO_ARCH_HAVE_IMT
/* Interface call via IMT: slots sit before the vtable proper. */
2292 guint32 imt_slot = mono_method_get_imt_slot (method);
2293 emit_imt_argument (cfg, call, imt_arg);
2294 slot_reg = vtable_reg;
2295 call->inst.inst_offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
/* No IMT: go through the per-interface offsets table. */
2298 if (slot_reg == -1) {
2299 slot_reg = alloc_preg (cfg);
2300 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2301 call->inst.inst_offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
/* Class method: plain vtable slot. */
2304 slot_reg = vtable_reg;
2305 call->inst.inst_offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
2306 (mono_method_get_vtable_index (method) * SIZEOF_VOID_P);
2307 #ifdef MONO_ARCH_HAVE_IMT
/* Generic virtual methods also need the IMT argument. */
2309 g_assert (mono_method_signature (method)->generic_param_count);
2310 emit_imt_argument (cfg, call, imt_arg);
2315 call->inst.sreg1 = slot_reg;
2316 call->virtual = TRUE;
2319 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2321 return (MonoInst*)call;
/*
 * mono_emit_rgctx_method_call_full:
 *
 *   Like mono_emit_method_call_full (), but additionally passes VTABLE_ARG
 * (the rgctx/vtable value) in the architecture's RGCTX register.
 * NOTE(review): line-sampled extract — the guards around the #ifdef'd
 * sections and the final return are elided.
 */
2325 mono_emit_rgctx_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2326 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *vtable_arg)
2333 #ifdef MONO_ARCH_RGCTX_REG
/* Copy the rgctx value into a fresh vreg before emitting the call. */
2334 rgctx_reg = mono_alloc_preg (cfg);
2335 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
2340 ins = mono_emit_method_call_full (cfg, method, sig, args, this, imt_arg);
2342 call = (MonoCallInst*)ins;
2344 #ifdef MONO_ARCH_RGCTX_REG
2345 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2346 cfg->uses_rgctx_reg = TRUE;
2355 static inline MonoInst*
2356 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2358 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), args, this, NULL);
2362 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2369 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE);
2372 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2374 return (MonoInst*)call;
2377 inline static MonoInst*
2378 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2380 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2384 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2388 * mono_emit_abs_call:
2390 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2392 inline static MonoInst*
2393 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2394 MonoMethodSignature *sig, MonoInst **args)
2396 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2400 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
2403 if (cfg->abs_patches == NULL)
2404 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2405 g_hash_table_insert (cfg->abs_patches, ji, ji);
2406 ins = mono_emit_native_call (cfg, ji, sig, args);
2407 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
2412 get_memcpy_method (void)
2414 static MonoMethod *memcpy_method = NULL;
2415 if (!memcpy_method) {
2416 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2418 g_error ("Old corlib found. Install a new one");
2420 return memcpy_method;
/*
 * mini_emit_stobj:
 *
 * Emit code to copy a valuetype of type @klass whose address is stored in
 * @src->dreg to memory whose address is stored at @dest->dreg.  When @native
 * is true the unmanaged (marshalling) size is used.  NOTE(review):
 * line-sampled extract — declarations and closing braces are elided.
 */
2428 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
2430 MonoInst *iargs [3];
2433 MonoMethod *memcpy_method;
/*
 * This check breaks with spilled vars... need to handle it during verification anyway.
 * g_assert (klass && klass == src->klass && klass == dest->klass);
 */
2442 n = mono_class_native_size (klass, &align);
2444 n = mono_class_value_size (klass, &align);
2446 #if HAVE_WRITE_BARRIERS
2447 /* if native is true there should be no references in the struct */
2448 if (klass->has_references && !native) {
2449 /* Avoid barriers when storing to the stack */
2450 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
2451 (dest->opcode == OP_LDADDR))) {
/* Heap store with references: copy via the mono_value_copy icall so the
 * GC write barriers are executed. */
2454 EMIT_NEW_PCONST (cfg, iargs [2], klass);
2456 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* Small copies are inlined; anything else calls String.memcpy. */
2461 if ((cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
2462 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
2463 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
2467 EMIT_NEW_ICONST (cfg, iargs [2], n);
2469 memcpy_method = get_memcpy_method ();
2470 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
2475 get_memset_method (void)
2477 static MonoMethod *memset_method = NULL;
2478 if (!memset_method) {
2479 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
2481 g_error ("Old corlib found. Install a new one");
2483 return memset_method;
/*
 * mini_emit_initobj:
 *   Emit IR that zero-initializes a valuetype of type @klass at the address
 *   held in @dest->dreg.  Small types are memset inline; larger ones call
 *   the managed memset helper.
 */
2487 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
2489 MonoInst *iargs [3];
2492 MonoMethod *memset_method;
2494 /* FIXME: Optimize this for the case when dest is an LDADDR */
2496 mono_class_init (klass);
2497 n = mono_class_value_size (klass, &align);
/* Up to 5 pointer-sized words: cheaper to emit the stores inline. */
2499 if (n <= sizeof (gpointer) * 5) {
2500 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
2503 memset_method = get_memset_method ();
2505 EMIT_NEW_ICONST (cfg, iargs [1], 0);
2506 EMIT_NEW_ICONST (cfg, iargs [2], n);
2507 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 *   Emit IR that loads the runtime generic context for @method.  Depending
 *   on @context_used and the method's flags, the context comes from either
 *   the MRGCTX argument/variable, the vtable variable, or the `this`
 *   object's vtable.  Only valid when generic sharing is in effect.
 */
2512 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
2514 MonoInst *this = NULL;
2516 g_assert (cfg->generic_sharing_context);
/* Non-static, non-valuetype, no method context: `this` is available as arg 0. */
2518 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
2519 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
2520 !method->klass->valuetype)
2521 EMIT_NEW_ARGLOAD (cfg, this, 0);
2523 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
2524 MonoInst *mrgctx_loc, *mrgctx_var;
2527 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
/* Method context used: load the MRGCTX from its dedicated temporary. */
2529 mrgctx_loc = mono_get_vtable_var (cfg);
2530 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
2533 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
2534 MonoInst *vtable_loc, *vtable_var;
/* Static or valuetype method: context lives in the vtable variable. */
2538 vtable_loc = mono_get_vtable_var (cfg);
2539 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
2541 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
2542 MonoInst *mrgctx_var = vtable_var;
/* The variable actually holds an MRGCTX; dereference to reach the vtable. */
2545 vtable_reg = alloc_preg (cfg);
2546 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
2547 vtable_var->type = STACK_PTR;
/* Instance method fallback (branch structure partly elided in this view):
 * fetch the vtable from the `this` object. */
2553 int vtable_reg, res_reg;
2555 vtable_reg = alloc_preg (cfg);
2556 res_reg = alloc_preg (cfg);
2557 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 *   Allocate (from mempool @mp) and fill a MonoJumpInfoRgctxEntry describing
 *   an RGCTX lazy-fetch: which method, whether the MRGCTX is used, and the
 *   patch (type + data) identifying the slot contents requested.
 */
2562 static MonoJumpInfoRgctxEntry *
2563 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, int info_type)
2565 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
2566 res->method = method;
2567 res->in_mrgctx = in_mrgctx;
2568 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
2569 res->data->type = patch_type;
2570 res->data->data.target = patch_data;
2571 res->info_type = info_type;
/*
 * emit_rgctx_fetch:
 *   Emit a call to the RGCTX lazy-fetch trampoline (via an abs-call patched
 *   with MONO_PATCH_INFO_RGCTX_FETCH) passing @rgctx; returns the fetched
 *   slot value as a MonoInst.
 */
2576 static inline MonoInst*
2577 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
2579 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 *   Emit IR that fetches class-related info (@rgctx_type) for @klass from
 *   the runtime generic context of the current method.
 */
2583 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
2584 MonoClass *klass, int rgctx_type)
2586 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
2587 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2589 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_method:
 *   Emit IR that fetches method-related info (@rgctx_type) for @cmethod from
 *   the runtime generic context of the current method.
 */
2593 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
2594 MonoMethod *cmethod, int rgctx_type)
2596 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
2597 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2599 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *   Emit IR that fetches field-related info (@rgctx_type) for @field from
 *   the runtime generic context of the current method.
 */
2603 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
2604 MonoClassField *field, int rgctx_type)
2606 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
2607 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2609 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * mini_emit_check_array_type:
 *   Emit IR that checks @obj is an instance of exactly @array_class,
 *   throwing ArrayTypeMismatchException otherwise.  The comparison strategy
 *   depends on compilation mode: class pointer (shared), RGCTX-fetched
 *   vtable (generic sharing), or vtable pointer (AOT / JIT).
 */
2613 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
2615 int vtable_reg = alloc_preg (cfg);
2616 int context_used = 0;
2618 if (cfg->generic_sharing_context)
2619 context_used = mono_class_check_context_used (array_class);
2621 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/* MONO_OPT_SHARED: compare MonoClass pointers (vtables are per-domain). */
2623 if (cfg->opt & MONO_OPT_SHARED) {
2624 int class_reg = alloc_preg (cfg);
2625 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2626 if (cfg->compile_aot) {
2627 int klass_reg = alloc_preg (cfg);
2628 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
2629 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
2631 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
/* Generic sharing: the expected vtable is obtained via the RGCTX. */
2633 } else if (context_used) {
2634 MonoInst *vtable_ins;
2636 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
2637 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
2639 if (cfg->compile_aot) {
2640 int vt_reg = alloc_preg (cfg);
2641 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, mono_class_vtable (cfg->domain, array_class));
2642 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
2644 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, mono_class_vtable (cfg->domain, array_class));
2648 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
/*
 * save_cast_details:
 *   When --debug=casts is enabled, emit IR that records the source class
 *   (read from the object at @obj_reg) and the target @klass into the JIT
 *   TLS area, so a failing cast can produce a detailed message.  No-op
 *   otherwise.
 */
2652 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg)
2654 if (mini_get_debug_options ()->better_cast_details) {
2655 int to_klass_reg = alloc_preg (cfg);
2656 int vtable_reg = alloc_preg (cfg);
2657 int klass_reg = alloc_preg (cfg);
2658 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
/* tls_get is NULL on platforms without the TLS intrinsic (check elided in this view). */
2661 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
2665 MONO_ADD_INS (cfg->cbb, tls_get);
2666 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2667 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2669 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
2670 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
2671 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
/*
 * reset_cast_details:
 *   Counterpart of save_cast_details(): clears the recorded cast info in
 *   JIT TLS after a successful cast.  Only the `class_cast_from` field
 *   needs zeroing.
 */
2676 reset_cast_details (MonoCompile *cfg)
2678 /* Reset the variables holding the cast details */
2679 if (mini_get_debug_options ()->better_cast_details) {
2680 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
2682 MONO_ADD_INS (cfg->cbb, tls_get);
2683 /* It is enough to reset the from field */
2684 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
/*
 * handle_unbox_nullable:
 *   Emit the call to Nullable<T>.Unbox for @val.  Under generic sharing
 *   (context_used != 0) the method address is fetched from the RGCTX and an
 *   indirect rgctx call is emitted; otherwise a direct call is used.
 */
2689 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
2690 * generic code is generated.
2693 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
2695 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
2698 MonoInst *rgctx, *addr;
2700 /* FIXME: What if the class is shared? We might not
2701 have to get the address of the method from the
2703 addr = emit_get_rgctx_method (cfg, context_used, method,
2704 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
2706 rgctx = emit_get_rgctx (cfg, method, context_used);
2708 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
2710 return mono_emit_method_call (cfg, method, &val, NULL);
/*
 * handle_unbox:
 *   Emit the unbox sequence for sp[0]: verify the object's vtable rank is 0
 *   and its element class matches @klass->element_class (via RGCTX compare
 *   under generic sharing, or a direct class check otherwise), throwing
 *   InvalidCastException on mismatch; then compute and return the address
 *   of the boxed payload (obj + sizeof (MonoObject)) as a STACK_MP value.
 */
2715 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
2719 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
2720 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
2721 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
2722 int rank_reg = alloc_dreg (cfg ,STACK_I4);
2724 obj_reg = sp [0]->dreg;
2725 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2726 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
2728 /* FIXME: generics */
2729 g_assert (klass->rank == 0);
/* An array can never be unboxed to a non-array valuetype. */
2732 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
2733 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
2735 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2736 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
2739 MonoInst *element_class;
2741 /* This assertion is from the unboxcast insn */
2742 g_assert (klass->rank == 0);
/* Generic sharing: expected element class comes from the RGCTX at runtime. */
2744 element_class = emit_get_rgctx_klass (cfg, context_used,
2745 klass->element_class, MONO_RGCTX_INFO_KLASS);
2747 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
2748 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
2750 save_cast_details (cfg, klass->element_class, obj_reg);
2751 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
2752 reset_cast_details (cfg);
/* Result: pointer past the object header, i.e. the valuetype payload. */
2755 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_PTR), obj_reg, sizeof (MonoObject));
2756 MONO_ADD_INS (cfg->cbb, add);
2757 add->type = STACK_MP;
/*
 * handle_alloc:
 *   Emit IR that allocates a new object of @klass (@for_box distinguishes
 *   box allocations).  Chooses, in order: the generic mono_object_new icall
 *   (MONO_OPT_SHARED), a compact mscorlib helper for out-of-line AOT code,
 *   the GC's managed allocator if available, or the allocation function
 *   selected by mono_class_get_allocation_ftn().
 */
2764 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box)
2766 MonoInst *iargs [2];
2769 if (cfg->opt & MONO_OPT_SHARED) {
2770 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
2771 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
2773 alloc_ftn = mono_object_new;
2774 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
2775 /* This happens often in argument checking code, eg. throw new FooException... */
2776 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
2777 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
2778 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
2780 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2781 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
2784 if (managed_alloc) {
2785 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
2786 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
2788 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* pass_lw branch (guard elided in this view): the allocator also wants the
 * instance size in pointer-sized words. */
2790 guint32 lw = vtable->klass->instance_size;
2791 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
2792 EMIT_NEW_ICONST (cfg, iargs [0], lw);
2793 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
2796 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
2800 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/*
 * handle_alloc_from_inst:
 *   Like handle_alloc(), but the vtable is supplied at runtime by
 *   @data_inst (generic sharing: the class may be open, so no vtable can be
 *   computed at compile time).  managed_alloc stays NULL for the same
 *   reason — see the FIXME below.
 */
2804 handle_alloc_from_inst (MonoCompile *cfg, MonoClass *klass, MonoInst *data_inst,
2807 MonoInst *iargs [2];
2808 MonoMethod *managed_alloc = NULL;
2812 FIXME: we cannot get managed_alloc here because we can't get
2813 the class's vtable (because it's not a closed class)
2815 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2816 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
2819 if (cfg->opt & MONO_OPT_SHARED) {
2820 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
2821 iargs [1] = data_inst;
2822 alloc_ftn = mono_object_new;
2824 if (managed_alloc) {
2825 iargs [0] = data_inst;
2826 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
/* Default path: mono_object_new_specific (vtable passed at runtime). */
2829 iargs [0] = data_inst;
2830 alloc_ftn = mono_object_new_specific;
2833 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/*
 * handle_box:
 *   Emit the box sequence for @val of type @klass: Nullable<T> goes through
 *   Nullable.Box; otherwise allocate the object and store the value right
 *   after the MonoObject header.
 */
2837 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass)
2839 MonoInst *alloc, *ins;
2841 if (mono_class_is_nullable (klass)) {
2842 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
2843 return mono_emit_method_call (cfg, method, &val, NULL);
2846 alloc = handle_alloc (cfg, klass, TRUE);
2848 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * handle_box_from_inst:
 *   Generic-sharing variant of handle_box(): Nullable.Box is invoked
 *   indirectly via an RGCTX-fetched method address, and the allocation uses
 *   the runtime-supplied vtable in @data_inst.
 */
2854 handle_box_from_inst (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used, MonoInst *data_inst)
2856 MonoInst *alloc, *ins;
2858 if (mono_class_is_nullable (klass)) {
2859 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
2860 /* FIXME: What if the class is shared? We might not
2861 have to get the method address from the RGCTX. */
2862 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
2863 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
2864 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2866 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
2868 alloc = handle_alloc_from_inst (cfg, klass, data_inst, TRUE);
2870 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * handle_castclass:
 *   Emit the castclass sequence for @src: NULL passes through; interfaces
 *   use the interface-cast check; sealed non-array classes compare the
 *   vtable/class pointer directly; everything else falls back to the
 *   generic class-hierarchy cast check.  Throws InvalidCastException on
 *   failure.
 */
2877 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
2879 MonoBasicBlock *is_null_bb;
2880 int obj_reg = src->dreg;
2881 int vtable_reg = alloc_preg (cfg);
2883 NEW_BBLOCK (cfg, is_null_bb);
/* Casting NULL always succeeds: skip all checks. */
2885 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
2886 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
2888 save_cast_details (cfg, klass, obj_reg);
2890 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2891 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2892 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
2894 int klass_reg = alloc_preg (cfg);
2896 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed class, JIT mode: a single pointer compare decides the cast. */
2898 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
2899 /* the remoting code is broken, access the class for now */
2901 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
2902 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
2904 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2905 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
2907 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* General case: walk the class hierarchy. */
2909 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2910 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, is_null_bb);
2914 MONO_START_BB (cfg, is_null_bb);
2916 reset_cast_details (cfg);
/*
 * handle_isinst:
 *   Emit the isinst sequence for @src against @klass.  Result register gets
 *   the object on success (including NULL input) or NULL on failure.
 *   Dispatches on the kind of @klass: interface, array (with special cases
 *   for object/Enum/ValueType element types), nullable, sealed class, or
 *   the general hierarchy check.
 */
2922 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
2925 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
2926 int obj_reg = src->dreg;
2927 int vtable_reg = alloc_preg (cfg);
2928 int res_reg = alloc_preg (cfg);
2930 NEW_BBLOCK (cfg, is_null_bb);
2931 NEW_BBLOCK (cfg, false_bb);
2932 NEW_BBLOCK (cfg, end_bb);
2934 /* Do the assignment at the beginning, so the other assignment can be if converted */
2935 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
2936 ins->type = STACK_OBJ;
/* isinst on NULL yields NULL (which res_reg already holds). */
2939 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
2940 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
2942 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2943 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2944 /* the is_null_bb target simply copies the input register to the output */
2945 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
2947 int klass_reg = alloc_preg (cfg);
2949 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Array case (guard elided in this view): rank must match, then the
 * element (cast) class is compared with several special cases. */
2952 int rank_reg = alloc_preg (cfg);
2953 int eclass_reg = alloc_preg (cfg);
2955 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
2956 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
2957 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
2958 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2959 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
2960 if (klass->cast_class == mono_defaults.object_class) {
2961 int parent_reg = alloc_preg (cfg);
2962 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
2963 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
2964 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
2965 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
2966 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
2967 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
2968 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
2969 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
2970 } else if (klass->cast_class == mono_defaults.enum_class) {
2971 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
2972 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
2973 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
2974 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
2976 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
2977 /* Check that the object is a vector too */
2978 int bounds_reg = alloc_preg (cfg);
2979 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
2980 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
2981 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
2984 /* the is_null_bb target simply copies the input register to the output */
2985 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
2987 } else if (mono_class_is_nullable (klass)) {
2988 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2989 /* the is_null_bb target simply copies the input register to the output */
2990 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
/* Sealed class, JIT mode: a single vtable pointer compare decides. */
2992 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
2993 /* the remoting code is broken, access the class for now */
2995 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
2996 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
2998 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2999 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3001 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3002 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
3004 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3005 /* the is_null_bb target simply copies the input register to the output */
3006 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, is_null_bb);
/* false: result is NULL; is_null/success: result already holds the object. */
3011 MONO_START_BB (cfg, false_bb);
3013 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
3014 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3016 MONO_START_BB (cfg, is_null_bb);
3018 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *   Emit the remoting-aware isinst variant.  Produces an integer result:
 *   0 = instance of @klass, 1 = not an instance, 2 = transparent proxy
 *   whose type cannot be determined (NoRemoteClassOverride handling in the
 *   interpreter/runtime).  See the comment below for the encoding.
 */
3024 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3026 /* This opcode takes as input an object reference and a class, and returns:
3027 0) if the object is an instance of the class,
3028 1) if the object is not instance of the class,
3029 2) if the object is a proxy whose type cannot be determined */
3032 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
3033 int obj_reg = src->dreg;
3034 int dreg = alloc_ireg (cfg);
3036 int klass_reg = alloc_preg (cfg);
3038 NEW_BBLOCK (cfg, true_bb);
3039 NEW_BBLOCK (cfg, false_bb);
3040 NEW_BBLOCK (cfg, false2_bb);
3041 NEW_BBLOCK (cfg, end_bb);
3042 NEW_BBLOCK (cfg, no_proxy_bb);
/* NULL is "not an instance" (result 1). */
3044 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3045 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
3047 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3048 NEW_BBLOCK (cfg, interface_fail_bb);
3050 tmp_reg = alloc_preg (cfg);
3051 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3052 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
3053 MONO_START_BB (cfg, interface_fail_bb);
3054 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Interface check failed: only a transparent proxy can still say "maybe". */
3056 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
3058 tmp_reg = alloc_preg (cfg);
3059 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3060 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3061 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
3063 tmp_reg = alloc_preg (cfg);
3064 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3065 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Non-interface: proxies use their remote proxy_class for the check. */
3067 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3068 tmp_reg = alloc_preg (cfg);
3069 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3070 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3072 tmp_reg = alloc_preg (cfg);
3073 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3074 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3075 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3077 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
3078 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
3080 MONO_START_BB (cfg, no_proxy_bb);
3082 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
/* Materialize the three possible integer results. */
3085 MONO_START_BB (cfg, false_bb);
3087 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3088 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3090 MONO_START_BB (cfg, false2_bb);
3092 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
3093 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3095 MONO_START_BB (cfg, true_bb);
3097 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3099 MONO_START_BB (cfg, end_bb);
/* Return an ICONST-shaped ins whose dreg holds the merged result. */
3102 MONO_INST_NEW (cfg, ins, OP_ICONST);
3104 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *   Emit the remoting-aware castclass variant.  Produces an integer result:
 *   0 = instance of @klass, 1 = proxy whose type cannot be determined;
 *   throws InvalidCastException otherwise.  See the comment below.
 */
3110 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3112 /* This opcode takes as input an object reference and a class, and returns:
3113 0) if the object is an instance of the class,
3114 1) if the object is a proxy whose type cannot be determined
3115 an InvalidCastException exception is thrown otherwhise*/
3118 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
3119 int obj_reg = src->dreg;
3120 int dreg = alloc_ireg (cfg);
3121 int tmp_reg = alloc_preg (cfg);
3122 int klass_reg = alloc_preg (cfg);
3124 NEW_BBLOCK (cfg, end_bb);
3125 NEW_BBLOCK (cfg, ok_result_bb);
/* Casting NULL always succeeds (result 0). */
3127 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3128 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
3130 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3131 NEW_BBLOCK (cfg, interface_fail_bb);
3133 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3134 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
3135 MONO_START_BB (cfg, interface_fail_bb);
3136 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Interface check failed: anything but a transparent proxy throws. */
3138 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
3140 tmp_reg = alloc_preg (cfg);
3141 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3142 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3143 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/* Proxy with custom type info: result 1 ("cannot determine"). */
3145 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3146 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3149 NEW_BBLOCK (cfg, no_proxy_bb);
3151 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3152 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3153 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3155 tmp_reg = alloc_preg (cfg);
3156 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3157 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3159 tmp_reg = alloc_preg (cfg);
3160 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3161 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3162 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3164 NEW_BBLOCK (cfg, fail_1_bb);
/* Proxy path: check against the remote proxy_class; failure is result 1. */
3166 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
3168 MONO_START_BB (cfg, fail_1_bb);
3170 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3171 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3173 MONO_START_BB (cfg, no_proxy_bb);
/* Plain object: a failing castclass here throws. */
3175 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
3178 MONO_START_BB (cfg, ok_result_bb);
3180 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3182 MONO_START_BB (cfg, end_bb);
/* Return an ICONST-shaped ins whose dreg holds the merged result. */
3185 MONO_INST_NEW (cfg, ins, OP_ICONST);
3187 ins->type = STACK_I4;
/*
 * handle_delegate_ctor:
 *   Inline the body of mono_delegate_ctor: allocate the delegate object of
 *   @klass, store its target, method, (optionally) a per-domain compiled-
 *   code slot, and the invoke_impl trampoline.  Runtime checks are deferred
 *   to the delegate trampoline itself.
 */
3192 static G_GNUC_UNUSED MonoInst*
3193 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method)
3195 gpointer *trampoline;
3196 MonoInst *obj, *method_ins, *tramp_ins;
3200 obj = handle_alloc (cfg, klass, FALSE);
3202 /* Inline the contents of mono_delegate_ctor */
3204 /* Set target field */
3205 /* Optimize away setting of NULL target */
3206 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0))
3207 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
3209 /* Set method field */
3210 EMIT_NEW_METHODCONST (cfg, method_ins, method);
3211 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
3214 * To avoid looking up the compiled code belonging to the target method
3215 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
3216 * store it, and we fill it after the method has been compiled.
/* Skipped for AOT (slot address would need a relocation) and for dynamic
 * methods (their code can be freed). */
3218 if (!cfg->compile_aot && !method->dynamic) {
3219 MonoInst *code_slot_ins;
3221 domain = mono_domain_get ();
/* Lazily create the per-domain method -> code-slot hash under the domain lock. */
3222 mono_domain_lock (domain);
3223 if (!domain_jit_info (domain)->method_code_hash)
3224 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
3225 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
3227 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
3228 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
3230 mono_domain_unlock (domain);
3232 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
3233 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
3236 /* Set invoke_impl field */
3237 if (cfg->compile_aot) {
3238 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, klass);
3240 trampoline = mono_create_delegate_trampoline (klass);
3241 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
3243 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
3245 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *   Emit the call creating a multi-dimensional array of the given @rank via
 *   the vararg array-new icall (registered so it gets an icall wrapper);
 *   marks the method as having varargs.
 */
3251 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
3253 MonoJitICallInfo *info;
3255 /* Need to register the icall so it gets an icall wrapper */
3256 info = mono_get_array_new_va_icall (rank);
3258 cfg->flags |= MONO_CFG_HAS_VARARGS;
3260 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
3261 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * mono_emit_load_got_addr:
 *   If the method has a GOT variable that is not yet materialized, insert
 *   an OP_LOAD_GOTADDR at the very start of the entry basic block and a
 *   dummy use in the exit block so the variable stays live for the whole
 *   method (backends may generate the only real uses).
 */
3265 mono_emit_load_got_addr (MonoCompile *cfg)
3267 MonoInst *getaddr, *dummy_use;
3269 if (!cfg->got_var || cfg->got_var_allocated)
3272 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
3273 getaddr->dreg = cfg->got_var->dreg;
3275 /* Add it to the start of the first bblock */
3276 if (cfg->bb_entry->code) {
3277 getaddr->next = cfg->bb_entry->code;
3278 cfg->bb_entry->code = getaddr;
3281 MONO_ADD_INS (cfg->bb_entry, getaddr);
3283 cfg->got_var_allocated = TRUE;
3286 * Add a dummy use to keep the got_var alive, since real uses might
3287 * only be generated by the back ends.
3288 * Add it to end_bblock, so the variable's lifetime covers the whole
3290 * It would be better to make the usage of the got var explicit in all
3291 * cases when the backend needs it (i.e. calls, throw etc.), so this
3292 * wouldn't be needed.
3294 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
3295 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Cached MONO_INLINELIMIT value (defaults to INLINE_LENGTH_LIMIT). */
3298 static int inline_limit;
3299 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *   Decide whether @method may be inlined into the method being compiled.
 *   Rejects: generic sharing, runtime/icall/noinline/synchronized/pinvoke
 *   methods, MarshalByRef classes, methods with exception clauses, bodies
 *   over the inline limit, classes whose cctor cannot be proven to have
 *   run, methods with declarative security, and (on soft-float targets)
 *   R4 parameters or return values.
 */
3302 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
3304 MonoMethodHeader *header = mono_method_get_header (method);
3306 #ifdef MONO_ARCH_SOFT_FLOAT
3307 MonoMethodSignature *sig = mono_method_signature (method);
3311 if (cfg->generic_sharing_context)
/* LMF-ops targets can inline some icalls/pinvokes (non-struct return,
 * non-System.Array) — result of this check elided in this view. */
3314 #ifdef MONO_ARCH_HAVE_LMF_OPS
3315 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3316 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
3317 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
3321 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_RUNTIME) ||
3322 (method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3323 (method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
3324 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
3325 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL) ||
3326 (method->klass->marshalbyref) ||
3327 !header || header->num_clauses)
3330 /* also consider num_locals? */
3331 /* Do the size check early to avoid creating vtables */
3332 if (!inline_limit_inited) {
3333 if (getenv ("MONO_INLINELIMIT"))
3334 inline_limit = atoi (getenv ("MONO_INLINELIMIT"))
3336 inline_limit = INLINE_LENGTH_LIMIT;
3337 inline_limit_inited = TRUE;
3339 if (header->code_size >= inline_limit)
3343 * if we can initialize the class of the method right away, we do,
3344 * otherwise we don't allow inlining if the class needs initialization,
3345 * since it would mean inserting a call to mono_runtime_class_init()
3346 * inside the inlined code
3348 if (!(cfg->opt & MONO_OPT_SHARED)) {
3349 if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
3350 if (cfg->run_cctors && method->klass->has_cctor) {
3351 if (!method->klass->runtime_info)
3352 /* No vtable created yet */
3354 vtable = mono_class_vtable (cfg->domain, method->klass);
3357 /* This makes so that inline cannot trigger */
3358 /* .cctors: too many apps depend on them */
3359 /* running with a specific order... */
3360 if (! vtable->initialized)
3362 mono_runtime_class_init (vtable);
3364 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
3365 if (!method->klass->runtime_info)
3366 /* No vtable created yet */
3368 vtable = mono_class_vtable (cfg->domain, method->klass);
3371 if (!vtable->initialized)
3376 * If we're compiling for shared code
3377 * the cctor will need to be run at aot method load time, for example,
3378 * or at the end of the compilation of the inlining method.
3380 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
3385 * CAS - do not inline methods with declarative security
3386 * Note: this has to be before any possible return TRUE;
3388 if (mono_method_has_declsec (method))
3391 #ifdef MONO_ARCH_SOFT_FLOAT
/* Soft-float: R4 values need special handling, so refuse to inline them. */
3393 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
3395 for (i = 0; i < sig->param_count; ++i)
3396 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/*
 * mini_field_access_needs_cctor_run:
 *   Decide whether a static field access in @method requires emitting a
 *   class-init check for @vtable's class.  Not needed when the class is
 *   already initialized (non-AOT), has BeforeFieldInit, needs no cctor, or
 *   is the instance method's own class (cctor already ran before the call).
 */
3404 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoVTable *vtable)
3406 if (vtable->initialized && !cfg->compile_aot)
3409 if (vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
3412 if (!mono_class_needs_cctor_run (vtable->klass, method))
3415 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (vtable->klass == method->klass))
3416 /* The initialization is already done before the method is called */
3423 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index)
3427 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
3429 mono_class_init (klass);
3430 size = mono_class_array_element_size (klass);
3432 mult_reg = alloc_preg (cfg);
3433 array_reg = arr->dreg;
3434 index_reg = index->dreg;
3436 #if SIZEOF_REGISTER == 8
3437 /* The array reg is 64 bits but the index reg is only 32 */
3438 index2_reg = alloc_preg (cfg);
3439 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
3441 index2_reg = index_reg;
3444 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
3446 #if defined(__i386__) || defined(__x86_64__)
3447 if (size == 1 || size == 2 || size == 4 || size == 8) {
3448 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
3450 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
3451 ins->type = STACK_PTR;
3457 add_reg = alloc_preg (cfg);
3459 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
3460 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
3461 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3462 ins->type = STACK_PTR;
3463 MONO_ADD_INS (cfg->cbb, ins);
3468 #ifndef MONO_ARCH_EMULATE_MUL_DIV
3470 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
3472 int bounds_reg = alloc_preg (cfg);
3473 int add_reg = alloc_preg (cfg);
3474 int mult_reg = alloc_preg (cfg);
3475 int mult2_reg = alloc_preg (cfg);
3476 int low1_reg = alloc_preg (cfg);
3477 int low2_reg = alloc_preg (cfg);
3478 int high1_reg = alloc_preg (cfg);
3479 int high2_reg = alloc_preg (cfg);
3480 int realidx1_reg = alloc_preg (cfg);
3481 int realidx2_reg = alloc_preg (cfg);
3482 int sum_reg = alloc_preg (cfg);
3487 mono_class_init (klass);
3488 size = mono_class_array_element_size (klass);
3490 index1 = index_ins1->dreg;
3491 index2 = index_ins2->dreg;
3493 /* range checking */
3494 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
3495 arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
3497 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
3498 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3499 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
3500 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
3501 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
3502 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
3503 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
3505 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
3506 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3507 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
3508 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
3509 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
3510 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
3511 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
3513 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
3514 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
3515 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
3516 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
3517 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3519 ins->type = STACK_MP;
3521 MONO_ADD_INS (cfg->cbb, ins);
3528 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
3532 MonoMethod *addr_method;
3535 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
3538 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1]);
3540 #ifndef MONO_ARCH_EMULATE_MUL_DIV
3541 /* emit_ldelema_2 depends on OP_LMUL */
3542 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
3543 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
3547 element_size = mono_class_array_element_size (cmethod->klass->element_class);
3548 addr_method = mono_marshal_get_array_address (rank, element_size);
3549 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
3555 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
3557 MonoInst *ins = NULL;
3559 static MonoClass *runtime_helpers_class = NULL;
3560 if (! runtime_helpers_class)
3561 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
3562 "System.Runtime.CompilerServices", "RuntimeHelpers");
3564 if (cmethod->klass == mono_defaults.string_class) {
3565 if (strcmp (cmethod->name, "get_Chars") == 0) {
3566 int dreg = alloc_ireg (cfg);
3567 int index_reg = alloc_preg (cfg);
3568 int mult_reg = alloc_preg (cfg);
3569 int add_reg = alloc_preg (cfg);
3571 #if SIZEOF_REGISTER == 8
3572 /* The array reg is 64 bits but the index reg is only 32 */
3573 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
3575 index_reg = args [1]->dreg;
3577 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
3579 #if defined(__i386__) || defined(__x86_64__)
3580 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
3581 add_reg = ins->dreg;
3582 /* Avoid a warning */
3584 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
3587 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
3588 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
3589 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
3590 add_reg, G_STRUCT_OFFSET (MonoString, chars));
3592 type_from_op (ins, NULL, NULL);
3594 } else if (strcmp (cmethod->name, "get_Length") == 0) {
3595 int dreg = alloc_ireg (cfg);
3596 /* Decompose later to allow more optimizations */
3597 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
3598 ins->type = STACK_I4;
3599 cfg->cbb->has_array_access = TRUE;
3600 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
3603 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
3604 int mult_reg = alloc_preg (cfg);
3605 int add_reg = alloc_preg (cfg);
3607 /* The corlib functions check for oob already. */
3608 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
3609 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
3610 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
3613 } else if (cmethod->klass == mono_defaults.object_class) {
3615 if (strcmp (cmethod->name, "GetType") == 0) {
3616 int dreg = alloc_preg (cfg);
3617 int vt_reg = alloc_preg (cfg);
3618 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3619 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
3620 type_from_op (ins, NULL, NULL);
3623 #if !defined(MONO_ARCH_EMULATE_MUL_DIV) && !defined(HAVE_MOVING_COLLECTOR)
3624 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0) {
3625 int dreg = alloc_ireg (cfg);
3626 int t1 = alloc_ireg (cfg);
3628 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
3629 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
3630 ins->type = STACK_I4;
3634 } else if (strcmp (cmethod->name, ".ctor") == 0) {
3635 MONO_INST_NEW (cfg, ins, OP_NOP);
3636 MONO_ADD_INS (cfg->cbb, ins);
3640 } else if (cmethod->klass == mono_defaults.array_class) {
3641 if (cmethod->name [0] != 'g')
3644 if (strcmp (cmethod->name, "get_Rank") == 0) {
3645 int dreg = alloc_ireg (cfg);
3646 int vtable_reg = alloc_preg (cfg);
3647 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, vtable_reg,
3648 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3649 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
3650 vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3651 type_from_op (ins, NULL, NULL);
3654 } else if (strcmp (cmethod->name, "get_Length") == 0) {
3655 int dreg = alloc_ireg (cfg);
3657 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
3658 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
3659 type_from_op (ins, NULL, NULL);
3664 } else if (cmethod->klass == runtime_helpers_class) {
3666 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
3667 EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
3671 } else if (cmethod->klass == mono_defaults.thread_class) {
3672 if (strcmp (cmethod->name, "get_CurrentThread") == 0 && (ins = mono_arch_get_thread_intrinsic (cfg))) {
3673 ins->dreg = alloc_preg (cfg);
3674 ins->type = STACK_OBJ;
3675 MONO_ADD_INS (cfg->cbb, ins);
3677 } else if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
3678 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
3679 MONO_ADD_INS (cfg->cbb, ins);
3681 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
3682 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
3683 MONO_ADD_INS (cfg->cbb, ins);
3686 } else if (cmethod->klass == mono_defaults.monitor_class) {
3687 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
3688 if (strcmp (cmethod->name, "Enter") == 0) {
3691 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
3692 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
3693 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
3694 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
3696 return (MonoInst*)call;
3697 } else if (strcmp (cmethod->name, "Exit") == 0) {
3700 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
3701 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
3702 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
3703 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
3705 return (MonoInst*)call;
3707 #elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
3708 MonoMethod *fast_method = NULL;
3710 /* Avoid infinite recursion */
3711 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
3712 (strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
3713 strcmp (cfg->method->name, "FastMonitorExit") == 0))
3716 if (strcmp (cmethod->name, "Enter") == 0 ||
3717 strcmp (cmethod->name, "Exit") == 0)
3718 fast_method = mono_monitor_get_fast_path (cmethod);
3722 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
3724 } else if (mini_class_is_system_array (cmethod->klass) &&
3725 strcmp (cmethod->name, "GetGenericValueImpl") == 0) {
3726 MonoInst *addr, *store, *load;
3727 MonoClass *eklass = mono_class_from_mono_type (fsig->params [1]);
3729 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1]);
3730 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
3731 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
3733 } else if (cmethod->klass->image == mono_defaults.corlib &&
3734 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
3735 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
3738 #if SIZEOF_REGISTER == 8
3739 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
3740 /* 64 bit reads are already atomic */
3741 MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
3742 ins->dreg = mono_alloc_preg (cfg);
3743 ins->inst_basereg = args [0]->dreg;
3744 ins->inst_offset = 0;
3745 MONO_ADD_INS (cfg->cbb, ins);
3749 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
3750 if (strcmp (cmethod->name, "Increment") == 0) {
3751 MonoInst *ins_iconst;
3754 if (fsig->params [0]->type == MONO_TYPE_I4)
3755 opcode = OP_ATOMIC_ADD_NEW_I4;
3756 #if SIZEOF_REGISTER == 8
3757 else if (fsig->params [0]->type == MONO_TYPE_I8)
3758 opcode = OP_ATOMIC_ADD_NEW_I8;
3761 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
3762 ins_iconst->inst_c0 = 1;
3763 ins_iconst->dreg = mono_alloc_ireg (cfg);
3764 MONO_ADD_INS (cfg->cbb, ins_iconst);
3766 MONO_INST_NEW (cfg, ins, opcode);
3767 ins->dreg = mono_alloc_ireg (cfg);
3768 ins->inst_basereg = args [0]->dreg;
3769 ins->inst_offset = 0;
3770 ins->sreg2 = ins_iconst->dreg;
3771 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
3772 MONO_ADD_INS (cfg->cbb, ins);
3774 } else if (strcmp (cmethod->name, "Decrement") == 0) {
3775 MonoInst *ins_iconst;
3778 if (fsig->params [0]->type == MONO_TYPE_I4)
3779 opcode = OP_ATOMIC_ADD_NEW_I4;
3780 #if SIZEOF_REGISTER == 8
3781 else if (fsig->params [0]->type == MONO_TYPE_I8)
3782 opcode = OP_ATOMIC_ADD_NEW_I8;
3785 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
3786 ins_iconst->inst_c0 = -1;
3787 ins_iconst->dreg = mono_alloc_ireg (cfg);
3788 MONO_ADD_INS (cfg->cbb, ins_iconst);
3790 MONO_INST_NEW (cfg, ins, opcode);
3791 ins->dreg = mono_alloc_ireg (cfg);
3792 ins->inst_basereg = args [0]->dreg;
3793 ins->inst_offset = 0;
3794 ins->sreg2 = ins_iconst->dreg;
3795 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
3796 MONO_ADD_INS (cfg->cbb, ins);
3798 } else if (strcmp (cmethod->name, "Add") == 0) {
3801 if (fsig->params [0]->type == MONO_TYPE_I4)
3802 opcode = OP_ATOMIC_ADD_NEW_I4;
3803 #if SIZEOF_REGISTER == 8
3804 else if (fsig->params [0]->type == MONO_TYPE_I8)
3805 opcode = OP_ATOMIC_ADD_NEW_I8;
3809 MONO_INST_NEW (cfg, ins, opcode);
3810 ins->dreg = mono_alloc_ireg (cfg);
3811 ins->inst_basereg = args [0]->dreg;
3812 ins->inst_offset = 0;
3813 ins->sreg2 = args [1]->dreg;
3814 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
3815 MONO_ADD_INS (cfg->cbb, ins);
3818 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
3820 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
3821 if (strcmp (cmethod->name, "Exchange") == 0) {
3824 if (fsig->params [0]->type == MONO_TYPE_I4)
3825 opcode = OP_ATOMIC_EXCHANGE_I4;
3826 #if SIZEOF_REGISTER == 8
3827 else if ((fsig->params [0]->type == MONO_TYPE_I8) ||
3828 (fsig->params [0]->type == MONO_TYPE_I) ||
3829 (fsig->params [0]->type == MONO_TYPE_OBJECT))
3830 opcode = OP_ATOMIC_EXCHANGE_I8;
3832 else if ((fsig->params [0]->type == MONO_TYPE_I) ||
3833 (fsig->params [0]->type == MONO_TYPE_OBJECT))
3834 opcode = OP_ATOMIC_EXCHANGE_I4;
3839 MONO_INST_NEW (cfg, ins, opcode);
3840 ins->dreg = mono_alloc_ireg (cfg);
3841 ins->inst_basereg = args [0]->dreg;
3842 ins->inst_offset = 0;
3843 ins->sreg2 = args [1]->dreg;
3844 MONO_ADD_INS (cfg->cbb, ins);
3846 switch (fsig->params [0]->type) {
3848 ins->type = STACK_I4;
3852 ins->type = STACK_I8;
3854 case MONO_TYPE_OBJECT:
3855 ins->type = STACK_OBJ;
3858 g_assert_not_reached ();
3861 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
3863 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS_IMM
3865 * Can't implement CompareExchange methods this way since they have
3866 * three arguments. We can implement one of the common cases, where the new
3867 * value is a constant.
3869 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
3870 if ((fsig->params [1]->type == MONO_TYPE_I4 ||
3871 (sizeof (gpointer) == 4 && fsig->params [1]->type == MONO_TYPE_I))
3872 && args [2]->opcode == OP_ICONST) {
3873 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_IMM_I4);
3874 ins->dreg = alloc_ireg (cfg);
3875 ins->sreg1 = args [0]->dreg;
3876 ins->sreg2 = args [1]->dreg;
3877 ins->backend.data = GINT_TO_POINTER (args [2]->inst_c0);
3878 ins->type = STACK_I4;
3879 MONO_ADD_INS (cfg->cbb, ins);
3881 /* The I8 case is hard to detect, since the arg might be a conv.i8 (iconst) tree */
3883 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS_IMM */
3887 } else if (cmethod->klass->image == mono_defaults.corlib) {
3888 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
3889 && strcmp (cmethod->klass->name, "Debugger") == 0) {
3890 MONO_INST_NEW (cfg, ins, OP_BREAK);
3891 MONO_ADD_INS (cfg->cbb, ins);
3894 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
3895 && strcmp (cmethod->klass->name, "Environment") == 0) {
3896 #ifdef PLATFORM_WIN32
3897 EMIT_NEW_ICONST (cfg, ins, 1);
3899 EMIT_NEW_ICONST (cfg, ins, 0);
3903 } else if (cmethod->klass == mono_defaults.math_class) {
3905 * There is general branches code for Min/Max, but it does not work for
3907 * http://everything2.com/?node_id=1051618
3911 #ifdef MONO_ARCH_SIMD_INTRINSICS
3912 if (cfg->opt & MONO_OPT_SIMD) {
3913 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
3919 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
3923 * This entry point could be used later for arbitrary method
3926 inline static MonoInst*
3927 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
3928 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
3930 if (method->klass == mono_defaults.string_class) {
3931 /* managed string allocation support */
3932 if (strcmp (method->name, "InternalAllocateStr") == 0) {
3933 MonoInst *iargs [2];
3934 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3935 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, FALSE);
3938 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3939 iargs [1] = args [0];
3940 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
3947 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
3949 MonoInst *store, *temp;
3952 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
3953 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
3956 * FIXME: We should use *args++ = sp [0], but that would mean the arg
3957 * would be different than the MonoInst's used to represent arguments, and
3958 * the ldelema implementation can't deal with that.
3959 * Solution: When ldelema is used on an inline argument, create a var for
3960 * it, emit ldelema on that var, and emit the saving code below in
3961 * inline_method () if needed.
3963 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
3964 cfg->args [i] = temp;
3965 /* This uses cfg->args [i] which is set by the preceeding line */
3966 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
3967 store->cil_code = sp [0]->cil_code;
3972 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
3973 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
3975 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
3977 check_inline_called_method_name_limit (MonoMethod *called_method)
3980 static char *limit = NULL;
3982 if (limit == NULL) {
3983 char *limit_string = getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
3985 if (limit_string != NULL)
3986 limit = limit_string;
3988 limit = (char *) "";
3991 if (limit [0] != '\0') {
3992 char *called_method_name = mono_method_full_name (called_method, TRUE);
3994 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
3995 g_free (called_method_name);
3997 //return (strncmp_result <= 0);
3998 return (strncmp_result == 0);
4005 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
4007 check_inline_caller_method_name_limit (MonoMethod *caller_method)
4010 static char *limit = NULL;
4012 if (limit == NULL) {
4013 char *limit_string = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
4014 if (limit_string != NULL) {
4015 limit = limit_string;
4017 limit = (char *) "";
4021 if (limit [0] != '\0') {
4022 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
4024 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
4025 g_free (caller_method_name);
4027 //return (strncmp_result <= 0);
4028 return (strncmp_result == 0);
4036 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
4037 guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_allways)
4039 MonoInst *ins, *rvar = NULL;
4040 MonoMethodHeader *cheader;
4041 MonoBasicBlock *ebblock, *sbblock;
4043 MonoMethod *prev_inlined_method;
4044 MonoInst **prev_locals, **prev_args;
4045 MonoType **prev_arg_types;
4046 guint prev_real_offset;
4047 GHashTable *prev_cbb_hash;
4048 MonoBasicBlock **prev_cil_offset_to_bb;
4049 MonoBasicBlock *prev_cbb;
4050 unsigned char* prev_cil_start;
4051 guint32 prev_cil_offset_to_bb_len;
4052 MonoMethod *prev_current_method;
4053 MonoGenericContext *prev_generic_context;
4055 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
4057 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
4058 if ((! inline_allways) && ! check_inline_called_method_name_limit (cmethod))
4061 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
4062 if ((! inline_allways) && ! check_inline_caller_method_name_limit (cfg->method))
4066 if (cfg->verbose_level > 2)
4067 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4069 if (!cmethod->inline_info) {
4070 mono_jit_stats.inlineable_methods++;
4071 cmethod->inline_info = 1;
4073 /* allocate space to store the return value */
4074 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
4075 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
4078 /* allocate local variables */
4079 cheader = mono_method_get_header (cmethod);
4080 prev_locals = cfg->locals;
4081 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
4082 for (i = 0; i < cheader->num_locals; ++i)
4083 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
4085 /* allocate start and end blocks */
4086 /* This is needed so if the inline is aborted, we can clean up */
4087 NEW_BBLOCK (cfg, sbblock);
4088 sbblock->real_offset = real_offset;
4090 NEW_BBLOCK (cfg, ebblock);
4091 ebblock->block_num = cfg->num_bblocks++;
4092 ebblock->real_offset = real_offset;
4094 prev_args = cfg->args;
4095 prev_arg_types = cfg->arg_types;
4096 prev_inlined_method = cfg->inlined_method;
4097 cfg->inlined_method = cmethod;
4098 cfg->ret_var_set = FALSE;
4099 prev_real_offset = cfg->real_offset;
4100 prev_cbb_hash = cfg->cbb_hash;
4101 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
4102 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
4103 prev_cil_start = cfg->cil_start;
4104 prev_cbb = cfg->cbb;
4105 prev_current_method = cfg->current_method;
4106 prev_generic_context = cfg->generic_context;
4108 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, *ip == CEE_CALLVIRT);
4110 cfg->inlined_method = prev_inlined_method;
4111 cfg->real_offset = prev_real_offset;
4112 cfg->cbb_hash = prev_cbb_hash;
4113 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
4114 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
4115 cfg->cil_start = prev_cil_start;
4116 cfg->locals = prev_locals;
4117 cfg->args = prev_args;
4118 cfg->arg_types = prev_arg_types;
4119 cfg->current_method = prev_current_method;
4120 cfg->generic_context = prev_generic_context;
4122 if ((costs >= 0 && costs < 60) || inline_allways) {
4123 if (cfg->verbose_level > 2)
4124 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4126 mono_jit_stats.inlined_methods++;
4128 /* always add some code to avoid block split failures */
4129 MONO_INST_NEW (cfg, ins, OP_NOP);
4130 MONO_ADD_INS (prev_cbb, ins);
4132 prev_cbb->next_bb = sbblock;
4133 link_bblock (cfg, prev_cbb, sbblock);
4136 * Get rid of the begin and end bblocks if possible to aid local
4139 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
4141 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
4142 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
4144 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
4145 MonoBasicBlock *prev = ebblock->in_bb [0];
4146 mono_merge_basic_blocks (cfg, prev, ebblock);
4148 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
4149 mono_merge_basic_blocks (cfg, prev_cbb, prev);
4150 cfg->cbb = prev_cbb;
4158 * If the inlined method contains only a throw, then the ret var is not
4159 * set, so set it to a dummy value.
4161 if (!cfg->ret_var_set) {
4162 static double r8_0 = 0.0;
4164 switch (rvar->type) {
4166 MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
4169 MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
4174 MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
4177 MONO_INST_NEW (cfg, ins, OP_R8CONST);
4178 ins->type = STACK_R8;
4179 ins->inst_p0 = (void*)&r8_0;
4180 ins->dreg = rvar->dreg;
4181 MONO_ADD_INS (cfg->cbb, ins);
4184 MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (fsig->ret));
4187 g_assert_not_reached ();
4191 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
4196 if (cfg->verbose_level > 2)
4197 printf ("INLINE ABORTED %s\n", mono_method_full_name (cmethod, TRUE));
4198 cfg->exception_type = MONO_EXCEPTION_NONE;
4199 mono_loader_clear_error ();
4201 /* This gets rid of the newly added bblocks */
4202 cfg->cbb = prev_cbb;
4208 * Some of these comments may well be out-of-date.
4209 * Design decisions: we do a single pass over the IL code (and we do bblock
4210 * splitting/merging in the few cases when it's required: a back jump to an IL
4211 * address that was not already seen as bblock starting point).
4212 * Code is validated as we go (full verification is still better left to metadata/verify.c).
4213 * Complex operations are decomposed in simpler ones right away. We need to let the
4214 * arch-specific code peek and poke inside this process somehow (except when the
4215 * optimizations can take advantage of the full semantic info of coarse opcodes).
4216 * All the opcodes of the form opcode.s are 'normalized' to opcode.
4217 * MonoInst->opcode initially is the IL opcode or some simplification of that
4218 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
4219 * opcode with value bigger than OP_LAST.
4220 * At this point the IR can be handed over to an interpreter, a dumb code generator
4221 * or to the optimizing code generator that will translate it to SSA form.
4223 * Profiling directed optimizations.
4224 * We may compile by default with few or no optimizations and instrument the code
4225 * or the user may indicate what methods to optimize the most either in a config file
4226 * or through repeated runs where the compiler applies offline the optimizations to
4227 * each method and then decides if it was worth it.
4230 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
4231 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
4232 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
4233 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
4234 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
4235 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
4236 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
4237 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; goto load_error;}
4239 /* offset from br.s -> br like opcodes */
4240 #define BIG_BRANCH_OFFSET 13
4243 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
4245 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
4247 return b == NULL || b == bb;
4251 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
4253 unsigned char *ip = start;
4254 unsigned char *target;
4257 MonoBasicBlock *bblock;
4258 const MonoOpcode *opcode;
4261 cli_addr = ip - start;
4262 i = mono_opcode_value ((const guint8 **)&ip, end);
4265 opcode = &mono_opcodes [i];
4266 switch (opcode->argument) {
4267 case MonoInlineNone:
4270 case MonoInlineString:
4271 case MonoInlineType:
4272 case MonoInlineField:
4273 case MonoInlineMethod:
4276 case MonoShortInlineR:
4283 case MonoShortInlineVar:
4284 case MonoShortInlineI:
4287 case MonoShortInlineBrTarget:
4288 target = start + cli_addr + 2 + (signed char)ip [1];
4289 GET_BBLOCK (cfg, bblock, target);
4292 GET_BBLOCK (cfg, bblock, ip);
4294 case MonoInlineBrTarget:
4295 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
4296 GET_BBLOCK (cfg, bblock, target);
4299 GET_BBLOCK (cfg, bblock, ip);
4301 case MonoInlineSwitch: {
4302 guint32 n = read32 (ip + 1);
4305 cli_addr += 5 + 4 * n;
4306 target = start + cli_addr;
4307 GET_BBLOCK (cfg, bblock, target);
4309 for (j = 0; j < n; ++j) {
4310 target = start + cli_addr + (gint32)read32 (ip);
4311 GET_BBLOCK (cfg, bblock, target);
4321 g_assert_not_reached ();
4324 if (i == CEE_THROW) {
4325 unsigned char *bb_start = ip - 1;
4327 /* Find the start of the bblock containing the throw */
4329 while ((bb_start >= start) && !bblock) {
4330 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
4334 bblock->out_of_line = 1;
4343 static inline MonoMethod *
4344 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4348 if (m->wrapper_type != MONO_WRAPPER_NONE)
4349 return mono_method_get_wrapper_data (m, token);
4351 method = mono_get_method_full (m->klass->image, token, klass, context);
4356 static inline MonoMethod *
4357 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4359 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
4361 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
4367 static inline MonoClass*
4368 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
4372 if (method->wrapper_type != MONO_WRAPPER_NONE)
4373 klass = mono_method_get_wrapper_data (method, token);
4375 klass = mono_class_get_full (method->klass->image, token, context);
4377 mono_class_init (klass);
4382 * Returns TRUE if the JIT should abort inlining because "callee"
4383 * is influenced by security attributes.
4386 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
4390 if ((cfg->method != caller) && mono_method_has_declsec (callee)) {
4394 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
4395 if (result == MONO_JIT_SECURITY_OK)
4398 if (result == MONO_JIT_LINKDEMAND_ECMA) {
4399 /* Generate code to throw a SecurityException before the actual call/link */
4400 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4403 NEW_ICONST (cfg, args [0], 4);
4404 NEW_METHODCONST (cfg, args [1], caller);
4405 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
4406 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
4407 /* don't hide previous results */
4408 cfg->exception_type = MONO_EXCEPTION_SECURITY_LINKDEMAND;
4409 cfg->exception_data = result;
4417 method_access_exception (void)
4419 static MonoMethod *method = NULL;
4422 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4423 method = mono_class_get_method_from_name (secman->securitymanager,
4424 "MethodAccessException", 2);
4431 emit_throw_method_access_exception (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
4432 MonoBasicBlock *bblock, unsigned char *ip)
4434 MonoMethod *thrower = method_access_exception ();
4437 EMIT_NEW_METHODCONST (cfg, args [0], caller);
4438 EMIT_NEW_METHODCONST (cfg, args [1], callee);
4439 mono_emit_method_call (cfg, thrower, args, NULL);
4443 verification_exception (void)
4445 static MonoMethod *method = NULL;
4448 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4449 method = mono_class_get_method_from_name (secman->securitymanager,
4450 "VerificationException", 0);
4457 emit_throw_verification_exception (MonoCompile *cfg, MonoBasicBlock *bblock, unsigned char *ip)
4459 MonoMethod *thrower = verification_exception ();
4461 mono_emit_method_call (cfg, thrower, NULL, NULL);
/*
 * ensure_method_is_allowed_to_call_method:
 *
 *   CoreCLR security check: compare the security levels of caller and
 * callee. The call is permitted when the caller's level is >= the callee's,
 * or when either side is SafeCritical; otherwise code throwing a
 * MethodAccessException is emitted at the call site.
 * NOTE(review): is_safe is initialized here but its other uses are elided
 * in this excerpt.
 */
4465 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
4466 MonoBasicBlock *bblock, unsigned char *ip)
4468 MonoSecurityCoreCLRLevel caller_level = mono_security_core_clr_method_level (caller, TRUE);
4469 MonoSecurityCoreCLRLevel callee_level = mono_security_core_clr_method_level (callee, TRUE);
4470 gboolean is_safe = TRUE;
/* Disallowed unless the caller outranks the callee or one side is SafeCritical. */
4472 if (!(caller_level >= callee_level ||
4473 caller_level == MONO_SECURITY_CORE_CLR_SAFE_CRITICAL ||
4474 callee_level == MONO_SECURITY_CORE_CLR_SAFE_CRITICAL)) {
4479 emit_throw_method_access_exception (cfg, caller, callee, bblock, ip);
/*
 * method_is_safe:
 *
 *   Test hook used by the CoreCLR checks below: a method literally named
 * "unsafeMethod" is treated as unsafe.
 * NOTE(review): the return statements are elided in this excerpt.
 */
4483 method_is_safe (MonoMethod *method)
4486 if (strcmp (method->name, "unsafeMethod") == 0)
4493 * Check that the IL instructions at ip are the array initialization
4494 * sequence and return the pointer to the data and the size.
/*
 * Recognized pattern (starting right after the newarr): dup; ldtoken <field>;
 * call RuntimeHelpers::InitializeArray. On success the raw field data (or,
 * for AOT, the RVA boxed as a pointer) is returned so the runtime call can
 * be replaced by a direct memory copy.
 * NOTE(review): this excerpt elides several lines (declarations of size/rva/
 * dummy_align, most switch cases computing the element size, early-return
 * guards, and the final return) — do not infer exact control flow from it.
 */
4497 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
4500 * newarr[System.Int32]
4502 * ldtoken field valuetype ...
4503 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* ip[5] == 0x4 checks the ldtoken operand's token table; ip[6] must be the call. */
4505 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
4506 guint32 token = read32 (ip + 7);
4507 guint32 field_token = read32 (ip + 2);
4508 guint32 field_index = field_token & 0xffffff;
4510 const char *data_ptr;
4512 MonoMethod *cmethod;
4513 MonoClass *dummy_class;
4514 MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
4520 *out_field_token = field_token;
4522 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* Only optimize the real corlib RuntimeHelpers.InitializeArray. */
4525 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
4527 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
4528 case MONO_TYPE_BOOLEAN:
4532 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
4533 #if G_BYTE_ORDER == G_LITTLE_ENDIAN
4534 case MONO_TYPE_CHAR:
4544 return NULL; /* stupid ARM FP swapped format */
/* Sanity check: the computed blob size must fit inside the RVA field. */
4554 if (size > mono_type_size (field->type, &dummy_align))
4557 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
4558 if (!method->klass->image->dynamic) {
4559 field_index = read32 (ip + 2) & 0xffffff;
4560 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
4561 data_ptr = mono_image_rva_map (method->klass->image, rva);
4562 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
4563 /* for aot code we do the lookup on load */
4564 if (aot && data_ptr)
4565 return GUINT_TO_POINTER (rva);
4567 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
/* Dynamic (SRE) image: no RVA mapping, read the data straight from the field. */
4569 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *
 *   Record an InvalidProgramException on the compile context, with a message
 * naming the offending method and either a disassembly of the IL at `ip` or
 * a note that the method body is empty. Both temporary strings are freed;
 * exception_message is owned by cfg afterwards.
 */
4577 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
4579 char *method_fname = mono_method_full_name (method, TRUE);
4582 if (mono_method_get_header (method)->code_size == 0)
4583 method_code = g_strdup ("method body is empty.");
4585 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
4586 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
4587 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
4588 g_free (method_fname);
4589 g_free (method_code);
/*
 * set_exception_object:
 *
 *   Record a pre-built managed exception object on the compile context.
 * The pointer is registered as a GC root first, since cfg->exception_ptr
 * holds a managed object reference for the lifetime of the compile.
 */
4593 set_exception_object (MonoCompile *cfg, MonoException *exception)
4595 cfg->exception_type = MONO_EXCEPTION_OBJECT_SUPPLIED;
4596 MONO_GC_REGISTER_ROOT (cfg->exception_ptr);
4597 cfg->exception_ptr = exception;
/*
 * generic_class_is_reference_type:
 *
 *   Return whether KLASS is a reference type, resolving type variables
 * through the generic sharing context first so that shared generic code
 * answers for the concrete instantiation being compiled.
 */
4601 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
4605 if (cfg->generic_sharing_context)
4606 type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, &klass->byval_arg);
4608 type = &klass->byval_arg;
4609 return MONO_TYPE_IS_REFERENCE (type);
4613 * mono_decompose_array_access_opts:
4615 * Decompose array access opcodes.
4616 * This should be in decompose.c, but it emits calls so it has to stay here until
4617 * the old JIT is gone.
/*
 * NOTE(review): this excerpt elides several lines (some case labels before
 * the NEW_LOAD_MEMBASE blocks — presumably the array-length, NEWARR and
 * string-length opcodes — plus break statements and closing braces). The
 * comments below only describe what is visible.
 */
4620 mono_decompose_array_access_opts (MonoCompile *cfg)
4622 MonoBasicBlock *bb, *first_bb;
4625 * Unlike decompose_long_opts, this pass does not alter the CFG of the method so it
4626 * can be executed anytime. It should be run before decompose_long
4630 * Create a dummy bblock and emit code into it so we can use the normal
4631 * code generation macros.
4633 cfg->cbb = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoBasicBlock));
4634 first_bb = cfg->cbb;
4636 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
4638 MonoInst *prev = NULL;
4640 MonoInst *iargs [3];
/* Fast skip: only bblocks flagged as containing array accesses are scanned. */
4643 if (!bb->has_array_access)
4646 if (cfg->verbose_level > 3) mono_print_bb (bb, "BEFORE DECOMPOSE-ARRAY-ACCESS-OPTS ");
4648 cfg->cbb->code = cfg->cbb->last_ins = NULL;
4654 for (ins = bb->code; ins; ins = ins->next) {
4655 switch (ins->opcode) {
/* Array length: load MonoArray.max_length from the array pointer. */
4657 NEW_LOAD_MEMBASE (cfg, dest, OP_LOADI4_MEMBASE, ins->dreg, ins->sreg1,
4658 G_STRUCT_OFFSET (MonoArray, max_length));
4659 MONO_ADD_INS (cfg->cbb, dest);
4661 case OP_BOUNDS_CHECK:
4662 MONO_ARCH_EMIT_BOUNDS_CHECK (cfg, ins->sreg1, ins->inst_imm, ins->sreg2);
/* Array allocation: with MONO_OPT_SHARED go through the generic
 * mono_array_new icall (domain + class + length)... */
4665 if (cfg->opt & MONO_OPT_SHARED) {
4666 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4667 EMIT_NEW_CLASSCONST (cfg, iargs [1], ins->inst_newa_class);
4668 MONO_INST_NEW (cfg, iargs [2], OP_MOVE);
4669 iargs [2]->dreg = ins->sreg1;
4671 dest = mono_emit_jit_icall (cfg, mono_array_new, iargs);
4672 dest->dreg = ins->dreg;
/* ...otherwise use the faster vtable-specific icall. */
4674 MonoVTable *vtable = mono_class_vtable (cfg->domain, mono_array_class_get (ins->inst_newa_class, 1));
4677 NEW_VTABLECONST (cfg, iargs [0], vtable);
4678 MONO_ADD_INS (cfg->cbb, iargs [0]);
4679 MONO_INST_NEW (cfg, iargs [1], OP_MOVE);
4680 iargs [1]->dreg = ins->sreg1;
4682 dest = mono_emit_jit_icall (cfg, mono_array_new_specific, iargs);
4683 dest->dreg = ins->dreg;
/* String length: load MonoString.length. */
4687 NEW_LOAD_MEMBASE (cfg, dest, OP_LOADI4_MEMBASE, ins->dreg,
4688 ins->sreg1, G_STRUCT_OFFSET (MonoString, length));
4689 MONO_ADD_INS (cfg->cbb, dest);
4695 g_assert (cfg->cbb == first_bb);
/* If the dummy bblock received code, splice it in place of `ins`
 * and reset the dummy for the next instruction. */
4697 if (cfg->cbb->code || (cfg->cbb != first_bb)) {
4698 /* Replace the original instruction with the new code sequence */
4700 mono_replace_ins (cfg, bb, ins, &prev, first_bb, cfg->cbb);
4701 first_bb->code = first_bb->last_ins = NULL;
4702 first_bb->in_count = first_bb->out_count = 0;
4703 cfg->cbb = first_bb;
4710 if (cfg->verbose_level > 3) mono_print_bb (bb, "AFTER DECOMPOSE-ARRAY-ACCESS-OPTS ");
4720 #ifdef MONO_ARCH_SOFT_FLOAT
4723 * mono_decompose_soft_float:
4725 * Soft float support on ARM. We store each double value in a pair of integer vregs,
4726 * similar to long support on 32 bit platforms. 32 bit float values require special
4727 * handling when used as locals, arguments, and in calls.
4728 * One big problem with soft-float is that there are few r4 test cases in our test suite.
/*
 * NOTE(review): this excerpt elides many lines (case labels such as the
 * R8/R4 const cases, FMOVE/FGETLOW32/FGETHIGH32, SETFRET, break statements,
 * declarations of `d`, `addr_reg`, `conv`, `call`, and closing braces).
 * Comments below annotate only the visible code.
 */
4731 mono_decompose_soft_float (MonoCompile *cfg)
4733 MonoBasicBlock *bb, *first_bb;
4736 * This pass creates long opcodes, so it should be run before decompose_long_opts ().
4740 * Create a dummy bblock and emit code into it so we can use the normal
4741 * code generation macros.
4743 cfg->cbb = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoBasicBlock));
4744 first_bb = cfg->cbb;
4746 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
4748 MonoInst *prev = NULL;
4751 if (cfg->verbose_level > 3) mono_print_bb (bb, "BEFORE HANDLE-SOFT-FLOAT ");
4753 cfg->cbb->code = cfg->cbb->last_ins = NULL;
4759 for (ins = bb->code; ins; ins = ins->next) {
4760 const char *spec = INS_INFO (ins->opcode);
4762 /* Most fp operations are handled automatically by opcode emulation */
4764 switch (ins->opcode) {
/* r8 constant: reinterpret the double bits as an i8 constant. */
4767 d.vald = *(double*)ins->inst_p0;
4768 MONO_EMIT_NEW_I8CONST (cfg, ins->dreg, d.vall);
4773 /* We load the r8 value */
4774 d.vald = *(float*)ins->inst_p0;
4775 MONO_EMIT_NEW_I8CONST (cfg, ins->dreg, d.vall);
/* fp register moves become long/int moves over the vreg pair. */
4779 ins->opcode = OP_LMOVE;
4782 ins->opcode = OP_MOVE;
4783 ins->sreg1 = ins->sreg1 + 1;
4786 ins->opcode = OP_MOVE;
4787 ins->sreg1 = ins->sreg1 + 2;
/* fp return value: return the two halves of the vreg pair as a long. */
4790 int reg = ins->sreg1;
4792 ins->opcode = OP_SETLRET;
4794 ins->sreg1 = reg + 1;
4795 ins->sreg2 = reg + 2;
/* r8 loads/stores are plain 64-bit integer loads/stores. */
4798 case OP_LOADR8_MEMBASE:
4799 ins->opcode = OP_LOADI8_MEMBASE;
4801 case OP_STORER8_MEMBASE_REG:
4802 ins->opcode = OP_STOREI8_MEMBASE_REG;
/* r4 store: needs an r8->r4 narrowing, done via the mono_fstore_r4 icall. */
4804 case OP_STORER4_MEMBASE_REG: {
4805 MonoInst *iargs [2];
4808 /* Arg 1 is the double value */
4809 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
4810 iargs [0]->dreg = ins->sreg1;
4812 /* Arg 2 is the address to store to */
4813 addr_reg = mono_alloc_preg (cfg);
4814 EMIT_NEW_BIALU_IMM (cfg, iargs [1], OP_PADD_IMM, addr_reg, ins->inst_destbasereg, ins->inst_offset);
4815 mono_emit_jit_icall (cfg, mono_fstore_r4, iargs);
/* r4 load: widen to r8 via the mono_fload_r4 icall. */
4819 case OP_LOADR4_MEMBASE: {
4820 MonoInst *iargs [1];
4824 addr_reg = mono_alloc_preg (cfg);
4825 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, addr_reg, ins->inst_basereg, ins->inst_offset);
4826 conv = mono_emit_jit_icall (cfg, mono_fload_r4, iargs);
4827 conv->dreg = ins->dreg;
4832 case OP_FCALL_MEMBASE: {
4833 MonoCallInst *call = (MonoCallInst*)ins;
/* Calls returning r4: the raw bits come back in an int reg, so clone
 * the call as an integer call and widen the result to r8 afterwards. */
4834 if (call->signature->ret->type == MONO_TYPE_R4) {
4835 MonoCallInst *call2;
4836 MonoInst *iargs [1];
4839 /* Convert the call into a call returning an int */
4840 MONO_INST_NEW_CALL (cfg, call2, OP_CALL);
4841 memcpy (call2, call, sizeof (MonoCallInst));
4842 switch (ins->opcode) {
4844 call2->inst.opcode = OP_CALL;
4847 call2->inst.opcode = OP_CALL_REG;
4849 case OP_FCALL_MEMBASE:
4850 call2->inst.opcode = OP_CALL_MEMBASE;
4853 g_assert_not_reached ();
4855 call2->inst.dreg = mono_alloc_ireg (cfg);
4856 MONO_ADD_INS (cfg->cbb, (MonoInst*)call2);
4858 /* FIXME: Optimize this */
4860 /* Emit an r4->r8 conversion */
4861 EMIT_NEW_VARLOADA_VREG (cfg, iargs [0], call2->inst.dreg, &mono_defaults.int32_class->byval_arg);
4862 conv = mono_emit_jit_icall (cfg, mono_fload_r4, iargs);
4863 conv->dreg = ins->dreg;
/* Calls returning r8: retype as long calls (result in a vreg pair). */
4865 switch (ins->opcode) {
4867 ins->opcode = OP_LCALL;
4870 ins->opcode = OP_LCALL_REG;
4872 case OP_FCALL_MEMBASE:
4873 ins->opcode = OP_LCALL_MEMBASE;
4876 g_assert_not_reached ();
4882 MonoJitICallInfo *info;
4883 MonoInst *iargs [2];
4884 MonoInst *call, *cmp, *br;
4886 /* Convert fcompare+fbcc to icall+icompare+beq */
/* The emulation icall is chosen from the *branch* opcode that follows. */
4888 info = mono_find_jit_opcode_emulation (ins->next->opcode);
4891 /* Create dummy MonoInst's for the arguments */
4892 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
4893 iargs [0]->dreg = ins->sreg1;
4894 MONO_INST_NEW (cfg, iargs [1], OP_ARG);
4895 iargs [1]->dreg = ins->sreg2;
4897 call = mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, iargs);
4899 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE_IMM);
4900 cmp->sreg1 = call->dreg;
4902 MONO_ADD_INS (cfg->cbb, cmp);
/* Branch on the icall's boolean result, keeping the original targets. */
4904 MONO_INST_NEW (cfg, br, OP_IBNE_UN);
4905 br->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * 2);
4906 br->inst_true_bb = ins->next->inst_true_bb;
4907 br->inst_false_bb = ins->next->inst_false_bb;
4908 MONO_ADD_INS (cfg->cbb, br);
4910 /* The call sequence might include fp ins */
4913 /* Skip fbcc or fccc */
4914 NULLIFY_INS (ins->next);
4922 MonoJitICallInfo *info;
4923 MonoInst *iargs [2];
4926 /* Convert fccc to icall+icompare+iceq */
4928 info = mono_find_jit_opcode_emulation (ins->opcode);
4931 /* Create dummy MonoInst's for the arguments */
4932 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
4933 iargs [0]->dreg = ins->sreg1;
4934 MONO_INST_NEW (cfg, iargs [1], OP_ARG);
4935 iargs [1]->dreg = ins->sreg2;
4937 call = mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, iargs);
4939 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, call->dreg, 1);
4940 MONO_EMIT_NEW_UNALU (cfg, OP_ICEQ, ins->dreg, -1);
4942 /* The call sequence might include fp ins */
/* Safety net: after decomposition no instruction may still use fp vregs. */
4947 if (spec [MONO_INST_SRC1] == 'f' || spec [MONO_INST_SRC2] == 'f' || spec [MONO_INST_DEST] == 'f') {
4948 mono_print_ins (ins);
4949 g_assert_not_reached ();
4954 g_assert (cfg->cbb == first_bb);
/* Splice any code emitted into the dummy bblock in place of `ins`. */
4956 if (cfg->cbb->code || (cfg->cbb != first_bb)) {
4957 /* Replace the original instruction with the new code sequence */
4959 mono_replace_ins (cfg, bb, ins, &prev, first_bb, cfg->cbb);
4960 first_bb->code = first_bb->last_ins = NULL;
4961 first_bb->in_count = first_bb->out_count = 0;
4962 cfg->cbb = first_bb;
4969 if (cfg->verbose_level > 3) mono_print_bb (bb, "AFTER HANDLE-SOFT-FLOAT ");
/* This pass produced long opcodes, so run the long decomposition now. */
4972 mono_decompose_long_opts (cfg);
/*
 * emit_stloc_ir:
 *
 *   Emit the store of the stack top (*sp) into local N. When the store
 * would be a plain reg-reg move and the value on top of the stack is the
 * constant instruction just emitted (ICONST/I8CONST), retarget that
 * instruction's dreg to the local's vreg instead of emitting a move.
 */
4978 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
4981 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
4982 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
4983 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
4984 /* Optimize reg-reg moves away */
4986 * Can't optimize other opcodes, since sp[0] might point to
4987 * the last ins of a decomposed opcode.
4989 sp [0]->dreg = (cfg)->locals [n]->dreg;
4991 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
4996 * ldloca inhibits many optimizations so try to get rid of it in common
/*
 * emit_optimized_ldloca_ir:
 *
 *   Peephole for `ldloca <local>; initobj <type>`: instead of taking the
 * local's address, directly zero the local (NULL for reference types,
 * VZERO for value types), which keeps the local addressable-free.
 * NOTE(review): this excerpt elides the token/klass/local declarations,
 * the short-form (size==1) operand decoding branch, and the returns — the
 * visible read16(ip+2) corresponds to the long-form ldloca.
 */
4999 static inline unsigned char *
5000 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
5009 local = read16 (ip + 2);
/* The following initobj must be in range and in the same bblock. */
5013 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
5014 gboolean skip = FALSE;
5016 /* From the INITOBJ case */
5017 token = read32 (ip + 2);
5018 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
5019 CHECK_TYPELOAD (klass);
5020 if (generic_class_is_reference_type (cfg, klass)) {
5021 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5022 } else if (MONO_TYPE_IS_REFERENCE (&klass->byval_arg)) {
5023 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5024 } else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
5025 MONO_EMIT_NEW_VZERO (cfg, cfg->locals [local]->dreg, klass);
/*
 * is_exception_class:
 *
 *   Walk the inheritance chain of CLASS looking for System.Exception.
 * NOTE(review): the loop construct and return statements are elided in
 * this excerpt — presumably returns TRUE on a match, FALSE when the chain
 * is exhausted.
 */
5038 is_exception_class (MonoClass *class)
5041 if (class == mono_defaults.exception_class)
5043 class = class->parent;
5049 * mono_method_to_ir:
5051 * Translate the .net IL into linear IR.
5054 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
5055 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
5056 guint inline_offset, gboolean is_virtual_call)
5058 MonoInst *ins, **sp, **stack_start;
5059 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
5060 MonoMethod *cmethod, *method_definition;
5061 MonoInst **arg_array;
5062 MonoMethodHeader *header;
5064 guint32 token, ins_flag;
5066 MonoClass *constrained_call = NULL;
5067 unsigned char *ip, *end, *target, *err_pos;
5068 static double r8_0 = 0.0;
5069 MonoMethodSignature *sig;
5070 MonoGenericContext *generic_context = NULL;
5071 MonoGenericContainer *generic_container = NULL;
5072 MonoType **param_types;
5073 int i, n, start_new_bblock, dreg;
5074 int num_calls = 0, inline_costs = 0;
5075 int breakpoint_id = 0;
5077 MonoBoolean security, pinvoke;
5078 MonoSecurityManager* secman = NULL;
5079 MonoDeclSecurityActions actions;
5080 GSList *class_inits = NULL;
5081 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
5084 /* serialization and xdomain stuff may need access to private fields and methods */
5085 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
5086 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
5087 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
5088 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
5089 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
5090 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
5092 dont_verify |= mono_security_get_mode () == MONO_SECURITY_MODE_SMCS_HACK;
5094 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
5095 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
5096 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
5097 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
5099 image = method->klass->image;
5100 header = mono_method_get_header (method);
5101 generic_container = mono_method_get_generic_container (method);
5102 sig = mono_method_signature (method);
5103 num_args = sig->hasthis + sig->param_count;
5104 ip = (unsigned char*)header->code;
5105 cfg->cil_start = ip;
5106 end = ip + header->code_size;
5107 mono_jit_stats.cil_code_size += header->code_size;
5109 method_definition = method;
5110 while (method_definition->is_inflated) {
5111 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
5112 method_definition = imethod->declaring;
5115 /* SkipVerification is not allowed if core-clr is enabled */
5116 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
5118 dont_verify_stloc = TRUE;
5121 if (!dont_verify && mini_method_verify (cfg, method_definition))
5122 goto exception_exit;
5124 if (mono_debug_using_mono_debugger ())
5125 cfg->keep_cil_nops = TRUE;
5127 if (sig->is_inflated)
5128 generic_context = mono_method_get_context (method);
5129 else if (generic_container)
5130 generic_context = &generic_container->context;
5131 cfg->generic_context = generic_context;
5133 if (!cfg->generic_sharing_context)
5134 g_assert (!sig->has_type_parameters);
5136 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
5137 g_assert (method->is_inflated);
5138 g_assert (mono_method_get_context (method)->method_inst);
5140 if (method->is_inflated && mono_method_get_context (method)->method_inst)
5141 g_assert (sig->generic_param_count);
5143 if (cfg->method == method) {
5144 cfg->real_offset = 0;
5146 cfg->real_offset = inline_offset;
5149 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
5150 cfg->cil_offset_to_bb_len = header->code_size;
5152 cfg->current_method = method;
5154 if (cfg->verbose_level > 2)
5155 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
5157 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
5159 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
5160 for (n = 0; n < sig->param_count; ++n)
5161 param_types [n + sig->hasthis] = sig->params [n];
5162 cfg->arg_types = param_types;
5164 dont_inline = g_list_prepend (dont_inline, method);
5165 if (cfg->method == method) {
5167 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
5168 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
5171 NEW_BBLOCK (cfg, start_bblock);
5172 cfg->bb_entry = start_bblock;
5173 start_bblock->cil_code = NULL;
5174 start_bblock->cil_length = 0;
5177 NEW_BBLOCK (cfg, end_bblock);
5178 cfg->bb_exit = end_bblock;
5179 end_bblock->cil_code = NULL;
5180 end_bblock->cil_length = 0;
5181 g_assert (cfg->num_bblocks == 2);
5183 arg_array = cfg->args;
5185 if (header->num_clauses) {
5186 cfg->spvars = g_hash_table_new (NULL, NULL);
5187 cfg->exvars = g_hash_table_new (NULL, NULL);
5189 /* handle exception clauses */
5190 for (i = 0; i < header->num_clauses; ++i) {
5191 MonoBasicBlock *try_bb;
5192 MonoExceptionClause *clause = &header->clauses [i];
5193 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
5194 try_bb->real_offset = clause->try_offset;
5195 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
5196 tblock->real_offset = clause->handler_offset;
5197 tblock->flags |= BB_EXCEPTION_HANDLER;
5199 link_bblock (cfg, try_bb, tblock);
5201 if (*(ip + clause->handler_offset) == CEE_POP)
5202 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
5204 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
5205 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
5206 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
5207 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5208 MONO_ADD_INS (tblock, ins);
5210 /* todo: is a fault block unsafe to optimize? */
5211 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
5212 tblock->flags |= BB_EXCEPTION_UNSAFE;
5216 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
5218 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
5220 /* catch and filter blocks get the exception object on the stack */
5221 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
5222 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5223 MonoInst *dummy_use;
5225 /* mostly like handle_stack_args (), but just sets the input args */
5226 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
5227 tblock->in_scount = 1;
5228 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5229 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5232 * Add a dummy use for the exvar so its liveness info will be
5236 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
5238 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5239 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
5240 tblock->flags |= BB_EXCEPTION_HANDLER;
5241 tblock->real_offset = clause->data.filter_offset;
5242 tblock->in_scount = 1;
5243 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5244 /* The filter block shares the exvar with the handler block */
5245 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5246 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5247 MONO_ADD_INS (tblock, ins);
5251 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
5252 clause->data.catch_class &&
5253 cfg->generic_sharing_context &&
5254 mono_class_check_context_used (clause->data.catch_class)) {
5256 * In shared generic code with catch
5257 * clauses containing type variables
5258 * the exception handling code has to
5259 * be able to get to the rgctx.
5260 * Therefore we have to make sure that
5261 * the vtable/mrgctx argument (for
5262 * static or generic methods) or the
5263 * "this" argument (for non-static
5264 * methods) are live.
5266 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5267 mini_method_get_context (method)->method_inst ||
5268 method->klass->valuetype) {
5269 mono_get_vtable_var (cfg);
5271 MonoInst *dummy_use;
5273 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
5278 arg_array = alloca (sizeof (MonoInst *) * num_args);
5279 cfg->cbb = start_bblock;
5280 cfg->args = arg_array;
5281 mono_save_args (cfg, sig, inline_args);
5284 /* FIRST CODE BLOCK */
5285 NEW_BBLOCK (cfg, bblock);
5286 bblock->cil_code = ip;
5290 ADD_BBLOCK (cfg, bblock);
5292 if (cfg->method == method) {
5293 breakpoint_id = mono_debugger_method_has_breakpoint (method);
5294 if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
5295 MONO_INST_NEW (cfg, ins, OP_BREAK);
5296 MONO_ADD_INS (bblock, ins);
5300 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5301 secman = mono_security_manager_get_methods ();
5303 security = (secman && mono_method_has_declsec (method));
5304 /* at this point having security doesn't mean we have any code to generate */
5305 if (security && (cfg->method == method)) {
5306 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
5307 * And we do not want to enter the next section (with allocation) if we
5308 * have nothing to generate */
5309 security = mono_declsec_get_demands (method, &actions);
5312 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
5313 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
5315 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5316 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5317 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
5319 /* unless the method or it's class has the [SuppressUnmanagedCodeSecurity] attribute */
5320 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5324 mono_custom_attrs_free (custom);
5327 custom = mono_custom_attrs_from_class (wrapped->klass);
5328 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5332 mono_custom_attrs_free (custom);
5335 /* not a P/Invoke after all */
5340 if ((header->init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
5341 /* we use a separate basic block for the initialization code */
5342 NEW_BBLOCK (cfg, init_localsbb);
5343 cfg->bb_init = init_localsbb;
5344 init_localsbb->real_offset = cfg->real_offset;
5345 start_bblock->next_bb = init_localsbb;
5346 init_localsbb->next_bb = bblock;
5347 link_bblock (cfg, start_bblock, init_localsbb);
5348 link_bblock (cfg, init_localsbb, bblock);
5350 cfg->cbb = init_localsbb;
5352 start_bblock->next_bb = bblock;
5353 link_bblock (cfg, start_bblock, bblock);
5356 /* at this point we know, if security is TRUE, that some code needs to be generated */
5357 if (security && (cfg->method == method)) {
5360 mono_jit_stats.cas_demand_generation++;
5362 if (actions.demand.blob) {
5363 /* Add code for SecurityAction.Demand */
5364 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
5365 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
5366 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5367 mono_emit_method_call (cfg, secman->demand, args, NULL);
5369 if (actions.noncasdemand.blob) {
5370 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
5371 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
5372 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
5373 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
5374 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5375 mono_emit_method_call (cfg, secman->demand, args, NULL);
5377 if (actions.demandchoice.blob) {
5378 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
5379 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
5380 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
5381 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
5382 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
5386 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
5388 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
5391 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
5392 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
5393 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5394 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5395 if (!(method->klass && method->klass->image &&
5396 mono_security_core_clr_is_platform_image (method->klass->image))) {
5397 emit_throw_method_access_exception (cfg, method, wrapped, bblock, ip);
5401 if (!method_is_safe (method))
5402 emit_throw_verification_exception (cfg, bblock, ip);
5405 if (header->code_size == 0)
5408 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
5413 if (cfg->method == method)
5414 mono_debug_init_method (cfg, bblock, breakpoint_id);
5416 for (n = 0; n < header->num_locals; ++n) {
5417 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
5422 /* We force the vtable variable here for all shared methods
5423 for the possibility that they might show up in a stack
5424 trace where their exact instantiation is needed. */
5425 if (cfg->generic_sharing_context)
5426 mono_get_vtable_var (cfg);
5428 /* add a check for this != NULL to inlined methods */
5429 if (is_virtual_call) {
5432 NEW_ARGLOAD (cfg, arg_ins, 0);
5433 MONO_ADD_INS (cfg->cbb, arg_ins);
5434 cfg->flags |= MONO_CFG_HAS_CHECK_THIS;
5435 MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, arg_ins->dreg);
5436 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, arg_ins->dreg);
5439 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
5440 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
5443 start_new_bblock = 0;
5447 if (cfg->method == method)
5448 cfg->real_offset = ip - header->code;
5450 cfg->real_offset = inline_offset;
5455 if (start_new_bblock) {
5456 bblock->cil_length = ip - bblock->cil_code;
5457 if (start_new_bblock == 2) {
5458 g_assert (ip == tblock->cil_code);
5460 GET_BBLOCK (cfg, tblock, ip);
5462 bblock->next_bb = tblock;
5465 start_new_bblock = 0;
5466 for (i = 0; i < bblock->in_scount; ++i) {
5467 if (cfg->verbose_level > 3)
5468 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5469 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5473 g_slist_free (class_inits);
5476 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
5477 link_bblock (cfg, bblock, tblock);
5478 if (sp != stack_start) {
5479 handle_stack_args (cfg, stack_start, sp - stack_start);
5481 CHECK_UNVERIFIABLE (cfg);
5483 bblock->next_bb = tblock;
5486 for (i = 0; i < bblock->in_scount; ++i) {
5487 if (cfg->verbose_level > 3)
5488 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5489 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5492 g_slist_free (class_inits);
5497 bblock->real_offset = cfg->real_offset;
5499 if ((cfg->method == method) && cfg->coverage_info) {
5500 guint32 cil_offset = ip - header->code;
5501 cfg->coverage_info->data [cil_offset].cil_code = ip;
5503 /* TODO: Use an increment here */
5504 #if defined(__i386__)
5505 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
5506 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
5508 MONO_ADD_INS (cfg->cbb, ins);
5510 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
5511 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
5515 if (cfg->verbose_level > 3)
5516 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
5520 if (cfg->keep_cil_nops)
5521 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
5523 MONO_INST_NEW (cfg, ins, OP_NOP);
5525 MONO_ADD_INS (bblock, ins);
5528 MONO_INST_NEW (cfg, ins, OP_BREAK);
5530 MONO_ADD_INS (bblock, ins);
5536 CHECK_STACK_OVF (1);
5537 n = (*ip)-CEE_LDARG_0;
5539 EMIT_NEW_ARGLOAD (cfg, ins, n);
5547 CHECK_STACK_OVF (1);
5548 n = (*ip)-CEE_LDLOC_0;
5550 EMIT_NEW_LOCLOAD (cfg, ins, n);
5559 n = (*ip)-CEE_STLOC_0;
5562 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
5564 emit_stloc_ir (cfg, sp, header, n);
5571 CHECK_STACK_OVF (1);
5574 EMIT_NEW_ARGLOAD (cfg, ins, n);
5580 CHECK_STACK_OVF (1);
5583 NEW_ARGLOADA (cfg, ins, n);
5584 MONO_ADD_INS (cfg->cbb, ins);
5594 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
5596 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
5601 CHECK_STACK_OVF (1);
5604 EMIT_NEW_LOCLOAD (cfg, ins, n);
5608 case CEE_LDLOCA_S: {
5609 unsigned char *tmp_ip;
5611 CHECK_STACK_OVF (1);
5612 CHECK_LOCAL (ip [1]);
5614 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
5620 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
5629 CHECK_LOCAL (ip [1]);
5630 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
5632 emit_stloc_ir (cfg, sp, header, ip [1]);
5637 CHECK_STACK_OVF (1);
5638 EMIT_NEW_PCONST (cfg, ins, NULL);
5639 ins->type = STACK_OBJ;
5644 CHECK_STACK_OVF (1);
5645 EMIT_NEW_ICONST (cfg, ins, -1);
5658 CHECK_STACK_OVF (1);
5659 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
5665 CHECK_STACK_OVF (1);
5667 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
5673 CHECK_STACK_OVF (1);
5674 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
5680 CHECK_STACK_OVF (1);
5681 MONO_INST_NEW (cfg, ins, OP_I8CONST);
5682 ins->type = STACK_I8;
5683 ins->dreg = alloc_dreg (cfg, STACK_I8);
5685 ins->inst_l = (gint64)read64 (ip);
5686 MONO_ADD_INS (bblock, ins);
5692 /* FIXME: we should really allocate this only late in the compilation process */
5693 mono_domain_lock (cfg->domain);
5694 f = mono_domain_alloc (cfg->domain, sizeof (float));
5695 mono_domain_unlock (cfg->domain);
5697 CHECK_STACK_OVF (1);
5698 MONO_INST_NEW (cfg, ins, OP_R4CONST);
5699 ins->type = STACK_R8;
5700 ins->dreg = alloc_dreg (cfg, STACK_R8);
5704 MONO_ADD_INS (bblock, ins);
5712 /* FIXME: we should really allocate this only late in the compilation process */
5713 mono_domain_lock (cfg->domain);
5714 d = mono_domain_alloc (cfg->domain, sizeof (double));
5715 mono_domain_unlock (cfg->domain);
5717 CHECK_STACK_OVF (1);
5718 MONO_INST_NEW (cfg, ins, OP_R8CONST);
5719 ins->type = STACK_R8;
5720 ins->dreg = alloc_dreg (cfg, STACK_R8);
5724 MONO_ADD_INS (bblock, ins);
5731 MonoInst *temp, *store;
5733 CHECK_STACK_OVF (1);
5737 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
5738 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
5740 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
5743 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
5756 if (sp [0]->type == STACK_R8)
5757 /* we need to pop the value from the x86 FP stack */
5758 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
5765 if (stack_start != sp)
5767 token = read32 (ip + 1);
5768 /* FIXME: check the signature matches */
5769 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
5774 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
5775 GENERIC_SHARING_FAILURE (CEE_JMP);
5777 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
5778 if (check_linkdemand (cfg, method, cmethod))
5780 CHECK_CFG_EXCEPTION;
5785 MonoMethodSignature *fsig = mono_method_signature (cmethod);
5788 /* Handle tail calls similarly to calls */
5789 n = fsig->param_count + fsig->hasthis;
5791 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
5792 call->method = cmethod;
5793 call->tail_call = TRUE;
5794 call->signature = mono_method_signature (cmethod);
5795 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
5796 call->inst.inst_p0 = cmethod;
5797 for (i = 0; i < n; ++i)
5798 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
5800 mono_arch_emit_call (cfg, call);
5801 MONO_ADD_INS (bblock, (MonoInst*)call);
5804 for (i = 0; i < num_args; ++i)
5805 /* Prevent arguments from being optimized away */
5806 arg_array [i]->flags |= MONO_INST_VOLATILE;
5808 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
5809 ins = (MonoInst*)call;
5810 ins->inst_p0 = cmethod;
5811 MONO_ADD_INS (bblock, ins);
5815 start_new_bblock = 1;
5820 case CEE_CALLVIRT: {
5821 MonoInst *addr = NULL;
5822 MonoMethodSignature *fsig = NULL;
5824 int virtual = *ip == CEE_CALLVIRT;
5825 int calli = *ip == CEE_CALLI;
5826 gboolean pass_imt_from_rgctx = FALSE;
5827 MonoInst *imt_arg = NULL;
5828 gboolean pass_vtable = FALSE;
5829 gboolean pass_mrgctx = FALSE;
5830 MonoInst *vtable_arg = NULL;
5831 gboolean check_this = FALSE;
5834 token = read32 (ip + 1);
5841 if (method->wrapper_type != MONO_WRAPPER_NONE)
5842 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
5844 fsig = mono_metadata_parse_signature (image, token);
5846 n = fsig->param_count + fsig->hasthis;
5848 MonoMethod *cil_method;
5850 if (method->wrapper_type != MONO_WRAPPER_NONE) {
5851 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
5852 cil_method = cmethod;
5853 } else if (constrained_call) {
5854 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
5856 * This is needed since get_method_constrained can't find
5857 * the method in klass representing a type var.
5858 * The type var is guaranteed to be a reference type in this
5861 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
5862 cil_method = cmethod;
5863 g_assert (!cmethod->klass->valuetype);
5865 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
5868 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
5869 cil_method = cmethod;
5874 if (!dont_verify && !cfg->skip_visibility) {
5875 MonoMethod *target_method = cil_method;
5876 if (method->is_inflated) {
5877 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
5879 if (!mono_method_can_access_method (method_definition, target_method) &&
5880 !mono_method_can_access_method (method, cil_method))
5881 METHOD_ACCESS_FAILURE;
5884 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
5885 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
5887 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
5888 /* MS.NET seems to silently convert this to a callvirt */
5891 if (!cmethod->klass->inited)
5892 if (!mono_class_init (cmethod->klass))
5895 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
5896 mini_class_is_system_array (cmethod->klass)) {
5897 array_rank = cmethod->klass->rank;
5898 fsig = mono_method_signature (cmethod);
5900 if (mono_method_signature (cmethod)->pinvoke) {
5901 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
5902 check_for_pending_exc, FALSE);
5903 fsig = mono_method_signature (wrapper);
5904 } else if (constrained_call) {
5905 fsig = mono_method_signature (cmethod);
5907 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
5911 mono_save_token_info (cfg, image, token, cil_method);
5913 n = fsig->param_count + fsig->hasthis;
5915 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
5916 if (check_linkdemand (cfg, method, cmethod))
5918 CHECK_CFG_EXCEPTION;
5921 if (cmethod->string_ctor)
5922 g_assert_not_reached ();
5925 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
5928 if (!cfg->generic_sharing_context && cmethod)
5929 g_assert (!mono_method_check_context_used (cmethod));
5933 //g_assert (!virtual || fsig->hasthis);
5937 if (constrained_call) {
5939 * We have the `constrained.' prefix opcode.
5941 if (constrained_call->valuetype && !cmethod->klass->valuetype) {
5945 * The type parameter is instantiated as a valuetype,
5946 * but that type doesn't override the method we're
5947 * calling, so we need to box `this'.
5949 dreg = alloc_dreg (cfg, STACK_VTYPE);
5950 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADV_MEMBASE, dreg, sp [0]->dreg, 0);
5951 ins->klass = constrained_call;
5952 sp [0] = handle_box (cfg, ins, constrained_call);
5953 } else if (!constrained_call->valuetype) {
5954 int dreg = alloc_preg (cfg);
5957 * The type parameter is instantiated as a reference
5958 * type. We have a managed pointer on the stack, so
5959 * we need to dereference it here.
5961 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
5962 ins->type = STACK_OBJ;
5964 } else if (cmethod->klass->valuetype)
5966 constrained_call = NULL;
5969 if (*ip != CEE_CALLI && check_call_signature (cfg, fsig, sp))
5973 if (cmethod && ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
5974 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
5975 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
5976 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
5977 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
5980 * Pass vtable iff target method might
5981 * be shared, which means that sharing
5982 * is enabled for its class and its
5983 * context is sharable (and it's not a
5986 if (sharing_enabled && context_sharable &&
5987 !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
5991 if (cmethod && mini_method_get_context (cmethod) &&
5992 mini_method_get_context (cmethod)->method_inst) {
5993 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
5994 MonoGenericContext *context = mini_method_get_context (cmethod);
5995 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
5997 g_assert (!pass_vtable);
5999 if (sharing_enabled && context_sharable)
6003 if (cfg->generic_sharing_context && cmethod) {
6004 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
6006 context_used = mono_method_check_context_used (cmethod);
6008 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
6009 /* Generic method interface
6010 calls are resolved via a
6011 helper function and don't
6013 if (!cmethod_context || !cmethod_context->method_inst)
6014 pass_imt_from_rgctx = TRUE;
6018 * If a shared method calls another
6019 * shared method then the caller must
6020 * have a generic sharing context
6021 * because the magic trampoline
6022 * requires it. FIXME: We shouldn't
6023 * have to force the vtable/mrgctx
6024 * variable here. Instead there
6025 * should be a flag in the cfg to
6026 * request a generic sharing context.
6029 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
6030 mono_get_vtable_var (cfg);
6035 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
6037 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
6039 CHECK_TYPELOAD (cmethod->klass);
6040 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
6045 g_assert (!vtable_arg);
6048 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
6050 EMIT_NEW_METHOD_RGCTX_CONST (cfg, vtable_arg, cmethod);
6053 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
6054 MONO_METHOD_IS_FINAL (cmethod)) {
6061 if (pass_imt_from_rgctx) {
6062 g_assert (!pass_vtable);
6065 imt_arg = emit_get_rgctx_method (cfg, context_used,
6066 cmethod, MONO_RGCTX_INFO_METHOD);
6072 MONO_INST_NEW (cfg, check, OP_CHECK_THIS);
6073 check->sreg1 = sp [0]->dreg;
6074 MONO_ADD_INS (cfg->cbb, check);
6077 /* Calling virtual generic methods */
6078 if (cmethod && virtual &&
6079 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
6080 !(MONO_METHOD_IS_FINAL (cmethod) &&
6081 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
6082 mono_method_signature (cmethod)->generic_param_count) {
6083 MonoInst *this_temp, *this_arg_temp, *store;
6084 MonoInst *iargs [4];
6086 g_assert (mono_method_signature (cmethod)->is_inflated);
6088 /* Prevent inlining of methods that contain indirect calls */
6091 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK
6092 if (!(cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) &&
6093 cmethod->wrapper_type == MONO_WRAPPER_NONE) {
6094 g_assert (!imt_arg);
6096 imt_arg = emit_get_rgctx_method (cfg, context_used,
6097 cmethod, MONO_RGCTX_INFO_METHOD_CONTEXT);
6101 cfg->disable_aot = TRUE;
6102 g_assert (cmethod->is_inflated);
6103 EMIT_NEW_PCONST (cfg, imt_arg,
6104 ((MonoMethodInflated*)cmethod)->context.method_inst);
6106 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, sp [0], imt_arg);
6110 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
6111 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
6112 MONO_ADD_INS (bblock, store);
6114 /* FIXME: This should be a managed pointer */
6115 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
6117 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
6119 iargs [1] = emit_get_rgctx_method (cfg, context_used,
6120 cmethod, MONO_RGCTX_INFO_METHOD);
6121 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
6122 addr = mono_emit_jit_icall (cfg,
6123 mono_helper_compile_generic_method, iargs);
6125 EMIT_NEW_METHODCONST (cfg, iargs [1], cmethod);
6126 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
6127 addr = mono_emit_jit_icall (cfg, mono_helper_compile_generic_method, iargs);
6130 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
6132 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6135 if (!MONO_TYPE_IS_VOID (fsig->ret))
6144 /* FIXME: runtime generic context pointer for jumps? */
6145 /* FIXME: handle this for generic sharing eventually */
6146 if ((ins_flag & MONO_INST_TAILCALL) && !cfg->generic_sharing_context && !vtable_arg && cmethod && (*ip == CEE_CALL) &&
6147 (mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)))) {
6150 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6153 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6154 call->tail_call = TRUE;
6155 call->method = cmethod;
6156 call->signature = mono_method_signature (cmethod);
6159 /* Handle tail calls similarly to calls */
6160 call->inst.opcode = OP_TAILCALL;
6162 mono_arch_emit_call (cfg, call);
6165 * We implement tail calls by storing the actual arguments into the
6166 * argument variables, then emitting a CEE_JMP.
6168 for (i = 0; i < n; ++i) {
6169 /* Prevent argument from being register allocated */
6170 arg_array [i]->flags |= MONO_INST_VOLATILE;
6171 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6175 ins = (MonoInst*)call;
6176 ins->inst_p0 = cmethod;
6177 ins->inst_p1 = arg_array [0];
6178 MONO_ADD_INS (bblock, ins);
6179 link_bblock (cfg, bblock, end_bblock);
6180 start_new_bblock = 1;
6181 /* skip CEE_RET as well */
6187 /* Conversion to a JIT intrinsic */
6188 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
6189 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6190 type_to_eval_stack_type ((cfg), fsig->ret, ins);
6201 if ((cfg->opt & MONO_OPT_INLINE) && cmethod &&
6202 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
6203 mono_method_check_inlining (cfg, cmethod) &&
6204 !g_list_find (dont_inline, cmethod)) {
6206 gboolean allways = FALSE;
6208 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
6209 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
6210 /* Prevent inlining of methods that call wrappers */
6212 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
6216 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, allways))) {
6218 cfg->real_offset += 5;
6221 if (!MONO_TYPE_IS_VOID (fsig->ret))
6222 /* *sp is already set by inline_method */
6225 inline_costs += costs;
6231 inline_costs += 10 * num_calls++;
6233 /* Tail recursion elimination */
6234 if ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
6235 gboolean has_vtargs = FALSE;
6238 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6241 /* keep it simple */
6242 for (i = fsig->param_count - 1; i >= 0; i--) {
6243 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
6248 for (i = 0; i < n; ++i)
6249 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6250 MONO_INST_NEW (cfg, ins, OP_BR);
6251 MONO_ADD_INS (bblock, ins);
6252 tblock = start_bblock->out_bb [0];
6253 link_bblock (cfg, bblock, tblock);
6254 ins->inst_target_bb = tblock;
6255 start_new_bblock = 1;
6257 /* skip the CEE_RET, too */
6258 if (ip_in_bb (cfg, bblock, ip + 5))
6268 /* Generic sharing */
6269 /* FIXME: only do this for generic methods if
6270 they are not shared! */
6271 if (context_used && !imt_arg && !array_rank &&
6272 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
6273 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
6274 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
6275 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
6278 g_assert (cfg->generic_sharing_context && cmethod);
6282 * We are compiling a call to a
6283 * generic method from shared code,
6284 * which means that we have to look up
6285 * the method in the rgctx and do an
6288 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
6291 /* Indirect calls */
6293 g_assert (!imt_arg);
6295 if (*ip == CEE_CALL)
6296 g_assert (context_used);
6297 else if (*ip == CEE_CALLI)
6298 g_assert (!vtable_arg);
6300 /* FIXME: what the hell is this??? */
6301 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
6302 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
6304 /* Prevent inlining of methods with indirect calls */
6308 #ifdef MONO_ARCH_RGCTX_REG
6310 int rgctx_reg = mono_alloc_preg (cfg);
6312 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
6313 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6314 call = (MonoCallInst*)ins;
6315 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
6316 cfg->uses_rgctx_reg = TRUE;
6321 if (addr->opcode == OP_AOTCONST && addr->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6323 * Instead of emitting an indirect call, emit a direct call
6324 * with the contents of the aotconst as the patch info.
6326 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_p0, fsig, sp);
6329 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6332 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6333 if (fsig->pinvoke && !fsig->ret->byref) {
6337 * Native code might return non register sized integers
6338 * without initializing the upper bits.
6340 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
6341 case OP_LOADI1_MEMBASE:
6342 widen_op = OP_ICONV_TO_I1;
6344 case OP_LOADU1_MEMBASE:
6345 widen_op = OP_ICONV_TO_U1;
6347 case OP_LOADI2_MEMBASE:
6348 widen_op = OP_ICONV_TO_I2;
6350 case OP_LOADU2_MEMBASE:
6351 widen_op = OP_ICONV_TO_U2;
6357 if (widen_op != -1) {
6358 int dreg = alloc_preg (cfg);
6361 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
6362 widen->type = ins->type;
6379 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
6380 if (sp [fsig->param_count]->type == STACK_OBJ) {
6381 MonoInst *iargs [2];
6384 iargs [1] = sp [fsig->param_count];
6386 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
6389 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
6390 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, sp [fsig->param_count]->dreg);
6391 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
6392 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6394 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
6397 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
6398 if (!cmethod->klass->element_class->valuetype && !readonly)
6399 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
6402 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6405 g_assert_not_reached ();
6413 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
6415 if (!MONO_TYPE_IS_VOID (fsig->ret))
6426 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL,
6428 } else if (imt_arg) {
6429 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, imt_arg);
6431 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, NULL);
6434 if (!MONO_TYPE_IS_VOID (fsig->ret))
6442 if (cfg->method != method) {
6443 /* return from inlined method */
6445 * If in_count == 0, that means the ret is unreachable due to
6446 * being preceeded by a throw. In that case, inline_method () will
6447 * handle setting the return value
6448 * (test case: test_0_inline_throw ()).
6450 if (return_var && cfg->cbb->in_count) {
6454 //g_assert (returnvar != -1);
6455 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
6456 cfg->ret_var_set = TRUE;
6460 MonoType *ret_type = mono_method_signature (method)->ret;
6462 g_assert (!return_var);
6465 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
6468 if (!cfg->vret_addr) {
6471 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
6473 EMIT_NEW_RETLOADA (cfg, ret_addr);
6475 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
6476 ins->klass = mono_class_from_mono_type (ret_type);
6479 #ifdef MONO_ARCH_SOFT_FLOAT
6480 if (!ret_type->byref && ret_type->type == MONO_TYPE_R4) {
6481 MonoInst *iargs [1];
6485 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
6486 mono_arch_emit_setret (cfg, method, conv);
6488 mono_arch_emit_setret (cfg, method, *sp);
6491 mono_arch_emit_setret (cfg, method, *sp);
6496 if (sp != stack_start)
6498 MONO_INST_NEW (cfg, ins, OP_BR);
6500 ins->inst_target_bb = end_bblock;
6501 MONO_ADD_INS (bblock, ins);
6502 link_bblock (cfg, bblock, end_bblock);
6503 start_new_bblock = 1;
6507 MONO_INST_NEW (cfg, ins, OP_BR);
6509 target = ip + 1 + (signed char)(*ip);
6511 GET_BBLOCK (cfg, tblock, target);
6512 link_bblock (cfg, bblock, tblock);
6513 ins->inst_target_bb = tblock;
6514 if (sp != stack_start) {
6515 handle_stack_args (cfg, stack_start, sp - stack_start);
6517 CHECK_UNVERIFIABLE (cfg);
6519 MONO_ADD_INS (bblock, ins);
6520 start_new_bblock = 1;
6521 inline_costs += BRANCH_COST;
6535 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
6537 target = ip + 1 + *(signed char*)ip;
6543 inline_costs += BRANCH_COST;
6547 MONO_INST_NEW (cfg, ins, OP_BR);
6550 target = ip + 4 + (gint32)read32(ip);
6552 GET_BBLOCK (cfg, tblock, target);
6553 link_bblock (cfg, bblock, tblock);
6554 ins->inst_target_bb = tblock;
6555 if (sp != stack_start) {
6556 handle_stack_args (cfg, stack_start, sp - stack_start);
6558 CHECK_UNVERIFIABLE (cfg);
6561 MONO_ADD_INS (bblock, ins);
6563 start_new_bblock = 1;
6564 inline_costs += BRANCH_COST;
6571 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
6572 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
6573 guint32 opsize = is_short ? 1 : 4;
6575 CHECK_OPSIZE (opsize);
6577 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
6580 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
6585 GET_BBLOCK (cfg, tblock, target);
6586 link_bblock (cfg, bblock, tblock);
6587 GET_BBLOCK (cfg, tblock, ip);
6588 link_bblock (cfg, bblock, tblock);
6590 if (sp != stack_start) {
6591 handle_stack_args (cfg, stack_start, sp - stack_start);
6592 CHECK_UNVERIFIABLE (cfg);
6595 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
6596 cmp->sreg1 = sp [0]->dreg;
6597 type_from_op (cmp, sp [0], NULL);
6600 #if SIZEOF_REGISTER == 4
6601 if (cmp->opcode == OP_LCOMPARE_IMM) {
6602 /* Convert it to OP_LCOMPARE */
6603 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6604 ins->type = STACK_I8;
6605 ins->dreg = alloc_dreg (cfg, STACK_I8);
6607 MONO_ADD_INS (bblock, ins);
6608 cmp->opcode = OP_LCOMPARE;
6609 cmp->sreg2 = ins->dreg;
6612 MONO_ADD_INS (bblock, cmp);
6614 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
6615 type_from_op (ins, sp [0], NULL);
6616 MONO_ADD_INS (bblock, ins);
6617 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
6618 GET_BBLOCK (cfg, tblock, target);
6619 ins->inst_true_bb = tblock;
6620 GET_BBLOCK (cfg, tblock, ip);
6621 ins->inst_false_bb = tblock;
6622 start_new_bblock = 2;
6625 inline_costs += BRANCH_COST;
6640 MONO_INST_NEW (cfg, ins, *ip);
6642 target = ip + 4 + (gint32)read32(ip);
6648 inline_costs += BRANCH_COST;
6652 MonoBasicBlock **targets;
6653 MonoBasicBlock *default_bblock;
6654 MonoJumpInfoBBTable *table;
6655 int offset_reg = alloc_preg (cfg);
6656 int target_reg = alloc_preg (cfg);
6657 int table_reg = alloc_preg (cfg);
6658 int sum_reg = alloc_preg (cfg);
6659 gboolean use_op_switch;
6663 n = read32 (ip + 1);
6666 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
6670 CHECK_OPSIZE (n * sizeof (guint32));
6671 target = ip + n * sizeof (guint32);
6673 GET_BBLOCK (cfg, default_bblock, target);
6675 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
6676 for (i = 0; i < n; ++i) {
6677 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
6678 targets [i] = tblock;
6682 if (sp != stack_start) {
6684 * Link the current bb with the targets as well, so handle_stack_args
6685 * will set their in_stack correctly.
6687 link_bblock (cfg, bblock, default_bblock);
6688 for (i = 0; i < n; ++i)
6689 link_bblock (cfg, bblock, targets [i]);
6691 handle_stack_args (cfg, stack_start, sp - stack_start);
6693 CHECK_UNVERIFIABLE (cfg);
6696 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
6697 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
6700 for (i = 0; i < n; ++i)
6701 link_bblock (cfg, bblock, targets [i]);
6703 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
6704 table->table = targets;
6705 table->table_size = n;
6707 use_op_switch = FALSE;
6709 /* ARM implements SWITCH statements differently */
6710 /* FIXME: Make it use the generic implementation */
6711 if (!cfg->compile_aot)
6712 use_op_switch = TRUE;
6715 if (use_op_switch) {
6716 MONO_INST_NEW (cfg, ins, OP_SWITCH);
6717 ins->sreg1 = src1->dreg;
6718 ins->inst_p0 = table;
6719 ins->inst_many_bb = targets;
6720 ins->klass = GUINT_TO_POINTER (n);
6721 MONO_ADD_INS (cfg->cbb, ins);
6723 if (sizeof (gpointer) == 8)
6724 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
6726 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
6728 #if SIZEOF_REGISTER == 8
6729 /* The upper word might not be zero, and we add it to a 64 bit address later */
6730 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
6733 if (cfg->compile_aot) {
6734 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
6736 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
6737 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
6738 ins->inst_p0 = table;
6739 ins->dreg = table_reg;
6740 MONO_ADD_INS (cfg->cbb, ins);
6743 /* FIXME: Use load_memindex */
6744 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
6745 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
6746 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
6748 start_new_bblock = 1;
6749 inline_costs += (BRANCH_COST * 2);
6769 dreg = alloc_freg (cfg);
6772 dreg = alloc_lreg (cfg);
6775 dreg = alloc_preg (cfg);
6778 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
6779 ins->type = ldind_type [*ip - CEE_LDIND_I1];
6780 ins->flags |= ins_flag;
6782 MONO_ADD_INS (bblock, ins);
6797 #if HAVE_WRITE_BARRIERS
6798 if (*ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0))) {
6799 /* insert call to write barrier */
6800 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
6801 mono_emit_method_call (cfg, write_barrier, sp, NULL);
6808 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
6809 ins->flags |= ins_flag;
6811 MONO_ADD_INS (bblock, ins);
6819 MONO_INST_NEW (cfg, ins, (*ip));
6821 ins->sreg1 = sp [0]->dreg;
6822 ins->sreg2 = sp [1]->dreg;
6823 type_from_op (ins, sp [0], sp [1]);
6825 ins->dreg = alloc_dreg ((cfg), (ins)->type);
6827 /* Use the immediate opcodes if possible */
6828 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
6829 int imm_opcode = mono_op_to_op_imm (ins->opcode);
6830 if (imm_opcode != -1) {
6831 ins->opcode = imm_opcode;
6832 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
6835 sp [1]->opcode = OP_NOP;
6839 MONO_ADD_INS ((cfg)->cbb, (ins));
6842 mono_decompose_opcode (cfg, ins);
6859 MONO_INST_NEW (cfg, ins, (*ip));
6861 ins->sreg1 = sp [0]->dreg;
6862 ins->sreg2 = sp [1]->dreg;
6863 type_from_op (ins, sp [0], sp [1]);
6865 ADD_WIDEN_OP (ins, sp [0], sp [1]);
6866 ins->dreg = alloc_dreg ((cfg), (ins)->type);
6868 /* FIXME: Pass opcode to is_inst_imm */
6870 /* Use the immediate opcodes if possible */
6871 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
6874 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
6875 if (imm_opcode != -1) {
6876 ins->opcode = imm_opcode;
6877 if (sp [1]->opcode == OP_I8CONST) {
6878 #if SIZEOF_REGISTER == 8
6879 ins->inst_imm = sp [1]->inst_l;
6881 ins->inst_ls_word = sp [1]->inst_ls_word;
6882 ins->inst_ms_word = sp [1]->inst_ms_word;
6886 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
6889 /* Might be followed by an instruction added by ADD_WIDEN_OP */
6890 if (sp [1]->next == NULL)
6891 sp [1]->opcode = OP_NOP;
6894 MONO_ADD_INS ((cfg)->cbb, (ins));
6897 mono_decompose_opcode (cfg, ins);
6910 case CEE_CONV_OVF_I8:
6911 case CEE_CONV_OVF_U8:
6915 /* Special case this earlier so we have long constants in the IR */
6916 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
6917 int data = sp [-1]->inst_c0;
6918 sp [-1]->opcode = OP_I8CONST;
6919 sp [-1]->type = STACK_I8;
6920 #if SIZEOF_REGISTER == 8
6921 if ((*ip) == CEE_CONV_U8)
6922 sp [-1]->inst_c0 = (guint32)data;
6924 sp [-1]->inst_c0 = data;
6926 sp [-1]->inst_ls_word = data;
6927 if ((*ip) == CEE_CONV_U8)
6928 sp [-1]->inst_ms_word = 0;
6930 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
6932 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
6939 case CEE_CONV_OVF_I4:
6940 case CEE_CONV_OVF_I1:
6941 case CEE_CONV_OVF_I2:
6942 case CEE_CONV_OVF_I:
6943 case CEE_CONV_OVF_U:
6946 if (sp [-1]->type == STACK_R8) {
6947 ADD_UNOP (CEE_CONV_OVF_I8);
6954 case CEE_CONV_OVF_U1:
6955 case CEE_CONV_OVF_U2:
6956 case CEE_CONV_OVF_U4:
6959 if (sp [-1]->type == STACK_R8) {
6960 ADD_UNOP (CEE_CONV_OVF_U8);
6967 case CEE_CONV_OVF_I1_UN:
6968 case CEE_CONV_OVF_I2_UN:
6969 case CEE_CONV_OVF_I4_UN:
6970 case CEE_CONV_OVF_I8_UN:
6971 case CEE_CONV_OVF_U1_UN:
6972 case CEE_CONV_OVF_U2_UN:
6973 case CEE_CONV_OVF_U4_UN:
6974 case CEE_CONV_OVF_U8_UN:
6975 case CEE_CONV_OVF_I_UN:
6976 case CEE_CONV_OVF_U_UN:
6986 case CEE_ADD_OVF_UN:
6988 case CEE_MUL_OVF_UN:
6990 case CEE_SUB_OVF_UN:
6998 token = read32 (ip + 1);
6999 klass = mini_get_class (method, token, generic_context);
7000 CHECK_TYPELOAD (klass);
7002 if (generic_class_is_reference_type (cfg, klass)) {
7003 MonoInst *store, *load;
7004 int dreg = alloc_preg (cfg);
7006 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
7007 load->flags |= ins_flag;
7008 MONO_ADD_INS (cfg->cbb, load);
7010 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
7011 store->flags |= ins_flag;
7012 MONO_ADD_INS (cfg->cbb, store);
7014 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7026 token = read32 (ip + 1);
7027 klass = mini_get_class (method, token, generic_context);
7028 CHECK_TYPELOAD (klass);
7030 /* Optimize the common ldobj+stloc combination */
7040 loc_index = ip [5] - CEE_STLOC_0;
7047 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
7048 CHECK_LOCAL (loc_index);
7050 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7051 ins->dreg = cfg->locals [loc_index]->dreg;
7057 /* Optimize the ldobj+stobj combination */
7058 /* The reference case ends up being a load+store anyway */
7059 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 9) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
7064 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7071 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7080 CHECK_STACK_OVF (1);
7082 n = read32 (ip + 1);
7084 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
7085 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
7086 ins->type = STACK_OBJ;
7089 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
7090 MonoInst *iargs [1];
7092 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
7093 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
7095 if (cfg->opt & MONO_OPT_SHARED) {
7096 MonoInst *iargs [3];
7098 if (cfg->compile_aot) {
7099 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
7101 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7102 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
7103 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
7104 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
7105 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7107 if (bblock->out_of_line) {
7108 MonoInst *iargs [2];
7110 if (image == mono_defaults.corlib) {
7112 * Avoid relocations in AOT and save some space by using a
7113 * version of helper_ldstr specialized to mscorlib.
7115 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
7116 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
7118 /* Avoid creating the string object */
7119 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
7120 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
7121 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
7125 if (cfg->compile_aot) {
7126 NEW_LDSTRCONST (cfg, ins, image, n);
7128 MONO_ADD_INS (bblock, ins);
7131 NEW_PCONST (cfg, ins, NULL);
7132 ins->type = STACK_OBJ;
7133 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7135 MONO_ADD_INS (bblock, ins);
7144 MonoInst *iargs [2];
7145 MonoMethodSignature *fsig;
7148 MonoInst *vtable_arg = NULL;
7151 token = read32 (ip + 1);
7152 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7155 fsig = mono_method_get_signature (cmethod, image, token);
7157 mono_save_token_info (cfg, image, token, cmethod);
7159 if (!mono_class_init (cmethod->klass))
7162 if (cfg->generic_sharing_context)
7163 context_used = mono_method_check_context_used (cmethod);
7165 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
7166 if (check_linkdemand (cfg, method, cmethod))
7168 CHECK_CFG_EXCEPTION;
7169 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
7170 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
7173 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7174 mono_method_is_generic_sharable_impl (cmethod, TRUE)) {
7175 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7177 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7178 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7180 EMIT_NEW_METHOD_RGCTX_CONST (cfg, vtable_arg, cmethod);
7184 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
7185 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7187 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7189 CHECK_TYPELOAD (cmethod->klass);
7190 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7195 n = fsig->param_count;
7199 * Generate smaller code for the common newobj <exception> instruction in
7200 * argument checking code.
7202 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
7203 is_exception_class (cmethod->klass) && n <= 2 &&
7204 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
7205 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
7206 MonoInst *iargs [3];
7208 g_assert (!vtable_arg);
7212 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
7215 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
7219 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
7224 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
7227 g_assert_not_reached ();
7235 /* move the args to allow room for 'this' in the first position */
7241 /* check_call_signature () requires sp[0] to be set */
7242 this_ins.type = STACK_OBJ;
7244 if (check_call_signature (cfg, fsig, sp))
7249 if (mini_class_is_system_array (cmethod->klass)) {
7250 g_assert (!vtable_arg);
7253 *sp = emit_get_rgctx_method (cfg, context_used,
7254 cmethod, MONO_RGCTX_INFO_METHOD);
7256 EMIT_NEW_METHODCONST (cfg, *sp, cmethod);
7259 /* Avoid varargs in the common case */
7260 if (fsig->param_count == 1)
7261 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
7262 else if (fsig->param_count == 2)
7263 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
7265 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
7266 } else if (cmethod->string_ctor) {
7267 g_assert (!context_used);
7268 g_assert (!vtable_arg);
7269 /* we simply pass a null pointer */
7270 EMIT_NEW_PCONST (cfg, *sp, NULL);
7271 /* now call the string ctor */
7272 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, sp, NULL, NULL);
7274 MonoInst* callvirt_this_arg = NULL;
7276 if (cmethod->klass->valuetype) {
7277 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
7278 MONO_EMIT_NEW_VZERO (cfg, iargs [0]->dreg, cmethod->klass);
7279 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
7284 * The code generated by mini_emit_virtual_call () expects
7285 * iargs [0] to be a boxed instance, but luckily the vcall
7286 * will be transformed into a normal call there.
7288 } else if (context_used) {
7292 if (cfg->opt & MONO_OPT_SHARED)
7293 rgctx_info = MONO_RGCTX_INFO_KLASS;
7295 rgctx_info = MONO_RGCTX_INFO_VTABLE;
7296 data = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, rgctx_info);
7298 alloc = handle_alloc_from_inst (cfg, cmethod->klass, data, FALSE);
7301 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7303 CHECK_TYPELOAD (cmethod->klass);
7306 * TypeInitializationExceptions thrown from the mono_runtime_class_init
7307 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
7308 * As a workaround, we call class cctors before allocating objects.
7310 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
7311 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
7312 if (cfg->verbose_level > 2)
7313 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
7314 class_inits = g_slist_prepend (class_inits, vtable);
7317 alloc = handle_alloc (cfg, cmethod->klass, FALSE);
7322 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
7324 /* Now call the actual ctor */
7325 /* Avoid virtual calls to ctors if possible */
7326 if (cmethod->klass->marshalbyref)
7327 callvirt_this_arg = sp [0];
7329 if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7330 mono_method_check_inlining (cfg, cmethod) &&
7331 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
7332 !g_list_find (dont_inline, cmethod)) {
7335 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
7336 cfg->real_offset += 5;
7339 inline_costs += costs - 5;
7342 mono_emit_method_call_full (cfg, cmethod, fsig, sp, callvirt_this_arg, NULL);
7344 } else if (context_used &&
7345 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
7346 !mono_class_generic_sharing_enabled (cmethod->klass))) {
7347 MonoInst *cmethod_addr;
7349 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7350 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7352 mono_emit_rgctx_calli (cfg, fsig, sp, cmethod_addr, vtable_arg);
7355 mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp,
7356 callvirt_this_arg, NULL, vtable_arg);
7360 if (alloc == NULL) {
7362 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
7363 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
7377 token = read32 (ip + 1);
7378 klass = mini_get_class (method, token, generic_context);
7379 CHECK_TYPELOAD (klass);
7380 if (sp [0]->type != STACK_OBJ)
7383 if (cfg->generic_sharing_context)
7384 context_used = mono_class_check_context_used (klass);
7393 args [1] = emit_get_rgctx_klass (cfg, context_used,
7394 klass, MONO_RGCTX_INFO_KLASS);
7396 ins = mono_emit_jit_icall (cfg, mono_object_castclass, args);
7400 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7401 MonoMethod *mono_castclass;
7402 MonoInst *iargs [1];
7405 mono_castclass = mono_marshal_get_castclass (klass);
7408 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7409 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7410 g_assert (costs > 0);
7413 cfg->real_offset += 5;
7418 inline_costs += costs;
7421 ins = handle_castclass (cfg, klass, *sp);
7431 token = read32 (ip + 1);
7432 klass = mini_get_class (method, token, generic_context);
7433 CHECK_TYPELOAD (klass);
7434 if (sp [0]->type != STACK_OBJ)
7437 if (cfg->generic_sharing_context)
7438 context_used = mono_class_check_context_used (klass);
7447 args [1] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
7449 *sp = mono_emit_jit_icall (cfg, mono_object_isinst, args);
7453 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7454 MonoMethod *mono_isinst;
7455 MonoInst *iargs [1];
7458 mono_isinst = mono_marshal_get_isinst (klass);
7461 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
7462 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7463 g_assert (costs > 0);
7466 cfg->real_offset += 5;
7471 inline_costs += costs;
7474 ins = handle_isinst (cfg, klass, *sp);
7481 case CEE_UNBOX_ANY: {
7485 token = read32 (ip + 1);
7486 klass = mini_get_class (method, token, generic_context);
7487 CHECK_TYPELOAD (klass);
7489 mono_save_token_info (cfg, image, token, klass);
7491 if (cfg->generic_sharing_context)
7492 context_used = mono_class_check_context_used (klass);
7494 if (generic_class_is_reference_type (cfg, klass)) {
7497 MonoInst *iargs [2];
7502 iargs [1] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
7503 ins = mono_emit_jit_icall (cfg, mono_object_castclass, iargs);
7507 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7508 MonoMethod *mono_castclass;
7509 MonoInst *iargs [1];
7512 mono_castclass = mono_marshal_get_castclass (klass);
7515 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7516 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7518 g_assert (costs > 0);
7521 cfg->real_offset += 5;
7525 inline_costs += costs;
7527 ins = handle_castclass (cfg, klass, *sp);
7535 if (mono_class_is_nullable (klass)) {
7536 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
7543 ins = handle_unbox (cfg, klass, sp, context_used);
7549 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7562 token = read32 (ip + 1);
7563 klass = mini_get_class (method, token, generic_context);
7564 CHECK_TYPELOAD (klass);
7566 mono_save_token_info (cfg, image, token, klass);
7568 if (cfg->generic_sharing_context)
7569 context_used = mono_class_check_context_used (klass);
7571 if (generic_class_is_reference_type (cfg, klass)) {
7577 if (klass == mono_defaults.void_class)
7579 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
7581 /* frequent check in generic code: box (struct), brtrue */
7582 if (!mono_class_is_nullable (klass) &&
7583 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) && (ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S)) {
7584 /*printf ("box-brtrue opt at 0x%04x in %s\n", real_offset, method->name);*/
7586 MONO_INST_NEW (cfg, ins, OP_BR);
7587 if (*ip == CEE_BRTRUE_S) {
7590 target = ip + 1 + (signed char)(*ip);
7595 target = ip + 4 + (gint)(read32 (ip));
7598 GET_BBLOCK (cfg, tblock, target);
7599 link_bblock (cfg, bblock, tblock);
7600 ins->inst_target_bb = tblock;
7601 GET_BBLOCK (cfg, tblock, ip);
7603 * This leads to some inconsistency, since the two bblocks are
7604 * not really connected, but it is needed for handling stack
7605 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
7606 * FIXME: This should only be needed if sp != stack_start, but that
7607 * doesn't work for some reason (test failure in mcs/tests on x86).
7609 link_bblock (cfg, bblock, tblock);
7610 if (sp != stack_start) {
7611 handle_stack_args (cfg, stack_start, sp - stack_start);
7613 CHECK_UNVERIFIABLE (cfg);
7615 MONO_ADD_INS (bblock, ins);
7616 start_new_bblock = 1;
7624 if (cfg->opt & MONO_OPT_SHARED)
7625 rgctx_info = MONO_RGCTX_INFO_KLASS;
7627 rgctx_info = MONO_RGCTX_INFO_VTABLE;
7628 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
7629 *sp++ = handle_box_from_inst (cfg, val, klass, context_used, data);
7631 *sp++ = handle_box (cfg, val, klass);
7642 token = read32 (ip + 1);
7643 klass = mini_get_class (method, token, generic_context);
7644 CHECK_TYPELOAD (klass);
7646 mono_save_token_info (cfg, image, token, klass);
7648 if (cfg->generic_sharing_context)
7649 context_used = mono_class_check_context_used (klass);
7651 if (mono_class_is_nullable (klass)) {
7654 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
7655 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
7659 ins = handle_unbox (cfg, klass, sp, context_used);
7669 MonoClassField *field;
7673 if (*ip == CEE_STFLD) {
7680 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
7682 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
7685 token = read32 (ip + 1);
7686 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7687 field = mono_method_get_wrapper_data (method, token);
7688 klass = field->parent;
7691 field = mono_field_from_token (image, token, &klass, generic_context);
7695 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
7696 FIELD_ACCESS_FAILURE;
7697 mono_class_init (klass);
7699 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
7700 if (*ip == CEE_STFLD) {
7701 if (target_type_is_incompatible (cfg, field->type, sp [1]))
7703 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
7704 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
7705 MonoInst *iargs [5];
7708 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
7709 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
7710 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
7714 if (cfg->opt & MONO_OPT_INLINE) {
7715 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
7716 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7717 g_assert (costs > 0);
7719 cfg->real_offset += 5;
7722 inline_costs += costs;
7724 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
7729 #if HAVE_WRITE_BARRIERS
7730 if (mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
7731 /* insert call to write barrier */
7732 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
7733 MonoInst *iargs [2];
7736 dreg = alloc_preg (cfg);
7737 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
7739 mono_emit_method_call (cfg, write_barrier, iargs, NULL);
7743 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
7745 store->flags |= ins_flag;
7752 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
7753 MonoMethod *wrapper = (*ip == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
7754 MonoInst *iargs [4];
7757 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
7758 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
7759 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
7760 if ((cfg->opt & MONO_OPT_INLINE) && !MONO_TYPE_ISSTRUCT (mono_method_signature (wrapper)->ret)) {
7761 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
7762 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7764 g_assert (costs > 0);
7766 cfg->real_offset += 5;
7770 inline_costs += costs;
7772 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
7776 if (sp [0]->type == STACK_VTYPE) {
7779 /* Have to compute the address of the variable */
7781 var = get_vreg_to_inst (cfg, sp [0]->dreg);
7783 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
7785 g_assert (var->klass == klass);
7787 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
7791 if (*ip == CEE_LDFLDA) {
7792 dreg = alloc_preg (cfg);
7794 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
7795 ins->klass = mono_class_from_mono_type (field->type);
7796 ins->type = STACK_MP;
7801 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
7802 load->flags |= ins_flag;
7813 MonoClassField *field;
7814 gpointer addr = NULL;
7815 gboolean is_special_static;
7818 token = read32 (ip + 1);
7820 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7821 field = mono_method_get_wrapper_data (method, token);
7822 klass = field->parent;
7825 field = mono_field_from_token (image, token, &klass, generic_context);
7828 mono_class_init (klass);
7829 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
7830 FIELD_ACCESS_FAILURE;
7833 * We can only support shared generic static
7834 * field access on architectures where the
7835 * trampoline code has been extended to handle
7836 * the generic class init.
7838 #ifndef MONO_ARCH_VTABLE_REG
7839 GENERIC_SHARING_FAILURE (*ip);
7842 if (cfg->generic_sharing_context)
7843 context_used = mono_class_check_context_used (klass);
7845 g_assert (!(field->type->attrs & FIELD_ATTRIBUTE_LITERAL));
7847 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
7848 * to be called here.
7850 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
7851 mono_class_vtable (cfg->domain, klass);
7852 CHECK_TYPELOAD (klass);
7854 mono_domain_lock (cfg->domain);
7855 if (cfg->domain->special_static_fields)
7856 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
7857 mono_domain_unlock (cfg->domain);
7859 is_special_static = mono_class_field_is_special_static (field);
7861 /* Generate IR to compute the field address */
7863 if ((cfg->opt & MONO_OPT_SHARED) ||
7864 (cfg->compile_aot && is_special_static) ||
7865 (context_used && is_special_static)) {
7866 MonoInst *iargs [2];
7868 g_assert (field->parent);
7869 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7871 iargs [1] = emit_get_rgctx_field (cfg, context_used,
7872 field, MONO_RGCTX_INFO_CLASS_FIELD);
7874 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
7876 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
7877 } else if (context_used) {
7878 MonoInst *static_data;
7881 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
7882 method->klass->name_space, method->klass->name, method->name,
7883 depth, field->offset);
7886 if (mono_class_needs_cctor_run (klass, method)) {
7890 vtable = emit_get_rgctx_klass (cfg, context_used,
7891 klass, MONO_RGCTX_INFO_VTABLE);
7893 // FIXME: This doesn't work since it tries to pass the argument
7894 // in the normal way, instead of using MONO_ARCH_VTABLE_REG
7896 * The vtable pointer is always passed in a register regardless of
7897 * the calling convention, so assign it manually, and make a call
7898 * using a signature without parameters.
7900 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable);
7901 #ifdef MONO_ARCH_VTABLE_REG
7902 mono_call_inst_add_outarg_reg (cfg, call, vtable->dreg, MONO_ARCH_VTABLE_REG, FALSE);
7903 cfg->uses_vtable_reg = TRUE;
7910 * The pointer we're computing here is
7912 * super_info.static_data + field->offset
7914 static_data = emit_get_rgctx_klass (cfg, context_used,
7915 klass, MONO_RGCTX_INFO_STATIC_DATA);
7917 if (field->offset == 0) {
7920 int addr_reg = mono_alloc_preg (cfg);
7921 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
7923 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
7924 MonoInst *iargs [2];
7926 g_assert (field->parent);
7927 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7928 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
7929 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
7931 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
7933 CHECK_TYPELOAD (klass);
7935 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
7936 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
7937 if (cfg->verbose_level > 2)
7938 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
7939 class_inits = g_slist_prepend (class_inits, vtable);
7941 if (cfg->run_cctors) {
7943 /* This makes so that inline cannot trigger */
7944 /* .cctors: too many apps depend on them */
7945 /* running with a specific order... */
7946 if (! vtable->initialized)
7948 ex = mono_runtime_class_init_full (vtable, FALSE);
7950 set_exception_object (cfg, ex);
7951 goto exception_exit;
7955 addr = (char*)vtable->data + field->offset;
7957 if (cfg->compile_aot)
7958 EMIT_NEW_SFLDACONST (cfg, ins, field);
7960 EMIT_NEW_PCONST (cfg, ins, addr);
7963 * insert call to mono_threads_get_static_data (GPOINTER_TO_UINT (addr))
7964 * This could be later optimized to do just a couple of
7965 * memory dereferences with constant offsets.
7967 MonoInst *iargs [1];
7968 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
7969 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
7973 /* Generate IR to do the actual load/store operation */
7975 if (*ip == CEE_LDSFLDA) {
7976 ins->klass = mono_class_from_mono_type (field->type);
7977 ins->type = STACK_PTR;
7979 } else if (*ip == CEE_STSFLD) {
7984 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, ins->dreg, 0, sp [0]->dreg);
7985 store->flags |= ins_flag;
7987 gboolean is_const = FALSE;
7988 MonoVTable *vtable = NULL;
7990 if (!context_used) {
7991 vtable = mono_class_vtable (cfg->domain, klass);
7992 CHECK_TYPELOAD (klass);
7994 if (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) &&
7995 vtable->initialized && (field->type->attrs & FIELD_ATTRIBUTE_INIT_ONLY)) {
7996 gpointer addr = (char*)vtable->data + field->offset;
7997 int ro_type = field->type->type;
7998 if (ro_type == MONO_TYPE_VALUETYPE && field->type->data.klass->enumtype) {
7999 ro_type = field->type->data.klass->enum_basetype->type;
8001 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
8004 case MONO_TYPE_BOOLEAN:
8006 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
8010 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
8013 case MONO_TYPE_CHAR:
8015 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
8019 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
8024 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
8028 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
8031 #ifndef HAVE_MOVING_COLLECTOR
8034 case MONO_TYPE_STRING:
8035 case MONO_TYPE_OBJECT:
8036 case MONO_TYPE_CLASS:
8037 case MONO_TYPE_SZARRAY:
8039 case MONO_TYPE_FNPTR:
8040 case MONO_TYPE_ARRAY:
8041 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
8042 type_to_eval_stack_type ((cfg), field->type, *sp);
8048 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
8053 case MONO_TYPE_VALUETYPE:
8063 CHECK_STACK_OVF (1);
8065 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
8066 load->flags |= ins_flag;
8079 token = read32 (ip + 1);
8080 klass = mini_get_class (method, token, generic_context);
8081 CHECK_TYPELOAD (klass);
8082 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
8083 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
8094 const char *data_ptr;
8096 guint32 field_token;
8102 token = read32 (ip + 1);
8104 klass = mini_get_class (method, token, generic_context);
8105 CHECK_TYPELOAD (klass);
8107 if (cfg->generic_sharing_context)
8108 context_used = mono_class_check_context_used (klass);
8113 /* FIXME: Decompose later to help abcrem */
8116 args [0] = emit_get_rgctx_klass (cfg, context_used,
8117 mono_array_class_get (klass, 1), MONO_RGCTX_INFO_VTABLE);
8122 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
8124 if (cfg->opt & MONO_OPT_SHARED) {
8125 /* Decompose now to avoid problems with references to the domainvar */
8126 MonoInst *iargs [3];
8128 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8129 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8132 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
8134 /* Decompose later since it is needed by abcrem */
8135 MONO_INST_NEW (cfg, ins, OP_NEWARR);
8136 ins->dreg = alloc_preg (cfg);
8137 ins->sreg1 = sp [0]->dreg;
8138 ins->inst_newa_class = klass;
8139 ins->type = STACK_OBJ;
8141 MONO_ADD_INS (cfg->cbb, ins);
8142 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8143 cfg->cbb->has_array_access = TRUE;
8145 /* Needed so mono_emit_load_get_addr () gets called */
8146 mono_get_got_var (cfg);
8156 * we inline/optimize the initialization sequence if possible.
8157 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
8158 * for small sizes open code the memcpy
8159 * ensure the rva field is big enough
8161 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
8162 MonoMethod *memcpy_method = get_memcpy_method ();
8163 MonoInst *iargs [3];
8164 int add_reg = alloc_preg (cfg);
8166 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
8167 if (cfg->compile_aot) {
8168 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
8170 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
8172 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
8173 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
8182 if (sp [0]->type != STACK_OBJ)
8185 dreg = alloc_preg (cfg);
8186 MONO_INST_NEW (cfg, ins, OP_LDLEN);
8187 ins->dreg = alloc_preg (cfg);
8188 ins->sreg1 = sp [0]->dreg;
8189 ins->type = STACK_I4;
8190 MONO_ADD_INS (cfg->cbb, ins);
8191 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8192 cfg->cbb->has_array_access = TRUE;
8200 if (sp [0]->type != STACK_OBJ)
8203 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8205 klass = mini_get_class (method, read32 (ip + 1), generic_context);
8206 CHECK_TYPELOAD (klass);
8207 /* we need to make sure that this array is exactly the type it needs
8208 * to be for correctness. the wrappers are lax with their usage
8209 * so we need to ignore them here
8211 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly)
8212 mini_emit_check_array_type (cfg, sp [0], mono_array_class_get (klass, 1));
8215 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8219 case CEE_LDELEM_ANY:
8230 case CEE_LDELEM_REF: {
8236 if (*ip == CEE_LDELEM_ANY) {
8238 token = read32 (ip + 1);
8239 klass = mini_get_class (method, token, generic_context);
8240 CHECK_TYPELOAD (klass);
8241 mono_class_init (klass);
8244 klass = array_access_to_klass (*ip);
8246 if (sp [0]->type != STACK_OBJ)
8249 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8251 if (sp [1]->opcode == OP_ICONST) {
8252 int array_reg = sp [0]->dreg;
8253 int index_reg = sp [1]->dreg;
8254 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8256 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8257 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
8259 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8260 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
8263 if (*ip == CEE_LDELEM_ANY)
8276 case CEE_STELEM_REF:
8277 case CEE_STELEM_ANY: {
8283 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8285 if (*ip == CEE_STELEM_ANY) {
8287 token = read32 (ip + 1);
8288 klass = mini_get_class (method, token, generic_context);
8289 CHECK_TYPELOAD (klass);
8290 mono_class_init (klass);
8293 klass = array_access_to_klass (*ip);
8295 if (sp [0]->type != STACK_OBJ)
8298 /* storing a NULL doesn't need any of the complex checks in stelemref */
8299 if (generic_class_is_reference_type (cfg, klass) &&
8300 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
8301 MonoMethod* helper = mono_marshal_get_stelemref ();
8302 MonoInst *iargs [3];
8304 if (sp [0]->type != STACK_OBJ)
8306 if (sp [2]->type != STACK_OBJ)
8313 mono_emit_method_call (cfg, helper, iargs, NULL);
8315 if (sp [1]->opcode == OP_ICONST) {
8316 int array_reg = sp [0]->dreg;
8317 int index_reg = sp [1]->dreg;
8318 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8320 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8321 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
8323 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8324 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
8328 if (*ip == CEE_STELEM_ANY)
8335 case CEE_CKFINITE: {
8339 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
8340 ins->sreg1 = sp [0]->dreg;
8341 ins->dreg = alloc_freg (cfg);
8342 ins->type = STACK_R8;
8343 MONO_ADD_INS (bblock, ins);
8346 mono_decompose_opcode (cfg, ins);
8351 case CEE_REFANYVAL: {
8352 MonoInst *src_var, *src;
8354 int klass_reg = alloc_preg (cfg);
8355 int dreg = alloc_preg (cfg);
8358 MONO_INST_NEW (cfg, ins, *ip);
8361 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8362 CHECK_TYPELOAD (klass);
8363 mono_class_init (klass);
8365 if (cfg->generic_sharing_context)
8366 context_used = mono_class_check_context_used (klass);
8369 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
8371 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
8372 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
8373 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
8376 MonoInst *klass_ins;
8378 klass_ins = emit_get_rgctx_klass (cfg, context_used,
8379 klass, MONO_RGCTX_INFO_KLASS);
8382 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
8383 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
8385 mini_emit_class_check (cfg, klass_reg, klass);
8387 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
8388 ins->type = STACK_MP;
8393 case CEE_MKREFANY: {
8394 MonoInst *loc, *addr;
8397 MONO_INST_NEW (cfg, ins, *ip);
8400 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8401 CHECK_TYPELOAD (klass);
8402 mono_class_init (klass);
8404 if (cfg->generic_sharing_context)
8405 context_used = mono_class_check_context_used (klass);
8407 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
8408 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
8411 MonoInst *const_ins;
8412 int type_reg = alloc_preg (cfg);
8414 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
8415 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
8416 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8417 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8418 } else if (cfg->compile_aot) {
8419 int const_reg = alloc_preg (cfg);
8420 int type_reg = alloc_preg (cfg);
8422 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
8423 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
8424 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8425 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8427 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
8428 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
8430 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
8432 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
8433 ins->type = STACK_VTYPE;
8434 ins->klass = mono_defaults.typed_reference_class;
8441 MonoClass *handle_class;
8443 CHECK_STACK_OVF (1);
8446 n = read32 (ip + 1);
8448 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
8449 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
8450 handle = mono_method_get_wrapper_data (method, n);
8451 handle_class = mono_method_get_wrapper_data (method, n + 1);
8452 if (handle_class == mono_defaults.typehandle_class)
8453 handle = &((MonoClass*)handle)->byval_arg;
8456 handle = mono_ldtoken (image, n, &handle_class, generic_context);
8460 mono_class_init (handle_class);
8461 if (cfg->generic_sharing_context) {
8462 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
8463 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
8464 /* This case handles ldtoken
8465 of an open type, like for
8468 } else if (handle_class == mono_defaults.typehandle_class) {
8469 /* If we get a MONO_TYPE_CLASS
8470 then we need to provide the
8472 instantiation of it. */
8473 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
8476 context_used = mono_class_check_context_used (mono_class_from_mono_type (handle));
8477 } else if (handle_class == mono_defaults.fieldhandle_class)
8478 context_used = mono_class_check_context_used (((MonoClassField*)handle)->parent);
8479 else if (handle_class == mono_defaults.methodhandle_class)
8480 context_used = mono_method_check_context_used (handle);
8482 g_assert_not_reached ();
8485 if ((cfg->opt & MONO_OPT_SHARED) &&
8486 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
8487 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
8488 MonoInst *addr, *vtvar, *iargs [3];
8489 int method_context_used;
8491 if (cfg->generic_sharing_context)
8492 method_context_used = mono_method_check_context_used (method);
8494 method_context_used = 0;
8496 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8498 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
8499 EMIT_NEW_ICONST (cfg, iargs [1], n);
8500 if (method_context_used) {
8501 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
8502 method, MONO_RGCTX_INFO_METHOD);
8503 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
8505 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
8506 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
8508 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8510 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8512 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8514 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
8515 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
8516 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
8517 (cmethod->klass == mono_defaults.monotype_class->parent) &&
8518 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
8519 MonoClass *tclass = mono_class_from_mono_type (handle);
8521 mono_class_init (tclass);
8523 ins = emit_get_rgctx_klass (cfg, context_used,
8524 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
8525 } else if (cfg->compile_aot) {
8526 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
8528 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
8530 ins->type = STACK_OBJ;
8531 ins->klass = cmethod->klass;
8534 MonoInst *addr, *vtvar;
8536 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8539 if (handle_class == mono_defaults.typehandle_class) {
8540 ins = emit_get_rgctx_klass (cfg, context_used,
8541 mono_class_from_mono_type (handle),
8542 MONO_RGCTX_INFO_TYPE);
8543 } else if (handle_class == mono_defaults.methodhandle_class) {
8544 ins = emit_get_rgctx_method (cfg, context_used,
8545 handle, MONO_RGCTX_INFO_METHOD);
8546 } else if (handle_class == mono_defaults.fieldhandle_class) {
8547 ins = emit_get_rgctx_field (cfg, context_used,
8548 handle, MONO_RGCTX_INFO_CLASS_FIELD);
8550 g_assert_not_reached ();
8552 } else if (cfg->compile_aot) {
8553 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n);
8555 EMIT_NEW_PCONST (cfg, ins, handle);
8557 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8558 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8559 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8569 MONO_INST_NEW (cfg, ins, OP_THROW);
8571 ins->sreg1 = sp [0]->dreg;
8573 bblock->out_of_line = TRUE;
8574 MONO_ADD_INS (bblock, ins);
8575 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
8576 MONO_ADD_INS (bblock, ins);
8579 link_bblock (cfg, bblock, end_bblock);
8580 start_new_bblock = 1;
8582 case CEE_ENDFINALLY:
8583 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
8584 MONO_ADD_INS (bblock, ins);
8586 start_new_bblock = 1;
8589 * Control will leave the method so empty the stack, otherwise
8590 * the next basic block will start with a nonempty stack.
8592 while (sp != stack_start) {
8600 if (*ip == CEE_LEAVE) {
8602 target = ip + 5 + (gint32)read32(ip + 1);
8605 target = ip + 2 + (signed char)(ip [1]);
8608 /* empty the stack */
8609 while (sp != stack_start) {
8614 * If this leave statement is in a catch block, check for a
8615 * pending exception, and rethrow it if necessary.
8617 for (i = 0; i < header->num_clauses; ++i) {
8618 MonoExceptionClause *clause = &header->clauses [i];
8621 * Use <= in the final comparison to handle clauses with multiple
8622 * leave statements, like in bug #78024.
8623 * The ordering of the exception clauses guarantees that we find the
8626 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len)) {
8628 MonoBasicBlock *dont_throw;
8633 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
8636 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
8638 NEW_BBLOCK (cfg, dont_throw);
8641 * Currently, we allways rethrow the abort exception, despite the
8642 * fact that this is not correct. See thread6.cs for an example.
8643 * But propagating the abort exception is more important than
8644 * getting the sematics right.
8646 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
8647 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
8648 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
8650 MONO_START_BB (cfg, dont_throw);
8655 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
8657 for (tmp = handlers; tmp; tmp = tmp->next) {
8659 link_bblock (cfg, bblock, tblock);
8660 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
8661 ins->inst_target_bb = tblock;
8662 MONO_ADD_INS (bblock, ins);
8664 g_list_free (handlers);
8667 MONO_INST_NEW (cfg, ins, OP_BR);
8668 MONO_ADD_INS (bblock, ins);
8669 GET_BBLOCK (cfg, tblock, target);
8670 link_bblock (cfg, bblock, tblock);
8671 ins->inst_target_bb = tblock;
8672 start_new_bblock = 1;
8674 if (*ip == CEE_LEAVE)
8683 * Mono specific opcodes
8685 case MONO_CUSTOM_PREFIX: {
8687 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
8691 case CEE_MONO_ICALL: {
8693 MonoJitICallInfo *info;
8695 token = read32 (ip + 2);
8696 func = mono_method_get_wrapper_data (method, token);
8697 info = mono_find_jit_icall_by_addr (func);
8700 CHECK_STACK (info->sig->param_count);
8701 sp -= info->sig->param_count;
8703 ins = mono_emit_jit_icall (cfg, info->func, sp);
8704 if (!MONO_TYPE_IS_VOID (info->sig->ret))
8708 inline_costs += 10 * num_calls++;
8712 case CEE_MONO_LDPTR: {
8715 CHECK_STACK_OVF (1);
8717 token = read32 (ip + 2);
8719 ptr = mono_method_get_wrapper_data (method, token);
8720 if (cfg->compile_aot && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (strstr (method->name, "__icall_wrapper_") == method->name)) {
8721 MonoJitICallInfo *callinfo;
8722 const char *icall_name;
8724 icall_name = method->name + strlen ("__icall_wrapper_");
8725 g_assert (icall_name);
8726 callinfo = mono_find_jit_icall_by_name (icall_name);
8727 g_assert (callinfo);
8729 if (ptr == callinfo->func) {
8730 /* Will be transformed into an AOTCONST later */
8731 EMIT_NEW_PCONST (cfg, ins, ptr);
8737 /* FIXME: Generalize this */
8738 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
8739 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
8744 EMIT_NEW_PCONST (cfg, ins, ptr);
8747 inline_costs += 10 * num_calls++;
8748 /* Can't embed random pointers into AOT code */
8749 cfg->disable_aot = 1;
8752 case CEE_MONO_ICALL_ADDR: {
8753 MonoMethod *cmethod;
8756 CHECK_STACK_OVF (1);
8758 token = read32 (ip + 2);
8760 cmethod = mono_method_get_wrapper_data (method, token);
8762 if (cfg->compile_aot) {
8763 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
8765 ptr = mono_lookup_internal_call (cmethod);
8767 EMIT_NEW_PCONST (cfg, ins, ptr);
8773 case CEE_MONO_VTADDR: {
8774 MonoInst *src_var, *src;
8780 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
8781 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
8786 case CEE_MONO_NEWOBJ: {
8787 MonoInst *iargs [2];
8789 CHECK_STACK_OVF (1);
8791 token = read32 (ip + 2);
8792 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
8793 mono_class_init (klass);
8794 NEW_DOMAINCONST (cfg, iargs [0]);
8795 MONO_ADD_INS (cfg->cbb, iargs [0]);
8796 NEW_CLASSCONST (cfg, iargs [1], klass);
8797 MONO_ADD_INS (cfg->cbb, iargs [1]);
8798 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
8800 inline_costs += 10 * num_calls++;
8803 case CEE_MONO_OBJADDR:
8806 MONO_INST_NEW (cfg, ins, OP_MOVE);
8807 ins->dreg = alloc_preg (cfg);
8808 ins->sreg1 = sp [0]->dreg;
8809 ins->type = STACK_MP;
8810 MONO_ADD_INS (cfg->cbb, ins);
8814 case CEE_MONO_LDNATIVEOBJ:
8816 * Similar to LDOBJ, but instead load the unmanaged
8817 * representation of the vtype to the stack.
8822 token = read32 (ip + 2);
8823 klass = mono_method_get_wrapper_data (method, token);
8824 g_assert (klass->valuetype);
8825 mono_class_init (klass);
8828 MonoInst *src, *dest, *temp;
8831 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
8832 temp->backend.is_pinvoke = 1;
8833 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
8834 mini_emit_stobj (cfg, dest, src, klass, TRUE);
8836 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
8837 dest->type = STACK_VTYPE;
8838 dest->klass = klass;
8844 case CEE_MONO_RETOBJ: {
8846 * Same as RET, but return the native representation of a vtype
8849 g_assert (cfg->ret);
8850 g_assert (mono_method_signature (method)->pinvoke);
8855 token = read32 (ip + 2);
8856 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
8858 if (!cfg->vret_addr) {
8859 g_assert (cfg->ret_var_is_local);
8861 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
8863 EMIT_NEW_RETLOADA (cfg, ins);
8865 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
8867 if (sp != stack_start)
8870 MONO_INST_NEW (cfg, ins, OP_BR);
8871 ins->inst_target_bb = end_bblock;
8872 MONO_ADD_INS (bblock, ins);
8873 link_bblock (cfg, bblock, end_bblock);
8874 start_new_bblock = 1;
8878 case CEE_MONO_CISINST:
8879 case CEE_MONO_CCASTCLASS: {
8884 token = read32 (ip + 2);
8885 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
8886 if (ip [1] == CEE_MONO_CISINST)
8887 ins = handle_cisinst (cfg, klass, sp [0]);
8889 ins = handle_ccastclass (cfg, klass, sp [0]);
8895 case CEE_MONO_SAVE_LMF:
8896 case CEE_MONO_RESTORE_LMF:
8897 #ifdef MONO_ARCH_HAVE_LMF_OPS
8898 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
8899 MONO_ADD_INS (bblock, ins);
8900 cfg->need_lmf_area = TRUE;
8904 case CEE_MONO_CLASSCONST:
8905 CHECK_STACK_OVF (1);
8907 token = read32 (ip + 2);
8908 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
8911 inline_costs += 10 * num_calls++;
8913 case CEE_MONO_NOT_TAKEN:
8914 bblock->out_of_line = TRUE;
8918 CHECK_STACK_OVF (1);
8920 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
8921 ins->dreg = alloc_preg (cfg);
8922 ins->inst_offset = (gint32)read32 (ip + 2);
8923 ins->type = STACK_PTR;
8924 MONO_ADD_INS (bblock, ins);
8929 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
8939 /* somewhat similar to LDTOKEN */
8940 MonoInst *addr, *vtvar;
8941 CHECK_STACK_OVF (1);
8942 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
8944 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8945 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
8947 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8948 ins->type = STACK_VTYPE;
8949 ins->klass = mono_defaults.argumenthandle_class;
8962 * The following transforms:
8963 * CEE_CEQ into OP_CEQ
8964 * CEE_CGT into OP_CGT
8965 * CEE_CGT_UN into OP_CGT_UN
8966 * CEE_CLT into OP_CLT
8967 * CEE_CLT_UN into OP_CLT_UN
8969 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
8971 MONO_INST_NEW (cfg, ins, cmp->opcode);
8973 cmp->sreg1 = sp [0]->dreg;
8974 cmp->sreg2 = sp [1]->dreg;
8975 type_from_op (cmp, sp [0], sp [1]);
8977 if ((sp [0]->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
8978 cmp->opcode = OP_LCOMPARE;
8979 else if (sp [0]->type == STACK_R8)
8980 cmp->opcode = OP_FCOMPARE;
8982 cmp->opcode = OP_ICOMPARE;
8983 MONO_ADD_INS (bblock, cmp);
8984 ins->type = STACK_I4;
8985 ins->dreg = alloc_dreg (cfg, ins->type);
8986 type_from_op (ins, sp [0], sp [1]);
8988 if (cmp->opcode == OP_FCOMPARE) {
8990 * The backends expect the fceq opcodes to do the
8993 cmp->opcode = OP_NOP;
8994 ins->sreg1 = cmp->sreg1;
8995 ins->sreg2 = cmp->sreg2;
8997 MONO_ADD_INS (bblock, ins);
9004 MonoMethod *cil_method;
9005 gboolean needs_static_rgctx_invoke;
9007 CHECK_STACK_OVF (1);
9009 n = read32 (ip + 2);
9010 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9013 mono_class_init (cmethod->klass);
9015 mono_save_token_info (cfg, image, n, cmethod);
9017 if (cfg->generic_sharing_context)
9018 context_used = mono_method_check_context_used (cmethod);
9020 needs_static_rgctx_invoke = mono_method_needs_static_rgctx_invoke (cmethod, TRUE);
9022 cil_method = cmethod;
9023 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
9024 METHOD_ACCESS_FAILURE;
9026 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9027 if (check_linkdemand (cfg, method, cmethod))
9029 CHECK_CFG_EXCEPTION;
9030 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9031 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9035 * Optimize the common case of ldftn+delegate creation
9037 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE) && !defined(HAVE_WRITE_BARRIERS)
9038 /* FIXME: SGEN support */
9039 /* FIXME: handle shared static generic methods */
9040 /* FIXME: handle this in shared code */
9041 if (!needs_static_rgctx_invoke && !context_used && (sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
9042 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
9043 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
9044 MonoInst *target_ins;
9047 if (cfg->verbose_level > 3)
9048 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
9049 target_ins = sp [-1];
9051 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod);
9060 if (needs_static_rgctx_invoke)
9061 cmethod = mono_marshal_get_static_rgctx_invoke (cmethod);
9063 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
9064 } else if (needs_static_rgctx_invoke) {
9065 EMIT_NEW_METHODCONST (cfg, argconst, mono_marshal_get_static_rgctx_invoke (cmethod));
9067 EMIT_NEW_METHODCONST (cfg, argconst, cmethod);
9069 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
9073 inline_costs += 10 * num_calls++;
9076 case CEE_LDVIRTFTN: {
9081 n = read32 (ip + 2);
9082 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9085 mono_class_init (cmethod->klass);
9087 if (cfg->generic_sharing_context)
9088 context_used = mono_method_check_context_used (cmethod);
9090 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9091 if (check_linkdemand (cfg, method, cmethod))
9093 CHECK_CFG_EXCEPTION;
9094 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9095 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9102 args [1] = emit_get_rgctx_method (cfg, context_used,
9103 cmethod, MONO_RGCTX_INFO_METHOD);
9104 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
9106 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
9107 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
9111 inline_costs += 10 * num_calls++;
9115 CHECK_STACK_OVF (1);
9117 n = read16 (ip + 2);
9119 EMIT_NEW_ARGLOAD (cfg, ins, n);
9124 CHECK_STACK_OVF (1);
9126 n = read16 (ip + 2);
9128 NEW_ARGLOADA (cfg, ins, n);
9129 MONO_ADD_INS (cfg->cbb, ins);
9137 n = read16 (ip + 2);
9139 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
9141 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
9145 CHECK_STACK_OVF (1);
9147 n = read16 (ip + 2);
9149 EMIT_NEW_LOCLOAD (cfg, ins, n);
9154 unsigned char *tmp_ip;
9155 CHECK_STACK_OVF (1);
9157 n = read16 (ip + 2);
9160 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
9166 EMIT_NEW_LOCLOADA (cfg, ins, n);
9175 n = read16 (ip + 2);
9177 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
9179 emit_stloc_ir (cfg, sp, header, n);
9186 if (sp != stack_start)
9188 if (cfg->method != method)
9190 * Inlining this into a loop in a parent could lead to
9191 * stack overflows which is different behavior than the
9192 * non-inlined case, thus disable inlining in this case.
9194 goto inline_failure;
9196 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
9197 ins->dreg = alloc_preg (cfg);
9198 ins->sreg1 = sp [0]->dreg;
9199 ins->type = STACK_PTR;
9200 MONO_ADD_INS (cfg->cbb, ins);
9202 cfg->flags |= MONO_CFG_HAS_ALLOCA;
9203 if (header->init_locals)
9204 ins->flags |= MONO_INST_INIT;
9209 case CEE_ENDFILTER: {
9210 MonoExceptionClause *clause, *nearest;
9211 int cc, nearest_num;
9215 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
9217 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
9218 ins->sreg1 = (*sp)->dreg;
9219 MONO_ADD_INS (bblock, ins);
9220 start_new_bblock = 1;
9225 for (cc = 0; cc < header->num_clauses; ++cc) {
9226 clause = &header->clauses [cc];
9227 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
9228 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
9229 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
9235 if ((ip - header->code) != nearest->handler_offset)
9240 case CEE_UNALIGNED_:
9241 ins_flag |= MONO_INST_UNALIGNED;
9242 /* FIXME: record alignment? we can assume 1 for now */
9247 ins_flag |= MONO_INST_VOLATILE;
9251 ins_flag |= MONO_INST_TAILCALL;
9252 cfg->flags |= MONO_CFG_HAS_TAIL;
9253 /* Can't inline tail calls at this time */
9254 inline_costs += 100000;
9261 token = read32 (ip + 2);
9262 klass = mini_get_class (method, token, generic_context);
9263 CHECK_TYPELOAD (klass);
9264 if (generic_class_is_reference_type (cfg, klass))
9265 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
9267 mini_emit_initobj (cfg, *sp, NULL, klass);
9271 case CEE_CONSTRAINED_:
9273 token = read32 (ip + 2);
9274 constrained_call = mono_class_get_full (image, token, generic_context);
9275 CHECK_TYPELOAD (constrained_call);
9280 MonoInst *iargs [3];
9284 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
9285 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
9286 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
9287 /* emit_memset only works when val == 0 */
9288 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
9293 if (ip [1] == CEE_CPBLK) {
9294 MonoMethod *memcpy_method = get_memcpy_method ();
9295 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
9297 MonoMethod *memset_method = get_memset_method ();
9298 mono_emit_method_call (cfg, memset_method, iargs, NULL);
9308 ins_flag |= MONO_INST_NOTYPECHECK;
9310 ins_flag |= MONO_INST_NORANGECHECK;
9311 /* we ignore the no-nullcheck for now since we
9312 * really do it explicitly only when doing callvirt->call
9318 int handler_offset = -1;
9320 for (i = 0; i < header->num_clauses; ++i) {
9321 MonoExceptionClause *clause = &header->clauses [i];
9322 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
9323 handler_offset = clause->handler_offset;
9328 bblock->flags |= BB_EXCEPTION_UNSAFE;
9330 g_assert (handler_offset != -1);
9332 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
9333 MONO_INST_NEW (cfg, ins, OP_RETHROW);
9334 ins->sreg1 = load->dreg;
9335 MONO_ADD_INS (bblock, ins);
9337 link_bblock (cfg, bblock, end_bblock);
9338 start_new_bblock = 1;
9346 CHECK_STACK_OVF (1);
9348 token = read32 (ip + 2);
9349 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC) {
9350 MonoType *type = mono_type_create_from_typespec (image, token);
9351 token = mono_type_size (type, &ialign);
9353 MonoClass *klass = mono_class_get_full (image, token, generic_context);
9354 CHECK_TYPELOAD (klass);
9355 mono_class_init (klass);
9356 token = mono_class_value_size (klass, &align);
9358 EMIT_NEW_ICONST (cfg, ins, token);
9363 case CEE_REFANYTYPE: {
9364 MonoInst *src_var, *src;
9370 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9372 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
9373 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
9374 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
9384 g_error ("opcode 0xfe 0x%02x not handled", ip [1]);
9389 g_error ("opcode 0x%02x not handled", *ip);
9392 if (start_new_bblock != 1)
9395 bblock->cil_length = ip - bblock->cil_code;
9396 bblock->next_bb = end_bblock;
9398 if (cfg->method == method && cfg->domainvar) {
9400 MonoInst *get_domain;
9402 cfg->cbb = init_localsbb;
9404 if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
9405 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
9408 get_domain->dreg = alloc_preg (cfg);
9409 MONO_ADD_INS (cfg->cbb, get_domain);
9411 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
9412 MONO_ADD_INS (cfg->cbb, store);
9415 if (cfg->method == method && cfg->got_var)
9416 mono_emit_load_got_addr (cfg);
9418 if (header->init_locals) {
9421 cfg->cbb = init_localsbb;
9423 for (i = 0; i < header->num_locals; ++i) {
9424 MonoType *ptype = header->locals [i];
9425 int t = ptype->type;
9426 dreg = cfg->locals [i]->dreg;
9428 if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
9429 t = ptype->data.klass->enum_basetype->type;
9431 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9432 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
9433 MONO_EMIT_NEW_ICONST (cfg, cfg->locals [i]->dreg, 0);
9434 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
9435 MONO_EMIT_NEW_I8CONST (cfg, cfg->locals [i]->dreg, 0);
9436 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
9437 MONO_INST_NEW (cfg, ins, OP_R8CONST);
9438 ins->type = STACK_R8;
9439 ins->inst_p0 = (void*)&r8_0;
9440 ins->dreg = alloc_dreg (cfg, STACK_R8);
9441 MONO_ADD_INS (init_localsbb, ins);
9442 EMIT_NEW_LOCSTORE (cfg, store, i, ins);
9443 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
9444 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
9445 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
9447 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9454 if (cfg->method == method) {
9456 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
9457 bb->region = mono_find_block_region (cfg, bb->real_offset);
9459 mono_create_spvar_for_region (cfg, bb->region);
9460 if (cfg->verbose_level > 2)
9461 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
9465 g_slist_free (class_inits);
9466 dont_inline = g_list_remove (dont_inline, method);
9468 if (inline_costs < 0) {
9471 /* Method is too large */
9472 mname = mono_method_full_name (method, TRUE);
9473 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
9474 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
9479 if ((cfg->verbose_level > 2) && (cfg->method == method))
9480 mono_print_code (cfg, "AFTER METHOD-TO-IR");
9482 return inline_costs;
9485 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
9486 g_slist_free (class_inits);
9487 dont_inline = g_list_remove (dont_inline, method);
9491 g_slist_free (class_inits);
9492 dont_inline = g_list_remove (dont_inline, method);
9496 g_slist_free (class_inits);
9497 dont_inline = g_list_remove (dont_inline, method);
9498 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
9502 g_slist_free (class_inits);
9503 dont_inline = g_list_remove (dont_inline, method);
9504 set_exception_type_from_invalid_il (cfg, method, ip);
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Map a register-source store-to-membase opcode to its immediate-source
 * counterpart, preserving the store width (byte/short/int/long/pointer).
 * Used when the value being stored is known to be a constant, so the
 * store can encode the immediate directly instead of going through a vreg.
 * Asserts if OPCODE is not one of the *_MEMBASE_REG stores.
 */
store_membase_reg_to_store_membase_imm (int opcode)
	/* Pointer-sized store. */
	case OP_STORE_MEMBASE_REG:
		return OP_STORE_MEMBASE_IMM;
	/* 1-byte store. */
	case OP_STOREI1_MEMBASE_REG:
		return OP_STOREI1_MEMBASE_IMM;
	/* 2-byte store. */
	case OP_STOREI2_MEMBASE_REG:
		return OP_STOREI2_MEMBASE_IMM;
	/* 4-byte store. */
	case OP_STOREI4_MEMBASE_REG:
		return OP_STOREI4_MEMBASE_IMM;
	/* 8-byte store. */
	case OP_STOREI8_MEMBASE_REG:
		return OP_STOREI8_MEMBASE_IMM;
	/* Any other opcode is a caller bug: no IMM form exists for it here. */
	g_assert_not_reached ();
9529 #endif /* DISABLE_JIT */
/*
 * mono_op_to_op_imm:
 *
 *   Map a two-register ALU/compare/store/call opcode to its variant taking an
 * immediate second operand, or (on the fall-through path, not visible here)
 * -1 when no immediate form exists.  Lets the burg/lowering passes fold
 * constant operands directly into the instruction.
 * NOTE(review): many case labels are elided in this view; each return below
 * presumably sits under the matching non-IMM case label — confirm in the
 * full file.
 */
mono_op_to_op_imm (int opcode)
		/* 32 bit unsigned division/remainder by a constant. */
		return OP_IDIV_UN_IMM;
		return OP_IREM_UN_IMM;
		/* 32 bit unsigned shift by a constant amount. */
		return OP_ISHR_UN_IMM;
		/* 64 bit unsigned shift by a constant amount. */
		return OP_LSHR_UN_IMM;
		/* Compares against a constant. */
		return OP_COMPARE_IMM;
		return OP_ICOMPARE_IMM;
		return OP_LCOMPARE_IMM;
	/* Stores of a constant value: width-preserving REG -> IMM mapping. */
	case OP_STORE_MEMBASE_REG:
		return OP_STORE_MEMBASE_IMM;
	case OP_STOREI1_MEMBASE_REG:
		return OP_STOREI1_MEMBASE_IMM;
	case OP_STOREI2_MEMBASE_REG:
		return OP_STOREI2_MEMBASE_IMM;
	case OP_STOREI4_MEMBASE_REG:
		return OP_STOREI4_MEMBASE_IMM;
	/* x86/amd64-only instruction forms that accept an immediate operand. */
#if defined(__i386__) || defined (__x86_64__)
		return OP_X86_PUSH_IMM;
	case OP_X86_COMPARE_MEMBASE_REG:
		return OP_X86_COMPARE_MEMBASE_IMM;
#if defined(__x86_64__)
	case OP_AMD64_ICOMPARE_MEMBASE_REG:
		return OP_AMD64_ICOMPARE_MEMBASE_IMM;
	/* Calls to a known address and localloc of a constant size. */
	case OP_VOIDCALL_REG:
		return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Map a CIL CEE_LDIND_* indirect-load opcode to the corresponding JIT
 * OP_LOAD*_MEMBASE opcode, preserving width and signedness.  Asserts on
 * any opcode outside the ldind family.
 * NOTE(review): the CEE_LDIND_* case labels are elided in this view; the
 * returns below appear in the standard i1/u1/i2/u2/i4/u4/i(ptr)/ref/i8/r4/r8
 * order — confirm against the full file.
 */
ldind_to_load_membase (int opcode)
		return OP_LOADI1_MEMBASE;
		return OP_LOADU1_MEMBASE;
		return OP_LOADI2_MEMBASE;
		return OP_LOADU2_MEMBASE;
		return OP_LOADI4_MEMBASE;
		return OP_LOADU4_MEMBASE;
		/* Native int and object reference both load a pointer-sized word. */
		return OP_LOAD_MEMBASE;
		return OP_LOAD_MEMBASE;
		return OP_LOADI8_MEMBASE;
		return OP_LOADR4_MEMBASE;
		return OP_LOADR8_MEMBASE;
	/* Not a CEE_LDIND_* opcode. */
	g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Map a CIL CEE_STIND_* indirect-store opcode to the corresponding JIT
 * OP_STORE*_MEMBASE_REG opcode, preserving the store width.  Asserts on
 * any opcode outside the stind family.
 * NOTE(review): the CEE_STIND_* case labels are elided in this view; the
 * returns below appear in i1/i2/i4/i-or-ref/i8/r4/r8 order — confirm
 * against the full file.
 */
stind_to_store_membase (int opcode)
		return OP_STOREI1_MEMBASE_REG;
		return OP_STOREI2_MEMBASE_REG;
		return OP_STOREI4_MEMBASE_REG;
		/* Native int / object reference: pointer-sized store. */
		return OP_STORE_MEMBASE_REG;
		return OP_STOREI8_MEMBASE_REG;
		return OP_STORER4_MEMBASE_REG;
		return OP_STORER8_MEMBASE_REG;
	/* Not a CEE_STIND_* opcode. */
	g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map an OP_LOAD*_MEMBASE opcode to the absolute-address OP_LOAD*_MEM
 * variant, used when the base address is a known constant so the load can
 * encode the address directly.  Only x86/amd64 provide these forms; on
 * other architectures the (elided) fall-through path applies.
 */
mono_load_membase_to_load_mem (int opcode)
	// FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
#if defined(__i386__) || defined(__x86_64__)
	case OP_LOAD_MEMBASE:
	case OP_LOADU1_MEMBASE:
		return OP_LOADU1_MEM;
	case OP_LOADU2_MEMBASE:
		return OP_LOADU2_MEM;
	case OP_LOADI4_MEMBASE:
		return OP_LOADI4_MEM;
	case OP_LOADU4_MEMBASE:
		return OP_LOADU4_MEM;
	/* 8-byte absolute loads only exist on 64 bit targets. */
#if SIZEOF_REGISTER == 8
	case OP_LOADI8_MEMBASE:
		return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   Given a store opcode STORE_OPCODE whose value operand is produced by an
 * ALU opcode OPCODE, return a combined read-modify-write x86/amd64 opcode
 * that performs the ALU operation directly on the memory destination
 * (e.g. ADD reg -> X86_ADD_MEMBASE_REG), avoiding the separate load/store.
 * Only valid when the store width matches the ALU width, hence the
 * store_opcode guards below; non-x86 targets fall through (elided here).
 * NOTE(review): most ALU case labels are elided in this view — confirm the
 * label/return pairing against the full file.
 */
op_to_op_dest_membase (int store_opcode, int opcode)
#if defined(__i386__)
	/* On x86 only pointer/4-byte stores can be fused. */
	if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
		/* Register-source ALU ops fused into the memory destination. */
		return OP_X86_ADD_MEMBASE_REG;
		return OP_X86_SUB_MEMBASE_REG;
		return OP_X86_AND_MEMBASE_REG;
		return OP_X86_OR_MEMBASE_REG;
		return OP_X86_XOR_MEMBASE_REG;
		/* Immediate-source ALU ops fused into the memory destination. */
		return OP_X86_ADD_MEMBASE_IMM;
		return OP_X86_SUB_MEMBASE_IMM;
		return OP_X86_AND_MEMBASE_IMM;
		return OP_X86_OR_MEMBASE_IMM;
		return OP_X86_XOR_MEMBASE_IMM;
#if defined(__x86_64__)
	/* amd64 additionally allows 8-byte stores to be fused. */
	if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
		/* 32 bit ALU ops reuse the X86_ opcodes. */
		return OP_X86_ADD_MEMBASE_REG;
		return OP_X86_SUB_MEMBASE_REG;
		return OP_X86_AND_MEMBASE_REG;
		return OP_X86_OR_MEMBASE_REG;
		return OP_X86_XOR_MEMBASE_REG;
		return OP_X86_ADD_MEMBASE_IMM;
		return OP_X86_SUB_MEMBASE_IMM;
		return OP_X86_AND_MEMBASE_IMM;
		return OP_X86_OR_MEMBASE_IMM;
		return OP_X86_XOR_MEMBASE_IMM;
		/* 64 bit ALU ops use the AMD64_ opcodes. */
		return OP_AMD64_ADD_MEMBASE_REG;
		return OP_AMD64_SUB_MEMBASE_REG;
		return OP_AMD64_AND_MEMBASE_REG;
		return OP_AMD64_OR_MEMBASE_REG;
		return OP_AMD64_XOR_MEMBASE_REG;
		return OP_AMD64_ADD_MEMBASE_IMM;
		return OP_AMD64_SUB_MEMBASE_IMM;
		return OP_AMD64_AND_MEMBASE_IMM;
		return OP_AMD64_OR_MEMBASE_IMM;
		return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Fuse a compare-result opcode OPCODE with a following 1-byte store
 * STORE_OPCODE into a single x86 SETcc-to-memory instruction.  Only
 * byte stores can be fused since SETcc writes exactly one byte.
 * NOTE(review): the case labels for the two compare-result opcodes
 * (presumably the CEQ/CNE family) are elided in this view — confirm
 * against the full file.
 */
op_to_op_store_membase (int store_opcode, int opcode)
#if defined(__i386__) || defined(__x86_64__)
		if (store_opcode == OP_STOREI1_MEMBASE_REG)
			return OP_X86_SETEQ_MEMBASE;
		if (store_opcode == OP_STOREI1_MEMBASE_REG)
			return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Given an opcode OPCODE whose first source operand comes from a load
 * LOAD_OPCODE, return an x86/amd64 opcode that reads that operand directly
 * from memory (e.g. PUSH/COMPARE with a membase operand), eliminating the
 * separate load.  Returns through the (elided) fall-through path when no
 * fused form exists or the load width does not match.
 */
op_to_op_src1_membase (int load_opcode, int opcode)
	/* FIXME: This has sign extension issues */
	/*
	 * A byte-wide load compared against an immediate can use the 8 bit
	 * memory-compare form directly.
	 */
	if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
		return OP_X86_COMPARE_MEMBASE8_IMM;
	/* On x86 only pointer/4-byte loads can be fused. */
	if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
		return OP_X86_PUSH_MEMBASE;
	case OP_COMPARE_IMM:
	case OP_ICOMPARE_IMM:
		return OP_X86_COMPARE_MEMBASE_IMM;
		return OP_X86_COMPARE_MEMBASE_REG;
	/* FIXME: This has sign extension issues */
	if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
		return OP_X86_COMPARE_MEMBASE8_IMM;
		/* PUSH of a pointer-sized/8-byte loaded value (amd64). */
		if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
			return OP_X86_PUSH_MEMBASE;
		/* FIXME: This only works for 32 bit immediates
	case OP_COMPARE_IMM:
	case OP_LCOMPARE_IMM:
		if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
			return OP_AMD64_COMPARE_MEMBASE_IMM;
	case OP_ICOMPARE_IMM:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_AMD64_ICOMPARE_MEMBASE_IMM;
		/* 64 bit vs. 32 bit register compares pick the matching width. */
		if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
			return OP_AMD64_COMPARE_MEMBASE_REG;
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Given an opcode OPCODE whose second source operand comes from a load
 * LOAD_OPCODE, return an x86/amd64 reg-op-memory opcode (e.g.
 * X86_ADD_REG_MEMBASE) that reads the second operand directly from memory,
 * eliminating the separate load.  The load width must match the operation
 * width; otherwise the (elided) fall-through path applies.
 * NOTE(review): most case labels are elided in this view — confirm the
 * label/return pairing against the full file.
 */
op_to_op_src2_membase (int load_opcode, int opcode)
	/* On x86 only pointer/4-byte loads can be fused. */
	if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
		/* reg OP [membase] forms on x86. */
		return OP_X86_COMPARE_REG_MEMBASE;
		return OP_X86_ADD_REG_MEMBASE;
		return OP_X86_SUB_REG_MEMBASE;
		return OP_X86_AND_REG_MEMBASE;
		return OP_X86_OR_REG_MEMBASE;
		return OP_X86_XOR_REG_MEMBASE;
		/* amd64: pick 32 vs. 64 bit form from the load width. */
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_AMD64_ICOMPARE_REG_MEMBASE;
		if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
			return OP_AMD64_COMPARE_REG_MEMBASE;
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_X86_ADD_REG_MEMBASE;
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_X86_SUB_REG_MEMBASE;
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_X86_AND_REG_MEMBASE;
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_X86_OR_REG_MEMBASE;
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_X86_XOR_REG_MEMBASE;
		if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
			return OP_AMD64_ADD_REG_MEMBASE;
		if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
			return OP_AMD64_SUB_REG_MEMBASE;
		if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
			return OP_AMD64_AND_REG_MEMBASE;
		if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
			return OP_AMD64_OR_REG_MEMBASE;
		if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
			return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Convert OPCODE to its immediate-operand variant via mono_op_to_op_imm (),
 * except for opcodes the current architecture emulates in software (long
 * shifts on 32-bit targets, mul/div when MONO_ARCH_EMULATE_* is set) — the
 * guarded cases below presumably return the opcode unchanged/-1; the guarded
 * bodies are not visible in this excerpt (line-sampled).
 */
9952 mono_op_to_op_imm_noemul (int opcode)
9955 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
9960 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
9968 return mono_op_to_op_imm (opcode);
9975 * mono_handle_global_vregs:
9977 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
/*
 * Pass 1 walks every bblock recording, per vreg, the single bblock it is used
 * in (block_num + 1), or -1 once it is seen in a second bblock; vregs seen in
 * two bblocks get a MonoInst variable created for them. Pass 2 demotes
 * single-bblock variables back to local vregs, and finally the varinfo/vars
 * tables are compacted. NOTE(review): line-sampled excerpt — braces and some
 * statements between the numbered lines are missing from this view.
 */
9981 mono_handle_global_vregs (MonoCompile *cfg)
9987 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
9989 #ifdef MONO_ARCH_SIMD_INTRINSICS
9990 if (cfg->uses_simd_intrinsics)
9991 mono_simd_simplify_indirection (cfg);
9994 /* Find local vregs used in more than one bb */
9995 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
9996 MonoInst *ins = bb->code;
9997 int block_num = bb->block_num;
9999 if (cfg->verbose_level > 2)
10000 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
10003 for (; ins; ins = ins->next) {
10004 const char *spec = INS_INFO (ins->opcode);
10005 int regtype, regindex;
10008 if (G_UNLIKELY (cfg->verbose_level > 2))
10009 mono_print_ins (ins);
10011 g_assert (ins->opcode >= MONO_CEE_LAST);
/* regindex 0 = dest, 1 = src1, 2 = src2; ' ' in the spec means "no such register" */
10013 for (regindex = 0; regindex < 3; regindex ++) {
10016 if (regindex == 0) {
10017 regtype = spec [MONO_INST_DEST];
10018 if (regtype == ' ')
10021 } else if (regindex == 1) {
10022 regtype = spec [MONO_INST_SRC1];
10023 if (regtype == ' ')
10027 regtype = spec [MONO_INST_SRC2];
10028 if (regtype == ' ')
10033 #if SIZEOF_REGISTER == 4
10034 if (regtype == 'l') {
10036 * Since some instructions reference the original long vreg,
10037 * and some reference the two component vregs, it is quite hard
10038 * to determine when it needs to be global. So be conservative.
10040 if (!get_vreg_to_inst (cfg, vreg)) {
10041 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
10043 if (cfg->verbose_level > 2)
10044 printf ("LONG VREG R%d made global.\n", vreg);
10048 * Make the component vregs volatile since the optimizations can
10049 * get confused otherwise.
10051 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
10052 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
10056 g_assert (vreg != -1);
10058 prev_bb = vreg_to_bb [vreg];
10059 if (prev_bb == 0) {
10060 /* 0 is a valid block num */
10061 vreg_to_bb [vreg] = block_num + 1;
10062 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* hregs below MONO_MAX_IREGS/FREGS are machine registers, never made global */
10063 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
10066 if (!get_vreg_to_inst (cfg, vreg)) {
10067 if (G_UNLIKELY (cfg->verbose_level > 2))
10068 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
10072 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
10075 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
10078 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
10081 g_assert_not_reached ();
10085 /* Flag as having been used in more than one bb */
10086 vreg_to_bb [vreg] = -1;
10092 /* If a variable is used in only one bblock, convert it into a local vreg */
10093 for (i = 0; i < cfg->num_varinfo; i++) {
10094 MonoInst *var = cfg->varinfo [i];
10095 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
10097 switch (var->type) {
10103 #if SIZEOF_REGISTER == 8
10106 #if !defined(__i386__) && !defined(MONO_ARCH_SOFT_FLOAT)
10107 /* Enabling this screws up the fp stack on x86 */
10110 /* Arguments are implicitly global */
10111 /* Putting R4 vars into registers doesn't work currently */
10112 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg) {
10114 * Make that the variable's liveness interval doesn't contain a call, since
10115 * that would cause the lvreg to be spilled, making the whole optimization
10118 /* This is too slow for JIT compilation */
10120 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
10122 int def_index, call_index, ins_index;
10123 gboolean spilled = FALSE;
10128 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
10129 const char *spec = INS_INFO (ins->opcode);
10131 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
10132 def_index = ins_index;
/*
 * FIX(review): the second clause below used to duplicate the SRC1/sreg1
 * test, so a use of the variable as the *second* source operand after a
 * call was never detected and the vreg could be wrongly converted. It
 * must test SRC2/sreg2.
 */
10134 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
10135 ((spec [MONO_INST_SRC2] != ' ') && (ins->sreg2 == var->dreg))) {
10136 if (call_index > def_index) {
10142 if (MONO_IS_CALL (ins))
10143 call_index = ins_index;
10153 if (G_UNLIKELY (cfg->verbose_level > 2))
10154 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
/* demote: mark the variable dead and drop the vreg -> var mapping */
10155 var->flags |= MONO_INST_IS_DEAD;
10156 cfg->vreg_to_inst [var->dreg] = NULL;
10163 * Compress the varinfo and vars tables so the liveness computation is faster and
10164 * takes up less space.
10167 for (i = 0; i < cfg->num_varinfo; ++i) {
10168 MonoInst *var = cfg->varinfo [i];
10169 if (pos < i && cfg->locals_start == i)
10170 cfg->locals_start = pos;
10171 if (!(var->flags & MONO_INST_IS_DEAD)) {
10173 cfg->varinfo [pos] = cfg->varinfo [i];
10174 cfg->varinfo [pos]->inst_c0 = pos;
10175 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
10176 cfg->vars [pos].idx = pos;
10177 #if SIZEOF_REGISTER == 4
10178 if (cfg->varinfo [pos]->type == STACK_I8) {
10179 /* Modify the two component vars too */
10182 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
10183 var1->inst_c0 = pos;
10184 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
10185 var1->inst_c0 = pos;
10192 cfg->num_varinfo = pos;
10193 if (cfg->locals_start > cfg->num_varinfo)
10194 cfg->locals_start = cfg->num_varinfo;
10198 * mono_spill_global_vars:
10200 * Generate spill code for variables which are not allocated to registers,
10201 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
10202 * code is generated which could be optimized by the local optimization passes.
/*
 * Per bblock, each instruction's dreg/sregs that refer to stack-allocated
 * variables are rewritten to fresh local vregs with a store inserted after
 * (for dreg) or a load inserted before (for sregs); where possible the
 * load/store is fused into the instruction itself via the op_to_op_*_membase
 * helpers. vreg_to_lvreg[] caches "variable already loaded into lvreg" so
 * repeated loads within a bblock are elided; the cache is flushed at bblock
 * boundaries and across calls. NOTE(review): line-sampled excerpt — braces
 * and some statements between the numbered lines are missing from this view.
 */
10205 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
10207 MonoBasicBlock *bb;
10209 int orig_next_vreg;
10210 guint32 *vreg_to_lvreg;
10212 guint32 i, lvregs_len;
10213 gboolean dest_has_lvreg = FALSE;
10214 guint32 stacktypes [128];
10216 *need_local_opts = FALSE;
10218 memset (spec2, 0, sizeof (spec2));
/* map ins-spec regtype characters to stack types used by alloc_dreg () */
10220 /* FIXME: Move this function to mini.c */
10221 stacktypes ['i'] = STACK_PTR;
10222 stacktypes ['l'] = STACK_I8;
10223 stacktypes ['f'] = STACK_R8;
10224 #ifdef MONO_ARCH_SIMD_INTRINSICS
10225 stacktypes ['x'] = STACK_VTYPE;
10228 #if SIZEOF_REGISTER == 4
10229 /* Create MonoInsts for longs */
10230 for (i = 0; i < cfg->num_varinfo; i++) {
10231 MonoInst *ins = cfg->varinfo [i];
10233 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
10234 switch (ins->type) {
10235 #ifdef MONO_ARCH_SOFT_FLOAT
/* give the two 32-bit component vregs (dreg+1 / dreg+2) stack slots inside the long's slot */
10241 g_assert (ins->opcode == OP_REGOFFSET);
10243 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
10245 tree->opcode = OP_REGOFFSET;
10246 tree->inst_basereg = ins->inst_basereg;
10247 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
10249 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
10251 tree->opcode = OP_REGOFFSET;
10252 tree->inst_basereg = ins->inst_basereg;
10253 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
10263 /* FIXME: widening and truncation */
10266 * As an optimization, when a variable allocated to the stack is first loaded into
10267 * an lvreg, we will remember the lvreg and use it the next time instead of loading
10268 * the variable again.
10270 orig_next_vreg = cfg->next_vreg;
10271 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
10272 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
10275 /* Add spill loads/stores */
10276 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10279 if (cfg->verbose_level > 2)
10280 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
10282 /* Clear vreg_to_lvreg array */
10283 for (i = 0; i < lvregs_len; i++)
10284 vreg_to_lvreg [lvregs [i]] = 0;
10288 MONO_BB_FOR_EACH_INS (bb, ins) {
10289 const char *spec = INS_INFO (ins->opcode);
10290 int regtype, srcindex, sreg, tmp_reg, prev_dreg;
10291 gboolean store, no_lvreg;
10293 if (G_UNLIKELY (cfg->verbose_level > 2))
10294 mono_print_ins (ins);
10296 if (ins->opcode == OP_NOP)
10300 * We handle LDADDR here as well, since it can only be decomposed
10301 * when variable addresses are known.
10303 if (ins->opcode == OP_LDADDR) {
10304 MonoInst *var = ins->inst_p0;
10306 if (var->opcode == OP_VTARG_ADDR) {
10307 /* Happens on SPARC/S390 where vtypes are passed by reference */
10308 MonoInst *vtaddr = var->inst_left;
10309 if (vtaddr->opcode == OP_REGVAR) {
10310 ins->opcode = OP_MOVE;
10311 ins->sreg1 = vtaddr->dreg;
10313 else if (var->inst_left->opcode == OP_REGOFFSET) {
10314 ins->opcode = OP_LOAD_MEMBASE;
10315 ins->inst_basereg = vtaddr->inst_basereg;
10316 ins->inst_offset = vtaddr->inst_offset;
10320 g_assert (var->opcode == OP_REGOFFSET);
/* LDADDR of a stack slot becomes basereg + offset */
10322 ins->opcode = OP_ADD_IMM;
10323 ins->sreg1 = var->inst_basereg;
10324 ins->inst_imm = var->inst_offset;
10327 *need_local_opts = TRUE;
10328 spec = INS_INFO (ins->opcode);
10331 if (ins->opcode < MONO_CEE_LAST) {
10332 mono_print_ins (ins);
10333 g_assert_not_reached ();
10337 * Store opcodes have destbasereg in the dreg, but in reality, it is an
/* temporarily swap dreg/sreg2 so stores are processed with a uniform spec (spec2) */
10341 if (MONO_IS_STORE_MEMBASE (ins)) {
10342 tmp_reg = ins->dreg;
10343 ins->dreg = ins->sreg2;
10344 ins->sreg2 = tmp_reg;
10347 spec2 [MONO_INST_DEST] = ' ';
10348 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10349 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10351 } else if (MONO_IS_STORE_MEMINDEX (ins))
10352 g_assert_not_reached ();
10357 if (G_UNLIKELY (cfg->verbose_level > 2))
10358 printf ("\t %.3s %d %d %d\n", spec, ins->dreg, ins->sreg1, ins->sreg2);
/***************/
/*    DREG     */
/***************/
10363 regtype = spec [MONO_INST_DEST];
10364 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
10367 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
10368 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
10369 MonoInst *store_ins;
10372 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
10374 if (var->opcode == OP_REGVAR) {
10375 ins->dreg = var->dreg;
10376 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
10378 * Instead of emitting a load+store, use a _membase opcode.
10380 g_assert (var->opcode == OP_REGOFFSET);
10381 if (ins->opcode == OP_MOVE) {
10384 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
10385 ins->inst_basereg = var->inst_basereg;
10386 ins->inst_offset = var->inst_offset;
10389 spec = INS_INFO (ins->opcode);
10393 g_assert (var->opcode == OP_REGOFFSET);
10395 prev_dreg = ins->dreg;
10397 /* Invalidate any previous lvreg for this vreg */
10398 vreg_to_lvreg [ins->dreg] = 0;
10402 #ifdef MONO_ARCH_SOFT_FLOAT
/* soft-float: an R8 store is really a 64-bit integer store */
10403 if (store_opcode == OP_STORER8_MEMBASE_REG) {
10405 store_opcode = OP_STOREI8_MEMBASE_REG;
10409 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
/* longs on 32 bit: store the two component words separately */
10411 if (regtype == 'l') {
10412 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
10413 mono_bblock_insert_after_ins (bb, ins, store_ins);
10414 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
10415 mono_bblock_insert_after_ins (bb, ins, store_ins);
10418 g_assert (store_opcode != OP_STOREV_MEMBASE);
10420 /* Try to fuse the store into the instruction itself */
10421 /* FIXME: Add more instructions */
10422 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
10423 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
10424 ins->inst_imm = ins->inst_c0;
10425 ins->inst_destbasereg = var->inst_basereg;
10426 ins->inst_offset = var->inst_offset;
10427 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
10428 ins->opcode = store_opcode;
10429 ins->inst_destbasereg = var->inst_basereg;
10430 ins->inst_offset = var->inst_offset;
10434 tmp_reg = ins->dreg;
10435 ins->dreg = ins->sreg2;
10436 ins->sreg2 = tmp_reg;
10439 spec2 [MONO_INST_DEST] = ' ';
10440 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10441 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10443 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
10444 // FIXME: The backends expect the base reg to be in inst_basereg
10445 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
10447 ins->inst_basereg = var->inst_basereg;
10448 ins->inst_offset = var->inst_offset;
10449 spec = INS_INFO (ins->opcode);
10451 /* printf ("INS: "); mono_print_ins (ins); */
10452 /* Create a store instruction */
10453 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
10455 /* Insert it after the instruction */
10456 mono_bblock_insert_after_ins (bb, ins, store_ins);
10459 * We can't assign ins->dreg to var->dreg here, since the
10460 * sregs could use it. So set a flag, and do it after
10463 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
10464 dest_has_lvreg = TRUE;
/***************/
/*    SREGS    */
/***************/
10473 for (srcindex = 0; srcindex < 2; ++srcindex) {
10474 regtype = spec [(srcindex == 0) ? MONO_INST_SRC1 : MONO_INST_SRC2];
10475 sreg = srcindex == 0 ? ins->sreg1 : ins->sreg2;
10477 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
10478 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
10479 MonoInst *var = get_vreg_to_inst (cfg, sreg);
10480 MonoInst *load_ins;
10481 guint32 load_opcode;
10483 if (var->opcode == OP_REGVAR) {
10485 ins->sreg1 = var->dreg;
10487 ins->sreg2 = var->dreg;
10491 g_assert (var->opcode == OP_REGOFFSET);
10493 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
10495 g_assert (load_opcode != OP_LOADV_MEMBASE);
10497 if (vreg_to_lvreg [sreg]) {
10498 /* The variable is already loaded to an lvreg */
10499 if (G_UNLIKELY (cfg->verbose_level > 2))
10500 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
10502 ins->sreg1 = vreg_to_lvreg [sreg];
10504 ins->sreg2 = vreg_to_lvreg [sreg];
10508 /* Try to fuse the load into the instruction */
10509 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
10510 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
10511 ins->inst_basereg = var->inst_basereg;
10512 ins->inst_offset = var->inst_offset;
10513 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
10514 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
10515 ins->sreg2 = var->inst_basereg;
10516 ins->inst_offset = var->inst_offset;
10518 if (MONO_IS_REAL_MOVE (ins)) {
10519 ins->opcode = OP_NOP;
10522 //printf ("%d ", srcindex); mono_print_ins (ins);
10524 sreg = alloc_dreg (cfg, stacktypes [regtype]);
/* cache the freshly loaded lvreg unless fp-stack/volatile/indirect rules forbid it */
10526 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
10527 if (var->dreg == prev_dreg) {
10529 * sreg refers to the value loaded by the load
10530 * emitted below, but we need to use ins->dreg
10531 * since it refers to the store emitted earlier.
10535 vreg_to_lvreg [var->dreg] = sreg;
10536 g_assert (lvregs_len < 1024);
10537 lvregs [lvregs_len ++] = var->dreg;
/* longs on 32 bit: load MS word first so the insert-before order yields LS, MS */
10546 if (regtype == 'l') {
10547 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
10548 mono_bblock_insert_before_ins (bb, ins, load_ins);
10549 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
10550 mono_bblock_insert_before_ins (bb, ins, load_ins);
10553 #if SIZEOF_REGISTER == 4
10554 g_assert (load_opcode != OP_LOADI8_MEMBASE);
10556 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
10557 mono_bblock_insert_before_ins (bb, ins, load_ins);
/* now it is safe to publish the dreg lvreg (sregs have been processed) */
10563 if (dest_has_lvreg) {
10564 vreg_to_lvreg [prev_dreg] = ins->dreg;
10565 g_assert (lvregs_len < 1024);
10566 lvregs [lvregs_len ++] = prev_dreg;
10567 dest_has_lvreg = FALSE;
/* undo the earlier dreg/sreg2 swap for store opcodes */
10571 tmp_reg = ins->dreg;
10572 ins->dreg = ins->sreg2;
10573 ins->sreg2 = tmp_reg;
/* calls clobber everything: flush the lvreg cache */
10576 if (MONO_IS_CALL (ins)) {
10577 /* Clear vreg_to_lvreg array */
10578 for (i = 0; i < lvregs_len; i++)
10579 vreg_to_lvreg [lvregs [i]] = 0;
10583 if (cfg->verbose_level > 2)
10584 mono_print_ins_index (1, ins);
10591 * - use 'iadd' instead of 'int_add'
10592 * - handling ovf opcodes: decompose in method_to_ir.
10593 * - unify iregs/fregs
10594 * -> partly done, the missing parts are:
10595 * - a more complete unification would involve unifying the hregs as well, so
10596 * code wouldn't need if (fp) all over the place. but that would mean the hregs
10597 * would no longer map to the machine hregs, so the code generators would need to
10598 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
10599 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
10600 * fp/non-fp branches speeds it up by about 15%.
10601 * - use sext/zext opcodes instead of shifts
10603 * - get rid of TEMPLOADs if possible and use vregs instead
10604 * - clean up usage of OP_P/OP_ opcodes
10605 * - cleanup usage of DUMMY_USE
10606 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
10608 * - set the stack type and allocate a dreg in the EMIT_NEW macros
10609 * - get rid of all the <foo>2 stuff when the new JIT is ready.
10610 * - make sure handle_stack_args () is called before the branch is emitted
10611 * - when the new IR is done, get rid of all unused stuff
10612 * - COMPARE/BEQ as separate instructions or unify them ?
10613 * - keeping them separate allows specialized compare instructions like
10614 * compare_imm, compare_membase
10615 * - most back ends unify fp compare+branch, fp compare+ceq
10616 * - integrate mono_save_args into inline_method
10617 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRACH_BLOCK2
10618 * - handle long shift opts on 32 bit platforms somehow: they require
10619 * 3 sregs (2 for arg1 and 1 for arg2)
10620 * - make byref a 'normal' type.
10621 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
10622 * variable if needed.
10623 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
10624 * like inline_method.
10625 * - remove inlining restrictions
10626 * - fix LNEG and enable cfold of INEG
10627 * - generalize x86 optimizations like ldelema as a peephole optimization
10628 * - add store_mem_imm for amd64
10629 * - optimize the loading of the interruption flag in the managed->native wrappers
10630 * - avoid special handling of OP_NOP in passes
10631 * - move code inserting instructions into one function/macro.
10632 * - try a coalescing phase after liveness analysis
10633 * - add float -> vreg conversion + local optimizations on !x86
10634 * - figure out how to handle decomposed branches during optimizations, ie.
10635 * compare+branch, op_jump_table+op_br etc.
10636 * - promote RuntimeXHandles to vregs
10637 * - vtype cleanups:
10638 * - add a NEW_VARLOADA_VREG macro
10639 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
10640 * accessing vtype fields.
10641 * - get rid of I8CONST on 64 bit platforms
10642 * - dealing with the increase in code size due to branches created during opcode
10644 * - use extended basic blocks
10645 * - all parts of the JIT
10646 * - handle_global_vregs () && local regalloc
10647 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
10648 * - sources of increase in code size:
10651 * - isinst and castclass
10652 * - lvregs not allocated to global registers even if used multiple times
10653 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
10655 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
10656 * - add all micro optimizations from the old JIT
10657 * - put tree optimizations into the deadce pass
10658 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
10659 * specific function.
10660 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
10661 * fcompare + branchCC.
10662 * - create a helper function for allocating a stack slot, taking into account
10663 * MONO_CFG_HAS_SPILLUP.
10665 * - merge the ia64 switch changes.
10666 * - optimize mono_regstate2_alloc_int/float.
10667 * - fix the pessimistic handling of variables accessed in exception handler blocks.
10668 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
10669 * parts of the tree could be separated by other instructions, killing the tree
10670 * arguments, or stores killing loads etc. Also, should we fold loads into other
10671 * instructions if the result of the load is used multiple times ?
10672 * - make the REM_IMM optimization in mini-x86.c arch-independent.
10673 * - LAST MERGE: 108395.
10674 * - when returning vtypes in registers, generate IR and append it to the end of the
10675 * last bb instead of doing it in the epilog.
10676 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
10684 - When to decompose opcodes:
10685 - earlier: this makes some optimizations hard to implement, since the low level IR
10686 no longer contains the necessary information. But it is easier to do.
10687 - later: harder to implement, enables more optimizations.
10688 - Branches inside bblocks:
10689 - created when decomposing complex opcodes.
10690 - branches to another bblock: harmless, but not tracked by the branch
10691 optimizations, so need to branch to a label at the start of the bblock.
10692 - branches to inside the same bblock: very problematic, trips up the local
10693 reg allocator. Can be fixed by splitting the current bblock, but that is a
10694 complex operation, since some local vregs can become global vregs etc.
10695 - Local/global vregs:
10696 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
10697 local register allocator.
10698 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
10699 structure, created by mono_create_var (). Assigned to hregs or the stack by
10700 the global register allocator.
10701 - When to do optimizations like alu->alu_imm:
10702 - earlier -> saves work later on since the IR will be smaller/simpler
10703 - later -> can work on more instructions
10704 - Handling of valuetypes:
10705 - When a vtype is pushed on the stack, a new temporary is created, an
10706 instruction computing its address (LDADDR) is emitted and pushed on
10707 the stack. Need to optimize cases when the vtype is used immediately as in
10708 argument passing, stloc etc.
10709 - Instead of the to_end stuff in the old JIT, simply call the function handling
10710 the values on the stack before emitting the last instruction of the bb.
10713 #endif /* DISABLE_JIT */