2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
22 #ifdef HAVE_SYS_TIME_H
26 #ifdef HAVE_VALGRIND_MEMCHECK_H
27 #include <valgrind/memcheck.h>
30 #include <mono/metadata/assembly.h>
31 #include <mono/metadata/loader.h>
32 #include <mono/metadata/tabledefs.h>
33 #include <mono/metadata/class.h>
34 #include <mono/metadata/object.h>
35 #include <mono/metadata/exception.h>
36 #include <mono/metadata/opcodes.h>
37 #include <mono/metadata/mono-endian.h>
38 #include <mono/metadata/tokentype.h>
39 #include <mono/metadata/tabledefs.h>
40 #include <mono/metadata/marshal.h>
41 #include <mono/metadata/debug-helpers.h>
42 #include <mono/metadata/mono-debug.h>
43 #include <mono/metadata/gc-internal.h>
44 #include <mono/metadata/security-manager.h>
45 #include <mono/metadata/threads-types.h>
46 #include <mono/metadata/security-core-clr.h>
47 #include <mono/metadata/monitor.h>
48 #include <mono/utils/mono-compiler.h>
56 #include "jit-icalls.h"
/* Cost charged per branch when weighing a callee for inlining. */
#define BRANCH_COST 100
/* Upper bound on the IL length (bytes) of a callee considered for inlining. */
#define INLINE_LENGTH_LIMIT 20
/*
 * Give up inlining the current callee. Only acts while actually inlining
 * (cfg->method != method) a non-wrapper method.
 * NOTE(review): the remainder of this macro body is elided in this view.
 */
#define INLINE_FAILURE do {\
	if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE))\
/*
 * Jump to the exception exit path if a compile-time exception was already
 * recorded on cfg. NOTE(review): macro tail elided in this view.
 */
#define CHECK_CFG_EXCEPTION do {\
	if (cfg->exception_type != MONO_EXCEPTION_NONE)\
/*
 * Record a MethodAccessException on cfg ('cil_method' is not accessible
 * from 'method') and jump to the exception exit path.
 */
#define METHOD_ACCESS_FAILURE do { \
	char *method_fname = mono_method_full_name (method, TRUE); \
	char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
	cfg->exception_type = MONO_EXCEPTION_METHOD_ACCESS; \
	cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
	g_free (method_fname); \
	g_free (cil_method_fname); \
	goto exception_exit; \
/*
 * Record a FieldAccessException on cfg ('field' is not accessible from
 * 'method') and jump to the exception exit path.
 */
#define FIELD_ACCESS_FAILURE do { \
	char *method_fname = mono_method_full_name (method, TRUE); \
	char *field_fname = mono_field_full_name (field); \
	cfg->exception_type = MONO_EXCEPTION_FIELD_ACCESS; \
	cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
	g_free (method_fname); \
	g_free (field_fname); \
	goto exception_exit; \
/*
 * Mark the compilation as failed for generic code sharing: optionally
 * trace the offending opcode when verbose, record
 * MONO_EXCEPTION_GENERIC_SHARING_FAILED and jump to the exception exit
 * path. Only acts when a generic_sharing_context is present.
 */
#define GENERIC_SHARING_FAILURE(opcode) do { \
	if (cfg->generic_sharing_context) { \
		if (cfg->verbose_level > 2) \
			printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
		cfg->exception_type = MONO_EXCEPTION_GENERIC_SHARING_FAILED; \
		goto exception_exit; \
/* Determine whether 'ins' represents a load of the 'this' argument */
#define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
100 static int ldind_to_load_membase (int opcode);
101 static int stind_to_store_membase (int opcode);
103 int mono_op_to_op_imm (int opcode);
104 int mono_op_to_op_imm_noemul (int opcode);
106 MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
107 void mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native);
108 void mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass);
110 int mono_method_to_ir2 (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
111 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
112 guint inline_offset, gboolean is_virtual_call);
114 /* helper methods signature */
115 extern MonoMethodSignature *helper_sig_class_init_trampoline;
116 extern MonoMethodSignature *helper_sig_domain_get;
117 extern MonoMethodSignature *helper_sig_generic_class_init_trampoline;
118 extern MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
119 extern MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline;
122 * Instruction metadata
127 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2,
133 #if SIZEOF_VOID_P == 8
138 /* keep in sync with the enum in mini.h */
141 #include "mini-ops.h"
145 extern GHashTable *jit_icall_name_hash;
/*
 * Initialize the liveness bookkeeping of variable info 'vi' (variable id
 * 'id'); bid 0xffff marks the first-use position as "not seen yet".
 * NOTE(review): the remaining initializer lines are elided in this view.
 */
#define MONO_INIT_VARINFO(vi,id) do { \
	(vi)->range.first_use.pos.bid = 0xffff; \
/*
 * mono_alloc_ireg:
 *
 *   Exported wrapper around alloc_ireg (): allocate a fresh integer vreg
 * for CFG. NOTE(review): return-type line and braces elided in this view.
 */
mono_alloc_ireg (MonoCompile *cfg)
	return alloc_ireg (cfg);
/*
 * mono_alloc_freg:
 *
 *   Exported wrapper around alloc_freg (): allocate a fresh float vreg
 * for CFG. NOTE(review): return-type line and braces elided in this view.
 */
mono_alloc_freg (MonoCompile *cfg)
	return alloc_freg (cfg);
/*
 * mono_alloc_preg:
 *
 *   Exported wrapper around alloc_preg (): allocate a fresh pointer-sized
 * vreg for CFG. NOTE(review): return-type line and braces elided in this view.
 */
mono_alloc_preg (MonoCompile *cfg)
	return alloc_preg (cfg);
/*
 * mono_alloc_dreg:
 *
 *   Exported wrapper around alloc_dreg (): allocate a destination vreg of
 * the register class matching STACK_TYPE.
 * NOTE(review): return-type line and braces elided in this view.
 */
mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
	return alloc_dreg (cfg, stack_type);
/*
 * mono_type_to_regmove:
 *
 *   Pick the move opcode for transferring a value of TYPE between two
 * vregs, dispatching on the metadata type code. Enums and generic
 * instances are unwrapped and re-dispatched on their underlying type.
 * NOTE(review): the return statements and several case labels are elided
 * in this view; documented from the visible structure only.
 */
mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
	switch (type->type) {
	case MONO_TYPE_BOOLEAN:
	case MONO_TYPE_FNPTR:
	case MONO_TYPE_CLASS:
	case MONO_TYPE_STRING:
	case MONO_TYPE_OBJECT:
	case MONO_TYPE_SZARRAY:
	case MONO_TYPE_ARRAY:
#if SIZEOF_VOID_P == 8
	case MONO_TYPE_VALUETYPE:
		/* enums move like their underlying integral type */
		if (type->data.klass->enumtype) {
			type = type->data.klass->enum_basetype;
		/* SIMD-mapped value types get their own register class */
		if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
	case MONO_TYPE_TYPEDBYREF:
	case MONO_TYPE_GENERICINST:
		/* retry with the generic container's open type */
		type = &type->data.generic_class->container_class->byval_arg;
		/* type variables are only legal under generic sharing */
		g_assert (cfg->generic_sharing_context);
		g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/*
 * mono_print_bb:
 *
 *   Debug helper: print MSG, BB's block number, its incoming and outgoing
 * edges (block number plus depth-first number), and every instruction in
 * the block. NOTE(review): return type, braces and the label separators
 * between the edge lists are elided in this view.
 */
mono_print_bb (MonoBasicBlock *bb, const char *msg)
	printf ("\n%s %d: [IN: ", msg, bb->block_num);
	for (i = 0; i < bb->in_count; ++i)
		printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
	for (i = 0; i < bb->out_count; ++i)
		printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
	for (tree = bb->code; tree; tree = tree->next)
		mono_print_ins_index (-1, tree);
259 * Can't put this at the beginning, since other files reference stuff from this
/*
 * Flag the current method as containing unverified IL: break into the
 * debugger when the 'break_on_unverified' debug option is set, otherwise
 * jump to the function's 'unverified' label.
 */
#define UNVERIFIED do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto unverified; } while (0)
/*
 * Look up (or lazily create and register) the basic block that starts at
 * IL address IP, storing it in TBLOCK. An IP outside [header->code, end)
 * is unverified IL. NOTE(review): macro tail elided in this view.
 */
#define GET_BBLOCK(cfg,tblock,ip) do { \
	(tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
	if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
	NEW_BBLOCK (cfg, (tblock)); \
	(tblock)->cil_code = (ip); \
	ADD_BBLOCK (cfg, (tblock)); \
#ifndef MONO_ARCH_EMIT_BOUNDS_CHECK
/*
 * Generic array bounds check: load the 32 bit length stored at
 * array_reg+offset, compare against index_reg and throw
 * IndexOutOfRangeException when length <= index (unsigned compare).
 * Architectures may supply a cheaper definition of their own.
 */
#define MONO_ARCH_EMIT_BOUNDS_CHECK(cfg, array_reg, offset, index_reg) do { \
	int _length_reg = alloc_ireg (cfg); \
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, _length_reg, array_reg, offset); \
	MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, _length_reg, index_reg); \
	MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException"); \
/*
 * Emit a bounds check of index_reg against the given array object.
 * Without MONO_OPT_ABCREM the check is expanded inline right away; with
 * it, an OP_BOUNDS_CHECK pseudo-op is emitted instead so the array-
 * bounds-check-removal pass can later eliminate redundant checks.
 * NOTE(review): the else branch structure is elided in this view.
 */
#define MONO_EMIT_BOUNDS_CHECK(cfg, array_reg, array_type, array_length_field, index_reg) do { \
	if (!(cfg->opt & MONO_OPT_ABCREM)) { \
		MONO_ARCH_EMIT_BOUNDS_CHECK ((cfg), (array_reg), G_STRUCT_OFFSET (array_type, array_length_field), (index_reg)); \
		MONO_INST_NEW ((cfg), ins, OP_BOUNDS_CHECK); \
		ins->sreg1 = array_reg; \
		ins->sreg2 = index_reg; \
		ins->inst_imm = G_STRUCT_OFFSET (array_type, array_length_field); \
		MONO_ADD_INS ((cfg)->cbb, ins); \
		(cfg)->flags |= MONO_CFG_HAS_ARRAY_ACCESS; \
		(cfg)->cbb->has_array_access = TRUE; \
#if defined(__i386__) || defined(__x86_64__)
/*
 * Emit an x86 LEA computing sr1 + (sr2 << shift) + imm into a freshly
 * allocated pointer-sized dreg (x86/amd64 only).
 */
#define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
	MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
	(dest)->dreg = alloc_preg ((cfg)); \
	(dest)->sreg1 = (sr1); \
	(dest)->sreg2 = (sr2); \
	(dest)->inst_imm = (imm); \
	(dest)->backend.shift_amount = (shift); \
	MONO_ADD_INS ((cfg)->cbb, (dest)); \
#if SIZEOF_VOID_P == 8
/*
 * On 64 bit targets, sign-extend an I4 second operand to pointer width
 * before a binop whose first operand is pointer-sized, rewriting the
 * instruction's sreg2 to the widened vreg.
 */
#define ADD_WIDEN_OP(ins, arg1, arg2) do { \
	/* FIXME: Need to add many more cases */ \
	if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
		int dr = alloc_preg (cfg); \
		EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
		(ins)->sreg2 = widen->dreg; \
/* On 32 bit targets no widening is needed; empty definition. */
#define ADD_WIDEN_OP(ins, arg1, arg2)
/*
 * Emit a binary operation 'op' consuming the two top-of-stack values
 * sp[0]/sp[1]: infer the result stack type via type_from_op (), insert a
 * widening op when needed, allocate the destination vreg and let
 * mono_decompose_opcode () lower the opcode for the target.
 */
#define ADD_BINOP(op) do { \
	MONO_INST_NEW (cfg, ins, (op)); \
	ins->sreg1 = sp [0]->dreg; \
	ins->sreg2 = sp [1]->dreg; \
	type_from_op (ins, sp [0], sp [1]); \
	/* Have to insert a widening op */ \
	ADD_WIDEN_OP (ins, sp [0], sp [1]); \
	ins->dreg = alloc_dreg ((cfg), (ins)->type); \
	MONO_ADD_INS ((cfg)->cbb, (ins)); \
	mono_decompose_opcode ((cfg), (ins)); \
/*
 * Emit a unary operation 'op' consuming the top-of-stack value sp[0];
 * same type-inference / decomposition pipeline as ADD_BINOP.
 */
#define ADD_UNOP(op) do { \
	MONO_INST_NEW (cfg, ins, (op)); \
	ins->sreg1 = sp [0]->dreg; \
	type_from_op (ins, sp [0], NULL); \
	(ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
	MONO_ADD_INS ((cfg)->cbb, (ins)); \
	mono_decompose_opcode (cfg, ins); \
/*
 * Emit a two-way conditional branch: an OP_COMPARE of the two
 * top-of-stack values followed by the branch instruction. Links the
 * current bblock to both the branch target and the fall-through block
 * (which is 'next_block' when supplied, otherwise the block at 'ip'),
 * flushing any live stack entries first.
 * NOTE(review): the else/closing-brace lines are elided in this view.
 */
#define ADD_BINCOND(next_block) do { \
	MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
	cmp->sreg1 = sp [0]->dreg; \
	cmp->sreg2 = sp [1]->dreg; \
	type_from_op (cmp, sp [0], sp [1]); \
	type_from_op (ins, sp [0], sp [1]); \
	ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
	GET_BBLOCK (cfg, tblock, target); \
	link_bblock (cfg, bblock, tblock); \
	ins->inst_true_bb = tblock; \
	if ((next_block)) { \
		link_bblock (cfg, bblock, (next_block)); \
		ins->inst_false_bb = (next_block); \
		start_new_bblock = 1; \
		GET_BBLOCK (cfg, tblock, ip); \
		link_bblock (cfg, bblock, tblock); \
		ins->inst_false_bb = tblock; \
		start_new_bblock = 2; \
	if (sp != stack_start) { \
		handle_stack_args (cfg, stack_start, sp - stack_start); \
		CHECK_UNVERIFIABLE (cfg); \
	MONO_ADD_INS (bblock, cmp); \
	MONO_ADD_INS (bblock, ins); \
/*
 * link_bblock:
 *
 *   Link two basic blocks in the control-flow graph: add 'to' to the
 * out-edge list of 'from' and 'from' to the in-edge list of 'to'.
 * Duplicate edges are detected and not added twice; edge arrays are
 * grown by allocating a larger copy from the compile mempool.
 * NOTE(review): return type, braces and several statements are elided
 * in this view.
 */
link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
	MonoBasicBlock **newa;
	/* verbose tracing: entry/exit blocks have no cil_code */
	if (from->cil_code) {
		printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
		printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
		printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
		printf ("edge from entry to exit\n");
	/* already linked on the out side? */
	for (i = 0; i < from->out_count; ++i) {
		if (to == from->out_bb [i]) {
	/* grow from->out_bb by one slot */
	newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
	for (i = 0; i < from->out_count; ++i) {
		newa [i] = from->out_bb [i];
	/* same procedure for to->in_bb */
	for (i = 0; i < to->in_count; ++i) {
		if (from == to->in_bb [i]) {
	newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
	for (i = 0; i < to->in_count; ++i) {
		newa [i] = to->in_bb [i];
/*
 * mono_link_bblock:
 *
 *   Exported wrapper around link_bblock ().
 * NOTE(review): return-type line and braces elided in this view.
 */
mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
	link_bblock (cfg, from, to);
/*
 * mono_find_block_region:
 *
 *   We mark each basic block with a region ID. We use that to avoid BB
 * optimizations when blocks are in different regions.
 *
 * Returns:
 *   A region token that encodes where this region is, and information
 * about the clause owner for this block.
 *
 *   The region encodes the try/catch/filter clause that owns this block
 * as well as the type. -1 is a special value that represents a block
 * that is in none of try/catch/filter. The clause index is stored
 * 1-based in the upper bits ((i + 1) << 8) with the region kind and
 * clause flags in the low byte.
 */
mono_find_block_region (MonoCompile *cfg, int offset)
	MonoMethod *method = cfg->method;
	MonoMethodHeader *header = mono_method_get_header (method);
	MonoExceptionClause *clause;

	/* first search for handlers and filters */
	for (i = 0; i < header->num_clauses; ++i) {
		clause = &header->clauses [i];
		/* a filter region spans [filter_offset, handler_offset) */
		if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
		    (offset < (clause->handler_offset)))
			return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
		if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
			if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
				return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
			else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
				return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
				return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
	/* search the try blocks */
	for (i = 0; i < header->num_clauses; ++i) {
		clause = &header->clauses [i];
		if (MONO_OFFSET_IN_CLAUSE (clause, offset))
			return ((i + 1) << 8) | clause->flags;
/*
 * mono_find_final_block:
 *
 *   Collect the handler basic blocks of every exception clause of kind
 * TYPE that a branch from IP to TARGET leaves (the clause contains IP
 * but not TARGET) — i.e. the handlers that must run on the way out.
 * Returns a GList of MonoBasicBlock*.
 * NOTE(review): return type, braces and loop tail are elided in this view.
 */
mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
	MonoMethod *method = cfg->method;
	MonoMethodHeader *header = mono_method_get_header (method);
	MonoExceptionClause *clause;
	MonoBasicBlock *handler;

	for (i = 0; i < header->num_clauses; ++i) {
		clause = &header->clauses [i];
		/* branch exits the clause: source inside, target outside */
		if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
		    (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
			if (clause->flags == type) {
				handler = cfg->cil_offset_to_bb [clause->handler_offset];
				res = g_list_append (res, handler);
/*
 * mono_create_spvar_for_region:
 *
 *   Return the stack-pointer save variable for REGION, creating and
 * caching it in cfg->spvars on first use. The variable is marked
 * MONO_INST_INDIRECT so the register allocator keeps it on the stack.
 * NOTE(review): return type, braces and the early-return on a cache hit
 * are elided in this view.
 */
mono_create_spvar_for_region (MonoCompile *cfg, int region)
	var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
	var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
	/* prevent it from being register allocated */
	var->flags |= MONO_INST_INDIRECT;
	g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/*
 * mono_find_exvar_for_offset:
 *
 *   Look up the exception variable cached for IL OFFSET in cfg->exvars;
 * NULL-equivalent lookup result when none was created yet.
 * NOTE(review): return-type line and braces elided in this view.
 */
mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
	return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/*
 * mono_create_exvar_for_offset:
 *
 *   Return the exception-object variable for IL OFFSET, creating and
 * caching it in cfg->exvars on first use. Marked MONO_INST_INDIRECT so
 * it is never register allocated.
 * NOTE(review): return type, braces and the early-return on a cache hit
 * are elided in this view.
 */
mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
	var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
	var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
	/* prevent it from being register allocated */
	var->flags |= MONO_INST_INDIRECT;
	g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
/*
 * type_to_eval_stack_type:
 *
 *   Set inst->type (and inst->klass) to the evaluation-stack type used
 * when a value of TYPE is loaded: STACK_I4/I8/R8/PTR/OBJ/MP/VTYPE.
 * Byref types become STACK_MP; enums and generic instances are unwrapped
 * and re-dispatched on the underlying type.
 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
 * NOTE(review): braces, 'break's and several case labels are elided in
 * this view.
 */
type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
	inst->klass = klass = mono_class_from_mono_type (type);
	/* byref values are managed pointers on the stack */
	inst->type = STACK_MP;
	switch (type->type) {
		inst->type = STACK_INV;
	case MONO_TYPE_BOOLEAN:
		inst->type = STACK_I4;
	case MONO_TYPE_FNPTR:
		inst->type = STACK_PTR;
	case MONO_TYPE_CLASS:
	case MONO_TYPE_STRING:
	case MONO_TYPE_OBJECT:
	case MONO_TYPE_SZARRAY:
	case MONO_TYPE_ARRAY:
		inst->type = STACK_OBJ;
		inst->type = STACK_I8;
		inst->type = STACK_R8;
	case MONO_TYPE_VALUETYPE:
		/* enums load as their underlying integral type */
		if (type->data.klass->enumtype) {
			type = type->data.klass->enum_basetype;
		inst->type = STACK_VTYPE;
	case MONO_TYPE_TYPEDBYREF:
		inst->klass = mono_defaults.typed_reference_class;
		inst->type = STACK_VTYPE;
	case MONO_TYPE_GENERICINST:
		/* retry with the generic container's open type */
		type = &type->data.generic_class->container_class->byval_arg;
	case MONO_TYPE_MVAR :
		/* FIXME: all the arguments must be references for now,
		 * later look inside cfg and see if the arg num is
		 */
		g_assert (cfg->generic_sharing_context);
		inst->type = STACK_OBJ;
		g_error ("unknown type 0x%02x in eval stack type", type->type);
/*
 * The following tables are used to quickly validate the IL code in type_from_op ().
 * They are indexed by the STACK_* type of the operand(s); STACK_INV (or 0)
 * entries mark invalid operand combinations.
 */
/* result type of a numeric binop (add/sub/mul/...) for each operand pair */
bin_num_table [STACK_MAX] [STACK_MAX] = {
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_I4,  STACK_INV, STACK_PTR, STACK_INV, STACK_MP,  STACK_INV, STACK_INV},
	{STACK_INV, STACK_INV, STACK_I8,  STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP,  STACK_INV, STACK_INV},
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8,  STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_MP,  STACK_INV, STACK_MP,  STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* result type of a unary negation, indexed by operand stack type */
	STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
/* reduce the size of this table */
/* result type of an integer-only binop (and/or/xor/...) */
bin_int_table [STACK_MAX] [STACK_MAX] = {
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_I4,  STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_INV, STACK_I8,  STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/*
 * comparison validity: 0 = invalid, nonzero = allowed (values > 1 flag
 * combinations that are only conditionally verifiable).
 */
bin_comp_table [STACK_MAX] [STACK_MAX] = {
/*	Inv i  L  p  F  &  O  vt */
	{0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
	{0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
	{0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
	{0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
	{0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
	{0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
	{0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
/* reduce the size of this table */
/* result type of a shift: rows = value, columns = shift amount */
shift_table [STACK_MAX] [STACK_MAX] = {
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_I4,  STACK_INV, STACK_I4,  STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_I8,  STACK_INV, STACK_I8,  STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/*
 * Tables to map from the non-specific opcode to the matching
 * type-specific opcode. Each entry is the (constant) delta added to the
 * generic CEE_/OP_ opcode to obtain the I/L/P/F-specific one, indexed by
 * the STACK_* type of the (result or first) operand.
 */
/* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
binops_op_map [STACK_MAX] = {
	0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
/* handles from CEE_NEG to CEE_CONV_U8 */
unops_op_map [STACK_MAX] = {
	0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
/* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
ovfops_op_map [STACK_MAX] = {
	0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
/* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
ovf2ops_op_map [STACK_MAX] = {
	0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
/* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
ovf3ops_op_map [STACK_MAX] = {
	0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
/* handles from CEE_BEQ to CEE_BLT_UN */
beqops_op_map [STACK_MAX] = {
	0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
/* handles from CEE_CEQ to CEE_CLT_UN */
ceqops_op_map [STACK_MAX] = {
	0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
/*
 * type_from_op:
 *
 *   Sets ins->type (the type on the eval stack) according to the
 * type of the opcode and the arguments to it.
 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
 * Also specializes the generic opcode (via the *_op_map deltas or direct
 * assignment) to the I4/I8/PTR/R8 variant matching the operands.
 *
 * FIXME: this function sets ins->type unconditionally in some cases, but
 * it should set it to invalid for some types (a conv.x on an object)
 *
 * NOTE(review): many case labels, 'break's and braces are elided in this
 * view; the inline comments below describe only the visible statements.
 */
type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
	switch (ins->opcode) {
		/* numeric binops: result type from bin_num_table */
		/* FIXME: check unverifiable args for STACK_MP */
		ins->type = bin_num_table [src1->type] [src2->type];
		ins->opcode += binops_op_map [ins->type];
		/* integer-only binops */
		ins->type = bin_int_table [src1->type] [src2->type];
		ins->opcode += binops_op_map [ins->type];
		/* shifts */
		ins->type = shift_table [src1->type] [src2->type];
		ins->opcode += binops_op_map [ins->type];
		/* compares: pick L/F/I variant from the first operand */
		ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
		if ((src1->type == STACK_I8) || ((sizeof (gpointer) == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
			ins->opcode = OP_LCOMPARE;
		else if (src1->type == STACK_R8)
			ins->opcode = OP_FCOMPARE;
			ins->opcode = OP_ICOMPARE;
	case OP_ICOMPARE_IMM:
		ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
		if ((src1->type == STACK_I8) || ((sizeof (gpointer) == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
			ins->opcode = OP_LCOMPARE_IMM;
		/* conditional branches */
		ins->opcode += beqops_op_map [src1->type];
		/* ceq family */
		ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
		ins->opcode += ceqops_op_map [src1->type];
		/* cgt/clt family: only the '1' (fully valid) table entries allowed */
		ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
		ins->opcode += ceqops_op_map [src1->type];
		/* unary ops */
		ins->type = neg_table [src1->type];
		ins->opcode += unops_op_map [ins->type];
		/* 'not' is valid only on integral stack types */
		if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
			ins->type = src1->type;
			ins->type = STACK_INV;
		ins->opcode += unops_op_map [ins->type];
		/* narrowing conversions to I4 */
		ins->type = STACK_I4;
		ins->opcode += unops_op_map [src1->type];
		/* conv.r.un: unsigned int -> R8 */
		ins->type = STACK_R8;
		switch (src1->type) {
			ins->opcode = OP_ICONV_TO_R_UN;
			ins->opcode = OP_LCONV_TO_R_UN;
	case CEE_CONV_OVF_I1:
	case CEE_CONV_OVF_U1:
	case CEE_CONV_OVF_I2:
	case CEE_CONV_OVF_U2:
	case CEE_CONV_OVF_I4:
	case CEE_CONV_OVF_U4:
		ins->type = STACK_I4;
		ins->opcode += ovf3ops_op_map [src1->type];
	case CEE_CONV_OVF_I_UN:
	case CEE_CONV_OVF_U_UN:
		ins->type = STACK_PTR;
		ins->opcode += ovf2ops_op_map [src1->type];
	case CEE_CONV_OVF_I1_UN:
	case CEE_CONV_OVF_I2_UN:
	case CEE_CONV_OVF_I4_UN:
	case CEE_CONV_OVF_U1_UN:
	case CEE_CONV_OVF_U2_UN:
	case CEE_CONV_OVF_U4_UN:
		ins->type = STACK_I4;
		ins->opcode += ovf2ops_op_map [src1->type];
		/* conv.u: pointer-width result, opcode depends on source width */
		ins->type = STACK_PTR;
		switch (src1->type) {
			ins->opcode = OP_ICONV_TO_U;
#if SIZEOF_VOID_P == 8
			ins->opcode = OP_LCONV_TO_U;
			ins->opcode = OP_MOVE;
			ins->opcode = OP_LCONV_TO_U;
			ins->opcode = OP_FCONV_TO_U;
		/* conversions to I8 */
		ins->type = STACK_I8;
		ins->opcode += unops_op_map [src1->type];
	case CEE_CONV_OVF_I8:
	case CEE_CONV_OVF_U8:
		ins->type = STACK_I8;
		ins->opcode += ovf3ops_op_map [src1->type];
	case CEE_CONV_OVF_U8_UN:
	case CEE_CONV_OVF_I8_UN:
		ins->type = STACK_I8;
		ins->opcode += ovf2ops_op_map [src1->type];
		/* conversions to R4/R8 */
		ins->type = STACK_R8;
		ins->opcode += unops_op_map [src1->type];
		ins->type = STACK_R8;
		ins->type = STACK_I4;
		ins->opcode += ovfops_op_map [src1->type];
		ins->type = STACK_PTR;
		ins->opcode += ovfops_op_map [src1->type];
		/* overflow-checked arithmetic: no R8 result allowed */
		ins->type = bin_num_table [src1->type] [src2->type];
		ins->opcode += ovfops_op_map [src1->type];
		if (ins->type == STACK_R8)
			ins->type = STACK_INV;
	case OP_LOAD_MEMBASE:
		ins->type = STACK_PTR;
	case OP_LOADI1_MEMBASE:
	case OP_LOADU1_MEMBASE:
	case OP_LOADI2_MEMBASE:
	case OP_LOADU2_MEMBASE:
	case OP_LOADI4_MEMBASE:
	case OP_LOADU4_MEMBASE:
		ins->type = STACK_PTR;
	case OP_LOADI8_MEMBASE:
		ins->type = STACK_I8;
	case OP_LOADR4_MEMBASE:
	case OP_LOADR8_MEMBASE:
		ins->type = STACK_R8;
		g_error ("opcode 0x%04x not handled in type from op", ins->opcode);

	/* managed pointers carry object_class as a placeholder klass */
	if (ins->type == STACK_MP)
		ins->klass = mono_defaults.object_class;
/* map of MONO_TYPE_* codes to eval-stack types (table name elided in this view) */
	STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ

/* NOTE(review): initializer rows of param_table are elided in this view. */
param_table [STACK_MAX] [STACK_MAX] = {

/*
 * check_values_to_signature:
 *
 *   Validate that the stack types of the values in ARGS are compatible
 * with the parameter types of SIG (and 'this', when present). Returns a
 * pass/fail result. NOTE(review): return type, braces, several case
 * labels and the return statements are elided in this view.
 */
check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
	switch (args->type) {
	for (i = 0; i < sig->param_count; ++i) {
		switch (args [i].type) {
			/* a STACK_MP value requires a byref parameter */
			if (!sig->params [i]->byref)
			if (sig->params [i]->byref)
			switch (sig->params [i]->type) {
			case MONO_TYPE_CLASS:
			case MONO_TYPE_STRING:
			case MONO_TYPE_OBJECT:
			case MONO_TYPE_SZARRAY:
			case MONO_TYPE_ARRAY:
			if (sig->params [i]->byref)
			/* STACK_R8 values only match R4/R8 parameters */
			if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
	/*if (!param_table [args [i].type] [sig->params [i]->type])
	 */
/*
 * mono_get_domainvar:
 *
 *   When we need a pointer to the current domain many times in a method, we
 * call mono_domain_get() once and we store the result in a local variable.
 * This function returns the variable that represents the MonoDomain*,
 * creating it lazily on first use.
 */
inline static MonoInst *
mono_get_domainvar (MonoCompile *cfg)
	if (!cfg->domainvar)
		cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
	return cfg->domainvar;
/*
 * mono_get_got_var:
 *
 *   The got_var contains the address of the Global Offset Table when AOT
 * compiling (lazily created; only meaningful when the architecture
 * defines MONO_ARCH_NEED_GOT_VAR and cfg->compile_aot is set).
 * NOTE(review): the non-AOT early return and the #else branch are elided
 * in this view.
 */
inline static MonoInst *
mono_get_got_var (MonoCompile *cfg)
#ifdef MONO_ARCH_NEED_GOT_VAR
	if (!cfg->compile_aot)
	if (!cfg->got_var) {
		cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
	return cfg->got_var;
/*
 * mono_get_vtable_var:
 *
 *   Return the variable holding the runtime generic context / vtable for
 * a shared generic method, creating it lazily. Only valid when a
 * generic_sharing_context is present (asserted).
 * NOTE(review): return-type line and braces elided in this view.
 */
mono_get_vtable_var (MonoCompile *cfg)
	g_assert (cfg->generic_sharing_context);

	if (!cfg->rgctx_var) {
		cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
		/* force the var to be stack allocated */
		cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
	return cfg->rgctx_var;
/*
 * type_from_stack_type:
 *
 *   Map the eval-stack type of INS back to a MonoType*. For STACK_MP the
 * klass's this_arg (a byref type) is used; for STACK_VTYPE the klass's
 * byval_arg. Errors out on unhandled stack types.
 * NOTE(review): return-type line and braces elided in this view.
 */
type_from_stack_type (MonoInst *ins) {
	switch (ins->type) {
	case STACK_I4: return &mono_defaults.int32_class->byval_arg;
	case STACK_I8: return &mono_defaults.int64_class->byval_arg;
	case STACK_PTR: return &mono_defaults.int_class->byval_arg;
	case STACK_R8: return &mono_defaults.double_class->byval_arg;
		return &ins->klass->this_arg;
	case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
	case STACK_VTYPE: return &ins->klass->byval_arg;
		g_error ("stack type %d to monotype not handled\n", ins->type);
/*
 * type_to_stack_type:
 *
 *   Map a MonoType* to its STACK_* eval-stack type, dispatching on the
 * underlying type (enums resolved via mono_type_get_underlying_type).
 * NOTE(review): the return statements and several case labels are elided
 * in this view.
 */
static G_GNUC_UNUSED int
type_to_stack_type (MonoType *t)
	switch (mono_type_get_underlying_type (t)->type) {
	case MONO_TYPE_BOOLEAN:
	case MONO_TYPE_CHAR:
	case MONO_TYPE_FNPTR:
	case MONO_TYPE_CLASS:
	case MONO_TYPE_STRING:
	case MONO_TYPE_OBJECT:
	case MONO_TYPE_SZARRAY:
	case MONO_TYPE_ARRAY:
	case MONO_TYPE_VALUETYPE:
	case MONO_TYPE_TYPEDBYREF:
	case MONO_TYPE_GENERICINST:
		/* generic insts over value types behave like value types */
		if (mono_type_generic_inst_is_valuetype (t))
		g_assert_not_reached ();
/*
 * array_access_to_klass:
 *
 *   Map a CEE_LDELEM_*/CEE_STELEM_* opcode to the MonoClass of the
 * element type it accesses. Unknown opcodes are a programming error.
 * NOTE(review): return-type line, braces and the case labels for most
 * entries are elided in this view.
 */
array_access_to_klass (int opcode)
		return mono_defaults.byte_class;
		return mono_defaults.uint16_class;
		return mono_defaults.int_class;
		return mono_defaults.sbyte_class;
		return mono_defaults.int16_class;
		return mono_defaults.int32_class;
		return mono_defaults.uint32_class;
		return mono_defaults.int64_class;
		return mono_defaults.single_class;
		return mono_defaults.double_class;
	case CEE_LDELEM_REF:
	case CEE_STELEM_REF:
		return mono_defaults.object_class;
		g_assert_not_reached ();
/*
 * mono_compile_get_interface_var:
 *
 *   Return a local variable used to carry the stack value at SLOT across
 * basic-block boundaries. We try to share variables when possible: the
 * cache cfg->intvars is keyed by (stack type, slot); only the stack
 * types visible in the switch are cached, others always get a fresh var.
 * NOTE(review): return type, braces and several case labels are elided
 * in this view.
 */
mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
	/* inlining can result in deeper stacks */
	if (slot >= mono_method_get_header (cfg->method)->max_stack)
		return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);

	pos = ins->type - 1 + slot * STACK_MAX;

	switch (ins->type) {
		if ((vnum = cfg->intvars [pos]))
			return cfg->varinfo [vnum];
		res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
		cfg->intvars [pos] = res->inst_c0;
		res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/*
 * mono_save_token_info:
 *
 *   Remember the (image, token) pair under KEY in cfg->token_info_hash so
 * the AOT compiler can emit a token-based reference for it.
 * NOTE(review): return-type line and braces elided in this view.
 */
mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
	/*
	 * Don't use this if a generic_context is set, since that means AOT can't
	 * look up the method using just the image+token.
	 * table == 0 means this is a reference made from a wrapper.
	 */
	if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
		MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
		jump_info_token->image = image;
		jump_info_token->token = token;
		g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
/*
 * handle_stack_args:
 *
 *   This function is called to handle items that are left on the evaluation stack
 * at basic block boundaries. What happens is that we save the values to local variables
 * and we reload them later when first entering the target basic block (with the
 * handle_loaded_temps () function).
 * A single joint point will use the same variables (stored in the array bb->out_stack or
 * bb->in_stack, if the basic block is before or after the joint point).
 *
 * This function needs to be called _before_ emitting the last instruction of
 * the bb (i.e. before emitting a branch).
 * If the stack merge fails at a join point, cfg->unverifiable is set.
 *
 * NOTE(review): return type, braces, 'continue's and several closing
 * statements are elided in this view.
 */
handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
	MonoBasicBlock *bb = cfg->cbb;
	MonoBasicBlock *outb;
	MonoInst *inst, **locals;

	if (cfg->verbose_level > 3)
		printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
	/* first exit from this bb: decide which variables carry the stack */
	if (!bb->out_scount) {
		bb->out_scount = count;
		//printf ("bblock %d has out:", bb->block_num);
		for (i = 0; i < bb->out_count; ++i) {
			outb = bb->out_bb [i];
			/* exception handlers are linked, but they should not be considered for stack args */
			if (outb->flags & BB_EXCEPTION_HANDLER)
			//printf (" %d", outb->block_num);
			/* a successor already has an in_stack: share it */
			if (outb->in_stack) {
				bb->out_stack = outb->in_stack;
		bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
		for (i = 0; i < count; ++i) {
			/*
			 * try to reuse temps already allocated for this purpouse, if they occupy the same
			 * stack slot and if they are of the same type.
			 * This won't cause conflicts since if 'local' is used to
			 * store one of the values in the in_stack of a bblock, then
			 * the same variable will be used for the same outgoing stack
			 * This doesn't work when inlining methods, since the bblocks
			 * in the inlined methods do not inherit their in_stack from
			 * the bblock they are inlined to. See bug #58863 for an
			 */
			if (cfg->inlined_method)
				bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
				bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);

	/* propagate out_stack to successors as their in_stack */
	for (i = 0; i < bb->out_count; ++i) {
		outb = bb->out_bb [i];
		/* exception handlers are linked, but they should not be considered for stack args */
		if (outb->flags & BB_EXCEPTION_HANDLER)
		if (outb->in_scount) {
			/* stack depth mismatch at a join point: unverifiable IL */
			if (outb->in_scount != bb->out_scount) {
				cfg->unverifiable = TRUE;
			continue; /* check they are the same locals */
		outb->in_scount = count;
		outb->in_stack = bb->out_stack;

	/* store the live stack entries into the shared temps */
	locals = bb->out_stack;
	for (i = 0; i < count; ++i) {
		EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
		inst->cil_code = sp [i]->cil_code;
		sp [i] = locals [i];
		if (cfg->verbose_level > 3)
			printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);

	/*
	 * It is possible that the out bblocks already have in_stack assigned, and
	 * the in_stacks differ. In this case, we will store to all the different
	 * in_stacks.
	 */

	/* Find a bblock which has a different in_stack */
	while (bindex < bb->out_count) {
		outb = bb->out_bb [bindex];
		/* exception handlers are linked, but they should not be considered for stack args */
		if (outb->flags & BB_EXCEPTION_HANDLER) {
		if (outb->in_stack != locals) {
			for (i = 0; i < count; ++i) {
				EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
				inst->cil_code = sp [i]->cil_code;
				sp [i] = locals [i];
				if (cfg->verbose_level > 3)
					printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
			locals = outb->in_stack;
1366 /* Emit code which loads interface_offsets [klass->interface_id]
1367 * The array is stored in memory before vtable.
/*
 * Loads into @intf_reg the interface-offsets entry for @klass, given the
 * address of a MonoVTable in @vtable_reg. The table is indexed with negative
 * offsets relative to the vtable pointer (see the non-AOT load below).
 */
1370 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1372 if (cfg->compile_aot) {
/* AOT: the interface id is not known until load time, so fetch it through
 * a patchable constant (MONO_PATCH_INFO_ADJUSTED_IID — presumably already
 * scaled/negated for the indexing scheme; confirm against the AOT patcher)
 * and compute the slot address at run time. */
1373 int ioffset_reg = alloc_preg (cfg);
1374 int iid_reg = alloc_preg (cfg);
1376 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1377 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1378 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
/* JIT: the id is a compile-time constant, so index the array (stored just
 * before the vtable, hence the negative offset) directly. */
1381 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
1386 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1387 * stored in "klass_reg" implements the interface "klass".
1390 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1392 int ibitmap_reg = alloc_preg (cfg);
1393 int ibitmap_byte_reg = alloc_preg (cfg);
/* The class carries a bitmap with one bit per interface id. */
1395 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap));
1397 if (cfg->compile_aot) {
/* AOT: the interface id is a patched constant, so both the byte index
 * (iid >> 3) and the bit mask (1 << (iid & 7)) must be computed at
 * run time. */
1398 int iid_reg = alloc_preg (cfg);
1399 int shifted_iid_reg = alloc_preg (cfg);
1400 int ibitmap_byte_address_reg = alloc_preg (cfg);
1401 int masked_iid_reg = alloc_preg (cfg);
1402 int iid_one_bit_reg = alloc_preg (cfg);
1403 int iid_bit_reg = alloc_preg (cfg);
1404 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
/* byte index = iid / 8 */
1405 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1406 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1407 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
/* bit mask = 1 << (iid % 8) */
1408 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1409 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1410 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1411 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT: both the byte index and the bit mask are compile-time constants. */
1413 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1414 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1419 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1420 * stored in "vtable_reg" implements the interface "klass".
/* Same bitmap test as mini_emit_load_intf_bit_reg_class (), but reads the
 * interface_bitmap from a MonoVTable instead of a MonoClass. */
1423 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1425 int ibitmap_reg = alloc_preg (cfg);
1426 int ibitmap_byte_reg = alloc_preg (cfg);
1428 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap));
1430 if (cfg->compile_aot) {
/* AOT: the interface id is a patched constant — compute byte index
 * (iid >> 3) and bit mask (1 << (iid & 7)) at run time. */
1431 int iid_reg = alloc_preg (cfg);
1432 int shifted_iid_reg = alloc_preg (cfg);
1433 int ibitmap_byte_address_reg = alloc_preg (cfg);
1434 int masked_iid_reg = alloc_preg (cfg);
1435 int iid_one_bit_reg = alloc_preg (cfg);
1436 int iid_bit_reg = alloc_preg (cfg);
1437 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1438 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, shifted_iid_reg, iid_reg, 3);
1439 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1440 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1441 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, masked_iid_reg, iid_reg, 7);
1442 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1443 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1444 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT: byte index and bit mask are constants. */
1446 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1447 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1452 * Emit code which checks whenever the interface id of @klass is smaller than
1453 * than the value given by max_iid_reg.
/* On failure (max_iid < klass's iid, unsigned compare): branch to
 * @false_target when it is non-NULL, otherwise throw InvalidCastException
 * (the guarding if/else lines are not visible in this listing). */
1456 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1457 MonoBasicBlock *false_target)
1459 if (cfg->compile_aot) {
/* AOT: load the interface id through a patchable constant. */
1460 int iid_reg = alloc_preg (cfg);
1461 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1462 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1465 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1467 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1469 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1472 /* Same as above, but obtains max_iid from a vtable */
1474 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1475 MonoBasicBlock *false_target)
1477 int max_iid_reg = alloc_preg (cfg);
/* max_interface_id is loaded as an unsigned 16-bit field of MonoVTable. */
1479 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1480 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1483 /* Same as above, but obtains max_iid from a klass */
1485 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1486 MonoBasicBlock *false_target)
1488 int max_iid_reg = alloc_preg (cfg);
/* max_interface_id is loaded as an unsigned 16-bit field of MonoClass. */
1490 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1491 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * Emits a non-interface "is instance of" test for the class stored in
 * @klass_reg: branches to @false_target when the inheritance depth already
 * rules @klass out, and to @true_target when supertypes [klass->idepth - 1]
 * equals @klass. Falls through otherwise (caller emits the failure path).
 */
1495 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1497 int idepth_reg = alloc_preg (cfg);
1498 int stypes_reg = alloc_preg (cfg);
1499 int stype = alloc_preg (cfg);
/* The depth check is only needed when klass is deeper than the default
 * supertable size; presumably shallower supertype arrays are always
 * allocated to at least that size — confirm against class init code. */
1501 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1502 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1503 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1504 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
/* Load supertypes [klass->idepth - 1] and compare against klass itself. */
1506 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1507 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1508 if (cfg->compile_aot) {
/* AOT code cannot embed runtime pointers, so go through a class constant. */
1509 int const_reg = alloc_preg (cfg);
1510 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1511 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
/* JIT: the MonoClass pointer is embedded directly as an immediate. */
1513 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1515 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/*
 * Emits an interface cast check against the vtable in @vtable_reg: first the
 * max-interface-id guard, then a test of the interface bitmap bit for @klass.
 * A nonzero bit branches to @true_target; a zero bit throws
 * InvalidCastException (the if/else lines selecting between the two are not
 * visible in this listing).
 */
1519 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1521 int intf_reg = alloc_preg (cfg);
1523 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1524 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1525 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1527 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1529 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1533 * Variant of the above that takes a register to the class, not the vtable.
1536 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1538 int intf_bit_reg = alloc_preg (cfg);
/* Guard against out-of-range interface ids, then test the bitmap bit;
 * nonzero branches to @true_target, zero throws InvalidCastException
 * (selecting if/else lines not visible in this listing). */
1540 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1541 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1542 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1544 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1546 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * Emits an exact-class check: compares the MonoClass* in @klass_reg against
 * @klass and throws InvalidCastException when they differ.
 */
1550 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1552 if (cfg->compile_aot) {
/* AOT: the class pointer must come from a patchable constant. */
1553 int const_reg = alloc_preg (cfg);
1554 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1555 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
/* JIT: embed the MonoClass pointer directly as an immediate. */
1557 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1559 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/*
 * Like mini_emit_class_check (), but instead of throwing it emits a branch
 * with @branch_op to @target based on the comparison result.
 */
1563 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1565 if (cfg->compile_aot) {
1566 int const_reg = alloc_preg (cfg);
1567 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1568 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
/* JIT: the MonoClass pointer is used directly as an immediate. */
1570 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1572 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
/*
 * Emits a castclass check of the class in @klass_reg against the compile-time
 * @klass, throwing InvalidCastException on failure. @obj_reg holds the object
 * (used only for the vector check below; -1 skips it), @object_is_null is the
 * bblock to branch to when a check can short-circuit. Note: several structural
 * lines (the array/non-array if/else and closing braces) are not visible in
 * this listing.
 */
1576 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1579 int rank_reg = alloc_preg (cfg);
1580 int eclass_reg = alloc_preg (cfg);
/* Array path: ranks must match exactly, then the element classes are checked. */
1582 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
1583 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1584 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1585 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1586 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
1587 if (klass->cast_class == mono_defaults.object_class) {
/* Element type object: accept unless the runtime element derives from
 * System.Enum's parent chain — see the enum special cases below. */
1588 int parent_reg = alloc_preg (cfg);
1589 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
1590 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1591 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1592 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1593 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1594 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1595 } else if (klass->cast_class == mono_defaults.enum_class) {
1596 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1597 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
/* Interface element type: use the interface bitmap check. */
1598 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1600 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1601 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
/* SZARRAY targets additionally require the object to be a vector, i.e. a
 * one-dimensional, zero-based array with no bounds data. */
1604 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1605 /* Check that the object is a vector too */
1606 int bounds_reg = alloc_preg (cfg);
1607 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
1608 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1609 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-array path: walk the supertypes table, then require exact equality
 * at the target's inheritance depth. */
1612 int idepth_reg = alloc_preg (cfg);
1613 int stypes_reg = alloc_preg (cfg);
1614 int stype = alloc_preg (cfg);
1616 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1617 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1618 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1619 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1621 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1622 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1623 mini_emit_class_check (cfg, stype, klass);
/*
 * Emits inline code to set @size bytes at @destreg + @offset to @val
 * (asserted to be 0) with the given @align. Small aligned sizes use a single
 * immediate store; larger sizes load the value into a register and emit a
 * sequence of progressively narrower stores. Note: the switch labels, loop
 * headers and size/offset bookkeeping lines are not visible in this listing.
 */
1628 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
/* Only zero-fill is supported by this helper. */
1632 g_assert (val == 0);
1637 if ((size <= 4) && (size <= align)) {
1640 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1643 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1646 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1648 #if SIZEOF_VOID_P == 8
1650 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* General case: materialize the value in a register of pointer width. */
1656 val_reg = alloc_preg (cfg);
1658 if (sizeof (gpointer) == 8)
1659 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1661 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
/* Unaligned destination: fall back to byte stores. */
1664 /* This could be optimized further if neccesary */
1666 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1673 #if !NO_UNALIGNED_ACCESS
/* 64-bit targets: align to 8 with a 4-byte store, then use 8-byte stores. */
1674 if (sizeof (gpointer) == 8) {
1676 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1681 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
/* Remaining tail: 4-, 2- and finally 1-byte stores. */
1689 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1694 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1699 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1705 #endif /* DISABLE_JIT */
/*
 * Emits inline code copying @size bytes from @srcreg + @soffset to
 * @destreg + @doffset, honoring @align: wide load/store pairs where alignment
 * permits, byte pairs otherwise. Note: the loop headers and the
 * size/offset-advancing lines are not visible in this listing.
 */
1708 mini_emit_memcpy2 (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
/* Unaligned case: copy one byte at a time through a temp register. */
1716 /* This could be optimized further if neccesary */
1718 cur_reg = alloc_preg (cfg);
1719 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1720 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1727 #if !NO_UNALIGNED_ACCESS
/* 64-bit targets: bulk-copy with 8-byte load/store pairs. */
1728 if (sizeof (gpointer) == 8) {
1730 cur_reg = alloc_preg (cfg);
1731 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1732 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
/* Remaining tail: 4-, 2- and finally 1-byte copies. */
1741 cur_reg = alloc_preg (cfg);
1742 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1743 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1749 cur_reg = alloc_preg (cfg);
1750 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1751 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1757 cur_reg = alloc_preg (cfg);
1758 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1759 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * Maps the return type of a call to the matching call opcode family:
 * plain OP_*CALL, OP_*CALL_REG for indirect calls (@calli), or OP_*CALLVIRT
 * for virtual ones (@virt). Note: many case labels (byref, I1..U4, I8/U8, R4/R8,
 * VAR/MVAR, and the enum "goto handle_enum" lines) are not visible in this
 * listing.
 */
1769 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
1772 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
/* Resolve shared generic parameters to their concrete basic type first. */
1775 type = mini_get_basic_type_from_generic (gsctx, type);
1776 switch (type->type) {
1777 case MONO_TYPE_VOID:
1778 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
1781 case MONO_TYPE_BOOLEAN:
1784 case MONO_TYPE_CHAR:
1787 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1791 case MONO_TYPE_FNPTR:
1792 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1793 case MONO_TYPE_CLASS:
1794 case MONO_TYPE_STRING:
1795 case MONO_TYPE_OBJECT:
1796 case MONO_TYPE_SZARRAY:
1797 case MONO_TYPE_ARRAY:
1798 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1801 return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
1804 return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
1805 case MONO_TYPE_VALUETYPE:
/* Enums dispatch on their underlying basic type. */
1806 if (type->data.klass->enumtype) {
1807 type = type->data.klass->enum_basetype;
1810 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1811 case MONO_TYPE_TYPEDBYREF:
1812 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1813 case MONO_TYPE_GENERICINST:
/* Re-dispatch on the generic container's open type. */
1814 type = &type->data.generic_class->container_class->byval_arg;
1817 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
1823 * target_type_is_incompatible:
1824 * @cfg: MonoCompile context
1826 * Check that the item @arg on the evaluation stack can be stored
1827 * in the target type (can be a local, or field, etc).
1828 * The cfg arg can be used to check if we need verification or just
1831 * Returns: non-0 value if arg can't be stored on a target.
/* Note: several case labels, "return 1"/"return 0" lines and closing braces
 * are not visible in this listing. */
1834 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1836 MonoType *simple_type;
1839 if (target->byref) {
1840 /* FIXME: check that the pointed to types match */
1841 if (arg->type == STACK_MP)
1842 return arg->klass != mono_class_from_mono_type (target);
1843 if (arg->type == STACK_PTR)
/* Strip enum wrappers etc. before dispatching on the element type. */
1848 simple_type = mono_type_get_underlying_type (target);
1849 switch (simple_type->type) {
1850 case MONO_TYPE_VOID:
1854 case MONO_TYPE_BOOLEAN:
1857 case MONO_TYPE_CHAR:
1860 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1864 /* STACK_MP is needed when setting pinned locals */
1865 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1870 case MONO_TYPE_FNPTR:
1871 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1874 case MONO_TYPE_CLASS:
1875 case MONO_TYPE_STRING:
1876 case MONO_TYPE_OBJECT:
1877 case MONO_TYPE_SZARRAY:
1878 case MONO_TYPE_ARRAY:
1879 if (arg->type != STACK_OBJ)
1881 /* FIXME: check type compatibility */
1885 if (arg->type != STACK_I8)
1890 if (arg->type != STACK_R8)
1893 case MONO_TYPE_VALUETYPE:
/* Value types must match by exact class, not just stack kind. */
1894 if (arg->type != STACK_VTYPE)
1896 klass = mono_class_from_mono_type (simple_type);
1897 if (klass != arg->klass)
1900 case MONO_TYPE_TYPEDBYREF:
1901 if (arg->type != STACK_VTYPE)
1903 klass = mono_class_from_mono_type (simple_type);
1904 if (klass != arg->klass)
1907 case MONO_TYPE_GENERICINST:
/* Generic value-type instantiations behave like value types; reference
 * instantiations like objects. */
1908 if (mono_type_generic_inst_is_valuetype (simple_type)) {
1909 if (arg->type != STACK_VTYPE)
1911 klass = mono_class_from_mono_type (simple_type);
1912 if (klass != arg->klass)
1916 if (arg->type != STACK_OBJ)
1918 /* FIXME: check type compatibility */
1922 case MONO_TYPE_MVAR:
1923 /* FIXME: all the arguments must be references for now,
1924 * later look inside cfg and see if the arg num is
1925 * really a reference
/* Open generic parameters are only valid under generic sharing. */
1927 g_assert (cfg->generic_sharing_context);
1928 if (arg->type != STACK_OBJ)
1932 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
1938 * Prepare arguments for passing to a function call.
1939 * Return a non-zero value if the arguments can't be passed to the given
1941 * The type checks are not yet complete and some conversions may need
1942 * casts on 32 or 64 bit architectures.
1944 * FIXME: implement this using target_type_is_incompatible ()
/* Note: several case labels, "return 1" lines and closing braces are not
 * visible in this listing. */
1947 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
1949 MonoType *simple_type;
/* The implicit "this" argument must be an object, managed pointer or
 * native pointer (presumably guarded by a sig->hasthis test on a line not
 * visible here). */
1953 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
1957 for (i = 0; i < sig->param_count; ++i) {
1958 if (sig->params [i]->byref) {
1959 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
1963 simple_type = sig->params [i];
/* Resolve shared generic parameters before dispatching. */
1964 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
1966 switch (simple_type->type) {
1967 case MONO_TYPE_VOID:
1972 case MONO_TYPE_BOOLEAN:
1975 case MONO_TYPE_CHAR:
1978 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
1984 case MONO_TYPE_FNPTR:
1985 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
1988 case MONO_TYPE_CLASS:
1989 case MONO_TYPE_STRING:
1990 case MONO_TYPE_OBJECT:
1991 case MONO_TYPE_SZARRAY:
1992 case MONO_TYPE_ARRAY:
1993 if (args [i]->type != STACK_OBJ)
1998 if (args [i]->type != STACK_I8)
2003 if (args [i]->type != STACK_R8)
2006 case MONO_TYPE_VALUETYPE:
/* Enums are checked as their underlying basic type. */
2007 if (simple_type->data.klass->enumtype) {
2008 simple_type = simple_type->data.klass->enum_basetype;
2011 if (args [i]->type != STACK_VTYPE)
2014 case MONO_TYPE_TYPEDBYREF:
2015 if (args [i]->type != STACK_VTYPE)
2018 case MONO_TYPE_GENERICINST:
/* Re-dispatch on the generic container's open type. */
2019 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2023 g_error ("unknown type 0x%02x in check_call_signature",
/*
 * Maps an OP_*CALLVIRT opcode to the corresponding direct OP_*CALL opcode.
 * Aborts on any non-callvirt opcode. (Most case labels and return lines are
 * not visible in this listing.)
 */
2031 callvirt_to_call (int opcode)
2036 case OP_VOIDCALLVIRT:
2045 g_assert_not_reached ();
/*
 * Maps an OP_*CALLVIRT opcode to the corresponding OP_*CALL_MEMBASE opcode
 * (call through a vtable/interface slot loaded from memory). Aborts on any
 * non-callvirt opcode. (Several case labels are not visible in this listing.)
 */
2052 callvirt_to_call_membase (int opcode)
2056 return OP_CALL_MEMBASE;
2057 case OP_VOIDCALLVIRT:
2058 return OP_VOIDCALL_MEMBASE;
2060 return OP_FCALL_MEMBASE;
2062 return OP_LCALL_MEMBASE;
2064 return OP_VCALL_MEMBASE;
2066 g_assert_not_reached ();
2072 #ifdef MONO_ARCH_HAVE_IMT
/*
 * Passes the IMT "hidden" method argument for @call: either the supplied
 * @imt_arg or, failing that, the call's target method, materialized as an
 * AOT constant or a plain pointer constant. On architectures with a
 * dedicated IMT register the value goes into MONO_ARCH_IMT_REG; otherwise
 * the arch-specific hook takes over.
 */
2074 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
2076 #ifdef MONO_ARCH_IMT_REG
2077 int method_reg = alloc_preg (cfg);
2080 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2081 } else if (cfg->compile_aot) {
/* AOT code cannot embed the MonoMethod pointer directly. */
2082 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
2085 MONO_INST_NEW (cfg, ins, OP_PCONST);
2086 ins->inst_p0 = call->method;
2087 ins->dreg = method_reg;
2088 MONO_ADD_INS (cfg->cbb, ins);
2091 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2093 mono_arch_emit_imt_argument (cfg, call, imt_arg);
/*
 * Allocates a MonoJumpInfo patch record from the mempool @mp and fills in
 * its ip offset, patch type and target (the field-assignment lines other
 * than data.target are not visible in this listing).
 */
2098 static MonoJumpInfo *
2099 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2101 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2105 ji->data.target = target;
2110 inline static MonoInst*
2111 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args);
/*
 * Builds a MonoCallInst for a call with signature @sig and arguments @args.
 * @calli selects indirect-call opcodes, @virtual selects callvirt opcodes.
 * Valuetype returns get a local temp plus an OP_OUTARG_VTRETADDR node; on
 * soft-float targets R4 arguments are converted via an icall before the call
 * sequence. Finishes with the arch-specific outarg setup and records the
 * needed param area. (Some declaration lines and closing braces are not
 * visible in this listing.)
 */
2113 inline static MonoCallInst *
2114 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2115 MonoInst **args, int calli, int virtual)
2118 #ifdef MONO_ARCH_SOFT_FLOAT
2122 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2125 call->signature = sig;
/* Give the call node the eval-stack type of its return value. */
2127 type_to_eval_stack_type ((cfg), sig->ret, &call->inst);
2129 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
/* Valuetype return: allocate a local to hold the returned struct. */
2130 MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
2133 temp->backend.is_pinvoke = sig->pinvoke;
2136 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2137 * address of return value to increase optimization opportunities.
2138 * Before vtype decomposition, the dreg of the call ins itself represents the
2139 * fact the call modifies the return value. After decomposition, the call will
2140 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2141 * will be transformed into an LDADDR.
2143 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2144 loada->dreg = alloc_preg (cfg);
2145 loada->inst_p0 = temp;
2146 /* We reference the call too since call->dreg could change during optimization */
2147 loada->inst_p1 = call;
2148 MONO_ADD_INS (cfg->cbb, loada);
2150 call->inst.dreg = temp->dreg;
2152 call->vret_var = loada;
2153 } else if (!MONO_TYPE_IS_VOID (sig->ret))
2154 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2156 #ifdef MONO_ARCH_SOFT_FLOAT
2158 * If the call has a float argument, we would need to do an r8->r4 conversion using
2159 * an icall, but that cannot be done during the call sequence since it would clobber
2160 * the call registers + the stack. So we do it before emitting the call.
2162 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2164 MonoInst *in = call->args [i];
/* Skip the implicit this (slot 0) when mapping to sig->params. */
2166 if (i >= sig->hasthis)
2167 t = sig->params [i - sig->hasthis];
2169 t = &mono_defaults.int_class->byval_arg;
2170 t = mono_type_get_underlying_type (t);
2172 if (!t->byref && t->type == MONO_TYPE_R4) {
2173 MonoInst *iargs [1];
2177 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2179 /* The result will be in an int vreg */
2180 call->args [i] = conv;
/* Let the backend place the arguments (registers/stack). */
2185 mono_arch_emit_call (cfg, call);
2187 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2188 cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * Emits an indirect call through the function pointer in @addr with
 * signature @sig and arguments @args; returns the call instruction.
 */
2193 inline static MonoInst*
2194 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr)
2196 MonoCallInst *call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE);
/* The indirect-call opcode takes the target address in sreg1. */
2198 call->inst.sreg1 = addr->dreg;
2200 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2202 return (MonoInst*)call;
/*
 * Like mono_emit_calli (), but additionally passes the runtime generic
 * context in @rgctx_arg via the dedicated MONO_ARCH_RGCTX_REG. Aborts on
 * architectures without an RGCTX register.
 */
2205 inline static MonoInst*
2206 mono_emit_rgctx_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *rgctx_arg)
2208 #ifdef MONO_ARCH_RGCTX_REG
/* Copy the rgctx into a fresh vreg, then pin it to the arch register
 * as an implicit out-argument of the call. */
2213 rgctx_reg = mono_alloc_preg (cfg);
2214 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2216 call = (MonoCallInst*)mono_emit_calli (cfg, sig, args, addr);
2218 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2219 cfg->uses_rgctx_reg = TRUE;
2221 return (MonoInst*)call;
2223 g_assert_not_reached ();
/*
 * Emits a call to @method with signature @sig and arguments @args.
 * @this non-NULL makes the call virtual; @imt_arg, when set, is passed as
 * the IMT hidden argument. Chooses between direct dispatch, statically
 * resolved virtual dispatch (final methods), delegate Invoke fast-path, IMT
 * interface dispatch and plain vtable dispatch. (Several structural lines —
 * if/else keywords, braces — are not visible in this listing.)
 */
2229 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2230 MonoInst **args, MonoInst *this, MonoInst *imt_arg)
2232 gboolean virtual = this != NULL;
2233 gboolean enable_for_aot = TRUE;
2236 if (method->string_ctor) {
2237 /* Create the real signature */
2238 /* FIXME: Cache these */
/* String ctors are declared returning void; the runtime signature
 * actually returns the new string. */
2239 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_full (cfg->mempool, sig);
2240 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2245 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual);
/* Non-virtual calls on MarshalByRef/object receivers may need to go
 * through the remoting-invoke-with-check wrapper. */
2247 if (this && sig->hasthis &&
2248 (method->klass->marshalbyref || method->klass == mono_defaults.object_class) &&
2249 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && !MONO_CHECK_THIS (this)) {
2250 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2252 call->method = method;
2254 call->inst.flags |= MONO_INST_HAS_METHOD;
2255 call->inst.inst_left = this;
2258 int vtable_reg, slot_reg, this_reg;
2260 this_reg = this->dreg;
2262 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
/* Delegate Invoke: call directly through delegate->invoke_impl. */
2263 if ((method->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
2264 /* Make a call to delegate->invoke_impl */
2265 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2266 call->inst.inst_basereg = this_reg;
2267 call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2268 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2270 return (MonoInst*)call;
/* Direct dispatch: non-virtual methods, and final methods that are not
 * remoting wrappers, can skip the vtable entirely. */
2274 if ((!cfg->compile_aot || enable_for_aot) &&
2275 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2276 (MONO_METHOD_IS_FINAL (method) &&
2277 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK))) {
2279 * the method is not virtual, we just need to ensure this is not null
2280 * and then we can call the method directly.
2282 if (method->klass->marshalbyref || method->klass == mono_defaults.object_class) {
2283 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2286 if (!method->string_ctor) {
2287 cfg->flags |= MONO_CFG_HAS_CHECK_THIS;
2288 MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, this_reg);
2289 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, this_reg);
2292 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2294 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2296 return (MonoInst*)call;
2299 if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2301 * the method is virtual, but we can statically dispatch since either
2302 * it's class or the method itself are sealed.
2303 * But first we need to ensure it's not a null reference.
2305 cfg->flags |= MONO_CFG_HAS_CHECK_THIS;
2306 MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, this_reg);
2307 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, this_reg);
2309 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2310 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2312 return (MonoInst*)call;
/* True virtual dispatch: load the vtable and call through a slot. */
2315 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2317 vtable_reg = alloc_preg (cfg);
2318 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2319 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2321 #ifdef MONO_ARCH_HAVE_IMT
/* Interface call via IMT: the slot lives at a negative offset from
 * the vtable (imt_slot - MONO_IMT_SIZE entries). */
2323 guint32 imt_slot = mono_method_get_imt_slot (method);
2324 emit_imt_argument (cfg, call, imt_arg);
2325 slot_reg = vtable_reg;
2326 call->inst.inst_offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
/* Fallback interface dispatch through the interface-offsets table. */
2329 if (slot_reg == -1) {
2330 slot_reg = alloc_preg (cfg);
2331 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2332 call->inst.inst_offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
/* Class virtual call: index the vtable's method slot array directly. */
2335 slot_reg = vtable_reg;
2336 call->inst.inst_offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
2337 (mono_method_get_vtable_index (method) * SIZEOF_VOID_P);
/* Generic virtual methods still need the IMT argument. */
2339 g_assert (mono_method_signature (method)->generic_param_count);
2340 emit_imt_argument (cfg, call, imt_arg);
2344 call->inst.sreg1 = slot_reg;
2345 call->virtual = TRUE;
2348 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2350 return (MonoInst*)call;
/*
 * Like mono_emit_method_call_full (), but additionally passes the runtime
 * generic context in @vtable_arg through MONO_ARCH_RGCTX_REG. (The guarding
 * if (vtable_arg) lines and the final return are not visible in this
 * listing.)
 */
2354 mono_emit_rgctx_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2355 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *vtable_arg)
2362 #ifdef MONO_ARCH_RGCTX_REG
/* Copy the rgctx value before the call sequence clobbers registers. */
2363 rgctx_reg = mono_alloc_preg (cfg);
2364 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
2369 ins = mono_emit_method_call_full (cfg, method, sig, args, this, imt_arg);
2371 call = (MonoCallInst*)ins;
2373 #ifdef MONO_ARCH_RGCTX_REG
/* Pin the copied rgctx to the dedicated arch register for the call. */
2374 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2375 cfg->uses_rgctx_reg = TRUE;
2384 static inline MonoInst*
2385 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2387 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), args, this, NULL);
/*
 * Emits a direct (non-virtual, non-indirect) call to the native function
 * @func with signature @sig. (The parameter list continuation, the line
 * storing @func into the call, and braces are not visible in this listing.)
 */
2391 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2398 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE);
2401 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2403 return (MonoInst*)call;
/*
 * Emits a call to the JIT icall registered at address @func: looks up its
 * MonoJitICallInfo and calls through the icall's wrapper with the icall's
 * own signature.
 */
2406 inline static MonoInst*
2407 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2409 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2413 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2417 * mono_emit_abs_call:
2419 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2421 inline static MonoInst*
2422 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2423 MonoMethodSignature *sig, MonoInst **args)
2425 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2429 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
/* Lazily create the table mapping patch-info "addresses" back to their
 * descriptors so the resolver can recognize them. */
2432 if (cfg->abs_patches == NULL)
2433 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2434 g_hash_table_insert (cfg->abs_patches, ji, ji);
2435 ins = mono_emit_native_call (cfg, ji, sig, args);
/* Mark the fptr as a patch descriptor, not a real code address. */
2436 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * Returns (and caches in a static) the corlib-internal String.memcpy (3 args)
 * helper method; aborts if the installed corlib does not provide it. (The
 * NULL-check line after the lookup is not visible in this listing.)
 */
2441 get_memcpy_method (void)
2443 static MonoMethod *memcpy_method = NULL;
2444 if (!memcpy_method) {
2445 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2447 g_error ("Old corlib found. Install a new one");
2449 return memcpy_method;
2453 * Emit code to copy a valuetype of type @klass whose address is stored in
2454 * @src->dreg to memory whose address is stored at @dest->dreg.
/* @native selects the unmanaged (marshalled) size/layout. Small copies are
 * inlined via mini_emit_memcpy2 (); larger ones call the corlib memcpy
 * helper. (The early-return after the inline path and the iargs[0]/[1]
 * setup lines are not visible in this listing.) */
2457 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
2459 MonoInst *iargs [3];
2462 MonoMethod *memcpy_method;
2466 * This check breaks with spilled vars... need to handle it during verification anyway.
2467 * g_assert (klass && klass == src->klass && klass == dest->klass);
2471 n = mono_class_native_size (klass, &align);
2473 n = mono_class_value_size (klass, &align);
/* Inline small copies (up to 5 pointers) when intrinsics are enabled. */
2475 if ((cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
2476 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
2477 mini_emit_memcpy2 (cfg, dest->dreg, 0, src->dreg, 0, n, align);
2481 EMIT_NEW_ICONST (cfg, iargs [2], n);
2483 memcpy_method = get_memcpy_method ();
2484 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/* Return (and cache in a static) the managed String.memset(3) helper used
 * for valuetype initialization; aborts if the running corlib lacks it. */
2489 get_memset_method (void)
2491 static MonoMethod *memset_method = NULL;
2492 if (!memset_method) {
2493 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
2495 g_error ("Old corlib found. Install a new one");
2497 return memset_method;
/* Emit code to zero-initialize a valuetype of type @klass at the address in
 * @dest->dreg: an inline memset for small sizes, otherwise a call to the
 * managed memset helper with (dest, 0, n). */
2501 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
2503 MonoInst *iargs [3];
2506 MonoMethod *memset_method;
2508 /* FIXME: Optimize this for the case when dest is an LDADDR */
2510 mono_class_init (klass);
2511 n = mono_class_value_size (klass, &align);
2513 if (n <= sizeof (gpointer) * 5) {
2514 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
2517 memset_method = get_memset_method ();
2519 EMIT_NEW_ICONST (cfg, iargs [1], 0);
2520 EMIT_NEW_ICONST (cfg, iargs [2], n);
2521 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/* Emit the IR that loads the runtime generic context used while compiling
 * METHOD under generic sharing.  Depending on how the method receives its
 * context this is the method RGCTX variable, the vtable variable, or the
 * vtable loaded from the `this' argument. */
2526 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
2528 MonoInst *this = NULL;
2530 g_assert (cfg->generic_sharing_context);
/* Non-static, non-valuetype methods whose context does not come from the
 * method itself can reach the context through `this' (argument 0). */
2532 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
2533 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
2534 !method->klass->valuetype)
2535 EMIT_NEW_ARGLOAD (cfg, this, 0);
/* Case 1: context comes from the method -> load the mrgctx variable. */
2537 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
2538 MonoInst *mrgctx_loc, *mrgctx_var;
2541 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
2543 mrgctx_loc = mono_get_vtable_var (cfg);
2544 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
/* Case 2: static or valuetype method -> load the vtable variable. */
2547 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
2548 MonoInst *vtable_loc, *vtable_var;
2552 vtable_loc = mono_get_vtable_var (cfg);
2553 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
/* If the variable actually holds an mrgctx, dereference it to get the
 * class vtable stored inside it. */
2555 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
2556 MonoInst *mrgctx_var = vtable_var;
2559 vtable_reg = alloc_preg (cfg);
2560 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
2561 vtable_var->type = STACK_PTR;
/* Case 3: instance method -> load the vtable out of `this'. */
2567 int vtable_reg, res_reg;
2569 vtable_reg = alloc_preg (cfg);
2570 res_reg = alloc_preg (cfg);
2571 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Allocate, from mempool MP, a MonoJumpInfoRgctxEntry describing an RGCTX
 * slot of kind INFO_TYPE for METHOD, whose payload is a patch of
 * PATCH_TYPE/PATCH_DATA.  IN_MRGCTX selects the method RGCTX vs the
 * class vtable as the context source. */
2576 static MonoJumpInfoRgctxEntry *
2577 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, int info_type)
2579 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
2580 res->method = method;
2581 res->in_mrgctx = in_mrgctx;
2582 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
2583 res->data->type = patch_type;
2584 res->data->data.target = patch_data;
2585 res->info_type = info_type;
/* Emit a call to the lazy-fetch trampoline that loads ENTRY's slot out of
 * the runtime generic context RGCTX. */
2590 static inline MonoInst*
2591 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
2593 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/* Emit code that fetches, from the runtime generic context, the RGCTX_TYPE
 * item (e.g. vtable, klass) corresponding to KLASS. */
2597 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
2598 MonoClass *klass, int rgctx_type)
2600 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
2601 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2603 return emit_rgctx_fetch (cfg, rgctx, entry);
/* Emit code that fetches, from the runtime generic context, the RGCTX_TYPE
 * item corresponding to CMETHOD. */
2607 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
2608 MonoMethod *cmethod, int rgctx_type)
2610 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
2611 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2613 return emit_rgctx_fetch (cfg, rgctx, entry);
/* Emit code that fetches, from the runtime generic context, the RGCTX_TYPE
 * item corresponding to FIELD. */
2617 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
2618 MonoClassField *field, int rgctx_type)
2620 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
2621 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2623 return emit_rgctx_fetch (cfg, rgctx, entry);
/* Emit a runtime check that OBJ is an instance of ARRAY_CLASS, throwing
 * ArrayTypeMismatchException otherwise.  The comparison strategy depends
 * on the compilation mode: class comparison under MONO_OPT_SHARED, an
 * RGCTX vtable fetch under generic sharing, a vtable constant otherwise. */
2627 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
2629 int vtable_reg = alloc_preg (cfg);
2630 int context_used = 0;
2632 if (cfg->generic_sharing_context)
2633 context_used = mono_class_check_context_used (array_class);
2635 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Shared code: vtables are per-domain, so compare the MonoClass instead. */
2637 if (cfg->opt & MONO_OPT_SHARED) {
2638 int class_reg = alloc_preg (cfg);
2639 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2640 if (cfg->compile_aot) {
2641 int klass_reg = alloc_preg (cfg);
2642 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
2643 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
2645 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
/* Generic sharing: the expected vtable is looked up through the RGCTX. */
2647 } else if (context_used) {
2648 MonoInst *vtable_ins;
2650 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
2651 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
/* Normal JIT/AOT: compare against the vtable directly. */
2653 if (cfg->compile_aot) {
2654 int vt_reg = alloc_preg (cfg);
2655 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, mono_class_vtable (cfg->domain, array_class));
2656 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
2658 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, mono_class_vtable (cfg->domain, array_class));
2662 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
/* With --debug=casts, emit code that records the source class (read from
 * the object at OBJ_REG) and the target class KLASS in the JIT TLS data,
 * so a failing cast can report a detailed message. */
2666 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg)
2668 if (mini_get_debug_options ()->better_cast_details) {
2669 int to_klass_reg = alloc_preg (cfg);
2670 int vtable_reg = alloc_preg (cfg);
2671 int klass_reg = alloc_preg (cfg);
2672 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
2675 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
2679 MONO_ADD_INS (cfg->cbb, tls_get);
/* obj->vtable->klass is the actual runtime class of the object. */
2680 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2681 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2683 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
2684 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
2685 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
/* Counterpart of save_cast_details (): clear the recorded cast details in
 * the JIT TLS data after a cast has succeeded. */
2690 reset_cast_details (MonoCompile *cfg)
2692 /* Reset the variables holding the cast details */
2693 if (mini_get_debug_options ()->better_cast_details) {
2694 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
2696 MONO_ADD_INS (cfg->cbb, tls_get);
2697 /* It is enough to reset the from field */
2698 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
2703 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
2704 * generic code is generated.
2707 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
/* Unboxing a nullable is done by calling the corlib Nullable<T>.Unbox
 * helper; under generic sharing the helper's address comes via the RGCTX. */
2709 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
2712 MonoInst *rgctx, *addr;
2714 /* FIXME: What if the class is shared? We might not
2715 have to get the address of the method from the
2717 addr = emit_get_rgctx_method (cfg, context_used, method,
2718 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
2720 rgctx = emit_get_rgctx (cfg, method, context_used);
2722 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
2724 return mono_emit_method_call (cfg, method, &val, NULL);
/* Emit the unbox sequence for KLASS: verify the object in sp[0] has rank 0
 * and the expected element class (throwing InvalidCastException otherwise),
 * then compute the address of the value stored inside the boxed object. */
2729 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
2733 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
2734 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
2735 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
2736 int rank_reg = alloc_dreg (cfg ,STACK_I4);
2738 obj_reg = sp [0]->dreg;
2739 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2740 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
2742 /* FIXME: generics */
2743 g_assert (klass->rank == 0);
/* An array can never be unboxed to a non-array valuetype. */
2746 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
2747 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
2749 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2750 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
/* Under generic sharing the expected element class is fetched from the
 * RGCTX; otherwise it is checked against a compile-time constant. */
2753 MonoInst *element_class;
2755 /* This assertion is from the unboxcast insn */
2756 g_assert (klass->rank == 0);
2758 element_class = emit_get_rgctx_klass (cfg, context_used,
2759 klass->element_class, MONO_RGCTX_INFO_KLASS);
2761 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
2762 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
2764 save_cast_details (cfg, klass->element_class, obj_reg);
2765 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
2766 reset_cast_details (cfg);
/* The unboxed value lives immediately after the MonoObject header. */
2769 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_PTR), obj_reg, sizeof (MonoObject));
2770 MONO_ADD_INS (cfg->cbb, add);
2771 add->type = STACK_MP;
/* Emit code that allocates an object of type KLASS (FOR_BOX indicates the
 * allocation is for a box operation), choosing between mono_object_new for
 * shared code, a specialized mscorlib helper for out-of-line AOT code, a
 * managed (GC) allocator, or the class's allocation function. */
2778 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box)
2780 MonoInst *iargs [2];
2783 if (cfg->opt & MONO_OPT_SHARED) {
2784 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
2785 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
2787 alloc_ftn = mono_object_new;
2788 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib) {
2789 /* This happens often in argument checking code, eg. throw new FooException... */
2790 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
2791 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
2792 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
2794 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2795 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
/* Prefer the GC's managed allocator when one is available. */
2798 if (managed_alloc) {
2799 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
2800 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
2802 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* Some allocation functions take the instance size in machine words as
 * their first argument instead of just the vtable. */
2804 guint32 lw = vtable->klass->instance_size;
2805 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
2806 EMIT_NEW_ICONST (cfg, iargs [0], lw);
2807 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
2810 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
2814 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/* Like handle_alloc (), but the vtable/class is supplied at runtime by
 * DATA_INST (used by shared generic code where KLASS is open). */
2818 handle_alloc_from_inst (MonoCompile *cfg, MonoClass *klass, MonoInst *data_inst,
2821 MonoInst *iargs [2];
2822 MonoMethod *managed_alloc = NULL;
2826 FIXME: we cannot get managed_alloc here because we can't get
2827 the class's vtable (because it's not a closed class)
2829 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2830 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
2833 if (cfg->opt & MONO_OPT_SHARED) {
2834 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
2835 iargs [1] = data_inst;
2836 alloc_ftn = mono_object_new;
2838 if (managed_alloc) {
2839 iargs [0] = data_inst;
2840 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
/* Fall back to mono_object_new_specific, which takes a vtable. */
2843 iargs [0] = data_inst;
2844 alloc_ftn = mono_object_new_specific;
2847 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/* Emit the box sequence for VAL of type KLASS: nullables go through the
 * corlib Nullable<T>.Box helper, everything else allocates an object and
 * stores the value right after the MonoObject header. */
2851 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass)
2853 MonoInst *alloc, *ins;
2855 if (mono_class_is_nullable (klass)) {
2856 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
2857 return mono_emit_method_call (cfg, method, &val, NULL);
2860 alloc = handle_alloc (cfg, klass, TRUE);
2862 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/* Shared-generic-code variant of handle_box (): the runtime class data is
 * supplied by DATA_INST and the nullable Box helper's address is fetched
 * from the RGCTX. */
2868 handle_box_from_inst (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used, MonoInst *data_inst)
2870 MonoInst *alloc, *ins;
2872 if (mono_class_is_nullable (klass)) {
2873 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
2874 /* FIXME: What if the class is shared? We might not
2875 have to get the method address from the RGCTX. */
2876 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
2877 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
2878 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2880 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
2882 alloc = handle_alloc_from_inst (cfg, klass, data_inst, TRUE);
2884 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/* Emit the castclass sequence: null passes through unchanged; otherwise
 * check SRC against KLASS (interface cast, sealed-class vtable/class
 * comparison, or the generic castclass helper) and throw
 * InvalidCastException on mismatch. */
2891 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
2893 MonoBasicBlock *is_null_bb;
2894 int obj_reg = src->dreg;
2895 int vtable_reg = alloc_preg (cfg);
2897 NEW_BBLOCK (cfg, is_null_bb);
/* A null reference always casts successfully. */
2899 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
2900 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
2902 save_cast_details (cfg, klass, obj_reg);
2904 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2905 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2906 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
2908 int klass_reg = alloc_preg (cfg);
2910 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed non-array classes can be checked with a single vtable (or
 * class) equality comparison. */
2912 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
2913 /* the remoting code is broken, access the class for now */
2915 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
2916 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
2918 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2919 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
2921 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
2923 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2924 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, is_null_bb);
2928 MONO_START_BB (cfg, is_null_bb);
2930 reset_cast_details (cfg);
/* Emit the isinst sequence: result register gets SRC if the object is an
 * instance of KLASS (or null), otherwise 0.  Interfaces, arrays, nullables
 * and sealed classes each get a specialized test. */
2936 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
2939 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
2940 int obj_reg = src->dreg;
2941 int vtable_reg = alloc_preg (cfg);
2942 int res_reg = alloc_preg (cfg);
2944 NEW_BBLOCK (cfg, is_null_bb);
2945 NEW_BBLOCK (cfg, false_bb);
2946 NEW_BBLOCK (cfg, end_bb);
2948 /* Do the assignment at the beginning, so the other assignment can be if converted */
2949 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
2950 ins->type = STACK_OBJ;
/* null is not an instance of anything; the result is then null too. */
2953 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
2954 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
2956 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2957 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2958 /* the is_null_bb target simply copies the input register to the output */
2959 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
2961 int klass_reg = alloc_preg (cfg);
2963 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Array case: match the rank, then test the element (cast) class.
 * Several common cast classes get dedicated fast paths. */
2966 int rank_reg = alloc_preg (cfg);
2967 int eclass_reg = alloc_preg (cfg);
2969 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
2970 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
2971 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
2972 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2973 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
2974 if (klass->cast_class == mono_defaults.object_class) {
2975 int parent_reg = alloc_preg (cfg);
2976 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
2977 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
2978 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
2979 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
2980 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
2981 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
2982 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
2983 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
2984 } else if (klass->cast_class == mono_defaults.enum_class) {
2985 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
2986 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
2987 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
2988 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
2990 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
2991 /* Check that the object is a vector too */
2992 int bounds_reg = alloc_preg (cfg);
2993 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
2994 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
2995 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
2998 /* the is_null_bb target simply copies the input register to the output */
2999 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3001 } else if (mono_class_is_nullable (klass)) {
3002 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3003 /* the is_null_bb target simply copies the input register to the output */
3004 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
/* Sealed classes need only an equality comparison. */
3006 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3007 /* the remoting code is broken, access the class for now */
3009 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3010 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3012 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3013 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3015 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3016 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
3018 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3019 /* the is_null_bb target simply copies the input register to the output */
3020 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, is_null_bb);
/* false path: result is 0; null/success paths keep the OP_MOVE result. */
3025 MONO_START_BB (cfg, false_bb);
3027 MONO_EMIT_NEW_ICONST (cfg, res_reg, 0);
3028 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3030 MONO_START_BB (cfg, is_null_bb);
3032 MONO_START_BB (cfg, end_bb);
/* Remoting-aware isinst (used by the CEE_MONO_CISINST opcode); see the
 * result encoding in the comment below.  Transparent proxies whose type
 * cannot be decided at JIT time yield 2 so the caller can fall back to a
 * runtime check. */
3038 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3040 /* This opcode takes as input an object reference and a class, and returns:
3041 0) if the object is an instance of the class,
3042 1) if the object is not instance of the class,
3043 2) if the object is a proxy whose type cannot be determined */
3046 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
3047 int obj_reg = src->dreg;
3048 int dreg = alloc_ireg (cfg);
3050 int klass_reg = alloc_preg (cfg);
3052 NEW_BBLOCK (cfg, true_bb);
3053 NEW_BBLOCK (cfg, false_bb);
3054 NEW_BBLOCK (cfg, false2_bb);
3055 NEW_BBLOCK (cfg, end_bb);
3056 NEW_BBLOCK (cfg, no_proxy_bb);
3058 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3059 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
/* Interface case: on iface-cast failure, distinguish a plain mismatch
 * from a transparent proxy with custom type info (result 2). */
3061 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3062 NEW_BBLOCK (cfg, interface_fail_bb);
3064 tmp_reg = alloc_preg (cfg);
3065 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3066 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
3067 MONO_START_BB (cfg, interface_fail_bb);
3068 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3070 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
3072 tmp_reg = alloc_preg (cfg);
3073 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3074 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3075 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
/* Non-interface case: for transparent proxies test the remote class's
 * proxy_class instead of the proxy's own class. */
3077 tmp_reg = alloc_preg (cfg);
3078 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3079 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3081 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3082 tmp_reg = alloc_preg (cfg);
3083 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3084 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3086 tmp_reg = alloc_preg (cfg);
3087 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3088 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3089 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3091 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
3092 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
3094 MONO_START_BB (cfg, no_proxy_bb);
3096 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
/* Materialize the result code (0/1/2) in dreg. */
3099 MONO_START_BB (cfg, false_bb);
3101 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3102 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3104 MONO_START_BB (cfg, false2_bb);
3106 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
3107 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3109 MONO_START_BB (cfg, true_bb);
3111 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3113 MONO_START_BB (cfg, end_bb);
3116 MONO_INST_NEW (cfg, ins, OP_ICONST);
3118 ins->type = STACK_I4;
/* Remoting-aware castclass (used by the CEE_MONO_CCASTCLASS opcode); see
 * the result encoding in the comment below.  Unlike handle_cisinst () a
 * definite mismatch throws InvalidCastException instead of returning. */
3124 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3126 /* This opcode takes as input an object reference and a class, and returns:
3127 0) if the object is an instance of the class,
3128 1) if the object is a proxy whose type cannot be determined
3129 an InvalidCastException exception is thrown otherwhise*/
3132 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
3133 int obj_reg = src->dreg;
3134 int dreg = alloc_ireg (cfg);
3135 int tmp_reg = alloc_preg (cfg);
3136 int klass_reg = alloc_preg (cfg);
3138 NEW_BBLOCK (cfg, end_bb);
3139 NEW_BBLOCK (cfg, ok_result_bb);
/* A null reference always casts successfully (result 0). */
3141 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3142 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
/* Interface case: a failed iface cast is only tolerated for a transparent
 * proxy with custom type info, which yields result 1. */
3144 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3145 NEW_BBLOCK (cfg, interface_fail_bb);
3147 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3148 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
3149 MONO_START_BB (cfg, interface_fail_bb);
3150 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3152 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
3154 tmp_reg = alloc_preg (cfg);
3155 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3156 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3157 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
3159 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3160 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Non-interface case: check the proxy's remote class, or fall through to
 * a normal castclass for ordinary objects. */
3163 NEW_BBLOCK (cfg, no_proxy_bb);
3165 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3166 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3167 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3169 tmp_reg = alloc_preg (cfg);
3170 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3171 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3173 tmp_reg = alloc_preg (cfg);
3174 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3175 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3176 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3178 NEW_BBLOCK (cfg, fail_1_bb);
3180 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
3182 MONO_START_BB (cfg, fail_1_bb);
3184 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3185 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3187 MONO_START_BB (cfg, no_proxy_bb);
3189 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
3192 MONO_START_BB (cfg, ok_result_bb);
3194 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3196 MONO_START_BB (cfg, end_bb);
3199 MONO_INST_NEW (cfg, ins, OP_ICONST);
3201 ins->type = STACK_I4;
/* Emit an inlined delegate constructor: allocate the delegate of type
 * KLASS, fill in its target/method/method_code/invoke_impl fields, and let
 * the delegate trampoline perform the remaining runtime checks. */
3206 static G_GNUC_UNUSED MonoInst*
3207 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method)
3209 gpointer *trampoline;
3210 MonoInst *obj, *method_ins, *tramp_ins;
3214 obj = handle_alloc (cfg, klass, FALSE);
3216 /* Inline the contents of mono_delegate_ctor */
3218 /* Set target field */
3219 /* Optimize away setting of NULL target */
3220 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0))
3221 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
3223 /* Set method field */
3224 EMIT_NEW_METHODCONST (cfg, method_ins, method);
3225 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
3228 * To avoid looking up the compiled code belonging to the target method
3229 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
3230 * store it, and we fill it after the method has been compiled.
/* The code slot cache is shared per-domain; create/lookup under the
 * domain lock. */
3232 if (!cfg->compile_aot && !method->dynamic) {
3233 MonoInst *code_slot_ins;
3235 domain = mono_domain_get ();
3236 mono_domain_lock (domain);
3237 if (!domain_jit_info (domain)->method_code_hash)
3238 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
3239 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
3241 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
3242 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
3244 mono_domain_unlock (domain);
3246 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
3247 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
3250 /* Set invoke_impl field */
3251 trampoline = mono_create_delegate_trampoline (klass);
3252 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_ABS, trampoline);
3253 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
3255 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/* Emit a call to the vararg array-new icall for a RANK-dimensional array;
 * the dimension arguments are in SP. */
3261 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
3263 MonoJitICallInfo *info;
3265 /* Need to register the icall so it gets an icall wrapper */
3266 info = mono_get_array_new_va_icall (rank);
3268 cfg->flags |= MONO_CFG_HAS_VARARGS;
3270 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
3271 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/* If a GOT variable exists and has not been initialized yet, prepend an
 * OP_LOAD_GOTADDR to the entry bblock and add a dummy use in the exit
 * bblock so the variable stays live for the whole method. */
3275 mono_emit_load_got_addr (MonoCompile *cfg)
3277 MonoInst *getaddr, *dummy_use;
3279 if (!cfg->got_var || cfg->got_var_allocated)
3282 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
3283 getaddr->dreg = cfg->got_var->dreg;
3285 /* Add it to the start of the first bblock */
3286 if (cfg->bb_entry->code) {
3287 getaddr->next = cfg->bb_entry->code;
3288 cfg->bb_entry->code = getaddr;
3291 MONO_ADD_INS (cfg->bb_entry, getaddr);
3293 cfg->got_var_allocated = TRUE;
3296 * Add a dummy use to keep the got_var alive, since real uses might
3297 * only be generated by the back ends.
3298 * Add it to end_bblock, so the variable's lifetime covers the whole
3300 * It would be better to make the usage of the got var explicit in all
3301 * cases when the backend needs it (i.e. calls, throw etc.), so this
3302 * wouldn't be needed.
3304 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
3305 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Inline size limit, read once from the MONO_INLINELIMIT env var (see
 * mono_method_check_inlining ()); defaults to INLINE_LENGTH_LIMIT. */
3308 static int inline_limit;
3309 static gboolean inline_limit_inited;
/* Decide whether METHOD may be inlined into the method being compiled.
 * Rejects runtime/icall/noinline/synchronized/pinvoke/marshalbyref methods,
 * methods with exception clauses, oversized bodies, methods whose class
 * cctor cannot safely be run now, methods with declarative security, and
 * (under soft float) methods with R4 parameters or return. */
3312 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
3314 MonoMethodHeader *header = mono_method_get_header (method);
3316 #ifdef MONO_ARCH_SOFT_FLOAT
3317 MonoMethodSignature *sig = mono_method_signature (method);
/* Inlining is disabled when compiling shared generic code. */
3321 if (cfg->generic_sharing_context)
3324 #ifdef MONO_ARCH_HAVE_LMF_OPS
3325 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3326 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
3327 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
3331 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_RUNTIME) ||
3332 (method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3333 (method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
3334 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
3335 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL) ||
3336 (method->klass->marshalbyref) ||
3337 !header || header->num_clauses)
3340 /* also consider num_locals? */
3341 /* Do the size check early to avoid creating vtables */
3342 if (!inline_limit_inited) {
3343 if (getenv ("MONO_INLINELIMIT"))
3344 inline_limit = atoi (getenv ("MONO_INLINELIMIT"));
3346 inline_limit = INLINE_LENGTH_LIMIT;
3347 inline_limit_inited = TRUE;
3349 if (header->code_size >= inline_limit)
3353 * if we can initialize the class of the method right away, we do,
3354 * otherwise we don't allow inlining if the class needs initialization,
3355 * since it would mean inserting a call to mono_runtime_class_init()
3356 * inside the inlined code
3358 if (!(cfg->opt & MONO_OPT_SHARED)) {
3359 if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
3360 if (cfg->run_cctors && method->klass->has_cctor) {
3361 if (!method->klass->runtime_info)
3362 /* No vtable created yet */
3364 vtable = mono_class_vtable (cfg->domain, method->klass);
3367 /* This makes so that inline cannot trigger */
3368 /* .cctors: too many apps depend on them */
3369 /* running with a specific order... */
3370 if (! vtable->initialized)
3372 mono_runtime_class_init (vtable);
3374 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
3375 if (!method->klass->runtime_info)
3376 /* No vtable created yet */
3378 vtable = mono_class_vtable (cfg->domain, method->klass);
3381 if (!vtable->initialized)
3386 * If we're compiling for shared code
3387 * the cctor will need to be run at aot method load time, for example,
3388 * or at the end of the compilation of the inlining method.
3390 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
3395 * CAS - do not inline methods with declarative security
3396 * Note: this has to be before any possible return TRUE;
3398 if (mono_method_has_declsec (method))
3401 #ifdef MONO_ARCH_SOFT_FLOAT
/* Soft float: R4 values need special handling, so don't inline. */
3403 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
3405 for (i = 0; i < sig->param_count; ++i)
3406 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/*
 * mini_field_access_needs_cctor_run:
 * Decide whether a static-field access requires emitting a runtime call to
 * run the class constructor (.cctor) of VTABLE's class before the access.
 * NOTE(review): the guard-clause bodies (the return statements) are not
 * visible in this extract — presumably each early-out returns FALSE and the
 * fall-through returns TRUE; confirm against the full source.
 */
3414 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoVTable *vtable)
/* Already initialized and not compiling AOT: nothing to run at access time. */
3416 if (vtable->initialized && !cfg->compile_aot)
/* BeforeFieldInit classes permit lazy initialization; no eager cctor call. */
3419 if (vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
/* Class has no cctor that still needs to run for this method. */
3422 if (!mono_class_needs_cctor_run (vtable->klass, method))
/* Instance method of the same class: runtime ran the cctor before entry. */
3425 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (vtable->klass == method->klass))
3426 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 * Emit IR computing the address of element INDEX of the one-dimensional
 * array ARR whose elements have class KLASS, including a bounds check.
 * Returns the MonoInst producing the element address (STACK_PTR).
 * NOTE(review): several lines (#else/#endif, returns, closing braces) are
 * missing from this extract; comments below describe the visible logic only.
 */
3433 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index)
3437 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
3439 mono_class_init (klass);
3440 size = mono_class_array_element_size (klass);
3442 mult_reg = alloc_preg (cfg);
3443 array_reg = arr->dreg;
3444 index_reg = index->dreg;
3446 #if SIZEOF_VOID_P == 8
3447 /* The array reg is 64 bits but the index reg is only 32 */
3448 index2_reg = alloc_preg (cfg);
3449 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
3451 index2_reg = index_reg;
/* Emit the index-vs-max_length bounds check before forming the address. */
3454 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
3456 #if defined(__i386__) || defined(__x86_64__)
/* x86 fast path: power-of-two element sizes use a single LEA. */
3457 if (size == 1 || size == 2 || size == 4 || size == 8) {
/* fast_log2 maps size -> shift amount; -1 entries are unreachable here. */
3458 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
3460 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
3461 ins->type = STACK_PTR;
/* Generic path: addr = arr + index * size + offsetof(MonoArray, vector). */
3467 add_reg = alloc_preg (cfg);
3469 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
3470 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
3471 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3472 ins->type = STACK_PTR;
3473 MONO_ADD_INS (cfg->cbb, ins);
/* Only compiled where the architecture has real multiply support,
 * since the address computation below uses OP_PMUL. */
3478 #ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 * Emit IR computing the address of element [index1, index2] of the
 * two-dimensional array ARR with element class KLASS, including per-rank
 * lower-bound adjustment and range checks. Returns the address ins (STACK_MP).
 */
3480 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
3482 int bounds_reg = alloc_preg (cfg);
3483 int add_reg = alloc_preg (cfg);
3484 int mult_reg = alloc_preg (cfg);
3485 int mult2_reg = alloc_preg (cfg);
3486 int low1_reg = alloc_preg (cfg);
3487 int low2_reg = alloc_preg (cfg);
3488 int high1_reg = alloc_preg (cfg);
3489 int high2_reg = alloc_preg (cfg);
3490 int realidx1_reg = alloc_preg (cfg);
3491 int realidx2_reg = alloc_preg (cfg);
3492 int sum_reg = alloc_preg (cfg);
3497 mono_class_init (klass);
3498 size = mono_class_array_element_size (klass);
3500 index1 = index_ins1->dreg;
3501 index2 = index_ins2->dreg;
3503 /* range checking */
/* Load the MonoArrayBounds vector; bounds [0] is dim 1, bounds [1] is dim 2. */
3504 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
3505 arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
/* realidx1 = index1 - lower_bound[0]; unsigned compare vs length[0]. */
3507 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
3508 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3509 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
3510 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
3511 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
3512 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
3513 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Same check for the second dimension (bounds [1], hence the struct-size offset). */
3515 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
3516 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3517 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
3518 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
3519 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
3520 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
3521 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* addr = arr + (realidx1 * length[1] + realidx2) * size + vector offset. */
3523 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
3524 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
3525 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
3526 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
3527 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3529 ins->type = STACK_MP;
3531 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 * Emit the element-address computation for an array Address/Get/Set call
 * CMETHOD. Fast paths cover rank 1 and (with intrinsics enabled) rank 2;
 * higher ranks fall back to a marshalled Address() helper call.
 * IS_SET indicates a setter, whose last parameter is the value, not an index.
 */
3538 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
3542 MonoMethod *addr_method;
/* For setters the trailing value argument is not an index dimension. */
3545 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
3548 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1]);
3550 #ifndef MONO_ARCH_EMULATE_MUL_DIV
3551 /* emit_ldelema_2 depends on OP_LMUL */
3552 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
3553 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
/* General case: call the generated Address wrapper for this rank/size. */
3557 element_size = mono_class_array_element_size (cmethod->klass->element_class);
3558 addr_method = mono_marshal_get_array_address (rank, element_size);
3559 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/*
 * mini_emit_inst_for_method:
 * Try to replace a call to CMETHOD (signature FSIG, arguments ARGS) with
 * inline IR — the JIT's method-intrinsics dispatcher. Returns the MonoInst
 * producing the call's result, or lets the caller fall back to a real call.
 * Recognized classes: String, Object, Array, RuntimeHelpers, Thread,
 * Monitor, Interlocked, Debugger/Environment (corlib), Math, plus SIMD and
 * arch-specific intrinsics at the end.
 * NOTE(review): many closing braces, returns and #else/#endif lines are
 * missing from this extract; comments annotate the visible structure only.
 */
3565 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
3567 MonoInst *ins = NULL;
/* Lazily cached; resolved once per process. */
3569 static MonoClass *runtime_helpers_class = NULL;
3570 if (! runtime_helpers_class)
3571 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
3572 "System.Runtime.CompilerServices", "RuntimeHelpers");
/* --- System.String intrinsics --- */
3574 if (cmethod->klass == mono_defaults.string_class) {
3575 if (strcmp (cmethod->name, "get_Chars") == 0) {
3576 int dreg = alloc_ireg (cfg);
3577 int index_reg = alloc_preg (cfg);
3578 int mult_reg = alloc_preg (cfg);
3579 int add_reg = alloc_preg (cfg);
3581 #if SIZEOF_VOID_P == 8
3582 /* The array reg is 64 bits but the index reg is only 32 */
3583 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
3585 index_reg = args [1]->dreg;
3587 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
3589 #if defined(__i386__) || defined(__x86_64__)
/* x86: LEA with scale 2 (UTF-16 chars), then load the 16-bit char. */
3590 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
3591 add_reg = ins->dreg;
3592 /* Avoid a warning */
3594 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
/* Generic path: addr = str + index*2 + chars offset; load unsigned 16-bit. */
3597 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
3598 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
3599 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
3600 add_reg, G_STRUCT_OFFSET (MonoString, chars));
3602 type_from_op (ins, NULL, NULL);
3604 } else if (strcmp (cmethod->name, "get_Length") == 0) {
3605 int dreg = alloc_ireg (cfg);
3606 /* Decompose later to allow more optimizations */
3607 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
3608 ins->type = STACK_I4;
3609 cfg->cbb->has_array_access = TRUE;
3610 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
3613 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
3614 int mult_reg = alloc_preg (cfg);
3615 int add_reg = alloc_preg (cfg);
3617 /* The corlib functions check for oob already. */
3618 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
3619 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
3620 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
/* --- System.Object intrinsics --- */
3623 } else if (cmethod->klass == mono_defaults.object_class) {
/* GetType: obj->vtable->type, no call needed. */
3625 if (strcmp (cmethod->name, "GetType") == 0) {
3626 int dreg = alloc_preg (cfg);
3627 int vt_reg = alloc_preg (cfg);
3628 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3629 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
3630 type_from_op (ins, NULL, NULL);
/* Address-based hash only valid with a non-moving collector. */
3633 #if !defined(MONO_ARCH_EMULATE_MUL_DIV) && !defined(HAVE_MOVING_COLLECTOR)
3634 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0) {
3635 int dreg = alloc_ireg (cfg);
3636 int t1 = alloc_ireg (cfg);
/* Knuth-style multiplicative hash of the object address. */
3638 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
3639 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
3640 ins->type = STACK_I4;
/* Object..ctor is empty: replace the call with a NOP. */
3644 } else if (strcmp (cmethod->name, ".ctor") == 0) {
3645 MONO_INST_NEW (cfg, ins, OP_NOP);
3646 MONO_ADD_INS (cfg->cbb, ins);
/* --- System.Array intrinsics (getters only) --- */
3650 } else if (cmethod->klass == mono_defaults.array_class) {
3651 if (cmethod->name [0] != 'g')
3654 if (strcmp (cmethod->name, "get_Rank") == 0) {
3655 int dreg = alloc_ireg (cfg);
3656 int vtable_reg = alloc_preg (cfg);
3657 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, vtable_reg,
3658 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3659 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
3660 vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3661 type_from_op (ins, NULL, NULL);
3664 } else if (strcmp (cmethod->name, "get_Length") == 0) {
3665 int dreg = alloc_ireg (cfg);
3667 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
3668 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
3669 type_from_op (ins, NULL, NULL);
/* --- RuntimeHelpers --- */
3674 } else if (cmethod->klass == runtime_helpers_class) {
3676 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
3677 EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
/* --- System.Threading.Thread --- */
3681 } else if (cmethod->klass == mono_defaults.thread_class) {
3682 if (strcmp (cmethod->name, "get_CurrentThread") == 0 && (ins = mono_arch_get_thread_intrinsic (cfg))) {
3683 ins->dreg = alloc_preg (cfg);
3684 ins->type = STACK_OBJ;
3685 MONO_ADD_INS (cfg->cbb, ins);
3687 } else if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
3688 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
3689 MONO_ADD_INS (cfg->cbb, ins);
3691 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
3692 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
3693 MONO_ADD_INS (cfg->cbb, ins);
/* --- System.Threading.Monitor --- */
3696 } else if (cmethod->klass == mono_defaults.monitor_class) {
/* Variant 1: trampoline taking the object in a fixed register. */
3697 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
3698 if (strcmp (cmethod->name, "Enter") == 0) {
3701 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
3702 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
3703 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
3704 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
3706 return (MonoInst*)call;
3707 } else if (strcmp (cmethod->name, "Exit") == 0) {
3710 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
3711 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
3712 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
3713 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
3715 return (MonoInst*)call;
/* Variant 2: managed IL fast-path wrappers. */
3717 #elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
3718 MonoMethod *fast_method = NULL;
3720 /* Avoid infinite recursion */
3721 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
3722 (strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
3723 strcmp (cfg->method->name, "FastMonitorExit") == 0))
3726 if (strcmp (cmethod->name, "Enter") == 0 ||
3727 strcmp (cmethod->name, "Exit") == 0)
3728 fast_method = mono_monitor_get_fast_path (cmethod);
3732 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
/* --- Array.GetGenericValueImpl: load/store the element directly. --- */
3734 } else if (mini_class_is_system_array (cmethod->klass) &&
3735 strcmp (cmethod->name, "GetGenericValueImpl") == 0) {
3736 MonoInst *addr, *store, *load;
3737 MonoClass *eklass = mono_class_from_mono_type (fsig->params [1]);
3739 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1]);
3740 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
3741 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/* --- System.Threading.Interlocked --- */
3743 } else if (cmethod->klass->image == mono_defaults.corlib &&
3744 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
3745 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
3748 #if SIZEOF_VOID_P == 8
3749 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
3750 /* 64 bit reads are already atomic */
3751 MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
3752 ins->dreg = mono_alloc_preg (cfg);
3753 ins->inst_basereg = args [0]->dreg;
3754 ins->inst_offset = 0;
3755 MONO_ADD_INS (cfg->cbb, ins);
3759 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
/* Increment/Decrement lower to atomic-add of a +1/-1 constant. */
3760 if (strcmp (cmethod->name, "Increment") == 0) {
3761 MonoInst *ins_iconst;
3764 if (fsig->params [0]->type == MONO_TYPE_I4)
3765 opcode = OP_ATOMIC_ADD_NEW_I4;
3766 #if SIZEOF_VOID_P == 8
3767 else if (fsig->params [0]->type == MONO_TYPE_I8)
3768 opcode = OP_ATOMIC_ADD_NEW_I8;
3771 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
3772 ins_iconst->inst_c0 = 1;
3773 ins_iconst->dreg = mono_alloc_ireg (cfg);
3774 MONO_ADD_INS (cfg->cbb, ins_iconst);
3776 MONO_INST_NEW (cfg, ins, opcode);
3777 ins->dreg = mono_alloc_ireg (cfg);
3778 ins->inst_basereg = args [0]->dreg;
3779 ins->inst_offset = 0;
3780 ins->sreg2 = ins_iconst->dreg;
3781 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
3782 MONO_ADD_INS (cfg->cbb, ins);
3784 } else if (strcmp (cmethod->name, "Decrement") == 0) {
3785 MonoInst *ins_iconst;
3788 if (fsig->params [0]->type == MONO_TYPE_I4)
3789 opcode = OP_ATOMIC_ADD_NEW_I4;
3790 #if SIZEOF_VOID_P == 8
3791 else if (fsig->params [0]->type == MONO_TYPE_I8)
3792 opcode = OP_ATOMIC_ADD_NEW_I8;
3795 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
3796 ins_iconst->inst_c0 = -1;
3797 ins_iconst->dreg = mono_alloc_ireg (cfg);
3798 MONO_ADD_INS (cfg->cbb, ins_iconst);
3800 MONO_INST_NEW (cfg, ins, opcode);
3801 ins->dreg = mono_alloc_ireg (cfg);
3802 ins->inst_basereg = args [0]->dreg;
3803 ins->inst_offset = 0;
3804 ins->sreg2 = ins_iconst->dreg;
3805 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
3806 MONO_ADD_INS (cfg->cbb, ins);
3808 } else if (strcmp (cmethod->name, "Add") == 0) {
3811 if (fsig->params [0]->type == MONO_TYPE_I4)
3812 opcode = OP_ATOMIC_ADD_NEW_I4;
3813 #if SIZEOF_VOID_P == 8
3814 else if (fsig->params [0]->type == MONO_TYPE_I8)
3815 opcode = OP_ATOMIC_ADD_NEW_I8;
3819 MONO_INST_NEW (cfg, ins, opcode);
3820 ins->dreg = mono_alloc_ireg (cfg);
3821 ins->inst_basereg = args [0]->dreg;
3822 ins->inst_offset = 0;
3823 ins->sreg2 = args [1]->dreg;
/* NOTE(review): compares against OP_ATOMIC_ADD_I4 while opcode was set to
 * OP_ATOMIC_ADD_NEW_I4 above — looks inconsistent with Increment/Decrement;
 * verify against upstream history. */
3824 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
3825 MONO_ADD_INS (cfg->cbb, ins);
3828 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
3830 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
3831 if (strcmp (cmethod->name, "Exchange") == 0) {
3834 if (fsig->params [0]->type == MONO_TYPE_I4)
3835 opcode = OP_ATOMIC_EXCHANGE_I4;
3836 #if SIZEOF_VOID_P == 8
/* On 64-bit, native int and object references need the I8 exchange. */
3837 else if ((fsig->params [0]->type == MONO_TYPE_I8) ||
3838 (fsig->params [0]->type == MONO_TYPE_I) ||
3839 (fsig->params [0]->type == MONO_TYPE_OBJECT))
3840 opcode = OP_ATOMIC_EXCHANGE_I8;
3842 else if ((fsig->params [0]->type == MONO_TYPE_I) ||
3843 (fsig->params [0]->type == MONO_TYPE_OBJECT))
3844 opcode = OP_ATOMIC_EXCHANGE_I4;
3849 MONO_INST_NEW (cfg, ins, opcode);
3850 ins->dreg = mono_alloc_ireg (cfg);
3851 ins->inst_basereg = args [0]->dreg;
3852 ins->inst_offset = 0;
3853 ins->sreg2 = args [1]->dreg;
3854 MONO_ADD_INS (cfg->cbb, ins);
3856 switch (fsig->params [0]->type) {
3858 ins->type = STACK_I4;
3862 ins->type = STACK_I8;
3864 case MONO_TYPE_OBJECT:
3865 ins->type = STACK_OBJ;
3868 g_assert_not_reached ();
3871 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
3873 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS_IMM
3875 * Can't implement CompareExchange methods this way since they have
3876 * three arguments. We can implement one of the common cases, where the new
3877 * value is a constant.
3879 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
3880 if ((fsig->params [1]->type == MONO_TYPE_I4 ||
3881 (sizeof (gpointer) == 4 && fsig->params [1]->type == MONO_TYPE_I))
3882 && args [2]->opcode == OP_ICONST) {
3883 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_IMM_I4);
3884 ins->dreg = alloc_ireg (cfg);
3885 ins->sreg1 = args [0]->dreg;
3886 ins->sreg2 = args [1]->dreg;
3887 ins->backend.data = GINT_TO_POINTER (args [2]->inst_c0);
3888 ins->type = STACK_I4;
3889 MONO_ADD_INS (cfg->cbb, ins);
3891 /* The I8 case is hard to detect, since the arg might be a conv.i8 (iconst) tree */
3893 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS_IMM */
/* --- Misc corlib: Debugger.Break, Environment.get_IsRunningOnWindows --- */
3897 } else if (cmethod->klass->image == mono_defaults.corlib) {
3898 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
3899 && strcmp (cmethod->klass->name, "Debugger") == 0) {
3900 MONO_INST_NEW (cfg, ins, OP_BREAK);
3901 MONO_ADD_INS (cfg->cbb, ins);
3904 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
3905 && strcmp (cmethod->klass->name, "Environment") == 0) {
3906 #ifdef PLATFORM_WIN32
3907 EMIT_NEW_ICONST (cfg, ins, 1);
3909 EMIT_NEW_ICONST (cfg, ins, 0);
/* --- System.Math: no branch-free Min/Max (NaN semantics, see link) --- */
3913 } else if (cmethod->klass == mono_defaults.math_class) {
3915 * There is general branches code for Min/Max, but it does not work for
3917 * http://everything2.com/?node_id=1051618
/* SIMD intrinsics, then the arch-specific fallback. */
3921 #ifdef MONO_ARCH_SIMD_INTRINSICS
3922 if (cfg->opt & MONO_OPT_SIMD) {
3923 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
3929 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
/*
 * mini_redirect_call:
 * Redirect selected runtime-internal calls to alternative implementations.
 * Currently only String.InternalAllocateStr is redirected, to the managed
 * GC allocator when one is available; otherwise NULL is (presumably)
 * returned and the normal call proceeds.
 */
3933 * This entry point could be used later for arbitrary method
3936 inline static MonoInst*
3937 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
3938 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
3940 if (method->klass == mono_defaults.string_class) {
3941 /* managed string allocation support */
3942 if (strcmp (method->name, "InternalAllocateStr") == 0) {
3943 MonoInst *iargs [2];
3944 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3945 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, FALSE);
/* Call the managed allocator with (vtable, length). */
3948 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3949 iargs [1] = args [0];
3950 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
/*
 * mono_save_args:
 * When inlining, store the caller's evaluation-stack values SP into fresh
 * local variables that become the inlined method's arguments (cfg->args).
 * Covers an implicit 'this' (index 0 when sig->hasthis) plus all declared
 * parameters.
 */
3957 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
3959 MonoInst *store, *temp;
3962 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
/* 'this' has no entry in sig->params; derive its type from the stack value. */
3963 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
3966 * FIXME: We should use *args++ = sp [0], but that would mean the arg
3967 * would be different than the MonoInst's used to represent arguments, and
3968 * the ldelema implementation can't deal with that.
3969 * Solution: When ldelema is used on an inline argument, create a var for
3970 * it, emit ldelema on that var, and emit the saving code below in
3971 * inline_method () if needed.
3973 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
3974 cfg->args [i] = temp;
3975 /* This uses cfg->args [i] which is set by the preceeding line */
3976 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
3977 store->cil_code = sp [0]->cil_code;
/* Debug switches: when set, inlining is restricted to methods whose full
 * name matches an environment-supplied prefix (see the checkers below). */
3982 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
3983 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
3985 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 * Debug helper: only allow inlining of CALLED_METHOD when its full name
 * starts with the prefix in $MONO_INLINE_CALLED_METHOD_NAME_LIMIT.
 * With no/empty limit, presumably every method is allowed (fall-through
 * return is not visible in this extract).
 */
3987 check_inline_called_method_name_limit (MonoMethod *called_method)
/* Cached across calls; "" means "no limit configured". */
3990 static char *limit = NULL;
3992 if (limit == NULL) {
3993 char *limit_string = getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
3995 if (limit_string != NULL)
3996 limit = limit_string;
3998 limit = (char *) "";
4001 if (limit [0] != '\0') {
4002 char *called_method_name = mono_method_full_name (called_method, TRUE);
/* Prefix match: compare only strlen(limit) leading characters. */
4004 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
4005 g_free (called_method_name);
4007 //return (strncmp_result <= 0);
4008 return (strncmp_result == 0);
4015 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 * Debug helper, mirror of the "called" variant above: only allow inlining
 * inside CALLER_METHOD when its full name starts with the prefix in
 * $MONO_INLINE_CALLER_METHOD_NAME_LIMIT.
 */
4017 check_inline_caller_method_name_limit (MonoMethod *caller_method)
4020 static char *limit = NULL;
4022 if (limit == NULL) {
4023 char *limit_string = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
4024 if (limit_string != NULL) {
4025 limit = limit_string;
4027 limit = (char *) "";
4031 if (limit [0] != '\0') {
4032 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
4034 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
4035 g_free (caller_method_name);
4037 //return (strncmp_result <= 0);
4038 return (strncmp_result == 0);
/*
 * inline_method:
 * Inline CMETHOD into the current compilation at IP. Saves the relevant
 * cfg state, converts the callee's IL via mono_method_to_ir2 () into new
 * bblocks between SBBLOCK and EBBLOCK, then either commits (merging the
 * new blocks into the CFG) or aborts (restoring cfg->cbb). Returns a
 * nonzero cost/success indication on commit (exact return values are not
 * visible in this extract).
 * INLINE_ALLWAYS forces inlining regardless of the cost heuristics.
 */
4046 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
4047 guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_allways)
4049 MonoInst *ins, *rvar = NULL;
4050 MonoMethodHeader *cheader;
4051 MonoBasicBlock *ebblock, *sbblock;
4053 MonoMethod *prev_inlined_method;
/* Saved cfg state, restored after the recursive IL conversion. */
4054 MonoInst **prev_locals, **prev_args;
4055 MonoType **prev_arg_types;
4056 guint prev_real_offset;
4057 GHashTable *prev_cbb_hash;
4058 MonoBasicBlock **prev_cil_offset_to_bb;
4059 MonoBasicBlock *prev_cbb;
4060 unsigned char* prev_cil_start;
4061 guint32 prev_cil_offset_to_bb_len;
4062 MonoMethod *prev_current_method;
4063 MonoGenericContext *prev_generic_context;
4065 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
4067 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
4068 if ((! inline_allways) && ! check_inline_called_method_name_limit (cmethod))
4071 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
4072 if ((! inline_allways) && ! check_inline_caller_method_name_limit (cfg->method))
4076 if (cfg->verbose_level > 2)
4077 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4079 if (!cmethod->inline_info) {
4080 mono_jit_stats.inlineable_methods++;
4081 cmethod->inline_info = 1;
4083 /* allocate space to store the return value */
4084 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
4085 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
4088 /* allocate local variables */
4089 cheader = mono_method_get_header (cmethod);
4090 prev_locals = cfg->locals;
4091 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
4092 for (i = 0; i < cheader->num_locals; ++i)
4093 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
4095 /* allocate start and end blocks */
4096 /* This is needed so if the inline is aborted, we can clean up */
4097 NEW_BBLOCK (cfg, sbblock);
4098 sbblock->real_offset = real_offset;
4100 NEW_BBLOCK (cfg, ebblock);
4101 ebblock->block_num = cfg->num_bblocks++;
4102 ebblock->real_offset = real_offset;
4104 prev_args = cfg->args;
4105 prev_arg_types = cfg->arg_types;
4106 prev_inlined_method = cfg->inlined_method;
4107 cfg->inlined_method = cmethod;
4108 cfg->ret_var_set = FALSE;
4109 prev_real_offset = cfg->real_offset;
4110 prev_cbb_hash = cfg->cbb_hash;
4111 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
4112 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
4113 prev_cil_start = cfg->cil_start;
4114 prev_cbb = cfg->cbb;
4115 prev_current_method = cfg->current_method;
4116 prev_generic_context = cfg->generic_context;
/* Recursively convert the callee's IL; costs < 0 signals failure. */
4118 costs = mono_method_to_ir2 (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, *ip == CEE_CALLVIRT);
4120 cfg->inlined_method = prev_inlined_method;
4121 cfg->real_offset = prev_real_offset;
4122 cfg->cbb_hash = prev_cbb_hash;
4123 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
4124 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
4125 cfg->cil_start = prev_cil_start;
4126 cfg->locals = prev_locals;
4127 cfg->args = prev_args;
4128 cfg->arg_types = prev_arg_types;
4129 cfg->current_method = prev_current_method;
4130 cfg->generic_context = prev_generic_context;
/* Commit path: cheap enough (cost cap 60) or forced. */
4132 if ((costs >= 0 && costs < 60) || inline_allways) {
4133 if (cfg->verbose_level > 2)
4134 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4136 mono_jit_stats.inlined_methods++;
4138 /* always add some code to avoid block split failures */
4139 MONO_INST_NEW (cfg, ins, OP_NOP);
4140 MONO_ADD_INS (prev_cbb, ins);
4142 prev_cbb->next_bb = sbblock;
4143 link_bblock (cfg, prev_cbb, sbblock);
4146 * Get rid of the begin and end bblocks if possible to aid local
4149 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
4151 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
4152 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
4154 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
4155 MonoBasicBlock *prev = ebblock->in_bb [0];
4156 mono_merge_basic_blocks (cfg, prev, ebblock);
4158 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
4159 mono_merge_basic_blocks (cfg, prev_cbb, prev);
4160 cfg->cbb = prev_cbb;
4168 * If the inlined method contains only a throw, then the ret var is not
4169 * set, so set it to a dummy value.
4171 if (!cfg->ret_var_set) {
4172 static double r8_0 = 0.0;
4174 switch (rvar->type) {
4176 MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
4179 MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
4184 MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
4187 MONO_INST_NEW (cfg, ins, OP_R8CONST);
4188 ins->type = STACK_R8;
4189 ins->inst_p0 = (void*)&r8_0;
4190 ins->dreg = rvar->dreg;
4191 MONO_ADD_INS (cfg->cbb, ins);
4194 MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (fsig->ret));
4197 g_assert_not_reached ();
/* Push the inlined return value onto the caller's stack. */
4201 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
/* Abort path: clear any pending exception and discard the new bblocks. */
4206 if (cfg->verbose_level > 2)
4207 printf ("INLINE ABORTED %s\n", mono_method_full_name (cmethod, TRUE));
4208 cfg->exception_type = MONO_EXCEPTION_NONE;
4209 mono_loader_clear_error ();
4211 /* This gets rid of the newly added bblocks */
4212 cfg->cbb = prev_cbb;
4218 * Some of these comments may well be out-of-date.
4219 * Design decisions: we do a single pass over the IL code (and we do bblock
4220 * splitting/merging in the few cases when it's required: a back jump to an IL
4221 * address that was not already seen as bblock starting point).
4222 * Code is validated as we go (full verification is still better left to metadata/verify.c).
4223 * Complex operations are decomposed in simpler ones right away. We need to let the
4224 * arch-specific code peek and poke inside this process somehow (except when the
4225 * optimizations can take advantage of the full semantic info of coarse opcodes).
4226 * All the opcodes of the form opcode.s are 'normalized' to opcode.
4227 * MonoInst->opcode initially is the IL opcode or some simplification of that
4228 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
4229 * opcode with value bigger than OP_LAST.
4230 * At this point the IR can be handed over to an interpreter, a dumb code generator
4231 * or to the optimizing code generator that will translate it to SSA form.
4233 * Profiling directed optimizations.
4234 * We may compile by default with few or no optimizations and instrument the code
4235 * or the user may indicate what methods to optimize the most either in a config file
4236 * or through repeated runs where the compiler applies offline the optimizations to
4237 * each method and then decides if it was worth it.
/* Verification helpers used by the IL-to-IR loop: each bails out via
 * UNVERIFIED (or load_error) when the IL being decoded is malformed. */
4240 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
4241 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
4242 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
4243 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
4244 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
4245 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
4246 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
4247 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; goto load_error;}
4249 /* offset from br.s -> br like opcodes */
4250 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 * Return whether the IL address IP still belongs to basic block BB, i.e.
 * no other bblock starts at that offset in cfg->cil_offset_to_bb.
 */
4253 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
4255 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
4257 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 * Pre-scan the IL stream [START, END) and create a basic block (via
 * GET_BBLOCK) at every branch target and at the instruction following each
 * branch, so the main conversion loop sees all block boundaries up front.
 * Also marks blocks ending in CEE_THROW as out-of-line (cold).
 * NOTE(review): advancing of ip per argument kind and the failure paths
 * are partially elided in this extract.
 */
4261 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
4263 unsigned char *ip = start;
4264 unsigned char *target;
4267 MonoBasicBlock *bblock;
4268 const MonoOpcode *opcode;
4271 cli_addr = ip - start;
4272 i = mono_opcode_value ((const guint8 **)&ip, end);
4275 opcode = &mono_opcodes [i];
/* Advance over the operand according to its encoding; branches create bblocks. */
4276 switch (opcode->argument) {
4277 case MonoInlineNone:
4280 case MonoInlineString:
4281 case MonoInlineType:
4282 case MonoInlineField:
4283 case MonoInlineMethod:
4286 case MonoShortInlineR:
4293 case MonoShortInlineVar:
4294 case MonoShortInlineI:
4297 case MonoShortInlineBrTarget:
/* 1-byte signed displacement relative to the next instruction. */
4298 target = start + cli_addr + 2 + (signed char)ip [1];
4299 GET_BBLOCK (cfg, bblock, target);
/* Fall-through successor also starts a block. */
4302 GET_BBLOCK (cfg, bblock, ip);
4304 case MonoInlineBrTarget:
/* 4-byte signed displacement. */
4305 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
4306 GET_BBLOCK (cfg, bblock, target);
4309 GET_BBLOCK (cfg, bblock, ip);
4311 case MonoInlineSwitch: {
4312 guint32 n = read32 (ip + 1);
/* Switch: n 4-byte targets follow the count; default is fall-through. */
4315 cli_addr += 5 + 4 * n;
4316 target = start + cli_addr;
4317 GET_BBLOCK (cfg, bblock, target);
4319 for (j = 0; j < n; ++j) {
4320 target = start + cli_addr + (gint32)read32 (ip);
4321 GET_BBLOCK (cfg, bblock, target);
4331 g_assert_not_reached ();
4334 if (i == CEE_THROW) {
4335 unsigned char *bb_start = ip - 1;
4337 /* Find the start of the bblock containing the throw */
4339 while ((bb_start >= start) && !bblock) {
4340 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
/* Throwing blocks are moved out of the hot path by later passes. */
4344 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 * Resolve TOKEN to a MonoMethod in the context of method M. For wrapper
 * methods the token indexes the wrapper's own data; otherwise it is a
 * normal metadata token resolved against M's image. Open constructed
 * types are permitted (contrast mini_get_method below).
 */
4353 static inline MonoMethod *
4354 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4358 if (m->wrapper_type != MONO_WRAPPER_NONE)
4359 return mono_method_get_wrapper_data (m, token);
4361 method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 * Like mini_get_method_allow_open (), but when not compiling with generic
 * sharing, rejects methods on open constructed types (the consequence of
 * the check is elided in this extract — presumably the method is nulled
 * or an error is flagged; confirm against the full source).
 */
4366 static inline MonoMethod *
4367 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4369 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
4371 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 * Resolve TOKEN to a MonoClass for METHOD: wrapper methods look it up in
 * their wrapper data, normal methods via metadata. The class is
 * initialized before being returned.
 */
4377 static inline MonoClass*
4378 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
4382 if (method->wrapper_type != MONO_WRAPPER_NONE)
4383 klass = mono_method_get_wrapper_data (method, token);
4385 klass = mono_class_get_full (method->klass->image, token, context);
4387 mono_class_init (klass);
4392 * Returns TRUE if the JIT should abort inlining because "callee"
4393 * is influenced by security attributes.
/*
 * check_linkdemand:
 * CAS link-demand enforcement at JIT time. When the callee carries
 * declarative security and the demand fails, either emits code throwing a
 * SecurityException (ECMA link demands) or records a pending
 * SECURITY_LINKDEMAND exception on the cfg.
 */
4396 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
/* Only check when inlining (cfg->method != caller) into secured callees. */
4400 if ((cfg->method != caller) && mono_method_has_declsec (callee)) {
4404 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
4405 if (result == MONO_JIT_SECURITY_OK)
4408 if (result == MONO_JIT_LINKDEMAND_ECMA) {
4409 /* Generate code to throw a SecurityException before the actual call/link */
4410 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4413 NEW_ICONST (cfg, args [0], 4);
4414 NEW_METHODCONST (cfg, args [1], caller);
4415 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
4416 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
4417 /* don't hide previous results */
4418 cfg->exception_type = MONO_EXCEPTION_SECURITY_LINKDEMAND;
4419 cfg->exception_data = result;
/*
 * method_access_exception:
 * Return (caching on first use) the SecurityManager.MethodAccessException
 * helper method used to raise method-access violations at runtime.
 */
4427 method_access_exception (void)
4429 static MonoMethod *method = NULL;
4432 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4433 method = mono_class_get_method_from_name (secman->securitymanager,
4434 "MethodAccessException", 2);
/*
 * emit_throw_method_access_exception:
 * Emit IR that calls the managed MethodAccessException thrower with
 * (caller, callee) so the violation is raised when this code executes.
 */
4441 emit_throw_method_access_exception (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
4442 MonoBasicBlock *bblock, unsigned char *ip)
4444 MonoMethod *thrower = method_access_exception ();
4447 EMIT_NEW_METHODCONST (cfg, args [0], caller);
4448 EMIT_NEW_METHODCONST (cfg, args [1], callee);
4449 mono_emit_method_call (cfg, thrower, args, NULL);
/*
 * verification_exception:
 * Return (caching on first use) the SecurityManager.VerificationException
 * helper method used to raise verification failures at runtime.
 */
4453 verification_exception (void)
4455 static MonoMethod *method = NULL;
4458 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4459 method = mono_class_get_method_from_name (secman->securitymanager,
4460 "VerificationException", 0);
/*
 * emit_throw_verification_exception:
 *
 *   Emit IR calling the managed VerificationException thrower (no
 * arguments) at the current emission point.
 */
4467 emit_throw_verification_exception (MonoCompile *cfg, MonoBasicBlock *bblock, unsigned char *ip)
4469 MonoMethod *thrower = verification_exception ();
4471 mono_emit_method_call (cfg, thrower, NULL, NULL);
/*
 * ensure_method_is_allowed_to_call_method:
 *
 *   CoreCLR security check: compare the security levels of CALLER and
 * CALLEE and, when the caller's level is below the callee's (and neither
 * side is SafeCritical), emit code throwing a MethodAccessException
 * instead of allowing the call.
 */
4475 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
4476 MonoBasicBlock *bblock, unsigned char *ip)
4478 MonoSecurityCoreCLRLevel caller_level = mono_security_core_clr_method_level (caller, TRUE);
4479 MonoSecurityCoreCLRLevel callee_level = mono_security_core_clr_method_level (callee, TRUE);
4480 gboolean is_safe = TRUE;
/* The call is allowed when the caller's level is at least the callee's, or
 * either side is marked SafeCritical. */
4482 if (!(caller_level >= callee_level ||
4483 caller_level == MONO_SECURITY_CORE_CLR_SAFE_CRITICAL ||
4484 callee_level == MONO_SECURITY_CORE_CLR_SAFE_CRITICAL)) {
4489 emit_throw_method_access_exception (cfg, caller, callee, bblock, ip);
/*
 * method_is_safe:
 *
 *   Visible here: a name check treating any method literally called
 * "unsafeMethod" specially — presumably a test hook for the CoreCLR
 * verification path; confirm against the full function, which is
 * truncated in this view.
 */
4493 method_is_safe (MonoMethod *method)
4496 if (strcmp (method->name, "unsafeMethod") == 0)
4503 * Check that the IL instructions at ip are the array initialization
4504 * sequence and return the pointer to the data and the size.
4507 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
/*
 * Recognized IL pattern (as matched below):
 */
4510 * newarr[System.Int32]
4512 * ldtoken field valuetype ...
4513 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* dup; ldtoken <field-token>; call <method-token> — ip[5] == 0x4 checks the
 * token-table byte of the ldtoken operand. */
4515 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
4516 guint32 token = read32 (ip + 7);
4517 guint32 field_token = read32 (ip + 2);
4518 guint32 field_index = field_token & 0xffffff;
4520 const char *data_ptr;
4522 MonoMethod *cmethod;
4523 MonoClass *dummy_class;
4524 MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
4530 *out_field_token = field_token;
/* Verify the called method really is corlib's RuntimeHelpers.InitializeArray. */
4532 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
4535 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
/* Only element types whose raw bytes can be copied verbatim are handled;
 * multi-byte types are restricted to little-endian hosts below. */
4537 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
4538 case MONO_TYPE_BOOLEAN:
4542 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
4543 #if G_BYTE_ORDER == G_LITTLE_ENDIAN
4544 case MONO_TYPE_CHAR:
4554 return NULL; /* stupid ARM FP swapped format */
4564 if (size > mono_type_size (field->type, &dummy_align))
4567 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
/* For metadata-backed (non-SRE) images, map the field's RVA to get the
 * initialization blob; AOT code defers the lookup to load time and returns
 * the RVA itself. */
4568 if (!method->klass->image->dynamic) {
4569 field_index = read32 (ip + 2) & 0xffffff;
4570 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
4571 data_ptr = mono_image_rva_map (method->klass->image, rva);
4572 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
4573 /* for aot code we do the lookup on load */
4574 if (aot && data_ptr)
4575 return GUINT_TO_POINTER (rva);
4577 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
4579 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *
 *   Record an InvalidProgramException on CFG, with a message naming the
 * method and disassembling the offending instruction at IP (or noting an
 * empty method body).
 */
4587 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
4589 char *method_fname = mono_method_full_name (method, TRUE);
4592 if (mono_method_get_header (method)->code_size == 0)
4593 method_code = g_strdup ("method body is empty.");
4595 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
4596 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
4597 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
/* The formatted message owns copies; free the temporaries. */
4598 g_free (method_fname);
4599 g_free (method_code);
/*
 * set_exception_object:
 *
 *   Abort compilation with a pre-built managed exception object. The
 * exception_ptr field is registered as a GC root before storing the
 * object so it is not collected while the compile aborts.
 */
4603 set_exception_object (MonoCompile *cfg, MonoException *exception)
4605 cfg->exception_type = MONO_EXCEPTION_OBJECT_SUPPLIED;
4606 MONO_GC_REGISTER_ROOT (cfg->exception_ptr);
4607 cfg->exception_ptr = exception;
/*
 * generic_class_is_reference_type:
 *
 *   Decide whether KLASS is a reference type, resolving type variables
 * through the generic sharing context first when one is active.
 */
4611 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
4615 if (cfg->generic_sharing_context)
4616 type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, &klass->byval_arg);
4618 type = &klass->byval_arg;
4619 return MONO_TYPE_IS_REFERENCE (type);
4623 * mono_decompose_array_access_opts:
4625 * Decompose array access opcodes.
4626 * This should be in decompose.c, but it emits calls so it has to stay here until
4627 * the old JIT is gone.
4630 mono_decompose_array_access_opts (MonoCompile *cfg)
4632 MonoBasicBlock *bb, *first_bb;
4635 * Unlike decompose_long_opts, this pass does not alter the CFG of the method so it
4636 * can be executed anytime. It should be run before decompose_long
4640 * Create a dummy bblock and emit code into it so we can use the normal
4641 * code generation macros.
4643 cfg->cbb = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoBasicBlock));
4644 first_bb = cfg->cbb;
4646 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
4648 MonoInst *prev = NULL;
4650 MonoInst *iargs [3];
/* Fast skip: only visit blocks flagged as containing array accesses. */
4653 if (!bb->has_array_access)
4656 if (cfg->verbose_level > 3) mono_print_bb (bb, "BEFORE DECOMPOSE-ARRAY-ACCESS-OPTS ");
4658 cfg->cbb->code = cfg->cbb->last_ins = NULL;
4664 for (ins = bb->code; ins; ins = ins->next) {
4665 switch (ins->opcode) {
/* (case label truncated) array-length load: read MonoArray.max_length. */
4667 NEW_LOAD_MEMBASE (cfg, dest, OP_LOADI4_MEMBASE, ins->dreg, ins->sreg1,
4668 G_STRUCT_OFFSET (MonoArray, max_length));
4669 MONO_ADD_INS (cfg->cbb, dest);
4671 case OP_BOUNDS_CHECK:
4672 MONO_ARCH_EMIT_BOUNDS_CHECK (cfg, ins->sreg1, ins->inst_imm, ins->sreg2);
/* (case label truncated) new-array: shared code cannot bake in a vtable,
 * so it calls mono_array_new with (domain, class, length); otherwise the
 * vtable is looked up now and mono_array_new_specific is called. */
4675 if (cfg->opt & MONO_OPT_SHARED) {
4676 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4677 EMIT_NEW_CLASSCONST (cfg, iargs [1], ins->inst_newa_class);
4678 MONO_INST_NEW (cfg, iargs [2], OP_MOVE);
4679 iargs [2]->dreg = ins->sreg1;
4681 dest = mono_emit_jit_icall (cfg, mono_array_new, iargs);
4682 dest->dreg = ins->dreg;
4684 MonoVTable *vtable = mono_class_vtable (cfg->domain, mono_array_class_get (ins->inst_newa_class, 1));
4687 NEW_VTABLECONST (cfg, iargs [0], vtable);
4688 MONO_ADD_INS (cfg->cbb, iargs [0]);
4689 MONO_INST_NEW (cfg, iargs [1], OP_MOVE);
4690 iargs [1]->dreg = ins->sreg1;
4692 dest = mono_emit_jit_icall (cfg, mono_array_new_specific, iargs);
4693 dest->dreg = ins->dreg;
/* (case label truncated) string-length load: read MonoString.length. */
4697 NEW_LOAD_MEMBASE (cfg, dest, OP_LOADI4_MEMBASE, ins->dreg,
4698 ins->sreg1, G_STRUCT_OFFSET (MonoString, length));
4699 MONO_ADD_INS (cfg->cbb, dest);
4705 g_assert (cfg->cbb == first_bb);
4707 if (cfg->cbb->code || (cfg->cbb != first_bb)) {
4708 /* Replace the original instruction with the new code sequence */
4710 mono_replace_ins (cfg, bb, ins, &prev, first_bb, cfg->cbb);
/* Reset the scratch bblock so the next instruction starts clean. */
4711 first_bb->code = first_bb->last_ins = NULL;
4712 first_bb->in_count = first_bb->out_count = 0;
4713 cfg->cbb = first_bb;
4720 if (cfg->verbose_level > 3) mono_print_bb (bb, "AFTER DECOMPOSE-ARRAY-ACCESS-OPTS ");
4730 #ifdef MONO_ARCH_SOFT_FLOAT
4733 * mono_decompose_soft_float:
4735 * Soft float support on ARM. We store each double value in a pair of integer vregs,
4736 * similar to long support on 32 bit platforms. 32 bit float values require special
4737 * handling when used as locals, arguments, and in calls.
4738 * One big problem with soft-float is that there are few r4 test cases in our test suite.
4741 mono_decompose_soft_float (MonoCompile *cfg)
4743 MonoBasicBlock *bb, *first_bb;
4746 * This pass creates long opcodes, so it should be run before decompose_long_opts ().
4750 * Create a dummy bblock and emit code into it so we can use the normal
4751 * code generation macros.
4753 cfg->cbb = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoBasicBlock));
4754 first_bb = cfg->cbb;
4756 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
4758 MonoInst *prev = NULL;
4761 if (cfg->verbose_level > 3) mono_print_bb (bb, "BEFORE HANDLE-SOFT-FLOAT ");
4763 cfg->cbb->code = cfg->cbb->last_ins = NULL;
4769 for (ins = bb->code; ins; ins = ins->next) {
4770 const char *spec = INS_INFO (ins->opcode);
4772 /* Most fp operations are handled automatically by opcode emulation */
4774 switch (ins->opcode) {
/* (case label truncated) r8 constant: reinterpret the double's bits as an
 * i8 constant via the union 'd'. */
4777 d.vald = *(double*)ins->inst_p0;
4778 MONO_EMIT_NEW_I8CONST (cfg, ins->dreg, d.vall);
4783 /* We load the r8 value */
/* NOTE(review): inst_p0 is read as float but stored into vald (a double);
 * confirm against the full case label whether a conversion was intended. */
4784 d.vald = *(float*)ins->inst_p0;
4785 MONO_EMIT_NEW_I8CONST (cfg, ins->dreg, d.vall);
/* fp register moves become integer pair moves. */
4789 ins->opcode = OP_LMOVE;
4792 ins->opcode = OP_MOVE;
4793 ins->sreg1 = ins->sreg1 + 1;
4796 ins->opcode = OP_MOVE;
4797 ins->sreg1 = ins->sreg1 + 2;
/* fp return: split the value into the two halves of the vreg pair. */
4800 int reg = ins->sreg1;
4802 ins->opcode = OP_SETLRET;
4804 ins->sreg1 = reg + 1;
4805 ins->sreg2 = reg + 2;
/* r8 loads/stores are plain 8-byte integer loads/stores. */
4808 case OP_LOADR8_MEMBASE:
4809 ins->opcode = OP_LOADI8_MEMBASE;
4811 case OP_STORER8_MEMBASE_REG:
4812 ins->opcode = OP_STOREI8_MEMBASE_REG;
4814 case OP_STORER4_MEMBASE_REG: {
4815 MonoInst *iargs [2];
4818 /* Arg 1 is the double value */
4819 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
4820 iargs [0]->dreg = ins->sreg1;
4822 /* Arg 2 is the address to store to */
4823 addr_reg = mono_alloc_preg (cfg);
4824 EMIT_NEW_BIALU_IMM (cfg, iargs [1], OP_PADD_IMM, addr_reg, ins->inst_destbasereg, ins->inst_offset);
/* The icall performs the r8->r4 narrowing store. */
4825 mono_emit_jit_icall (cfg, mono_fstore_r4, iargs);
4829 case OP_LOADR4_MEMBASE: {
4830 MonoInst *iargs [1];
4834 addr_reg = mono_alloc_preg (cfg);
4835 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, addr_reg, ins->inst_basereg, ins->inst_offset);
/* The icall loads the r4 and widens it to r8. */
4836 conv = mono_emit_jit_icall (cfg, mono_fload_r4, iargs);
4837 conv->dreg = ins->dreg;
4842 case OP_FCALL_MEMBASE: {
4843 MonoCallInst *call = (MonoCallInst*)ins;
4844 if (call->signature->ret->type == MONO_TYPE_R4) {
4845 MonoCallInst *call2;
4846 MonoInst *iargs [1];
4849 /* Convert the call into a call returning an int */
4850 MONO_INST_NEW_CALL (cfg, call2, OP_CALL);
4851 memcpy (call2, call, sizeof (MonoCallInst));
4852 switch (ins->opcode) {
4854 call2->inst.opcode = OP_CALL;
4857 call2->inst.opcode = OP_CALL_REG;
4859 case OP_FCALL_MEMBASE:
4860 call2->inst.opcode = OP_CALL_MEMBASE;
4863 g_assert_not_reached ();
4865 call2->inst.dreg = mono_alloc_ireg (cfg);
4866 MONO_ADD_INS (cfg->cbb, (MonoInst*)call2);
4868 /* FIXME: Optimize this */
4870 /* Emit an r4->r8 conversion */
4871 EMIT_NEW_VARLOADA_VREG (cfg, iargs [0], call2->inst.dreg, &mono_defaults.int32_class->byval_arg);
4872 conv = mono_emit_jit_icall (cfg, mono_fload_r4, iargs);
4873 conv->dreg = ins->dreg;
/* r8-returning fp calls become long calls (value in a register pair). */
4875 switch (ins->opcode) {
4877 ins->opcode = OP_LCALL;
4880 ins->opcode = OP_LCALL_REG;
4882 case OP_FCALL_MEMBASE:
4883 ins->opcode = OP_LCALL_MEMBASE;
4886 g_assert_not_reached ();
/* (case label truncated) fcompare followed by a fp branch. */
4892 MonoJitICallInfo *info;
4893 MonoInst *iargs [2];
4894 MonoInst *call, *cmp, *br;
4896 /* Convert fcompare+fbcc to icall+icompare+beq */
4898 info = mono_find_jit_opcode_emulation (ins->next->opcode);
4901 /* Create dummy MonoInst's for the arguments */
4902 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
4903 iargs [0]->dreg = ins->sreg1;
4904 MONO_INST_NEW (cfg, iargs [1], OP_ARG);
4905 iargs [1]->dreg = ins->sreg2;
4907 call = mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, iargs);
4909 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE_IMM);
4910 cmp->sreg1 = call->dreg;
4912 MONO_ADD_INS (cfg->cbb, cmp);
4914 MONO_INST_NEW (cfg, br, OP_IBNE_UN);
4915 br->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * 2);
4916 br->inst_true_bb = ins->next->inst_true_bb;
4917 br->inst_false_bb = ins->next->inst_false_bb;
4918 MONO_ADD_INS (cfg->cbb, br);
4920 /* The call sequence might include fp ins */
4923 /* Skip fbcc or fccc */
4924 NULLIFY_INS (ins->next);
/* (case label truncated) fp compare-and-set (fccc). */
4932 MonoJitICallInfo *info;
4933 MonoInst *iargs [2];
4936 /* Convert fccc to icall+icompare+iceq */
4938 info = mono_find_jit_opcode_emulation (ins->opcode);
4941 /* Create dummy MonoInst's for the arguments */
4942 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
4943 iargs [0]->dreg = ins->sreg1;
4944 MONO_INST_NEW (cfg, iargs [1], OP_ARG);
4945 iargs [1]->dreg = ins->sreg2;
4947 call = mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, iargs);
4949 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, call->dreg, 1);
4950 MONO_EMIT_NEW_UNALU (cfg, OP_ICEQ, ins->dreg, -1);
4952 /* The call sequence might include fp ins */
/* Sanity check: after decomposition no fp vregs may survive. */
4957 if (spec [MONO_INST_SRC1] == 'f' || spec [MONO_INST_SRC2] == 'f' || spec [MONO_INST_DEST] == 'f') {
4958 mono_print_ins (ins);
4959 g_assert_not_reached ();
4964 g_assert (cfg->cbb == first_bb);
4966 if (cfg->cbb->code || (cfg->cbb != first_bb)) {
4967 /* Replace the original instruction with the new code sequence */
4969 mono_replace_ins (cfg, bb, ins, &prev, first_bb, cfg->cbb);
4970 first_bb->code = first_bb->last_ins = NULL;
4971 first_bb->in_count = first_bb->out_count = 0;
4972 cfg->cbb = first_bb;
4979 if (cfg->verbose_level > 3) mono_print_bb (bb, "AFTER HANDLE-SOFT-FLOAT ");
/* This pass produced long opcodes, so run the long decomposition now. */
4982 mono_decompose_long_opts (cfg);
/*
 * emit_stloc_ir:
 *
 *   Emit a store of the value on top of the evaluation stack (*sp) into
 * local N. When the store would be a plain reg-reg move of a fresh
 * constant, retarget the constant's dreg directly to the local's vreg
 * instead of emitting a separate move.
 */
4988 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
4991 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
4992 if ((opcode == OP_MOVE) && ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
4993 /* Optimize reg-reg moves away */
4995 * Can't optimize other opcodes, since sp[0] might point to
4996 * the last ins of a decomposed opcode.
4998 sp [0]->dreg = (cfg)->locals [n]->dreg;
5000 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
5005 * ldloca inhibits many optimizations so try to get rid of it in common
/* ...cases: when ldloca is immediately consumed by initobj we can zero or
 * null the local directly and skip the address-taking entirely.
 * NOTE(review): the return statements and some branches are truncated in
 * this view. */
5008 static inline unsigned char *
5009 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
/* NOTE(review): read16 here vs. the SIZE parameter suggests both short and
 * long ldloca forms are handled; the dispatch on 'size' is not visible. */
5018 local = read16 (ip + 2);
/* Peek ahead for the two-byte initobj opcode (0xFE prefix + CEE_INITOBJ). */
5022 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
5023 gboolean skip = FALSE;
5025 /* From the INITOBJ case */
5026 token = read32 (ip + 2);
5027 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
5028 CHECK_TYPELOAD (klass);
/* Reference types are initialized by storing NULL; value types by VZERO. */
5029 if (generic_class_is_reference_type (cfg, klass)) {
5030 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5031 } else if (MONO_TYPE_IS_REFERENCE (&klass->byval_arg)) {
5032 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5033 } else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
5034 MONO_EMIT_NEW_VZERO (cfg, cfg->locals [local]->dreg, klass);
5047 * mono_method_to_ir: translates IL into basic blocks containing trees
5050 mono_method_to_ir2 (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
5051 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
5052 guint inline_offset, gboolean is_virtual_call)
5054 MonoInst *ins, **sp, **stack_start;
5055 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
5056 MonoMethod *cmethod, *method_definition;
5057 MonoInst **arg_array;
5058 MonoMethodHeader *header;
5060 guint32 token, ins_flag;
5062 MonoClass *constrained_call = NULL;
5063 unsigned char *ip, *end, *target, *err_pos;
5064 static double r8_0 = 0.0;
5065 MonoMethodSignature *sig;
5066 MonoGenericContext *generic_context = NULL;
5067 MonoGenericContainer *generic_container = NULL;
5068 MonoType **param_types;
5069 int i, n, start_new_bblock, dreg;
5070 int num_calls = 0, inline_costs = 0;
5071 int breakpoint_id = 0;
5073 MonoBoolean security, pinvoke;
5074 MonoSecurityManager* secman = NULL;
5075 MonoDeclSecurityActions actions;
5076 GSList *class_inits = NULL;
5077 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
5080 /* serialization and xdomain stuff may need access to private fields and methods */
5081 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
5082 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
5083 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
5084 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
5085 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
5086 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
5088 dont_verify |= mono_security_get_mode () == MONO_SECURITY_MODE_SMCS_HACK;
5090 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
5091 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
5092 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
5093 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
5095 image = method->klass->image;
5096 header = mono_method_get_header (method);
5097 generic_container = mono_method_get_generic_container (method);
5098 sig = mono_method_signature (method);
5099 num_args = sig->hasthis + sig->param_count;
5100 ip = (unsigned char*)header->code;
5101 cfg->cil_start = ip;
5102 end = ip + header->code_size;
5103 mono_jit_stats.cil_code_size += header->code_size;
5105 method_definition = method;
5106 while (method_definition->is_inflated) {
5107 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
5108 method_definition = imethod->declaring;
5111 /* SkipVerification is not allowed if core-clr is enabled */
5112 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
5114 dont_verify_stloc = TRUE;
5117 if (!dont_verify && mini_method_verify (cfg, method_definition))
5118 goto exception_exit;
5120 if (mono_debug_using_mono_debugger ())
5121 cfg->keep_cil_nops = TRUE;
5123 if (sig->is_inflated)
5124 generic_context = mono_method_get_context (method);
5125 else if (generic_container)
5126 generic_context = &generic_container->context;
5127 cfg->generic_context = generic_context;
5129 if (!cfg->generic_sharing_context)
5130 g_assert (!sig->has_type_parameters);
5132 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
5133 g_assert (method->is_inflated);
5134 g_assert (mono_method_get_context (method)->method_inst);
5136 if (method->is_inflated && mono_method_get_context (method)->method_inst)
5137 g_assert (sig->generic_param_count);
5139 if (cfg->method == method) {
5140 cfg->real_offset = 0;
5142 cfg->real_offset = inline_offset;
5145 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
5146 cfg->cil_offset_to_bb_len = header->code_size;
5148 cfg->current_method = method;
5150 if (cfg->verbose_level > 2)
5151 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
5153 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
5155 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
5156 for (n = 0; n < sig->param_count; ++n)
5157 param_types [n + sig->hasthis] = sig->params [n];
5158 cfg->arg_types = param_types;
5160 dont_inline = g_list_prepend (dont_inline, method);
5161 if (cfg->method == method) {
5163 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
5164 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
5167 NEW_BBLOCK (cfg, start_bblock);
5168 cfg->bb_entry = start_bblock;
5169 start_bblock->cil_code = NULL;
5170 start_bblock->cil_length = 0;
5173 NEW_BBLOCK (cfg, end_bblock);
5174 cfg->bb_exit = end_bblock;
5175 end_bblock->cil_code = NULL;
5176 end_bblock->cil_length = 0;
5177 g_assert (cfg->num_bblocks == 2);
5179 arg_array = cfg->args;
5181 if (header->num_clauses) {
5182 cfg->spvars = g_hash_table_new (NULL, NULL);
5183 cfg->exvars = g_hash_table_new (NULL, NULL);
5185 /* handle exception clauses */
5186 for (i = 0; i < header->num_clauses; ++i) {
5187 MonoBasicBlock *try_bb;
5188 MonoExceptionClause *clause = &header->clauses [i];
5189 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
5190 try_bb->real_offset = clause->try_offset;
5191 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
5192 tblock->real_offset = clause->handler_offset;
5193 tblock->flags |= BB_EXCEPTION_HANDLER;
5195 link_bblock (cfg, try_bb, tblock);
5197 if (*(ip + clause->handler_offset) == CEE_POP)
5198 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
5200 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
5201 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
5202 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
5203 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5204 MONO_ADD_INS (tblock, ins);
5206 /* todo: is a fault block unsafe to optimize? */
5207 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
5208 tblock->flags |= BB_EXCEPTION_UNSAFE;
5212 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
5214 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
5216 /* catch and filter blocks get the exception object on the stack */
5217 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
5218 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5219 MonoInst *dummy_use;
5221 /* mostly like handle_stack_args (), but just sets the input args */
5222 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
5223 tblock->in_scount = 1;
5224 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5225 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5228 * Add a dummy use for the exvar so its liveness info will be
5232 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
5234 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5235 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
5236 tblock->real_offset = clause->data.filter_offset;
5237 tblock->in_scount = 1;
5238 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5239 /* The filter block shares the exvar with the handler block */
5240 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5241 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5242 MONO_ADD_INS (tblock, ins);
5246 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
5247 clause->data.catch_class &&
5248 cfg->generic_sharing_context &&
5249 mono_class_check_context_used (clause->data.catch_class)) {
5250 if (mono_method_get_context (method)->method_inst)
5251 GENERIC_SHARING_FAILURE (CEE_NOP);
5254 * In shared generic code with catch
5255 * clauses containing type variables
5256 * the exception handling code has to
5257 * be able to get to the rgctx.
5258 * Therefore we have to make sure that
5259 * the vtable/mrgctx argument (for
5260 * static or generic methods) or the
5261 * "this" argument (for non-static
5262 * methods) are live.
5264 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5265 mini_method_get_context (method)->method_inst ||
5266 method->klass->valuetype) {
5267 mono_get_vtable_var (cfg);
5269 MonoInst *dummy_use;
5271 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
5276 arg_array = alloca (sizeof (MonoInst *) * num_args);
5277 cfg->cbb = start_bblock;
5278 cfg->args = arg_array;
5279 mono_save_args (cfg, sig, inline_args);
5282 /* FIRST CODE BLOCK */
5283 NEW_BBLOCK (cfg, bblock);
5284 bblock->cil_code = ip;
5288 ADD_BBLOCK (cfg, bblock);
5290 if (cfg->method == method) {
5291 breakpoint_id = mono_debugger_method_has_breakpoint (method);
5292 if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
5293 MONO_INST_NEW (cfg, ins, OP_BREAK);
5294 MONO_ADD_INS (bblock, ins);
5298 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5299 secman = mono_security_manager_get_methods ();
5301 security = (secman && mono_method_has_declsec (method));
5302 /* at this point having security doesn't mean we have any code to generate */
5303 if (security && (cfg->method == method)) {
5304 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
5305 * And we do not want to enter the next section (with allocation) if we
5306 * have nothing to generate */
5307 security = mono_declsec_get_demands (method, &actions);
5310 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
5311 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
5313 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5314 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5315 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
5317 /* unless the method or it's class has the [SuppressUnmanagedCodeSecurity] attribute */
5318 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5322 mono_custom_attrs_free (custom);
5325 custom = mono_custom_attrs_from_class (wrapped->klass);
5326 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5330 mono_custom_attrs_free (custom);
5333 /* not a P/Invoke after all */
5338 if ((header->init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
5339 /* we use a separate basic block for the initialization code */
5340 NEW_BBLOCK (cfg, init_localsbb);
5341 cfg->bb_init = init_localsbb;
5342 init_localsbb->real_offset = cfg->real_offset;
5343 start_bblock->next_bb = init_localsbb;
5344 init_localsbb->next_bb = bblock;
5345 link_bblock (cfg, start_bblock, init_localsbb);
5346 link_bblock (cfg, init_localsbb, bblock);
5348 cfg->cbb = init_localsbb;
5350 start_bblock->next_bb = bblock;
5351 link_bblock (cfg, start_bblock, bblock);
5354 /* at this point we know, if security is TRUE, that some code needs to be generated */
5355 if (security && (cfg->method == method)) {
5358 mono_jit_stats.cas_demand_generation++;
5360 if (actions.demand.blob) {
5361 /* Add code for SecurityAction.Demand */
5362 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
5363 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
5364 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5365 mono_emit_method_call (cfg, secman->demand, args, NULL);
5367 if (actions.noncasdemand.blob) {
5368 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
5369 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
5370 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
5371 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
5372 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5373 mono_emit_method_call (cfg, secman->demand, args, NULL);
5375 if (actions.demandchoice.blob) {
5376 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
5377 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
5378 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
5379 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
5380 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
5384 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
5386 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
5389 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
5390 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
5391 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5392 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5393 if (!(method->klass && method->klass->image &&
5394 mono_security_core_clr_is_platform_image (method->klass->image))) {
5395 emit_throw_method_access_exception (cfg, method, wrapped, bblock, ip);
5399 if (!method_is_safe (method))
5400 emit_throw_verification_exception (cfg, bblock, ip);
5403 if (header->code_size == 0)
5406 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
5411 if (cfg->method == method)
5412 mono_debug_init_method (cfg, bblock, breakpoint_id);
5414 for (n = 0; n < header->num_locals; ++n) {
5415 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
5420 /* We force the vtable variable here for all shared methods
5421 for the possibility that they might show up in a stack
5422 trace where their exact instantiation is needed. */
5423 if (cfg->generic_sharing_context)
5424 mono_get_vtable_var (cfg);
5426 /* add a check for this != NULL to inlined methods */
5427 if (is_virtual_call) {
5430 NEW_ARGLOAD (cfg, arg_ins, 0);
5431 MONO_ADD_INS (cfg->cbb, arg_ins);
5432 cfg->flags |= MONO_CFG_HAS_CHECK_THIS;
5433 MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, arg_ins->dreg);
5434 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, arg_ins->dreg);
5437 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
5438 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
5441 start_new_bblock = 0;
5445 if (cfg->method == method)
5446 cfg->real_offset = ip - header->code;
5448 cfg->real_offset = inline_offset;
5453 if (start_new_bblock) {
5454 bblock->cil_length = ip - bblock->cil_code;
5455 if (start_new_bblock == 2) {
5456 g_assert (ip == tblock->cil_code);
5458 GET_BBLOCK (cfg, tblock, ip);
5460 bblock->next_bb = tblock;
5463 start_new_bblock = 0;
5464 for (i = 0; i < bblock->in_scount; ++i) {
5465 if (cfg->verbose_level > 3)
5466 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5467 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5471 g_slist_free (class_inits);
5474 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
5475 link_bblock (cfg, bblock, tblock);
5476 if (sp != stack_start) {
5477 handle_stack_args (cfg, stack_start, sp - stack_start);
5479 CHECK_UNVERIFIABLE (cfg);
5481 bblock->next_bb = tblock;
5484 for (i = 0; i < bblock->in_scount; ++i) {
5485 if (cfg->verbose_level > 3)
5486 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5487 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5490 g_slist_free (class_inits);
5495 bblock->real_offset = cfg->real_offset;
5497 if ((cfg->method == method) && cfg->coverage_info) {
5498 guint32 cil_offset = ip - header->code;
5499 cfg->coverage_info->data [cil_offset].cil_code = ip;
5501 /* TODO: Use an increment here */
5502 #if defined(__i386__)
5503 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
5504 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
5506 MONO_ADD_INS (cfg->cbb, ins);
5508 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
5509 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
5513 if (cfg->verbose_level > 3)
5514 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
5518 if (cfg->keep_cil_nops)
5519 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
5521 MONO_INST_NEW (cfg, ins, OP_NOP);
5523 MONO_ADD_INS (bblock, ins);
5526 MONO_INST_NEW (cfg, ins, OP_BREAK);
5528 MONO_ADD_INS (bblock, ins);
5534 CHECK_STACK_OVF (1);
5535 n = (*ip)-CEE_LDARG_0;
5537 EMIT_NEW_ARGLOAD (cfg, ins, n);
5545 CHECK_STACK_OVF (1);
5546 n = (*ip)-CEE_LDLOC_0;
5548 EMIT_NEW_LOCLOAD (cfg, ins, n);
5557 n = (*ip)-CEE_STLOC_0;
5560 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
5562 emit_stloc_ir (cfg, sp, header, n);
5569 CHECK_STACK_OVF (1);
5572 EMIT_NEW_ARGLOAD (cfg, ins, n);
5578 CHECK_STACK_OVF (1);
5581 NEW_ARGLOADA (cfg, ins, n);
5582 MONO_ADD_INS (cfg->cbb, ins);
5592 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
5594 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
5599 CHECK_STACK_OVF (1);
5602 EMIT_NEW_LOCLOAD (cfg, ins, n);
5606 case CEE_LDLOCA_S: {
5607 unsigned char *tmp_ip;
5609 CHECK_STACK_OVF (1);
5610 CHECK_LOCAL (ip [1]);
5612 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
5618 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
5627 CHECK_LOCAL (ip [1]);
5628 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
5630 emit_stloc_ir (cfg, sp, header, ip [1]);
5635 CHECK_STACK_OVF (1);
5636 EMIT_NEW_PCONST (cfg, ins, NULL);
5637 ins->type = STACK_OBJ;
5642 CHECK_STACK_OVF (1);
5643 EMIT_NEW_ICONST (cfg, ins, -1);
5656 CHECK_STACK_OVF (1);
5657 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
5663 CHECK_STACK_OVF (1);
5665 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
5671 CHECK_STACK_OVF (1);
5672 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
5678 CHECK_STACK_OVF (1);
5679 MONO_INST_NEW (cfg, ins, OP_I8CONST);
5680 ins->type = STACK_I8;
5681 ins->dreg = alloc_dreg (cfg, STACK_I8);
5683 ins->inst_l = (gint64)read64 (ip);
5684 MONO_ADD_INS (bblock, ins);
5690 /* FIXME: we should really allocate this only late in the compilation process */
5691 mono_domain_lock (cfg->domain);
5692 f = mono_domain_alloc (cfg->domain, sizeof (float));
5693 mono_domain_unlock (cfg->domain);
5695 CHECK_STACK_OVF (1);
5696 MONO_INST_NEW (cfg, ins, OP_R4CONST);
5697 ins->type = STACK_R8;
5698 ins->dreg = alloc_dreg (cfg, STACK_R8);
5702 MONO_ADD_INS (bblock, ins);
5710 /* FIXME: we should really allocate this only late in the compilation process */
5711 mono_domain_lock (cfg->domain);
5712 d = mono_domain_alloc (cfg->domain, sizeof (double));
5713 mono_domain_unlock (cfg->domain);
5715 CHECK_STACK_OVF (1);
5716 MONO_INST_NEW (cfg, ins, OP_R8CONST);
5717 ins->type = STACK_R8;
5718 ins->dreg = alloc_dreg (cfg, STACK_R8);
5722 MONO_ADD_INS (bblock, ins);
5729 MonoInst *temp, *store;
5731 CHECK_STACK_OVF (1);
5735 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
5736 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
5738 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
5741 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
5754 if (sp [0]->type == STACK_R8)
5755 /* we need to pop the value from the x86 FP stack */
5756 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
5763 if (stack_start != sp)
5765 token = read32 (ip + 1);
5766 /* FIXME: check the signature matches */
5767 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
5772 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
5773 GENERIC_SHARING_FAILURE (CEE_JMP);
5775 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
5776 if (check_linkdemand (cfg, method, cmethod))
5778 CHECK_CFG_EXCEPTION;
5783 MonoMethodSignature *fsig = mono_method_signature (cmethod);
5786 /* FIXME: Remove OP_JMP from mini-amd64.c when the old JIT is removed */
5788 /* Handle tail calls similarly to calls */
5789 n = fsig->param_count + fsig->hasthis;
5791 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
5792 call->method = cmethod;
5793 call->tail_call = TRUE;
5794 call->signature = mono_method_signature (cmethod);
5795 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
5796 call->inst.inst_p0 = cmethod;
5797 for (i = 0; i < n; ++i)
5798 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
5800 mono_arch_emit_call (cfg, call);
5801 MONO_ADD_INS (bblock, (MonoInst*)call);
5804 for (i = 0; i < num_args; ++i)
5805 /* Prevent arguments from being optimized away */
5806 arg_array [i]->flags |= MONO_INST_VOLATILE;
5808 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
5809 ins = (MonoInst*)call;
5810 ins->inst_p0 = cmethod;
5811 MONO_ADD_INS (bblock, ins);
5815 start_new_bblock = 1;
5820 case CEE_CALLVIRT: {
5821 MonoInst *addr = NULL;
5822 MonoMethodSignature *fsig = NULL;
5824 int virtual = *ip == CEE_CALLVIRT;
5825 int calli = *ip == CEE_CALLI;
5826 gboolean pass_imt_from_rgctx = FALSE;
5827 MonoInst *imt_arg = NULL;
5828 gboolean pass_vtable = FALSE;
5829 gboolean pass_mrgctx = FALSE;
5830 MonoInst *vtable_arg = NULL;
5831 gboolean check_this = FALSE;
5834 token = read32 (ip + 1);
5841 if (method->wrapper_type != MONO_WRAPPER_NONE)
5842 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
5844 fsig = mono_metadata_parse_signature (image, token);
5846 n = fsig->param_count + fsig->hasthis;
5848 MonoMethod *cil_method;
5850 if (method->wrapper_type != MONO_WRAPPER_NONE) {
5851 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
5852 cil_method = cmethod;
5853 } else if (constrained_call) {
5854 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
5856 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
5857 cil_method = cmethod;
5862 if (!dont_verify && !cfg->skip_visibility) {
5863 MonoMethod *target_method = cil_method;
5864 if (method->is_inflated) {
5865 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
5867 if (!mono_method_can_access_method (method_definition, target_method) &&
5868 !mono_method_can_access_method (method, cil_method))
5869 METHOD_ACCESS_FAILURE;
5872 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
5873 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
5875 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
5876 /* MS.NET seems to silently convert this to a callvirt */
5879 if (!cmethod->klass->inited)
5880 if (!mono_class_init (cmethod->klass))
5883 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
5884 mini_class_is_system_array (cmethod->klass)) {
5885 array_rank = cmethod->klass->rank;
5886 fsig = mono_method_signature (cmethod);
5888 if (mono_method_signature (cmethod)->pinvoke) {
5889 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
5890 check_for_pending_exc, FALSE);
5891 fsig = mono_method_signature (wrapper);
5892 } else if (constrained_call) {
5893 fsig = mono_method_signature (cmethod);
5895 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
5899 mono_save_token_info (cfg, image, token, cil_method);
5901 n = fsig->param_count + fsig->hasthis;
5903 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
5904 if (check_linkdemand (cfg, method, cmethod))
5906 CHECK_CFG_EXCEPTION;
5909 if (cmethod->string_ctor)
5910 g_assert_not_reached ();
5913 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
5916 if (!cfg->generic_sharing_context && cmethod)
5917 g_assert (!mono_method_check_context_used (cmethod));
5921 //g_assert (!virtual || fsig->hasthis);
5925 if (constrained_call) {
5927 * We have the `constrained.' prefix opcode.
5929 if (constrained_call->valuetype && !cmethod->klass->valuetype) {
5933 * The type parameter is instantiated as a valuetype,
5934 * but that type doesn't override the method we're
5935 * calling, so we need to box `this'.
5937 dreg = alloc_dreg (cfg, STACK_VTYPE);
5938 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADV_MEMBASE, dreg, sp [0]->dreg, 0);
5939 ins->klass = constrained_call;
5940 sp [0] = handle_box (cfg, ins, constrained_call);
5941 } else if (!constrained_call->valuetype) {
5942 int dreg = alloc_preg (cfg);
5945 * The type parameter is instantiated as a reference
5946 * type. We have a managed pointer on the stack, so
5947 * we need to dereference it here.
5949 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
5950 ins->type = STACK_OBJ;
5952 } else if (cmethod->klass->valuetype)
5954 constrained_call = NULL;
5957 if (*ip != CEE_CALLI && check_call_signature (cfg, fsig, sp))
5961 if (cmethod && ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
5962 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
5963 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
5964 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
5965 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
5968 * Pass vtable iff target method might
5969 * be shared, which means that sharing
5970 * is enabled for its class and its
5971 * context is sharable (and it's not a
5974 if (sharing_enabled && context_sharable &&
5975 !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
5979 if (cmethod && mini_method_get_context (cmethod) &&
5980 mini_method_get_context (cmethod)->method_inst) {
5981 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
5982 MonoGenericContext *context = mini_method_get_context (cmethod);
5983 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
5985 g_assert (!pass_vtable);
5987 if (sharing_enabled && context_sharable)
5991 if (cfg->generic_sharing_context && cmethod) {
5992 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
5994 context_used = mono_method_check_context_used (cmethod);
5996 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
5997 /* Generic method interface
5998 calls are resolved via a
5999 helper function and don't
6001 if (!cmethod_context || !cmethod_context->method_inst)
6002 pass_imt_from_rgctx = TRUE;
6006 * If a shared method calls another
6007 * shared method then the caller must
6008 * have a generic sharing context
6009 * because the magic trampoline
6010 * requires it. FIXME: We shouldn't
6011 * have to force the vtable/mrgctx
6012 * variable here. Instead there
6013 * should be a flag in the cfg to
6014 * request a generic sharing context.
6017 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
6018 mono_get_vtable_var (cfg);
6023 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
6025 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
6027 CHECK_TYPELOAD (cmethod->klass);
6028 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
6033 g_assert (!vtable_arg);
6036 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
6038 EMIT_NEW_METHOD_RGCTX_CONST (cfg, vtable_arg, cmethod);
6041 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
6042 MONO_METHOD_IS_FINAL (cmethod)) {
6049 if (pass_imt_from_rgctx) {
6050 g_assert (!pass_vtable);
6053 imt_arg = emit_get_rgctx_method (cfg, context_used,
6054 cmethod, MONO_RGCTX_INFO_METHOD);
6060 MONO_INST_NEW (cfg, check, OP_CHECK_THIS);
6061 check->sreg1 = sp [0]->dreg;
6062 MONO_ADD_INS (cfg->cbb, check);
6065 /* Calling virtual generic methods */
6066 if (cmethod && virtual &&
6067 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
6068 !(MONO_METHOD_IS_FINAL (cmethod) &&
6069 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
6070 mono_method_signature (cmethod)->generic_param_count) {
6071 MonoInst *this_temp, *this_arg_temp, *store;
6072 MonoInst *iargs [4];
6074 g_assert (mono_method_signature (cmethod)->is_inflated);
6076 /* Prevent inlining of methods that contain indirect calls */
6079 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK
6080 if (!(cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) &&
6081 cmethod->wrapper_type == MONO_WRAPPER_NONE) {
6082 g_assert (!imt_arg);
6084 imt_arg = emit_get_rgctx_method (cfg, context_used,
6085 cmethod, MONO_RGCTX_INFO_METHOD_CONTEXT);
6089 cfg->disable_aot = TRUE;
6090 g_assert (cmethod->is_inflated);
6091 EMIT_NEW_PCONST (cfg, imt_arg,
6092 ((MonoMethodInflated*)cmethod)->context.method_inst);
6094 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, sp [0], imt_arg);
6098 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
6099 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
6100 MONO_ADD_INS (bblock, store);
6102 /* FIXME: This should be a managed pointer */
6103 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
6105 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
6107 iargs [1] = emit_get_rgctx_method (cfg, context_used,
6108 cmethod, MONO_RGCTX_INFO_METHOD);
6109 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
6110 addr = mono_emit_jit_icall (cfg,
6111 mono_helper_compile_generic_method, iargs);
6113 EMIT_NEW_METHODCONST (cfg, iargs [1], cmethod);
6114 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
6115 addr = mono_emit_jit_icall (cfg, mono_helper_compile_generic_method, iargs);
6118 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
6120 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6123 if (!MONO_TYPE_IS_VOID (fsig->ret))
6132 /* FIXME: runtime generic context pointer for jumps? */
6133 /* FIXME: handle this for generic sharing eventually */
6134 if ((ins_flag & MONO_INST_TAILCALL) && !cfg->generic_sharing_context && !vtable_arg && cmethod && (*ip == CEE_CALL) &&
6135 (mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)))) {
6138 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6141 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6142 call->tail_call = TRUE;
6143 call->method = cmethod;
6144 call->signature = mono_method_signature (cmethod);
6147 /* Handle tail calls similarly to calls */
6148 call->inst.opcode = OP_TAILCALL;
6150 mono_arch_emit_call (cfg, call);
6153 * We implement tail calls by storing the actual arguments into the
6154 * argument variables, then emitting a CEE_JMP.
6156 for (i = 0; i < n; ++i) {
6157 /* Prevent argument from being register allocated */
6158 arg_array [i]->flags |= MONO_INST_VOLATILE;
6159 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6163 ins = (MonoInst*)call;
6164 ins->inst_p0 = cmethod;
6165 ins->inst_p1 = arg_array [0];
6166 MONO_ADD_INS (bblock, ins);
6167 link_bblock (cfg, bblock, end_bblock);
6168 start_new_bblock = 1;
6169 /* skip CEE_RET as well */
6175 /* Conversion to a JIT intrinsic */
6176 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
6177 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6178 type_to_eval_stack_type ((cfg), fsig->ret, ins);
6189 if ((cfg->opt & MONO_OPT_INLINE) && cmethod &&
6190 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
6191 mono_method_check_inlining (cfg, cmethod) &&
6192 !g_list_find (dont_inline, cmethod)) {
6194 gboolean allways = FALSE;
6196 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
6197 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
6198 /* Prevent inlining of methods that call wrappers */
6200 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
6204 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, allways))) {
6206 cfg->real_offset += 5;
6209 if (!MONO_TYPE_IS_VOID (fsig->ret))
6210 /* *sp is already set by inline_method */
6213 inline_costs += costs;
6219 inline_costs += 10 * num_calls++;
6221 /* Tail recursion elimination */
6222 if ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
6223 gboolean has_vtargs = FALSE;
6226 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6229 /* keep it simple */
6230 for (i = fsig->param_count - 1; i >= 0; i--) {
6231 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
6236 for (i = 0; i < n; ++i)
6237 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6238 MONO_INST_NEW (cfg, ins, OP_BR);
6239 MONO_ADD_INS (bblock, ins);
6240 tblock = start_bblock->out_bb [0];
6241 link_bblock (cfg, bblock, tblock);
6242 ins->inst_target_bb = tblock;
6243 start_new_bblock = 1;
6245 /* skip the CEE_RET, too */
6246 if (ip_in_bb (cfg, bblock, ip + 5))
6256 /* Generic sharing */
6257 /* FIXME: only do this for generic methods if
6258 they are not shared! */
6259 if (context_used && !imt_arg && !array_rank &&
6260 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
6261 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
6262 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
6263 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
6266 g_assert (cfg->generic_sharing_context && cmethod);
6270 * We are compiling a call to a
6271 * generic method from shared code,
6272 * which means that we have to look up
6273 * the method in the rgctx and do an
6276 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
6279 /* Indirect calls */
6281 g_assert (!imt_arg);
6283 if (*ip == CEE_CALL)
6284 g_assert (context_used);
6285 else if (*ip == CEE_CALLI)
6286 g_assert (!vtable_arg);
6288 /* FIXME: the assertion below is a tautology (X || !X) and always holds; clarify its intent or remove it */
6289 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
6290 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
6292 /* Prevent inlining of methods with indirect calls */
6296 #ifdef MONO_ARCH_RGCTX_REG
6298 int rgctx_reg = mono_alloc_preg (cfg);
6300 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
6301 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6302 call = (MonoCallInst*)ins;
6303 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
6304 cfg->uses_rgctx_reg = TRUE;
6309 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6311 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6312 if (fsig->pinvoke && !fsig->ret->byref) {
6316 * Native code might return non register sized integers
6317 * without initializing the upper bits.
6319 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
6320 case OP_LOADI1_MEMBASE:
6321 widen_op = OP_ICONV_TO_I1;
6323 case OP_LOADU1_MEMBASE:
6324 widen_op = OP_ICONV_TO_U1;
6326 case OP_LOADI2_MEMBASE:
6327 widen_op = OP_ICONV_TO_I2;
6329 case OP_LOADU2_MEMBASE:
6330 widen_op = OP_ICONV_TO_U2;
6336 if (widen_op != -1) {
6337 int dreg = alloc_preg (cfg);
6340 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
6341 widen->type = ins->type;
6358 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
6359 if (sp [fsig->param_count]->type == STACK_OBJ) {
6360 MonoInst *iargs [2];
6363 iargs [1] = sp [fsig->param_count];
6365 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
6368 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
6369 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, sp [fsig->param_count]->dreg);
6370 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
6371 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6373 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
6376 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
6377 if (!cmethod->klass->element_class->valuetype && !readonly)
6378 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
6381 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6384 g_assert_not_reached ();
6392 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
6394 if (!MONO_TYPE_IS_VOID (fsig->ret))
6405 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL,
6407 } else if (imt_arg) {
6408 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, imt_arg);
6410 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, NULL);
6413 if (!MONO_TYPE_IS_VOID (fsig->ret))
6421 if (cfg->method != method) {
6422 /* return from inlined method */
6424 * If in_count == 0, that means the ret is unreachable due to
6425 * being preceded by a throw. In that case, inline_method () will
6426 * handle setting the return value
6427 * (test case: test_0_inline_throw ()).
6429 if (return_var && cfg->cbb->in_count) {
6433 //g_assert (returnvar != -1);
6434 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
6435 cfg->ret_var_set = TRUE;
6439 MonoType *ret_type = mono_method_signature (method)->ret;
6441 g_assert (!return_var);
6444 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
6447 if (!cfg->vret_addr) {
6450 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
6452 EMIT_NEW_RETLOADA (cfg, ret_addr);
6454 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
6455 ins->klass = mono_class_from_mono_type (ret_type);
6458 #ifdef MONO_ARCH_SOFT_FLOAT
6459 if (!ret_type->byref && ret_type->type == MONO_TYPE_R4) {
6460 MonoInst *iargs [1];
6464 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
6465 mono_arch_emit_setret (cfg, method, conv);
6467 mono_arch_emit_setret (cfg, method, *sp);
6470 mono_arch_emit_setret (cfg, method, *sp);
6475 if (sp != stack_start)
6477 MONO_INST_NEW (cfg, ins, OP_BR);
6479 ins->inst_target_bb = end_bblock;
6480 MONO_ADD_INS (bblock, ins);
6481 link_bblock (cfg, bblock, end_bblock);
6482 start_new_bblock = 1;
6486 MONO_INST_NEW (cfg, ins, OP_BR);
6488 target = ip + 1 + (signed char)(*ip);
6490 GET_BBLOCK (cfg, tblock, target);
6491 link_bblock (cfg, bblock, tblock);
6492 ins->inst_target_bb = tblock;
6493 if (sp != stack_start) {
6494 handle_stack_args (cfg, stack_start, sp - stack_start);
6496 CHECK_UNVERIFIABLE (cfg);
6498 MONO_ADD_INS (bblock, ins);
6499 start_new_bblock = 1;
6500 inline_costs += BRANCH_COST;
6514 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
6516 target = ip + 1 + *(signed char*)ip;
6522 inline_costs += BRANCH_COST;
6526 MONO_INST_NEW (cfg, ins, OP_BR);
6529 target = ip + 4 + (gint32)read32(ip);
6531 GET_BBLOCK (cfg, tblock, target);
6532 link_bblock (cfg, bblock, tblock);
6533 ins->inst_target_bb = tblock;
6534 if (sp != stack_start) {
6535 handle_stack_args (cfg, stack_start, sp - stack_start);
6537 CHECK_UNVERIFIABLE (cfg);
6540 MONO_ADD_INS (bblock, ins);
6542 start_new_bblock = 1;
6543 inline_costs += BRANCH_COST;
6550 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
6551 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
6552 guint32 opsize = is_short ? 1 : 4;
6554 CHECK_OPSIZE (opsize);
6556 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
6559 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
6564 GET_BBLOCK (cfg, tblock, target);
6565 link_bblock (cfg, bblock, tblock);
6566 GET_BBLOCK (cfg, tblock, ip);
6567 link_bblock (cfg, bblock, tblock);
6569 if (sp != stack_start) {
6570 handle_stack_args (cfg, stack_start, sp - stack_start);
6571 CHECK_UNVERIFIABLE (cfg);
6574 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
6575 cmp->sreg1 = sp [0]->dreg;
6576 type_from_op (cmp, sp [0], NULL);
6579 #if SIZEOF_VOID_P == 4
6580 if (cmp->opcode == OP_LCOMPARE_IMM) {
6581 /* Convert it to OP_LCOMPARE */
6582 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6583 ins->type = STACK_I8;
6584 ins->dreg = alloc_dreg (cfg, STACK_I8);
6586 MONO_ADD_INS (bblock, ins);
6587 cmp->opcode = OP_LCOMPARE;
6588 cmp->sreg2 = ins->dreg;
6591 MONO_ADD_INS (bblock, cmp);
6593 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
6594 type_from_op (ins, sp [0], NULL);
6595 MONO_ADD_INS (bblock, ins);
6596 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
6597 GET_BBLOCK (cfg, tblock, target);
6598 ins->inst_true_bb = tblock;
6599 GET_BBLOCK (cfg, tblock, ip);
6600 ins->inst_false_bb = tblock;
6601 start_new_bblock = 2;
6604 inline_costs += BRANCH_COST;
6619 MONO_INST_NEW (cfg, ins, *ip);
6621 target = ip + 4 + (gint32)read32(ip);
6627 inline_costs += BRANCH_COST;
6631 MonoBasicBlock **targets;
6632 MonoBasicBlock *default_bblock;
6633 MonoJumpInfoBBTable *table;
6634 int offset_reg = alloc_preg (cfg);
6635 int target_reg = alloc_preg (cfg);
6636 int table_reg = alloc_preg (cfg);
6637 int sum_reg = alloc_preg (cfg);
6638 gboolean use_op_switch;
6642 n = read32 (ip + 1);
6645 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
6649 CHECK_OPSIZE (n * sizeof (guint32));
6650 target = ip + n * sizeof (guint32);
6652 GET_BBLOCK (cfg, default_bblock, target);
6654 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
6655 for (i = 0; i < n; ++i) {
6656 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
6657 targets [i] = tblock;
6661 if (sp != stack_start) {
6663 * Link the current bb with the targets as well, so handle_stack_args
6664 * will set their in_stack correctly.
6666 link_bblock (cfg, bblock, default_bblock);
6667 for (i = 0; i < n; ++i)
6668 link_bblock (cfg, bblock, targets [i]);
6670 handle_stack_args (cfg, stack_start, sp - stack_start);
6672 CHECK_UNVERIFIABLE (cfg);
6675 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
6676 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
6679 for (i = 0; i < n; ++i)
6680 link_bblock (cfg, bblock, targets [i]);
6682 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
6683 table->table = targets;
6684 table->table_size = n;
6686 use_op_switch = FALSE;
6688 /* ARM implements SWITCH statements differently */
6689 /* FIXME: Make it use the generic implementation */
6690 if (!cfg->compile_aot)
6691 use_op_switch = TRUE;
6694 if (use_op_switch) {
6695 MONO_INST_NEW (cfg, ins, OP_SWITCH);
6696 ins->sreg1 = src1->dreg;
6697 ins->inst_p0 = table;
6698 ins->inst_many_bb = targets;
6699 ins->klass = GUINT_TO_POINTER (n);
6700 MONO_ADD_INS (cfg->cbb, ins);
6702 if (sizeof (gpointer) == 8)
6703 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
6705 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
6707 #if SIZEOF_VOID_P == 8
6708 /* The upper word might not be zero, and we add it to a 64 bit address later */
6709 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
6712 if (cfg->compile_aot) {
6713 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
6715 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
6716 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
6717 ins->inst_p0 = table;
6718 ins->dreg = table_reg;
6719 MONO_ADD_INS (cfg->cbb, ins);
6722 /* FIXME: Use load_memindex */
6723 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
6724 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
6725 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
6727 start_new_bblock = 1;
6728 inline_costs += (BRANCH_COST * 2);
6748 dreg = alloc_freg (cfg);
6751 dreg = alloc_lreg (cfg);
6754 dreg = alloc_preg (cfg);
6757 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
6758 ins->type = ldind_type [*ip - CEE_LDIND_I1];
6759 ins->flags |= ins_flag;
6761 MONO_ADD_INS (bblock, ins);
6776 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
6777 ins->flags |= ins_flag;
6779 MONO_ADD_INS (bblock, ins);
6787 MONO_INST_NEW (cfg, ins, (*ip));
6789 ins->sreg1 = sp [0]->dreg;
6790 ins->sreg2 = sp [1]->dreg;
6791 type_from_op (ins, sp [0], sp [1]);
6793 ins->dreg = alloc_dreg ((cfg), (ins)->type);
6795 /* Use the immediate opcodes if possible */
6796 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
6797 int imm_opcode = mono_op_to_op_imm (ins->opcode);
6798 if (imm_opcode != -1) {
6799 ins->opcode = imm_opcode;
6800 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
6803 sp [1]->opcode = OP_NOP;
6807 MONO_ADD_INS ((cfg)->cbb, (ins));
6810 mono_decompose_opcode (cfg, ins);
6827 MONO_INST_NEW (cfg, ins, (*ip));
6829 ins->sreg1 = sp [0]->dreg;
6830 ins->sreg2 = sp [1]->dreg;
6831 type_from_op (ins, sp [0], sp [1]);
6833 ADD_WIDEN_OP (ins, sp [0], sp [1]);
6834 ins->dreg = alloc_dreg ((cfg), (ins)->type);
6836 /* FIXME: Pass opcode to is_inst_imm */
6838 /* Use the immediate opcodes if possible */
6839 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
6842 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
6843 if (imm_opcode != -1) {
6844 ins->opcode = imm_opcode;
6845 if (sp [1]->opcode == OP_I8CONST) {
6846 #if SIZEOF_VOID_P == 8
6847 ins->inst_imm = sp [1]->inst_l;
6849 ins->inst_ls_word = sp [1]->inst_ls_word;
6850 ins->inst_ms_word = sp [1]->inst_ms_word;
6854 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
6857 /* Might be followed by an instruction added by ADD_WIDEN_OP */
6858 if (sp [1]->next == NULL)
6859 sp [1]->opcode = OP_NOP;
6862 MONO_ADD_INS ((cfg)->cbb, (ins));
6865 mono_decompose_opcode (cfg, ins);
6878 case CEE_CONV_OVF_I8:
6879 case CEE_CONV_OVF_U8:
6883 /* Special case this earlier so we have long constants in the IR */
6884 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
6885 int data = sp [-1]->inst_c0;
6886 sp [-1]->opcode = OP_I8CONST;
6887 sp [-1]->type = STACK_I8;
6888 #if SIZEOF_VOID_P == 8
6889 if ((*ip) == CEE_CONV_U8)
6890 sp [-1]->inst_c0 = (guint32)data;
6892 sp [-1]->inst_c0 = data;
6894 sp [-1]->inst_ls_word = data;
6895 if ((*ip) == CEE_CONV_U8)
6896 sp [-1]->inst_ms_word = 0;
6898 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
6900 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
6907 case CEE_CONV_OVF_I4:
6908 case CEE_CONV_OVF_I1:
6909 case CEE_CONV_OVF_I2:
6910 case CEE_CONV_OVF_I:
6911 case CEE_CONV_OVF_U:
6914 if (sp [-1]->type == STACK_R8) {
6915 ADD_UNOP (CEE_CONV_OVF_I8);
6922 case CEE_CONV_OVF_U1:
6923 case CEE_CONV_OVF_U2:
6924 case CEE_CONV_OVF_U4:
6927 if (sp [-1]->type == STACK_R8) {
6928 ADD_UNOP (CEE_CONV_OVF_U8);
6935 case CEE_CONV_OVF_I1_UN:
6936 case CEE_CONV_OVF_I2_UN:
6937 case CEE_CONV_OVF_I4_UN:
6938 case CEE_CONV_OVF_I8_UN:
6939 case CEE_CONV_OVF_U1_UN:
6940 case CEE_CONV_OVF_U2_UN:
6941 case CEE_CONV_OVF_U4_UN:
6942 case CEE_CONV_OVF_U8_UN:
6943 case CEE_CONV_OVF_I_UN:
6944 case CEE_CONV_OVF_U_UN:
6954 case CEE_ADD_OVF_UN:
6956 case CEE_MUL_OVF_UN:
6958 case CEE_SUB_OVF_UN:
6966 token = read32 (ip + 1);
6967 klass = mini_get_class (method, token, generic_context);
6968 CHECK_TYPELOAD (klass);
6970 if (generic_class_is_reference_type (cfg, klass)) {
6971 MonoInst *store, *load;
6972 int dreg = alloc_preg (cfg);
6974 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
6975 load->flags |= ins_flag;
6976 MONO_ADD_INS (cfg->cbb, load);
6978 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
6979 store->flags |= ins_flag;
6980 MONO_ADD_INS (cfg->cbb, store);
6982 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
6994 token = read32 (ip + 1);
6995 klass = mini_get_class (method, token, generic_context);
6996 CHECK_TYPELOAD (klass);
6998 /* Optimize the common ldobj+stloc combination */
7008 loc_index = ip [5] - CEE_STLOC_0;
7015 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
7016 CHECK_LOCAL (loc_index);
7018 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7019 ins->dreg = cfg->locals [loc_index]->dreg;
7025 /* Optimize the ldobj+stobj combination */
7026 /* The reference case ends up being a load+store anyway */
7027 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 9) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
7032 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7039 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7048 CHECK_STACK_OVF (1);
7050 n = read32 (ip + 1);
7052 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
7053 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
7054 ins->type = STACK_OBJ;
7057 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
7058 MonoInst *iargs [1];
7060 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
7061 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
7063 if (cfg->opt & MONO_OPT_SHARED) {
7064 MonoInst *iargs [3];
7066 if (cfg->compile_aot) {
7067 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
7069 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7070 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
7071 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
7072 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
7073 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7075 if (bblock->out_of_line) {
7076 MonoInst *iargs [2];
7078 if (image == mono_defaults.corlib) {
7080 * Avoid relocations in AOT and save some space by using a
7081 * version of helper_ldstr specialized to mscorlib.
7083 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
7084 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
7086 /* Avoid creating the string object */
7087 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
7088 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
7089 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
7093 if (cfg->compile_aot) {
7094 NEW_LDSTRCONST (cfg, ins, image, n);
7096 MONO_ADD_INS (bblock, ins);
7099 NEW_PCONST (cfg, ins, NULL);
7100 ins->type = STACK_OBJ;
7101 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7103 MONO_ADD_INS (bblock, ins);
7112 MonoInst *iargs [2];
7113 MonoMethodSignature *fsig;
7116 MonoInst *vtable_arg = NULL;
7119 token = read32 (ip + 1);
7120 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7123 fsig = mono_method_get_signature (cmethod, image, token);
7125 mono_save_token_info (cfg, image, token, cmethod);
7127 if (!mono_class_init (cmethod->klass))
7130 if (cfg->generic_sharing_context)
7131 context_used = mono_method_check_context_used (cmethod);
7133 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
7134 if (check_linkdemand (cfg, method, cmethod))
7136 CHECK_CFG_EXCEPTION;
7137 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
7138 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
7141 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7142 mono_method_is_generic_sharable_impl (cmethod, TRUE)) {
7143 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7145 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7146 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7148 EMIT_NEW_METHOD_RGCTX_CONST (cfg, vtable_arg, cmethod);
7152 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
7153 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7155 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7157 CHECK_TYPELOAD (cmethod->klass);
7158 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7163 n = fsig->param_count;
7167 * Generate smaller code for the common newobj <exception> instruction in
7168 * argument checking code.
7170 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib && n <= 2 &&
7171 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
7172 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
7173 MonoInst *iargs [3];
7175 g_assert (!vtable_arg);
7179 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
7182 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
7186 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
7191 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
7194 g_assert_not_reached ();
7202 /* move the args to allow room for 'this' in the first position */
7208 /* check_call_signature () requires sp[0] to be set */
7209 this_ins.type = STACK_OBJ;
7211 if (check_call_signature (cfg, fsig, sp))
7216 if (mini_class_is_system_array (cmethod->klass)) {
7218 GENERIC_SHARING_FAILURE (*ip);
7219 g_assert (!context_used);
7220 g_assert (!vtable_arg);
7221 EMIT_NEW_METHODCONST (cfg, *sp, cmethod);
7223 /* Avoid varargs in the common case */
7224 if (fsig->param_count == 1)
7225 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
7226 else if (fsig->param_count == 2)
7227 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
7229 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
7230 } else if (cmethod->string_ctor) {
7231 g_assert (!context_used);
7232 g_assert (!vtable_arg);
7233 /* we simply pass a null pointer */
7234 EMIT_NEW_PCONST (cfg, *sp, NULL);
7235 /* now call the string ctor */
7236 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, sp, NULL, NULL);
7238 MonoInst* callvirt_this_arg = NULL;
7240 if (cmethod->klass->valuetype) {
7241 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
7242 MONO_EMIT_NEW_VZERO (cfg, iargs [0]->dreg, cmethod->klass);
7243 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
7248 * The code generated by mini_emit_virtual_call () expects
7249 * iargs [0] to be a boxed instance, but luckily the vcall
7250 * will be transformed into a normal call there.
7252 } else if (context_used) {
7256 if (cfg->opt & MONO_OPT_SHARED)
7257 rgctx_info = MONO_RGCTX_INFO_KLASS;
7259 rgctx_info = MONO_RGCTX_INFO_VTABLE;
7260 data = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, rgctx_info);
7262 alloc = handle_alloc_from_inst (cfg, cmethod->klass, data, FALSE);
7265 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7267 CHECK_TYPELOAD (cmethod->klass);
7270 * TypeInitializationExceptions thrown from the mono_runtime_class_init
7271 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
7272 * As a workaround, we call class cctors before allocating objects.
7274 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
7275 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
7276 if (cfg->verbose_level > 2)
7277 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
7278 class_inits = g_slist_prepend (class_inits, vtable);
7281 alloc = handle_alloc (cfg, cmethod->klass, FALSE);
7286 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
7288 /* Now call the actual ctor */
7289 /* Avoid virtual calls to ctors if possible */
7290 if (cmethod->klass->marshalbyref)
7291 callvirt_this_arg = sp [0];
7293 if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7294 mono_method_check_inlining (cfg, cmethod) &&
7295 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
7296 !g_list_find (dont_inline, cmethod)) {
7299 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
7300 cfg->real_offset += 5;
7303 inline_costs += costs - 5;
7306 mono_emit_method_call_full (cfg, cmethod, fsig, sp, callvirt_this_arg, NULL);
7308 } else if (context_used &&
7309 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
7310 !mono_class_generic_sharing_enabled (cmethod->klass))) {
7311 MonoInst *cmethod_addr;
7313 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7314 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7316 mono_emit_rgctx_calli (cfg, fsig, sp, cmethod_addr, vtable_arg);
7319 mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp,
7320 callvirt_this_arg, NULL, vtable_arg);
7324 if (alloc == NULL) {
7326 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
7327 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
7341 token = read32 (ip + 1);
7342 klass = mini_get_class (method, token, generic_context);
7343 CHECK_TYPELOAD (klass);
7344 if (sp [0]->type != STACK_OBJ)
7347 if (cfg->generic_sharing_context)
7348 context_used = mono_class_check_context_used (klass);
7357 args [1] = emit_get_rgctx_klass (cfg, context_used,
7358 klass, MONO_RGCTX_INFO_KLASS);
7360 ins = mono_emit_jit_icall (cfg, mono_object_castclass, args);
7364 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7365 MonoMethod *mono_castclass;
7366 MonoInst *iargs [1];
7369 mono_castclass = mono_marshal_get_castclass (klass);
7372 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7373 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7374 g_assert (costs > 0);
7377 cfg->real_offset += 5;
7382 inline_costs += costs;
7385 ins = handle_castclass (cfg, klass, *sp);
7395 token = read32 (ip + 1);
7396 klass = mini_get_class (method, token, generic_context);
7397 CHECK_TYPELOAD (klass);
7398 if (sp [0]->type != STACK_OBJ)
7401 if (cfg->generic_sharing_context)
7402 context_used = mono_class_check_context_used (klass);
7411 args [1] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
7413 *sp = mono_emit_jit_icall (cfg, mono_object_isinst, args);
7417 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7418 MonoMethod *mono_isinst;
7419 MonoInst *iargs [1];
7422 mono_isinst = mono_marshal_get_isinst (klass);
7425 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
7426 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7427 g_assert (costs > 0);
7430 cfg->real_offset += 5;
7435 inline_costs += costs;
7438 ins = handle_isinst (cfg, klass, *sp);
7445 case CEE_UNBOX_ANY: {
7449 token = read32 (ip + 1);
7450 klass = mini_get_class (method, token, generic_context);
7451 CHECK_TYPELOAD (klass);
7453 mono_save_token_info (cfg, image, token, klass);
7455 if (cfg->generic_sharing_context)
7456 context_used = mono_class_check_context_used (klass);
7458 if (generic_class_is_reference_type (cfg, klass)) {
7461 MonoInst *iargs [2];
7466 iargs [1] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
7467 ins = mono_emit_jit_icall (cfg, mono_object_castclass, iargs);
7471 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7472 MonoMethod *mono_castclass;
7473 MonoInst *iargs [1];
7476 mono_castclass = mono_marshal_get_castclass (klass);
7479 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7480 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7482 g_assert (costs > 0);
7485 cfg->real_offset += 5;
7489 inline_costs += costs;
7491 ins = handle_castclass (cfg, klass, *sp);
7499 if (mono_class_is_nullable (klass)) {
7500 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
7507 ins = handle_unbox (cfg, klass, sp, context_used);
7513 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7526 token = read32 (ip + 1);
7527 klass = mini_get_class (method, token, generic_context);
7528 CHECK_TYPELOAD (klass);
7530 mono_save_token_info (cfg, image, token, klass);
7532 if (cfg->generic_sharing_context)
7533 context_used = mono_class_check_context_used (klass);
7535 if (generic_class_is_reference_type (cfg, klass)) {
7541 if (klass == mono_defaults.void_class)
7543 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
7545 /* frequent check in generic code: box (struct), brtrue */
7546 if (!mono_class_is_nullable (klass) &&
7547 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) && (ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S)) {
7548 /*printf ("box-brtrue opt at 0x%04x in %s\n", real_offset, method->name);*/
7550 MONO_INST_NEW (cfg, ins, OP_BR);
7551 if (*ip == CEE_BRTRUE_S) {
7554 target = ip + 1 + (signed char)(*ip);
7559 target = ip + 4 + (gint)(read32 (ip));
7562 GET_BBLOCK (cfg, tblock, target);
7563 link_bblock (cfg, bblock, tblock);
7564 ins->inst_target_bb = tblock;
7565 GET_BBLOCK (cfg, tblock, ip);
7567 * This leads to some inconsistency, since the two bblocks are not
7568 * really connected, but it is needed for handling stack arguments
7569 * correctly (see test_0_box_brtrue_opt_regress_81102).
7571 link_bblock (cfg, bblock, tblock);
7572 if (sp != stack_start) {
7573 handle_stack_args (cfg, stack_start, sp - stack_start);
7575 CHECK_UNVERIFIABLE (cfg);
7577 MONO_ADD_INS (bblock, ins);
7578 start_new_bblock = 1;
7586 if (cfg->opt & MONO_OPT_SHARED)
7587 rgctx_info = MONO_RGCTX_INFO_KLASS;
7589 rgctx_info = MONO_RGCTX_INFO_VTABLE;
7590 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
7591 *sp++ = handle_box_from_inst (cfg, val, klass, context_used, data);
7593 *sp++ = handle_box (cfg, val, klass);
7604 token = read32 (ip + 1);
7605 klass = mini_get_class (method, token, generic_context);
7606 CHECK_TYPELOAD (klass);
7608 mono_save_token_info (cfg, image, token, klass);
7610 if (cfg->generic_sharing_context)
7611 context_used = mono_class_check_context_used (klass);
7613 if (mono_class_is_nullable (klass)) {
7616 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
7617 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
7621 ins = handle_unbox (cfg, klass, sp, context_used);
7631 MonoClassField *field;
7635 if (*ip == CEE_STFLD) {
7642 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
7644 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
7647 token = read32 (ip + 1);
7648 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7649 field = mono_method_get_wrapper_data (method, token);
7650 klass = field->parent;
7653 field = mono_field_from_token (image, token, &klass, generic_context);
7657 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
7658 FIELD_ACCESS_FAILURE;
7659 mono_class_init (klass);
7661 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
7662 if (*ip == CEE_STFLD) {
7663 if (target_type_is_incompatible (cfg, field->type, sp [1]))
7665 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
7666 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
7667 MonoInst *iargs [5];
7670 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
7671 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
7672 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
7676 if (cfg->opt & MONO_OPT_INLINE) {
7677 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
7678 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7679 g_assert (costs > 0);
7681 cfg->real_offset += 5;
7684 inline_costs += costs;
7686 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
7691 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
7693 store->flags |= ins_flag;
7700 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
7701 MonoMethod *wrapper = (*ip == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
7702 MonoInst *iargs [4];
7705 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
7706 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
7707 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
7708 if ((cfg->opt & MONO_OPT_INLINE) && !MONO_TYPE_ISSTRUCT (mono_method_signature (wrapper)->ret)) {
7709 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
7710 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7712 g_assert (costs > 0);
7714 cfg->real_offset += 5;
7718 inline_costs += costs;
7720 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
7724 if (sp [0]->type == STACK_VTYPE) {
7727 /* Have to compute the address of the variable */
7729 var = get_vreg_to_inst (cfg, sp [0]->dreg);
7731 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
7733 g_assert (var->klass == klass);
7735 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
7739 if (*ip == CEE_LDFLDA) {
7740 dreg = alloc_preg (cfg);
7742 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
7743 ins->klass = mono_class_from_mono_type (field->type);
7744 ins->type = STACK_MP;
7749 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
7750 load->flags |= ins_flag;
7761 MonoClassField *field;
7762 gpointer addr = NULL;
7763 gboolean is_special_static;
7766 token = read32 (ip + 1);
7768 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7769 field = mono_method_get_wrapper_data (method, token);
7770 klass = field->parent;
7773 field = mono_field_from_token (image, token, &klass, generic_context);
7776 mono_class_init (klass);
7777 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
7778 FIELD_ACCESS_FAILURE;
7781 * We can only support shared generic static
7782 * field access on architectures where the
7783 * trampoline code has been extended to handle
7784 * the generic class init.
7786 #ifndef MONO_ARCH_VTABLE_REG
7787 GENERIC_SHARING_FAILURE (*ip);
7790 if (cfg->generic_sharing_context)
7791 context_used = mono_class_check_context_used (klass);
7793 g_assert (!(field->type->attrs & FIELD_ATTRIBUTE_LITERAL));
7795 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
7796 * to be called here.
7798 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
7799 mono_class_vtable (cfg->domain, klass);
7800 CHECK_TYPELOAD (klass);
7802 mono_domain_lock (cfg->domain);
7803 if (cfg->domain->special_static_fields)
7804 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
7805 mono_domain_unlock (cfg->domain);
7807 is_special_static = mono_class_field_is_special_static (field);
7809 /* Generate IR to compute the field address */
7811 if ((cfg->opt & MONO_OPT_SHARED) ||
7812 (cfg->compile_aot && is_special_static) ||
7813 (context_used && is_special_static)) {
7814 MonoInst *iargs [2];
7816 g_assert (field->parent);
7817 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7819 iargs [1] = emit_get_rgctx_field (cfg, context_used,
7820 field, MONO_RGCTX_INFO_CLASS_FIELD);
7822 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
7824 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
7825 } else if (context_used) {
7826 MonoInst *static_data;
7829 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
7830 method->klass->name_space, method->klass->name, method->name,
7831 depth, field->offset);
7834 if (mono_class_needs_cctor_run (klass, method)) {
7838 vtable = emit_get_rgctx_klass (cfg, context_used,
7839 klass, MONO_RGCTX_INFO_VTABLE);
7841 // FIXME: This doesn't work since it tries to pass the argument
7842 // in the normal way, instead of using MONO_ARCH_VTABLE_REG
7844 * The vtable pointer is always passed in a register regardless of
7845 * the calling convention, so assign it manually, and make a call
7846 * using a signature without parameters.
7848 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable);
7849 #ifdef MONO_ARCH_VTABLE_REG
7850 mono_call_inst_add_outarg_reg (cfg, call, vtable->dreg, MONO_ARCH_VTABLE_REG, FALSE);
7851 cfg->uses_vtable_reg = TRUE;
7858 * The pointer we're computing here is
7860 * super_info.static_data + field->offset
7862 static_data = emit_get_rgctx_klass (cfg, context_used,
7863 klass, MONO_RGCTX_INFO_STATIC_DATA);
7865 if (field->offset == 0) {
7868 int addr_reg = mono_alloc_preg (cfg);
7869 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
7871 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
7872 MonoInst *iargs [2];
7874 g_assert (field->parent);
7875 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7876 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
7877 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
7879 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
7881 CHECK_TYPELOAD (klass);
7883 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
7884 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
7885 if (cfg->verbose_level > 2)
7886 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
7887 class_inits = g_slist_prepend (class_inits, vtable);
7889 if (cfg->run_cctors) {
7891 /* This makes it so that inlining cannot trigger */
7892 /* .cctors: too many apps depend on them */
7893 /* running with a specific order... */
7894 if (! vtable->initialized)
7896 ex = mono_runtime_class_init_full (vtable, FALSE);
7898 set_exception_object (cfg, ex);
7899 goto exception_exit;
7903 addr = (char*)vtable->data + field->offset;
7905 if (cfg->compile_aot)
7906 EMIT_NEW_SFLDACONST (cfg, ins, field);
7908 EMIT_NEW_PCONST (cfg, ins, addr);
7911 * insert call to mono_threads_get_static_data (GPOINTER_TO_UINT (addr))
7912 * This could be later optimized to do just a couple of
7913 * memory dereferences with constant offsets.
7915 MonoInst *iargs [1];
7916 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
7917 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
7921 /* Generate IR to do the actual load/store operation */
7923 if (*ip == CEE_LDSFLDA) {
7924 ins->klass = mono_class_from_mono_type (field->type);
7925 ins->type = STACK_PTR;
7927 } else if (*ip == CEE_STSFLD) {
7932 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, ins->dreg, 0, sp [0]->dreg);
7933 store->flags |= ins_flag;
7935 gboolean is_const = FALSE;
7936 MonoVTable *vtable = NULL;
7938 if (!context_used) {
7939 vtable = mono_class_vtable (cfg->domain, klass);
7940 CHECK_TYPELOAD (klass);
7942 if (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) &&
7943 vtable->initialized && (field->type->attrs & FIELD_ATTRIBUTE_INIT_ONLY)) {
7944 gpointer addr = (char*)vtable->data + field->offset;
7945 int ro_type = field->type->type;
7946 if (ro_type == MONO_TYPE_VALUETYPE && field->type->data.klass->enumtype) {
7947 ro_type = field->type->data.klass->enum_basetype->type;
7949 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
7952 case MONO_TYPE_BOOLEAN:
7954 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
7958 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
7961 case MONO_TYPE_CHAR:
7963 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
7967 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
7972 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
7976 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
7981 case MONO_TYPE_STRING:
7982 case MONO_TYPE_OBJECT:
7983 case MONO_TYPE_CLASS:
7984 case MONO_TYPE_SZARRAY:
7986 case MONO_TYPE_FNPTR:
7987 case MONO_TYPE_ARRAY:
7988 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
7989 type_to_eval_stack_type ((cfg), field->type, *sp);
7994 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
7999 case MONO_TYPE_VALUETYPE:
8009 CHECK_STACK_OVF (1);
8011 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
8012 load->flags |= ins_flag;
8025 token = read32 (ip + 1);
8026 klass = mini_get_class (method, token, generic_context);
8027 CHECK_TYPELOAD (klass);
8028 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
8029 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
8040 const char *data_ptr;
8042 guint32 field_token;
8048 token = read32 (ip + 1);
8050 klass = mini_get_class (method, token, generic_context);
8051 CHECK_TYPELOAD (klass);
8053 if (cfg->generic_sharing_context)
8054 context_used = mono_class_check_context_used (klass);
8059 /* FIXME: Decompose later to help abcrem */
8062 args [0] = emit_get_rgctx_klass (cfg, context_used,
8063 mono_array_class_get (klass, 1), MONO_RGCTX_INFO_VTABLE);
8068 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
8070 if (cfg->opt & MONO_OPT_SHARED) {
8071 /* Decompose now to avoid problems with references to the domainvar */
8072 MonoInst *iargs [3];
8074 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8075 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8078 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
8080 /* Decompose later since it is needed by abcrem */
8081 MONO_INST_NEW (cfg, ins, OP_NEWARR);
8082 ins->dreg = alloc_preg (cfg);
8083 ins->sreg1 = sp [0]->dreg;
8084 ins->inst_newa_class = klass;
8085 ins->type = STACK_OBJ;
8087 MONO_ADD_INS (cfg->cbb, ins);
8088 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8089 cfg->cbb->has_array_access = TRUE;
8091 /* Needed so mono_emit_load_get_addr () gets called */
8092 mono_get_got_var (cfg);
8102 * we inline/optimize the initialization sequence if possible.
8103 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
8104 * for small sizes open code the memcpy
8105 * ensure the rva field is big enough
8107 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
8108 MonoMethod *memcpy_method = get_memcpy_method ();
8109 MonoInst *iargs [3];
8110 int add_reg = alloc_preg (cfg);
8112 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
8113 if (cfg->compile_aot) {
8114 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
8116 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
8118 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
8119 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
8128 if (sp [0]->type != STACK_OBJ)
8131 dreg = alloc_preg (cfg);
8132 MONO_INST_NEW (cfg, ins, OP_LDLEN);
8133 ins->dreg = alloc_preg (cfg);
8134 ins->sreg1 = sp [0]->dreg;
8135 ins->type = STACK_I4;
8136 MONO_ADD_INS (cfg->cbb, ins);
8137 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8138 cfg->cbb->has_array_access = TRUE;
8146 if (sp [0]->type != STACK_OBJ)
8149 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8151 klass = mini_get_class (method, read32 (ip + 1), generic_context);
8152 CHECK_TYPELOAD (klass);
8153 /* we need to make sure that this array is exactly the type it needs
8154 * to be for correctness. the wrappers are lax with their usage
8155 * so we need to ignore them here
8157 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly)
8158 mini_emit_check_array_type (cfg, sp [0], mono_array_class_get (klass, 1));
8161 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8165 case CEE_LDELEM_ANY:
8176 case CEE_LDELEM_REF: {
8182 if (*ip == CEE_LDELEM_ANY) {
8184 token = read32 (ip + 1);
8185 klass = mini_get_class (method, token, generic_context);
8186 CHECK_TYPELOAD (klass);
8187 mono_class_init (klass);
8190 klass = array_access_to_klass (*ip);
8192 if (sp [0]->type != STACK_OBJ)
8195 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8197 if (sp [1]->opcode == OP_ICONST) {
8198 int array_reg = sp [0]->dreg;
8199 int index_reg = sp [1]->dreg;
8200 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8202 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8203 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
8205 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8206 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
8209 if (*ip == CEE_LDELEM_ANY)
8222 case CEE_STELEM_REF:
8223 case CEE_STELEM_ANY: {
8229 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8231 if (*ip == CEE_STELEM_ANY) {
8233 token = read32 (ip + 1);
8234 klass = mini_get_class (method, token, generic_context);
8235 CHECK_TYPELOAD (klass);
8236 mono_class_init (klass);
8239 klass = array_access_to_klass (*ip);
8241 if (sp [0]->type != STACK_OBJ)
8244 /* storing a NULL doesn't need any of the complex checks in stelemref */
8245 if (generic_class_is_reference_type (cfg, klass) &&
8246 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
8247 MonoMethod* helper = mono_marshal_get_stelemref ();
8248 MonoInst *iargs [3];
8250 if (sp [0]->type != STACK_OBJ)
8252 if (sp [2]->type != STACK_OBJ)
8259 mono_emit_method_call (cfg, helper, iargs, NULL);
8261 if (sp [1]->opcode == OP_ICONST) {
8262 int array_reg = sp [0]->dreg;
8263 int index_reg = sp [1]->dreg;
8264 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8266 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8267 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
8269 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8270 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
8274 if (*ip == CEE_STELEM_ANY)
8281 case CEE_CKFINITE: {
8285 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
8286 ins->sreg1 = sp [0]->dreg;
8287 ins->dreg = alloc_freg (cfg);
8288 ins->type = STACK_R8;
8289 MONO_ADD_INS (bblock, ins);
8292 mono_decompose_opcode (cfg, ins);
8297 case CEE_REFANYVAL: {
8298 MonoInst *src_var, *src;
8300 int klass_reg = alloc_preg (cfg);
8301 int dreg = alloc_preg (cfg);
8304 MONO_INST_NEW (cfg, ins, *ip);
8307 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8308 CHECK_TYPELOAD (klass);
8309 mono_class_init (klass);
8311 if (cfg->generic_sharing_context)
8312 context_used = mono_class_check_context_used (klass);
8315 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
8317 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
8318 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
8319 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
8322 MonoInst *klass_ins;
8324 klass_ins = emit_get_rgctx_klass (cfg, context_used,
8325 klass, MONO_RGCTX_INFO_KLASS);
8328 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
8329 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
8331 mini_emit_class_check (cfg, klass_reg, klass);
8333 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
8334 ins->type = STACK_MP;
8339 case CEE_MKREFANY: {
8340 MonoInst *loc, *addr;
8343 MONO_INST_NEW (cfg, ins, *ip);
8346 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8347 CHECK_TYPELOAD (klass);
8348 mono_class_init (klass);
8350 if (cfg->generic_sharing_context)
8351 context_used = mono_class_check_context_used (klass);
8353 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
8354 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
8357 MonoInst *const_ins;
8358 int type_reg = alloc_preg (cfg);
8360 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
8361 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
8362 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8363 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8364 } else if (cfg->compile_aot) {
8365 int const_reg = alloc_preg (cfg);
8366 int type_reg = alloc_preg (cfg);
8368 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
8369 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
8370 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8371 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8373 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
8374 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
8376 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
8378 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
8379 ins->type = STACK_VTYPE;
8380 ins->klass = mono_defaults.typed_reference_class;
8387 MonoClass *handle_class;
8389 CHECK_STACK_OVF (1);
8392 n = read32 (ip + 1);
8394 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
8395 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
8396 handle = mono_method_get_wrapper_data (method, n);
8397 handle_class = mono_method_get_wrapper_data (method, n + 1);
8398 if (handle_class == mono_defaults.typehandle_class)
8399 handle = &((MonoClass*)handle)->byval_arg;
8402 handle = mono_ldtoken (image, n, &handle_class, generic_context);
8406 mono_class_init (handle_class);
8407 if (cfg->generic_sharing_context) {
8408 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
8409 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
8410 /* This case handles ldtoken
8411 of an open type, like for
8414 } else if (handle_class == mono_defaults.typehandle_class) {
8415 /* If we get a MONO_TYPE_CLASS
8416 then we need to provide the
8418 instantiation of it. */
8419 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
8422 context_used = mono_class_check_context_used (mono_class_from_mono_type (handle));
8423 } else if (handle_class == mono_defaults.fieldhandle_class)
8424 context_used = mono_class_check_context_used (((MonoClassField*)handle)->parent);
8425 else if (handle_class == mono_defaults.methodhandle_class)
8426 context_used = mono_method_check_context_used (handle);
8428 g_assert_not_reached ();
8431 if ((cfg->opt & MONO_OPT_SHARED) &&
8432 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
8433 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
8434 MonoInst *addr, *vtvar, *iargs [3];
8435 int method_context_used;
8437 if (cfg->generic_sharing_context)
8438 method_context_used = mono_method_check_context_used (method);
8440 method_context_used = 0;
8442 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8444 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
8445 EMIT_NEW_ICONST (cfg, iargs [1], n);
8446 if (method_context_used) {
8447 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
8448 method, MONO_RGCTX_INFO_METHOD);
8449 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
8451 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
8452 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
8454 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8456 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8458 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8460 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
8461 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
8462 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
8463 (cmethod->klass == mono_defaults.monotype_class->parent) &&
8464 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
8465 MonoClass *tclass = mono_class_from_mono_type (handle);
8467 mono_class_init (tclass);
8469 ins = emit_get_rgctx_klass (cfg, context_used,
8470 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
8471 } else if (cfg->compile_aot) {
8472 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
8474 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
8476 ins->type = STACK_OBJ;
8477 ins->klass = cmethod->klass;
8480 MonoInst *addr, *vtvar;
8482 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8485 if (handle_class == mono_defaults.typehandle_class) {
8486 ins = emit_get_rgctx_klass (cfg, context_used,
8487 mono_class_from_mono_type (handle),
8488 MONO_RGCTX_INFO_TYPE);
8489 } else if (handle_class == mono_defaults.methodhandle_class) {
8490 ins = emit_get_rgctx_method (cfg, context_used,
8491 handle, MONO_RGCTX_INFO_METHOD);
8492 } else if (handle_class == mono_defaults.fieldhandle_class) {
8493 ins = emit_get_rgctx_field (cfg, context_used,
8494 handle, MONO_RGCTX_INFO_CLASS_FIELD);
8496 g_assert_not_reached ();
8498 } else if (cfg->compile_aot) {
8499 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n);
8501 EMIT_NEW_PCONST (cfg, ins, handle);
8503 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8504 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8505 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8515 MONO_INST_NEW (cfg, ins, OP_THROW);
8517 ins->sreg1 = sp [0]->dreg;
8519 bblock->out_of_line = TRUE;
8520 MONO_ADD_INS (bblock, ins);
8521 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
8522 MONO_ADD_INS (bblock, ins);
8525 link_bblock (cfg, bblock, end_bblock);
8526 start_new_bblock = 1;
8528 case CEE_ENDFINALLY:
8529 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
8530 MONO_ADD_INS (bblock, ins);
8532 start_new_bblock = 1;
8535 * Control will leave the method so empty the stack, otherwise
8536 * the next basic block will start with a nonempty stack.
8538 while (sp != stack_start) {
8546 if (*ip == CEE_LEAVE) {
8548 target = ip + 5 + (gint32)read32(ip + 1);
8551 target = ip + 2 + (signed char)(ip [1]);
8554 /* empty the stack */
8555 while (sp != stack_start) {
8560 * If this leave statement is in a catch block, check for a
8561 * pending exception, and rethrow it if necessary.
8563 for (i = 0; i < header->num_clauses; ++i) {
8564 MonoExceptionClause *clause = &header->clauses [i];
8567 * Use <= in the final comparison to handle clauses with multiple
8568 * leave statements, like in bug #78024.
8569 * The ordering of the exception clauses guarantees that we find the
8572 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len)) {
8574 MonoBasicBlock *dont_throw;
8579 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
8582 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
8584 NEW_BBLOCK (cfg, dont_throw);
8587 * Currently, we allways rethrow the abort exception, despite the
8588 * fact that this is not correct. See thread6.cs for an example.
8589 * But propagating the abort exception is more important than
8590 * getting the sematics right.
8592 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
8593 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
8594 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
8596 MONO_START_BB (cfg, dont_throw);
8601 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
8603 for (tmp = handlers; tmp; tmp = tmp->next) {
8605 link_bblock (cfg, bblock, tblock);
8606 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
8607 ins->inst_target_bb = tblock;
8608 MONO_ADD_INS (bblock, ins);
8610 g_list_free (handlers);
8613 MONO_INST_NEW (cfg, ins, OP_BR);
8614 MONO_ADD_INS (bblock, ins);
8615 GET_BBLOCK (cfg, tblock, target);
8616 link_bblock (cfg, bblock, tblock);
8617 ins->inst_target_bb = tblock;
8618 start_new_bblock = 1;
8620 if (*ip == CEE_LEAVE)
8629 * Mono specific opcodes
8631 case MONO_CUSTOM_PREFIX: {
8633 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
8637 case CEE_MONO_ICALL: {
8639 MonoJitICallInfo *info;
8641 token = read32 (ip + 2);
8642 func = mono_method_get_wrapper_data (method, token);
8643 info = mono_find_jit_icall_by_addr (func);
8646 CHECK_STACK (info->sig->param_count);
8647 sp -= info->sig->param_count;
8649 ins = mono_emit_jit_icall (cfg, info->func, sp);
8650 if (!MONO_TYPE_IS_VOID (info->sig->ret))
8654 inline_costs += 10 * num_calls++;
8658 case CEE_MONO_LDPTR: {
8661 CHECK_STACK_OVF (1);
8663 token = read32 (ip + 2);
8665 ptr = mono_method_get_wrapper_data (method, token);
8666 if (cfg->compile_aot && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (strstr (method->name, "__icall_wrapper_") == method->name)) {
8667 MonoJitICallInfo *callinfo;
8668 const char *icall_name;
8670 icall_name = method->name + strlen ("__icall_wrapper_");
8671 g_assert (icall_name);
8672 callinfo = mono_find_jit_icall_by_name (icall_name);
8673 g_assert (callinfo);
8675 if (ptr == callinfo->func) {
8676 /* Will be transformed into an AOTCONST later */
8677 EMIT_NEW_PCONST (cfg, ins, ptr);
8683 /* FIXME: Generalize this */
8684 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
8685 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
8690 EMIT_NEW_PCONST (cfg, ins, ptr);
8693 inline_costs += 10 * num_calls++;
8694 /* Can't embed random pointers into AOT code */
8695 cfg->disable_aot = 1;
8698 case CEE_MONO_ICALL_ADDR: {
8699 MonoMethod *cmethod;
8702 CHECK_STACK_OVF (1);
8704 token = read32 (ip + 2);
8706 cmethod = mono_method_get_wrapper_data (method, token);
8708 if (cfg->compile_aot) {
8709 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
8711 ptr = mono_lookup_internal_call (cmethod);
8713 EMIT_NEW_PCONST (cfg, ins, ptr);
8719 case CEE_MONO_VTADDR: {
8720 MonoInst *src_var, *src;
8726 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
8727 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
8732 case CEE_MONO_NEWOBJ: {
8733 MonoInst *iargs [2];
8735 CHECK_STACK_OVF (1);
8737 token = read32 (ip + 2);
8738 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
8739 mono_class_init (klass);
8740 NEW_DOMAINCONST (cfg, iargs [0]);
8741 MONO_ADD_INS (cfg->cbb, iargs [0]);
8742 NEW_CLASSCONST (cfg, iargs [1], klass);
8743 MONO_ADD_INS (cfg->cbb, iargs [1]);
8744 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
8746 inline_costs += 10 * num_calls++;
8749 case CEE_MONO_OBJADDR:
8752 MONO_INST_NEW (cfg, ins, OP_MOVE);
8753 ins->dreg = alloc_preg (cfg);
8754 ins->sreg1 = sp [0]->dreg;
8755 ins->type = STACK_MP;
8756 MONO_ADD_INS (cfg->cbb, ins);
8760 case CEE_MONO_LDNATIVEOBJ:
8762 * Similar to LDOBJ, but instead load the unmanaged
8763 * representation of the vtype to the stack.
8768 token = read32 (ip + 2);
8769 klass = mono_method_get_wrapper_data (method, token);
8770 g_assert (klass->valuetype);
8771 mono_class_init (klass);
8774 MonoInst *src, *dest, *temp;
8777 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
8778 temp->backend.is_pinvoke = 1;
8779 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
8780 mini_emit_stobj (cfg, dest, src, klass, TRUE);
8782 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
8783 dest->type = STACK_VTYPE;
8784 dest->klass = klass;
8790 case CEE_MONO_RETOBJ: {
8792 * Same as RET, but return the native representation of a vtype
8795 g_assert (cfg->ret);
8796 g_assert (mono_method_signature (method)->pinvoke);
8801 token = read32 (ip + 2);
8802 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
8804 if (!cfg->vret_addr) {
8805 g_assert (cfg->ret_var_is_local);
8807 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
8809 EMIT_NEW_RETLOADA (cfg, ins);
8811 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
8813 if (sp != stack_start)
8816 MONO_INST_NEW (cfg, ins, OP_BR);
8817 ins->inst_target_bb = end_bblock;
8818 MONO_ADD_INS (bblock, ins);
8819 link_bblock (cfg, bblock, end_bblock);
8820 start_new_bblock = 1;
8824 case CEE_MONO_CISINST:
8825 case CEE_MONO_CCASTCLASS: {
8830 token = read32 (ip + 2);
8831 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
8832 if (ip [1] == CEE_MONO_CISINST)
8833 ins = handle_cisinst (cfg, klass, sp [0]);
8835 ins = handle_ccastclass (cfg, klass, sp [0]);
8841 case CEE_MONO_SAVE_LMF:
8842 case CEE_MONO_RESTORE_LMF:
8843 #ifdef MONO_ARCH_HAVE_LMF_OPS
8844 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
8845 MONO_ADD_INS (bblock, ins);
8846 cfg->need_lmf_area = TRUE;
8850 case CEE_MONO_CLASSCONST:
8851 CHECK_STACK_OVF (1);
8853 token = read32 (ip + 2);
8854 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
8857 inline_costs += 10 * num_calls++;
8859 case CEE_MONO_NOT_TAKEN:
8860 bblock->out_of_line = TRUE;
8864 CHECK_STACK_OVF (1);
8866 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
8867 ins->dreg = alloc_preg (cfg);
8868 ins->inst_offset = (gint32)read32 (ip + 2);
8869 ins->type = STACK_PTR;
8870 MONO_ADD_INS (bblock, ins);
8875 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
8885 /* somewhat similar to LDTOKEN */
8886 MonoInst *addr, *vtvar;
8887 CHECK_STACK_OVF (1);
8888 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
8890 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8891 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
8893 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8894 ins->type = STACK_VTYPE;
8895 ins->klass = mono_defaults.argumenthandle_class;
8908 * The following transforms:
8909 * CEE_CEQ into OP_CEQ
8910 * CEE_CGT into OP_CGT
8911 * CEE_CGT_UN into OP_CGT_UN
8912 * CEE_CLT into OP_CLT
8913 * CEE_CLT_UN into OP_CLT_UN
8915 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
8917 MONO_INST_NEW (cfg, ins, cmp->opcode);
8919 cmp->sreg1 = sp [0]->dreg;
8920 cmp->sreg2 = sp [1]->dreg;
8921 type_from_op (cmp, sp [0], sp [1]);
8923 if ((sp [0]->type == STACK_I8) || ((sizeof (gpointer) == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
8924 cmp->opcode = OP_LCOMPARE;
8925 else if (sp [0]->type == STACK_R8)
8926 cmp->opcode = OP_FCOMPARE;
8928 cmp->opcode = OP_ICOMPARE;
8929 MONO_ADD_INS (bblock, cmp);
8930 ins->type = STACK_I4;
8931 ins->dreg = alloc_dreg (cfg, ins->type);
8932 type_from_op (ins, sp [0], sp [1]);
8934 if (cmp->opcode == OP_FCOMPARE) {
8936 * The backends expect the fceq opcodes to do the
8939 cmp->opcode = OP_NOP;
8940 ins->sreg1 = cmp->sreg1;
8941 ins->sreg2 = cmp->sreg2;
8943 MONO_ADD_INS (bblock, ins);
8950 MonoMethod *cil_method, *ctor_method;
8951 gboolean needs_static_rgctx_invoke;
8953 CHECK_STACK_OVF (1);
8955 n = read32 (ip + 2);
8956 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
8959 mono_class_init (cmethod->klass);
8961 mono_save_token_info (cfg, image, n, cmethod);
8963 if (cfg->generic_sharing_context)
8964 context_used = mono_method_check_context_used (cmethod);
8966 needs_static_rgctx_invoke = mono_method_needs_static_rgctx_invoke (cmethod, TRUE);
8968 cil_method = cmethod;
8969 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
8970 METHOD_ACCESS_FAILURE;
8972 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
8973 if (check_linkdemand (cfg, method, cmethod))
8975 CHECK_CFG_EXCEPTION;
8976 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
8977 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
8981 * Optimize the common case of ldftn+delegate creation
8983 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE) && !defined(HAVE_WRITE_BARRIERS)
8984 /* FIXME: SGEN support */
8985 /* FIXME: handle shared static generic methods */
8986 /* FIXME: handle this in shared code */
8987 if (!needs_static_rgctx_invoke && !context_used && (sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ) && (ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context)) && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
8988 MonoInst *target_ins;
8991 if (cfg->verbose_level > 3)
8992 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
8993 target_ins = sp [-1];
8995 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod);
9003 if (needs_static_rgctx_invoke)
9004 cmethod = mono_marshal_get_static_rgctx_invoke (cmethod);
9006 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
9007 } else if (needs_static_rgctx_invoke) {
9008 EMIT_NEW_METHODCONST (cfg, argconst, mono_marshal_get_static_rgctx_invoke (cmethod));
9010 EMIT_NEW_METHODCONST (cfg, argconst, cmethod);
9012 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
9016 inline_costs += 10 * num_calls++;
9019 case CEE_LDVIRTFTN: {
9024 n = read32 (ip + 2);
9025 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9028 mono_class_init (cmethod->klass);
9030 if (cfg->generic_sharing_context)
9031 context_used = mono_method_check_context_used (cmethod);
9033 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9034 if (check_linkdemand (cfg, method, cmethod))
9036 CHECK_CFG_EXCEPTION;
9037 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9038 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9045 args [1] = emit_get_rgctx_method (cfg, context_used,
9046 cmethod, MONO_RGCTX_INFO_METHOD);
9047 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
9049 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
9050 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
9054 inline_costs += 10 * num_calls++;
9058 CHECK_STACK_OVF (1);
9060 n = read16 (ip + 2);
9062 EMIT_NEW_ARGLOAD (cfg, ins, n);
9067 CHECK_STACK_OVF (1);
9069 n = read16 (ip + 2);
9071 NEW_ARGLOADA (cfg, ins, n);
9072 MONO_ADD_INS (cfg->cbb, ins);
9080 n = read16 (ip + 2);
9082 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
9084 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
9088 CHECK_STACK_OVF (1);
9090 n = read16 (ip + 2);
9092 EMIT_NEW_LOCLOAD (cfg, ins, n);
9097 unsigned char *tmp_ip;
9098 CHECK_STACK_OVF (1);
9100 n = read16 (ip + 2);
9103 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
9109 EMIT_NEW_LOCLOADA (cfg, ins, n);
9118 n = read16 (ip + 2);
9120 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
9122 emit_stloc_ir (cfg, sp, header, n);
9129 if (sp != stack_start)
9131 if (cfg->method != method)
9133 * Inlining this into a loop in a parent could lead to
9134 * stack overflows which is different behavior than the
9135 * non-inlined case, thus disable inlining in this case.
9137 goto inline_failure;
9139 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
9140 ins->dreg = alloc_preg (cfg);
9141 ins->sreg1 = sp [0]->dreg;
9142 ins->type = STACK_PTR;
9143 MONO_ADD_INS (cfg->cbb, ins);
9145 cfg->flags |= MONO_CFG_HAS_ALLOCA;
9146 if (header->init_locals)
9147 ins->flags |= MONO_INST_INIT;
9152 case CEE_ENDFILTER: {
9153 MonoExceptionClause *clause, *nearest;
9154 int cc, nearest_num;
9158 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
9160 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
9161 ins->sreg1 = (*sp)->dreg;
9162 MONO_ADD_INS (bblock, ins);
9163 start_new_bblock = 1;
9168 for (cc = 0; cc < header->num_clauses; ++cc) {
9169 clause = &header->clauses [cc];
9170 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
9171 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
9172 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
9178 if ((ip - header->code) != nearest->handler_offset)
9183 case CEE_UNALIGNED_:
9184 ins_flag |= MONO_INST_UNALIGNED;
9185 /* FIXME: record alignment? we can assume 1 for now */
9190 ins_flag |= MONO_INST_VOLATILE;
9194 ins_flag |= MONO_INST_TAILCALL;
9195 cfg->flags |= MONO_CFG_HAS_TAIL;
9196 /* Can't inline tail calls at this time */
9197 inline_costs += 100000;
9204 token = read32 (ip + 2);
9205 klass = mini_get_class (method, token, generic_context);
9206 CHECK_TYPELOAD (klass);
9207 if (generic_class_is_reference_type (cfg, klass))
9208 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
9210 mini_emit_initobj (cfg, *sp, NULL, klass);
9214 case CEE_CONSTRAINED_:
9216 token = read32 (ip + 2);
9217 constrained_call = mono_class_get_full (image, token, generic_context);
9218 CHECK_TYPELOAD (constrained_call);
9223 MonoInst *iargs [3];
9227 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
9228 mini_emit_memcpy2 (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
9229 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
9230 /* emit_memset only works when val == 0 */
9231 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
9236 if (ip [1] == CEE_CPBLK) {
9237 MonoMethod *memcpy_method = get_memcpy_method ();
9238 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
9240 MonoMethod *memset_method = get_memset_method ();
9241 mono_emit_method_call (cfg, memset_method, iargs, NULL);
9251 ins_flag |= MONO_INST_NOTYPECHECK;
9253 ins_flag |= MONO_INST_NORANGECHECK;
9254 /* we ignore the no-nullcheck for now since we
9255 * really do it explicitly only when doing callvirt->call
9261 int handler_offset = -1;
9263 for (i = 0; i < header->num_clauses; ++i) {
9264 MonoExceptionClause *clause = &header->clauses [i];
9265 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
9266 handler_offset = clause->handler_offset;
9271 bblock->flags |= BB_EXCEPTION_UNSAFE;
9273 g_assert (handler_offset != -1);
9275 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
9276 MONO_INST_NEW (cfg, ins, OP_RETHROW);
9277 ins->sreg1 = load->dreg;
9278 MONO_ADD_INS (bblock, ins);
9280 link_bblock (cfg, bblock, end_bblock);
9281 start_new_bblock = 1;
9289 CHECK_STACK_OVF (1);
9291 token = read32 (ip + 2);
9292 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC) {
9293 MonoType *type = mono_type_create_from_typespec (image, token);
9294 token = mono_type_size (type, &ialign);
9296 MonoClass *klass = mono_class_get_full (image, token, generic_context);
9297 CHECK_TYPELOAD (klass);
9298 mono_class_init (klass);
9299 token = mono_class_value_size (klass, &align);
9301 EMIT_NEW_ICONST (cfg, ins, token);
9306 case CEE_REFANYTYPE: {
9307 MonoInst *src_var, *src;
9313 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9315 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
9316 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
9317 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
9327 g_error ("opcode 0xfe 0x%02x not handled", ip [1]);
9332 g_error ("opcode 0x%02x not handled", *ip);
9335 if (start_new_bblock != 1)
9338 bblock->cil_length = ip - bblock->cil_code;
9339 bblock->next_bb = end_bblock;
9341 if (cfg->method == method && cfg->domainvar) {
9343 MonoInst *get_domain;
9345 cfg->cbb = init_localsbb;
9347 if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
9348 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
9351 get_domain->dreg = alloc_preg (cfg);
9352 MONO_ADD_INS (cfg->cbb, get_domain);
9354 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
9355 MONO_ADD_INS (cfg->cbb, store);
9358 if (cfg->method == method && cfg->got_var)
9359 mono_emit_load_got_addr (cfg);
9361 if (header->init_locals) {
9364 cfg->cbb = init_localsbb;
9366 for (i = 0; i < header->num_locals; ++i) {
9367 MonoType *ptype = header->locals [i];
9368 int t = ptype->type;
9369 dreg = cfg->locals [i]->dreg;
9371 if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
9372 t = ptype->data.klass->enum_basetype->type;
9374 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9375 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
9376 MONO_EMIT_NEW_ICONST (cfg, cfg->locals [i]->dreg, 0);
9377 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
9378 MONO_EMIT_NEW_I8CONST (cfg, cfg->locals [i]->dreg, 0);
9379 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
9380 MONO_INST_NEW (cfg, ins, OP_R8CONST);
9381 ins->type = STACK_R8;
9382 ins->inst_p0 = (void*)&r8_0;
9383 ins->dreg = alloc_dreg (cfg, STACK_R8);
9384 MONO_ADD_INS (init_localsbb, ins);
9385 EMIT_NEW_LOCSTORE (cfg, store, i, ins);
9386 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
9387 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
9388 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
9390 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9397 if (cfg->method == method) {
9399 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
9400 bb->region = mono_find_block_region (cfg, bb->real_offset);
9402 mono_create_spvar_for_region (cfg, bb->region);
9403 if (cfg->verbose_level > 2)
9404 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
9408 g_slist_free (class_inits);
9409 dont_inline = g_list_remove (dont_inline, method);
9411 if (inline_costs < 0) {
9414 /* Method is too large */
9415 mname = mono_method_full_name (method, TRUE);
9416 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
9417 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
9422 if ((cfg->verbose_level > 2) && (cfg->method == method))
9423 mono_print_code (cfg, "AFTER METHOD-TO-IR");
9425 return inline_costs;
9428 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
9429 g_slist_free (class_inits);
9430 dont_inline = g_list_remove (dont_inline, method);
9434 g_slist_free (class_inits);
9435 dont_inline = g_list_remove (dont_inline, method);
9439 g_slist_free (class_inits);
9440 dont_inline = g_list_remove (dont_inline, method);
9441 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
9445 g_slist_free (class_inits);
9446 dont_inline = g_list_remove (dont_inline, method);
9447 set_exception_type_from_invalid_il (cfg, method, ip);
/*
 * store_membase_reg_to_store_membase_imm:
 *   Return the immediate-operand (_IMM) variant of a register-source
 *   membase store opcode, so a store of a known constant can skip the
 *   register.  Opcodes with no _IMM form hit g_assert_not_reached ().
 *   NOTE(review): the switch header/braces and default label are elided
 *   in this extract; comments describe only the visible cases.
 */
9452 store_membase_reg_to_store_membase_imm (int opcode)
9455 case OP_STORE_MEMBASE_REG:
9456 return OP_STORE_MEMBASE_IMM;
9457 case OP_STOREI1_MEMBASE_REG:
9458 return OP_STOREI1_MEMBASE_IMM;
9459 case OP_STOREI2_MEMBASE_REG:
9460 return OP_STOREI2_MEMBASE_IMM;
9461 case OP_STOREI4_MEMBASE_REG:
9462 return OP_STOREI4_MEMBASE_IMM;
9463 case OP_STOREI8_MEMBASE_REG:
9464 return OP_STOREI8_MEMBASE_IMM;
/* no immediate form exists for any other store opcode */
9466 g_assert_not_reached ();
9472 #endif /* DISABLE_JIT */
/*
 * mono_op_to_op_imm:
 *   Map an IR opcode to its immediate-operand variant (e.g. a compare or
 *   store whose second operand is a constant), enabling the constant to
 *   be folded into the instruction.  x86/amd64-specific conversions are
 *   guarded by #ifdefs.
 *   NOTE(review): most case labels and the switch scaffolding are elided
 *   in this extract; the visible returns imply the usual one-case-per-
 *   opcode mapping, but confirm against the full file.
 */
9475 mono_op_to_op_imm (int opcode)
/* integer div/rem and shift immediates */
9485 return OP_IDIV_UN_IMM;
9489 return OP_IREM_UN_IMM;
9503 return OP_ISHR_UN_IMM;
9520 return OP_LSHR_UN_IMM;
/* compare opcodes: word-size, 32-bit and 64-bit variants */
9523 return OP_COMPARE_IMM;
9525 return OP_ICOMPARE_IMM;
9527 return OP_LCOMPARE_IMM;
/* stores of a constant can use the _IMM membase store forms */
9529 case OP_STORE_MEMBASE_REG:
9530 return OP_STORE_MEMBASE_IMM;
9531 case OP_STOREI1_MEMBASE_REG:
9532 return OP_STOREI1_MEMBASE_IMM;
9533 case OP_STOREI2_MEMBASE_REG:
9534 return OP_STOREI2_MEMBASE_IMM;
9535 case OP_STOREI4_MEMBASE_REG:
9536 return OP_STOREI4_MEMBASE_IMM;
/* architecture-specific immediate forms */
9538 #if defined(__i386__) || defined (__x86_64__)
9540 return OP_X86_PUSH_IMM;
9541 case OP_X86_COMPARE_MEMBASE_REG:
9542 return OP_X86_COMPARE_MEMBASE_IMM;
9544 #if defined(__x86_64__)
9545 case OP_AMD64_ICOMPARE_MEMBASE_REG:
9546 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
9548 case OP_VOIDCALL_REG:
9557 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *   Map a CEE_LDIND_* indirect-load CIL opcode to the corresponding
 *   OP_LOAD*_MEMBASE IR opcode; unknown opcodes assert.
 *   NOTE(review): the case labels are elided in this extract.  The order
 *   of the visible returns (I1, U1, I2, U2, I4, U4, pointer, pointer,
 *   I8, R4, R8) matches the CEE_LDIND_* family -- confirm against the
 *   full file.
 */
9564 ldind_to_load_membase (int opcode)
9568 return OP_LOADI1_MEMBASE;
9570 return OP_LOADU1_MEMBASE;
9572 return OP_LOADI2_MEMBASE;
9574 return OP_LOADU2_MEMBASE;
9576 return OP_LOADI4_MEMBASE;
9578 return OP_LOADU4_MEMBASE;
/* two pointer-sized loads (presumably LDIND_I and LDIND_REF) share
 * OP_LOAD_MEMBASE -- TODO confirm the elided case labels */
9580 return OP_LOAD_MEMBASE;
9582 return OP_LOAD_MEMBASE;
9584 return OP_LOADI8_MEMBASE;
9586 return OP_LOADR4_MEMBASE;
9588 return OP_LOADR8_MEMBASE;
9590 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *   Map a CEE_STIND_* indirect-store CIL opcode to the corresponding
 *   OP_STORE*_MEMBASE_REG IR opcode; unknown opcodes assert.
 *   NOTE(review): the case labels are elided in this extract; the
 *   visible returns follow the I1, I2, I4, pointer, I8, R4, R8 order of
 *   the CEE_STIND_* family -- confirm against the full file.
 */
9597 stind_to_store_membase (int opcode)
9601 return OP_STOREI1_MEMBASE_REG;
9603 return OP_STOREI2_MEMBASE_REG;
9605 return OP_STOREI4_MEMBASE_REG;
9608 return OP_STORE_MEMBASE_REG;
9610 return OP_STOREI8_MEMBASE_REG;
9612 return OP_STORER4_MEMBASE_REG;
9614 return OP_STORER8_MEMBASE_REG;
9616 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *   Convert an OP_LOAD*_MEMBASE opcode to the absolute-address
 *   OP_LOAD*_MEM form, on architectures that support it (currently only
 *   x86/amd64 per the #ifdef; 8-byte loads only when pointers are
 *   8 bytes).
 *   NOTE(review): the switch header, some case bodies and the function
 *   tail are elided in this extract.
 */
9623 mono_load_membase_to_load_mem (int opcode)
9625 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
9626 #if defined(__i386__) || defined(__x86_64__)
9628 case OP_LOAD_MEMBASE:
9630 case OP_LOADU1_MEMBASE:
9631 return OP_LOADU1_MEM;
9632 case OP_LOADU2_MEMBASE:
9633 return OP_LOADU2_MEM;
9634 case OP_LOADI4_MEMBASE:
9635 return OP_LOADI4_MEM;
9636 case OP_LOADU4_MEMBASE:
9637 return OP_LOADU4_MEM;
9638 #if SIZEOF_VOID_P == 8
9639 case OP_LOADI8_MEMBASE:
9640 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *   Given an ALU opcode whose result feeds a store with STORE_OPCODE,
 *   return a fused x86/amd64 read-modify-write opcode (e.g.
 *   "add [mem], reg") so the separate ALU + store pair can be collapsed.
 *   Eligible only for word-sized stores on x86, plus 8-byte stores on
 *   amd64, as the visible guards show.
 *   NOTE(review): most case labels and default returns are elided in
 *   this extract; comments cover only the visible lines.
 */
9649 op_to_op_dest_membase (int store_opcode, int opcode)
/* --- x86: only pointer/32-bit stores can be fused --- */
9651 #if defined(__i386__)
9652 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
/* register-source ALU ops */
9657 return OP_X86_ADD_MEMBASE_REG;
9659 return OP_X86_SUB_MEMBASE_REG;
9661 return OP_X86_AND_MEMBASE_REG;
9663 return OP_X86_OR_MEMBASE_REG;
9665 return OP_X86_XOR_MEMBASE_REG;
/* immediate-source ALU ops */
9668 return OP_X86_ADD_MEMBASE_IMM;
9671 return OP_X86_SUB_MEMBASE_IMM;
9674 return OP_X86_AND_MEMBASE_IMM;
9677 return OP_X86_OR_MEMBASE_IMM;
9680 return OP_X86_XOR_MEMBASE_IMM;
/* --- amd64: also allows 8-byte stores --- */
9686 #if defined(__x86_64__)
9687 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
/* 32-bit register-source forms */
9692 return OP_X86_ADD_MEMBASE_REG;
9694 return OP_X86_SUB_MEMBASE_REG;
9696 return OP_X86_AND_MEMBASE_REG;
9698 return OP_X86_OR_MEMBASE_REG;
9700 return OP_X86_XOR_MEMBASE_REG;
/* 32-bit immediate-source forms */
9702 return OP_X86_ADD_MEMBASE_IMM;
9704 return OP_X86_SUB_MEMBASE_IMM;
9706 return OP_X86_AND_MEMBASE_IMM;
9708 return OP_X86_OR_MEMBASE_IMM;
9710 return OP_X86_XOR_MEMBASE_IMM;
/* 64-bit register-source forms */
9712 return OP_AMD64_ADD_MEMBASE_REG;
9714 return OP_AMD64_SUB_MEMBASE_REG;
9716 return OP_AMD64_AND_MEMBASE_REG;
9718 return OP_AMD64_OR_MEMBASE_REG;
9720 return OP_AMD64_XOR_MEMBASE_REG;
/* 64-bit immediate-source forms */
9723 return OP_AMD64_ADD_MEMBASE_IMM;
9726 return OP_AMD64_SUB_MEMBASE_IMM;
9729 return OP_AMD64_AND_MEMBASE_IMM;
9732 return OP_AMD64_OR_MEMBASE_IMM;
9735 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *   Fuse a compare-result opcode with a following 1-byte store into an
 *   x86 SET<cc>-to-memory opcode, avoiding the intermediate register.
 *   Only applies when STORE_OPCODE is OP_STOREI1_MEMBASE_REG.
 *   NOTE(review): the case labels for the condition opcodes (presumably
 *   the CEQ/CNE-style setcc ops) are elided in this extract -- confirm
 *   against the full file.
 */
9745 op_to_op_store_membase (int store_opcode, int opcode)
9747 #if defined(__i386__) || defined(__x86_64__)
9750 if (store_opcode == OP_STOREI1_MEMBASE_REG)
9751 return OP_X86_SETEQ_MEMBASE;
9753 if (store_opcode == OP_STOREI1_MEMBASE_REG)
9754 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *   Fold a memory load (LOAD_OPCODE) feeding the *first* source operand
 *   of OPCODE into a memory-operand form of that opcode on x86/amd64,
 *   so the explicit load instruction can be eliminated.
 *   NOTE(review): several case labels and #ifdef lines are elided in
 *   this extract; the FIXME comments about sign extension and 32-bit
 *   immediates are original.
 */
9762 op_to_op_src1_membase (int load_opcode, int opcode)
/* --- x86 --- */
9765 /* FIXME: This has sign extension issues */
9767 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
9768 return OP_X86_COMPARE_MEMBASE8_IMM;
/* otherwise only pointer/32-bit loads can be folded */
9771 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
9776 return OP_X86_PUSH_MEMBASE;
9777 case OP_COMPARE_IMM:
9778 case OP_ICOMPARE_IMM:
9779 return OP_X86_COMPARE_MEMBASE_IMM;
9782 return OP_X86_COMPARE_MEMBASE_REG;
/* --- amd64 --- */
9787 /* FIXME: This has sign extension issues */
9789 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
9790 return OP_X86_COMPARE_MEMBASE8_IMM;
9795 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
9796 return OP_X86_PUSH_MEMBASE;
/* the commented-out block below is disabled in the original source */
9798 /* FIXME: This only works for 32 bit immediates
9799 case OP_COMPARE_IMM:
9800 case OP_LCOMPARE_IMM:
9801 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
9802 return OP_AMD64_COMPARE_MEMBASE_IMM;
9804 case OP_ICOMPARE_IMM:
9805 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9806 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
/* 64-bit vs 32-bit register compares, keyed on the load width */
9810 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
9811 return OP_AMD64_COMPARE_MEMBASE_REG;
9814 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9815 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *   Fold a memory load (LOAD_OPCODE) feeding the *second* source operand
 *   of OPCODE into a reg,[mem] form of that opcode on x86/amd64, so the
 *   explicit load instruction can be eliminated.  Each fold is gated on
 *   the load width matching the operation width (32-bit ops take
 *   I4/U4 loads, 64-bit ops take I8/pointer loads).
 *   NOTE(review): the case labels and #ifdef structure are elided in
 *   this extract; comments cover only the visible lines.
 */
9824 op_to_op_src2_membase (int load_opcode, int opcode)
/* --- x86: only pointer/32-bit loads are foldable --- */
9827 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
9833 return OP_X86_COMPARE_REG_MEMBASE;
9835 return OP_X86_ADD_REG_MEMBASE;
9837 return OP_X86_SUB_REG_MEMBASE;
9839 return OP_X86_AND_REG_MEMBASE;
9841 return OP_X86_OR_REG_MEMBASE;
9843 return OP_X86_XOR_REG_MEMBASE;
/* --- amd64: width of the load selects 32- or 64-bit fused form --- */
9850 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9851 return OP_AMD64_ICOMPARE_REG_MEMBASE;
9855 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9856 return OP_AMD64_COMPARE_REG_MEMBASE;
9859 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9860 return OP_X86_ADD_REG_MEMBASE;
9862 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9863 return OP_X86_SUB_REG_MEMBASE;
9865 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9866 return OP_X86_AND_REG_MEMBASE;
9868 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9869 return OP_X86_OR_REG_MEMBASE;
9871 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9872 return OP_X86_XOR_REG_MEMBASE;
9874 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9875 return OP_AMD64_ADD_REG_MEMBASE;
9877 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9878 return OP_AMD64_SUB_REG_MEMBASE;
9880 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9881 return OP_AMD64_AND_REG_MEMBASE;
9883 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9884 return OP_AMD64_OR_REG_MEMBASE;
9886 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9887 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but (judging from the guards below) refuses the
 * reg->imm conversion for opcodes which are emulated by helper calls on this
 * architecture: long shifts on 32-bit targets without native long shift ops, and
 * mul/div when MONO_ARCH_EMULATE_MUL_DIV / MONO_ARCH_EMULATE_DIV is defined.
 * NOTE(review): the case labels guarded by these #ifs are elided in this listing.
 */
9895 mono_op_to_op_imm_noemul (int opcode)
9898 #if SIZEOF_VOID_P == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
9903 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
/* Default: delegate to the general immediate-form mapping. */
9911 return mono_op_to_op_imm (opcode);
9918 * mono_handle_global_vregs:
9920 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
9924 mono_handle_global_vregs (MonoCompile *cfg)
/*
 * vreg_to_bb maps vreg -> (block_num + 1) of the single bblock using it, or -1
 * once it has been seen in more than one bblock.
 * NOTE(review): the element size is sizeof (gint32*) and the "+ 1" is outside the
 * multiplication — looks over-allocating/odd but matches what is visible here;
 * confirm intent (sizeof (gint32) * (cfg->next_vreg + 1)?) against upstream.
 */
9930 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
9932 #ifdef MONO_ARCH_SIMD_INTRINSICS
9933 if (cfg->uses_simd_intrinsics)
9934 mono_simd_simplify_indirection (cfg);
9937 /* Find local vregs used in more than one bb */
9938 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
9939 MonoInst *ins = bb->code;
9940 int block_num = bb->block_num;
9942 if (cfg->verbose_level > 2)
9943 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
9946 for (; ins; ins = ins->next) {
9947 const char *spec = INS_INFO (ins->opcode);
9948 int regtype, regindex;
9951 if (G_UNLIKELY (cfg->verbose_level > 2))
9952 mono_print_ins (ins);
/* By this point only low-level (post-CEE) opcodes should remain. */
9954 g_assert (ins->opcode >= MONO_CEE_LAST);
/* Visit dreg, sreg1, sreg2 of the instruction in turn. */
9956 for (regindex = 0; regindex < 3; regindex ++) {
9959 if (regindex == 0) {
9960 regtype = spec [MONO_INST_DEST];
9964 } else if (regindex == 1) {
9965 regtype = spec [MONO_INST_SRC1];
9970 regtype = spec [MONO_INST_SRC2];
9976 #if SIZEOF_VOID_P == 4
/* On 32-bit, long vregs are split into two component vregs (vreg+1/vreg+2). */
9977 if (regtype == 'l') {
9979 * Since some instructions reference the original long vreg,
9980 * and some reference the two component vregs, it is quite hard
9981 * to determine when it needs to be global. So be conservative.
9983 if (!get_vreg_to_inst (cfg, vreg)) {
9984 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
9986 if (cfg->verbose_level > 2)
9987 printf ("LONG VREG R%d made global.\n", vreg);
9991 * Make the component vregs volatile since the optimizations can
9992 * get confused otherwise.
9994 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
9995 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
9999 g_assert (vreg != -1);
10001 prev_bb = vreg_to_bb [vreg];
10002 if (prev_bb == 0) {
10003 /* 0 is a valid block num */
10004 vreg_to_bb [vreg] = block_num + 1;
10005 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* Hardware registers are never turned into variables. */
10006 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
/* Used in a second bblock: promote to a global variable (typed by regtype). */
10009 if (!get_vreg_to_inst (cfg, vreg)) {
10010 if (G_UNLIKELY (cfg->verbose_level > 2))
10011 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
10015 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
10018 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
10021 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
10024 g_assert_not_reached ();
10028 /* Flag as having been used in more than one bb */
10029 vreg_to_bb [vreg] = -1;
10035 /* If a variable is used in only one bblock, convert it into a local vreg */
10036 for (i = 0; i < cfg->num_varinfo; i++) {
10037 MonoInst *var = cfg->varinfo [i];
10038 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
10040 switch (var->type) {
10046 #if SIZEOF_VOID_P == 8
10049 #if !defined(__i386__) && !defined(MONO_ARCH_SOFT_FLOAT)
10050 /* Enabling this screws up the fp stack on x86 */
10053 /* Arguments are implicitly global */
10054 /* Putting R4 vars into registers doesn't work currently */
10055 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg) {
10057 * Make sure that the variable's liveness interval doesn't contain a call, since
10058 * that would cause the lvreg to be spilled, making the whole optimization
10061 /* This is too slow for JIT compilation */
10063 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
10065 int def_index, call_index, ins_index;
10066 gboolean spilled = FALSE;
10071 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
10072 const char *spec = INS_INFO (ins->opcode);
10074 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
10075 def_index = ins_index;
/*
 * NOTE(review): both arms of this || test SRC1/sreg1 — the second arm
 * presumably should test SRC2/sreg2; as written the sreg2 use of the
 * variable is never checked. Verify against upstream before changing.
 */
10077 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
10078 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
10079 if (call_index > def_index) {
10085 if (MONO_IS_CALL (ins))
10086 call_index = ins_index;
/* Demote: mark the variable dead and drop its vreg->inst mapping. */
10096 if (G_UNLIKELY (cfg->verbose_level > 2))
10097 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
10098 var->flags |= MONO_INST_IS_DEAD;
10099 cfg->vreg_to_inst [var->dreg] = NULL;
10106 * Compress the varinfo and vars tables so the liveness computation is faster and
10107 * takes up less space.
10110 for (i = 0; i < cfg->num_varinfo; ++i) {
10111 MonoInst *var = cfg->varinfo [i];
10112 if (pos < i && cfg->locals_start == i)
10113 cfg->locals_start = pos;
10114 if (!(var->flags & MONO_INST_IS_DEAD)) {
10116 cfg->varinfo [pos] = cfg->varinfo [i];
10117 cfg->varinfo [pos]->inst_c0 = pos;
10118 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
10119 cfg->vars [pos].idx = pos;
10120 #if SIZEOF_VOID_P == 4
10121 if (cfg->varinfo [pos]->type == STACK_I8) {
10122 /* Modify the two component vars too */
10125 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
10126 var1->inst_c0 = pos;
10127 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
10128 var1->inst_c0 = pos;
10135 cfg->num_varinfo = pos;
10136 if (cfg->locals_start > cfg->num_varinfo)
10137 cfg->locals_start = cfg->num_varinfo;
10141 * mono_spill_global_vars:
10143 * Generate spill code for variables which are not allocated to registers,
10144 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
10145 * code is generated which could be optimized by the local optimization passes.
10148 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
10150 MonoBasicBlock *bb;
10152 int orig_next_vreg;
10153 guint32 *vreg_to_lvreg;
10155 guint32 i, lvregs_len;
10156 gboolean dest_has_lvreg = FALSE;
/* Indexed by INS_INFO regtype chars ('i', 'l', 'f', 'x'), hence 128 entries. */
10157 guint32 stacktypes [128];
10159 *need_local_opts = FALSE;
10161 memset (spec2, 0, sizeof (spec2));
10163 /* FIXME: Move this function to mini.c */
10164 stacktypes ['i'] = STACK_PTR;
10165 stacktypes ['l'] = STACK_I8;
10166 stacktypes ['f'] = STACK_R8;
10167 #ifdef MONO_ARCH_SIMD_INTRINSICS
10168 stacktypes ['x'] = STACK_VTYPE;
10171 #if SIZEOF_VOID_P == 4
10172 /* Create MonoInsts for longs */
10173 for (i = 0; i < cfg->num_varinfo; i++) {
10174 MonoInst *ins = cfg->varinfo [i];
10176 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
10177 switch (ins->type) {
10178 #ifdef MONO_ARCH_SOFT_FLOAT
/* Give the two component vregs (dreg+1/dreg+2) stack slots inside the long's slot. */
10184 g_assert (ins->opcode == OP_REGOFFSET);
10186 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
10188 tree->opcode = OP_REGOFFSET;
10189 tree->inst_basereg = ins->inst_basereg;
10190 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
10192 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
10194 tree->opcode = OP_REGOFFSET;
10195 tree->inst_basereg = ins->inst_basereg;
10196 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
10206 /* FIXME: widening and truncation */
10209 * As an optimization, when a variable allocated to the stack is first loaded into
10210 * an lvreg, we will remember the lvreg and use it the next time instead of loading
10211 * the variable again.
10213 orig_next_vreg = cfg->next_vreg;
10214 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
/* Fixed-capacity list of vregs with a cached lvreg (asserted < 1024 below). */
10215 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
10218 /* Add spill loads/stores */
10219 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10222 if (cfg->verbose_level > 2)
10223 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
10225 /* Clear vreg_to_lvreg array */
10226 for (i = 0; i < lvregs_len; i++)
10227 vreg_to_lvreg [lvregs [i]] = 0;
10231 MONO_BB_FOR_EACH_INS (bb, ins) {
10232 const char *spec = INS_INFO (ins->opcode);
10233 int regtype, srcindex, sreg, tmp_reg, prev_dreg;
10234 gboolean store, no_lvreg;
10236 if (G_UNLIKELY (cfg->verbose_level > 2))
10237 mono_print_ins (ins);
10239 if (ins->opcode == OP_NOP)
10243 * We handle LDADDR here as well, since it can only be decomposed
10244 * when variable addresses are known.
10246 if (ins->opcode == OP_LDADDR) {
10247 MonoInst *var = ins->inst_p0;
10249 if (var->opcode == OP_VTARG_ADDR) {
10250 /* Happens on SPARC/S390 where vtypes are passed by reference */
10251 MonoInst *vtaddr = var->inst_left;
10252 if (vtaddr->opcode == OP_REGVAR) {
10253 ins->opcode = OP_MOVE;
10254 ins->sreg1 = vtaddr->dreg;
10256 else if (var->inst_left->opcode == OP_REGOFFSET) {
10257 ins->opcode = OP_LOAD_MEMBASE;
10258 ins->inst_basereg = vtaddr->inst_basereg;
10259 ins->inst_offset = vtaddr->inst_offset;
10263 g_assert (var->opcode == OP_REGOFFSET);
/* LDADDR of a stack slot decomposes into basereg + offset. */
10265 ins->opcode = OP_ADD_IMM;
10266 ins->sreg1 = var->inst_basereg;
10267 ins->inst_imm = var->inst_offset;
10270 *need_local_opts = TRUE;
10271 spec = INS_INFO (ins->opcode);
/* High-level CEE opcodes must all be decomposed before this pass. */
10274 if (ins->opcode < MONO_CEE_LAST) {
10275 mono_print_ins (ins);
10276 g_assert_not_reached ();
10280 * Store opcodes have destbasereg in the dreg, but in reality, it is an
10284 if (MONO_IS_STORE_MEMBASE (ins)) {
/* Temporarily swap dreg/sreg2 so the generic dreg/sreg handling below works. */
10285 tmp_reg = ins->dreg;
10286 ins->dreg = ins->sreg2;
10287 ins->sreg2 = tmp_reg;
10290 spec2 [MONO_INST_DEST] = ' ';
10291 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10292 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10294 } else if (MONO_IS_STORE_MEMINDEX (ins))
10295 g_assert_not_reached ();
10300 if (G_UNLIKELY (cfg->verbose_level > 2))
10301 printf ("\t %.3s %d %d %d\n", spec, ins->dreg, ins->sreg1, ins->sreg2);
/***************/
/*    DREG     */
/***************/
10306 regtype = spec [MONO_INST_DEST];
10307 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
10310 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
10311 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
10312 MonoInst *store_ins;
10315 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
10317 if (var->opcode == OP_REGVAR) {
/* Variable lives in a register: just substitute the hreg. */
10318 ins->dreg = var->dreg;
10319 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
10321 * Instead of emitting a load+store, use a _membase opcode.
10323 g_assert (var->opcode == OP_REGOFFSET);
10324 if (ins->opcode == OP_MOVE) {
10327 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
10328 ins->inst_basereg = var->inst_basereg;
10329 ins->inst_offset = var->inst_offset;
10332 spec = INS_INFO (ins->opcode);
10336 g_assert (var->opcode == OP_REGOFFSET);
10338 prev_dreg = ins->dreg;
10340 /* Invalidate any previous lvreg for this vreg */
10341 vreg_to_lvreg [ins->dreg] = 0;
10345 #ifdef MONO_ARCH_SOFT_FLOAT
10346 if (store_opcode == OP_STORER8_MEMBASE_REG) {
/* Soft float: doubles are stored as 64-bit integers. */
10348 store_opcode = OP_STOREI8_MEMBASE_REG;
/* Redirect the result to a fresh lvreg; the stores below spill it to the slot. */
10352 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
10354 if (regtype == 'l') {
10355 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
10356 mono_bblock_insert_after_ins (bb, ins, store_ins);
10357 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
10358 mono_bblock_insert_after_ins (bb, ins, store_ins);
10361 g_assert (store_opcode != OP_STOREV_MEMBASE);
10363 /* Try to fuse the store into the instruction itself */
10364 /* FIXME: Add more instructions */
10365 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
10366 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
10367 ins->inst_imm = ins->inst_c0;
10368 ins->inst_destbasereg = var->inst_basereg;
10369 ins->inst_offset = var->inst_offset;
10370 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
/* A move into a spilled var becomes a direct store of the source. */
10371 ins->opcode = store_opcode;
10372 ins->inst_destbasereg = var->inst_basereg;
10373 ins->inst_offset = var->inst_offset;
10377 tmp_reg = ins->dreg;
10378 ins->dreg = ins->sreg2;
10379 ins->sreg2 = tmp_reg;
10382 spec2 [MONO_INST_DEST] = ' ';
10383 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10384 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10386 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
10387 // FIXME: The backends expect the base reg to be in inst_basereg
10388 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
10390 ins->inst_basereg = var->inst_basereg;
10391 ins->inst_offset = var->inst_offset;
10392 spec = INS_INFO (ins->opcode);
10394 /* printf ("INS: "); mono_print_ins (ins); */
10395 /* Create a store instruction */
10396 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
10398 /* Insert it after the instruction */
10399 mono_bblock_insert_after_ins (bb, ins, store_ins);
10402 * We can't assign ins->dreg to var->dreg here, since the
10403 * sregs could use it. So set a flag, and do it after
10406 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
10407 dest_has_lvreg = TRUE;
/************/
/*  SREGS   */
/************/
10416 for (srcindex = 0; srcindex < 2; ++srcindex) {
10417 regtype = spec [(srcindex == 0) ? MONO_INST_SRC1 : MONO_INST_SRC2];
10418 sreg = srcindex == 0 ? ins->sreg1 : ins->sreg2;
10420 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
10421 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
10422 MonoInst *var = get_vreg_to_inst (cfg, sreg);
10423 MonoInst *load_ins;
10424 guint32 load_opcode;
10426 if (var->opcode == OP_REGVAR) {
10428 ins->sreg1 = var->dreg;
10430 ins->sreg2 = var->dreg;
10434 g_assert (var->opcode == OP_REGOFFSET);
10436 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
10438 g_assert (load_opcode != OP_LOADV_MEMBASE);
10440 if (vreg_to_lvreg [sreg]) {
10441 /* The variable is already loaded to an lvreg */
10442 if (G_UNLIKELY (cfg->verbose_level > 2))
10443 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
10445 ins->sreg1 = vreg_to_lvreg [sreg];
10447 ins->sreg2 = vreg_to_lvreg [sreg];
10451 /* Try to fuse the load into the instruction */
10452 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
10453 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
10454 ins->inst_basereg = var->inst_basereg;
10455 ins->inst_offset = var->inst_offset;
10456 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
10457 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
10458 ins->sreg2 = var->inst_basereg;
10459 ins->inst_offset = var->inst_offset;
10461 if (MONO_IS_REAL_MOVE (ins)) {
10462 ins->opcode = OP_NOP;
10465 //printf ("%d ", srcindex); mono_print_ins (ins);
10467 sreg = alloc_dreg (cfg, stacktypes [regtype]);
/* Cache the loaded value as this variable's lvreg (unless fp-stack/volatile). */
10469 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
10470 if (var->dreg == prev_dreg) {
10472 * sreg refers to the value loaded by the load
10473 * emitted below, but we need to use ins->dreg
10474 * since it refers to the store emitted earlier.
10478 vreg_to_lvreg [var->dreg] = sreg;
10479 g_assert (lvregs_len < 1024);
10480 lvregs [lvregs_len ++] = var->dreg;
10489 if (regtype == 'l') {
10490 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
10491 mono_bblock_insert_before_ins (bb, ins, load_ins);
10492 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
10493 mono_bblock_insert_before_ins (bb, ins, load_ins);
10496 #if SIZEOF_VOID_P == 4
10497 g_assert (load_opcode != OP_LOADI8_MEMBASE);
10499 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
10500 mono_bblock_insert_before_ins (bb, ins, load_ins);
/* Deferred from the dreg handling above (see comment there). */
10506 if (dest_has_lvreg) {
10507 vreg_to_lvreg [prev_dreg] = ins->dreg;
10508 g_assert (lvregs_len < 1024);
10509 lvregs [lvregs_len ++] = prev_dreg;
10510 dest_has_lvreg = FALSE;
/* Undo the earlier dreg/sreg2 swap for store opcodes. */
10514 tmp_reg = ins->dreg;
10515 ins->dreg = ins->sreg2;
10516 ins->sreg2 = tmp_reg;
10519 if (MONO_IS_CALL (ins)) {
10520 /* Clear vreg_to_lvreg array */
10521 for (i = 0; i < lvregs_len; i++)
10522 vreg_to_lvreg [lvregs [i]] = 0;
10526 if (cfg->verbose_level > 2)
10527 mono_print_ins_index (1, ins);
10534 * - use 'iadd' instead of 'int_add'
10535 * - handling ovf opcodes: decompose in method_to_ir.
10536 * - unify iregs/fregs
10537 * -> partly done, the missing parts are:
10538 * - a more complete unification would involve unifying the hregs as well, so
10539 * code wouldn't need if (fp) all over the place. but that would mean the hregs
10540 * would no longer map to the machine hregs, so the code generators would need to
10541 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
10542 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
10543 * fp/non-fp branches speeds it up by about 15%.
10544 * - use sext/zext opcodes instead of shifts
10546 * - get rid of TEMPLOADs if possible and use vregs instead
10547 * - clean up usage of OP_P/OP_ opcodes
10548 * - cleanup usage of DUMMY_USE
10549 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
10551 * - set the stack type and allocate a dreg in the EMIT_NEW macros
10552 * - get rid of all the <foo>2 stuff when the new JIT is ready.
10553 * - make sure handle_stack_args () is called before the branch is emitted
10554 * - when the new IR is done, get rid of all unused stuff
10555 * - COMPARE/BEQ as separate instructions or unify them ?
10556 * - keeping them separate allows specialized compare instructions like
10557 * compare_imm, compare_membase
10558 * - most back ends unify fp compare+branch, fp compare+ceq
10559 * - integrate mono_save_args into inline_method
10560 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRACH_BLOCK2
10561 * - handle long shift opts on 32 bit platforms somehow: they require
10562 * 3 sregs (2 for arg1 and 1 for arg2)
10563 * - make byref a 'normal' type.
10564 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
10565 * variable if needed.
10566 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
10567 * like inline_method.
10568 * - remove inlining restrictions
10569 * - fix LNEG and enable cfold of INEG
10570 * - generalize x86 optimizations like ldelema as a peephole optimization
10571 * - add store_mem_imm for amd64
10572 * - optimize the loading of the interruption flag in the managed->native wrappers
10573 * - avoid special handling of OP_NOP in passes
10574 * - move code inserting instructions into one function/macro.
10575 * - try a coalescing phase after liveness analysis
10576 * - add float -> vreg conversion + local optimizations on !x86
10577 * - figure out how to handle decomposed branches during optimizations, ie.
10578 * compare+branch, op_jump_table+op_br etc.
10579 * - promote RuntimeXHandles to vregs
10580 * - vtype cleanups:
10581 * - add a NEW_VARLOADA_VREG macro
10582 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
10583 * accessing vtype fields.
10584 * - get rid of I8CONST on 64 bit platforms
10585 * - dealing with the increase in code size due to branches created during opcode
10587 * - use extended basic blocks
10588 * - all parts of the JIT
10589 * - handle_global_vregs () && local regalloc
10590 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
10591 * - sources of increase in code size:
10594 * - isinst and castclass
10595 * - lvregs not allocated to global registers even if used multiple times
10596 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
10598 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
10599 * - add all micro optimizations from the old JIT
10600 * - put tree optimizations into the deadce pass
10601 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
10602 * specific function.
10603 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
10604 * fcompare + branchCC.
10605 * - create a helper function for allocating a stack slot, taking into account
10606 * MONO_CFG_HAS_SPILLUP.
10607 * - merge new GC changes in mini.c.
10609 * - merge the ia64 switch changes.
10610 * - merge the mips conditional changes.
10611 * - remove unused opcodes from mini-ops.h, remove "op_" from the opcode names,
10612 * remove the op_ opcodes from the cpu-..md files, clean up the cpu-..md files.
10613 * - make the cpu_ tables smaller when the usage of the cee_ opcodes is removed.
10614 * - optimize mono_regstate2_alloc_int/float.
10615 * - fix the pessimistic handling of variables accessed in exception handler blocks.
10616 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
10617 * parts of the tree could be separated by other instructions, killing the tree
10618 * arguments, or stores killing loads etc. Also, should we fold loads into other
10619 * instructions if the result of the load is used multiple times ?
10620 * - make the REM_IMM optimization in mini-x86.c arch-independent.
10621 * - LAST MERGE: 108395.
10622 * - when returning vtypes in registers, generate IR and append it to the end of the
10623 * last bb instead of doing it in the epilog.
10624 * - when the new JIT is done, use the ins emission macros in ir-emit.h instead of the
10625 * ones in inssel.h.
10626 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
10634 - When to decompose opcodes:
10635 - earlier: this makes some optimizations hard to implement, since the low level IR
10636 no longer contains the necessary information. But it is easier to do.
10637 - later: harder to implement, enables more optimizations.
10638 - Branches inside bblocks:
10639 - created when decomposing complex opcodes.
10640 - branches to another bblock: harmless, but not tracked by the branch
10641 optimizations, so need to branch to a label at the start of the bblock.
10642 - branches to inside the same bblock: very problematic, trips up the local
10643 reg allocator. Can be fixed by splitting the current bblock, but that is a
10644 complex operation, since some local vregs can become global vregs etc.
10645 - Local/global vregs:
10646 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
10647 local register allocator.
10648 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
10649 structure, created by mono_create_var (). Assigned to hregs or the stack by
10650 the global register allocator.
10651 - When to do optimizations like alu->alu_imm:
10652 - earlier -> saves work later on since the IR will be smaller/simpler
10653 - later -> can work on more instructions
10654 - Handling of valuetypes:
10655 - When a vtype is pushed on the stack, a new temporary is created, an
10656 instruction computing its address (LDADDR) is emitted and pushed on
10657 the stack. Need to optimize cases when the vtype is used immediately as in
10658 argument passing, stloc etc.
10659 - Instead of the to_end stuff in the old JIT, simply call the function handling
10660 the values on the stack before emitting the last instruction of the bb.
10663 #endif /* DISABLE_JIT */