2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
22 #ifdef HAVE_SYS_TIME_H
30 #include <mono/utils/memcheck.h>
32 #include <mono/metadata/assembly.h>
33 #include <mono/metadata/loader.h>
34 #include <mono/metadata/tabledefs.h>
35 #include <mono/metadata/class.h>
36 #include <mono/metadata/object.h>
37 #include <mono/metadata/exception.h>
38 #include <mono/metadata/opcodes.h>
39 #include <mono/metadata/mono-endian.h>
40 #include <mono/metadata/tokentype.h>
41 #include <mono/metadata/tabledefs.h>
42 #include <mono/metadata/marshal.h>
43 #include <mono/metadata/debug-helpers.h>
44 #include <mono/metadata/mono-debug.h>
45 #include <mono/metadata/gc-internal.h>
46 #include <mono/metadata/security-manager.h>
47 #include <mono/metadata/threads-types.h>
48 #include <mono/metadata/security-core-clr.h>
49 #include <mono/metadata/monitor.h>
50 #include <mono/metadata/profiler-private.h>
51 #include <mono/metadata/profiler.h>
52 #include <mono/utils/mono-compiler.h>
53 #include <mono/metadata/mono-basic-block.h>
60 #include "jit-icalls.h"
62 #include "debugger-agent.h"
/* Inliner tuning constants — presumably branch cost weight and the IL
 * length cutoff for inline candidates; confirm against callers. */
64 #define BRANCH_COST 100
65 #define INLINE_LENGTH_LIMIT 20
/* NOTE(review): the failure macros below appear truncated in this view;
 * each records a failure on 'cfg' (exception_type/exception_message)
 * and/or jumps to exception_exit — confirm full bodies in the file. */
66 #define INLINE_FAILURE do {\
67 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE))\
70 #define CHECK_CFG_EXCEPTION do {\
71 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
74 #define METHOD_ACCESS_FAILURE do { \
75 char *method_fname = mono_method_full_name (method, TRUE); \
76 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
77 cfg->exception_type = MONO_EXCEPTION_METHOD_ACCESS; \
78 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
79 g_free (method_fname); \
80 g_free (cil_method_fname); \
81 goto exception_exit; \
83 #define FIELD_ACCESS_FAILURE do { \
84 char *method_fname = mono_method_full_name (method, TRUE); \
85 char *field_fname = mono_field_full_name (field); \
86 cfg->exception_type = MONO_EXCEPTION_FIELD_ACCESS; \
87 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
88 g_free (method_fname); \
89 g_free (field_fname); \
90 goto exception_exit; \
92 #define GENERIC_SHARING_FAILURE(opcode) do { \
93 if (cfg->generic_sharing_context) { \
94 if (cfg->verbose_level > 2) \
95 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
96 cfg->exception_type = MONO_EXCEPTION_GENERIC_SHARING_FAILED; \
97 goto exception_exit; \
101 /* Determine whether 'ins' represents a load of the 'this' argument */
102 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
104 static int ldind_to_load_membase (int opcode);
105 static int stind_to_store_membase (int opcode);
107 int mono_op_to_op_imm (int opcode);
108 int mono_op_to_op_imm_noemul (int opcode);
110 MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
111 void mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native);
112 void mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass);
114 /* helper methods signature */
115 extern MonoMethodSignature *helper_sig_class_init_trampoline;
116 extern MonoMethodSignature *helper_sig_domain_get;
117 extern MonoMethodSignature *helper_sig_generic_class_init_trampoline;
118 extern MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
119 extern MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline;
120 extern MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm;
123 * Instruction metadata
/* First expansion of mini-ops.h: per-opcode dest/src register kinds. */
131 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
132 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
138 #if SIZEOF_REGISTER == 8
143 /* keep in sync with the enum in mini.h */
146 #include "mini-ops.h"
/* Second expansion: number of source registers each opcode uses. */
151 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
152 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
154 * This should contain the index of the last sreg + 1. This is not the same
155 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
157 const gint8 ins_sreg_counts[] = {
158 #include "mini-ops.h"
/* Initialize a variable's liveness-range bookkeeping to "unused". */
163 #define MONO_INIT_VARINFO(vi,id) do { \
164 (vi)->range.first_use.pos.bid = 0xffff; \
170 mono_inst_set_src_registers (MonoInst *ins, int *regs)
/* Copy the first three entries of 'regs' into the instruction's
 * source-register fields; assumes 'regs' holds at least 3 entries. */
172 ins->sreg1 = regs [0];
173 ins->sreg2 = regs [1];
174 ins->sreg3 = regs [2];
/* Public wrappers around the vreg allocators: each returns a fresh
 * virtual register of the corresponding kind for this compile. */
178 mono_alloc_ireg (MonoCompile *cfg)
180 return alloc_ireg (cfg);
/* Allocate a fresh floating-point vreg. */
184 mono_alloc_freg (MonoCompile *cfg)
186 return alloc_freg (cfg);
/* Allocate a fresh pointer-sized vreg. */
190 mono_alloc_preg (MonoCompile *cfg)
192 return alloc_preg (cfg);
/* Allocate a fresh vreg whose kind is chosen from 'stack_type'. */
196 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
198 return alloc_dreg (cfg, stack_type);
/* Map a MonoType to the move opcode used to copy a value of that type
 * between registers (interior lines are missing in this view). */
202 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
208 switch (type->type) {
211 case MONO_TYPE_BOOLEAN:
223 case MONO_TYPE_FNPTR:
225 case MONO_TYPE_CLASS:
226 case MONO_TYPE_STRING:
227 case MONO_TYPE_OBJECT:
228 case MONO_TYPE_SZARRAY:
229 case MONO_TYPE_ARRAY:
233 #if SIZEOF_REGISTER == 8
/* Enums are handled as their underlying integral type. */
242 case MONO_TYPE_VALUETYPE:
243 if (type->data.klass->enumtype) {
244 type = mono_class_enum_basetype (type->data.klass);
247 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
250 case MONO_TYPE_TYPEDBYREF:
/* Inflated generics decay to their generic container class. */
252 case MONO_TYPE_GENERICINST:
253 type = &type->data.generic_class->container_class->byval_arg;
257 g_assert (cfg->generic_sharing_context);
260 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/* Debug helper: dump a basic block's in/out edges and its instruction
 * list to stdout, prefixed with 'msg'. */
266 mono_print_bb (MonoBasicBlock *bb, const char *msg)
271 printf ("\n%s %d: [IN: ", msg, bb->block_num);
272 for (i = 0; i < bb->in_count; ++i)
273 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
275 for (i = 0; i < bb->out_count; ++i)
276 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
278 for (tree = bb->code; tree; tree = tree->next)
279 mono_print_ins_index (-1, tree);
283 * Can't put this at the beginning, since other files reference stuff from this
/* Signal unverifiable IL: break under the debug option, else bail out. */
288 #define UNVERIFIED do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto unverified; } while (0)
/* Fetch the basic block starting at 'ip', lazily creating and
 * registering it if the offset has no block yet. */
290 #define GET_BBLOCK(cfg,tblock,ip) do { \
291 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
293 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
294 NEW_BBLOCK (cfg, (tblock)); \
295 (tblock)->cil_code = (ip); \
296 ADD_BBLOCK (cfg, (tblock)); \
300 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Emit an x86 LEA computing sr1 + (sr2 << shift) + imm into a new preg. */
301 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
302 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
303 (dest)->dreg = alloc_preg ((cfg)); \
304 (dest)->sreg1 = (sr1); \
305 (dest)->sreg2 = (sr2); \
306 (dest)->inst_imm = (imm); \
307 (dest)->backend.shift_amount = (shift); \
308 MONO_ADD_INS ((cfg)->cbb, (dest)); \
312 #if SIZEOF_REGISTER == 8
/* On 64-bit targets, sign-extend an I4 operand mixed with a PTR one so
 * the binop sees same-width sources; no-op on 32-bit (see below). */
313 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
314 /* FIXME: Need to add many more cases */ \
315 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
317 int dr = alloc_preg (cfg); \
318 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
319 (ins)->sreg2 = widen->dreg; \
323 #define ADD_WIDEN_OP(ins, arg1, arg2)
/* Pop two stack values, emit the typed binary op, push the result. */
326 #define ADD_BINOP(op) do { \
327 MONO_INST_NEW (cfg, ins, (op)); \
329 ins->sreg1 = sp [0]->dreg; \
330 ins->sreg2 = sp [1]->dreg; \
331 type_from_op (ins, sp [0], sp [1]); \
333 /* Have to insert a widening op */ \
334 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
335 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
336 MONO_ADD_INS ((cfg)->cbb, (ins)); \
337 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
340 #define ADD_UNOP(op) do { \
341 MONO_INST_NEW (cfg, ins, (op)); \
343 ins->sreg1 = sp [0]->dreg; \
344 type_from_op (ins, sp [0], NULL); \
346 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
347 MONO_ADD_INS ((cfg)->cbb, (ins)); \
348 *sp++ = mono_decompose_opcode (cfg, ins); \
351 #define ADD_BINCOND(next_block) do { \
354 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
355 cmp->sreg1 = sp [0]->dreg; \
356 cmp->sreg2 = sp [1]->dreg; \
357 type_from_op (cmp, sp [0], sp [1]); \
359 type_from_op (ins, sp [0], sp [1]); \
360 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
361 GET_BBLOCK (cfg, tblock, target); \
362 link_bblock (cfg, bblock, tblock); \
363 ins->inst_true_bb = tblock; \
364 if ((next_block)) { \
365 link_bblock (cfg, bblock, (next_block)); \
366 ins->inst_false_bb = (next_block); \
367 start_new_bblock = 1; \
369 GET_BBLOCK (cfg, tblock, ip); \
370 link_bblock (cfg, bblock, tblock); \
371 ins->inst_false_bb = tblock; \
372 start_new_bblock = 2; \
374 if (sp != stack_start) { \
375 handle_stack_args (cfg, stack_start, sp - stack_start); \
376 CHECK_UNVERIFIABLE (cfg); \
378 MONO_ADD_INS (bblock, cmp); \
379 MONO_ADD_INS (bblock, ins); \
383 * link_bblock: Links two basic blocks
385 * links two basic blocks in the control flow graph, the 'from'
386 * argument is the starting block and the 'to' argument is the block
387 * the control flow ends to after 'from'.
390 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
392 MonoBasicBlock **newa;
/* Optional edge tracing for debugging the CFG construction. */
396 if (from->cil_code) {
398 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
400 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
403 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
405 printf ("edge from entry to exit\n");
/* Skip if the out-edge already exists (edges are kept unique). */
410 for (i = 0; i < from->out_count; ++i) {
411 if (to == from->out_bb [i]) {
/* Grow from->out_bb by one; arrays live in the compile mempool. */
417 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
418 for (i = 0; i < from->out_count; ++i) {
419 newa [i] = from->out_bb [i];
/* Mirror the same dedup-and-grow for the in-edge on 'to'. */
427 for (i = 0; i < to->in_count; ++i) {
428 if (from == to->in_bb [i]) {
434 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
435 for (i = 0; i < to->in_count; ++i) {
436 newa [i] = to->in_bb [i];
/* Public wrapper around the static link_bblock (). */
445 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
447 link_bblock (cfg, from, to);
451 * mono_find_block_region:
453 * We mark each basic block with a region ID. We use that to avoid BB
454 * optimizations when blocks are in different regions.
457 * A region token that encodes where this region is, and information
458 * about the clause owner for this block.
460 * The region encodes the try/catch/filter clause that owns this block
461 * as well as the type. -1 is a special value that represents a block
462 * that is in none of try/catch/filter.
465 mono_find_block_region (MonoCompile *cfg, int offset)
467 MonoMethodHeader *header = cfg->header;
468 MonoExceptionClause *clause;
/* Walk the clauses in order; the first match wins. */
471 for (i = 0; i < header->num_clauses; ++i) {
472 clause = &header->clauses [i];
/* Inside a filter expression (between filter_offset and handler_offset). */
473 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
474 (offset < (clause->handler_offset)))
475 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
/* Inside the handler itself: finally/fault/catch. */
477 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
478 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
479 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
480 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
481 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
483 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
/* Inside the protected (try) range of the clause. */
486 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
487 return ((i + 1) << 8) | clause->flags;
/* Collect (as a GList) the clauses of the given 'type' whose protected
 * region contains 'ip' but not 'target' — i.e. the handlers crossed
 * when control transfers from 'ip' to 'target'. */
494 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
496 MonoMethodHeader *header = cfg->header;
497 MonoExceptionClause *clause;
501 for (i = 0; i < header->num_clauses; ++i) {
502 clause = &header->clauses [i];
503 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
504 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
505 if (clause->flags == type)
506 res = g_list_append (res, clause);
/* Get or lazily create the per-EH-region stack-pointer spill variable,
 * cached in cfg->spvars keyed by region id. */
513 mono_create_spvar_for_region (MonoCompile *cfg, int region)
517 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
521 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
522 /* prevent it from being register allocated */
523 var->flags |= MONO_INST_INDIRECT;
525 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/* Look up (without creating) the exception variable for the handler at
 * the given IL offset; NULL when none has been created yet. */
529 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
531 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/* Get or lazily create the object-typed exception variable for the
 * handler at 'offset', cached in cfg->exvars. */
535 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
539 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
543 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
544 /* prevent it from being register allocated */
545 var->flags |= MONO_INST_INDIRECT;
547 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
553 * Returns the type used in the eval stack when @type is loaded.
554 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
557 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
561 inst->klass = klass = mono_class_from_mono_type (type);
563 inst->type = STACK_MP;
568 switch (type->type) {
570 inst->type = STACK_INV;
574 case MONO_TYPE_BOOLEAN:
580 inst->type = STACK_I4;
585 case MONO_TYPE_FNPTR:
586 inst->type = STACK_PTR;
588 case MONO_TYPE_CLASS:
589 case MONO_TYPE_STRING:
590 case MONO_TYPE_OBJECT:
591 case MONO_TYPE_SZARRAY:
592 case MONO_TYPE_ARRAY:
593 inst->type = STACK_OBJ;
597 inst->type = STACK_I8;
601 inst->type = STACK_R8;
/* Enums are loaded as their underlying integral type. */
603 case MONO_TYPE_VALUETYPE:
604 if (type->data.klass->enumtype) {
605 type = mono_class_enum_basetype (type->data.klass);
609 inst->type = STACK_VTYPE;
612 case MONO_TYPE_TYPEDBYREF:
613 inst->klass = mono_defaults.typed_reference_class;
614 inst->type = STACK_VTYPE;
616 case MONO_TYPE_GENERICINST:
617 type = &type->data.generic_class->container_class->byval_arg;
620 case MONO_TYPE_MVAR :
621 /* FIXME: all the arguments must be references for now,
622 * later look inside cfg and see if the arg num is
625 g_assert (cfg->generic_sharing_context);
626 inst->type = STACK_OBJ;
629 g_error ("unknown type 0x%02x in eval stack type", type->type);
634 * The following tables are used to quickly validate the IL code in type_from_op ().
/* Result type of a numeric binop for each (lhs, rhs) pair of STACK_*
 * kinds; STACK_INV marks an illegal combination. */
637 bin_num_table [STACK_MAX] [STACK_MAX] = {
638 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
639 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
640 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
641 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
642 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
643 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
644 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
645 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* NOTE(review): the row below looks like the body of neg_table (used
 * for CEE_NEG in type_from_op); its declaration line is not visible
 * here — confirm against the full file. */
650 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
653 /* reduce the size of this table */
/* Result type of integer-only binops (and/or/xor etc.). */
655 bin_int_table [STACK_MAX] [STACK_MAX] = {
656 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
657 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
658 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
659 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
660 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
661 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
662 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
663 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Comparison legality: non-zero means the pair is comparable. */
667 bin_comp_table [STACK_MAX] [STACK_MAX] = {
668 /* Inv i L p F & O vt */
670 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
671 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
672 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
673 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
674 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
675 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
676 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
679 /* reduce the size of this table */
/* Result type for shift ops: the shifted value's type wins. */
681 shift_table [STACK_MAX] [STACK_MAX] = {
682 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
683 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
684 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
685 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
686 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
687 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
688 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
689 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
693 * Tables to map from the non-specific opcode to the matching
694 * type-specific opcode.
696 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
698 binops_op_map [STACK_MAX] = {
699 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
702 /* handles from CEE_NEG to CEE_CONV_U8 */
704 unops_op_map [STACK_MAX] = {
705 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
708 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
710 ovfops_op_map [STACK_MAX] = {
711 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
714 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
716 ovf2ops_op_map [STACK_MAX] = {
717 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
720 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
722 ovf3ops_op_map [STACK_MAX] = {
723 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
726 /* handles from CEE_BEQ to CEE_BLT_UN */
728 beqops_op_map [STACK_MAX] = {
729 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
732 /* handles from CEE_CEQ to CEE_CLT_UN */
734 ceqops_op_map [STACK_MAX] = {
735 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
739 * Sets ins->type (the type on the eval stack) according to the
740 * type of the opcode and the arguments to it.
741 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
743 * FIXME: this function sets ins->type unconditionally in some cases, but
744 * it should set it to invalid for some types (a conv.x on an object)
/* The *_op_map tables turn the generic opcode into its I/L/P/F variant
 * by adding a per-stack-type delta; the *_table tables validate the
 * operand combination (interior case labels missing in this view). */
747 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
749 switch (ins->opcode) {
756 /* FIXME: check unverifiable args for STACK_MP */
757 ins->type = bin_num_table [src1->type] [src2->type];
758 ins->opcode += binops_op_map [ins->type];
765 ins->type = bin_int_table [src1->type] [src2->type];
766 ins->opcode += binops_op_map [ins->type];
771 ins->type = shift_table [src1->type] [src2->type];
772 ins->opcode += binops_op_map [ins->type];
/* Comparisons: pick the compare width (L/F/I) from operand types. */
777 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
778 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
779 ins->opcode = OP_LCOMPARE;
780 else if (src1->type == STACK_R8)
781 ins->opcode = OP_FCOMPARE;
783 ins->opcode = OP_ICOMPARE;
785 case OP_ICOMPARE_IMM:
786 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
787 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
788 ins->opcode = OP_LCOMPARE_IMM;
800 ins->opcode += beqops_op_map [src1->type];
803 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
804 ins->opcode += ceqops_op_map [src1->type];
810 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
811 ins->opcode += ceqops_op_map [src1->type];
815 ins->type = neg_table [src1->type];
816 ins->opcode += unops_op_map [ins->type];
819 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
820 ins->type = src1->type;
822 ins->type = STACK_INV;
823 ins->opcode += unops_op_map [ins->type];
829 ins->type = STACK_I4;
830 ins->opcode += unops_op_map [src1->type];
833 ins->type = STACK_R8;
834 switch (src1->type) {
837 ins->opcode = OP_ICONV_TO_R_UN;
840 ins->opcode = OP_LCONV_TO_R_UN;
844 case CEE_CONV_OVF_I1:
845 case CEE_CONV_OVF_U1:
846 case CEE_CONV_OVF_I2:
847 case CEE_CONV_OVF_U2:
848 case CEE_CONV_OVF_I4:
849 case CEE_CONV_OVF_U4:
850 ins->type = STACK_I4;
851 ins->opcode += ovf3ops_op_map [src1->type];
853 case CEE_CONV_OVF_I_UN:
854 case CEE_CONV_OVF_U_UN:
855 ins->type = STACK_PTR;
856 ins->opcode += ovf2ops_op_map [src1->type];
858 case CEE_CONV_OVF_I1_UN:
859 case CEE_CONV_OVF_I2_UN:
860 case CEE_CONV_OVF_I4_UN:
861 case CEE_CONV_OVF_U1_UN:
862 case CEE_CONV_OVF_U2_UN:
863 case CEE_CONV_OVF_U4_UN:
864 ins->type = STACK_I4;
865 ins->opcode += ovf2ops_op_map [src1->type];
868 ins->type = STACK_PTR;
869 switch (src1->type) {
871 ins->opcode = OP_ICONV_TO_U;
875 #if SIZEOF_REGISTER == 8
876 ins->opcode = OP_LCONV_TO_U;
878 ins->opcode = OP_MOVE;
882 ins->opcode = OP_LCONV_TO_U;
885 ins->opcode = OP_FCONV_TO_U;
891 ins->type = STACK_I8;
892 ins->opcode += unops_op_map [src1->type];
894 case CEE_CONV_OVF_I8:
895 case CEE_CONV_OVF_U8:
896 ins->type = STACK_I8;
897 ins->opcode += ovf3ops_op_map [src1->type];
899 case CEE_CONV_OVF_U8_UN:
900 case CEE_CONV_OVF_I8_UN:
901 ins->type = STACK_I8;
902 ins->opcode += ovf2ops_op_map [src1->type];
906 ins->type = STACK_R8;
907 ins->opcode += unops_op_map [src1->type];
910 ins->type = STACK_R8;
914 ins->type = STACK_I4;
915 ins->opcode += ovfops_op_map [src1->type];
920 ins->type = STACK_PTR;
921 ins->opcode += ovfops_op_map [src1->type];
929 ins->type = bin_num_table [src1->type] [src2->type];
930 ins->opcode += ovfops_op_map [src1->type];
931 if (ins->type == STACK_R8)
932 ins->type = STACK_INV;
/* Memory-load opcodes: result type is fixed by the load width. */
934 case OP_LOAD_MEMBASE:
935 ins->type = STACK_PTR;
937 case OP_LOADI1_MEMBASE:
938 case OP_LOADU1_MEMBASE:
939 case OP_LOADI2_MEMBASE:
940 case OP_LOADU2_MEMBASE:
941 case OP_LOADI4_MEMBASE:
942 case OP_LOADU4_MEMBASE:
943 ins->type = STACK_PTR;
945 case OP_LOADI8_MEMBASE:
946 ins->type = STACK_I8;
948 case OP_LOADR4_MEMBASE:
949 case OP_LOADR8_MEMBASE:
950 ins->type = STACK_R8;
953 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
957 if (ins->type == STACK_MP)
958 ins->klass = mono_defaults.object_class;
/* NOTE(review): the two fragments below look like the tail of a
 * load-result-type table and the head of param_table; their
 * declarations are not visible here — confirm against the full file. */
963 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
969 param_table [STACK_MAX] [STACK_MAX] = {
/* Sanity-check that argument instructions match the callee signature
 * ('this' plus each parameter's byref/kind) — presumably a debug-only
 * verifier; confirm callers. */
974 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
978 switch (args->type) {
988 for (i = 0; i < sig->param_count; ++i) {
989 switch (args [i].type) {
993 if (!sig->params [i]->byref)
997 if (sig->params [i]->byref)
999 switch (sig->params [i]->type) {
1000 case MONO_TYPE_CLASS:
1001 case MONO_TYPE_STRING:
1002 case MONO_TYPE_OBJECT:
1003 case MONO_TYPE_SZARRAY:
1004 case MONO_TYPE_ARRAY:
1011 if (sig->params [i]->byref)
1013 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1022 /*if (!param_table [args [i].type] [sig->params [i]->type])
1030 * When we need a pointer to the current domain many times in a method, we
1031 * call mono_domain_get() once and we store the result in a local variable.
1032 * This function returns the variable that represents the MonoDomain*.
1034 inline static MonoInst *
1035 mono_get_domainvar (MonoCompile *cfg)
/* Lazily create the cached variable on first use. */
1037 if (!cfg->domainvar)
1038 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1039 return cfg->domainvar;
1043 * The got_var contains the address of the Global Offset Table when AOT
1047 mono_get_got_var (MonoCompile *cfg)
1049 #ifdef MONO_ARCH_NEED_GOT_VAR
/* Only AOT compilation needs a GOT variable. */
1050 if (!cfg->compile_aot)
1052 if (!cfg->got_var) {
1053 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1055 return cfg->got_var;
/* Return (lazily creating) the variable holding the runtime generic
 * context; only valid when compiling with generic sharing. */
1062 mono_get_vtable_var (MonoCompile *cfg)
1064 g_assert (cfg->generic_sharing_context);
1066 if (!cfg->rgctx_var) {
1067 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1068 /* force the var to be stack allocated */
1069 cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
1072 return cfg->rgctx_var;
/* Map an eval-stack type (STACK_*) back to a representative MonoType. */
1076 type_from_stack_type (MonoInst *ins) {
1077 switch (ins->type) {
1078 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1079 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1080 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1081 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1083 return &ins->klass->this_arg;
1084 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1085 case STACK_VTYPE: return &ins->klass->byval_arg;
1087 g_error ("stack type %d to monotype not handled\n", ins->type);
/* Map a MonoType to the eval-stack type (STACK_*) it occupies after
 * unwrapping enums via mono_type_get_underlying_type (). */
1092 static G_GNUC_UNUSED int
1093 type_to_stack_type (MonoType *t)
1095 t = mono_type_get_underlying_type (t);
1099 case MONO_TYPE_BOOLEAN:
1102 case MONO_TYPE_CHAR:
1109 case MONO_TYPE_FNPTR:
1111 case MONO_TYPE_CLASS:
1112 case MONO_TYPE_STRING:
1113 case MONO_TYPE_OBJECT:
1114 case MONO_TYPE_SZARRAY:
1115 case MONO_TYPE_ARRAY:
1123 case MONO_TYPE_VALUETYPE:
1124 case MONO_TYPE_TYPEDBYREF:
/* Generic instances are value types iff their definition is. */
1126 case MONO_TYPE_GENERICINST:
1127 if (mono_type_generic_inst_is_valuetype (t))
1133 g_assert_not_reached ();
/* Map an array-access IL opcode (ldelem/stelem family) to the element
 * class it operates on; the case labels for most returns are missing
 * in this view. */
1140 array_access_to_klass (int opcode)
1144 return mono_defaults.byte_class;
1146 return mono_defaults.uint16_class;
1149 return mono_defaults.int_class;
1152 return mono_defaults.sbyte_class;
1155 return mono_defaults.int16_class;
1158 return mono_defaults.int32_class;
1160 return mono_defaults.uint32_class;
1163 return mono_defaults.int64_class;
1166 return mono_defaults.single_class;
1169 return mono_defaults.double_class;
1170 case CEE_LDELEM_REF:
1171 case CEE_STELEM_REF:
1172 return mono_defaults.object_class;
1174 g_assert_not_reached ();
1180 * We try to share variables when possible
1183 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1188 /* inlining can result in deeper stacks */
1189 if (slot >= cfg->header->max_stack)
1190 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* Cache key: (stack slot, stack type) — one shared var per pair. */
1192 pos = ins->type - 1 + slot * STACK_MAX;
1194 switch (ins->type) {
1201 if ((vnum = cfg->intvars [pos]))
1202 return cfg->varinfo [vnum];
1203 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1204 cfg->intvars [pos] = res->inst_c0;
1207 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* Record an image/token pair under 'key' in cfg->token_info_hash so
 * the AOT compiler can later resolve the item from metadata alone. */
1213 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1216 * Don't use this if a generic_context is set, since that means AOT can't
1217 * look up the method using just the image+token.
1218 * table == 0 means this is a reference made from a wrapper.
1220 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1221 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1222 jump_info_token->image = image;
1223 jump_info_token->token = token;
1224 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1229 * This function is called to handle items that are left on the evaluation stack
1230 * at basic block boundaries. What happens is that we save the values to local variables
1231 * and we reload them later when first entering the target basic block (with the
1232 * handle_loaded_temps () function).
1233 * A single joint point will use the same variables (stored in the array bb->out_stack or
1234 * bb->in_stack, if the basic block is before or after the joint point).
1236 * This function needs to be called _before_ emitting the last instruction of
1237 * the bb (i.e. before emitting a branch).
1238 * If the stack merge fails at a join point, cfg->unverifiable is set.
1241 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1244 MonoBasicBlock *bb = cfg->cbb;
1245 MonoBasicBlock *outb;
1246 MonoInst *inst, **locals;
1251 if (cfg->verbose_level > 3)
1252 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* First exit from this bblock: pick (or reuse a successor's) set of
 * temp variables to hold the live stack values. */
1253 if (!bb->out_scount) {
1254 bb->out_scount = count;
1255 //printf ("bblock %d has out:", bb->block_num);
1257 for (i = 0; i < bb->out_count; ++i) {
1258 outb = bb->out_bb [i];
1259 /* exception handlers are linked, but they should not be considered for stack args */
1260 if (outb->flags & BB_EXCEPTION_HANDLER)
1262 //printf (" %d", outb->block_num);
1263 if (outb->in_stack) {
1265 bb->out_stack = outb->in_stack;
1271 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1272 for (i = 0; i < count; ++i) {
1274 * try to reuse temps already allocated for this purpouse, if they occupy the same
1275 * stack slot and if they are of the same type.
1276 * This won't cause conflicts since if 'local' is used to
1277 * store one of the values in the in_stack of a bblock, then
1278 * the same variable will be used for the same outgoing stack
1280 * This doesn't work when inlining methods, since the bblocks
1281 * in the inlined methods do not inherit their in_stack from
1282 * the bblock they are inlined to. See bug #58863 for an
1285 if (cfg->inlined_method)
1286 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1288 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate our out_stack to successors without one; a successor
 * whose in_scount disagrees makes the method unverifiable. */
1293 for (i = 0; i < bb->out_count; ++i) {
1294 outb = bb->out_bb [i];
1295 /* exception handlers are linked, but they should not be considered for stack args */
1296 if (outb->flags & BB_EXCEPTION_HANDLER)
1298 if (outb->in_scount) {
1299 if (outb->in_scount != bb->out_scount) {
1300 cfg->unverifiable = TRUE;
1303 continue; /* check they are the same locals */
1305 outb->in_scount = count;
1306 outb->in_stack = bb->out_stack;
1309 locals = bb->out_stack;
/* Spill each live stack value into its temp and replace the stack
 * entry with a load of that temp. */
1311 for (i = 0; i < count; ++i) {
1312 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1313 inst->cil_code = sp [i]->cil_code;
1314 sp [i] = locals [i];
1315 if (cfg->verbose_level > 3)
1316 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1320 * It is possible that the out bblocks already have in_stack assigned, and
1321 * the in_stacks differ. In this case, we will store to all the different
1328 /* Find a bblock which has a different in_stack */
1330 while (bindex < bb->out_count) {
1331 outb = bb->out_bb [bindex];
1332 /* exception handlers are linked, but they should not be considered for stack args */
1333 if (outb->flags & BB_EXCEPTION_HANDLER) {
1337 if (outb->in_stack != locals) {
1338 for (i = 0; i < count; ++i) {
1339 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1340 inst->cil_code = sp [i]->cil_code;
1341 sp [i] = locals [i];
1342 if (cfg->verbose_level > 3)
1343 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1345 locals = outb->in_stack;
1354 /* Emit code which loads interface_offsets [klass->interface_id]
1355 * The array is stored in memory before vtable.
1358 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
/* Under AOT the adjusted IID is a patchable constant; otherwise the
 * negative slot offset can be computed at compile time. */
1360 if (cfg->compile_aot) {
1361 int ioffset_reg = alloc_preg (cfg);
1362 int iid_reg = alloc_preg (cfg);
1364 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1365 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1366 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
1369 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
/*
 * Emits code that sets @intf_bit_reg to a nonzero value iff the bit for
 * @klass->interface_id is set in the interface bitmap located at
 * @base_reg + @offset (works for both MonoClass and MonoVTable bitmaps;
 * callers pass the appropriate offset).
 */
1374 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1376 int ibitmap_reg = alloc_preg (cfg);
1377 #ifdef COMPRESSED_INTERFACE_BITMAP
/* Compressed bitmap: the test cannot be done inline, so call the
 * mono_class_interface_match icall with the bitmap pointer and the iid. */
1379 MonoInst *res, *ins;
1380 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1381 MONO_ADD_INS (cfg->cbb, ins);
1383 if (cfg->compile_aot)
1384 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
1386 EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
1387 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1388 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
1390 int ibitmap_byte_reg = alloc_preg (cfg);
1392 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
1394 if (cfg->compile_aot) {
/* AOT: iid is a runtime constant, so compute byte index (iid >> 3) and
 * bit mask (1 << (iid & 7)) with emitted IR instead of folding them here. */
1395 int iid_reg = alloc_preg (cfg);
1396 int shifted_iid_reg = alloc_preg (cfg);
1397 int ibitmap_byte_address_reg = alloc_preg (cfg);
1398 int masked_iid_reg = alloc_preg (cfg);
1399 int iid_one_bit_reg = alloc_preg (cfg);
1400 int iid_bit_reg = alloc_preg (cfg);
1401 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1402 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1403 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1404 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1405 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1406 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1407 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1408 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT: both the byte offset and the bit mask fold to immediates. */
1410 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1411 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1417 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1418 * stored in "klass_reg" implements the interface "klass".
/* Thin wrapper over mini_emit_interface_bitmap_check using MonoClass's bitmap. */
1421 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1423 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
1427 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1428 * stored in "vtable_reg" implements the interface "klass".
/* Thin wrapper over mini_emit_interface_bitmap_check using MonoVTable's bitmap. */
1431 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1433 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
1437 * Emit code which checks whether the interface id of @klass is smaller
1438 * than the value given by max_iid_reg.
/*
 * If @false_target is non-NULL, branches there when the check fails;
 * otherwise throws InvalidCastException (the else line is elided in this view).
 */
1441 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1442 MonoBasicBlock *false_target)
1444 if (cfg->compile_aot) {
1445 int iid_reg = alloc_preg (cfg);
1446 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1447 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1450 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1452 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1454 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1457 /* Same as above, but obtains max_iid from a vtable */
1459 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1460 MonoBasicBlock *false_target)
1462 int max_iid_reg = alloc_preg (cfg);
/* max_interface_id is a 16-bit field, hence the unsigned 2-byte load. */
1464 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1465 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1468 /* Same as above, but obtains max_iid from a klass */
1470 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1471 MonoBasicBlock *false_target)
1473 int max_iid_reg = alloc_preg (cfg);
/* max_interface_id is a 16-bit field, hence the unsigned 2-byte load. */
1475 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1476 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * Emits an isinst-style subtype test: checks whether the class in @klass_reg
 * derives from @klass by comparing the supertypes table entry at
 * klass->idepth - 1 against @klass (or @klass_ins when the class is only
 * known at runtime). Branches to @true_target on match; @false_target is
 * used when the idepth guard fails.
 */
1480 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1482 int idepth_reg = alloc_preg (cfg);
1483 int stypes_reg = alloc_preg (cfg);
1484 int stype = alloc_preg (cfg);
/* Supertype tables shorter than MONO_DEFAULT_SUPERTABLE_SIZE are always
 * fully allocated, so the idepth guard is only needed beyond that. */
1486 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1487 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1488 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1489 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1491 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1492 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
/* Three ways to materialize the expected class: runtime inst, AOT patch,
 * or an immediate pointer constant for plain JIT. */
1494 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1495 } else if (cfg->compile_aot) {
1496 int const_reg = alloc_preg (cfg);
1497 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1498 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1500 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1502 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/* Convenience wrapper: subtype test where @klass is known at compile time. */
1506 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1508 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
/*
 * Emits an interface cast check against the vtable in @vtable_reg:
 * first bounds-checks the interface id, then tests the interface bitmap.
 * With targets: branches (isinst); without (elided else path): throws
 * InvalidCastException (castclass).
 */
1512 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1514 int intf_reg = alloc_preg (cfg);
1516 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1517 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1518 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1520 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1522 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1526 * Variant of the above that takes a register to the class, not the vtable.
1529 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1531 int intf_bit_reg = alloc_preg (cfg);
1533 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1534 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1535 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
/* Branch on success when a target is supplied; otherwise (elided else)
 * a zero bit means the cast fails and we throw. */
1537 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1539 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * Emits an exact class-equality check: compares @klass_reg against @klass
 * (via @klass_inst when only known at runtime, an AOT patch, or an
 * immediate) and throws InvalidCastException on mismatch.
 */
1543 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1546 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1547 } else if (cfg->compile_aot) {
1548 int const_reg = alloc_preg (cfg);
1549 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1550 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1552 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1554 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Convenience wrapper: exact class check with @klass known at compile time. */
1558 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1560 return mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
/*
 * Compares @klass_reg against @klass and branches to @target using
 * @branch_op (e.g. OP_PBEQ / OP_PBNE_UN) instead of throwing.
 */
1564 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1566 if (cfg->compile_aot) {
1567 int const_reg = alloc_preg (cfg);
1568 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1569 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1571 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1573 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
/* Forward declaration: mini_emit_castclass_inst below recurses into
 * mini_emit_castclass for arrays of arrays. */
1577 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/*
 * Emits the body of a castclass check for the class in @klass_reg against
 * @klass; throws InvalidCastException on failure. Handles array element
 * type checks (including the enum/underlying-type equivalences the CLI
 * spec allows) and falls through to a supertypes-table check for ordinary
 * classes. @obj_reg == -1 skips the vector (SZARRAY bounds == NULL) check.
 */
1580 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1583 int rank_reg = alloc_preg (cfg);
1584 int eclass_reg = alloc_preg (cfg);
1586 g_assert (!klass_inst);
/* Array case (guard condition elided in this view): rank must match
 * exactly, then the element classes are compared. */
1587 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
1588 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1589 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1590 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1591 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
1592 if (klass->cast_class == mono_defaults.object_class) {
/* object[] also accepts enum-typed element classes whose parent is
 * System.Enum's parent chain — hence the extra parent check. */
1593 int parent_reg = alloc_preg (cfg);
1594 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
1595 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1596 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1597 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1598 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1599 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1600 } else if (klass->cast_class == mono_defaults.enum_class) {
1601 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1602 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1603 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1605 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1606 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1609 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1610 /* Check that the object is a vector too */
1611 int bounds_reg = alloc_preg (cfg);
1612 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
1613 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1614 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-array case: walk the supertypes table, same shape as
 * mini_emit_isninst_cast_inst but throwing instead of branching. */
1617 int idepth_reg = alloc_preg (cfg);
1618 int stypes_reg = alloc_preg (cfg);
1619 int stype = alloc_preg (cfg);
1621 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1622 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1623 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1624 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1626 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1627 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1628 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
/* Convenience wrapper: castclass with @klass known at compile time. */
1633 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1635 return mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/*
 * Emits inline code to set @size bytes at @destreg + @offset to @val.
 * Small aligned sizes use a single store-immediate; larger/unaligned
 * sizes fall back to a sequence of register stores (widest first on
 * platforms that permit unaligned access). Only val == 0 is supported
 * (asserted below).
 */
1639 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1643 g_assert (val == 0);
/* Fast path: size fits one naturally-aligned store immediate. */
1648 if ((size <= 4) && (size <= align)) {
1651 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1654 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1657 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1659 #if SIZEOF_REGISTER == 8
1661 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* General path: materialize @val in a register once, then store repeatedly. */
1667 val_reg = alloc_preg (cfg);
1669 if (SIZEOF_REGISTER == 8)
1670 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1672 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
1675 /* This could be optimized further if necessary */
1677 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1684 #if !NO_UNALIGNED_ACCESS
1685 if (SIZEOF_REGISTER == 8) {
/* Peel a 4-byte store first to reach 8-byte alignment, then use 8-byte
 * stores for the bulk (loop structure partially elided in this view). */
1687 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1692 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
1700 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1705 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1710 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1716 #endif /* DISABLE_JIT */
/*
 * Emits inline code to copy @size bytes from @srcreg + @soffset to
 * @destreg + @doffset, using the widest load/store pairs the alignment
 * (and NO_UNALIGNED_ACCESS) permits, tailing off with 4/2/1-byte copies.
 */
1719 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1726 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1727 g_assert (size < 10000);
/* Unaligned fallback: byte-by-byte copy. */
1730 /* This could be optimized further if necessary */
1732 cur_reg = alloc_preg (cfg);
1733 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1734 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1741 #if !NO_UNALIGNED_ACCESS
1742 if (SIZEOF_REGISTER == 8) {
/* 8-byte chunks on 64-bit (loop structure partially elided in this view). */
1744 cur_reg = alloc_preg (cfg);
1745 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1746 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
/* Remaining tail: 4-, 2-, then 1-byte copies. */
1755 cur_reg = alloc_preg (cfg);
1756 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1757 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1763 cur_reg = alloc_preg (cfg);
1764 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1765 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1771 cur_reg = alloc_preg (cfg);
1772 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1773 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * Maps a method return @type to the matching call opcode family:
 * base CALL / VOIDCALL / LCALL / FCALL / VCALL, with _REG (indirect,
 * @calli) and VIRT (@virt) variants. Byref and enum/generic-inst types
 * are reduced to their underlying representation first.
 */
1783 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
/* Byref returns are pointer-sized (guard condition elided in this view). */
1786 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1789 type = mini_get_basic_type_from_generic (gsctx, type);
1790 switch (type->type) {
1791 case MONO_TYPE_VOID:
1792 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
1795 case MONO_TYPE_BOOLEAN:
1798 case MONO_TYPE_CHAR:
1801 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1805 case MONO_TYPE_FNPTR:
1806 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1807 case MONO_TYPE_CLASS:
1808 case MONO_TYPE_STRING:
1809 case MONO_TYPE_OBJECT:
1810 case MONO_TYPE_SZARRAY:
1811 case MONO_TYPE_ARRAY:
1812 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1815 return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
1818 return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
1819 case MONO_TYPE_VALUETYPE:
1820 if (type->data.klass->enumtype) {
/* Enums re-enter the switch with their underlying integer type
 * (the goto/loop-back line is elided in this view). */
1821 type = mono_class_enum_basetype (type->data.klass);
1824 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1825 case MONO_TYPE_TYPEDBYREF:
1826 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1827 case MONO_TYPE_GENERICINST:
1828 type = &type->data.generic_class->container_class->byval_arg;
1831 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
1837 * target_type_is_incompatible:
1838 * @cfg: MonoCompile context
1840 * Check that the item @arg on the evaluation stack can be stored
1841 * in the target type (can be a local, or field, etc).
1842 * The cfg arg can be used to check if we need verification or just
1845 * Returns: non-0 value if arg can't be stored on a target.
1848 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1850 MonoType *simple_type;
1853 if (target->byref) {
1854 /* FIXME: check that the pointed to types match */
1855 if (arg->type == STACK_MP)
1856 return arg->klass != mono_class_from_mono_type (target);
1857 if (arg->type == STACK_PTR)
/* Non-byref: compare against the underlying (enum-stripped) type. */
1862 simple_type = mono_type_get_underlying_type (target);
1863 switch (simple_type->type) {
1864 case MONO_TYPE_VOID:
1868 case MONO_TYPE_BOOLEAN:
1871 case MONO_TYPE_CHAR:
1874 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1878 /* STACK_MP is needed when setting pinned locals */
1879 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1884 case MONO_TYPE_FNPTR:
1885 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1888 case MONO_TYPE_CLASS:
1889 case MONO_TYPE_STRING:
1890 case MONO_TYPE_OBJECT:
1891 case MONO_TYPE_SZARRAY:
1892 case MONO_TYPE_ARRAY:
1893 if (arg->type != STACK_OBJ)
1895 /* FIXME: check type compatibility */
1899 if (arg->type != STACK_I8)
1904 if (arg->type != STACK_R8)
1907 case MONO_TYPE_VALUETYPE:
/* Value types must match exactly by class, not just by stack kind. */
1908 if (arg->type != STACK_VTYPE)
1910 klass = mono_class_from_mono_type (simple_type);
1911 if (klass != arg->klass)
1914 case MONO_TYPE_TYPEDBYREF:
1915 if (arg->type != STACK_VTYPE)
1917 klass = mono_class_from_mono_type (simple_type);
1918 if (klass != arg->klass)
1921 case MONO_TYPE_GENERICINST:
1922 if (mono_type_generic_inst_is_valuetype (simple_type)) {
1923 if (arg->type != STACK_VTYPE)
1925 klass = mono_class_from_mono_type (simple_type);
1926 if (klass != arg->klass)
1930 if (arg->type != STACK_OBJ)
1932 /* FIXME: check type compatibility */
1936 case MONO_TYPE_MVAR:
1937 /* FIXME: all the arguments must be references for now,
1938 * later look inside cfg and see if the arg num is
1939 * really a reference
1941 g_assert (cfg->generic_sharing_context);
1942 if (arg->type != STACK_OBJ)
1946 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
1952 * Prepare arguments for passing to a function call.
1953 * Return a non-zero value if the arguments can't be passed to the given
1955 * The type checks are not yet complete and some conversions may need
1956 * casts on 32 or 64 bit architectures.
1958 * FIXME: implement this using target_type_is_incompatible ()
1961 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
1963 MonoType *simple_type;
/* 'this' (args [0]) must be an object, managed pointer or native pointer
 * (the hasthis guard line is elided in this view). */
1967 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
1971 for (i = 0; i < sig->param_count; ++i) {
1972 if (sig->params [i]->byref) {
1973 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
1977 simple_type = sig->params [i];
1978 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
/* handle_enum-style reduction: enum and generic-inst cases below loop
 * back into this switch (goto/label lines elided in this view). */
1980 switch (simple_type->type) {
1981 case MONO_TYPE_VOID:
1986 case MONO_TYPE_BOOLEAN:
1989 case MONO_TYPE_CHAR:
1992 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
1998 case MONO_TYPE_FNPTR:
1999 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2002 case MONO_TYPE_CLASS:
2003 case MONO_TYPE_STRING:
2004 case MONO_TYPE_OBJECT:
2005 case MONO_TYPE_SZARRAY:
2006 case MONO_TYPE_ARRAY:
2007 if (args [i]->type != STACK_OBJ)
2012 if (args [i]->type != STACK_I8)
2017 if (args [i]->type != STACK_R8)
2020 case MONO_TYPE_VALUETYPE:
2021 if (simple_type->data.klass->enumtype) {
2022 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2025 if (args [i]->type != STACK_VTYPE)
2028 case MONO_TYPE_TYPEDBYREF:
2029 if (args [i]->type != STACK_VTYPE)
2032 case MONO_TYPE_GENERICINST:
2033 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2037 g_error ("unknown type 0x%02x in check_call_signature",
/* Maps an OP_*CALLVIRT opcode to its direct-call counterpart
 * (used when a virtual call can be statically dispatched). */
2045 callvirt_to_call (int opcode)
2050 case OP_VOIDCALLVIRT:
2059 g_assert_not_reached ();
/* Maps an OP_*CALLVIRT opcode to its _MEMBASE counterpart (call through
 * a function pointer loaded from memory, e.g. a vtable slot). */
2066 callvirt_to_call_membase (int opcode)
2070 return OP_CALL_MEMBASE;
2071 case OP_VOIDCALLVIRT:
2072 return OP_VOIDCALL_MEMBASE;
2074 return OP_FCALL_MEMBASE;
2076 return OP_LCALL_MEMBASE;
2078 return OP_VCALL_MEMBASE;
2080 g_assert_not_reached ();
2086 #ifdef MONO_ARCH_HAVE_IMT
/*
 * Passes the IMT (interface method table) argument for @call: either the
 * supplied @imt_arg, or the method itself as a constant. Uses the
 * dedicated MONO_ARCH_IMT_REG when the architecture defines one,
 * otherwise defers to the arch-specific hook.
 */
2088 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
2090 #ifdef MONO_ARCH_IMT_REG
2091 int method_reg = alloc_preg (cfg);
2094 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2095 } else if (cfg->compile_aot) {
2096 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
2099 MONO_INST_NEW (cfg, ins, OP_PCONST);
2100 ins->inst_p0 = call->method;
2101 ins->dreg = method_reg;
2102 MONO_ADD_INS (cfg->cbb, ins);
2105 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2107 mono_arch_emit_imt_argument (cfg, call, imt_arg);
/* Allocates a MonoJumpInfo from @mp and fills in its type/ip/target. */
2112 static MonoJumpInfo *
2113 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2115 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2119 ji->data.target = target;
/*
 * Creates a MonoCallInst for @sig/@args, choosing the opcode via
 * ret_type_to_call_opcode (OP_TAILCALL for @tail). Sets up valuetype
 * return handling (vret_var or OP_OUTARG_VTRETADDR), performs the
 * soft-float r8->r4 argument conversions, and lets LLVM or the arch
 * backend lower the outgoing arguments.
 */
2124 inline static MonoCallInst *
2125 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2126 MonoInst **args, int calli, int virtual, int tail)
2129 #ifdef MONO_ARCH_SOFT_FLOAT
2134 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2136 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2139 call->signature = sig;
2141 type_to_eval_stack_type ((cfg), sig->ret, &call->inst);
/* Valuetype return: first branch presumably guarded by a "ret in vret_addr"
 * condition (elided in this view); otherwise a temp + VTRETADDR is used. */
2144 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2145 call->vret_var = cfg->vret_addr;
2146 //g_assert_not_reached ();
2148 } else if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2149 MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
2152 temp->backend.is_pinvoke = sig->pinvoke;
2155 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2156 * address of return value to increase optimization opportunities.
2157 * Before vtype decomposition, the dreg of the call ins itself represents the
2158 * fact the call modifies the return value. After decomposition, the call will
2159 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2160 * will be transformed into an LDADDR.
2162 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2163 loada->dreg = alloc_preg (cfg);
2164 loada->inst_p0 = temp;
2165 /* We reference the call too since call->dreg could change during optimization */
2166 loada->inst_p1 = call;
2167 MONO_ADD_INS (cfg->cbb, loada);
2169 call->inst.dreg = temp->dreg;
2171 call->vret_var = loada;
2172 } else if (!MONO_TYPE_IS_VOID (sig->ret))
2173 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2175 #ifdef MONO_ARCH_SOFT_FLOAT
2176 if (COMPILE_SOFT_FLOAT (cfg)) {
2178 * If the call has a float argument, we would need to do an r8->r4 conversion using
2179 * an icall, but that cannot be done during the call sequence since it would clobber
2180 * the call registers + the stack. So we do it before emitting the call.
2182 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2184 MonoInst *in = call->args [i];
2186 if (i >= sig->hasthis)
2187 t = sig->params [i - sig->hasthis];
2189 t = &mono_defaults.int_class->byval_arg;
2190 t = mono_type_get_underlying_type (t);
2192 if (!t->byref && t->type == MONO_TYPE_R4) {
2193 MonoInst *iargs [1];
2197 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2199 /* The result will be in an int vreg */
2200 call->args [i] = conv;
/* Backend lowering of outgoing args; param_area must cover the call's
 * stack usage. */
2207 if (COMPILE_LLVM (cfg))
2208 mono_llvm_emit_call (cfg, call);
2210 mono_arch_emit_call (cfg, call);
2212 mono_arch_emit_call (cfg, call);
2215 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2216 cfg->flags |= MONO_CFG_HAS_CALLS;
/* Emits an indirect call through the function pointer in @addr. */
2221 inline static MonoInst*
2222 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr)
2224 MonoCallInst *call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE);
2226 call->inst.sreg1 = addr->dreg;
2228 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2230 return (MonoInst*)call;
/*
 * Indirect call that also passes a runtime-generic-context argument in
 * the dedicated MONO_ARCH_RGCTX_REG; unreachable on architectures
 * without one.
 */
2233 inline static MonoInst*
2234 mono_emit_rgctx_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *rgctx_arg)
2236 #ifdef MONO_ARCH_RGCTX_REG
2241 rgctx_reg = mono_alloc_preg (cfg);
2242 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2244 call = (MonoCallInst*)mono_emit_calli (cfg, sig, args, addr);
2246 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2247 cfg->uses_rgctx_reg = TRUE;
2248 call->rgctx_reg = TRUE;
2250 return (MonoInst*)call;
2252 g_assert_not_reached ();
/* Forward declarations: defined later in this file, used by the call
 * emission helpers below. */
2258 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, int rgctx_type);
2260 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, int rgctx_type);
/*
 * Central helper for emitting a managed method call. Handles, in order:
 * string ctor signature rewriting, remoting (transparent-proxy) wrappers,
 * the delegate Invoke fast path, static dispatch of non-virtual / sealed
 * methods, and finally true virtual dispatch through the vtable or IMT.
 * @this may be NULL for static calls; @imt_arg optionally carries an
 * explicit IMT argument for interface/generic-virtual dispatch.
 */
2263 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2264 MonoInst **args, MonoInst *this, MonoInst *imt_arg)
2266 gboolean might_be_remote;
2267 gboolean virtual = this != NULL;
2268 gboolean enable_for_aot = TRUE;
2272 if (method->string_ctor) {
2273 /* Create the real signature */
2274 /* FIXME: Cache these */
2275 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2276 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
/* Calls on MarshalByRef (or object) instances may go through a remoting
 * proxy; gshared methods cannot use the wrapper, so go indirect via rgctx. */
2281 might_be_remote = this && sig->hasthis &&
2282 (method->klass->marshalbyref || method->klass == mono_defaults.object_class) &&
2283 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && !MONO_CHECK_THIS (this);
2285 context_used = mono_method_check_context_used (method);
2286 if (might_be_remote && context_used) {
2289 g_assert (cfg->generic_sharing_context);
2291 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2293 return mono_emit_calli (cfg, sig, args, addr);
2296 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, FALSE);
2298 if (might_be_remote)
2299 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2301 call->method = method;
2302 call->inst.flags |= MONO_INST_HAS_METHOD;
2303 call->inst.inst_left = this;
2306 int vtable_reg, slot_reg, this_reg;
2308 this_reg = this->dreg;
2310 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
2311 if ((method->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
2312 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2314 /* Make a call to delegate->invoke_impl */
2315 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2316 call->inst.inst_basereg = this_reg;
2317 call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2318 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2320 return (MonoInst*)call;
/* Static dispatch: non-virtual, or final and not the remoting wrapper. */
2324 if ((!cfg->compile_aot || enable_for_aot) &&
2325 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2326 (MONO_METHOD_IS_FINAL (method) &&
2327 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2328 !(method->klass->marshalbyref && context_used)) {
2330 * the method is not virtual, we just need to ensure this is not null
2331 * and then we can call the method directly.
2333 if (method->klass->marshalbyref || method->klass == mono_defaults.object_class) {
2335 * The check above ensures method is not gshared, this is needed since
2336 * gshared methods can't have wrappers.
2338 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2341 if (!method->string_ctor)
2342 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2344 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2346 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2348 return (MonoInst*)call;
2351 if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2353 * the method is virtual, but we can statically dispatch since either
2354 * it's class or the method itself are sealed.
2355 * But first we need to ensure it's not a null reference.
2357 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2359 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2360 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2362 return (MonoInst*)call;
/* True virtual dispatch: load the vtable (with implicit null check),
 * then compute the slot — IMT slot for interfaces, vtable index otherwise. */
2365 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2367 vtable_reg = alloc_preg (cfg);
2368 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2369 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2371 #ifdef MONO_ARCH_HAVE_IMT
2373 guint32 imt_slot = mono_method_get_imt_slot (method);
2374 emit_imt_argument (cfg, call, imt_arg);
2375 slot_reg = vtable_reg;
2376 call->inst.inst_offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2379 if (slot_reg == -1) {
2380 slot_reg = alloc_preg (cfg);
2381 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2382 call->inst.inst_offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
2385 slot_reg = vtable_reg;
2386 call->inst.inst_offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
2387 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2388 #ifdef MONO_ARCH_HAVE_IMT
2390 g_assert (mono_method_signature (method)->generic_param_count);
2391 emit_imt_argument (cfg, call, imt_arg);
2396 call->inst.sreg1 = slot_reg;
2397 call->virtual = TRUE;
2400 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2402 return (MonoInst*)call;
/*
 * Like mono_emit_method_call_full, but additionally passes @vtable_arg
 * as the runtime-generic-context argument in MONO_ARCH_RGCTX_REG.
 */
2406 mono_emit_rgctx_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2407 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *vtable_arg)
2409 #ifdef MONO_ARCH_RGCTX_REG
2416 #ifdef MONO_ARCH_RGCTX_REG
/* Copy the rgctx value before emitting the call so the fixed register
 * can be attached as an out-argument afterwards. */
2417 rgctx_reg = mono_alloc_preg (cfg);
2418 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
2423 ins = mono_emit_method_call_full (cfg, method, sig, args, this, imt_arg);
2425 call = (MonoCallInst*)ins;
2427 #ifdef MONO_ARCH_RGCTX_REG
2428 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2429 cfg->uses_rgctx_reg = TRUE;
2430 call->rgctx_reg = TRUE;
/* Convenience wrapper: call @method with its own signature and no IMT arg. */
2440 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2442 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), args, this, NULL);
/* Emits a direct call to the native function @func with signature @sig. */
2446 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2453 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE);
2456 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2458 return (MonoInst*)call;
/* Emits a call to the registered JIT icall identified by its address
 * @func, going through the icall's wrapper. */
2462 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2464 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2468 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2472 * mono_emit_abs_call:
2474 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2476 inline static MonoInst*
2477 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2478 MonoMethodSignature *sig, MonoInst **args)
2480 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2484 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
/* Register the patch so the backend can resolve the address later. */
2487 if (cfg->abs_patches == NULL)
2488 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2489 g_hash_table_insert (cfg->abs_patches, ji, ji);
2490 ins = mono_emit_native_call (cfg, ji, sig, args);
2491 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * Widens/sign-extends the result of a pinvoke (or LLVM) call when the
 * return type is a small integer, since native code might leave the
 * upper bits of the register uninitialized. Returns the (possibly
 * replaced) result instruction.
 */
2496 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2498 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2499 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2503 * Native code might return non register sized integers
2504 * without initializing the upper bits.
2506 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2507 case OP_LOADI1_MEMBASE:
2508 widen_op = OP_ICONV_TO_I1;
2510 case OP_LOADU1_MEMBASE:
2511 widen_op = OP_ICONV_TO_U1;
2513 case OP_LOADI2_MEMBASE:
2514 widen_op = OP_ICONV_TO_I2;
2516 case OP_LOADU2_MEMBASE:
2517 widen_op = OP_ICONV_TO_U2;
2523 if (widen_op != -1) {
2524 int dreg = alloc_preg (cfg);
/* The widened value keeps the original stack type. */
2527 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
2528 widen->type = ins->type;
/*
 * get_memcpy_method:
 *
 *   Return the managed String.memcpy(3-arg) helper from corlib, caching it in
 * a static.  Aborts with g_error if the running corlib is too old to have it.
 */
2538 get_memcpy_method (void)
2540 static MonoMethod *memcpy_method = NULL;
2541 if (!memcpy_method) {
/* The helper lives on System.String in mscorlib. */
2542 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2544 g_error ("Old corlib found. Install a new one");
2546 return memcpy_method;
2550  * Emit code to copy a valuetype of type @klass whose address is stored in
2551  * @src->dreg to memory whose address is stored at @dest->dreg.
2554 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
2556 MonoInst *iargs [3];
2559 MonoMethod *memcpy_method;
2563 * This check breaks with spilled vars... need to handle it during verification anyway.
2564 * g_assert (klass && klass == src->klass && klass == dest->klass);
/* Size to copy: native (marshalled) layout vs. managed value layout. */
2568 n = mono_class_native_size (klass, &align);
2570 n = mono_class_value_size (klass, &align);
2572 #if HAVE_WRITE_BARRIERS
2573 /* if native is true there should be no references in the struct */
2574 if (klass->has_references && !native) {
2575 /* Avoid barriers when storing to the stack */
2576 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
2577 (dest->opcode == OP_LDADDR))) {
2578 int context_used = 0;
2583 if (cfg->generic_sharing_context)
2584 context_used = mono_class_check_context_used (klass);
/* Third icall argument: the klass, via rgctx when generic-shared. */
2586 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
2588 if (cfg->compile_aot) {
2589 EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
2591 EMIT_NEW_PCONST (cfg, iargs [2], klass);
/* Ensure the GC descriptor exists before mono_value_copy uses it at run time. */
2592 mono_class_compute_gc_descriptor (klass);
2596 /* FIXME: this does the memcpy as well (or
2597 should), so we don't need the memcpy
/* mono_value_copy performs a barrier-aware struct copy. */
2599 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* Small structs: inline the copy instead of calling the managed memcpy. */
2604 if ((cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
2605 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
2606 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
2610 EMIT_NEW_ICONST (cfg, iargs [2], n);
2612 memcpy_method = get_memcpy_method ();
2613 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *
 *   Return the managed String.memset(3-arg) helper from corlib, caching it in
 * a static.  Aborts with g_error if the running corlib lacks it.
 */
2618 get_memset_method (void)
2620 static MonoMethod *memset_method = NULL;
2621 if (!memset_method) {
2622 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
2624 g_error ("Old corlib found. Install a new one");
2626 return memset_method;
/*
 * mini_emit_initobj:
 *
 *   Emit code zero-initializing a valuetype of type KLASS at the address in
 * DEST->dreg (implements the CIL `initobj` semantics).  Small types are
 * zeroed inline; larger ones call the managed memset helper.
 */
2630 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
2632 MonoInst *iargs [3];
2635 MonoMethod *memset_method;
2637 /* FIXME: Optimize this for the case when dest is an LDADDR */
2639 mono_class_init (klass);
2640 n = mono_class_value_size (klass, &align);
/* Small value types: inline stores are cheaper than a call. */
2642 if (n <= sizeof (gpointer) * 5) {
2643 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
2646 memset_method = get_memset_method ();
/* memset(dest, 0, n) via the managed helper. */
2648 EMIT_NEW_ICONST (cfg, iargs [1], 0);
2649 EMIT_NEW_ICONST (cfg, iargs [2], n);
2650 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 *
 *   Emit IR loading the runtime generic context appropriate for METHOD given
 * CONTEXT_USED: the method's MRGCTX (when the method's generic context is
 * used), the vtable (for static/valuetype methods), or the one reachable
 * from `this`'s vtable.  Only valid when compiling with generic sharing.
 */
2655 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
2657 MonoInst *this = NULL;
2659 g_assert (cfg->generic_sharing_context);
/* Instance methods of reference types can reach the context through `this`. */
2661 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
2662 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
2663 !method->klass->valuetype)
2664 EMIT_NEW_ARGLOAD (cfg, this, 0);
/* Method generic context in use: load the MRGCTX from its local variable. */
2666 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
2667 MonoInst *mrgctx_loc, *mrgctx_var;
2670 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
2672 mrgctx_loc = mono_get_vtable_var (cfg);
2673 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
/* Static or valuetype method: the context comes from the vtable variable. */
2676 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
2677 MonoInst *vtable_loc, *vtable_var;
2681 vtable_loc = mono_get_vtable_var (cfg);
2682 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
/* The variable actually holds an MRGCTX here; fetch its class vtable. */
2684 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
2685 MonoInst *mrgctx_var = vtable_var;
2688 vtable_reg = alloc_preg (cfg);
2689 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
2690 vtable_var->type = STACK_PTR;
/* Fallback: load the vtable from `this`. */
2696 int vtable_reg, res_reg;
2698 vtable_reg = alloc_preg (cfg);
2699 res_reg = alloc_preg (cfg);
2700 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 *
 *   Allocate (from MP) and fill an RGCTX-entry patch descriptor: the slot of
 * kind INFO_TYPE for METHOD, whose payload is a nested MonoJumpInfo of
 * PATCH_TYPE/PATCH_DATA.  IN_MRGCTX selects method-rgctx vs. class-rgctx.
 */
2705 static MonoJumpInfoRgctxEntry *
2706 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, int info_type)
2708 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
2709 res->method = method;
2710 res->in_mrgctx = in_mrgctx;
/* Nested patch describing what the rgctx slot resolves to. */
2711 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
2712 res->data->type = patch_type;
2713 res->data->data.target = patch_data;
2714 res->info_type = info_type;
/*
 * emit_rgctx_fetch:
 *
 *   Emit a call to the lazy rgctx-fetch trampoline that resolves ENTRY,
 * passing RGCTX as the single argument.
 */
2719 static inline MonoInst*
2720 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
2722 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 *
 *   Emit IR loading the RGCTX_TYPE property (e.g. KLASS/VTABLE) of KLASS
 * from the runtime generic context of the current method.
 */
2726 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
2727 MonoClass *klass, int rgctx_type)
2729 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
2730 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2732 return emit_rgctx_fetch (cfg, rgctx, entry);
2736  * emit_get_rgctx_method:
2738  *   Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
2739  * normal constants, else emit a load from the rgctx.
2742 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
2743 MonoMethod *cmethod, int rgctx_type)
/* No generic context in use: the value is a compile-time constant. */
2745 if (!context_used) {
2748 switch (rgctx_type) {
2749 case MONO_RGCTX_INFO_METHOD:
2750 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
2752 case MONO_RGCTX_INFO_METHOD_RGCTX:
2753 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
2756 g_assert_not_reached ();
/* Shared code: resolve the value lazily through the rgctx trampoline. */
2759 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
2760 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2762 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *
 *   Emit IR loading the RGCTX_TYPE property of FIELD from the runtime
 * generic context of the current method.
 */
2767 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
2768 MonoClassField *field, int rgctx_type)
2770 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
2771 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2773 return emit_rgctx_fetch (cfg, rgctx, entry);
2777  * On return the caller must check @klass for load errors.
2780 emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
2782 MonoInst *vtable_arg;
2784 int context_used = 0;
2786 if (cfg->generic_sharing_context)
2787 context_used = mono_class_check_context_used (klass);
/* Shared code: fetch the vtable through the rgctx. */
2790 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
2791 klass, MONO_RGCTX_INFO_VTABLE);
/* Non-shared: the vtable is a compile-time constant. */
2793 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2797 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
/* Call the class-init trampoline with the vtable as its argument. */
2800 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
2801 #ifdef MONO_ARCH_VTABLE_REG
/* On archs with a dedicated vtable register, pass the vtable in it. */
2802 mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
2803 cfg->uses_vtable_reg = TRUE;
2810  * On return the caller must check @array_class for load errors
/*
 * mini_emit_check_array_type:
 *
 *   Emit a check that OBJ's exact type is ARRAY_CLASS, throwing
 * ArrayTypeMismatchException otherwise (used for stores into arrays).
 */
2813 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
2815 int vtable_reg = alloc_preg (cfg);
2816 int context_used = 0;
2818 if (cfg->generic_sharing_context)
2819 context_used = mono_class_check_context_used (array_class);
2821 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/* MONO_OPT_SHARED: vtables are per-domain, so compare the MonoClass instead. */
2823 if (cfg->opt & MONO_OPT_SHARED) {
2824 int class_reg = alloc_preg (cfg);
2825 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2826 if (cfg->compile_aot) {
2827 int klass_reg = alloc_preg (cfg);
2828 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
2829 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
2831 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
/* Generic sharing: fetch the expected vtable via the rgctx and compare. */
2833 } else if (context_used) {
2834 MonoInst *vtable_ins;
2836 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
2837 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
2839 if (cfg->compile_aot) {
/* NULL vtable signals a load error; caller is expected to check array_class. */
2843 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
2845 vt_reg = alloc_preg (cfg);
2846 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
2847 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
2850 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
2852 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
2856 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
/*
 * save_cast_details:
 *
 *   When --debug=casts is active, emit code recording the source class (from
 * OBJ_REG's vtable) and target KLASS into per-thread JIT TLS slots, so a
 * failing cast can report both types.  No-op otherwise.
 */
2860 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg)
2862 if (mini_get_debug_options ()->better_cast_details) {
2863 int to_klass_reg = alloc_preg (cfg);
2864 int vtable_reg = alloc_preg (cfg);
2865 int klass_reg = alloc_preg (cfg);
2866 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
/* The TLS intrinsic may be unavailable on this platform (tls_get == NULL). */
2869 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
2873 MONO_ADD_INS (cfg->cbb, tls_get);
/* obj -> vtable -> klass: the runtime class of the object being cast. */
2874 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2875 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2877 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
2878 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
2879 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
/*
 * reset_cast_details:
 *
 *   Counterpart of save_cast_details (): clear the recorded cast info once
 * the cast has succeeded.  No-op unless --debug=casts is active.
 */
2884 reset_cast_details (MonoCompile *cfg)
2886 /* Reset the variables holding the cast details */
2887 if (mini_get_debug_options ()->better_cast_details) {
2888 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
2890 MONO_ADD_INS (cfg->cbb, tls_get);
2891 /* It is enough to reset the from field */
2892 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
2897  * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
2898  * generic code is generated.
2901 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
/* Delegate to the managed Nullable<T>.Unbox helper. */
2903 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
2906 MonoInst *rgctx, *addr;
2908 /* FIXME: What if the class is shared? We might not
2909 have to get the address of the method from the
/* Shared code: resolve the helper's code address through the rgctx. */
2911 addr = emit_get_rgctx_method (cfg, context_used, method,
2912 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
2914 rgctx = emit_get_rgctx (cfg, method, context_used);
2916 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
/* Non-shared: plain direct call. */
2918 return mono_emit_method_call (cfg, method, &val, NULL);
/*
 * handle_unbox:
 *
 *   Emit the unbox of SP[0] to valuetype KLASS: type-check the boxed object
 * (throwing InvalidCastException on mismatch) and return the address of its
 * payload (object pointer + sizeof (MonoObject)).
 */
2923 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
2927 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
2928 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
2929 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
2930 int rank_reg = alloc_dreg (cfg ,STACK_I4);
2932 obj_reg = sp [0]->dreg;
/* Faulting load: also acts as the null check on the boxed object. */
2933 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2934 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
2936 /* FIXME: generics */
2937 g_assert (klass->rank == 0);
/* Arrays can never unbox to a valuetype. */
2940 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
2941 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
2943 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2944 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
2947 MonoInst *element_class;
2949 /* This assertion is from the unboxcast insn */
2950 g_assert (klass->rank == 0);
/* Shared code: compare against the element class fetched from the rgctx. */
2952 element_class = emit_get_rgctx_klass (cfg, context_used,
2953 klass->element_class, MONO_RGCTX_INFO_KLASS);
2955 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
2956 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
2958 save_cast_details (cfg, klass->element_class, obj_reg);
2959 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
2960 reset_cast_details (cfg);
/* Result: address of the unboxed payload, right after the object header. */
2963 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_PTR), obj_reg, sizeof (MonoObject));
2964 MONO_ADD_INS (cfg->cbb, add);
2965 add->type = STACK_MP;
2972  * Returns NULL and set the cfg exception on error.
/*
 * handle_alloc:
 *
 *   Emit IR allocating a new object of KLASS (FOR_BOX distinguishes box
 * allocations for the GC's managed allocators).  Picks between the generic
 * allocator, an AOT corlib helper, a GC managed allocator, or a
 * class-specific allocation function.
 */
2975 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box)
2977 MonoInst *iargs [2];
/* MONO_OPT_SHARED: domain-neutral code, pass domain + class explicitly. */
2980 if (cfg->opt & MONO_OPT_SHARED) {
2981 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
2982 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
2984 alloc_ftn = mono_object_new;
2985 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
2986 /* This happens often in argument checking code, eg. throw new FooException... */
2987 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
2988 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
2989 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
2991 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2992 MonoMethod *managed_alloc = NULL;
/* vtable creation failed: report a TypeLoadException via the cfg. */
2996 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
2997 cfg->exception_ptr = klass;
3001 #ifndef MONO_CROSS_COMPILE
3002 managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
/* Prefer the GC's inlined managed allocator when one is available. */
3005 if (managed_alloc) {
3006 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3007 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3009 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* Some allocators take the instance size in words as the first argument. */
3011 guint32 lw = vtable->klass->instance_size;
3012 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
3013 EMIT_NEW_ICONST (cfg, iargs [0], lw);
3014 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
3017 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3021 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/*
 * handle_alloc_from_inst:
 *
 *   Like handle_alloc (), but for shared generic code where the vtable is
 * only known at run time and is supplied by DATA_INST.
 */
3025 handle_alloc_from_inst (MonoCompile *cfg, MonoClass *klass, MonoInst *data_inst,
3028 MonoInst *iargs [2];
3029 MonoMethod *managed_alloc = NULL;
3033 FIXME: we cannot get managed_alloc here because we can't get
3034 the class's vtable (because it's not a closed class)
3036 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3037 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
3040 if (cfg->opt & MONO_OPT_SHARED) {
3041 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
/* data_inst supplies the run-time class/vtable argument. */
3042 iargs [1] = data_inst;
3043 alloc_ftn = mono_object_new;
3045 if (managed_alloc) {
3046 iargs [0] = data_inst;
3047 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3050 iargs [0] = data_inst;
/* mono_object_new_specific allocates from a vtable directly. */
3051 alloc_ftn = mono_object_new_specific;
3054 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3058  * Returns NULL and set the cfg exception on error.
/*
 * handle_box:
 *
 *   Emit IR boxing VAL as an instance of KLASS: allocate the box object and
 * store the value after the object header.  Nullable<T> delegates to the
 * managed Nullable<T>.Box helper.
 */
3061 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass)
3063 MonoInst *alloc, *ins;
3065 if (mono_class_is_nullable (klass)) {
3066 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3067 return mono_emit_method_call (cfg, method, &val, NULL);
3070 alloc = handle_alloc (cfg, klass, TRUE);
/* Copy the value into the freshly allocated box, past the MonoObject header. */
3074 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * handle_box_from_inst:
 *
 *   Shared-generic-code variant of handle_box (): the run-time vtable comes
 * from DATA_INST, and Nullable<T>.Box is invoked indirectly through an
 * address resolved from the rgctx.
 */
3080 handle_box_from_inst (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used, MonoInst *data_inst)
3082 MonoInst *alloc, *ins;
3084 if (mono_class_is_nullable (klass)) {
3085 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3086 /* FIXME: What if the class is shared? We might not
3087 have to get the method address from the RGCTX. */
3088 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3089 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3090 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3092 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
3094 alloc = handle_alloc_from_inst (cfg, klass, data_inst, TRUE);
/* Store the value payload immediately after the object header. */
3096 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * is_complex_isinst:
 *   TRUE when an isinst/castclass against KLASS cannot be handled by the
 * simple inline vtable comparisons and must go through an icall.  Note the
 * leading `TRUE ||` forces the icall path unconditionally for now — the
 * inline fast path is disabled (see the FIXME below).
 */
3102 // FIXME: This doesn't work yet (class libs tests fail?)
3103 #define is_complex_isinst(klass) (TRUE || (klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || klass->marshalbyref || (klass->flags & TYPE_ATTRIBUTE_SEALED) || mono_class_has_variant_generic_params (klass) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
3106  * Returns NULL and set the cfg exception on error.
/*
 * handle_castclass:
 *
 *   Emit IR implementing CIL `castclass`: check that SRC is null or an
 * instance of KLASS, throwing InvalidCastException otherwise, and return the
 * (unchanged) object reference.  Complex cases are punted to the
 * mono_object_castclass icall.
 */
3109 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3111 MonoBasicBlock *is_null_bb;
3112 int obj_reg = src->dreg;
3113 int vtable_reg = alloc_preg (cfg);
3114 MonoInst *klass_inst = NULL;
/* Shared code: resolve KLASS at run time via the rgctx. */
3119 klass_inst = emit_get_rgctx_klass (cfg, context_used,
3120 klass, MONO_RGCTX_INFO_KLASS);
3122 if (is_complex_isinst (klass)) {
3123 /* Complex case, handle by an icall */
3129 args [1] = klass_inst;
3131 return mono_emit_jit_icall (cfg, mono_object_castclass, args);
3133 /* Simple case, handled by the code below */
3137 NEW_BBLOCK (cfg, is_null_bb);
/* null passes castclass unconditionally. */
3139 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3140 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3142 save_cast_details (cfg, klass, obj_reg);
3144 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3145 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3146 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
3148 int klass_reg = alloc_preg (cfg);
3150 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed non-array class: an exact class/vtable compare suffices. */
3152 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3153 /* the remoting code is broken, access the class for now */
3154 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3155 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3157 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
3158 cfg->exception_ptr = klass;
3161 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3163 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3164 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3166 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* General case: walk the class hierarchy. */
3168 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3169 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
3173 MONO_START_BB (cfg, is_null_bb);
3175 reset_cast_details (cfg);
3181  * Returns NULL and set the cfg exception on error.
/*
 * handle_isinst:
 *
 *   Emit IR implementing CIL `isinst`: result is SRC if it is null or an
 * instance of KLASS, NULL otherwise.  Uses a three-block diamond
 * (is_null_bb copies src through / false_bb yields NULL / end_bb joins);
 * complex cases go through the mono_object_isinst icall.
 */
3184 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3187 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
3188 int obj_reg = src->dreg;
3189 int vtable_reg = alloc_preg (cfg);
3190 int res_reg = alloc_preg (cfg);
3191 MonoInst *klass_inst = NULL;
3194 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3196 if (is_complex_isinst (klass)) {
3199 /* Complex case, handle by an icall */
3205 args [1] = klass_inst;
3207 return mono_emit_jit_icall (cfg, mono_object_isinst, args);
3209 /* Simple case, the code below can handle it */
3213 NEW_BBLOCK (cfg, is_null_bb);
3214 NEW_BBLOCK (cfg, false_bb);
3215 NEW_BBLOCK (cfg, end_bb);
3217 /* Do the assignment at the beginning, so the other assignment can be if converted */
3218 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
3219 ins->type = STACK_OBJ;
/* null: result is the (null) input itself. */
3222 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3223 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
3225 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3227 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3228 g_assert (!context_used);
3229 /* the is_null_bb target simply copies the input register to the output */
3230 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
3232 int klass_reg = alloc_preg (cfg);
/* Array case: compare rank, then test the element (cast) class. */
3235 int rank_reg = alloc_preg (cfg);
3236 int eclass_reg = alloc_preg (cfg);
3238 g_assert (!context_used);
3239 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3240 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
3241 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3242 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3243 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
/* Special array-covariance cases around object/Enum/enum-base types. */
3244 if (klass->cast_class == mono_defaults.object_class) {
3245 int parent_reg = alloc_preg (cfg);
3246 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
3247 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
3248 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3249 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3250 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
3251 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
3252 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3253 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3254 } else if (klass->cast_class == mono_defaults.enum_class) {
3255 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3256 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3257 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
3258 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3260 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
3261 /* Check that the object is a vector too */
/* SZARRAY has a NULL bounds field; multi-dim arrays do not. */
3262 int bounds_reg = alloc_preg (cfg);
3263 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
3264 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
3265 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3268 /* the is_null_bb target simply copies the input register to the output */
3269 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3271 } else if (mono_class_is_nullable (klass)) {
3272 g_assert (!context_used);
3273 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3274 /* the is_null_bb target simply copies the input register to the output */
3275 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
/* Sealed non-generic class: a single exact-class compare suffices. */
3277 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3278 g_assert (!context_used);
3279 /* the remoting code is broken, access the class for now */
3280 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3281 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3283 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
3284 cfg->exception_ptr = klass;
3287 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3289 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3290 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3292 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3293 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
3295 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3296 /* the is_null_bb target simply copies the input register to the output */
3297 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
/* false path: result is NULL. */
3302 MONO_START_BB (cfg, false_bb);
3304 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
3305 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3307 MONO_START_BB (cfg, is_null_bb);
3309 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *
 *   Emit IR for the remoting-aware isinst variant (see the result encoding
 * below).  Handles transparent proxies specially: a proxy with custom type
 * info whose type cannot be decided locally yields 2.
 */
3315 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3317 /* This opcode takes as input an object reference and a class, and returns:
3318 0) if the object is an instance of the class,
3319 1) if the object is not instance of the class,
3320 2) if the object is a proxy whose type cannot be determined */
3323 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
3324 int obj_reg = src->dreg;
3325 int dreg = alloc_ireg (cfg);
3327 int klass_reg = alloc_preg (cfg);
3329 NEW_BBLOCK (cfg, true_bb);
3330 NEW_BBLOCK (cfg, false_bb);
3331 NEW_BBLOCK (cfg, false2_bb);
3332 NEW_BBLOCK (cfg, end_bb);
3333 NEW_BBLOCK (cfg, no_proxy_bb);
/* null is never an instance: result 1. */
3335 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3336 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
3338 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3339 NEW_BBLOCK (cfg, interface_fail_bb);
3341 tmp_reg = alloc_preg (cfg);
3342 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3343 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
/* Interface test failed: a transparent proxy with custom type info may
   still match remotely — classify as "undecidable" (2). */
3344 MONO_START_BB (cfg, interface_fail_bb);
3345 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3347 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
3349 tmp_reg = alloc_preg (cfg);
3350 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3351 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3352 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
/* Non-interface: detect proxies, then test against the proxied class. */
3354 tmp_reg = alloc_preg (cfg);
3355 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3356 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3358 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* It is a proxy: test against the remote class's proxy_class. */
3359 tmp_reg = alloc_preg (cfg);
3360 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3361 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3363 tmp_reg = alloc_preg (cfg);
3364 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3365 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3366 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3368 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
3369 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
3371 MONO_START_BB (cfg, no_proxy_bb);
3373 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
/* Materialize the three possible integer results and join. */
3376 MONO_START_BB (cfg, false_bb);
3378 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3379 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3381 MONO_START_BB (cfg, false2_bb);
3383 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
3384 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3386 MONO_START_BB (cfg, true_bb);
3388 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3390 MONO_START_BB (cfg, end_bb);
/* Dummy ICONST carrying the joined result on the evaluation stack. */
3393 MONO_INST_NEW (cfg, ins, OP_ICONST);
3395 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *
 *   Emit IR for the remoting-aware castclass variant (see the result
 * encoding below).  Transparent proxies whose type cannot be decided locally
 * yield 1 instead of throwing.
 */
3401 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3403 /* This opcode takes as input an object reference and a class, and returns:
3404 0) if the object is an instance of the class,
3405 1) if the object is a proxy whose type cannot be determined
3406 an InvalidCastException exception is thrown otherwhise*/
3409 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
3410 int obj_reg = src->dreg;
3411 int dreg = alloc_ireg (cfg);
3412 int tmp_reg = alloc_preg (cfg);
3413 int klass_reg = alloc_preg (cfg);
3415 NEW_BBLOCK (cfg, end_bb);
3416 NEW_BBLOCK (cfg, ok_result_bb);
/* null always casts successfully: result 0. */
3418 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3419 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
3421 save_cast_details (cfg, klass, obj_reg);
3423 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3424 NEW_BBLOCK (cfg, interface_fail_bb);
3426 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3427 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
/* Interface check failed: only a proxy with custom type info survives
   (result 1); anything else throws. */
3428 MONO_START_BB (cfg, interface_fail_bb);
3429 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3431 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
3433 tmp_reg = alloc_preg (cfg);
3434 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3435 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3436 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
3438 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3439 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Non-interface: detect proxies, then check against the proxied class. */
3442 NEW_BBLOCK (cfg, no_proxy_bb);
3444 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3445 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3446 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3448 tmp_reg = alloc_preg (cfg);
3449 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3450 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3452 tmp_reg = alloc_preg (cfg);
3453 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3454 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3455 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3457 NEW_BBLOCK (cfg, fail_1_bb);
3459 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
/* Proxy type did not match locally: "undecidable" result 1. */
3461 MONO_START_BB (cfg, fail_1_bb);
3463 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3464 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3466 MONO_START_BB (cfg, no_proxy_bb);
/* Plain object: normal castclass semantics (throws on mismatch). */
3468 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
3471 MONO_START_BB (cfg, ok_result_bb);
3473 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3475 MONO_START_BB (cfg, end_bb);
/* Dummy ICONST carrying the joined result on the evaluation stack. */
3478 MONO_INST_NEW (cfg, ins, OP_ICONST);
3480 ins->type = STACK_I4;
3486  * Returns NULL and set the cfg exception on error.
/*
 * handle_delegate_ctor:
 *
 *   Inline the creation of a delegate of type KLASS wrapping METHOD bound to
 * TARGET: allocate the delegate object, fill its target/method/method_code/
 * invoke_impl fields, and rely on the delegate trampoline for the remaining
 * checks normally done by mono_delegate_ctor ().
 */
3488 static G_GNUC_UNUSED MonoInst*
3489 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used)
3491 gpointer *trampoline;
3492 MonoInst *obj, *method_ins, *tramp_ins;
3496 obj = handle_alloc (cfg, klass, FALSE);
3500 /* Inline the contents of mono_delegate_ctor */
3502 /* Set target field */
3503 /* Optimize away setting of NULL target */
3504 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0))
3505 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
3507 /* Set method field */
3508 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
3509 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
3512 * To avoid looking up the compiled code belonging to the target method
3513 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
3514 * store it, and we fill it after the method has been compiled.
3516 if (!cfg->compile_aot && !method->dynamic) {
3517 MonoInst *code_slot_ins;
3520 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
3522 domain = mono_domain_get ();
/* The code-slot hash is shared per-domain state; lock around the lookup. */
3523 mono_domain_lock (domain);
3524 if (!domain_jit_info (domain)->method_code_hash)
3525 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
3526 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
3528 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
3529 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
3531 mono_domain_unlock (domain);
3533 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
3535 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
3538 /* Set invoke_impl field */
3539 if (cfg->compile_aot) {
3540 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, klass);
3542 trampoline = mono_create_delegate_trampoline (klass);
3543 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
3545 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
3547 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *   Emits a native call to the rank-specific mono_array_new_va icall
 *   wrapper for a multi-dimensional array allocation.
 */
3553 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
3555 MonoJitICallInfo *info;
3557 /* Need to register the icall so it gets an icall wrapper */
3558 info = mono_get_array_new_va_icall (rank);
3560 cfg->flags |= MONO_CFG_HAS_VARARGS;
3562 /* mono_array_new_va () needs a vararg calling convention */
3563 cfg->disable_llvm = TRUE;
3565 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
3566 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * mono_emit_load_got_addr:
 *   Materializes the GOT address into cfg->got_var at the start of the
 *   entry basic block (no-op when the var is absent or already allocated),
 *   then adds a dummy use in the exit block so liveness keeps the var
 *   alive even though only the back ends generate real uses.
 */
3570 mono_emit_load_got_addr (MonoCompile *cfg)
3572 MonoInst *getaddr, *dummy_use;
3574 if (!cfg->got_var || cfg->got_var_allocated)
3577 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
3578 getaddr->dreg = cfg->got_var->dreg;
3580 /* Add it to the start of the first bblock */
3581 if (cfg->bb_entry->code) {
/* Prepend by linking ahead of the existing first instruction. */
3582 getaddr->next = cfg->bb_entry->code;
3583 cfg->bb_entry->code = getaddr;
3586 MONO_ADD_INS (cfg->bb_entry, getaddr);
3588 cfg->got_var_allocated = TRUE;
3591 * Add a dummy use to keep the got_var alive, since real uses might
3592 * only be generated by the back ends.
3593 * Add it to end_bblock, so the variable's lifetime covers the whole
3595 * It would be better to make the usage of the got var explicit in all
3596 * cases when the backend needs it (i.e. calls, throw etc.), so this
3597 * wouldn't be needed.
3599 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
3600 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Inline size limit in IL bytes; lazily initialized from the
 * MONO_INLINELIMIT environment variable in mono_method_check_inlining (). */
3603 static int inline_limit;
3604 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *   Decides whether METHOD is a candidate for inlining into the method
 *   being compiled in CFG.  Rejects: generic sharing, inline depth > 10,
 *   noinline/synchronized/marshalbyref methods, bodies over the size
 *   limit, classes whose cctor cannot be run eagerly, methods with
 *   declarative security (CAS) and, under soft-float, R4 params/returns.
 */
3607 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
3609 MonoMethodHeaderSummary header;
3611 #ifdef MONO_ARCH_SOFT_FLOAT
3612 MonoMethodSignature *sig = mono_method_signature (method);
3616 if (cfg->generic_sharing_context)
3619 if (cfg->inline_depth > 10)
3622 #ifdef MONO_ARCH_HAVE_LMF_OPS
3623 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3624 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
/* NOTE(review): 'signature' is not declared anywhere in this listing (only
 * 'sig', and only under MONO_ARCH_SOFT_FLOAT) -- this arm likely fails to
 * build where MONO_ARCH_HAVE_LMF_OPS is defined; verify against full file. */
3625 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
3630 if (!mono_method_get_header_summary (method, &header))
3633 /*runtime, icall and pinvoke are checked by summary call*/
3634 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
3635 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
3636 (method->klass->marshalbyref) ||
3640 /* also consider num_locals? */
3641 /* Do the size check early to avoid creating vtables */
/* Lazy one-time init; note getenv() is called twice on the init path. */
3642 if (!inline_limit_inited) {
3643 if (getenv ("MONO_INLINELIMIT"))
3644 inline_limit = atoi (getenv ("MONO_INLINELIMIT"));
3646 inline_limit = INLINE_LENGTH_LIMIT;
3647 inline_limit_inited = TRUE;
3649 if (header.code_size >= inline_limit)
3653 * if we can initialize the class of the method right away, we do,
3654 * otherwise we don't allow inlining if the class needs initialization,
3655 * since it would mean inserting a call to mono_runtime_class_init()
3656 * inside the inlined code
3658 if (!(cfg->opt & MONO_OPT_SHARED)) {
3659 if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
3660 if (cfg->run_cctors && method->klass->has_cctor) {
3661 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
3662 if (!method->klass->runtime_info)
3663 /* No vtable created yet */
3665 vtable = mono_class_vtable (cfg->domain, method->klass);
3668 /* This makes so that inline cannot trigger */
3669 /* .cctors: too many apps depend on them */
3670 /* running with a specific order... */
3671 if (! vtable->initialized)
3673 mono_runtime_class_init (vtable);
3675 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
3676 if (!method->klass->runtime_info)
3677 /* No vtable created yet */
3679 vtable = mono_class_vtable (cfg->domain, method->klass);
3682 if (!vtable->initialized)
3687 * If we're compiling for shared code
3688 * the cctor will need to be run at aot method load time, for example,
3689 * or at the end of the compilation of the inlining method.
3691 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
3696 * CAS - do not inline methods with declarative security
3697 * Note: this has to be before any possible return TRUE;
3699 if (mono_method_has_declsec (method))
3702 #ifdef MONO_ARCH_SOFT_FLOAT
/* Soft-float cannot inline methods touching R4 values. */
3704 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
3706 for (i = 0; i < sig->param_count; ++i)
3707 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
3715 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoVTable *vtable)
3717 if (vtable->initialized && !cfg->compile_aot)
3720 if (vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
3723 if (!mono_class_needs_cctor_run (vtable->klass, method))
3726 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (vtable->klass == method->klass))
3727 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *   Emits the element-address computation for a one-dimensional array:
 *   optional bounds check, then base + index * element_size + vector
 *   offset.  Uses an x86/amd64 LEA fast path for power-of-two sizes.
 */
3734 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
3738 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
3740 mono_class_init (klass);
3741 size = mono_class_array_element_size (klass);
3743 mult_reg = alloc_preg (cfg);
3744 array_reg = arr->dreg;
3745 index_reg = index->dreg;
3747 #if SIZEOF_REGISTER == 8
3748 /* The array reg is 64 bits but the index reg is only 32 */
3749 if (COMPILE_LLVM (cfg)) {
/* LLVM performs the widening itself. */
3751 index2_reg = index_reg;
3753 index2_reg = alloc_preg (cfg);
3754 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
3757 if (index->type == STACK_I8) {
3758 index2_reg = alloc_preg (cfg);
3759 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
3761 index2_reg = index_reg;
3766 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
3768 #if defined(TARGET_X86) || defined(TARGET_AMD64)
3769 if (size == 1 || size == 2 || size == 4 || size == 8) {
3770 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
/* Single LEA: base + (index << log2(size)) + offsetof(MonoArray, vector). */
3772 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
3773 ins->type = STACK_PTR;
3779 add_reg = alloc_preg (cfg);
/* Generic path: multiply, add base, add constant displacement. */
3781 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
3782 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
3783 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3784 ins->type = STACK_PTR;
3785 MONO_ADD_INS (cfg->cbb, ins);
3790 #ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *   Emits the element-address computation for a 2-dimensional array,
 *   including lower-bound adjustment and range checks for both
 *   dimensions.  Only built where the arch has real mul/div (OP_LMUL).
 */
3792 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
3794 int bounds_reg = alloc_preg (cfg);
3795 int add_reg = alloc_preg (cfg);
3796 int mult_reg = alloc_preg (cfg);
3797 int mult2_reg = alloc_preg (cfg);
3798 int low1_reg = alloc_preg (cfg);
3799 int low2_reg = alloc_preg (cfg);
3800 int high1_reg = alloc_preg (cfg);
3801 int high2_reg = alloc_preg (cfg);
3802 int realidx1_reg = alloc_preg (cfg);
3803 int realidx2_reg = alloc_preg (cfg);
3804 int sum_reg = alloc_preg (cfg);
3809 mono_class_init (klass);
3810 size = mono_class_array_element_size (klass);
3812 index1 = index_ins1->dreg;
3813 index2 = index_ins2->dreg;
3815 /* range checking */
3816 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
3817 arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
/* Dimension 1: realidx = index - lower_bound; unsigned compare against length. */
3819 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
3820 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3821 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
3822 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
3823 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
3824 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
3825 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Dimension 2: same, reading the second MonoArrayBounds entry. */
3827 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
3828 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3829 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
3830 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
3831 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
3832 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
3833 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* addr = arr + (realidx1 * len2 + realidx2) * size + offsetof(vector). */
3835 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
3836 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
3837 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
3838 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
3839 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3841 ins->type = STACK_MP;
3843 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *   Dispatches an array Address/Set helper: rank 1 uses the inline
 *   ldelema-1 path, rank 2 may use the intrinsic two-dimensional path,
 *   anything else calls the marshal-generated address routine.
 */
3850 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
3854 MonoMethod *addr_method;
/* For setters the trailing value argument is not an index. */
3857 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
3860 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], TRUE);
3862 #ifndef MONO_ARCH_EMULATE_MUL_DIV
3863 /* emit_ldelema_2 depends on OP_LMUL */
3864 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
3865 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
3869 element_size = mono_class_array_element_size (cmethod->klass->element_class);
3870 addr_method = mono_marshal_get_array_address (rank, element_size);
3871 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
3876 static MonoBreakPolicy
3877 always_insert_breakpoint (MonoMethod *method)
3879 return MONO_BREAK_POLICY_ALWAYS;
3882 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
3885 * mono_set_break_policy:
3886 * policy_callback: the new callback function
3888 * Allow embedders to decide wherther to actually obey breakpoint instructions
3889 * (both break IL instructions and Debugger.Break () method calls), for example
3890 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
3891 * untrusted or semi-trusted code.
3893 * @policy_callback will be called every time a break point instruction needs to
3894 * be inserted with the method argument being the method that calls Debugger.Break()
3895 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
3896 * if it wants the breakpoint to not be effective in the given method.
3897 * #MONO_BREAK_POLICY_ALWAYS is the default.
3900 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
3902 if (policy_callback)
3903 break_policy_func = policy_callback;
3905 break_policy_func = always_insert_breakpoint;
3909 should_insert_brekpoint (MonoMethod *method) {
3910 switch (break_policy_func (method)) {
3911 case MONO_BREAK_POLICY_ALWAYS:
3913 case MONO_BREAK_POLICY_NEVER:
3915 case MONO_BREAK_POLICY_ON_DBG:
3916 return mono_debug_using_mono_debugger ();
3918 g_warning ("Incorrect value returned from break policy callback");
3923 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 *   Inlines Array.GetGenericValueImpl/SetGenericValueImpl as an
 *   element-address computation plus a typed load/store pair.
 */
3925 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
3927 MonoInst *addr, *store, *load;
3928 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
3930 /* the bounds check is already done by the callers */
3931 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
/* is_set: copy *value into the element; otherwise copy the element out. */
3933 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
3934 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
3936 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
3937 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/*
 * mini_emit_inst_for_method:
 *   Intrinsic expansion hook: recognizes well-known corlib methods
 *   (String, Object, Array, RuntimeHelpers, Thread, Monitor, Interlocked,
 *   Debugger, Environment, Math) and emits specialized IR instead of a
 *   call; falls through to the arch backend at the end.  A NULL return
 *   means no intrinsic applied and a regular call must be emitted.
 *   NOTE(review): many lines (returns, braces, #endif) are elided in this
 *   listing.
 */
3943 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
3945 MonoInst *ins = NULL;
3947 static MonoClass *runtime_helpers_class = NULL;
3948 if (! runtime_helpers_class)
3949 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
3950 "System.Runtime.CompilerServices", "RuntimeHelpers");
/* --- System.String intrinsics --- */
3952 if (cmethod->klass == mono_defaults.string_class) {
3953 if (strcmp (cmethod->name, "get_Chars") == 0) {
3954 int dreg = alloc_ireg (cfg);
3955 int index_reg = alloc_preg (cfg);
3956 int mult_reg = alloc_preg (cfg);
3957 int add_reg = alloc_preg (cfg);
3959 #if SIZEOF_REGISTER == 8
3960 /* The array reg is 64 bits but the index reg is only 32 */
3961 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
3963 index_reg = args [1]->dreg;
3965 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
3967 #if defined(TARGET_X86) || defined(TARGET_AMD64)
3968 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
3969 add_reg = ins->dreg;
3970 /* Avoid a warning */
3972 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
3975 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
3976 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
3977 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
3978 add_reg, G_STRUCT_OFFSET (MonoString, chars));
3980 type_from_op (ins, NULL, NULL);
3982 } else if (strcmp (cmethod->name, "get_Length") == 0) {
3983 int dreg = alloc_ireg (cfg);
3984 /* Decompose later to allow more optimizations */
3985 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
3986 ins->type = STACK_I4;
3987 cfg->cbb->has_array_access = TRUE;
3988 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
3991 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
3992 int mult_reg = alloc_preg (cfg);
3993 int add_reg = alloc_preg (cfg);
3995 /* The corlib functions check for oob already. */
3996 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
3997 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
3998 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
3999 return cfg->cbb->last_ins;
/* --- System.Object intrinsics --- */
4002 } else if (cmethod->klass == mono_defaults.object_class) {
4004 if (strcmp (cmethod->name, "GetType") == 0) {
4005 int dreg = alloc_preg (cfg);
4006 int vt_reg = alloc_preg (cfg);
4007 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
4008 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
4009 type_from_op (ins, NULL, NULL);
4012 #if !defined(MONO_ARCH_EMULATE_MUL_DIV) && !defined(HAVE_MOVING_COLLECTOR)
4013 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0) {
4014 int dreg = alloc_ireg (cfg);
4015 int t1 = alloc_ireg (cfg);
/* Multiplicative hash of the object address -- valid only with a non-moving GC. */
4017 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
4018 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
4019 ins->type = STACK_I4;
4023 } else if (strcmp (cmethod->name, ".ctor") == 0) {
4024 MONO_INST_NEW (cfg, ins, OP_NOP);
4025 MONO_ADD_INS (cfg->cbb, ins);
/* --- System.Array intrinsics --- */
4029 } else if (cmethod->klass == mono_defaults.array_class) {
/* Matches both GetGenericValueImpl and SetGenericValueImpl by suffix. */
4030 if (strcmp (cmethod->name + 1, "etGenericValueImpl") == 0)
4031 return emit_array_generic_access (cfg, fsig, args, *cmethod->name == 'S');
4032 if (cmethod->name [0] != 'g')
4035 if (strcmp (cmethod->name, "get_Rank") == 0) {
4036 int dreg = alloc_ireg (cfg);
4037 int vtable_reg = alloc_preg (cfg);
4038 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
4039 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
4040 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
4041 vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
4042 type_from_op (ins, NULL, NULL);
4045 } else if (strcmp (cmethod->name, "get_Length") == 0) {
4046 int dreg = alloc_ireg (cfg);
4048 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4049 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
4050 type_from_op (ins, NULL, NULL);
/* --- RuntimeHelpers --- */
4055 } else if (cmethod->klass == runtime_helpers_class) {
4057 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
4058 EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
/* --- System.Threading.Thread --- */
4062 } else if (cmethod->klass == mono_defaults.thread_class) {
4063 if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
4064 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
4065 MONO_ADD_INS (cfg->cbb, ins);
4067 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
4068 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
4069 MONO_ADD_INS (cfg->cbb, ins);
/* --- System.Threading.Monitor fast paths (two arch-dependent variants) --- */
4072 } else if (cmethod->klass == mono_defaults.monitor_class) {
4073 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
4074 if (strcmp (cmethod->name, "Enter") == 0) {
4077 if (COMPILE_LLVM (cfg)) {
4079 * Pass the argument normally, the LLVM backend will handle the
4080 * calling convention problems.
4082 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
4084 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
4085 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
4086 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
4087 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
4090 return (MonoInst*)call;
4091 } else if (strcmp (cmethod->name, "Exit") == 0) {
4094 if (COMPILE_LLVM (cfg)) {
4095 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
4097 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
4098 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
4099 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
4100 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
4103 return (MonoInst*)call;
4105 #elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
4106 MonoMethod *fast_method = NULL;
4108 /* Avoid infinite recursion */
4109 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
4110 (strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
4111 strcmp (cfg->method->name, "FastMonitorExit") == 0))
4114 if (strcmp (cmethod->name, "Enter") == 0 ||
4115 strcmp (cmethod->name, "Exit") == 0)
4116 fast_method = mono_monitor_get_fast_path (cmethod);
4120 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
/* --- System.Threading.Interlocked --- */
4122 } else if (cmethod->klass->image == mono_defaults.corlib &&
4123 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
4124 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
4127 #if SIZEOF_REGISTER == 8
4128 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
4129 /* 64 bit reads are already atomic */
4130 MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
4131 ins->dreg = mono_alloc_preg (cfg);
4132 ins->inst_basereg = args [0]->dreg;
4133 ins->inst_offset = 0;
4134 MONO_ADD_INS (cfg->cbb, ins);
4138 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
/* Increment/Decrement lower to atomic-add of +1/-1. */
4139 if (strcmp (cmethod->name, "Increment") == 0) {
4140 MonoInst *ins_iconst;
4143 if (fsig->params [0]->type == MONO_TYPE_I4)
4144 opcode = OP_ATOMIC_ADD_NEW_I4;
4145 #if SIZEOF_REGISTER == 8
4146 else if (fsig->params [0]->type == MONO_TYPE_I8)
4147 opcode = OP_ATOMIC_ADD_NEW_I8;
4150 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4151 ins_iconst->inst_c0 = 1;
4152 ins_iconst->dreg = mono_alloc_ireg (cfg);
4153 MONO_ADD_INS (cfg->cbb, ins_iconst);
4155 MONO_INST_NEW (cfg, ins, opcode);
4156 ins->dreg = mono_alloc_ireg (cfg);
4157 ins->inst_basereg = args [0]->dreg;
4158 ins->inst_offset = 0;
4159 ins->sreg2 = ins_iconst->dreg;
4160 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4161 MONO_ADD_INS (cfg->cbb, ins);
4163 } else if (strcmp (cmethod->name, "Decrement") == 0) {
4164 MonoInst *ins_iconst;
4167 if (fsig->params [0]->type == MONO_TYPE_I4)
4168 opcode = OP_ATOMIC_ADD_NEW_I4;
4169 #if SIZEOF_REGISTER == 8
4170 else if (fsig->params [0]->type == MONO_TYPE_I8)
4171 opcode = OP_ATOMIC_ADD_NEW_I8;
4174 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4175 ins_iconst->inst_c0 = -1;
4176 ins_iconst->dreg = mono_alloc_ireg (cfg);
4177 MONO_ADD_INS (cfg->cbb, ins_iconst);
4179 MONO_INST_NEW (cfg, ins, opcode);
4180 ins->dreg = mono_alloc_ireg (cfg);
4181 ins->inst_basereg = args [0]->dreg;
4182 ins->inst_offset = 0;
4183 ins->sreg2 = ins_iconst->dreg;
4184 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4185 MONO_ADD_INS (cfg->cbb, ins);
4187 } else if (strcmp (cmethod->name, "Add") == 0) {
4190 if (fsig->params [0]->type == MONO_TYPE_I4)
4191 opcode = OP_ATOMIC_ADD_NEW_I4;
4192 #if SIZEOF_REGISTER == 8
4193 else if (fsig->params [0]->type == MONO_TYPE_I8)
4194 opcode = OP_ATOMIC_ADD_NEW_I8;
4198 MONO_INST_NEW (cfg, ins, opcode);
4199 ins->dreg = mono_alloc_ireg (cfg);
4200 ins->inst_basereg = args [0]->dreg;
4201 ins->inst_offset = 0;
4202 ins->sreg2 = args [1]->dreg;
4203 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4204 MONO_ADD_INS (cfg->cbb, ins);
4207 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
4209 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
4210 if (strcmp (cmethod->name, "Exchange") == 0) {
4212 gboolean is_ref = fsig->params [0]->type == MONO_TYPE_OBJECT;
4214 if (fsig->params [0]->type == MONO_TYPE_I4)
4215 opcode = OP_ATOMIC_EXCHANGE_I4;
4216 #if SIZEOF_REGISTER == 8
4217 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I8) ||
4218 (fsig->params [0]->type == MONO_TYPE_I))
4219 opcode = OP_ATOMIC_EXCHANGE_I8;
4221 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I))
4222 opcode = OP_ATOMIC_EXCHANGE_I4;
4227 MONO_INST_NEW (cfg, ins, opcode);
4228 ins->dreg = mono_alloc_ireg (cfg);
4229 ins->inst_basereg = args [0]->dreg;
4230 ins->inst_offset = 0;
4231 ins->sreg2 = args [1]->dreg;
4232 MONO_ADD_INS (cfg->cbb, ins);
4234 switch (fsig->params [0]->type) {
4236 ins->type = STACK_I4;
4240 ins->type = STACK_I8;
4242 case MONO_TYPE_OBJECT:
4243 ins->type = STACK_OBJ;
4246 g_assert_not_reached ();
4249 #if HAVE_WRITE_BARRIERS
/* Reference store: emit the GC write barrier, keep the value alive. */
4251 MonoInst *dummy_use;
4252 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
4253 mono_emit_method_call (cfg, write_barrier, &args [0], NULL);
4254 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [1]);
4258 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
4260 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS
4261 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
4263 gboolean is_ref = MONO_TYPE_IS_REFERENCE (fsig->params [1]);
4264 if (fsig->params [1]->type == MONO_TYPE_I4)
4266 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I)
4267 size = sizeof (gpointer);
4268 else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I8)
4271 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
4272 ins->dreg = alloc_ireg (cfg);
4273 ins->sreg1 = args [0]->dreg;
4274 ins->sreg2 = args [1]->dreg;
4275 ins->sreg3 = args [2]->dreg;
4276 ins->type = STACK_I4;
4277 MONO_ADD_INS (cfg->cbb, ins);
4278 } else if (size == 8) {
4279 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
4280 ins->dreg = alloc_ireg (cfg);
4281 ins->sreg1 = args [0]->dreg;
4282 ins->sreg2 = args [1]->dreg;
4283 ins->sreg3 = args [2]->dreg;
4284 ins->type = STACK_I8;
4285 MONO_ADD_INS (cfg->cbb, ins);
4287 /* g_assert_not_reached (); */
4289 #if HAVE_WRITE_BARRIERS
4291 MonoInst *dummy_use;
4292 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
4293 mono_emit_method_call (cfg, write_barrier, &args [0], NULL);
4294 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [1]);
4298 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS */
/* --- Debugger.Break / Environment.get_IsRunningOnWindows --- */
4302 } else if (cmethod->klass->image == mono_defaults.corlib) {
4303 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
4304 && strcmp (cmethod->klass->name, "Debugger") == 0) {
/* (sic) 'brekpoint' matches the helper's actual definition in this file. */
4305 if (should_insert_brekpoint (cfg->method))
4306 MONO_INST_NEW (cfg, ins, OP_BREAK);
4308 MONO_INST_NEW (cfg, ins, OP_NOP);
4309 MONO_ADD_INS (cfg->cbb, ins);
4312 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
4313 && strcmp (cmethod->klass->name, "Environment") == 0) {
4315 EMIT_NEW_ICONST (cfg, ins, 1);
4317 EMIT_NEW_ICONST (cfg, ins, 0);
4321 } else if (cmethod->klass == mono_defaults.math_class) {
4323 * There is general branches code for Min/Max, but it does not work for
4325 * http://everything2.com/?node_id=1051618
/* SIMD intrinsics, then the arch-specific backend, get the last word. */
4329 #ifdef MONO_ARCH_SIMD_INTRINSICS
4330 if (cfg->opt & MONO_OPT_SIMD) {
4331 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
4337 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
4341 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *   Redirects selected runtime-internal calls to specialized managed
 *   implementations; currently handles String.InternalAllocateStr via
 *   the GC's managed allocator (skipped when allocation profiling is on).
 *   Returns NULL when no redirection applies.
 */
4344 inline static MonoInst*
4345 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
4346 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
4348 if (method->klass == mono_defaults.string_class) {
4349 /* managed string allocation support */
4350 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS)) {
4351 MonoInst *iargs [2];
4352 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4353 MonoMethod *managed_alloc = NULL;
4355 g_assert (vtable); /*Should not fail since it System.String*/
4356 #ifndef MONO_CROSS_COMPILE
4357 managed_alloc = mono_gc_get_managed_allocator (vtable, FALSE);
/* Call the managed allocator with (vtable, length). */
4361 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4362 iargs [1] = args [0];
4363 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
/*
 * mono_save_args:
 *   For inlining: creates a local variable for every argument (including
 *   'this') and emits stores of the caller's stack values into them.
 */
4370 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
4372 MonoInst *store, *temp;
4375 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
4376 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
4379 * FIXME: We should use *args++ = sp [0], but that would mean the arg
4380 * would be different than the MonoInst's used to represent arguments, and
4381 * the ldelema implementation can't deal with that.
4382 * Solution: When ldelema is used on an inline argument, create a var for
4383 * it, emit ldelema on that var, and emit the saving code below in
4384 * inline_method () if needed.
4386 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
4387 cfg->args [i] = temp;
4388 /* This uses cfg->args [i] which is set by the preceding line */
4389 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
4390 store->cil_code = sp [0]->cil_code;
/* Compile-time switches for the name-prefix inlining filters below. */
4395 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
4396 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
4398 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *   Debugging aid: only allow inlining of callees whose full name starts
 *   with the prefix in $MONO_INLINE_CALLED_METHOD_NAME_LIMIT.  The value
 *   is cached on first use; an unset variable means no restriction.
 */
4400 check_inline_called_method_name_limit (MonoMethod *called_method)
4403 static char *limit = NULL;
4405 if (limit == NULL) {
4406 char *limit_string = getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
4408 if (limit_string != NULL)
4409 limit = limit_string;
4411 limit = (char *) "";
4414 if (limit [0] != '\0') {
4415 char *called_method_name = mono_method_full_name (called_method, TRUE);
/* Prefix match: compare only the first strlen(limit) characters. */
4417 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
4418 g_free (called_method_name);
4420 //return (strncmp_result <= 0);
4421 return (strncmp_result == 0);
4428 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *   Debugging aid: only allow inlining when the CALLER's full name starts
 *   with the prefix in $MONO_INLINE_CALLER_METHOD_NAME_LIMIT.  The value
 *   is cached on first use; an unset variable means no restriction.
 */
4430 check_inline_caller_method_name_limit (MonoMethod *caller_method)
4433 static char *limit = NULL;
4435 if (limit == NULL) {
4436 char *limit_string = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
4437 if (limit_string != NULL) {
4438 limit = limit_string;
4440 limit = (char *) "";
4444 if (limit [0] != '\0') {
4445 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
/* Prefix match: compare only the first strlen(limit) characters. */
4447 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
4448 g_free (caller_method_name);
4450 //return (strncmp_result <= 0);
4451 return (strncmp_result == 0);
4459 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
4460 guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_allways)
4462 MonoInst *ins, *rvar = NULL;
4463 MonoMethodHeader *cheader;
4464 MonoBasicBlock *ebblock, *sbblock;
4466 MonoMethod *prev_inlined_method;
4467 MonoInst **prev_locals, **prev_args;
4468 MonoType **prev_arg_types;
4469 guint prev_real_offset;
4470 GHashTable *prev_cbb_hash;
4471 MonoBasicBlock **prev_cil_offset_to_bb;
4472 MonoBasicBlock *prev_cbb;
4473 unsigned char* prev_cil_start;
4474 guint32 prev_cil_offset_to_bb_len;
4475 MonoMethod *prev_current_method;
4476 MonoGenericContext *prev_generic_context;
4477 gboolean ret_var_set, prev_ret_var_set;
4479 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
4481 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
4482 if ((! inline_allways) && ! check_inline_called_method_name_limit (cmethod))
4485 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
4486 if ((! inline_allways) && ! check_inline_caller_method_name_limit (cfg->method))
4490 if (cfg->verbose_level > 2)
4491 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4493 if (!cmethod->inline_info) {
4494 mono_jit_stats.inlineable_methods++;
4495 cmethod->inline_info = 1;
4498 /* allocate local variables */
4499 cheader = mono_method_get_header (cmethod);
4501 if (cheader == NULL || mono_loader_get_last_error ()) {
4503 mono_metadata_free_mh (cheader);
4504 mono_loader_clear_error ();
4508 /* allocate space to store the return value */
4509 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
4510 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
4514 prev_locals = cfg->locals;
4515 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
4516 for (i = 0; i < cheader->num_locals; ++i)
4517 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
4519 /* allocate start and end blocks */
4520 /* This is needed so if the inline is aborted, we can clean up */
4521 NEW_BBLOCK (cfg, sbblock);
4522 sbblock->real_offset = real_offset;
4524 NEW_BBLOCK (cfg, ebblock);
4525 ebblock->block_num = cfg->num_bblocks++;
4526 ebblock->real_offset = real_offset;
4528 prev_args = cfg->args;
4529 prev_arg_types = cfg->arg_types;
4530 prev_inlined_method = cfg->inlined_method;
4531 cfg->inlined_method = cmethod;
4532 cfg->ret_var_set = FALSE;
4533 cfg->inline_depth ++;
4534 prev_real_offset = cfg->real_offset;
4535 prev_cbb_hash = cfg->cbb_hash;
4536 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
4537 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
4538 prev_cil_start = cfg->cil_start;
4539 prev_cbb = cfg->cbb;
4540 prev_current_method = cfg->current_method;
4541 prev_generic_context = cfg->generic_context;
4542 prev_ret_var_set = cfg->ret_var_set;
4544 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, *ip == CEE_CALLVIRT);
4546 ret_var_set = cfg->ret_var_set;
4548 cfg->inlined_method = prev_inlined_method;
4549 cfg->real_offset = prev_real_offset;
4550 cfg->cbb_hash = prev_cbb_hash;
4551 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
4552 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
4553 cfg->cil_start = prev_cil_start;
4554 cfg->locals = prev_locals;
4555 cfg->args = prev_args;
4556 cfg->arg_types = prev_arg_types;
4557 cfg->current_method = prev_current_method;
4558 cfg->generic_context = prev_generic_context;
4559 cfg->ret_var_set = prev_ret_var_set;
4560 cfg->inline_depth --;
4562 if ((costs >= 0 && costs < 60) || inline_allways) {
4563 if (cfg->verbose_level > 2)
4564 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4566 mono_jit_stats.inlined_methods++;
4568 /* always add some code to avoid block split failures */
4569 MONO_INST_NEW (cfg, ins, OP_NOP);
4570 MONO_ADD_INS (prev_cbb, ins);
4572 prev_cbb->next_bb = sbblock;
4573 link_bblock (cfg, prev_cbb, sbblock);
4576 * Get rid of the begin and end bblocks if possible to aid local
4579 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
4581 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
4582 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
4584 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
4585 MonoBasicBlock *prev = ebblock->in_bb [0];
4586 mono_merge_basic_blocks (cfg, prev, ebblock);
4588 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
4589 mono_merge_basic_blocks (cfg, prev_cbb, prev);
4590 cfg->cbb = prev_cbb;
4598 * If the inlined method contains only a throw, then the ret var is not
4599 * set, so set it to a dummy value.
4602 static double r8_0 = 0.0;
4604 switch (rvar->type) {
4606 MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
4609 MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
4614 MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
4617 MONO_INST_NEW (cfg, ins, OP_R8CONST);
4618 ins->type = STACK_R8;
4619 ins->inst_p0 = (void*)&r8_0;
4620 ins->dreg = rvar->dreg;
4621 MONO_ADD_INS (cfg->cbb, ins);
4624 MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (fsig->ret));
4627 g_assert_not_reached ();
4631 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
4634 mono_metadata_free_mh (cheader);
4637 if (cfg->verbose_level > 2)
4638 printf ("INLINE ABORTED %s\n", mono_method_full_name (cmethod, TRUE));
4639 cfg->exception_type = MONO_EXCEPTION_NONE;
4640 mono_loader_clear_error ();
4642 /* This gets rid of the newly added bblocks */
4643 cfg->cbb = prev_cbb;
4645 mono_metadata_free_mh (cheader);
4650 * Some of these comments may well be out-of-date.
4651 * Design decisions: we do a single pass over the IL code (and we do bblock
4652 * splitting/merging in the few cases when it's required: a back jump to an IL
4653 * address that was not already seen as bblock starting point).
4654 * Code is validated as we go (full verification is still better left to metadata/verify.c).
4655 * Complex operations are decomposed in simpler ones right away. We need to let the
4656 * arch-specific code peek and poke inside this process somehow (except when the
4657 * optimizations can take advantage of the full semantic info of coarse opcodes).
4658 * All the opcodes of the form opcode.s are 'normalized' to opcode.
4659 * MonoInst->opcode initially is the IL opcode or some simplification of that
4660 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
4661 * opcode with value bigger than OP_LAST.
4662 * At this point the IR can be handed over to an interpreter, a dumb code generator
4663 * or to the optimizing code generator that will translate it to SSA form.
4665 * Profiling directed optimizations.
4666 * We may compile by default with few or no optimizations and instrument the code
4667 * or the user may indicate what methods to optimize the most either in a config file
4668 * or through repeated runs where the compiler applies offline the optimizations to
4669 * each method and then decides if it was worth it.
/*
 * IL-verification guard macros used inside mono_method_to_ir ().
 * Each one bails out through UNVERIFIED (defined elsewhere in this file)
 * when the IL being converted is malformed; they rely on locals of
 * mono_method_to_ir () being in scope (sp, stack_start, header, ip, end,
 * num_args, cfg).
 *
 * NOTE(review): these are bare `if` statements, not wrapped in
 * do { } while (0), so using them as the body of an outer `if` with an
 * `else` would mis-bind the `else` — confirm all call sites use them as
 * standalone statements before relying on that.
 */
4672 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
4673 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
4674 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
4675 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
4676 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
4677 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
4678 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
/* On a type-load failure, record the offending class and jump to the
 * load_error label instead of the unverified path. */
4679 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; goto load_error;}
4681 /* offset from br.s -> br like opcodes */
4682 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *   Returns TRUE when the IL address IP does not start a *different* basic
 *   block than BB: either no bblock has been recorded for that offset in
 *   cfg->cil_offset_to_bb, or the recorded bblock is BB itself.
 */
4685 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
4687 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
4689 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *   Single linear pass over the IL stream in [start, end) that creates a
 *   MonoBasicBlock (via GET_BBLOCK) for every branch target and for the
 *   instruction following each branch/switch, so later conversion knows
 *   every bblock boundary up front.  Also marks the bblock containing a
 *   CEE_THROW as out-of-line so it can be moved out of the hot path.
 *   NOTE(review): *pos presumably receives the failing IP on error —
 *   confirm against the elided error paths.
 */
4693 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
4695 unsigned char *ip = start;
4696 unsigned char *target;
4699 MonoBasicBlock *bblock;
4700 const MonoOpcode *opcode;
4703 cli_addr = ip - start;
4704 i = mono_opcode_value ((const guint8 **)&ip, end);
4707 opcode = &mono_opcodes [i];
/* Advance/branch handling depends on the opcode's operand encoding. */
4708 switch (opcode->argument) {
4709 case MonoInlineNone:
4712 case MonoInlineString:
4713 case MonoInlineType:
4714 case MonoInlineField:
4715 case MonoInlineMethod:
4718 case MonoShortInlineR:
4725 case MonoShortInlineVar:
4726 case MonoShortInlineI:
/* Short branch: 2-byte instruction, signed 8-bit relative target. */
4729 case MonoShortInlineBrTarget:
4730 target = start + cli_addr + 2 + (signed char)ip [1];
4731 GET_BBLOCK (cfg, bblock, target);
/* The fall-through successor of a branch also starts a bblock. */
4734 GET_BBLOCK (cfg, bblock, ip);
/* Long branch: 5-byte instruction, signed 32-bit relative target. */
4736 case MonoInlineBrTarget:
4737 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
4738 GET_BBLOCK (cfg, bblock, target);
4741 GET_BBLOCK (cfg, bblock, ip);
/* switch: n 32-bit targets, all relative to the end of the instruction. */
4743 case MonoInlineSwitch: {
4744 guint32 n = read32 (ip + 1);
4747 cli_addr += 5 + 4 * n;
4748 target = start + cli_addr;
4749 GET_BBLOCK (cfg, bblock, target);
4751 for (j = 0; j < n; ++j) {
4752 target = start + cli_addr + (gint32)read32 (ip);
4753 GET_BBLOCK (cfg, bblock, target);
4763 g_assert_not_reached ();
4766 if (i == CEE_THROW) {
4767 unsigned char *bb_start = ip - 1;
4769 /* Find the start of the bblock containing the throw */
4771 while ((bb_start >= start) && !bblock) {
4772 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
/* Throwing blocks are cold: flag them so they can be laid out-of-line. */
4776 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *   Resolve TOKEN to a MonoMethod in the context of method M.  For wrapper
 *   methods the token indexes the wrapper's own data table; otherwise it is
 *   resolved through the metadata of M's image.  "allow_open" refers to the
 *   fact that, unlike mini_get_method (), no check against open constructed
 *   types is performed here.
 */
4785 static inline MonoMethod *
4786 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4790 if (m->wrapper_type != MONO_WRAPPER_NONE)
4791 return mono_method_get_wrapper_data (m, token);
4793 method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 *   Like mini_get_method_allow_open (), but when not compiling with generic
 *   sharing it additionally rejects methods whose declaring class is an open
 *   constructed type (the elided branch presumably clears/flags the result —
 *   confirm against the full source).  CFG may be NULL.
 */
4798 static inline MonoMethod *
4799 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4801 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
4803 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 *   Resolve TOKEN to a MonoClass relative to METHOD: wrapper methods look the
 *   class up in their wrapper data table, normal methods go through the
 *   image metadata with the given generic CONTEXT.  The class is initialized
 *   before being returned.
 */
4809 static inline MonoClass*
4810 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
4814 if (method->wrapper_type != MONO_WRAPPER_NONE)
4815 klass = mono_method_get_wrapper_data (method, token);
4817 klass = mono_class_get_full (method->klass->image, token, context);
4819 mono_class_init (klass);
/*
 * check_linkdemand:
 *
 * Returns TRUE if the JIT should abort inlining because "callee"
 * is influenced by security attributes (CAS link demands).
 *
 * When CALLEE carries declarative security and CALLER is being inlined
 * (cfg->method != caller), the link demand is evaluated eagerly:
 *  - MONO_JIT_SECURITY_OK: nothing to do;
 *  - MONO_JIT_LINKDEMAND_ECMA: emit a call that throws a SecurityException
 *    before the actual call/link;
 *  - otherwise record a MONO_EXCEPTION_SECURITY_LINKDEMAND on the cfg
 *    (unless an earlier exception is already pending).
 */
4828 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
4832 if ((cfg->method != caller) && mono_method_has_declsec (callee)) {
4836 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
4837 if (result == MONO_JIT_SECURITY_OK)
4840 if (result == MONO_JIT_LINKDEMAND_ECMA) {
4841 /* Generate code to throw a SecurityException before the actual call/link */
4842 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4845 NEW_ICONST (cfg, args [0], 4);
4846 NEW_METHODCONST (cfg, args [1], caller);
4847 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
4848 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
4849 /* don't hide previous results */
4850 cfg->exception_type = MONO_EXCEPTION_SECURITY_LINKDEMAND;
4851 cfg->exception_data = result;
/*
 * throw_exception:
 *   Returns (lazily resolving and caching on first use) the managed
 *   SecurityManager.ThrowException(Exception) method used to raise
 *   exceptions from JIT-generated code.
 *   NOTE(review): the cache is a plain static with no visible locking —
 *   presumably only reached from a context where that is safe; confirm.
 */
4859 throw_exception (void)
4861 static MonoMethod *method = NULL;
4864 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4865 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *   Emit IR that calls SecurityManager.ThrowException (EX) at the current
 *   point in CFG, i.e. makes the compiled code throw the pre-allocated
 *   managed exception EX when executed.
 */
4872 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
4874 MonoMethod *thrower = throw_exception ();
4877 EMIT_NEW_PCONST (cfg, args [0], ex);
4878 mono_emit_method_call (cfg, thrower, args, NULL);
/*
 * Return the original method if a wrapper is specified. We can only access
 * the custom attributes from the original method.
 */
4886 get_original_method (MonoMethod *method)
4888 if (method->wrapper_type == MONO_WRAPPER_NONE)
4891 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
4892 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
4895 /* in other cases we need to find the original method */
4896 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *   CoreCLR security check for field access: ask the security layer whether
 *   CALLER (unwrapped to its original method, since wrappers carry no
 *   attributes) may access FIELD, and if not, emit code that throws the
 *   returned exception at this point.
 */
4900 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
4901 MonoBasicBlock *bblock, unsigned char *ip)
4903 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
4904 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
4906 emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *   CoreCLR security check for method calls: mirror of
 *   ensure_method_is_allowed_to_access_field () but validating that CALLER
 *   may invoke CALLEE; emits a throw of the returned exception on denial.
 */
4910 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
4911 MonoBasicBlock *bblock, unsigned char *ip)
4913 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
4914 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
4916 emit_throw_exception (cfg, ex);
/*
 * Check that the IL instructions at ip are the array initialization
 * sequence and return the pointer to the data and the size.
 *
 * Recognized pattern (after the newarr):
 *   dup; ldtoken <field>; call RuntimeHelpers::InitializeArray(...)
 * On success, *out_size receives the byte size and *out_field_token the
 * metadata token of the RVA-backed field; the return value is the data
 * pointer (or, for AOT, the RVA itself wrapped via GUINT_TO_POINTER so the
 * lookup can be redone at load time).
 */
4924 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
4927 * newarr[System.Int32]
4929 * ldtoken field valuetype ...
4930 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* ip[5] == 0x4 checks the ldtoken operand's token-type byte. */
4932 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
4933 guint32 token = read32 (ip + 7);
4934 guint32 field_token = read32 (ip + 2);
4935 guint32 field_index = field_token & 0xffffff;
4937 const char *data_ptr;
4939 MonoMethod *cmethod;
4940 MonoClass *dummy_class;
4941 MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
4947 *out_field_token = field_token;
4949 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* Only the real corlib RuntimeHelpers.InitializeArray qualifies. */
4952 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
4954 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
4955 case MONO_TYPE_BOOLEAN:
4959 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
4960 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
4961 case MONO_TYPE_CHAR:
4971 return NULL; /* stupid ARM FP swapped format */
/* The blob must be at least as large as the computed array payload. */
4981 if (size > mono_type_size (field->type, &dummy_align))
4984 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
4985 if (!method->klass->image->dynamic) {
4986 field_index = read32 (ip + 2) & 0xffffff;
4987 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
4988 data_ptr = mono_image_rva_map (method->klass->image, rva);
4989 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
4990 /* for aot code we do the lookup on load */
4991 if (aot && data_ptr)
4992 return GUINT_TO_POINTER (rva);
4994 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
/* Dynamic (SRE) images have no RVA table; read the data directly. */
4996 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *   Record a MONO_EXCEPTION_INVALID_PROGRAM on CFG with a message naming
 *   METHOD and disassembling the offending instruction at IP (or stating
 *   that the method body is empty).  Allocates the message; temporary
 *   strings and the method header are released before returning.
 */
5004 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
5006 char *method_fname = mono_method_full_name (method, TRUE);
5008 MonoMethodHeader *header = mono_method_get_header (method);
5010 if (header->code_size == 0)
5011 method_code = g_strdup ("method body is empty.");
5013 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
5014 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
5015 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
5016 g_free (method_fname);
5017 g_free (method_code);
5018 mono_metadata_free_mh (header);
/*
 * set_exception_object:
 *   Abort compilation with a pre-built managed exception object: mark the
 *   cfg as MONO_EXCEPTION_OBJECT_SUPPLIED and stash EXCEPTION in
 *   cfg->exception_ptr, registering that slot as a GC root so the object
 *   is kept alive until the exception is actually raised.
 */
5022 set_exception_object (MonoCompile *cfg, MonoException *exception)
5024 cfg->exception_type = MONO_EXCEPTION_OBJECT_SUPPLIED;
5025 MONO_GC_REGISTER_ROOT (cfg->exception_ptr);
5026 cfg->exception_ptr = exception;
/*
 * generic_class_is_reference_type:
 *   Returns TRUE if KLASS is a reference type, resolving type variables
 *   through the generic-sharing context first when one is active (so a
 *   shared T constrained/instantiated to a reference type answers TRUE).
 */
5030 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
5034 if (cfg->generic_sharing_context)
5035 type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, &klass->byval_arg);
5037 type = &klass->byval_arg;
5038 return MONO_TYPE_IS_REFERENCE (type);
/*
 * emit_stloc_ir:
 *   Emit the store of *sp into local N.  When the value on top of the
 *   stack is the constant just emitted at the end of the current bblock
 *   and the store would be a plain reg-reg move, retarget that constant's
 *   destination register instead of emitting a separate store.
 */
5042 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
5045 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
5046 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
5047 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
5048 /* Optimize reg-reg moves away */
5050 * Can't optimize other opcodes, since sp[0] might point to
5051 * the last ins of a decomposed opcode.
5053 sp [0]->dreg = (cfg)->locals [n]->dreg;
5055 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
/*
 * ldloca inhibits many optimizations so try to get rid of it in common
 * cases: here the pattern `ldloca N; initobj <klass>` is rewritten into a
 * direct zeroing of local N (NULL store for reference types, VZERO for
 * value types), skipping the address-taken local entirely.
 * Returns the IP to continue decoding from (presumably past the consumed
 * instructions on success — confirm against the elided tail).
 */
5063 static inline unsigned char *
5064 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
5073 local = read16 (ip + 2);
/* Only fuse when the following INITOBJ is in the same basic block. */
5077 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
5078 gboolean skip = FALSE;
5080 /* From the INITOBJ case */
5081 token = read32 (ip + 2);
5082 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
5083 CHECK_TYPELOAD (klass);
5084 if (generic_class_is_reference_type (cfg, klass)) {
5085 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5086 } else if (MONO_TYPE_IS_REFERENCE (&klass->byval_arg)) {
5087 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5088 } else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
5089 MONO_EMIT_NEW_VZERO (cfg, cfg->locals [local]->dreg, klass);
/*
 * is_exception_class:
 *   Walks the parent chain of CLASS looking for System.Exception
 *   (mono_defaults.exception_class); presumably returns TRUE on a match
 *   and FALSE when the chain is exhausted — confirm against the elided
 *   return statements.
 */
5102 is_exception_class (MonoClass *class)
5105 if (class == mono_defaults.exception_class)
5107 class = class->parent;
5113 * mono_method_to_ir:
5115 * Translate the .net IL into linear IR.
5118 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
5119 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
5120 guint inline_offset, gboolean is_virtual_call)
5123 MonoInst *ins, **sp, **stack_start;
5124 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
5125 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
5126 MonoMethod *cmethod, *method_definition;
5127 MonoInst **arg_array;
5128 MonoMethodHeader *header;
5130 guint32 token, ins_flag;
5132 MonoClass *constrained_call = NULL;
5133 unsigned char *ip, *end, *target, *err_pos;
5134 static double r8_0 = 0.0;
5135 MonoMethodSignature *sig;
5136 MonoGenericContext *generic_context = NULL;
5137 MonoGenericContainer *generic_container = NULL;
5138 MonoType **param_types;
5139 int i, n, start_new_bblock, dreg;
5140 int num_calls = 0, inline_costs = 0;
5141 int breakpoint_id = 0;
5143 MonoBoolean security, pinvoke;
5144 MonoSecurityManager* secman = NULL;
5145 MonoDeclSecurityActions actions;
5146 GSList *class_inits = NULL;
5147 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
5149 gboolean init_locals, seq_points, skip_dead_blocks;
5151 /* serialization and xdomain stuff may need access to private fields and methods */
5152 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
5153 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
5154 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
5155 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
5156 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
5157 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
5159 dont_verify |= mono_security_get_mode () == MONO_SECURITY_MODE_SMCS_HACK;
5161 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
5162 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
5163 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
5164 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
5166 image = method->klass->image;
5167 header = mono_method_get_header (method);
5168 generic_container = mono_method_get_generic_container (method);
5169 sig = mono_method_signature (method);
5170 num_args = sig->hasthis + sig->param_count;
5171 ip = (unsigned char*)header->code;
5172 cfg->cil_start = ip;
5173 end = ip + header->code_size;
5174 mono_jit_stats.cil_code_size += header->code_size;
5175 init_locals = header->init_locals;
5177 seq_points = cfg->gen_seq_points && cfg->method == method;
5180 * Methods without init_locals set could cause asserts in various passes
5185 method_definition = method;
5186 while (method_definition->is_inflated) {
5187 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
5188 method_definition = imethod->declaring;
5191 /* SkipVerification is not allowed if core-clr is enabled */
5192 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
5194 dont_verify_stloc = TRUE;
5197 if (!dont_verify && mini_method_verify (cfg, method_definition))
5198 goto exception_exit;
5200 if (mono_debug_using_mono_debugger ())
5201 cfg->keep_cil_nops = TRUE;
5203 if (sig->is_inflated)
5204 generic_context = mono_method_get_context (method);
5205 else if (generic_container)
5206 generic_context = &generic_container->context;
5207 cfg->generic_context = generic_context;
5209 if (!cfg->generic_sharing_context)
5210 g_assert (!sig->has_type_parameters);
5212 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
5213 g_assert (method->is_inflated);
5214 g_assert (mono_method_get_context (method)->method_inst);
5216 if (method->is_inflated && mono_method_get_context (method)->method_inst)
5217 g_assert (sig->generic_param_count);
5219 if (cfg->method == method) {
5220 cfg->real_offset = 0;
5222 cfg->real_offset = inline_offset;
5225 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
5226 cfg->cil_offset_to_bb_len = header->code_size;
5228 cfg->current_method = method;
5230 if (cfg->verbose_level > 2)
5231 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
5233 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
5235 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
5236 for (n = 0; n < sig->param_count; ++n)
5237 param_types [n + sig->hasthis] = sig->params [n];
5238 cfg->arg_types = param_types;
5240 dont_inline = g_list_prepend (dont_inline, method);
5241 if (cfg->method == method) {
5243 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
5244 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
5247 NEW_BBLOCK (cfg, start_bblock);
5248 cfg->bb_entry = start_bblock;
5249 start_bblock->cil_code = NULL;
5250 start_bblock->cil_length = 0;
5253 NEW_BBLOCK (cfg, end_bblock);
5254 cfg->bb_exit = end_bblock;
5255 end_bblock->cil_code = NULL;
5256 end_bblock->cil_length = 0;
5257 g_assert (cfg->num_bblocks == 2);
5259 arg_array = cfg->args;
5261 if (header->num_clauses) {
5262 cfg->spvars = g_hash_table_new (NULL, NULL);
5263 cfg->exvars = g_hash_table_new (NULL, NULL);
5265 /* handle exception clauses */
5266 for (i = 0; i < header->num_clauses; ++i) {
5267 MonoBasicBlock *try_bb;
5268 MonoExceptionClause *clause = &header->clauses [i];
5269 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
5270 try_bb->real_offset = clause->try_offset;
5271 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
5272 tblock->real_offset = clause->handler_offset;
5273 tblock->flags |= BB_EXCEPTION_HANDLER;
5275 link_bblock (cfg, try_bb, tblock);
5277 if (*(ip + clause->handler_offset) == CEE_POP)
5278 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
5280 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
5281 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
5282 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
5283 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5284 MONO_ADD_INS (tblock, ins);
5286 /* todo: is a fault block unsafe to optimize? */
5287 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
5288 tblock->flags |= BB_EXCEPTION_UNSAFE;
5292 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
5294 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
5296 /* catch and filter blocks get the exception object on the stack */
5297 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
5298 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5299 MonoInst *dummy_use;
5301 /* mostly like handle_stack_args (), but just sets the input args */
5302 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
5303 tblock->in_scount = 1;
5304 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5305 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5308 * Add a dummy use for the exvar so its liveness info will be
5312 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
5314 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5315 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
5316 tblock->flags |= BB_EXCEPTION_HANDLER;
5317 tblock->real_offset = clause->data.filter_offset;
5318 tblock->in_scount = 1;
5319 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5320 /* The filter block shares the exvar with the handler block */
5321 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5322 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5323 MONO_ADD_INS (tblock, ins);
5327 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
5328 clause->data.catch_class &&
5329 cfg->generic_sharing_context &&
5330 mono_class_check_context_used (clause->data.catch_class)) {
5332 * In shared generic code with catch
5333 * clauses containing type variables
5334 * the exception handling code has to
5335 * be able to get to the rgctx.
5336 * Therefore we have to make sure that
5337 * the vtable/mrgctx argument (for
5338 * static or generic methods) or the
5339 * "this" argument (for non-static
5340 * methods) are live.
5342 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5343 mini_method_get_context (method)->method_inst ||
5344 method->klass->valuetype) {
5345 mono_get_vtable_var (cfg);
5347 MonoInst *dummy_use;
5349 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
5354 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
5355 cfg->cbb = start_bblock;
5356 cfg->args = arg_array;
5357 mono_save_args (cfg, sig, inline_args);
5360 /* FIRST CODE BLOCK */
5361 NEW_BBLOCK (cfg, bblock);
5362 bblock->cil_code = ip;
5366 ADD_BBLOCK (cfg, bblock);
5368 if (cfg->method == method) {
5369 breakpoint_id = mono_debugger_method_has_breakpoint (method);
5370 if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
5371 MONO_INST_NEW (cfg, ins, OP_BREAK);
5372 MONO_ADD_INS (bblock, ins);
5376 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5377 secman = mono_security_manager_get_methods ();
5379 security = (secman && mono_method_has_declsec (method));
5380 /* at this point having security doesn't mean we have any code to generate */
5381 if (security && (cfg->method == method)) {
5382 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
5383 * And we do not want to enter the next section (with allocation) if we
5384 * have nothing to generate */
5385 security = mono_declsec_get_demands (method, &actions);
5388 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
5389 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
5391 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5392 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5393 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
5395 /* unless the method or it's class has the [SuppressUnmanagedCodeSecurity] attribute */
5396 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5400 mono_custom_attrs_free (custom);
5403 custom = mono_custom_attrs_from_class (wrapped->klass);
5404 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5408 mono_custom_attrs_free (custom);
5411 /* not a P/Invoke after all */
5416 if ((init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
5417 /* we use a separate basic block for the initialization code */
5418 NEW_BBLOCK (cfg, init_localsbb);
5419 cfg->bb_init = init_localsbb;
5420 init_localsbb->real_offset = cfg->real_offset;
5421 start_bblock->next_bb = init_localsbb;
5422 init_localsbb->next_bb = bblock;
5423 link_bblock (cfg, start_bblock, init_localsbb);
5424 link_bblock (cfg, init_localsbb, bblock);
5426 cfg->cbb = init_localsbb;
5428 start_bblock->next_bb = bblock;
5429 link_bblock (cfg, start_bblock, bblock);
5432 /* at this point we know, if security is TRUE, that some code needs to be generated */
5433 if (security && (cfg->method == method)) {
5436 mono_jit_stats.cas_demand_generation++;
5438 if (actions.demand.blob) {
5439 /* Add code for SecurityAction.Demand */
5440 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
5441 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
5442 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5443 mono_emit_method_call (cfg, secman->demand, args, NULL);
5445 if (actions.noncasdemand.blob) {
5446 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
5447 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
5448 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
5449 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
5450 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5451 mono_emit_method_call (cfg, secman->demand, args, NULL);
5453 if (actions.demandchoice.blob) {
5454 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
5455 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
5456 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
5457 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
5458 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
5462 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
5464 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
5467 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
5468 /* check if this is native code, e.g. an icall or a p/invoke */
5469 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
5470 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5472 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
5473 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
5475 /* if this ia a native call then it can only be JITted from platform code */
5476 if ((icall || pinvk) && method->klass && method->klass->image) {
5477 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
5478 MonoException *ex = icall ? mono_get_exception_security () :
5479 mono_get_exception_method_access ();
5480 emit_throw_exception (cfg, ex);
5487 if (header->code_size == 0)
5490 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
5495 if (cfg->method == method)
5496 mono_debug_init_method (cfg, bblock, breakpoint_id);
5498 for (n = 0; n < header->num_locals; ++n) {
5499 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
5504 /* We force the vtable variable here for all shared methods
5505 for the possibility that they might show up in a stack
5506 trace where their exact instantiation is needed. */
5507 if (cfg->generic_sharing_context && method == cfg->method) {
5508 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5509 mini_method_get_context (method)->method_inst ||
5510 method->klass->valuetype) {
5511 mono_get_vtable_var (cfg);
5513 /* FIXME: Is there a better way to do this?
5514 We need the variable live for the duration
5515 of the whole method. */
5516 cfg->args [0]->flags |= MONO_INST_INDIRECT;
5520 /* add a check for this != NULL to inlined methods */
5521 if (is_virtual_call) {
5524 NEW_ARGLOAD (cfg, arg_ins, 0);
5525 MONO_ADD_INS (cfg->cbb, arg_ins);
5526 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
5529 skip_dead_blocks = !dont_verify;
5530 if (skip_dead_blocks) {
5531 original_bb = bb = mono_basic_block_split (method, &error);
5532 if (!mono_error_ok (&error)) {
5533 mono_error_cleanup (&error);
5539 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
5540 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
5543 start_new_bblock = 0;
5546 if (cfg->method == method)
5547 cfg->real_offset = ip - header->code;
5549 cfg->real_offset = inline_offset;
5554 if (start_new_bblock) {
5555 bblock->cil_length = ip - bblock->cil_code;
5556 if (start_new_bblock == 2) {
5557 g_assert (ip == tblock->cil_code);
5559 GET_BBLOCK (cfg, tblock, ip);
5561 bblock->next_bb = tblock;
5564 start_new_bblock = 0;
5565 for (i = 0; i < bblock->in_scount; ++i) {
5566 if (cfg->verbose_level > 3)
5567 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5568 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5572 g_slist_free (class_inits);
5575 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
5576 link_bblock (cfg, bblock, tblock);
5577 if (sp != stack_start) {
5578 handle_stack_args (cfg, stack_start, sp - stack_start);
5580 CHECK_UNVERIFIABLE (cfg);
5582 bblock->next_bb = tblock;
5585 for (i = 0; i < bblock->in_scount; ++i) {
5586 if (cfg->verbose_level > 3)
5587 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5588 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5591 g_slist_free (class_inits);
5596 if (skip_dead_blocks) {
5597 int ip_offset = ip - header->code;
5599 if (ip_offset == bb->end)
5603 int op_size = mono_opcode_size (ip, end);
5604 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
5606 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
5608 if (ip_offset + op_size == bb->end) {
5609 MONO_INST_NEW (cfg, ins, OP_NOP);
5610 MONO_ADD_INS (bblock, ins);
5611 start_new_bblock = 1;
5619 * Sequence points are points where the debugger can place a breakpoint.
5620 * Currently, we generate these automatically at points where the IL
5623 if (seq_points && sp == stack_start) {
5624 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
5625 MONO_ADD_INS (cfg->cbb, ins);
5628 bblock->real_offset = cfg->real_offset;
5630 if ((cfg->method == method) && cfg->coverage_info) {
5631 guint32 cil_offset = ip - header->code;
5632 cfg->coverage_info->data [cil_offset].cil_code = ip;
5634 /* TODO: Use an increment here */
5635 #if defined(TARGET_X86)
5636 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
5637 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
5639 MONO_ADD_INS (cfg->cbb, ins);
5641 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
5642 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
5646 if (cfg->verbose_level > 3)
5647 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
5651 if (cfg->keep_cil_nops)
5652 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
5654 MONO_INST_NEW (cfg, ins, OP_NOP);
5656 MONO_ADD_INS (bblock, ins);
5659 if (should_insert_brekpoint (cfg->method))
5660 MONO_INST_NEW (cfg, ins, OP_BREAK);
5662 MONO_INST_NEW (cfg, ins, OP_NOP);
5664 MONO_ADD_INS (bblock, ins);
5670 CHECK_STACK_OVF (1);
5671 n = (*ip)-CEE_LDARG_0;
5673 EMIT_NEW_ARGLOAD (cfg, ins, n);
5681 CHECK_STACK_OVF (1);
5682 n = (*ip)-CEE_LDLOC_0;
5684 EMIT_NEW_LOCLOAD (cfg, ins, n);
5693 n = (*ip)-CEE_STLOC_0;
5696 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
5698 emit_stloc_ir (cfg, sp, header, n);
5705 CHECK_STACK_OVF (1);
5708 EMIT_NEW_ARGLOAD (cfg, ins, n);
5714 CHECK_STACK_OVF (1);
5717 NEW_ARGLOADA (cfg, ins, n);
5718 MONO_ADD_INS (cfg->cbb, ins);
5728 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
5730 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
5735 CHECK_STACK_OVF (1);
5738 EMIT_NEW_LOCLOAD (cfg, ins, n);
5742 case CEE_LDLOCA_S: {
5743 unsigned char *tmp_ip;
5745 CHECK_STACK_OVF (1);
5746 CHECK_LOCAL (ip [1]);
5748 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
5754 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
5763 CHECK_LOCAL (ip [1]);
5764 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
5766 emit_stloc_ir (cfg, sp, header, ip [1]);
5771 CHECK_STACK_OVF (1);
5772 EMIT_NEW_PCONST (cfg, ins, NULL);
5773 ins->type = STACK_OBJ;
5778 CHECK_STACK_OVF (1);
5779 EMIT_NEW_ICONST (cfg, ins, -1);
5792 CHECK_STACK_OVF (1);
5793 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
5799 CHECK_STACK_OVF (1);
5801 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
5807 CHECK_STACK_OVF (1);
5808 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
5814 CHECK_STACK_OVF (1);
5815 MONO_INST_NEW (cfg, ins, OP_I8CONST);
5816 ins->type = STACK_I8;
5817 ins->dreg = alloc_dreg (cfg, STACK_I8);
5819 ins->inst_l = (gint64)read64 (ip);
5820 MONO_ADD_INS (bblock, ins);
5826 gboolean use_aotconst = FALSE;
5828 #ifdef TARGET_POWERPC
5829 /* FIXME: Clean this up */
5830 if (cfg->compile_aot)
5831 use_aotconst = TRUE;
5834 /* FIXME: we should really allocate this only late in the compilation process */
5835 f = mono_domain_alloc (cfg->domain, sizeof (float));
5837 CHECK_STACK_OVF (1);
5843 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
5845 dreg = alloc_freg (cfg);
5846 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
5847 ins->type = STACK_R8;
5849 MONO_INST_NEW (cfg, ins, OP_R4CONST);
5850 ins->type = STACK_R8;
5851 ins->dreg = alloc_dreg (cfg, STACK_R8);
5853 MONO_ADD_INS (bblock, ins);
5863 gboolean use_aotconst = FALSE;
5865 #ifdef TARGET_POWERPC
5866 /* FIXME: Clean this up */
5867 if (cfg->compile_aot)
5868 use_aotconst = TRUE;
5871 /* FIXME: we should really allocate this only late in the compilation process */
5872 d = mono_domain_alloc (cfg->domain, sizeof (double));
5874 CHECK_STACK_OVF (1);
5880 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
5882 dreg = alloc_freg (cfg);
5883 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
5884 ins->type = STACK_R8;
5886 MONO_INST_NEW (cfg, ins, OP_R8CONST);
5887 ins->type = STACK_R8;
5888 ins->dreg = alloc_dreg (cfg, STACK_R8);
5890 MONO_ADD_INS (bblock, ins);
5899 MonoInst *temp, *store;
5901 CHECK_STACK_OVF (1);
5905 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
5906 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
5908 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
5911 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
5924 if (sp [0]->type == STACK_R8)
5925 /* we need to pop the value from the x86 FP stack */
5926 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
5935 if (stack_start != sp)
5937 token = read32 (ip + 1);
5938 /* FIXME: check the signature matches */
5939 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
5944 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
5945 GENERIC_SHARING_FAILURE (CEE_JMP);
5947 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5948 CHECK_CFG_EXCEPTION;
5950 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
5952 MonoMethodSignature *fsig = mono_method_signature (cmethod);
5955 /* Handle tail calls similarly to calls */
5956 n = fsig->param_count + fsig->hasthis;
5958 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
5959 call->method = cmethod;
5960 call->tail_call = TRUE;
5961 call->signature = mono_method_signature (cmethod);
5962 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
5963 call->inst.inst_p0 = cmethod;
5964 for (i = 0; i < n; ++i)
5965 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
5967 mono_arch_emit_call (cfg, call);
5968 MONO_ADD_INS (bblock, (MonoInst*)call);
5971 for (i = 0; i < num_args; ++i)
5972 /* Prevent arguments from being optimized away */
5973 arg_array [i]->flags |= MONO_INST_VOLATILE;
5975 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
5976 ins = (MonoInst*)call;
5977 ins->inst_p0 = cmethod;
5978 MONO_ADD_INS (bblock, ins);
5982 start_new_bblock = 1;
5987 case CEE_CALLVIRT: {
5988 MonoInst *addr = NULL;
5989 MonoMethodSignature *fsig = NULL;
5991 int virtual = *ip == CEE_CALLVIRT;
5992 int calli = *ip == CEE_CALLI;
5993 gboolean pass_imt_from_rgctx = FALSE;
5994 MonoInst *imt_arg = NULL;
5995 gboolean pass_vtable = FALSE;
5996 gboolean pass_mrgctx = FALSE;
5997 MonoInst *vtable_arg = NULL;
5998 gboolean check_this = FALSE;
5999 gboolean supported_tail_call = FALSE;
6002 token = read32 (ip + 1);
6009 if (method->wrapper_type != MONO_WRAPPER_NONE)
6010 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
6012 fsig = mono_metadata_parse_signature (image, token);
6014 n = fsig->param_count + fsig->hasthis;
6016 if (method->dynamic && fsig->pinvoke) {
6020 * This is a call through a function pointer using a pinvoke
6021 * signature. Have to create a wrapper and call that instead.
6022 * FIXME: This is very slow, need to create a wrapper at JIT time
6023 * instead based on the signature.
6025 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
6026 EMIT_NEW_PCONST (cfg, args [1], fsig);
6028 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
6031 MonoMethod *cil_method;
6033 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6034 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
6035 cil_method = cmethod;
6036 } else if (constrained_call) {
6037 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
6039 * This is needed since get_method_constrained can't find
6040 * the method in klass representing a type var.
6041 * The type var is guaranteed to be a reference type in this
6044 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6045 cil_method = cmethod;
6046 g_assert (!cmethod->klass->valuetype);
6048 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
6051 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6052 cil_method = cmethod;
6057 if (!dont_verify && !cfg->skip_visibility) {
6058 MonoMethod *target_method = cil_method;
6059 if (method->is_inflated) {
6060 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
6062 if (!mono_method_can_access_method (method_definition, target_method) &&
6063 !mono_method_can_access_method (method, cil_method))
6064 METHOD_ACCESS_FAILURE;
6067 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
6068 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
6070 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
6071 /* MS.NET seems to silently convert this to a callvirt */
6076 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
6077 * converts to a callvirt.
6079 * tests/bug-515884.il is an example of this behavior
6081 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
6082 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
6083 if (!virtual && cmethod->klass->marshalbyref && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
6087 if (!cmethod->klass->inited)
6088 if (!mono_class_init (cmethod->klass))
6091 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
6092 mini_class_is_system_array (cmethod->klass)) {
6093 array_rank = cmethod->klass->rank;
6094 fsig = mono_method_signature (cmethod);
6096 fsig = mono_method_signature (cmethod);
6101 if (fsig->pinvoke) {
6102 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
6103 check_for_pending_exc, FALSE);
6104 fsig = mono_method_signature (wrapper);
6105 } else if (constrained_call) {
6106 fsig = mono_method_signature (cmethod);
6108 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
6112 mono_save_token_info (cfg, image, token, cil_method);
6114 n = fsig->param_count + fsig->hasthis;
6116 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
6117 if (check_linkdemand (cfg, method, cmethod))
6119 CHECK_CFG_EXCEPTION;
6122 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
6123 g_assert_not_reached ();
6126 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
6129 if (!cfg->generic_sharing_context && cmethod)
6130 g_assert (!mono_method_check_context_used (cmethod));
6134 //g_assert (!virtual || fsig->hasthis);
6138 if (constrained_call) {
6140 * We have the `constrained.' prefix opcode.
6142 if (constrained_call->valuetype && !cmethod->klass->valuetype) {
6144 * The type parameter is instantiated as a valuetype,
6145 * but that type doesn't override the method we're
6146 * calling, so we need to box `this'.
6148 if (cfg->generic_sharing_context && mono_class_check_context_used (constrained_call))
6149 GENERIC_SHARING_FAILURE (CEE_CONSTRAINED_);
6151 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
6152 ins->klass = constrained_call;
6153 sp [0] = handle_box (cfg, ins, constrained_call);
6154 CHECK_CFG_EXCEPTION;
6155 } else if (!constrained_call->valuetype) {
6156 int dreg = alloc_preg (cfg);
6159 * The type parameter is instantiated as a reference
6160 * type. We have a managed pointer on the stack, so
6161 * we need to dereference it here.
6163 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
6164 ins->type = STACK_OBJ;
6166 } else if (cmethod->klass->valuetype)
6168 constrained_call = NULL;
6171 if (*ip != CEE_CALLI && check_call_signature (cfg, fsig, sp))
6175 * If the callee is a shared method, then its static cctor
6176 * might not get called after the call was patched.
6178 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
6179 emit_generic_class_init (cfg, cmethod->klass);
6180 CHECK_TYPELOAD (cmethod->klass);
6183 if (cmethod && ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
6184 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
6185 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6186 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
6187 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6190 * Pass vtable iff target method might
6191 * be shared, which means that sharing
6192 * is enabled for its class and its
6193 * context is sharable (and it's not a
6196 if (sharing_enabled && context_sharable &&
6197 !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
6201 if (cmethod && mini_method_get_context (cmethod) &&
6202 mini_method_get_context (cmethod)->method_inst) {
6203 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6204 MonoGenericContext *context = mini_method_get_context (cmethod);
6205 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6207 g_assert (!pass_vtable);
6209 if (sharing_enabled && context_sharable)
6213 if (cfg->generic_sharing_context && cmethod) {
6214 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
6216 context_used = mono_method_check_context_used (cmethod);
6218 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
6219 /* Generic method interface
6220 calls are resolved via a
6221 helper function and don't
6223 if (!cmethod_context || !cmethod_context->method_inst)
6224 pass_imt_from_rgctx = TRUE;
6228 * If a shared method calls another
6229 * shared method then the caller must
6230 * have a generic sharing context
6231 * because the magic trampoline
6232 * requires it. FIXME: We shouldn't
6233 * have to force the vtable/mrgctx
6234 * variable here. Instead there
6235 * should be a flag in the cfg to
6236 * request a generic sharing context.
6239 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
6240 mono_get_vtable_var (cfg);
6245 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
6247 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
6249 CHECK_TYPELOAD (cmethod->klass);
6250 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
6255 g_assert (!vtable_arg);
6257 if (!cfg->compile_aot) {
6259 * emit_get_rgctx_method () calls mono_class_vtable () so check
6260 * for type load errors before.
6262 mono_class_setup_vtable (cmethod->klass);
6263 CHECK_TYPELOAD (cmethod->klass);
6266 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
6268 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
6269 MONO_METHOD_IS_FINAL (cmethod)) {
6276 if (pass_imt_from_rgctx) {
6277 g_assert (!pass_vtable);
6280 imt_arg = emit_get_rgctx_method (cfg, context_used,
6281 cmethod, MONO_RGCTX_INFO_METHOD);
6285 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
6287 /* Calling virtual generic methods */
6288 if (cmethod && virtual &&
6289 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
6290 !(MONO_METHOD_IS_FINAL (cmethod) &&
6291 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
6292 mono_method_signature (cmethod)->generic_param_count) {
6293 MonoInst *this_temp, *this_arg_temp, *store;
6294 MonoInst *iargs [4];
6296 g_assert (mono_method_signature (cmethod)->is_inflated);
6298 /* Prevent inlining of methods that contain indirect calls */
6301 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK
6302 /* The llvm vcall trampolines doesn't support generic virtual calls yet */
6303 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt && !mono_use_llvm) {
6304 g_assert (!imt_arg);
6306 g_assert (cmethod->is_inflated);
6307 imt_arg = emit_get_rgctx_method (cfg, context_used,
6308 cmethod, MONO_RGCTX_INFO_METHOD);
6309 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, sp [0], imt_arg);
6313 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
6314 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
6315 MONO_ADD_INS (bblock, store);
6317 /* FIXME: This should be a managed pointer */
6318 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
6320 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
6321 iargs [1] = emit_get_rgctx_method (cfg, context_used,
6322 cmethod, MONO_RGCTX_INFO_METHOD);
6323 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
6324 addr = mono_emit_jit_icall (cfg,
6325 mono_helper_compile_generic_method, iargs);
6327 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
6329 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6332 if (!MONO_TYPE_IS_VOID (fsig->ret))
6333 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6335 CHECK_CFG_EXCEPTION;
6342 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6343 supported_tail_call = cmethod && MONO_ARCH_USE_OP_TAIL_CALL (mono_method_signature (method), mono_method_signature (cmethod));
6345 supported_tail_call = cmethod && mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
6349 /* FIXME: runtime generic context pointer for jumps? */
6350 /* FIXME: handle this for generic sharing eventually */
6351 if ((ins_flag & MONO_INST_TAILCALL) && !cfg->generic_sharing_context && !vtable_arg && cmethod && (*ip == CEE_CALL) && supported_tail_call) {
6354 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6357 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6358 /* Handle tail calls similarly to calls */
6359 call = mono_emit_call_args (cfg, mono_method_signature (cmethod), sp, FALSE, FALSE, TRUE);
6361 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6362 call->tail_call = TRUE;
6363 call->method = cmethod;
6364 call->signature = mono_method_signature (cmethod);
6367 * We implement tail calls by storing the actual arguments into the
6368 * argument variables, then emitting a CEE_JMP.
6370 for (i = 0; i < n; ++i) {
6371 /* Prevent argument from being register allocated */
6372 arg_array [i]->flags |= MONO_INST_VOLATILE;
6373 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6377 ins = (MonoInst*)call;
6378 ins->inst_p0 = cmethod;
6379 ins->inst_p1 = arg_array [0];
6380 MONO_ADD_INS (bblock, ins);
6381 link_bblock (cfg, bblock, end_bblock);
6382 start_new_bblock = 1;
6384 CHECK_CFG_EXCEPTION;
6386 /* skip CEE_RET as well */
6392 /* Conversion to a JIT intrinsic */
6393 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
6394 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6395 type_to_eval_stack_type ((cfg), fsig->ret, ins);
6400 CHECK_CFG_EXCEPTION;
6408 if ((cfg->opt & MONO_OPT_INLINE) && cmethod &&
6409 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
6410 mono_method_check_inlining (cfg, cmethod) &&
6411 !g_list_find (dont_inline, cmethod)) {
6413 gboolean allways = FALSE;
6415 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
6416 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
6417 /* Prevent inlining of methods that call wrappers */
6419 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
6423 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, allways))) {
6425 cfg->real_offset += 5;
6428 if (!MONO_TYPE_IS_VOID (fsig->ret))
6429 /* *sp is already set by inline_method */
6432 inline_costs += costs;
6438 inline_costs += 10 * num_calls++;
6440 /* Tail recursion elimination */
6441 if ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
6442 gboolean has_vtargs = FALSE;
6445 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6448 /* keep it simple */
6449 for (i = fsig->param_count - 1; i >= 0; i--) {
6450 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
6455 for (i = 0; i < n; ++i)
6456 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6457 MONO_INST_NEW (cfg, ins, OP_BR);
6458 MONO_ADD_INS (bblock, ins);
6459 tblock = start_bblock->out_bb [0];
6460 link_bblock (cfg, bblock, tblock);
6461 ins->inst_target_bb = tblock;
6462 start_new_bblock = 1;
6464 /* skip the CEE_RET, too */
6465 if (ip_in_bb (cfg, bblock, ip + 5))
6475 /* Generic sharing */
6476 /* FIXME: only do this for generic methods if
6477 they are not shared! */
6478 if (context_used && !imt_arg && !array_rank &&
6479 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
6480 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
6481 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
6482 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
6485 g_assert (cfg->generic_sharing_context && cmethod);
6489 * We are compiling a call to a
6490 * generic method from shared code,
6491 * which means that we have to look up
6492 * the method in the rgctx and do an
6495 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
6498 /* Indirect calls */
6500 g_assert (!imt_arg);
6502 if (*ip == CEE_CALL)
6503 g_assert (context_used);
6504 else if (*ip == CEE_CALLI)
6505 g_assert (!vtable_arg);
6507 /* FIXME: what the hell is this??? */
6508 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
6509 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
6511 /* Prevent inlining of methods with indirect calls */
6515 #ifdef MONO_ARCH_RGCTX_REG
6517 int rgctx_reg = mono_alloc_preg (cfg);
6519 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
6520 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6521 call = (MonoCallInst*)ins;
6522 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
6523 cfg->uses_rgctx_reg = TRUE;
6524 call->rgctx_reg = TRUE;
6529 if (addr->opcode == OP_AOTCONST && addr->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6531 * Instead of emitting an indirect call, emit a direct call
6532 * with the contents of the aotconst as the patch info.
6534 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_p0, fsig, sp);
6536 } else if (addr->opcode == OP_GOT_ENTRY && addr->inst_right->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6537 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_right->inst_left, fsig, sp);
6540 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6543 if (!MONO_TYPE_IS_VOID (fsig->ret))
6544 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6546 CHECK_CFG_EXCEPTION;
6557 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
6558 if (sp [fsig->param_count]->type == STACK_OBJ) {
6559 MonoInst *iargs [2];
6562 iargs [1] = sp [fsig->param_count];
6564 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
6567 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
6568 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, sp [fsig->param_count]->dreg);
6569 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
6570 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6572 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
6575 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
6576 if (!cmethod->klass->element_class->valuetype && !readonly)
6577 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
6578 CHECK_TYPELOAD (cmethod->klass);
6581 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6584 g_assert_not_reached ();
6587 CHECK_CFG_EXCEPTION;
6594 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
6596 if (!MONO_TYPE_IS_VOID (fsig->ret))
6597 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6599 CHECK_CFG_EXCEPTION;
6609 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL,
6611 } else if (imt_arg) {
6612 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, imt_arg);
6614 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, NULL);
6617 if (!MONO_TYPE_IS_VOID (fsig->ret))
6618 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6620 CHECK_CFG_EXCEPTION;
6627 if (cfg->method != method) {
6628 /* return from inlined method */
6630 * If in_count == 0, that means the ret is unreachable due to
6631 * being preceeded by a throw. In that case, inline_method () will
6632 * handle setting the return value
6633 * (test case: test_0_inline_throw ()).
6635 if (return_var && cfg->cbb->in_count) {
6639 //g_assert (returnvar != -1);
6640 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
6641 cfg->ret_var_set = TRUE;
6645 MonoType *ret_type = mono_method_signature (method)->ret;
6649 * Place a seq point here too even through the IL stack is not
6650 * empty, so a step over on
6653 * will work correctly.
6655 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
6656 MONO_ADD_INS (cfg->cbb, ins);
6659 g_assert (!return_var);
6662 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
6665 if (!cfg->vret_addr) {
6668 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
6670 EMIT_NEW_RETLOADA (cfg, ret_addr);
6672 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
6673 ins->klass = mono_class_from_mono_type (ret_type);
6676 #ifdef MONO_ARCH_SOFT_FLOAT
6677 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
6678 MonoInst *iargs [1];
6682 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
6683 mono_arch_emit_setret (cfg, method, conv);
6685 mono_arch_emit_setret (cfg, method, *sp);
6688 mono_arch_emit_setret (cfg, method, *sp);
6693 if (sp != stack_start)
6695 MONO_INST_NEW (cfg, ins, OP_BR);
6697 ins->inst_target_bb = end_bblock;
6698 MONO_ADD_INS (bblock, ins);
6699 link_bblock (cfg, bblock, end_bblock);
6700 start_new_bblock = 1;
6704 MONO_INST_NEW (cfg, ins, OP_BR);
6706 target = ip + 1 + (signed char)(*ip);
6708 GET_BBLOCK (cfg, tblock, target);
6709 link_bblock (cfg, bblock, tblock);
6710 ins->inst_target_bb = tblock;
6711 if (sp != stack_start) {
6712 handle_stack_args (cfg, stack_start, sp - stack_start);
6714 CHECK_UNVERIFIABLE (cfg);
6716 MONO_ADD_INS (bblock, ins);
6717 start_new_bblock = 1;
6718 inline_costs += BRANCH_COST;
6732 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
6734 target = ip + 1 + *(signed char*)ip;
6740 inline_costs += BRANCH_COST;
6744 MONO_INST_NEW (cfg, ins, OP_BR);
6747 target = ip + 4 + (gint32)read32(ip);
6749 GET_BBLOCK (cfg, tblock, target);
6750 link_bblock (cfg, bblock, tblock);
6751 ins->inst_target_bb = tblock;
6752 if (sp != stack_start) {
6753 handle_stack_args (cfg, stack_start, sp - stack_start);
6755 CHECK_UNVERIFIABLE (cfg);
6758 MONO_ADD_INS (bblock, ins);
6760 start_new_bblock = 1;
6761 inline_costs += BRANCH_COST;
6768 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
6769 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
6770 guint32 opsize = is_short ? 1 : 4;
6772 CHECK_OPSIZE (opsize);
6774 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
6777 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
6782 GET_BBLOCK (cfg, tblock, target);
6783 link_bblock (cfg, bblock, tblock);
6784 GET_BBLOCK (cfg, tblock, ip);
6785 link_bblock (cfg, bblock, tblock);
6787 if (sp != stack_start) {
6788 handle_stack_args (cfg, stack_start, sp - stack_start);
6789 CHECK_UNVERIFIABLE (cfg);
6792 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
6793 cmp->sreg1 = sp [0]->dreg;
6794 type_from_op (cmp, sp [0], NULL);
6797 #if SIZEOF_REGISTER == 4
6798 if (cmp->opcode == OP_LCOMPARE_IMM) {
6799 /* Convert it to OP_LCOMPARE */
6800 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6801 ins->type = STACK_I8;
6802 ins->dreg = alloc_dreg (cfg, STACK_I8);
6804 MONO_ADD_INS (bblock, ins);
6805 cmp->opcode = OP_LCOMPARE;
6806 cmp->sreg2 = ins->dreg;
6809 MONO_ADD_INS (bblock, cmp);
6811 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
6812 type_from_op (ins, sp [0], NULL);
6813 MONO_ADD_INS (bblock, ins);
6814 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
6815 GET_BBLOCK (cfg, tblock, target);
6816 ins->inst_true_bb = tblock;
6817 GET_BBLOCK (cfg, tblock, ip);
6818 ins->inst_false_bb = tblock;
6819 start_new_bblock = 2;
6822 inline_costs += BRANCH_COST;
6837 MONO_INST_NEW (cfg, ins, *ip);
6839 target = ip + 4 + (gint32)read32(ip);
6845 inline_costs += BRANCH_COST;
6849 MonoBasicBlock **targets;
6850 MonoBasicBlock *default_bblock;
6851 MonoJumpInfoBBTable *table;
6852 int offset_reg = alloc_preg (cfg);
6853 int target_reg = alloc_preg (cfg);
6854 int table_reg = alloc_preg (cfg);
6855 int sum_reg = alloc_preg (cfg);
6856 gboolean use_op_switch;
6860 n = read32 (ip + 1);
6863 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
6867 CHECK_OPSIZE (n * sizeof (guint32));
6868 target = ip + n * sizeof (guint32);
6870 GET_BBLOCK (cfg, default_bblock, target);
6872 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
6873 for (i = 0; i < n; ++i) {
6874 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
6875 targets [i] = tblock;
6879 if (sp != stack_start) {
6881 * Link the current bb with the targets as well, so handle_stack_args
6882 * will set their in_stack correctly.
6884 link_bblock (cfg, bblock, default_bblock);
6885 for (i = 0; i < n; ++i)
6886 link_bblock (cfg, bblock, targets [i]);
6888 handle_stack_args (cfg, stack_start, sp - stack_start);
6890 CHECK_UNVERIFIABLE (cfg);
6893 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
6894 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
6897 for (i = 0; i < n; ++i)
6898 link_bblock (cfg, bblock, targets [i]);
6900 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
6901 table->table = targets;
6902 table->table_size = n;
6904 use_op_switch = FALSE;
6906 /* ARM implements SWITCH statements differently */
6907 /* FIXME: Make it use the generic implementation */
6908 if (!cfg->compile_aot)
6909 use_op_switch = TRUE;
6912 if (COMPILE_LLVM (cfg))
6913 use_op_switch = TRUE;
6915 cfg->cbb->has_jump_table = 1;
6917 if (use_op_switch) {
6918 MONO_INST_NEW (cfg, ins, OP_SWITCH);
6919 ins->sreg1 = src1->dreg;
6920 ins->inst_p0 = table;
6921 ins->inst_many_bb = targets;
6922 ins->klass = GUINT_TO_POINTER (n);
6923 MONO_ADD_INS (cfg->cbb, ins);
6925 if (sizeof (gpointer) == 8)
6926 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
6928 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
6930 #if SIZEOF_REGISTER == 8
6931 /* The upper word might not be zero, and we add it to a 64 bit address later */
6932 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
6935 if (cfg->compile_aot) {
6936 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
6938 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
6939 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
6940 ins->inst_p0 = table;
6941 ins->dreg = table_reg;
6942 MONO_ADD_INS (cfg->cbb, ins);
6945 /* FIXME: Use load_memindex */
6946 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
6947 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
6948 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
6950 start_new_bblock = 1;
6951 inline_costs += (BRANCH_COST * 2);
6971 dreg = alloc_freg (cfg);
6974 dreg = alloc_lreg (cfg);
6977 dreg = alloc_preg (cfg);
6980 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
6981 ins->type = ldind_type [*ip - CEE_LDIND_I1];
6982 ins->flags |= ins_flag;
6984 MONO_ADD_INS (bblock, ins);
6999 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
7000 ins->flags |= ins_flag;
7002 MONO_ADD_INS (bblock, ins);
7004 #if HAVE_WRITE_BARRIERS
7005 if (*ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0))) {
7006 MonoInst *dummy_use;
7007 /* insert call to write barrier */
7008 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
7009 mono_emit_method_call (cfg, write_barrier, sp, NULL);
7010 EMIT_NEW_DUMMY_USE (cfg, dummy_use, sp [1]);
7021 MONO_INST_NEW (cfg, ins, (*ip));
7023 ins->sreg1 = sp [0]->dreg;
7024 ins->sreg2 = sp [1]->dreg;
7025 type_from_op (ins, sp [0], sp [1]);
7027 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7029 /* Use the immediate opcodes if possible */
7030 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
7031 int imm_opcode = mono_op_to_op_imm (ins->opcode);
7032 if (imm_opcode != -1) {
7033 ins->opcode = imm_opcode;
7034 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
7037 sp [1]->opcode = OP_NOP;
7041 MONO_ADD_INS ((cfg)->cbb, (ins));
7043 *sp++ = mono_decompose_opcode (cfg, ins);
7060 MONO_INST_NEW (cfg, ins, (*ip));
7062 ins->sreg1 = sp [0]->dreg;
7063 ins->sreg2 = sp [1]->dreg;
7064 type_from_op (ins, sp [0], sp [1]);
7066 ADD_WIDEN_OP (ins, sp [0], sp [1]);
7067 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7069 /* FIXME: Pass opcode to is_inst_imm */
7071 /* Use the immediate opcodes if possible */
7072 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
7075 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
7076 if (imm_opcode != -1) {
7077 ins->opcode = imm_opcode;
7078 if (sp [1]->opcode == OP_I8CONST) {
7079 #if SIZEOF_REGISTER == 8
7080 ins->inst_imm = sp [1]->inst_l;
7082 ins->inst_ls_word = sp [1]->inst_ls_word;
7083 ins->inst_ms_word = sp [1]->inst_ms_word;
7087 ins->inst_imm = (gssize)(sp [1]->inst_c0);
7090 /* Might be followed by an instruction added by ADD_WIDEN_OP */
7091 if (sp [1]->next == NULL)
7092 sp [1]->opcode = OP_NOP;
7095 MONO_ADD_INS ((cfg)->cbb, (ins));
7097 *sp++ = mono_decompose_opcode (cfg, ins);
7110 case CEE_CONV_OVF_I8:
7111 case CEE_CONV_OVF_U8:
7115 /* Special case this earlier so we have long constants in the IR */
7116 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
7117 int data = sp [-1]->inst_c0;
7118 sp [-1]->opcode = OP_I8CONST;
7119 sp [-1]->type = STACK_I8;
7120 #if SIZEOF_REGISTER == 8
7121 if ((*ip) == CEE_CONV_U8)
7122 sp [-1]->inst_c0 = (guint32)data;
7124 sp [-1]->inst_c0 = data;
7126 sp [-1]->inst_ls_word = data;
7127 if ((*ip) == CEE_CONV_U8)
7128 sp [-1]->inst_ms_word = 0;
7130 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
7132 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
7139 case CEE_CONV_OVF_I4:
7140 case CEE_CONV_OVF_I1:
7141 case CEE_CONV_OVF_I2:
7142 case CEE_CONV_OVF_I:
7143 case CEE_CONV_OVF_U:
7146 if (sp [-1]->type == STACK_R8) {
7147 ADD_UNOP (CEE_CONV_OVF_I8);
7154 case CEE_CONV_OVF_U1:
7155 case CEE_CONV_OVF_U2:
7156 case CEE_CONV_OVF_U4:
7159 if (sp [-1]->type == STACK_R8) {
7160 ADD_UNOP (CEE_CONV_OVF_U8);
7167 case CEE_CONV_OVF_I1_UN:
7168 case CEE_CONV_OVF_I2_UN:
7169 case CEE_CONV_OVF_I4_UN:
7170 case CEE_CONV_OVF_I8_UN:
7171 case CEE_CONV_OVF_U1_UN:
7172 case CEE_CONV_OVF_U2_UN:
7173 case CEE_CONV_OVF_U4_UN:
7174 case CEE_CONV_OVF_U8_UN:
7175 case CEE_CONV_OVF_I_UN:
7176 case CEE_CONV_OVF_U_UN:
7183 CHECK_CFG_EXCEPTION;
7187 case CEE_ADD_OVF_UN:
7189 case CEE_MUL_OVF_UN:
7191 case CEE_SUB_OVF_UN:
7199 token = read32 (ip + 1);
7200 klass = mini_get_class (method, token, generic_context);
7201 CHECK_TYPELOAD (klass);
7203 if (generic_class_is_reference_type (cfg, klass)) {
7204 MonoInst *store, *load;
7205 int dreg = alloc_preg (cfg);
7207 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
7208 load->flags |= ins_flag;
7209 MONO_ADD_INS (cfg->cbb, load);
7211 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
7212 store->flags |= ins_flag;
7213 MONO_ADD_INS (cfg->cbb, store);
7215 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7227 token = read32 (ip + 1);
7228 klass = mini_get_class (method, token, generic_context);
7229 CHECK_TYPELOAD (klass);
7231 /* Optimize the common ldobj+stloc combination */
7241 loc_index = ip [5] - CEE_STLOC_0;
7248 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
7249 CHECK_LOCAL (loc_index);
7251 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7252 ins->dreg = cfg->locals [loc_index]->dreg;
7258 /* Optimize the ldobj+stobj combination */
7259 /* The reference case ends up being a load+store anyway */
7260 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
7265 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7272 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7281 CHECK_STACK_OVF (1);
7283 n = read32 (ip + 1);
7285 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
7286 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
7287 ins->type = STACK_OBJ;
7290 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
7291 MonoInst *iargs [1];
7293 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
7294 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
7296 if (cfg->opt & MONO_OPT_SHARED) {
7297 MonoInst *iargs [3];
7299 if (cfg->compile_aot) {
7300 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
7302 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7303 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
7304 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
7305 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
7306 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7308 if (bblock->out_of_line) {
7309 MonoInst *iargs [2];
7311 if (image == mono_defaults.corlib) {
7313 * Avoid relocations in AOT and save some space by using a
7314 * version of helper_ldstr specialized to mscorlib.
7316 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
7317 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
7319 /* Avoid creating the string object */
7320 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
7321 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
7322 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
7326 if (cfg->compile_aot) {
7327 NEW_LDSTRCONST (cfg, ins, image, n);
7329 MONO_ADD_INS (bblock, ins);
7332 NEW_PCONST (cfg, ins, NULL);
7333 ins->type = STACK_OBJ;
7334 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7336 MONO_ADD_INS (bblock, ins);
7345 MonoInst *iargs [2];
7346 MonoMethodSignature *fsig;
7349 MonoInst *vtable_arg = NULL;
7352 token = read32 (ip + 1);
7353 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7356 fsig = mono_method_get_signature (cmethod, image, token);
7360 mono_save_token_info (cfg, image, token, cmethod);
7362 if (!mono_class_init (cmethod->klass))
7365 if (cfg->generic_sharing_context)
7366 context_used = mono_method_check_context_used (cmethod);
7368 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
7369 if (check_linkdemand (cfg, method, cmethod))
7371 CHECK_CFG_EXCEPTION;
7372 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
7373 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
7376 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7377 mono_method_is_generic_sharable_impl (cmethod, TRUE)) {
7378 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7379 mono_class_vtable (cfg->domain, cmethod->klass);
7380 CHECK_TYPELOAD (cmethod->klass);
7382 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7383 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7386 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
7387 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7389 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7391 CHECK_TYPELOAD (cmethod->klass);
7392 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7397 n = fsig->param_count;
7401 * Generate smaller code for the common newobj <exception> instruction in
7402 * argument checking code.
7404 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
7405 is_exception_class (cmethod->klass) && n <= 2 &&
7406 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
7407 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
7408 MonoInst *iargs [3];
7410 g_assert (!vtable_arg);
7414 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
7417 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
7421 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
7426 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
7429 g_assert_not_reached ();
7437 /* move the args to allow room for 'this' in the first position */
7443 /* check_call_signature () requires sp[0] to be set */
7444 this_ins.type = STACK_OBJ;
7446 if (check_call_signature (cfg, fsig, sp))
7451 if (mini_class_is_system_array (cmethod->klass)) {
7452 g_assert (!vtable_arg);
7454 *sp = emit_get_rgctx_method (cfg, context_used,
7455 cmethod, MONO_RGCTX_INFO_METHOD);
7457 /* Avoid varargs in the common case */
7458 if (fsig->param_count == 1)
7459 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
7460 else if (fsig->param_count == 2)
7461 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
7462 else if (fsig->param_count == 3)
7463 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
7465 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
7466 } else if (cmethod->string_ctor) {
7467 g_assert (!context_used);
7468 g_assert (!vtable_arg);
7469 /* we simply pass a null pointer */
7470 EMIT_NEW_PCONST (cfg, *sp, NULL);
7471 /* now call the string ctor */
7472 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, sp, NULL, NULL);
7474 MonoInst* callvirt_this_arg = NULL;
7476 if (cmethod->klass->valuetype) {
7477 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
7478 MONO_EMIT_NEW_VZERO (cfg, iargs [0]->dreg, cmethod->klass);
7479 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
7484 * The code generated by mini_emit_virtual_call () expects
7485 * iargs [0] to be a boxed instance, but luckily the vcall
7486 * will be transformed into a normal call there.
7488 } else if (context_used) {
7492 if (cfg->opt & MONO_OPT_SHARED)
7493 rgctx_info = MONO_RGCTX_INFO_KLASS;
7495 rgctx_info = MONO_RGCTX_INFO_VTABLE;
7496 data = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, rgctx_info);
7498 alloc = handle_alloc_from_inst (cfg, cmethod->klass, data, FALSE);
7501 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7503 CHECK_TYPELOAD (cmethod->klass);
7506 * TypeInitializationExceptions thrown from the mono_runtime_class_init
7507 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
7508 * As a workaround, we call class cctors before allocating objects.
7510 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
7511 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
7512 if (cfg->verbose_level > 2)
7513 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
7514 class_inits = g_slist_prepend (class_inits, vtable);
7517 alloc = handle_alloc (cfg, cmethod->klass, FALSE);
7520 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
7523 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
7525 /* Now call the actual ctor */
7526 /* Avoid virtual calls to ctors if possible */
7527 if (cmethod->klass->marshalbyref)
7528 callvirt_this_arg = sp [0];
7530 if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7531 mono_method_check_inlining (cfg, cmethod) &&
7532 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
7533 !g_list_find (dont_inline, cmethod)) {
7536 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
7537 cfg->real_offset += 5;
7540 inline_costs += costs - 5;
7543 mono_emit_method_call_full (cfg, cmethod, fsig, sp, callvirt_this_arg, NULL);
7545 } else if (context_used &&
7546 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
7547 !mono_class_generic_sharing_enabled (cmethod->klass))) {
7548 MonoInst *cmethod_addr;
7550 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7551 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7553 mono_emit_rgctx_calli (cfg, fsig, sp, cmethod_addr, vtable_arg);
7556 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp,
7557 callvirt_this_arg, NULL, vtable_arg);
7561 if (alloc == NULL) {
7563 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
7564 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
7578 token = read32 (ip + 1);
7579 klass = mini_get_class (method, token, generic_context);
7580 CHECK_TYPELOAD (klass);
7581 if (sp [0]->type != STACK_OBJ)
7584 if (cfg->generic_sharing_context)
7585 context_used = mono_class_check_context_used (klass);
7587 if (!context_used && mono_class_has_variant_generic_params (klass)) {
7594 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
7596 ins = mono_emit_jit_icall (cfg, mono_object_castclass, args);
7600 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
7601 MonoMethod *mono_castclass;
7602 MonoInst *iargs [1];
7605 mono_castclass = mono_marshal_get_castclass (klass);
7608 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7609 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7610 g_assert (costs > 0);
7613 cfg->real_offset += 5;
7618 inline_costs += costs;
7621 ins = handle_castclass (cfg, klass, *sp, context_used);
7622 CHECK_CFG_EXCEPTION;
7632 token = read32 (ip + 1);
7633 klass = mini_get_class (method, token, generic_context);
7634 CHECK_TYPELOAD (klass);
7635 if (sp [0]->type != STACK_OBJ)
7638 if (cfg->generic_sharing_context)
7639 context_used = mono_class_check_context_used (klass);
7641 if (!context_used && mono_class_has_variant_generic_params (klass)) {
7648 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
7650 *sp = mono_emit_jit_icall (cfg, mono_object_isinst, args);
7654 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
7655 MonoMethod *mono_isinst;
7656 MonoInst *iargs [1];
7659 mono_isinst = mono_marshal_get_isinst (klass);
7662 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
7663 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7664 g_assert (costs > 0);
7667 cfg->real_offset += 5;
7672 inline_costs += costs;
7675 ins = handle_isinst (cfg, klass, *sp, context_used);
7676 CHECK_CFG_EXCEPTION;
7683 case CEE_UNBOX_ANY: {
7687 token = read32 (ip + 1);
7688 klass = mini_get_class (method, token, generic_context);
7689 CHECK_TYPELOAD (klass);
7691 mono_save_token_info (cfg, image, token, klass);
7693 if (cfg->generic_sharing_context)
7694 context_used = mono_class_check_context_used (klass);
7696 if (generic_class_is_reference_type (cfg, klass)) {
7697 /* CASTCLASS FIXME kill this huge slice of duplicated code*/
7699 MonoInst *iargs [2];
7704 iargs [1] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
7705 ins = mono_emit_jit_icall (cfg, mono_object_castclass, iargs);
7709 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7710 MonoMethod *mono_castclass;
7711 MonoInst *iargs [1];
7714 mono_castclass = mono_marshal_get_castclass (klass);
7717 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7718 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7720 g_assert (costs > 0);
7723 cfg->real_offset += 5;
7727 inline_costs += costs;
7729 ins = handle_castclass (cfg, klass, *sp, 0);
7730 CHECK_CFG_EXCEPTION;
7738 if (mono_class_is_nullable (klass)) {
7739 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
7746 ins = handle_unbox (cfg, klass, sp, context_used);
7752 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7765 token = read32 (ip + 1);
7766 klass = mini_get_class (method, token, generic_context);
7767 CHECK_TYPELOAD (klass);
7769 mono_save_token_info (cfg, image, token, klass);
7771 if (cfg->generic_sharing_context)
7772 context_used = mono_class_check_context_used (klass);
7774 if (generic_class_is_reference_type (cfg, klass)) {
7780 if (klass == mono_defaults.void_class)
7782 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
7784 /* frequent check in generic code: box (struct), brtrue */
7785 if (!mono_class_is_nullable (klass) &&
7786 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) && (ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S)) {
7787 /*printf ("box-brtrue opt at 0x%04x in %s\n", real_offset, method->name);*/
7789 MONO_INST_NEW (cfg, ins, OP_BR);
7790 if (*ip == CEE_BRTRUE_S) {
7793 target = ip + 1 + (signed char)(*ip);
7798 target = ip + 4 + (gint)(read32 (ip));
7801 GET_BBLOCK (cfg, tblock, target);
7802 link_bblock (cfg, bblock, tblock);
7803 ins->inst_target_bb = tblock;
7804 GET_BBLOCK (cfg, tblock, ip);
7806 * This leads to some inconsistency, since the two bblocks are
7807 * not really connected, but it is needed for handling stack
7808 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
7809 * FIXME: This should only be needed if sp != stack_start, but that
7810 * doesn't work for some reason (test failure in mcs/tests on x86).
7812 link_bblock (cfg, bblock, tblock);
7813 if (sp != stack_start) {
7814 handle_stack_args (cfg, stack_start, sp - stack_start);
7816 CHECK_UNVERIFIABLE (cfg);
7818 MONO_ADD_INS (bblock, ins);
7819 start_new_bblock = 1;
7827 if (cfg->opt & MONO_OPT_SHARED)
7828 rgctx_info = MONO_RGCTX_INFO_KLASS;
7830 rgctx_info = MONO_RGCTX_INFO_VTABLE;
7831 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
7832 *sp++ = handle_box_from_inst (cfg, val, klass, context_used, data);
7834 *sp++ = handle_box (cfg, val, klass);
7837 CHECK_CFG_EXCEPTION;
7846 token = read32 (ip + 1);
7847 klass = mini_get_class (method, token, generic_context);
7848 CHECK_TYPELOAD (klass);
7850 mono_save_token_info (cfg, image, token, klass);
7852 if (cfg->generic_sharing_context)
7853 context_used = mono_class_check_context_used (klass);
7855 if (mono_class_is_nullable (klass)) {
7858 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
7859 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
7863 ins = handle_unbox (cfg, klass, sp, context_used);
7873 MonoClassField *field;
7877 if (*ip == CEE_STFLD) {
7884 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
7886 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
7889 token = read32 (ip + 1);
7890 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7891 field = mono_method_get_wrapper_data (method, token);
7892 klass = field->parent;
7895 field = mono_field_from_token (image, token, &klass, generic_context);
7899 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
7900 FIELD_ACCESS_FAILURE;
7901 mono_class_init (klass);
7903 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
7904 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
7905 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
7906 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
7909 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
7910 if (*ip == CEE_STFLD) {
7911 if (target_type_is_incompatible (cfg, field->type, sp [1]))
7913 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
7914 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
7915 MonoInst *iargs [5];
7918 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
7919 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
7920 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
7924 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
7925 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
7926 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7927 g_assert (costs > 0);
7929 cfg->real_offset += 5;
7932 inline_costs += costs;
7934 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
7939 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
7941 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
7943 #if HAVE_WRITE_BARRIERS
7944 if (mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
7945 /* insert call to write barrier */
7946 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
7947 MonoInst *iargs [2], *dummy_use;
7950 dreg = alloc_preg (cfg);
7951 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
7953 mono_emit_method_call (cfg, write_barrier, iargs, NULL);
7955 EMIT_NEW_DUMMY_USE (cfg, dummy_use, sp [1]);
7959 store->flags |= ins_flag;
7966 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
7967 MonoMethod *wrapper = (*ip == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
7968 MonoInst *iargs [4];
7971 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
7972 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
7973 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
7974 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
7975 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
7976 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7978 g_assert (costs > 0);
7980 cfg->real_offset += 5;
7984 inline_costs += costs;
7986 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
7990 if (sp [0]->type == STACK_VTYPE) {
7993 /* Have to compute the address of the variable */
7995 var = get_vreg_to_inst (cfg, sp [0]->dreg);
7997 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
7999 g_assert (var->klass == klass);
8001 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
8005 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
8007 if (*ip == CEE_LDFLDA) {
8008 dreg = alloc_preg (cfg);
8010 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
8011 ins->klass = mono_class_from_mono_type (field->type);
8012 ins->type = STACK_MP;
8017 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
8018 load->flags |= ins_flag;
8019 load->flags |= MONO_INST_FAULT;
8030 MonoClassField *field;
8031 gpointer addr = NULL;
8032 gboolean is_special_static;
8035 token = read32 (ip + 1);
8037 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8038 field = mono_method_get_wrapper_data (method, token);
8039 klass = field->parent;
8042 field = mono_field_from_token (image, token, &klass, generic_context);
8045 mono_class_init (klass);
8046 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
8047 FIELD_ACCESS_FAILURE;
8049 /* if the class is Critical then transparent code cannot access it's fields */
8050 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8051 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8054 * We can only support shared generic static
8055 * field access on architectures where the
8056 * trampoline code has been extended to handle
8057 * the generic class init.
8059 #ifndef MONO_ARCH_VTABLE_REG
8060 GENERIC_SHARING_FAILURE (*ip);
8063 if (cfg->generic_sharing_context)
8064 context_used = mono_class_check_context_used (klass);
8066 g_assert (!(field->type->attrs & FIELD_ATTRIBUTE_LITERAL));
8068 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
8069 * to be called here.
8071 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
8072 mono_class_vtable (cfg->domain, klass);
8073 CHECK_TYPELOAD (klass);
8075 mono_domain_lock (cfg->domain);
8076 if (cfg->domain->special_static_fields)
8077 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
8078 mono_domain_unlock (cfg->domain);
8080 is_special_static = mono_class_field_is_special_static (field);
8082 /* Generate IR to compute the field address */
8083 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && mono_get_thread_intrinsic (cfg) && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
8085 * Fast access to TLS data
8086 * Inline version of get_thread_static_data () in
8090 int idx, static_data_reg, array_reg, dreg;
8091 MonoInst *thread_ins;
8093 // offset &= 0x7fffffff;
8094 // idx = (offset >> 24) - 1;
8095 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
8097 thread_ins = mono_get_thread_intrinsic (cfg);
8098 MONO_ADD_INS (cfg->cbb, thread_ins);
8099 static_data_reg = alloc_ireg (cfg);
8100 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
8102 if (cfg->compile_aot) {
8103 int offset_reg, offset2_reg, idx_reg;
8105 /* For TLS variables, this will return the TLS offset */
8106 EMIT_NEW_SFLDACONST (cfg, ins, field);
8107 offset_reg = ins->dreg;
8108 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
8109 idx_reg = alloc_ireg (cfg);
8110 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
8111 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
8112 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
8113 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
8114 array_reg = alloc_ireg (cfg);
8115 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
8116 offset2_reg = alloc_ireg (cfg);
8117 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
8118 dreg = alloc_ireg (cfg);
8119 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
8121 offset = (gsize)addr & 0x7fffffff;
8122 idx = (offset >> 24) - 1;
8124 array_reg = alloc_ireg (cfg);
8125 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
8126 dreg = alloc_ireg (cfg);
8127 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, (offset & 0xffffff));
8129 } else if ((cfg->opt & MONO_OPT_SHARED) ||
8130 (cfg->compile_aot && is_special_static) ||
8131 (context_used && is_special_static)) {
8132 MonoInst *iargs [2];
8134 g_assert (field->parent);
8135 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8137 iargs [1] = emit_get_rgctx_field (cfg, context_used,
8138 field, MONO_RGCTX_INFO_CLASS_FIELD);
8140 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8142 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8143 } else if (context_used) {
8144 MonoInst *static_data;
8147 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
8148 method->klass->name_space, method->klass->name, method->name,
8149 depth, field->offset);
8152 if (mono_class_needs_cctor_run (klass, method)) {
8156 vtable = emit_get_rgctx_klass (cfg, context_used,
8157 klass, MONO_RGCTX_INFO_VTABLE);
8159 // FIXME: This doesn't work since it tries to pass the argument
8160 // in the normal way, instead of using MONO_ARCH_VTABLE_REG
8162 * The vtable pointer is always passed in a register regardless of
8163 * the calling convention, so assign it manually, and make a call
8164 * using a signature without parameters.
8166 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable);
8167 #ifdef MONO_ARCH_VTABLE_REG
8168 mono_call_inst_add_outarg_reg (cfg, call, vtable->dreg, MONO_ARCH_VTABLE_REG, FALSE);
8169 cfg->uses_vtable_reg = TRUE;
8176 * The pointer we're computing here is
8178 * super_info.static_data + field->offset
8180 static_data = emit_get_rgctx_klass (cfg, context_used,
8181 klass, MONO_RGCTX_INFO_STATIC_DATA);
8183 if (field->offset == 0) {
8186 int addr_reg = mono_alloc_preg (cfg);
8187 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
8189 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
8190 MonoInst *iargs [2];
8192 g_assert (field->parent);
8193 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8194 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8195 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8197 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
8199 CHECK_TYPELOAD (klass);
8201 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
8202 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
8203 if (cfg->verbose_level > 2)
8204 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
8205 class_inits = g_slist_prepend (class_inits, vtable);
8207 if (cfg->run_cctors) {
8209 /* This makes so that inline cannot trigger */
8210 /* .cctors: too many apps depend on them */
8211 /* running with a specific order... */
8212 if (! vtable->initialized)
8214 ex = mono_runtime_class_init_full (vtable, FALSE);
8216 set_exception_object (cfg, ex);
8217 goto exception_exit;
8221 addr = (char*)vtable->data + field->offset;
8223 if (cfg->compile_aot)
8224 EMIT_NEW_SFLDACONST (cfg, ins, field);
8226 EMIT_NEW_PCONST (cfg, ins, addr);
8228 MonoInst *iargs [1];
8229 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
8230 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
8234 /* Generate IR to do the actual load/store operation */
8236 if (*ip == CEE_LDSFLDA) {
8237 ins->klass = mono_class_from_mono_type (field->type);
8238 ins->type = STACK_PTR;
8240 } else if (*ip == CEE_STSFLD) {
8245 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, ins->dreg, 0, sp [0]->dreg);
8246 store->flags |= ins_flag;
8248 gboolean is_const = FALSE;
8249 MonoVTable *vtable = NULL;
8251 if (!context_used) {
8252 vtable = mono_class_vtable (cfg->domain, klass);
8253 CHECK_TYPELOAD (klass);
8255 if (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) &&
8256 vtable->initialized && (field->type->attrs & FIELD_ATTRIBUTE_INIT_ONLY)) {
8257 gpointer addr = (char*)vtable->data + field->offset;
8258 int ro_type = field->type->type;
8259 if (ro_type == MONO_TYPE_VALUETYPE && field->type->data.klass->enumtype) {
8260 ro_type = mono_class_enum_basetype (field->type->data.klass)->type;
8262 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
8265 case MONO_TYPE_BOOLEAN:
8267 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
8271 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
8274 case MONO_TYPE_CHAR:
8276 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
8280 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
8285 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
8289 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
8292 #ifndef HAVE_MOVING_COLLECTOR
8295 case MONO_TYPE_STRING:
8296 case MONO_TYPE_OBJECT:
8297 case MONO_TYPE_CLASS:
8298 case MONO_TYPE_SZARRAY:
8300 case MONO_TYPE_FNPTR:
8301 case MONO_TYPE_ARRAY:
8302 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
8303 type_to_eval_stack_type ((cfg), field->type, *sp);
8309 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
8314 case MONO_TYPE_VALUETYPE:
8324 CHECK_STACK_OVF (1);
8326 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
8327 load->flags |= ins_flag;
8340 token = read32 (ip + 1);
8341 klass = mini_get_class (method, token, generic_context);
8342 CHECK_TYPELOAD (klass);
8343 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
8344 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
8355 const char *data_ptr;
8357 guint32 field_token;
8363 token = read32 (ip + 1);
8365 klass = mini_get_class (method, token, generic_context);
8366 CHECK_TYPELOAD (klass);
8368 if (cfg->generic_sharing_context)
8369 context_used = mono_class_check_context_used (klass);
8371 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
8372 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_I4);
8373 ins->sreg1 = sp [0]->dreg;
8374 ins->type = STACK_I4;
8375 ins->dreg = alloc_ireg (cfg);
8376 MONO_ADD_INS (cfg->cbb, ins);
8377 *sp = mono_decompose_opcode (cfg, ins);
8382 MonoClass *array_class = mono_array_class_get (klass, 1);
8383 /* FIXME: we cannot get a managed
8384 allocator because we can't get the
8385 open generic class's vtable. We
8386 have the same problem in
8387 handle_alloc_from_inst(). This
8388 needs to be solved so that we can
8389 have managed allocs of shared
8392 MonoVTable *array_class_vtable = mono_class_vtable (cfg->domain, array_class);
8393 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class_vtable, 1);
8395 MonoMethod *managed_alloc = NULL;
8397 /* FIXME: Decompose later to help abcrem */
8400 args [0] = emit_get_rgctx_klass (cfg, context_used,
8401 array_class, MONO_RGCTX_INFO_VTABLE);
8406 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
8408 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
8410 if (cfg->opt & MONO_OPT_SHARED) {
8411 /* Decompose now to avoid problems with references to the domainvar */
8412 MonoInst *iargs [3];
8414 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8415 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8418 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
8420 /* Decompose later since it is needed by abcrem */
8421 MonoClass *array_type = mono_array_class_get (klass, 1);
8422 mono_class_vtable (cfg->domain, array_type);
8423 CHECK_TYPELOAD (array_type);
8425 MONO_INST_NEW (cfg, ins, OP_NEWARR);
8426 ins->dreg = alloc_preg (cfg);
8427 ins->sreg1 = sp [0]->dreg;
8428 ins->inst_newa_class = klass;
8429 ins->type = STACK_OBJ;
8431 MONO_ADD_INS (cfg->cbb, ins);
8432 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8433 cfg->cbb->has_array_access = TRUE;
8435 /* Needed so mono_emit_load_get_addr () gets called */
8436 mono_get_got_var (cfg);
8446 * we inline/optimize the initialization sequence if possible.
8447 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
8448 * for small sizes open code the memcpy
8449 * ensure the rva field is big enough
8451 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
8452 MonoMethod *memcpy_method = get_memcpy_method ();
8453 MonoInst *iargs [3];
8454 int add_reg = alloc_preg (cfg);
8456 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
8457 if (cfg->compile_aot) {
8458 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
8460 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
8462 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
8463 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
8472 if (sp [0]->type != STACK_OBJ)
8475 dreg = alloc_preg (cfg);
8476 MONO_INST_NEW (cfg, ins, OP_LDLEN);
8477 ins->dreg = alloc_preg (cfg);
8478 ins->sreg1 = sp [0]->dreg;
8479 ins->type = STACK_I4;
8480 MONO_ADD_INS (cfg->cbb, ins);
8481 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8482 cfg->cbb->has_array_access = TRUE;
8490 if (sp [0]->type != STACK_OBJ)
8493 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8495 klass = mini_get_class (method, read32 (ip + 1), generic_context);
8496 CHECK_TYPELOAD (klass);
8497 /* we need to make sure that this array is exactly the type it needs
8498 * to be for correctness. the wrappers are lax with their usage
8499 * so we need to ignore them here
8501 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
8502 MonoClass *array_class = mono_array_class_get (klass, 1);
8503 mini_emit_check_array_type (cfg, sp [0], array_class);
8504 CHECK_TYPELOAD (array_class);
8508 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
8523 case CEE_LDELEM_REF: {
8529 if (*ip == CEE_LDELEM) {
8531 token = read32 (ip + 1);
8532 klass = mini_get_class (method, token, generic_context);
8533 CHECK_TYPELOAD (klass);
8534 mono_class_init (klass);
8537 klass = array_access_to_klass (*ip);
8539 if (sp [0]->type != STACK_OBJ)
8542 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8544 if (sp [1]->opcode == OP_ICONST) {
8545 int array_reg = sp [0]->dreg;
8546 int index_reg = sp [1]->dreg;
8547 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8549 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8550 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
8552 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
8553 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
8556 if (*ip == CEE_LDELEM)
8569 case CEE_STELEM_REF:
8576 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8578 if (*ip == CEE_STELEM) {
8580 token = read32 (ip + 1);
8581 klass = mini_get_class (method, token, generic_context);
8582 CHECK_TYPELOAD (klass);
8583 mono_class_init (klass);
8586 klass = array_access_to_klass (*ip);
8588 if (sp [0]->type != STACK_OBJ)
8591 /* storing a NULL doesn't need any of the complex checks in stelemref */
8592 if (generic_class_is_reference_type (cfg, klass) &&
8593 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
8594 MonoMethod* helper = mono_marshal_get_stelemref ();
8595 MonoInst *iargs [3];
8597 if (sp [0]->type != STACK_OBJ)
8599 if (sp [2]->type != STACK_OBJ)
8606 mono_emit_method_call (cfg, helper, iargs, NULL);
8608 if (sp [1]->opcode == OP_ICONST) {
8609 int array_reg = sp [0]->dreg;
8610 int index_reg = sp [1]->dreg;
8611 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8613 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8614 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
8616 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
8617 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
8621 if (*ip == CEE_STELEM)
8628 case CEE_CKFINITE: {
8632 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
8633 ins->sreg1 = sp [0]->dreg;
8634 ins->dreg = alloc_freg (cfg);
8635 ins->type = STACK_R8;
8636 MONO_ADD_INS (bblock, ins);
8638 *sp++ = mono_decompose_opcode (cfg, ins);
8643 case CEE_REFANYVAL: {
8644 MonoInst *src_var, *src;
8646 int klass_reg = alloc_preg (cfg);
8647 int dreg = alloc_preg (cfg);
8650 MONO_INST_NEW (cfg, ins, *ip);
8653 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8654 CHECK_TYPELOAD (klass);
8655 mono_class_init (klass);
8657 if (cfg->generic_sharing_context)
8658 context_used = mono_class_check_context_used (klass);
8661 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
8663 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
8664 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
8665 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
8668 MonoInst *klass_ins;
8670 klass_ins = emit_get_rgctx_klass (cfg, context_used,
8671 klass, MONO_RGCTX_INFO_KLASS);
8674 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
8675 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
8677 mini_emit_class_check (cfg, klass_reg, klass);
8679 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
8680 ins->type = STACK_MP;
8685 case CEE_MKREFANY: {
8686 MonoInst *loc, *addr;
8689 MONO_INST_NEW (cfg, ins, *ip);
8692 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8693 CHECK_TYPELOAD (klass);
8694 mono_class_init (klass);
8696 if (cfg->generic_sharing_context)
8697 context_used = mono_class_check_context_used (klass);
8699 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
8700 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
8703 MonoInst *const_ins;
8704 int type_reg = alloc_preg (cfg);
8706 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
8707 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
8708 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8709 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8710 } else if (cfg->compile_aot) {
8711 int const_reg = alloc_preg (cfg);
8712 int type_reg = alloc_preg (cfg);
8714 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
8715 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
8716 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8717 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8719 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
8720 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
8722 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
8724 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
8725 ins->type = STACK_VTYPE;
8726 ins->klass = mono_defaults.typed_reference_class;
8733 MonoClass *handle_class;
8735 CHECK_STACK_OVF (1);
8738 n = read32 (ip + 1);
8740 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
8741 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
8742 handle = mono_method_get_wrapper_data (method, n);
8743 handle_class = mono_method_get_wrapper_data (method, n + 1);
8744 if (handle_class == mono_defaults.typehandle_class)
8745 handle = &((MonoClass*)handle)->byval_arg;
8748 handle = mono_ldtoken (image, n, &handle_class, generic_context);
8752 mono_class_init (handle_class);
8753 if (cfg->generic_sharing_context) {
8754 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
8755 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
8756 /* This case handles ldtoken
8757 of an open type, like for
8760 } else if (handle_class == mono_defaults.typehandle_class) {
8761 /* If we get a MONO_TYPE_CLASS
8762 then we need to provide the
8764 instantiation of it. */
8765 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
8768 context_used = mono_class_check_context_used (mono_class_from_mono_type (handle));
8769 } else if (handle_class == mono_defaults.fieldhandle_class)
8770 context_used = mono_class_check_context_used (((MonoClassField*)handle)->parent);
8771 else if (handle_class == mono_defaults.methodhandle_class)
8772 context_used = mono_method_check_context_used (handle);
8774 g_assert_not_reached ();
8777 if ((cfg->opt & MONO_OPT_SHARED) &&
8778 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
8779 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
8780 MonoInst *addr, *vtvar, *iargs [3];
8781 int method_context_used;
8783 if (cfg->generic_sharing_context)
8784 method_context_used = mono_method_check_context_used (method);
8786 method_context_used = 0;
8788 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8790 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
8791 EMIT_NEW_ICONST (cfg, iargs [1], n);
8792 if (method_context_used) {
8793 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
8794 method, MONO_RGCTX_INFO_METHOD);
8795 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
8797 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
8798 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
8800 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8802 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8804 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8806 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
8807 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
8808 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
8809 (cmethod->klass == mono_defaults.monotype_class->parent) &&
8810 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
8811 MonoClass *tclass = mono_class_from_mono_type (handle);
8813 mono_class_init (tclass);
8815 ins = emit_get_rgctx_klass (cfg, context_used,
8816 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
8817 } else if (cfg->compile_aot) {
8818 if (method->wrapper_type) {
8819 if (mono_class_get (tclass->image, tclass->type_token) == tclass && !generic_context) {
8820 /* Special case for static synchronized wrappers */
8821 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
8823 /* FIXME: n is not a normal token */
8824 cfg->disable_aot = TRUE;
8825 EMIT_NEW_PCONST (cfg, ins, NULL);
8828 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
8831 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
8833 ins->type = STACK_OBJ;
8834 ins->klass = cmethod->klass;
8837 MonoInst *addr, *vtvar;
8839 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8842 if (handle_class == mono_defaults.typehandle_class) {
8843 ins = emit_get_rgctx_klass (cfg, context_used,
8844 mono_class_from_mono_type (handle),
8845 MONO_RGCTX_INFO_TYPE);
8846 } else if (handle_class == mono_defaults.methodhandle_class) {
8847 ins = emit_get_rgctx_method (cfg, context_used,
8848 handle, MONO_RGCTX_INFO_METHOD);
8849 } else if (handle_class == mono_defaults.fieldhandle_class) {
8850 ins = emit_get_rgctx_field (cfg, context_used,
8851 handle, MONO_RGCTX_INFO_CLASS_FIELD);
8853 g_assert_not_reached ();
8855 } else if (cfg->compile_aot) {
8856 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n);
8858 EMIT_NEW_PCONST (cfg, ins, handle);
8860 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8861 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8862 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8872 MONO_INST_NEW (cfg, ins, OP_THROW);
8874 ins->sreg1 = sp [0]->dreg;
8876 bblock->out_of_line = TRUE;
8877 MONO_ADD_INS (bblock, ins);
8878 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
8879 MONO_ADD_INS (bblock, ins);
8882 link_bblock (cfg, bblock, end_bblock);
8883 start_new_bblock = 1;
8885 case CEE_ENDFINALLY:
8886 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
8887 MONO_ADD_INS (bblock, ins);
8889 start_new_bblock = 1;
8892 * Control will leave the method so empty the stack, otherwise
8893 * the next basic block will start with a nonempty stack.
8895 while (sp != stack_start) {
8903 if (*ip == CEE_LEAVE) {
8905 target = ip + 5 + (gint32)read32(ip + 1);
8908 target = ip + 2 + (signed char)(ip [1]);
8911 /* empty the stack */
8912 while (sp != stack_start) {
8917 * If this leave statement is in a catch block, check for a
8918 * pending exception, and rethrow it if necessary.
8919 * We avoid doing this in runtime invoke wrappers, since those are called
8920 * by native code which expects the wrapper to catch all exceptions.
8922 for (i = 0; i < header->num_clauses; ++i) {
8923 MonoExceptionClause *clause = &header->clauses [i];
8926 * Use <= in the final comparison to handle clauses with multiple
8927 * leave statements, like in bug #78024.
8928 * The ordering of the exception clauses guarantees that we find the
8931 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
8933 MonoBasicBlock *dont_throw;
8938 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
8941 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
8943 NEW_BBLOCK (cfg, dont_throw);
8946 * Currently, we always rethrow the abort exception, despite the
8947 * fact that this is not correct. See thread6.cs for an example.
8948 * But propagating the abort exception is more important than
8949 * getting the semantics right.
8951 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
8952 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
8953 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
8955 MONO_START_BB (cfg, dont_throw);
8960 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
8962 MonoExceptionClause *clause;
8964 for (tmp = handlers; tmp; tmp = tmp->next) {
8966 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
8968 link_bblock (cfg, bblock, tblock);
8969 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
8970 ins->inst_target_bb = tblock;
8971 ins->inst_eh_block = clause;
8972 MONO_ADD_INS (bblock, ins);
8973 bblock->has_call_handler = 1;
8974 if (COMPILE_LLVM (cfg)) {
8975 MonoBasicBlock *target_bb;
8978 * Link the finally bblock with the target, since it will
8979 * conceptually branch there.
8980 * FIXME: Have to link the bblock containing the endfinally.
8982 GET_BBLOCK (cfg, target_bb, target);
8983 link_bblock (cfg, tblock, target_bb);
8986 g_list_free (handlers);
8989 MONO_INST_NEW (cfg, ins, OP_BR);
8990 MONO_ADD_INS (bblock, ins);
8991 GET_BBLOCK (cfg, tblock, target);
8992 link_bblock (cfg, bblock, tblock);
8993 ins->inst_target_bb = tblock;
8994 start_new_bblock = 1;
8996 if (*ip == CEE_LEAVE)
9005 * Mono specific opcodes
9007 case MONO_CUSTOM_PREFIX: {
9009 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
9013 case CEE_MONO_ICALL: {
9015 MonoJitICallInfo *info;
9017 token = read32 (ip + 2);
9018 func = mono_method_get_wrapper_data (method, token);
9019 info = mono_find_jit_icall_by_addr (func);
9022 CHECK_STACK (info->sig->param_count);
9023 sp -= info->sig->param_count;
9025 ins = mono_emit_jit_icall (cfg, info->func, sp);
9026 if (!MONO_TYPE_IS_VOID (info->sig->ret))
9030 inline_costs += 10 * num_calls++;
9034 case CEE_MONO_LDPTR: {
9037 CHECK_STACK_OVF (1);
9039 token = read32 (ip + 2);
9041 ptr = mono_method_get_wrapper_data (method, token);
9042 if (cfg->compile_aot && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (strstr (method->name, "__icall_wrapper_") == method->name)) {
9043 MonoJitICallInfo *callinfo;
9044 const char *icall_name;
9046 icall_name = method->name + strlen ("__icall_wrapper_");
9047 g_assert (icall_name);
9048 callinfo = mono_find_jit_icall_by_name (icall_name);
9049 g_assert (callinfo);
9051 if (ptr == callinfo->func) {
9052 /* Will be transformed into an AOTCONST later */
9053 EMIT_NEW_PCONST (cfg, ins, ptr);
9059 /* FIXME: Generalize this */
9060 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
9061 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
9066 EMIT_NEW_PCONST (cfg, ins, ptr);
9069 inline_costs += 10 * num_calls++;
9070 /* Can't embed random pointers into AOT code */
9071 cfg->disable_aot = 1;
9074 case CEE_MONO_ICALL_ADDR: {
9075 MonoMethod *cmethod;
9078 CHECK_STACK_OVF (1);
9080 token = read32 (ip + 2);
9082 cmethod = mono_method_get_wrapper_data (method, token);
9084 if (cfg->compile_aot) {
9085 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
9087 ptr = mono_lookup_internal_call (cmethod);
9089 EMIT_NEW_PCONST (cfg, ins, ptr);
9095 case CEE_MONO_VTADDR: {
9096 MonoInst *src_var, *src;
9102 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9103 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
9108 case CEE_MONO_NEWOBJ: {
9109 MonoInst *iargs [2];
9111 CHECK_STACK_OVF (1);
9113 token = read32 (ip + 2);
9114 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9115 mono_class_init (klass);
9116 NEW_DOMAINCONST (cfg, iargs [0]);
9117 MONO_ADD_INS (cfg->cbb, iargs [0]);
9118 NEW_CLASSCONST (cfg, iargs [1], klass);
9119 MONO_ADD_INS (cfg->cbb, iargs [1]);
9120 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
9122 inline_costs += 10 * num_calls++;
9125 case CEE_MONO_OBJADDR:
9128 MONO_INST_NEW (cfg, ins, OP_MOVE);
9129 ins->dreg = alloc_preg (cfg);
9130 ins->sreg1 = sp [0]->dreg;
9131 ins->type = STACK_MP;
9132 MONO_ADD_INS (cfg->cbb, ins);
9136 case CEE_MONO_LDNATIVEOBJ:
9138 * Similar to LDOBJ, but instead load the unmanaged
9139 * representation of the vtype to the stack.
9144 token = read32 (ip + 2);
9145 klass = mono_method_get_wrapper_data (method, token);
9146 g_assert (klass->valuetype);
9147 mono_class_init (klass);
9150 MonoInst *src, *dest, *temp;
9153 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
9154 temp->backend.is_pinvoke = 1;
9155 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
9156 mini_emit_stobj (cfg, dest, src, klass, TRUE);
9158 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
9159 dest->type = STACK_VTYPE;
9160 dest->klass = klass;
9166 case CEE_MONO_RETOBJ: {
9168 * Same as RET, but return the native representation of a vtype
9171 g_assert (cfg->ret);
9172 g_assert (mono_method_signature (method)->pinvoke);
9177 token = read32 (ip + 2);
9178 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9180 if (!cfg->vret_addr) {
9181 g_assert (cfg->ret_var_is_local);
9183 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
9185 EMIT_NEW_RETLOADA (cfg, ins);
9187 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
9189 if (sp != stack_start)
9192 MONO_INST_NEW (cfg, ins, OP_BR);
9193 ins->inst_target_bb = end_bblock;
9194 MONO_ADD_INS (bblock, ins);
9195 link_bblock (cfg, bblock, end_bblock);
9196 start_new_bblock = 1;
9200 case CEE_MONO_CISINST:
9201 case CEE_MONO_CCASTCLASS: {
9206 token = read32 (ip + 2);
9207 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9208 if (ip [1] == CEE_MONO_CISINST)
9209 ins = handle_cisinst (cfg, klass, sp [0]);
9211 ins = handle_ccastclass (cfg, klass, sp [0]);
9217 case CEE_MONO_SAVE_LMF:
9218 case CEE_MONO_RESTORE_LMF:
9219 #ifdef MONO_ARCH_HAVE_LMF_OPS
9220 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
9221 MONO_ADD_INS (bblock, ins);
9222 cfg->need_lmf_area = TRUE;
9226 case CEE_MONO_CLASSCONST:
9227 CHECK_STACK_OVF (1);
9229 token = read32 (ip + 2);
9230 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
9233 inline_costs += 10 * num_calls++;
9235 case CEE_MONO_NOT_TAKEN:
9236 bblock->out_of_line = TRUE;
9240 CHECK_STACK_OVF (1);
9242 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
9243 ins->dreg = alloc_preg (cfg);
9244 ins->inst_offset = (gint32)read32 (ip + 2);
9245 ins->type = STACK_PTR;
9246 MONO_ADD_INS (bblock, ins);
9250 case CEE_MONO_DYN_CALL: {
9253 /* It would be easier to call a trampoline, but that would put an
9254 * extra frame on the stack, confusing exception handling. So
9255 * implement it inline using an opcode for now.
9258 if (!cfg->dyn_call_var) {
9259 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
9260 /* prevent it from being register allocated */
9261 cfg->dyn_call_var->flags |= MONO_INST_INDIRECT;
9264 /* Has to use a call inst since it local regalloc expects it */
9265 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
9266 ins = (MonoInst*)call;
9268 ins->sreg1 = sp [0]->dreg;
9269 ins->sreg2 = sp [1]->dreg;
9270 MONO_ADD_INS (bblock, ins);
9272 #ifdef MONO_ARCH_DYN_CALL_PARAM_AREA
9273 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
9277 inline_costs += 10 * num_calls++;
9282 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
9292 /* somewhat similar to LDTOKEN */
9293 MonoInst *addr, *vtvar;
9294 CHECK_STACK_OVF (1);
9295 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
9297 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9298 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
9300 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9301 ins->type = STACK_VTYPE;
9302 ins->klass = mono_defaults.argumenthandle_class;
9315 * The following transforms:
9316 * CEE_CEQ into OP_CEQ
9317 * CEE_CGT into OP_CGT
9318 * CEE_CGT_UN into OP_CGT_UN
9319 * CEE_CLT into OP_CLT
9320 * CEE_CLT_UN into OP_CLT_UN
9322 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
9324 MONO_INST_NEW (cfg, ins, cmp->opcode);
9326 cmp->sreg1 = sp [0]->dreg;
9327 cmp->sreg2 = sp [1]->dreg;
9328 type_from_op (cmp, sp [0], sp [1]);
9330 if ((sp [0]->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
9331 cmp->opcode = OP_LCOMPARE;
9332 else if (sp [0]->type == STACK_R8)
9333 cmp->opcode = OP_FCOMPARE;
9335 cmp->opcode = OP_ICOMPARE;
9336 MONO_ADD_INS (bblock, cmp);
9337 ins->type = STACK_I4;
9338 ins->dreg = alloc_dreg (cfg, ins->type);
9339 type_from_op (ins, sp [0], sp [1]);
9341 if (cmp->opcode == OP_FCOMPARE) {
9343 * The backends expect the fceq opcodes to do the
9346 cmp->opcode = OP_NOP;
9347 ins->sreg1 = cmp->sreg1;
9348 ins->sreg2 = cmp->sreg2;
9350 MONO_ADD_INS (bblock, ins);
9357 MonoMethod *cil_method;
9358 gboolean needs_static_rgctx_invoke;
9360 CHECK_STACK_OVF (1);
9362 n = read32 (ip + 2);
9363 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9366 mono_class_init (cmethod->klass);
9368 mono_save_token_info (cfg, image, n, cmethod);
9370 if (cfg->generic_sharing_context)
9371 context_used = mono_method_check_context_used (cmethod);
9373 needs_static_rgctx_invoke = mono_method_needs_static_rgctx_invoke (cmethod, TRUE);
9375 cil_method = cmethod;
9376 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
9377 METHOD_ACCESS_FAILURE;
9379 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9380 if (check_linkdemand (cfg, method, cmethod))
9382 CHECK_CFG_EXCEPTION;
9383 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9384 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9388 * Optimize the common case of ldftn+delegate creation
9390 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
9391 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
9392 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
9394 int invoke_context_used = 0;
9396 invoke = mono_get_delegate_invoke (ctor_method->klass);
9397 if (!invoke || !mono_method_signature (invoke))
9400 if (cfg->generic_sharing_context)
9401 invoke_context_used = mono_method_check_context_used (invoke);
9403 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE) && !defined(HAVE_WRITE_BARRIERS)
9404 /* FIXME: SGEN support */
9405 if (invoke_context_used == 0) {
9406 MonoInst *target_ins;
9409 if (cfg->verbose_level > 3)
9410 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
9411 target_ins = sp [-1];
9413 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used);
9414 CHECK_CFG_EXCEPTION;
9423 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
9424 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
9428 inline_costs += 10 * num_calls++;
9431 case CEE_LDVIRTFTN: {
9436 n = read32 (ip + 2);
9437 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9440 mono_class_init (cmethod->klass);
9442 if (cfg->generic_sharing_context)
9443 context_used = mono_method_check_context_used (cmethod);
9445 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9446 if (check_linkdemand (cfg, method, cmethod))
9448 CHECK_CFG_EXCEPTION;
9449 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9450 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9456 args [1] = emit_get_rgctx_method (cfg, context_used,
9457 cmethod, MONO_RGCTX_INFO_METHOD);
9460 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
9462 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
9465 inline_costs += 10 * num_calls++;
9469 CHECK_STACK_OVF (1);
9471 n = read16 (ip + 2);
9473 EMIT_NEW_ARGLOAD (cfg, ins, n);
9478 CHECK_STACK_OVF (1);
9480 n = read16 (ip + 2);
9482 NEW_ARGLOADA (cfg, ins, n);
9483 MONO_ADD_INS (cfg->cbb, ins);
9491 n = read16 (ip + 2);
9493 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
9495 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
9499 CHECK_STACK_OVF (1);
9501 n = read16 (ip + 2);
9503 EMIT_NEW_LOCLOAD (cfg, ins, n);
9508 unsigned char *tmp_ip;
9509 CHECK_STACK_OVF (1);
9511 n = read16 (ip + 2);
9514 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
9520 EMIT_NEW_LOCLOADA (cfg, ins, n);
9529 n = read16 (ip + 2);
9531 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
9533 emit_stloc_ir (cfg, sp, header, n);
9540 if (sp != stack_start)
9542 if (cfg->method != method)
9544 * Inlining this into a loop in a parent could lead to
9545 * stack overflows which is different behavior than the
9546 * non-inlined case, thus disable inlining in this case.
9548 goto inline_failure;
9550 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
9551 ins->dreg = alloc_preg (cfg);
9552 ins->sreg1 = sp [0]->dreg;
9553 ins->type = STACK_PTR;
9554 MONO_ADD_INS (cfg->cbb, ins);
9556 cfg->flags |= MONO_CFG_HAS_ALLOCA;
9558 ins->flags |= MONO_INST_INIT;
9563 case CEE_ENDFILTER: {
9564 MonoExceptionClause *clause, *nearest;
9565 int cc, nearest_num;
9569 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
9571 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
9572 ins->sreg1 = (*sp)->dreg;
9573 MONO_ADD_INS (bblock, ins);
9574 start_new_bblock = 1;
9579 for (cc = 0; cc < header->num_clauses; ++cc) {
9580 clause = &header->clauses [cc];
9581 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
9582 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
9583 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
9589 if ((ip - header->code) != nearest->handler_offset)
9594 case CEE_UNALIGNED_:
9595 ins_flag |= MONO_INST_UNALIGNED;
9596 /* FIXME: record alignment? we can assume 1 for now */
9601 ins_flag |= MONO_INST_VOLATILE;
9605 ins_flag |= MONO_INST_TAILCALL;
9606 cfg->flags |= MONO_CFG_HAS_TAIL;
9607 /* Can't inline tail calls at this time */
9608 inline_costs += 100000;
9615 token = read32 (ip + 2);
9616 klass = mini_get_class (method, token, generic_context);
9617 CHECK_TYPELOAD (klass);
9618 if (generic_class_is_reference_type (cfg, klass))
9619 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
9621 mini_emit_initobj (cfg, *sp, NULL, klass);
9625 case CEE_CONSTRAINED_:
9627 token = read32 (ip + 2);
9628 if (method->wrapper_type != MONO_WRAPPER_NONE)
9629 constrained_call = (MonoClass *)mono_method_get_wrapper_data (method, token);
9631 constrained_call = mono_class_get_full (image, token, generic_context);
9632 CHECK_TYPELOAD (constrained_call);
9637 MonoInst *iargs [3];
9641 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
9642 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
9643 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
9644 /* emit_memset only works when val == 0 */
9645 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
9650 if (ip [1] == CEE_CPBLK) {
9651 MonoMethod *memcpy_method = get_memcpy_method ();
9652 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
9654 MonoMethod *memset_method = get_memset_method ();
9655 mono_emit_method_call (cfg, memset_method, iargs, NULL);
9665 ins_flag |= MONO_INST_NOTYPECHECK;
9667 ins_flag |= MONO_INST_NORANGECHECK;
9668 /* we ignore the no-nullcheck for now since we
9669 * really do it explicitly only when doing callvirt->call
9675 int handler_offset = -1;
9677 for (i = 0; i < header->num_clauses; ++i) {
9678 MonoExceptionClause *clause = &header->clauses [i];
9679 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
9680 handler_offset = clause->handler_offset;
9685 bblock->flags |= BB_EXCEPTION_UNSAFE;
9687 g_assert (handler_offset != -1);
9689 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
9690 MONO_INST_NEW (cfg, ins, OP_RETHROW);
9691 ins->sreg1 = load->dreg;
9692 MONO_ADD_INS (bblock, ins);
9694 link_bblock (cfg, bblock, end_bblock);
9695 start_new_bblock = 1;
9703 CHECK_STACK_OVF (1);
9705 token = read32 (ip + 2);
9706 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !method->klass->image->dynamic) {
9707 MonoType *type = mono_type_create_from_typespec (image, token);
9708 token = mono_type_size (type, &ialign);
9710 MonoClass *klass = mono_class_get_full (image, token, generic_context);
9711 CHECK_TYPELOAD (klass);
9712 mono_class_init (klass);
9713 token = mono_class_value_size (klass, &align);
9715 EMIT_NEW_ICONST (cfg, ins, token);
9720 case CEE_REFANYTYPE: {
9721 MonoInst *src_var, *src;
9727 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9729 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
9730 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
9731 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
9749 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
9759 g_warning ("opcode 0x%02x not handled", *ip);
9763 if (start_new_bblock != 1)
9766 bblock->cil_length = ip - bblock->cil_code;
9767 bblock->next_bb = end_bblock;
9769 if (cfg->method == method && cfg->domainvar) {
9771 MonoInst *get_domain;
9773 cfg->cbb = init_localsbb;
9775 if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
9776 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
9779 get_domain->dreg = alloc_preg (cfg);
9780 MONO_ADD_INS (cfg->cbb, get_domain);
9782 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
9783 MONO_ADD_INS (cfg->cbb, store);
9786 #ifdef TARGET_POWERPC
9787 if (cfg->compile_aot)
9788 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
9789 mono_get_got_var (cfg);
9792 if (cfg->method == method && cfg->got_var)
9793 mono_emit_load_got_addr (cfg);
9798 cfg->cbb = init_localsbb;
9800 for (i = 0; i < header->num_locals; ++i) {
9801 MonoType *ptype = header->locals [i];
9802 int t = ptype->type;
9803 dreg = cfg->locals [i]->dreg;
9805 if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
9806 t = mono_class_enum_basetype (ptype->data.klass)->type;
9808 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9809 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
9810 MONO_EMIT_NEW_ICONST (cfg, cfg->locals [i]->dreg, 0);
9811 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
9812 MONO_EMIT_NEW_I8CONST (cfg, cfg->locals [i]->dreg, 0);
9813 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
9814 MONO_INST_NEW (cfg, ins, OP_R8CONST);
9815 ins->type = STACK_R8;
9816 ins->inst_p0 = (void*)&r8_0;
9817 ins->dreg = alloc_dreg (cfg, STACK_R8);
9818 MONO_ADD_INS (init_localsbb, ins);
9819 EMIT_NEW_LOCSTORE (cfg, store, i, ins);
9820 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
9821 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
9822 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
9824 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9829 if (cfg->init_ref_vars && cfg->method == method) {
9830 /* Emit initialization for ref vars */
9831 // FIXME: Avoid duplication initialization for IL locals.
9832 for (i = 0; i < cfg->num_varinfo; ++i) {
9833 MonoInst *ins = cfg->varinfo [i];
9835 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
9836 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
9840 /* Add a sequence point for method entry/exit events */
9842 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
9843 MONO_ADD_INS (init_localsbb, ins);
9844 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
9845 MONO_ADD_INS (cfg->bb_exit, ins);
9850 if (cfg->method == method) {
9852 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
9853 bb->region = mono_find_block_region (cfg, bb->real_offset);
9855 mono_create_spvar_for_region (cfg, bb->region);
9856 if (cfg->verbose_level > 2)
9857 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
9861 g_slist_free (class_inits);
9862 dont_inline = g_list_remove (dont_inline, method);
9864 if (inline_costs < 0) {
9867 /* Method is too large */
9868 mname = mono_method_full_name (method, TRUE);
9869 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
9870 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
9872 mono_metadata_free_mh (header);
9873 mono_basic_block_free (original_bb);
9877 if ((cfg->verbose_level > 2) && (cfg->method == method))
9878 mono_print_code (cfg, "AFTER METHOD-TO-IR");
9880 mono_metadata_free_mh (header);
9881 mono_basic_block_free (original_bb);
9882 return inline_costs;
9885 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
9892 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
9896 set_exception_type_from_invalid_il (cfg, method, ip);
9900 g_slist_free (class_inits);
9901 mono_basic_block_free (original_bb);
9902 dont_inline = g_list_remove (dont_inline, method);
9903 mono_metadata_free_mh (header);
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Map a *_MEMBASE_REG store opcode to its *_MEMBASE_IMM variant, used
 * when the stored value is known to be a constant (see the fusing code in
 * mono_spill_global_vars).  Aborts on opcodes with no immediate form.
 */
9908 store_membase_reg_to_store_membase_imm (int opcode)
9911 case OP_STORE_MEMBASE_REG:
9912 return OP_STORE_MEMBASE_IMM;
9913 case OP_STOREI1_MEMBASE_REG:
9914 return OP_STOREI1_MEMBASE_IMM;
9915 case OP_STOREI2_MEMBASE_REG:
9916 return OP_STOREI2_MEMBASE_IMM;
9917 case OP_STOREI4_MEMBASE_REG:
9918 return OP_STOREI4_MEMBASE_IMM;
9919 case OP_STOREI8_MEMBASE_REG:
9920 return OP_STOREI8_MEMBASE_IMM;
/* no immediate variant exists for this store opcode */
9922 g_assert_not_reached ();
9928 #endif /* DISABLE_JIT */
/*
 * mono_op_to_op_imm:
 *
 *   Return the immediate variant of OPCODE, for use when one of its
 * source registers is known to hold a constant.  Presumably returns a
 * "no variant" sentinel for unmapped opcodes — the surrounding case
 * labels and default branch are not visible here, TODO confirm.
 */
9931 mono_op_to_op_imm (int opcode)
9941 return OP_IDIV_UN_IMM;
9945 return OP_IREM_UN_IMM;
9959 return OP_ISHR_UN_IMM;
9976 return OP_LSHR_UN_IMM;
9979 return OP_COMPARE_IMM;
9981 return OP_ICOMPARE_IMM;
9983 return OP_LCOMPARE_IMM;
/* stores of a constant value */
9985 case OP_STORE_MEMBASE_REG:
9986 return OP_STORE_MEMBASE_IMM;
9987 case OP_STOREI1_MEMBASE_REG:
9988 return OP_STOREI1_MEMBASE_IMM;
9989 case OP_STOREI2_MEMBASE_REG:
9990 return OP_STOREI2_MEMBASE_IMM;
9991 case OP_STOREI4_MEMBASE_REG:
9992 return OP_STOREI4_MEMBASE_IMM;
/* x86-family opcodes which accept an immediate operand directly */
9994 #if defined(TARGET_X86) || defined (TARGET_AMD64)
9996 return OP_X86_PUSH_IMM;
9997 case OP_X86_COMPARE_MEMBASE_REG:
9998 return OP_X86_COMPARE_MEMBASE_IMM;
10000 #if defined(TARGET_AMD64)
10001 case OP_AMD64_ICOMPARE_MEMBASE_REG:
10002 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
10004 case OP_VOIDCALL_REG:
10005 return OP_VOIDCALL;
10013 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Map a CIL CEE_LDIND_* opcode to the corresponding typed
 * OP_LOAD*_MEMBASE IR load.  Aborts on any other opcode.
 */
10020 ldind_to_load_membase (int opcode)
10024 return OP_LOADI1_MEMBASE;
10026 return OP_LOADU1_MEMBASE;
10028 return OP_LOADI2_MEMBASE;
10030 return OP_LOADU2_MEMBASE;
10032 return OP_LOADI4_MEMBASE;
10034 return OP_LOADU4_MEMBASE;
10036 return OP_LOAD_MEMBASE;
/* object references use the native-width load */
10037 case CEE_LDIND_REF:
10038 return OP_LOAD_MEMBASE;
10040 return OP_LOADI8_MEMBASE;
10042 return OP_LOADR4_MEMBASE;
10044 return OP_LOADR8_MEMBASE;
/* not a CEE_LDIND_* opcode */
10046 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Map a CIL CEE_STIND_* opcode to the corresponding typed
 * OP_STORE*_MEMBASE_REG IR store.  Aborts on any other opcode.
 */
10053 stind_to_store_membase (int opcode)
10057 return OP_STOREI1_MEMBASE_REG;
10059 return OP_STOREI2_MEMBASE_REG;
10061 return OP_STOREI4_MEMBASE_REG;
/* object references use the native-width store */
10063 case CEE_STIND_REF:
10064 return OP_STORE_MEMBASE_REG;
10066 return OP_STOREI8_MEMBASE_REG;
10068 return OP_STORER4_MEMBASE_REG;
10070 return OP_STORER8_MEMBASE_REG;
/* not a CEE_STIND_* opcode */
10072 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map an OP_LOAD*_MEMBASE opcode to the absolute-address OP_LOAD*_MEM
 * form, for targets which support loading from a constant address
 * (currently only x86/amd64, hence the #if).  Presumably returns a
 * "no variant" sentinel otherwise — the fallback branch is not visible
 * here, TODO confirm.
 */
10079 mono_load_membase_to_load_mem (int opcode)
10081 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
10082 #if defined(TARGET_X86) || defined(TARGET_AMD64)
10084 case OP_LOAD_MEMBASE:
10085 return OP_LOAD_MEM;
10086 case OP_LOADU1_MEMBASE:
10087 return OP_LOADU1_MEM;
10088 case OP_LOADU2_MEMBASE:
10089 return OP_LOADU2_MEM;
10090 case OP_LOADI4_MEMBASE:
10091 return OP_LOADI4_MEM;
10092 case OP_LOADU4_MEMBASE:
10093 return OP_LOADU4_MEM;
/* 64-bit loads from an absolute address need 64-bit registers */
10094 #if SIZEOF_REGISTER == 8
10095 case OP_LOADI8_MEMBASE:
10096 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   Map OPCODE to a variant which operates directly on a memory
 * destination ("reg-to-mem" read-modify-write), so a load+op+store
 * sequence can be fused into one instruction.  STORE_OPCODE is the store
 * which would otherwise write the result back; the fusion is only valid
 * for full-width stores, hence the store_opcode filters below.
 * Returns -1 when no such variant exists (callers in
 * mono_spill_global_vars test for -1).
 */
10105 op_to_op_dest_membase (int store_opcode, int opcode)
10107 #if defined(TARGET_X86)
/* on x86 only 32-bit/native-width stores can be fused */
10108 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
10113 return OP_X86_ADD_MEMBASE_REG;
10115 return OP_X86_SUB_MEMBASE_REG;
10117 return OP_X86_AND_MEMBASE_REG;
10119 return OP_X86_OR_MEMBASE_REG;
10121 return OP_X86_XOR_MEMBASE_REG;
10124 return OP_X86_ADD_MEMBASE_IMM;
10127 return OP_X86_SUB_MEMBASE_IMM;
10130 return OP_X86_AND_MEMBASE_IMM;
10133 return OP_X86_OR_MEMBASE_IMM;
10136 return OP_X86_XOR_MEMBASE_IMM;
10142 #if defined(TARGET_AMD64)
/* amd64 additionally allows 64-bit stores to be fused */
10143 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
10148 return OP_X86_ADD_MEMBASE_REG;
10150 return OP_X86_SUB_MEMBASE_REG;
10152 return OP_X86_AND_MEMBASE_REG;
10154 return OP_X86_OR_MEMBASE_REG;
10156 return OP_X86_XOR_MEMBASE_REG;
10158 return OP_X86_ADD_MEMBASE_IMM;
10160 return OP_X86_SUB_MEMBASE_IMM;
10162 return OP_X86_AND_MEMBASE_IMM;
10164 return OP_X86_OR_MEMBASE_IMM;
10166 return OP_X86_XOR_MEMBASE_IMM;
/* 64-bit (long) variants */
10168 return OP_AMD64_ADD_MEMBASE_REG;
10170 return OP_AMD64_SUB_MEMBASE_REG;
10172 return OP_AMD64_AND_MEMBASE_REG;
10174 return OP_AMD64_OR_MEMBASE_REG;
10176 return OP_AMD64_XOR_MEMBASE_REG;
10179 return OP_AMD64_ADD_MEMBASE_IMM;
10182 return OP_AMD64_SUB_MEMBASE_IMM;
10185 return OP_AMD64_AND_MEMBASE_IMM;
10188 return OP_AMD64_OR_MEMBASE_IMM;
10191 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Map OPCODE to a variant which stores its result directly into memory
 * (x86-family SETcc-to-membase), fusing the following STORE_OPCODE.
 * Only valid for 1-byte stores since SETcc writes a single byte.
 * Returns -1 when no such variant exists (the caller in
 * mono_spill_global_vars tests for -1; the fallback branch is not
 * visible here).
 */
10201 op_to_op_store_membase (int store_opcode, int opcode)
10203 #if defined(TARGET_X86) || defined(TARGET_AMD64)
10206 if (store_opcode == OP_STOREI1_MEMBASE_REG)
10207 return OP_X86_SETEQ_MEMBASE;
10209 if (store_opcode == OP_STOREI1_MEMBASE_REG)
10210 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Map OPCODE to a variant which reads its FIRST source operand directly
 * from memory, fusing away the preceding load (LOAD_OPCODE).  Returns -1
 * when no such variant exists (the caller in mono_spill_global_vars
 * tests for -1).
 */
10218 op_to_op_src1_membase (int load_opcode, int opcode)
10221 /* FIXME: This has sign extension issues */
10223 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
10224 return OP_X86_COMPARE_MEMBASE8_IMM;
/* only full-width loads can be fused below */
10227 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
10232 return OP_X86_PUSH_MEMBASE;
10233 case OP_COMPARE_IMM:
10234 case OP_ICOMPARE_IMM:
10235 return OP_X86_COMPARE_MEMBASE_IMM;
10238 return OP_X86_COMPARE_MEMBASE_REG;
10242 #ifdef TARGET_AMD64
10243 /* FIXME: This has sign extension issues */
10245 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
10246 return OP_X86_COMPARE_MEMBASE8_IMM;
10251 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10252 return OP_X86_PUSH_MEMBASE;
/* the commented-out cases below are intentionally disabled, see the FIXME */
10254 /* FIXME: This only works for 32 bit immediates
10255 case OP_COMPARE_IMM:
10256 case OP_LCOMPARE_IMM:
10257 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10258 return OP_AMD64_COMPARE_MEMBASE_IMM;
10260 case OP_ICOMPARE_IMM:
10261 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10262 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
10266 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10267 return OP_AMD64_COMPARE_MEMBASE_REG;
10270 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10271 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Map OPCODE to a variant which reads its SECOND source operand
 * directly from memory, fusing away the preceding load (LOAD_OPCODE).
 * Returns -1 when no such variant exists (the caller in
 * mono_spill_global_vars tests for -1).
 */
10280 op_to_op_src2_membase (int load_opcode, int opcode)
/* x86: only full-width loads can be fused */
10283 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
10289 return OP_X86_COMPARE_REG_MEMBASE;
10291 return OP_X86_ADD_REG_MEMBASE;
10293 return OP_X86_SUB_REG_MEMBASE;
10295 return OP_X86_AND_REG_MEMBASE;
10297 return OP_X86_OR_REG_MEMBASE;
10299 return OP_X86_XOR_REG_MEMBASE;
10303 #ifdef TARGET_AMD64
10306 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10307 return OP_AMD64_ICOMPARE_REG_MEMBASE;
10311 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10312 return OP_AMD64_COMPARE_REG_MEMBASE;
/* 32-bit operations use the x86 opcodes, 64-bit ones the amd64 opcodes */
10315 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10316 return OP_X86_ADD_REG_MEMBASE;
10318 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10319 return OP_X86_SUB_REG_MEMBASE;
10321 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10322 return OP_X86_AND_REG_MEMBASE;
10324 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10325 return OP_X86_OR_REG_MEMBASE;
10327 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10328 return OP_X86_XOR_REG_MEMBASE;
10330 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10331 return OP_AMD64_ADD_REG_MEMBASE;
10333 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10334 return OP_AMD64_SUB_REG_MEMBASE;
10336 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10337 return OP_AMD64_AND_REG_MEMBASE;
10339 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10340 return OP_AMD64_OR_REG_MEMBASE;
10342 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10343 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm, but refuses to convert opcodes which are
 * emulated in software on this target (long shifts on 32-bit registers,
 * mul/div where MONO_ARCH_EMULATE_* is set), since their immediate forms
 * would also have to be emulated.  The excluded case labels are not
 * visible here — TODO confirm.
 */
10351 mono_op_to_op_imm_noemul (int opcode)
10354 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
10359 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
10367 return mono_op_to_op_imm (opcode);
10371 #ifndef DISABLE_JIT
10374 * mono_handle_global_vregs:
10376 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
/*
 * Make vregs used in more than one bblock 'global' (backed by a variable),
 * convert single-bblock variables into local vregs, then compact the
 * varinfo/vars tables.
 *
 * Fixes relative to the previous version:
 *  - vreg_to_bb was sized with sizeof (gint32*) although its element type
 *    is gint32 (it only worked by over-allocating on 64-bit targets);
 *  - the AOT liveness scan tested SRC1/sreg1 twice (copy-paste duplicate)
 *    instead of also testing SRC2/sreg2, so uses of a variable as the
 *    second source register were not seen and the vreg-to-lvreg
 *    conversion could be applied across a call.
 */
10380 mono_handle_global_vregs (MonoCompile *cfg)
10382 gint32 *vreg_to_bb;
10383 MonoBasicBlock *bb;
/* element type is gint32, not gint32*; keep one extra slot for safety */
10386 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32) * (cfg->next_vreg + 1));
10388 #ifdef MONO_ARCH_SIMD_INTRINSICS
10389 if (cfg->uses_simd_intrinsics)
10390 mono_simd_simplify_indirection (cfg);
10393 /* Find local vregs used in more than one bb */
10394 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10395 MonoInst *ins = bb->code;
10396 int block_num = bb->block_num;
10398 if (cfg->verbose_level > 2)
10399 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
10402 for (; ins; ins = ins->next) {
10403 const char *spec = INS_INFO (ins->opcode);
10404 int regtype = 0, regindex;
10407 if (G_UNLIKELY (cfg->verbose_level > 2))
10408 mono_print_ins (ins);
/* CIL opcodes must have been lowered to machine IR by now */
10410 g_assert (ins->opcode >= MONO_CEE_LAST);
/* visit dreg, sreg1, sreg2, sreg3 in turn */
10412 for (regindex = 0; regindex < 4; regindex ++) {
10415 if (regindex == 0) {
10416 regtype = spec [MONO_INST_DEST];
10417 if (regtype == ' ')
10420 } else if (regindex == 1) {
10421 regtype = spec [MONO_INST_SRC1];
10422 if (regtype == ' ')
10425 } else if (regindex == 2) {
10426 regtype = spec [MONO_INST_SRC2];
10427 if (regtype == ' ')
10430 } else if (regindex == 3) {
10431 regtype = spec [MONO_INST_SRC3];
10432 if (regtype == ' ')
10437 #if SIZEOF_REGISTER == 4
10438 /* In the LLVM case, the long opcodes are not decomposed */
10439 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
10441 * Since some instructions reference the original long vreg,
10442 * and some reference the two component vregs, it is quite hard
10443 * to determine when it needs to be global. So be conservative.
10445 if (!get_vreg_to_inst (cfg, vreg)) {
10446 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
10448 if (cfg->verbose_level > 2)
10449 printf ("LONG VREG R%d made global.\n", vreg);
10453 * Make the component vregs volatile since the optimizations can
10454 * get confused otherwise.
10456 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
10457 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
10461 g_assert (vreg != -1);
10463 prev_bb = vreg_to_bb [vreg];
10464 if (prev_bb == 0) {
10465 /* 0 is a valid block num */
10466 vreg_to_bb [vreg] = block_num + 1;
10467 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* hard registers are implicitly global, skip them */
10468 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
10471 if (!get_vreg_to_inst (cfg, vreg)) {
10472 if (G_UNLIKELY (cfg->verbose_level > 2))
10473 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
10477 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
10480 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
10483 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
10486 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
10489 g_assert_not_reached ();
10493 /* Flag as having been used in more than one bb */
10494 vreg_to_bb [vreg] = -1;
10500 /* If a variable is used in only one bblock, convert it into a local vreg */
10501 for (i = 0; i < cfg->num_varinfo; i++) {
10502 MonoInst *var = cfg->varinfo [i];
10503 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
10505 switch (var->type) {
10511 #if SIZEOF_REGISTER == 8
10514 #if !defined(TARGET_X86) && !defined(MONO_ARCH_SOFT_FLOAT)
10515 /* Enabling this screws up the fp stack on x86 */
10518 /* Arguments are implicitly global */
10519 /* Putting R4 vars into registers doesn't work currently */
10520 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg) {
10522 * Make that the variable's liveness interval doesn't contain a call, since
10523 * that would cause the lvreg to be spilled, making the whole optimization
10526 /* This is too slow for JIT compilation */
10528 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
10530 int def_index, call_index, ins_index;
10531 gboolean spilled = FALSE;
10536 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
10537 const char *spec = INS_INFO (ins->opcode);
10539 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
10540 def_index = ins_index;
10542 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
/* check the second source too (was a copy-paste duplicate of the SRC1 test) */
10543 ((spec [MONO_INST_SRC2] != ' ') && (ins->sreg2 == var->dreg))) {
10544 if (call_index > def_index) {
10550 if (MONO_IS_CALL (ins))
10551 call_index = ins_index;
10561 if (G_UNLIKELY (cfg->verbose_level > 2))
10562 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
10563 var->flags |= MONO_INST_IS_DEAD;
10564 cfg->vreg_to_inst [var->dreg] = NULL;
10571 * Compress the varinfo and vars tables so the liveness computation is faster and
10572 * takes up less space.
10575 for (i = 0; i < cfg->num_varinfo; ++i) {
10576 MonoInst *var = cfg->varinfo [i];
10577 if (pos < i && cfg->locals_start == i)
10578 cfg->locals_start = pos;
10579 if (!(var->flags & MONO_INST_IS_DEAD)) {
10581 cfg->varinfo [pos] = cfg->varinfo [i];
10582 cfg->varinfo [pos]->inst_c0 = pos;
10583 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
10584 cfg->vars [pos].idx = pos;
10585 #if SIZEOF_REGISTER == 4
10586 if (cfg->varinfo [pos]->type == STACK_I8) {
10587 /* Modify the two component vars too */
10590 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
10591 var1->inst_c0 = pos;
10592 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
10593 var1->inst_c0 = pos;
10600 cfg->num_varinfo = pos;
10601 if (cfg->locals_start > cfg->num_varinfo)
10602 cfg->locals_start = cfg->num_varinfo;
10606 * mono_spill_global_vars:
10608 * Generate spill code for variables which are not allocated to registers,
10609 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
10610 * code is generated which could be optimized by the local optimization passes.
10613 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
10615 MonoBasicBlock *bb;
10617 int orig_next_vreg;
10618 guint32 *vreg_to_lvreg;
10620 guint32 i, lvregs_len;
10621 gboolean dest_has_lvreg = FALSE;
10622 guint32 stacktypes [128];
10623 MonoInst **live_range_start, **live_range_end;
10624 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
10626 *need_local_opts = FALSE;
10628 memset (spec2, 0, sizeof (spec2));
10630 /* FIXME: Move this function to mini.c */
10631 stacktypes ['i'] = STACK_PTR;
10632 stacktypes ['l'] = STACK_I8;
10633 stacktypes ['f'] = STACK_R8;
10634 #ifdef MONO_ARCH_SIMD_INTRINSICS
10635 stacktypes ['x'] = STACK_VTYPE;
10638 #if SIZEOF_REGISTER == 4
10639 /* Create MonoInsts for longs */
10640 for (i = 0; i < cfg->num_varinfo; i++) {
10641 MonoInst *ins = cfg->varinfo [i];
10643 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
10644 switch (ins->type) {
10649 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
10652 g_assert (ins->opcode == OP_REGOFFSET);
10654 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
10656 tree->opcode = OP_REGOFFSET;
10657 tree->inst_basereg = ins->inst_basereg;
10658 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
10660 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
10662 tree->opcode = OP_REGOFFSET;
10663 tree->inst_basereg = ins->inst_basereg;
10664 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
10674 /* FIXME: widening and truncation */
10677 * As an optimization, when a variable allocated to the stack is first loaded into
10678 * an lvreg, we will remember the lvreg and use it the next time instead of loading
10679 * the variable again.
10681 orig_next_vreg = cfg->next_vreg;
10682 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
10683 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
10687 * These arrays contain the first and last instructions accessing a given
10689 * Since we emit bblocks in the same order we process them here, and we
10690 * don't split live ranges, these will precisely describe the live range of
10691 * the variable, i.e. the instruction range where a valid value can be found
10692 * in the variables location.
10693 * The live range is computed using the liveness info computed by the liveness pass.
10694 * We can't use vmv->range, since that is an abstract live range, and we need
10695 * one which is instruction precise.
10696 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
10698 /* FIXME: Only do this if debugging info is requested */
10699 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
10700 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
10701 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
10702 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
10704 /* Add spill loads/stores */
10705 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10708 if (cfg->verbose_level > 2)
10709 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
10711 /* Clear vreg_to_lvreg array */
10712 for (i = 0; i < lvregs_len; i++)
10713 vreg_to_lvreg [lvregs [i]] = 0;
10717 MONO_BB_FOR_EACH_INS (bb, ins) {
10718 const char *spec = INS_INFO (ins->opcode);
10719 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
10720 gboolean store, no_lvreg;
10721 int sregs [MONO_MAX_SRC_REGS];
10723 if (G_UNLIKELY (cfg->verbose_level > 2))
10724 mono_print_ins (ins);
10726 if (ins->opcode == OP_NOP)
10730 * We handle LDADDR here as well, since it can only be decomposed
10731 * when variable addresses are known.
10733 if (ins->opcode == OP_LDADDR) {
10734 MonoInst *var = ins->inst_p0;
10736 if (var->opcode == OP_VTARG_ADDR) {
10737 /* Happens on SPARC/S390 where vtypes are passed by reference */
10738 MonoInst *vtaddr = var->inst_left;
10739 if (vtaddr->opcode == OP_REGVAR) {
10740 ins->opcode = OP_MOVE;
10741 ins->sreg1 = vtaddr->dreg;
10743 else if (var->inst_left->opcode == OP_REGOFFSET) {
10744 ins->opcode = OP_LOAD_MEMBASE;
10745 ins->inst_basereg = vtaddr->inst_basereg;
10746 ins->inst_offset = vtaddr->inst_offset;
10750 g_assert (var->opcode == OP_REGOFFSET);
10752 ins->opcode = OP_ADD_IMM;
10753 ins->sreg1 = var->inst_basereg;
10754 ins->inst_imm = var->inst_offset;
10757 *need_local_opts = TRUE;
10758 spec = INS_INFO (ins->opcode);
10761 if (ins->opcode < MONO_CEE_LAST) {
10762 mono_print_ins (ins);
10763 g_assert_not_reached ();
10767 * Store opcodes have destbasereg in the dreg, but in reality, it is an
10771 if (MONO_IS_STORE_MEMBASE (ins)) {
10772 tmp_reg = ins->dreg;
10773 ins->dreg = ins->sreg2;
10774 ins->sreg2 = tmp_reg;
10777 spec2 [MONO_INST_DEST] = ' ';
10778 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10779 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10780 spec2 [MONO_INST_SRC3] = ' ';
10782 } else if (MONO_IS_STORE_MEMINDEX (ins))
10783 g_assert_not_reached ();
10788 if (G_UNLIKELY (cfg->verbose_level > 2)) {
10789 printf ("\t %.3s %d", spec, ins->dreg);
10790 num_sregs = mono_inst_get_src_registers (ins, sregs);
10791 for (srcindex = 0; srcindex < 3; ++srcindex)
10792 printf (" %d", sregs [srcindex]);
10799 regtype = spec [MONO_INST_DEST];
10800 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
10803 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
10804 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
10805 MonoInst *store_ins;
10807 MonoInst *def_ins = ins;
10808 int dreg = ins->dreg; /* The original vreg */
10810 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
10812 if (var->opcode == OP_REGVAR) {
10813 ins->dreg = var->dreg;
10814 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
10816 * Instead of emitting a load+store, use a _membase opcode.
10818 g_assert (var->opcode == OP_REGOFFSET);
10819 if (ins->opcode == OP_MOVE) {
10823 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
10824 ins->inst_basereg = var->inst_basereg;
10825 ins->inst_offset = var->inst_offset;
10828 spec = INS_INFO (ins->opcode);
10832 g_assert (var->opcode == OP_REGOFFSET);
10834 prev_dreg = ins->dreg;
10836 /* Invalidate any previous lvreg for this vreg */
10837 vreg_to_lvreg [ins->dreg] = 0;
10841 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
10843 store_opcode = OP_STOREI8_MEMBASE_REG;
10846 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
10848 if (regtype == 'l') {
10849 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
10850 mono_bblock_insert_after_ins (bb, ins, store_ins);
10851 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
10852 mono_bblock_insert_after_ins (bb, ins, store_ins);
10853 def_ins = store_ins;
10856 g_assert (store_opcode != OP_STOREV_MEMBASE);
10858 /* Try to fuse the store into the instruction itself */
10859 /* FIXME: Add more instructions */
10860 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
10861 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
10862 ins->inst_imm = ins->inst_c0;
10863 ins->inst_destbasereg = var->inst_basereg;
10864 ins->inst_offset = var->inst_offset;
10865 spec = INS_INFO (ins->opcode);
10866 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
10867 ins->opcode = store_opcode;
10868 ins->inst_destbasereg = var->inst_basereg;
10869 ins->inst_offset = var->inst_offset;
10873 tmp_reg = ins->dreg;
10874 ins->dreg = ins->sreg2;
10875 ins->sreg2 = tmp_reg;
10878 spec2 [MONO_INST_DEST] = ' ';
10879 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10880 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10881 spec2 [MONO_INST_SRC3] = ' ';
10883 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
10884 // FIXME: The backends expect the base reg to be in inst_basereg
10885 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
10887 ins->inst_basereg = var->inst_basereg;
10888 ins->inst_offset = var->inst_offset;
10889 spec = INS_INFO (ins->opcode);
10891 /* printf ("INS: "); mono_print_ins (ins); */
10892 /* Create a store instruction */
10893 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
10895 /* Insert it after the instruction */
10896 mono_bblock_insert_after_ins (bb, ins, store_ins);
10898 def_ins = store_ins;
10901 * We can't assign ins->dreg to var->dreg here, since the
10902 * sregs could use it. So set a flag, and do it after
10905 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
10906 dest_has_lvreg = TRUE;
10911 if (def_ins && !live_range_start [dreg]) {
10912 live_range_start [dreg] = def_ins;
10913 live_range_start_bb [dreg] = bb;
10920 num_sregs = mono_inst_get_src_registers (ins, sregs);
10921 for (srcindex = 0; srcindex < 3; ++srcindex) {
10922 regtype = spec [MONO_INST_SRC1 + srcindex];
10923 sreg = sregs [srcindex];
10925 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
10926 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
10927 MonoInst *var = get_vreg_to_inst (cfg, sreg);
10928 MonoInst *use_ins = ins;
10929 MonoInst *load_ins;
10930 guint32 load_opcode;
10932 if (var->opcode == OP_REGVAR) {
10933 sregs [srcindex] = var->dreg;
10934 //mono_inst_set_src_registers (ins, sregs);
10935 live_range_end [sreg] = use_ins;
10936 live_range_end_bb [sreg] = bb;
10940 g_assert (var->opcode == OP_REGOFFSET);
10942 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
10944 g_assert (load_opcode != OP_LOADV_MEMBASE);
10946 if (vreg_to_lvreg [sreg]) {
10947 g_assert (vreg_to_lvreg [sreg] != -1);
10949 /* The variable is already loaded to an lvreg */
10950 if (G_UNLIKELY (cfg->verbose_level > 2))
10951 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
10952 sregs [srcindex] = vreg_to_lvreg [sreg];
10953 //mono_inst_set_src_registers (ins, sregs);
10957 /* Try to fuse the load into the instruction */
10958 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
10959 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
10960 sregs [0] = var->inst_basereg;
10961 //mono_inst_set_src_registers (ins, sregs);
10962 ins->inst_offset = var->inst_offset;
10963 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
10964 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
10965 sregs [1] = var->inst_basereg;
10966 //mono_inst_set_src_registers (ins, sregs);
10967 ins->inst_offset = var->inst_offset;
10969 if (MONO_IS_REAL_MOVE (ins)) {
10970 ins->opcode = OP_NOP;
10973 //printf ("%d ", srcindex); mono_print_ins (ins);
10975 sreg = alloc_dreg (cfg, stacktypes [regtype]);
10977 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
10978 if (var->dreg == prev_dreg) {
10980 * sreg refers to the value loaded by the load
10981 * emitted below, but we need to use ins->dreg
10982 * since it refers to the store emitted earlier.
10986 g_assert (sreg != -1);
10987 vreg_to_lvreg [var->dreg] = sreg;
10988 g_assert (lvregs_len < 1024);
10989 lvregs [lvregs_len ++] = var->dreg;
10993 sregs [srcindex] = sreg;
10994 //mono_inst_set_src_registers (ins, sregs);
10996 if (regtype == 'l') {
10997 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
10998 mono_bblock_insert_before_ins (bb, ins, load_ins);
10999 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
11000 mono_bblock_insert_before_ins (bb, ins, load_ins);
11001 use_ins = load_ins;
11004 #if SIZEOF_REGISTER == 4
11005 g_assert (load_opcode != OP_LOADI8_MEMBASE);
11007 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
11008 mono_bblock_insert_before_ins (bb, ins, load_ins);
11009 use_ins = load_ins;
11013 if (var->dreg < orig_next_vreg) {
11014 live_range_end [var->dreg] = use_ins;
11015 live_range_end_bb [var->dreg] = bb;
11019 mono_inst_set_src_registers (ins, sregs);
11021 if (dest_has_lvreg) {
11022 g_assert (ins->dreg != -1);
11023 vreg_to_lvreg [prev_dreg] = ins->dreg;
11024 g_assert (lvregs_len < 1024);
11025 lvregs [lvregs_len ++] = prev_dreg;
11026 dest_has_lvreg = FALSE;
11030 tmp_reg = ins->dreg;
11031 ins->dreg = ins->sreg2;
11032 ins->sreg2 = tmp_reg;
11035 if (MONO_IS_CALL (ins)) {
11036 /* Clear vreg_to_lvreg array */
11037 for (i = 0; i < lvregs_len; i++)
11038 vreg_to_lvreg [lvregs [i]] = 0;
11040 } else if (ins->opcode == OP_NOP) {
11042 MONO_INST_NULLIFY_SREGS (ins);
11045 if (cfg->verbose_level > 2)
11046 mono_print_ins_index (1, ins);
11049 /* Extend the live range based on the liveness info */
11050 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
11051 for (i = 0; i < cfg->num_varinfo; i ++) {
11052 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
11054 if (vreg_is_volatile (cfg, vi->vreg))
11055 /* The liveness info is incomplete */
11058 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
11059 /* Live from at least the first ins of this bb */
11060 live_range_start [vi->vreg] = bb->code;
11061 live_range_start_bb [vi->vreg] = bb;
11064 if (mono_bitset_test_fast (bb->live_out_set, i)) {
11065 /* Live at least until the last ins of this bb */
11066 live_range_end [vi->vreg] = bb->last_ins;
11067 live_range_end_bb [vi->vreg] = bb;
11073 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
11075 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
11076 * by storing the current native offset into MonoMethodVar->live_range_start/end.
11078 if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
11079 for (i = 0; i < cfg->num_varinfo; ++i) {
11080 int vreg = MONO_VARINFO (cfg, i)->vreg;
11083 if (live_range_start [vreg]) {
11084 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
11086 ins->inst_c1 = vreg;
11087 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
11089 if (live_range_end [vreg]) {
11090 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
11092 ins->inst_c1 = vreg;
11093 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
11094 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
11096 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
11102 g_free (live_range_start);
11103 g_free (live_range_end);
11104 g_free (live_range_start_bb);
11105 g_free (live_range_end_bb);
11110 * - use 'iadd' instead of 'int_add'
11111 * - handling ovf opcodes: decompose in method_to_ir.
11112 * - unify iregs/fregs
11113 * -> partly done, the missing parts are:
11114 * - a more complete unification would involve unifying the hregs as well, so
11115 * code wouldn't need if (fp) all over the place. but that would mean the hregs
11116 * would no longer map to the machine hregs, so the code generators would need to
11117 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
11118 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
11119 * fp/non-fp branches speeds it up by about 15%.
11120 * - use sext/zext opcodes instead of shifts
11122 * - get rid of TEMPLOADs if possible and use vregs instead
11123 * - clean up usage of OP_P/OP_ opcodes
11124 * - cleanup usage of DUMMY_USE
11125 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
11127 * - set the stack type and allocate a dreg in the EMIT_NEW macros
11128 * - get rid of all the <foo>2 stuff when the new JIT is ready.
11129 * - make sure handle_stack_args () is called before the branch is emitted
11130 * - when the new IR is done, get rid of all unused stuff
11131 * - COMPARE/BEQ as separate instructions or unify them ?
11132 * - keeping them separate allows specialized compare instructions like
11133 * compare_imm, compare_membase
11134 * - most back ends unify fp compare+branch, fp compare+ceq
11135 * - integrate mono_save_args into inline_method
11136 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
11137 * - handle long shift opts on 32 bit platforms somehow: they require
11138 * 3 sregs (2 for arg1 and 1 for arg2)
11139 * - make byref a 'normal' type.
11140 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
11141 * variable if needed.
11142 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
11143 * like inline_method.
11144 * - remove inlining restrictions
11145 * - fix LNEG and enable cfold of INEG
11146 * - generalize x86 optimizations like ldelema as a peephole optimization
11147 * - add store_mem_imm for amd64
11148 * - optimize the loading of the interruption flag in the managed->native wrappers
11149 * - avoid special handling of OP_NOP in passes
11150 * - move code inserting instructions into one function/macro.
11151 * - try a coalescing phase after liveness analysis
11152 * - add float -> vreg conversion + local optimizations on !x86
11153 * - figure out how to handle decomposed branches during optimizations, ie.
11154 * compare+branch, op_jump_table+op_br etc.
11155 * - promote RuntimeXHandles to vregs
11156 * - vtype cleanups:
11157 * - add a NEW_VARLOADA_VREG macro
11158 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
11159 * accessing vtype fields.
11160 * - get rid of I8CONST on 64 bit platforms
11161 * - dealing with the increase in code size due to branches created during opcode
11163 * - use extended basic blocks
11164 * - all parts of the JIT
11165 * - handle_global_vregs () && local regalloc
11166 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
11167 * - sources of increase in code size:
11170 * - isinst and castclass
11171 * - lvregs not allocated to global registers even if used multiple times
11172 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
11174 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
11175 * - add all micro optimizations from the old JIT
11176 * - put tree optimizations into the deadce pass
11177 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
11178 * specific function.
11179 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
11180 * fcompare + branchCC.
11181 * - create a helper function for allocating a stack slot, taking into account
11182 * MONO_CFG_HAS_SPILLUP.
11184 * - merge the ia64 switch changes.
11185 * - optimize mono_regstate2_alloc_int/float.
11186 * - fix the pessimistic handling of variables accessed in exception handler blocks.
11187 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
11188 * parts of the tree could be separated by other instructions, killing the tree
11189 * arguments, or stores killing loads etc. Also, should we fold loads into other
11190 * instructions if the result of the load is used multiple times ?
11191 * - make the REM_IMM optimization in mini-x86.c arch-independent.
11192 * - LAST MERGE: 108395.
11193 * - when returning vtypes in registers, generate IR and append it to the end of the
11194 * last bb instead of doing it in the epilog.
11195 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
11203 - When to decompose opcodes:
11204 - earlier: this makes some optimizations hard to implement, since the low level IR
11205 no longer contains the necessary information. But it is easier to do.
11206 - later: harder to implement, enables more optimizations.
11207 - Branches inside bblocks:
11208 - created when decomposing complex opcodes.
11209 - branches to another bblock: harmless, but not tracked by the branch
11210 optimizations, so need to branch to a label at the start of the bblock.
11211 - branches to inside the same bblock: very problematic, trips up the local
11212 reg allocator. Can be fixed by splitting the current bblock, but that is a
11213 complex operation, since some local vregs can become global vregs etc.
11214 - Local/global vregs:
11215 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
11216 local register allocator.
11217 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
11218 structure, created by mono_create_var (). Assigned to hregs or the stack by
11219 the global register allocator.
11220 - When to do optimizations like alu->alu_imm:
11221 - earlier -> saves work later on since the IR will be smaller/simpler
11222 - later -> can work on more instructions
11223 - Handling of valuetypes:
11224 - When a vtype is pushed on the stack, a new temporary is created, an
11225 instruction computing its address (LDADDR) is emitted and pushed on
11226 the stack. Need to optimize cases when the vtype is used immediately as in
11227 argument passing, stloc etc.
11228 - Instead of the to_end stuff in the old JIT, simply call the function handling
11229 the values on the stack before emitting the last instruction of the bb.
11232 #endif /* DISABLE_JIT */