2 * mini.c: The new Mono code generator.
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * Copyright 2002-2003 Ximian, Inc.
9 * Copyright 2003-2010 Novell, Inc.
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
21 #ifdef HAVE_SYS_TIME_H
25 #include <mono/utils/memcheck.h>
27 #include <mono/metadata/assembly.h>
28 #include <mono/metadata/loader.h>
29 #include <mono/metadata/tabledefs.h>
30 #include <mono/metadata/class.h>
31 #include <mono/metadata/object.h>
32 #include <mono/metadata/tokentype.h>
33 #include <mono/metadata/tabledefs.h>
34 #include <mono/metadata/threads.h>
35 #include <mono/metadata/appdomain.h>
36 #include <mono/metadata/debug-helpers.h>
37 #include "mono/metadata/profiler.h"
38 #include <mono/metadata/profiler-private.h>
39 #include <mono/metadata/mono-config.h>
40 #include <mono/metadata/environment.h>
41 #include <mono/metadata/mono-debug.h>
42 #include <mono/metadata/gc-internal.h>
43 #include <mono/metadata/threads-types.h>
44 #include <mono/metadata/verify.h>
45 #include <mono/metadata/verify-internals.h>
46 #include <mono/metadata/mempool-internals.h>
47 #include <mono/metadata/attach.h>
48 #include <mono/metadata/runtime.h>
49 #include <mono/utils/mono-math.h>
50 #include <mono/utils/mono-compiler.h>
51 #include <mono/utils/mono-counters.h>
52 #include <mono/utils/mono-error-internals.h>
53 #include <mono/utils/mono-logger-internal.h>
54 #include <mono/utils/mono-mmap.h>
55 #include <mono/utils/mono-path.h>
56 #include <mono/utils/mono-tls.h>
57 #include <mono/utils/mono-hwcap.h>
58 #include <mono/utils/dtrace.h>
59 #include <mono/utils/mono-threads.h>
60 #include <mono/io-layer/io-layer.h>
63 #include "seq-points.h"
71 #include "jit-icalls.h"
74 #include "debugger-agent.h"
/*
 * Process-wide JIT state and the global JIT lock.
 * NOTE(review): source is an elided extract (embedded line numbers skip);
 * code kept byte-identical, comments only.
 */
76 MonoTraceSpec *mono_jit_trace_calls;
/* Method descriptor / IL offset at which to inject an async exception (debug aid). */
77 MonoMethodDesc *mono_inject_async_exc_method;
78 int mono_inject_async_exc_pos;
/* Method descriptor / basic-block number at which to break (debug aid). */
79 MonoMethodDesc *mono_break_at_bb_method;
80 int mono_break_at_bb_bb_num;
81 gboolean mono_do_x86_stack_align = TRUE;
82 gboolean mono_using_xdebug;
/* Serialize access to shared JIT data structures. */
84 #define mono_jit_lock() mono_mutex_lock (&jit_mutex)
85 #define mono_jit_unlock() mono_mutex_unlock (&jit_mutex)
86 static mono_mutex_t jit_mutex;
88 /* Whether to check for pending exceptions in managed-to-native wrappers */
89 gboolean check_for_pending_exc = TRUE;
/*
 * mono_realloc_native_code:
 *   Grow CFG->native_code to CFG->code_size bytes and return the new buffer.
 *   Default codegen is a plain g_realloc; the Native Client path re-aligns the
 *   buffer to a kNaClAlignment boundary after reallocating.
 * NOTE(review): lines are elided here (declarations/braces missing from view);
 * code kept byte-identical.
 */
92 mono_realloc_native_code (MonoCompile *cfg)
94 #if defined(__default_codegen__)
95 return g_realloc (cfg->native_code, cfg->code_size);
96 #elif defined(__native_client_codegen__)
99 guint alignment_check;
101 /* Save the old alignment offset so we can re-align after the realloc. */
102 old_padding = (guint)(cfg->native_code - cfg->native_code_alloc);
103 cfg->code_size = NACL_BUNDLE_ALIGN_UP (cfg->code_size);
/* Over-allocate by kNaClAlignment so there is room to slide the code forward. */
105 cfg->native_code_alloc = g_realloc ( cfg->native_code_alloc,
106 cfg->code_size + kNaClAlignment );
108 /* Align native_code to next nearest kNaClAlignment byte. */
109 native_code = (guint)cfg->native_code_alloc + kNaClAlignment;
110 native_code = (guint)native_code & ~kNaClAlignmentMask;
112 /* Shift the data to be 32-byte aligned again. */
113 memmove (native_code, cfg->native_code_alloc + old_padding, cfg->code_size);
115 alignment_check = (guint)native_code & kNaClAlignmentMask;
116 g_assert (alignment_check == 0);
/* Neither codegen variant selected: unreachable configuration. */
119 g_assert_not_reached ();
120 return cfg->native_code;
124 #ifdef __native_client_codegen__
126 /* Prevent instructions from straddling a 32-byte alignment boundary. */
127 /* Instructions longer than 32 bytes must be aligned internally. */
128 /* IN: pcode, instlen */
/* If the next instruction of instlen bytes would cross a bundle boundary,
 * pad *pcode to the start of the next bundle first. */
130 void mono_nacl_align_inst(guint8 **pcode, int instlen) {
133 space_in_block = kNaClAlignment - ((uintptr_t)(*pcode) & kNaClAlignmentMask);
/* Instructions as long as a whole bundle are not supported here. */
135 if (G_UNLIKELY (instlen >= kNaClAlignment)) {
136 g_assert_not_reached();
137 } else if (instlen > space_in_block) {
138 *pcode = mono_arch_nacl_pad(*pcode, space_in_block);
142 /* Move emitted call sequence to the end of a kNaClAlignment-byte block. */
143 /* IN: start pointer to start of call sequence */
144 /* IN: pcode pointer to end of call sequence (current "IP") */
145 /* OUT: start pointer to the start of the call sequence after padding */
146 /* OUT: pcode pointer to the end of the call sequence after padding */
147 void mono_nacl_align_call(guint8 **start, guint8 **pcode) {
148 const size_t MAX_NACL_CALL_LENGTH = kNaClAlignment;
149 guint8 copy_of_call[MAX_NACL_CALL_LENGTH];
152 const size_t length = (size_t)((*pcode)-(*start));
153 g_assert(length < MAX_NACL_CALL_LENGTH);
/* Save the emitted call bytes, insert padding, then re-emit the saved
 * bytes at the padded position so the call ends exactly on a bundle edge. */
155 memcpy(copy_of_call, *start, length);
156 temp = mono_nacl_pad_call(*start, (guint8)length);
157 memcpy(temp, copy_of_call, length);
159 (*pcode) = temp + length;
162 /* mono_nacl_pad_call(): Insert padding for Native Client call instructions */
163 /* code pointer to buffer for emitting code */
164 /* ilength length of call instruction */
/* Returns the (possibly padded) code pointer so that a call of ilength bytes
 * will end exactly at a kNaClAlignment boundary. */
165 guint8 *mono_nacl_pad_call(guint8 *code, guint8 ilength) {
166 int freeSpaceInBlock = kNaClAlignment - ((uintptr_t)code & kNaClAlignmentMask);
167 int padding = freeSpaceInBlock - ilength;
170 /* There isn't enough space in this block for the instruction. */
171 /* Fill this block and start a new one. */
172 code = mono_arch_nacl_pad(code, freeSpaceInBlock);
173 freeSpaceInBlock = kNaClAlignment;
174 padding = freeSpaceInBlock - ilength;
176 g_assert(ilength > 0);
177 g_assert(padding >= 0);
178 g_assert(padding < kNaClAlignment);
179 if (0 == padding) return code;
180 return mono_arch_nacl_pad(code, padding);
/* Advance CODE to the next kNaClAlignment boundary (no-op if already aligned). */
183 guint8 *mono_nacl_align(guint8 *code) {
184 int padding = kNaClAlignment - ((uintptr_t)code & kNaClAlignmentMask);
185 if (padding != kNaClAlignment) code = mono_arch_nacl_pad(code, padding);
/*
 * mono_nacl_fix_patches:
 *   After NaCl padding shifted instructions, re-point each patch offset in JI
 *   past any nops inserted at its recorded position.
 */
189 void mono_nacl_fix_patches(const guint8 *code, MonoJumpInfo *ji)
191 #ifndef USE_JUMP_TABLES
192 MonoJumpInfo *patch_info;
193 for (patch_info = ji; patch_info; patch_info = patch_info->next) {
194 unsigned char *ip = patch_info->ip.i + code;
195 ip = mono_arch_nacl_skip_nops(ip);
196 patch_info->ip.i = ip - code;
200 #endif /* __native_client_codegen__ */
/* NOTE(review): members of a struct whose declaration is elided from this
 * view — enclosing definition not visible here; kept as-is. */
203 MonoExceptionClause *clause;
204 MonoBasicBlock *basic_block;
209 * mono_emit_unwind_op:
211 * Add an unwind op with the given parameters for the list of unwind ops stored in
215 mono_emit_unwind_op (MonoCompile *cfg, int when, int tag, int reg, int val)
/* Allocate the op from the compile mempool; freed with the mempool. */
217 MonoUnwindOp *op = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoUnwindOp));
224 cfg->unwind_ops = g_slist_append_mempool (cfg->mempool, cfg->unwind_ops, op);
/* Verbose mode: pretty-print the DWARF CFA op being recorded. */
225 if (cfg->verbose_level > 1) {
228 printf ("CFA: [%x] def_cfa: %s+0x%x\n", when, mono_arch_regname (reg), val);
230 case DW_CFA_def_cfa_register:
231 printf ("CFA: [%x] def_cfa_reg: %s\n", when, mono_arch_regname (reg));
233 case DW_CFA_def_cfa_offset:
234 printf ("CFA: [%x] def_cfa_offset: 0x%x\n", when, val);
237 printf ("CFA: [%x] offset: %s at cfa-0x%x\n", when, mono_arch_regname (reg), -val);
/* Initialize a MonoMethodVar slot; 0xffff marks "no first use recorded yet".
 * NOTE(review): macro continuation lines are elided from this view. */
243 #define MONO_INIT_VARINFO(vi,id) do { \
244 (vi)->range.first_use.pos.bid = 0xffff; \
250 * mono_unlink_bblock:
252 * Unlink two basic blocks.
255 mono_unlink_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
/* Remove TO from FROM's successor list by compacting the array. */
261 for (i = 0; i < from->out_count; ++i) {
262 if (to == from->out_bb [i]) {
269 for (i = 0; i < from->out_count; ++i) {
270 if (from->out_bb [i] != to)
271 from->out_bb [pos ++] = from->out_bb [i];
/* Exactly one edge must have been removed. */
273 g_assert (pos == from->out_count - 1);
/* Remove FROM from TO's predecessor list the same way. */
278 for (i = 0; i < to->in_count; ++i) {
279 if (from == to->in_bb [i]) {
286 for (i = 0; i < to->in_count; ++i) {
287 if (to->in_bb [i] != from)
288 to->in_bb [pos ++] = to->in_bb [i];
290 g_assert (pos == to->in_count - 1);
296 * mono_bblocks_linked:
298 * Return whether BB1 and BB2 are linked in the CFG.
301 mono_bblocks_linked (MonoBasicBlock *bb1, MonoBasicBlock *bb2)
/* Linear scan of BB1's successors for BB2. */
305 for (i = 0; i < bb1->out_count; ++i) {
306 if (bb1->out_bb [i] == bb2)
/*
 * mono_find_block_region_notry:
 *   Map an IL OFFSET to its EH region id: (clause_index + 1) << 8 combined with
 *   a MONO_REGION_* kind and the clause flags. Filter regions are checked first.
 */
314 mono_find_block_region_notry (MonoCompile *cfg, int offset)
316 MonoMethodHeader *header = cfg->header;
317 MonoExceptionClause *clause;
320 for (i = 0; i < header->num_clauses; ++i) {
321 clause = &header->clauses [i];
/* Filter block: from filter_offset up to (but excluding) the handler start. */
322 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
323 (offset < (clause->handler_offset)))
324 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
326 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
327 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
328 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
329 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
330 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
332 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
340 * mono_get_block_region_notry:
342 * Return the region corresponding to REGION, ignoring try clauses nested inside
346 mono_get_block_region_notry (MonoCompile *cfg, int region)
/* For TRY regions, re-resolve from the clause's try_offset so a try nested
 * inside another handler reports the enclosing (non-try) region. */
348 if ((region & (0xf << 4)) == MONO_REGION_TRY) {
349 MonoMethodHeader *header = cfg->header;
352 * This can happen if a try clause is nested inside a finally clause.
354 int clause_index = (region >> 8) - 1;
355 g_assert (clause_index >= 0 && clause_index < header->num_clauses);
357 region = mono_find_block_region_notry (cfg, header->clauses [clause_index].try_offset);
/* Look up the stack-pointer variable associated with an EH region,
 * after normalizing nested-try regions. Returns NULL if none registered. */
364 mono_find_spvar_for_region (MonoCompile *cfg, int region)
366 region = mono_get_block_region_notry (cfg, region);
368 return g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
/*
 * df_visit:
 *   Recursive depth-first traversal from START, assigning depth-first numbers
 *   (dfn), recording parents (df_parent) and filling ARRAY in visit order.
 */
372 df_visit (MonoBasicBlock *start, int *dfn, MonoBasicBlock **array)
376 array [*dfn] = start;
377 /* g_print ("visit %d at %p (BB%ld)\n", *dfn, start->cil_code, start->block_num); */
378 for (i = 0; i < start->out_count; ++i) {
/* Skip successors already numbered. */
379 if (start->out_bb [i]->dfn)
382 start->out_bb [i]->dfn = *dfn;
383 start->out_bb [i]->df_parent = start;
384 array [*dfn] = start->out_bb [i];
385 df_visit (start->out_bb [i], dfn, array);
/*
 * mono_reverse_branch_op:
 *   Return the logically negated conditional-branch opcode for OPCODE.
 *   Each table is indexed by (opcode - first opcode of its family) and the
 *   families (CIL, float, long, int) are laid out in the same relative order.
 */
390 mono_reverse_branch_op (guint32 opcode)
392 static const int reverse_map [] = {
393 CEE_BNE_UN, CEE_BLT, CEE_BLE, CEE_BGT, CEE_BGE,
394 CEE_BEQ, CEE_BLT_UN, CEE_BLE_UN, CEE_BGT_UN, CEE_BGE_UN
396 static const int reverse_fmap [] = {
397 OP_FBNE_UN, OP_FBLT, OP_FBLE, OP_FBGT, OP_FBGE,
398 OP_FBEQ, OP_FBLT_UN, OP_FBLE_UN, OP_FBGT_UN, OP_FBGE_UN
400 static const int reverse_lmap [] = {
401 OP_LBNE_UN, OP_LBLT, OP_LBLE, OP_LBGT, OP_LBGE,
402 OP_LBEQ, OP_LBLT_UN, OP_LBLE_UN, OP_LBGT_UN, OP_LBGE_UN
404 static const int reverse_imap [] = {
405 OP_IBNE_UN, OP_IBLT, OP_IBLE, OP_IBGT, OP_IBGE,
406 OP_IBEQ, OP_IBLT_UN, OP_IBLE_UN, OP_IBGT_UN, OP_IBGE_UN
/* Dispatch on which opcode family OPCODE belongs to. */
409 if (opcode >= CEE_BEQ && opcode <= CEE_BLT_UN) {
410 opcode = reverse_map [opcode - CEE_BEQ];
411 } else if (opcode >= OP_FBEQ && opcode <= OP_FBLT_UN) {
412 opcode = reverse_fmap [opcode - OP_FBEQ];
413 } else if (opcode >= OP_LBEQ && opcode <= OP_LBLT_UN) {
414 opcode = reverse_lmap [opcode - OP_LBEQ];
415 } else if (opcode >= OP_IBEQ && opcode <= OP_IBLT_UN) {
416 opcode = reverse_imap [opcode - OP_IBEQ];
/* Not a reversible conditional branch. */
418 g_assert_not_reached ();
/*
 * mono_type_to_store_membase:
 *   Return the OP_STORE*_MEMBASE_REG opcode appropriate for storing a value
 *   of TYPE to memory. Aborts on unknown types.
 */
424 mono_type_to_store_membase (MonoCompile *cfg, MonoType *type)
426 type = mini_get_underlying_type (cfg, type);
429 switch (type->type) {
432 return OP_STOREI1_MEMBASE_REG;
435 return OP_STOREI2_MEMBASE_REG;
438 return OP_STOREI4_MEMBASE_REG;
/* Pointer-sized values. */
442 case MONO_TYPE_FNPTR:
443 return OP_STORE_MEMBASE_REG;
/* Reference types also store as a pointer-sized word. */
444 case MONO_TYPE_CLASS:
445 case MONO_TYPE_STRING:
446 case MONO_TYPE_OBJECT:
447 case MONO_TYPE_SZARRAY:
448 case MONO_TYPE_ARRAY:
449 return OP_STORE_MEMBASE_REG;
452 return OP_STOREI8_MEMBASE_REG;
454 return OP_STORER4_MEMBASE_REG;
456 return OP_STORER8_MEMBASE_REG;
457 case MONO_TYPE_VALUETYPE:
/* Enums store as their underlying integral type. */
458 if (type->data.klass->enumtype) {
459 type = mono_class_enum_basetype (type->data.klass);
/* SIMD-capable valuetypes get the vector store. */
462 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
463 return OP_STOREX_MEMBASE;
464 return OP_STOREV_MEMBASE;
465 case MONO_TYPE_TYPEDBYREF:
466 return OP_STOREV_MEMBASE;
466 return OP_STOREV_MEMBASE;
467 case MONO_TYPE_GENERICINST:
/* Retry with the generic container's underlying class. */
468 type = &type->data.generic_class->container_class->byval_arg;
/* Gsharedvt type variables must be valuetypes here. */
472 g_assert (mini_type_var_is_vt (cfg, type));
473 return OP_STOREV_MEMBASE;
475 g_error ("unknown type 0x%02x in type_to_store_membase", type->type);
/*
 * mono_type_to_load_membase:
 *   Return the OP_LOAD*_MEMBASE opcode appropriate for loading a value of
 *   TYPE from memory. Aborts on unknown types.
 */
481 mono_type_to_load_membase (MonoCompile *cfg, MonoType *type)
483 type = mini_get_underlying_type (cfg, type);
485 switch (type->type) {
487 return OP_LOADI1_MEMBASE;
489 return OP_LOADU1_MEMBASE;
491 return OP_LOADI2_MEMBASE;
493 return OP_LOADU2_MEMBASE;
495 return OP_LOADI4_MEMBASE;
497 return OP_LOADU4_MEMBASE;
/* Pointer-sized values. */
501 case MONO_TYPE_FNPTR:
502 return OP_LOAD_MEMBASE;
/* Reference types load as a pointer-sized word. */
503 case MONO_TYPE_CLASS:
504 case MONO_TYPE_STRING:
505 case MONO_TYPE_OBJECT:
506 case MONO_TYPE_SZARRAY:
507 case MONO_TYPE_ARRAY:
508 return OP_LOAD_MEMBASE;
511 return OP_LOADI8_MEMBASE;
513 return OP_LOADR4_MEMBASE;
515 return OP_LOADR8_MEMBASE;
516 case MONO_TYPE_VALUETYPE:
517 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
518 return OP_LOADX_MEMBASE;
/* fallthrough: non-SIMD valuetypes load as OP_LOADV_MEMBASE */
519 case MONO_TYPE_TYPEDBYREF:
520 return OP_LOADV_MEMBASE;
521 case MONO_TYPE_GENERICINST:
522 if (mono_type_generic_inst_is_valuetype (type))
523 return OP_LOADV_MEMBASE;
525 return OP_LOAD_MEMBASE;
/* Type variables: only valid under generic sharing, and must be valuetypes. */
529 g_assert (cfg->generic_sharing_context);
530 g_assert (mini_type_var_is_vt (cfg, type));
531 return OP_LOADV_MEMBASE;
533 g_error ("unknown type 0x%02x in type_to_load_membase", type->type);
/* JIT-aware wrapper for mono_type_to_ldind: under generic sharing, a
 * shared VAR/MVAR must be a valuetype (gsharedvt) before delegating. */
539 mini_type_to_ldind (MonoCompile* cfg, MonoType *type)
541 type = mini_get_underlying_type (cfg, type);
542 if (cfg->generic_sharing_context && !type->byref && (type->type == MONO_TYPE_VAR || type->type == MONO_TYPE_MVAR)) {
543 g_assert (mini_type_var_is_vt (cfg, type));
546 return mono_type_to_ldind (type);
/* JIT-aware wrapper for mono_type_to_stind; mirrors mini_type_to_ldind. */
552 mini_type_to_stind (MonoCompile* cfg, MonoType *type)
554 type = mini_get_underlying_type (cfg, type);
555 if (cfg->generic_sharing_context && !type->byref && (type->type == MONO_TYPE_VAR || type->type == MONO_TYPE_MVAR)) {
556 g_assert (mini_type_var_is_vt (cfg, type));
559 return mono_type_to_stind (type);
/*
 * mono_op_imm_to_op:
 *   Map an OP_.._IMM opcode to its register-register variant; asserts on
 *   opcodes with no such variant.
 * NOTE(review): the case table is almost entirely elided from this view
 * (only the SIZEOF_REGISTER guards and the trailing cases remain); kept as-is.
 */
563 mono_op_imm_to_op (int opcode)
567 #if SIZEOF_REGISTER == 4
583 #if SIZEOF_REGISTER == 4
589 #if SIZEOF_REGISTER == 4
595 #if SIZEOF_REGISTER == 4
635 #if SIZEOF_REGISTER == 4
641 #if SIZEOF_REGISTER == 4
660 case OP_ICOMPARE_IMM:
662 case OP_LOCALLOC_IMM:
/* No non-IMM variant exists: report and abort. */
665 printf ("%s\n", mono_inst_name (opcode));
666 g_assert_not_reached ();
672 * mono_decompose_op_imm:
674 * Replace the OP_.._IMM INS with its non IMM variant.
677 mono_decompose_op_imm (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst *ins)
/* Materialize the immediate into a fresh vreg via an ICONST before INS. */
681 MONO_INST_NEW (cfg, temp, OP_ICONST);
682 temp->inst_c0 = ins->inst_imm;
683 temp->dreg = mono_alloc_ireg (cfg);
684 mono_bblock_insert_before_ins (bb, ins, temp);
685 ins->opcode = mono_op_imm_to_op (ins->opcode);
/* LOCALLOC takes its (former immediate) size in sreg1; other ops in sreg2. */
686 if (ins->opcode == OP_LOCALLOC)
687 ins->sreg1 = temp->dreg;
689 ins->sreg2 = temp->dreg;
/* A new vreg was allocated; keep the block's max vreg in sync. */
691 bb->max_vreg = MAX (bb->max_vreg, cfg->next_vreg);
/*
 * set_vreg_to_inst:
 *   Record INST as the variable backing VREG, growing the vreg_to_inst map
 *   (doubling, min 32 entries) from the compile mempool as needed.
 */
697 set_vreg_to_inst (MonoCompile *cfg, int vreg, MonoInst *inst)
699 if (vreg >= cfg->vreg_to_inst_len) {
700 MonoInst **tmp = cfg->vreg_to_inst;
701 int size = cfg->vreg_to_inst_len;
703 while (vreg >= cfg->vreg_to_inst_len)
704 cfg->vreg_to_inst_len = cfg->vreg_to_inst_len ? cfg->vreg_to_inst_len * 2 : 32;
705 cfg->vreg_to_inst = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * cfg->vreg_to_inst_len);
/* Copy the old entries over; the old buffer stays in the mempool. */
707 memcpy (cfg->vreg_to_inst, tmp, size * sizeof (MonoInst*));
709 cfg->vreg_to_inst [vreg] = inst;
/* Non-byref 64-bit integer (I8/U8) after stripping enums etc. */
712 #define mono_type_is_long(type) (!(type)->byref && ((mono_type_get_underlying_type (type)->type == MONO_TYPE_I8) || (mono_type_get_underlying_type (type)->type == MONO_TYPE_U8)))
/* Non-byref floating point (R4/R8). */
713 #define mono_type_is_float(type) (!(type)->byref && (((type)->type == MONO_TYPE_R8) || ((type)->type == MONO_TYPE_R4)))
/* NOTE(review): stray signature fragment — presumably part of an elided
 * prototype/comment for mono_compile_create_var (full body appears below). */
718 mono_compile_create_var (MonoCompile *cfg, MonoType *type, int opcode)
/*
 * mono_compile_create_var_for_vreg:
 *   Create a new JIT variable of TYPE backed by VREG, register it in
 *   cfg->varinfo/cfg->vars, and (on 32-bit soft-float) allocate the dummy
 *   MonoInsts for the extra vregs of register pairs.
 * NOTE(review): several lines are elided from this view; code kept byte-identical.
 */
726 mono_compile_create_var_for_vreg (MonoCompile *cfg, MonoType *type, int opcode, int vreg)
729 int num = cfg->num_varinfo;
732 type = mini_get_underlying_type (cfg, type);
/* Grow varinfo/vars in lockstep (doubling, min 32), zeroing the new tail. */
734 if ((num + 1) >= cfg->varinfo_count) {
735 int orig_count = cfg->varinfo_count;
736 cfg->varinfo_count = cfg->varinfo_count ? (cfg->varinfo_count * 2) : 32;
737 cfg->varinfo = (MonoInst **)g_realloc (cfg->varinfo, sizeof (MonoInst*) * cfg->varinfo_count);
738 cfg->vars = (MonoMethodVar *)g_realloc (cfg->vars, sizeof (MonoMethodVar) * cfg->varinfo_count);
739 memset (&cfg->vars [orig_count], 0, (cfg->varinfo_count - orig_count) * sizeof (MonoMethodVar));
742 cfg->stat_allocate_var++;
744 MONO_INST_NEW (cfg, inst, opcode);
746 inst->inst_vtype = type;
747 inst->klass = mono_class_from_mono_type (type);
748 type_to_eval_stack_type (cfg, type, inst);
749 /* if set to 1 the variable is native */
750 inst->backend.is_pinvoke = 0;
/* Propagate class-load failures into the compile. */
753 if (inst->klass->exception_type)
754 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
/* GC map bookkeeping: mark managed-pointer and reference-carrying vregs. */
756 if (cfg->compute_gc_maps) {
758 mono_mark_vreg_as_mp (cfg, vreg);
760 if ((MONO_TYPE_ISSTRUCT (type) && inst->klass->has_references) || mini_type_is_reference (cfg, type)) {
761 inst->flags |= MONO_INST_GC_TRACK;
762 mono_mark_vreg_as_ref (cfg, vreg);
767 cfg->varinfo [num] = inst;
769 MONO_INIT_VARINFO (&cfg->vars [num], num);
770 MONO_VARINFO (cfg, num)->vreg = vreg;
773 set_vreg_to_inst (cfg, vreg, inst);
/* 32-bit targets: longs (and floats under soft-float) occupy a vreg pair. */
775 #if SIZEOF_REGISTER == 4
776 if (mono_arch_is_soft_float ()) {
777 regpair = mono_type_is_long (type) || mono_type_is_float (type);
779 regpair = mono_type_is_long (type);
789 * These two cannot be allocated using create_var_for_vreg since that would
790 * put it into the cfg->varinfo array, confusing many parts of the JIT.
794 * Set flags to VOLATILE so SSA skips it.
797 if (cfg->verbose_level >= 4) {
798 printf (" Create LVAR R%d (R%d, R%d)\n", inst->dreg, inst->dreg + 1, inst->dreg + 2);
801 if (mono_arch_is_soft_float () && cfg->opt & MONO_OPT_SSA) {
802 if (mono_type_is_float (type))
803 inst->flags = MONO_INST_VOLATILE;
806 /* Allocate a dummy MonoInst for the first vreg */
807 MONO_INST_NEW (cfg, tree, OP_LOCAL);
808 tree->dreg = inst->dreg + 1;
809 if (cfg->opt & MONO_OPT_SSA)
810 tree->flags = MONO_INST_VOLATILE;
812 tree->type = STACK_I4;
813 tree->inst_vtype = &mono_defaults.int32_class->byval_arg;
814 tree->klass = mono_class_from_mono_type (tree->inst_vtype);
816 set_vreg_to_inst (cfg, inst->dreg + 1, tree);
818 /* Allocate a dummy MonoInst for the second vreg */
819 MONO_INST_NEW (cfg, tree, OP_LOCAL);
820 tree->dreg = inst->dreg + 2;
821 if (cfg->opt & MONO_OPT_SSA)
822 tree->flags = MONO_INST_VOLATILE;
824 tree->type = STACK_I4;
825 tree->inst_vtype = &mono_defaults.int32_class->byval_arg;
826 tree->klass = mono_class_from_mono_type (tree->inst_vtype);
828 set_vreg_to_inst (cfg, inst->dreg + 2, tree);
832 if (cfg->verbose_level > 2)
833 g_print ("created temp %d (R%d) of type %s\n", num, vreg, mono_type_get_name (type));
/*
 * mono_compile_create_var:
 *   Create a JIT variable of TYPE, picking the dreg stack kind from the type
 *   (I8 for longs, R8 for soft-float floats, unified preg otherwise) and
 *   delegating to mono_compile_create_var_for_vreg.
 */
838 mono_compile_create_var (MonoCompile *cfg, MonoType *type, int opcode)
841 type = mini_get_underlying_type (cfg, type);
843 if (mono_type_is_long (type))
844 dreg = mono_alloc_dreg (cfg, STACK_I8);
845 else if (mono_arch_is_soft_float () && mono_type_is_float (type))
846 dreg = mono_alloc_dreg (cfg, STACK_R8);
848 /* All the others are unified */
849 dreg = mono_alloc_preg (cfg);
851 return mono_compile_create_var_for_vreg (cfg, type, opcode, dreg);
855 * Transform a MonoInst into a load from the variable of index var_index.
858 mono_compile_make_var_load (MonoCompile *cfg, MonoInst *dest, gssize var_index)
/* Reset DEST, then fill it in as a load of the indexed variable. */
860 memset (dest, 0, sizeof (MonoInst));
861 dest->inst_i0 = cfg->varinfo [var_index];
862 dest->opcode = mini_type_to_ldind (cfg, dest->inst_i0->inst_vtype);
863 type_to_eval_stack_type (cfg, dest->inst_i0->inst_vtype, dest);
864 dest->klass = dest->inst_i0->klass;
/* Lazily create (and cache on CFG) the int32 spill variable used for
 * int-to-float conversions; VOLATILE keeps it out of the register allocator. */
868 mini_get_int_to_float_spill_area (MonoCompile *cfg)
871 if (!cfg->iconv_raw_var) {
872 cfg->iconv_raw_var = mono_compile_create_var (cfg, &mono_defaults.int32_class->byval_arg, OP_LOCAL);
873 cfg->iconv_raw_var->flags |= MONO_INST_VOLATILE; /*FIXME, use the don't regalloc flag*/
875 return cfg->iconv_raw_var;
/*
 * mono_mark_vreg_as_ref:
 *   Flag VREG as holding a GC reference, growing the vreg_is_ref map
 *   (doubling, min 32) from the compile mempool as needed.
 */
884 mono_mark_vreg_as_ref (MonoCompile *cfg, int vreg)
886 if (vreg >= cfg->vreg_is_ref_len) {
887 gboolean *tmp = cfg->vreg_is_ref;
888 int size = cfg->vreg_is_ref_len;
890 while (vreg >= cfg->vreg_is_ref_len)
891 cfg->vreg_is_ref_len = cfg->vreg_is_ref_len ? cfg->vreg_is_ref_len * 2 : 32;
892 cfg->vreg_is_ref = mono_mempool_alloc0 (cfg->mempool, sizeof (gboolean) * cfg->vreg_is_ref_len);
894 memcpy (cfg->vreg_is_ref, tmp, size * sizeof (gboolean));
896 cfg->vreg_is_ref [vreg] = TRUE;
/*
 * mono_mark_vreg_as_mp:
 *   Flag VREG as holding a managed pointer; same growth scheme as
 *   mono_mark_vreg_as_ref.
 */
900 mono_mark_vreg_as_mp (MonoCompile *cfg, int vreg)
902 if (vreg >= cfg->vreg_is_mp_len) {
903 gboolean *tmp = cfg->vreg_is_mp;
904 int size = cfg->vreg_is_mp_len;
906 while (vreg >= cfg->vreg_is_mp_len)
907 cfg->vreg_is_mp_len = cfg->vreg_is_mp_len ? cfg->vreg_is_mp_len * 2 : 32;
908 cfg->vreg_is_mp = mono_mempool_alloc0 (cfg->mempool, sizeof (gboolean) * cfg->vreg_is_mp_len);
910 memcpy (cfg->vreg_is_mp, tmp, size * sizeof (gboolean));
912 cfg->vreg_is_mp [vreg] = TRUE;
/* Map an instruction's evaluation-stack type (STACK_*) back to a MonoType.
 * Aborts on stack types with no mapping. */
916 type_from_stack_type (MonoInst *ins) {
918 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
919 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
920 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
921 case STACK_R8: return &mono_defaults.double_class->byval_arg;
924 * this used to be commented out without any specific reason, but
925 * it breaks #80235 when commented
928 return &ins->klass->this_arg;
930 return &mono_defaults.object_class->this_arg;
932 /* ins->klass may not be set for ldnull.
933 * Also, if we have a boxed valuetype, we want an object class,
934 * not the valuetype class
936 if (ins->klass && !ins->klass->valuetype)
937 return &ins->klass->byval_arg;
938 return &mono_defaults.object_class->byval_arg;
939 case STACK_VTYPE: return &ins->klass->byval_arg;
941 g_error ("stack type %d to montype not handled\n", ins->type);
/* Public wrapper around the static type_from_stack_type helper above. */
947 mono_type_from_stack_type (MonoInst *ins) {
948 return type_from_stack_type (ins);
952 * mono_add_ins_to_end:
954 * Same as MONO_ADD_INS, but add INST before any branches at the end of BB.
957 mono_add_ins_to_end (MonoBasicBlock *bb, MonoInst *inst)
/* Empty block: just append. */
962 MONO_ADD_INS (bb, inst);
966 switch (bb->last_ins->opcode) {
980 mono_bblock_insert_before_ins (bb, bb->last_ins, inst);
/* Conditional branches are usually preceded by their compare instruction;
 * INST must go before the compare, not between compare and branch. */
983 if (MONO_IS_COND_BRANCH_OP (bb->last_ins)) {
984 /* Need to insert the ins before the compare */
985 if (bb->code == bb->last_ins) {
986 mono_bblock_insert_before_ins (bb, bb->last_ins, inst);
990 if (bb->code->next == bb->last_ins) {
991 /* Only two instructions */
992 opcode = bb->code->opcode;
994 if ((opcode == OP_COMPARE) || (opcode == OP_COMPARE_IMM) || (opcode == OP_ICOMPARE) || (opcode == OP_ICOMPARE_IMM) || (opcode == OP_FCOMPARE) || (opcode == OP_LCOMPARE) || (opcode == OP_LCOMPARE_IMM) || (opcode == OP_RCOMPARE)) {
996 mono_bblock_insert_before_ins (bb, bb->code, inst);
998 mono_bblock_insert_before_ins (bb, bb->last_ins, inst);
/* General case: inspect the instruction just before the branch. */
1001 opcode = bb->last_ins->prev->opcode;
1003 if ((opcode == OP_COMPARE) || (opcode == OP_COMPARE_IMM) || (opcode == OP_ICOMPARE) || (opcode == OP_ICOMPARE_IMM) || (opcode == OP_FCOMPARE) || (opcode == OP_LCOMPARE) || (opcode == OP_LCOMPARE_IMM) || (opcode == OP_RCOMPARE)) {
1005 mono_bblock_insert_before_ins (bb, bb->last_ins->prev, inst);
1007 mono_bblock_insert_before_ins (bb, bb->last_ins, inst);
/* Last instruction is not a branch: plain append. */
1012 MONO_ADD_INS (bb, inst);
/*
 * mono_create_jump_table:
 *   Record a MONO_PATCH_INFO_SWITCH patch for LABEL covering NUM_BLOCKS
 *   targets in BBS, prepending it to cfg->patch_info. All allocations come
 *   from the compile mempool.
 */
1018 mono_create_jump_table (MonoCompile *cfg, MonoInst *label, MonoBasicBlock **bbs, int num_blocks)
1020 MonoJumpInfo *ji = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfo));
1021 MonoJumpInfoBBTable *table;
1023 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
1025 table->table_size = num_blocks;
1027 ji->ip.label = label;
1028 ji->type = MONO_PATCH_INFO_SWITCH;
1029 ji->data.table = table;
1030 ji->next = cfg->patch_info;
1031 cfg->patch_info = ji;
1034 static MonoMethodSignature *
/*
 * mono_get_array_new_va_signature:
 *   Build (and cache per arity in a static hash) the icall signature for
 *   ves_array_new_va: (intptr, arity x intptr) -> object.
 */
1035 mono_get_array_new_va_signature (int arity)
1037 static GHashTable *sighash;
1038 MonoMethodSignature *res;
/* Lazily create the cache, then check it before allocating. */
1043 sighash = g_hash_table_new (NULL, NULL);
1045 else if ((res = g_hash_table_lookup (sighash, GINT_TO_POINTER (arity)))) {
1050 res = mono_metadata_signature_alloc (mono_defaults.corlib, arity + 1);
1053 if (ARCH_VARARG_ICALLS)
1054 /* Only set this on some archs since not all backends can handle varargs+pinvoke */
1055 res->call_convention = MONO_CALL_VARARG;
1058 res->call_convention = MONO_CALL_C;
/* params[0] is the class/vtable pointer; the rest are the dimension sizes. */
1061 res->params [0] = &mono_defaults.int_class->byval_arg;
1062 for (i = 0; i < arity; i++)
1063 res->params [i + 1] = &mono_defaults.int_class->byval_arg;
1065 res->ret = &mono_defaults.object_class->byval_arg;
1067 g_hash_table_insert (sighash, GINT_TO_POINTER (arity), res);
/*
 * mono_get_array_new_va_icall:
 *   Return (registering on first use) the JIT icall info for
 *   ves_array_new_va_<rank>.
 */
1074 mono_get_array_new_va_icall (int rank)
1076 MonoMethodSignature *esig;
1077 char icall_name [256];
1079 MonoJitICallInfo *info;
1081 /* Need to register the icall so it gets an icall wrapper */
1082 sprintf (icall_name, "ves_array_new_va_%d", rank);
1085 info = mono_find_jit_icall_by_name (icall_name);
/* Not yet registered: build the signature and register it now. */
1087 esig = mono_get_array_new_va_signature (rank);
1088 name = g_strdup (icall_name);
1089 info = mono_register_jit_icall (mono_array_new_va, name, esig, FALSE);
/* Whether KLASS derives directly from System.Array. */
1097 mini_class_is_system_array (MonoClass *klass)
1099 if (klass->parent == mono_defaults.array_class)
/*
 * mini_assembly_can_skip_verification:
 *   Whether METHOD's assembly may skip IL verification: GAC/corlib assemblies
 *   qualify, non-wrapper methods and security-enabled runtimes are checked
 *   first, otherwise the SkipVerification permission on the assembly decides.
 */
1106 mini_assembly_can_skip_verification (MonoDomain *domain, MonoMethod *method)
1108 MonoAssembly *assembly = method->klass->image->assembly;
1109 if (method->wrapper_type != MONO_WRAPPER_NONE && method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
1111 if (assembly->in_gac || assembly->image == mono_defaults.corlib)
1113 if (mono_security_enabled ())
1115 return mono_assembly_has_skip_verification (assembly);
1119 * mini_method_verify:
1121 * Verify the method using the new verifier.
1123 * Returns true if the method is invalid.
1126 mini_method_verify (MonoCompile *cfg, MonoMethod *method, gboolean fail_compile)
1129 gboolean is_fulltrust;
1130 MonoLoaderError *error;
/* Fast paths: already verified, or verifier disabled for this method. */
1132 if (method->verification_success)
1135 if (!mono_verifier_is_enabled_for_method (method))
1138 /*skip verification implies the assembly must be fully trusted */
1139 is_fulltrust = mono_verifier_is_method_full_trust (method) || mini_assembly_can_skip_verification (cfg->domain, method);
1141 res = mono_method_verify_with_current_settings (method, cfg->skip_visibility, is_fulltrust);
/* Loader errors during verification become the compile's exception. */
1143 if ((error = mono_loader_get_last_error ())) {
1145 cfg->exception_type = error->exception_type;
1147 mono_loader_clear_error ();
1149 mono_free_verify_list (res);
/* Walk the verifier's findings; hard errors always fail the method. */
1154 for (tmp = res; tmp; tmp = tmp->next) {
1155 MonoVerifyInfoExtended *info = (MonoVerifyInfoExtended *)tmp->data;
1156 if (info->info.status == MONO_VERIFY_ERROR) {
1158 char *method_name = mono_method_full_name (method, TRUE);
1159 cfg->exception_type = info->exception_type;
1160 cfg->exception_message = g_strdup_printf ("Error verifying %s: %s", method_name, info->info.message);
1161 g_free (method_name);
1163 mono_free_verify_list (res);
/* Non-verifiable code fails unless fully trusted; access violations fail regardless. */
1166 if (info->info.status == MONO_VERIFY_NOT_VERIFIABLE && (!is_fulltrust || info->exception_type == MONO_EXCEPTION_METHOD_ACCESS || info->exception_type == MONO_EXCEPTION_FIELD_ACCESS)) {
1168 char *method_name = mono_method_full_name (method, TRUE);
1169 cfg->exception_type = info->exception_type;
1170 cfg->exception_message = g_strdup_printf ("Error verifying %s: %s", method_name, info->info.message);
1171 g_free (method_name);
1173 mono_free_verify_list (res);
1177 mono_free_verify_list (res);
/* Cache success so re-verification is skipped next time. */
1179 method->verification_success = 1;
1183 /*Returns true if something went wrong*/
1185 mono_compile_is_broken (MonoCompile *cfg, MonoMethod *method, gboolean fail_compile)
1187 MonoMethod *method_definition = method;
/* corlib-internal assemblies are exempt from verification. */
1188 gboolean dont_verify = method->klass->image->assembly->corlib_internal;
/* Verify the open generic definition, not the inflated instantiation. */
1190 while (method_definition->is_inflated) {
1191 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
1192 method_definition = imethod->declaring;
1195 return !dont_verify && mini_method_verify (cfg, method_definition, fail_compile);
/* Register JI for METHOD in the domain's dynamic-code hash, creating the
 * hash lazily. NOTE(review): caller presumably holds the domain lock — confirm. */
1199 mono_dynamic_code_hash_insert (MonoDomain *domain, MonoMethod *method, MonoJitDynamicMethodInfo *ji)
1201 if (!domain_jit_info (domain)->dynamic_code_hash)
1202 domain_jit_info (domain)->dynamic_code_hash = g_hash_table_new (NULL, NULL)
1203 g_hash_table_insert (domain_jit_info (domain)->dynamic_code_hash, method, ji);
1206 static MonoJitDynamicMethodInfo*
/* Look up METHOD in the domain's dynamic-code hash; NULL-safe if the hash
 * was never created. */
1207 mono_dynamic_code_hash_lookup (MonoDomain *domain, MonoMethod *method)
1209 MonoJitDynamicMethodInfo *res;
1211 if (domain_jit_info (domain)->dynamic_code_hash)
1212 res = g_hash_table_lookup (domain_jit_info (domain)->dynamic_code_hash, method);
/* NOTE(review): members of an elided StackSlotInfo-style struct — the
 * enclosing definition is not visible in this view; kept as-is. */
1220 GList *active, *inactive;
/*
 * compare_by_interval_start_pos_func:
 *   GCompareFunc ordering MonoMethodVars by the start of their live interval;
 *   vars with a range sort before those without.
 */
1225 compare_by_interval_start_pos_func (gconstpointer a, gconstpointer b)
1227 MonoMethodVar *v1 = (MonoMethodVar*)a;
1228 MonoMethodVar *v2 = (MonoMethodVar*)b;
1232 else if (v1->interval->range && v2->interval->range)
1233 return v1->interval->range->from - v2->interval->range->from;
1234 else if (v1->interval->range)
/* Linear-scan debug tracing: the first definition executes its argument, the
 * second compiles it away. NOTE(review): the #if/#else guards selecting
 * between them are elided from this view. */
1243 #define LSCAN_DEBUG(a) do { a; } while (0)
1245 #define LSCAN_DEBUG(a)
1249 mono_allocate_stack_slots2 (MonoCompile *cfg, gboolean backward, guint32 *stack_size, guint32 *stack_align)
1251 int i, slot, offset, size;
1256 GList *vars = NULL, *l, *unhandled;
1257 StackSlotInfo *scalar_stack_slots, *vtype_stack_slots, *slot_info;
1260 gboolean reuse_slot;
1262 LSCAN_DEBUG (printf ("Allocate Stack Slots 2 for %s:\n", mono_method_full_name (cfg->method, TRUE)));
1264 scalar_stack_slots = mono_mempool_alloc0 (cfg->mempool, sizeof (StackSlotInfo) * MONO_TYPE_PINNED);
1265 vtype_stack_slots = NULL;
1268 offsets = mono_mempool_alloc (cfg->mempool, sizeof (gint32) * cfg->num_varinfo);
1269 for (i = 0; i < cfg->num_varinfo; ++i)
1272 for (i = cfg->locals_start; i < cfg->num_varinfo; i++) {
1273 inst = cfg->varinfo [i];
1274 vmv = MONO_VARINFO (cfg, i);
1276 if ((inst->flags & MONO_INST_IS_DEAD) || inst->opcode == OP_REGVAR || inst->opcode == OP_REGOFFSET)
1279 vars = g_list_prepend (vars, vmv);
1282 vars = g_list_sort (g_list_copy (vars), compare_by_interval_start_pos_func);
1287 for (unhandled = vars; unhandled; unhandled = unhandled->next) {
1288 MonoMethodVar *current = unhandled->data;
1290 if (current->interval->range) {
1291 g_assert (current->interval->range->from >= i);
1292 i = current->interval->range->from;
1299 for (unhandled = vars; unhandled; unhandled = unhandled->next) {
1300 MonoMethodVar *current = unhandled->data;
1303 inst = cfg->varinfo [vmv->idx];
1305 t = mono_type_get_underlying_type (inst->inst_vtype);
1306 if (cfg->gsharedvt && mini_is_gsharedvt_variable_type (cfg, t))
1309 /* inst->backend.is_pinvoke indicates native sized value types, this is used by the
1310 * pinvoke wrappers when they call functions returning structures */
1311 if (inst->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (t) && t->type != MONO_TYPE_TYPEDBYREF) {
1312 size = mono_class_native_size (mono_class_from_mono_type (t), &align);
1317 size = mini_type_stack_size (NULL, t, &ialign);
1320 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (t)))
1325 if (cfg->disable_reuse_stack_slots)
1328 t = mini_get_underlying_type (cfg, t);
1330 case MONO_TYPE_GENERICINST:
1331 if (!mono_type_generic_inst_is_valuetype (t)) {
1332 slot_info = &scalar_stack_slots [t->type];
1336 case MONO_TYPE_VALUETYPE:
1337 if (!vtype_stack_slots)
1338 vtype_stack_slots = mono_mempool_alloc0 (cfg->mempool, sizeof (StackSlotInfo) * 256);
1339 for (i = 0; i < nvtypes; ++i)
1340 if (t->data.klass == vtype_stack_slots [i].vtype)
1343 slot_info = &vtype_stack_slots [i];
1345 g_assert (nvtypes < 256);
1346 vtype_stack_slots [nvtypes].vtype = t->data.klass;
1347 slot_info = &vtype_stack_slots [nvtypes];
1350 if (cfg->disable_reuse_ref_stack_slots)
1357 #if SIZEOF_VOID_P == 4
1362 if (cfg->disable_ref_noref_stack_slot_share) {
1363 slot_info = &scalar_stack_slots [MONO_TYPE_I];
1368 case MONO_TYPE_CLASS:
1369 case MONO_TYPE_OBJECT:
1370 case MONO_TYPE_ARRAY:
1371 case MONO_TYPE_SZARRAY:
1372 case MONO_TYPE_STRING:
1373 /* Share non-float stack slots of the same size */
1374 slot_info = &scalar_stack_slots [MONO_TYPE_CLASS];
1375 if (cfg->disable_reuse_ref_stack_slots)
1380 slot_info = &scalar_stack_slots [t->type];
1384 if (cfg->comp_done & MONO_COMP_LIVENESS) {
1388 //printf ("START %2d %08x %08x\n", vmv->idx, vmv->range.first_use.abs_pos, vmv->range.last_use.abs_pos);
1390 if (!current->interval->range) {
1391 if (inst->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT))
1395 inst->flags |= MONO_INST_IS_DEAD;
1400 pos = current->interval->range->from;
1402 LSCAN_DEBUG (printf ("process R%d ", inst->dreg));
1403 if (current->interval->range)
1404 LSCAN_DEBUG (mono_linterval_print (current->interval));
1405 LSCAN_DEBUG (printf ("\n"));
1407 /* Check for intervals in active which expired or inactive */
1409 /* FIXME: Optimize this */
1412 for (l = slot_info->active; l != NULL; l = l->next) {
1413 MonoMethodVar *v = (MonoMethodVar*)l->data;
1415 if (v->interval->last_range->to < pos) {
1416 slot_info->active = g_list_delete_link (slot_info->active, l);
1417 slot_info->slots = g_slist_prepend_mempool (cfg->mempool, slot_info->slots, GINT_TO_POINTER (offsets [v->idx]));
1418 LSCAN_DEBUG (printf ("Interval R%d has expired, adding 0x%x to slots\n", cfg->varinfo [v->idx]->dreg, offsets [v->idx]));
1422 else if (!mono_linterval_covers (v->interval, pos)) {
1423 slot_info->inactive = g_list_append (slot_info->inactive, v);
1424 slot_info->active = g_list_delete_link (slot_info->active, l);
1425 LSCAN_DEBUG (printf ("Interval R%d became inactive\n", cfg->varinfo [v->idx]->dreg));
1432 /* Check for intervals in inactive which expired or active */
1434 /* FIXME: Optimize this */
1437 for (l = slot_info->inactive; l != NULL; l = l->next) {
1438 MonoMethodVar *v = (MonoMethodVar*)l->data;
1440 if (v->interval->last_range->to < pos) {
1441 slot_info->inactive = g_list_delete_link (slot_info->inactive, l);
1442 // FIXME: Enabling this seems to cause impossible to debug crashes
1443 //slot_info->slots = g_slist_prepend_mempool (cfg->mempool, slot_info->slots, GINT_TO_POINTER (offsets [v->idx]));
1444 LSCAN_DEBUG (printf ("Interval R%d has expired, adding 0x%x to slots\n", cfg->varinfo [v->idx]->dreg, offsets [v->idx]));
1448 else if (mono_linterval_covers (v->interval, pos)) {
1449 slot_info->active = g_list_append (slot_info->active, v);
1450 slot_info->inactive = g_list_delete_link (slot_info->inactive, l);
1451 LSCAN_DEBUG (printf ("\tInterval R%d became active\n", cfg->varinfo [v->idx]->dreg));
1459 * This also handles the case when the variable is used in an
1460 * exception region, as liveness info is not computed there.
1463 * FIXME: All valuetypes are marked as INDIRECT because of LDADDR
1466 if (! (inst->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT))) {
1467 if (slot_info->slots) {
1468 slot = GPOINTER_TO_INT (slot_info->slots->data);
1470 slot_info->slots = slot_info->slots->next;
1473 /* FIXME: We might want to consider the inactive intervals as well if slot_info->slots is empty */
1475 slot_info->active = mono_varlist_insert_sorted (cfg, slot_info->active, vmv, TRUE);
1481 static int count = 0;
1484 if (count == atoi (g_getenv ("COUNT3")))
1485 printf ("LAST: %s\n", mono_method_full_name (cfg->method, TRUE));
1486 if (count > atoi (g_getenv ("COUNT3")))
1489 mono_print_ins (inst);
1494 LSCAN_DEBUG (printf ("R%d %s -> 0x%x\n", inst->dreg, mono_type_full_name (t), slot));
1496 if (inst->flags & MONO_INST_LMF) {
1497 size = sizeof (MonoLMF);
1498 align = sizeof (mgreg_t);
1505 if (slot == 0xffffff) {
1507 * Always allocate valuetypes to sizeof (gpointer) to allow more
1508 * efficient copying (and to work around the fact that OP_MEMCPY
1509 * and OP_MEMSET ignore alignment).
1511 if (MONO_TYPE_ISSTRUCT (t)) {
1512 align = MAX (align, sizeof (gpointer));
1513 align = MAX (align, mono_class_min_align (mono_class_from_mono_type (t)));
1518 offset += align - 1;
1519 offset &= ~(align - 1);
1523 offset += align - 1;
1524 offset &= ~(align - 1);
1529 if (*stack_align == 0)
1530 *stack_align = align;
1533 offsets [vmv->idx] = slot;
1536 for (i = 0; i < MONO_TYPE_PINNED; ++i) {
1537 if (scalar_stack_slots [i].active)
1538 g_list_free (scalar_stack_slots [i].active);
1540 for (i = 0; i < nvtypes; ++i) {
1541 if (vtype_stack_slots [i].active)
1542 g_list_free (vtype_stack_slots [i].active);
1545 cfg->stat_locals_stack_size += offset;
1547 *stack_size = offset;
1552 * mono_allocate_stack_slots:
1554 * Allocate stack slots for all non register allocated variables using a
1555 * linear scan algorithm.
1556 * Returns: an array of stack offsets.
1557 * STACK_SIZE is set to the amount of stack space needed.
1558 * STACK_ALIGN is set to the alignment needed by the locals area.
1561 mono_allocate_stack_slots (MonoCompile *cfg, gboolean backward, guint32 *stack_size, guint32 *stack_align)
/* If liveness intervals were computed (linear scan data present on var 0),
 * delegate to the interval-based allocator instead. */
1563 int i, slot, offset, size;
1568 GList *vars = NULL, *l;
1569 StackSlotInfo *scalar_stack_slots, *vtype_stack_slots, *slot_info;
1572 gboolean reuse_slot;
1574 if ((cfg->num_varinfo > 0) && MONO_VARINFO (cfg, 0)->interval)
1575 return mono_allocate_stack_slots2 (cfg, backward, stack_size, stack_align);
/* One StackSlotInfo per scalar type tag; vtype slots are allocated lazily below. */
1577 scalar_stack_slots = mono_mempool_alloc0 (cfg->mempool, sizeof (StackSlotInfo) * MONO_TYPE_PINNED);
1578 vtype_stack_slots = NULL;
1581 offsets = mono_mempool_alloc (cfg->mempool, sizeof (gint32) * cfg->num_varinfo);
1582 for (i = 0; i < cfg->num_varinfo; ++i)
/* Collect the locals that actually need a stack slot: skip dead vars and
 * vars already placed in a register or at a fixed register offset. */
1585 for (i = cfg->locals_start; i < cfg->num_varinfo; i++) {
1586 inst = cfg->varinfo [i];
1587 vmv = MONO_VARINFO (cfg, i);
1589 if ((inst->flags & MONO_INST_IS_DEAD) || inst->opcode == OP_REGVAR || inst->opcode == OP_REGOFFSET)
1592 vars = g_list_prepend (vars, vmv);
1595 vars = mono_varlist_sort (cfg, vars, 0);
1597 *stack_align = sizeof(mgreg_t);
1598 for (l = vars; l; l = l->next) {
1600 inst = cfg->varinfo [vmv->idx];
1602 t = mono_type_get_underlying_type (inst->inst_vtype);
1603 if (cfg->gsharedvt && mini_is_gsharedvt_variable_type (cfg, t))
1606 /* inst->backend.is_pinvoke indicates native sized value types, this is used by the
1607 * pinvoke wrappers when they call functions returning structures */
1608 if (inst->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (t) && t->type != MONO_TYPE_TYPEDBYREF) {
1609 size = mono_class_native_size (mono_class_from_mono_type (t), &align);
1613 size = mini_type_stack_size (NULL, t, &ialign);
/* A class that failed to load poisons the whole compile with a TypeLoadException. */
1616 if (mono_class_from_mono_type (t)->exception_type)
1617 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
1619 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (t)))
1624 if (cfg->disable_reuse_stack_slots)
1627 t = mini_get_underlying_type (cfg, t);
/* Pick the slot-sharing pool (slot_info) based on the variable's type. */
1629 case MONO_TYPE_GENERICINST:
1630 if (!mono_type_generic_inst_is_valuetype (t)) {
1631 slot_info = &scalar_stack_slots [t->type];
1635 case MONO_TYPE_VALUETYPE:
1636 if (!vtype_stack_slots)
1637 vtype_stack_slots = mono_mempool_alloc0 (cfg->mempool, sizeof (StackSlotInfo) * 256);
/* Each distinct valuetype class gets its own pool; at most 256 distinct vtypes. */
1638 for (i = 0; i < nvtypes; ++i)
1639 if (t->data.klass == vtype_stack_slots [i].vtype)
1642 slot_info = &vtype_stack_slots [i];
1644 g_assert (nvtypes < 256);
1645 vtype_stack_slots [nvtypes].vtype = t->data.klass;
1646 slot_info = &vtype_stack_slots [nvtypes];
1649 if (cfg->disable_reuse_ref_stack_slots)
1656 #if SIZEOF_VOID_P == 4
1661 if (cfg->disable_ref_noref_stack_slot_share) {
1662 slot_info = &scalar_stack_slots [MONO_TYPE_I];
1667 case MONO_TYPE_CLASS:
1668 case MONO_TYPE_OBJECT:
1669 case MONO_TYPE_ARRAY:
1670 case MONO_TYPE_SZARRAY:
1671 case MONO_TYPE_STRING:
1672 /* Share non-float stack slots of the same size */
1673 slot_info = &scalar_stack_slots [MONO_TYPE_CLASS];
1674 if (cfg->disable_reuse_ref_stack_slots)
1678 case MONO_TYPE_MVAR:
1679 slot_info = &scalar_stack_slots [t->type];
1682 slot_info = &scalar_stack_slots [t->type];
1687 if (cfg->comp_done & MONO_COMP_LIVENESS) {
1688 //printf ("START %2d %08x %08x\n", vmv->idx, vmv->range.first_use.abs_pos, vmv->range.last_use.abs_pos);
1690 /* expire old intervals in active */
1691 while (slot_info->active) {
1692 MonoMethodVar *amv = (MonoMethodVar *)slot_info->active->data;
1694 if (amv->range.last_use.abs_pos > vmv->range.first_use.abs_pos)
1697 //printf ("EXPIR %2d %08x %08x C%d R%d\n", amv->idx, amv->range.first_use.abs_pos, amv->range.last_use.abs_pos, amv->spill_costs, amv->reg);
/* Expired interval: recycle its stack offset by pushing it on the free-slot list. */
1699 slot_info->active = g_list_delete_link (slot_info->active, slot_info->active);
1700 slot_info->slots = g_slist_prepend_mempool (cfg->mempool, slot_info->slots, GINT_TO_POINTER (offsets [amv->idx]));
1704 * This also handles the case when the variable is used in an
1705 * exception region, as liveness info is not computed there.
1708 * FIXME: All valuetypes are marked as INDIRECT because of LDADDR
1711 if (! (inst->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT))) {
1712 if (slot_info->slots) {
1713 slot = GPOINTER_TO_INT (slot_info->slots->data);
1715 slot_info->slots = slot_info->slots->next;
1718 slot_info->active = mono_varlist_insert_sorted (cfg, slot_info->active, vmv, TRUE);
/* Debugging aid: bisect slot-reuse decisions via the COUNT environment variable. */
1723 static int count = 0;
1727 if (count == atoi (g_getenv ("COUNT")))
1728 printf ("LAST: %s\n", mono_method_full_name (cfg->method, TRUE));
1729 if (count > atoi (g_getenv ("COUNT")))
1732 mono_print_ins (inst);
1737 if (inst->flags & MONO_INST_LMF) {
1739 * This variable represents a MonoLMF structure, which has no corresponding
1740 * CLR type, so hard-code its size/alignment.
1742 size = sizeof (MonoLMF);
1743 align = sizeof (mgreg_t);
/* No reusable slot found (0xffffff sentinel): carve a fresh one out of the frame. */
1750 if (slot == 0xffffff) {
1752 * Always allocate valuetypes to sizeof (gpointer) to allow more
1753 * efficient copying (and to work around the fact that OP_MEMCPY
1754 * and OP_MEMSET ignore alignment).
1756 if (MONO_TYPE_ISSTRUCT (t)) {
1757 align = MAX (align, sizeof (gpointer));
1758 align = MAX (align, mono_class_min_align (mono_class_from_mono_type (t)));
1760 * Align the size too so the code generated for passing vtypes in
1761 * registers doesn't overwrite random locals.
1763 size = (size + (align - 1)) & ~(align -1);
/* Round the running offset up to the required alignment
 * (two copies: presumably one per growth direction — see 'backward'; TODO confirm). */
1768 offset += align - 1;
1769 offset &= ~(align - 1);
1773 offset += align - 1;
1774 offset &= ~(align - 1);
1779 *stack_align = MAX (*stack_align, align);
1782 offsets [vmv->idx] = slot;
/* The active lists were allocated with g_list_* (not the mempool), so free them. */
1785 for (i = 0; i < MONO_TYPE_PINNED; ++i) {
1786 if (scalar_stack_slots [i].active)
1787 g_list_free (scalar_stack_slots [i].active);
1789 for (i = 0; i < nvtypes; ++i) {
1790 if (vtype_stack_slots [i].active)
1791 g_list_free (vtype_stack_slots [i].active);
1794 cfg->stat_locals_stack_size += offset;
1796 *stack_size = offset;
/* DISABLE_JIT stub: stack slot allocation must never be reached without the JIT. */
1803 mono_allocate_stack_slots (MonoCompile *cfg, gboolean backward, guint32 *stack_size, guint32 *stack_align)
1805 g_assert_not_reached ();
1809 #endif /* DISABLE_JIT */
/*
 * Registry of opcodes emulated via icalls (see mini_register_opcode_emulation).
 * emul_opcode_opcodes/emul_opcode_map are parallel arrays mapping opcode -> icall info;
 * the byte-sized bitmap below lets mono_find_jit_opcode_emulation () skip the linear
 * scan for opcodes that were never registered.
 */
1811 #define EMUL_HIT_SHIFT 3
1812 #define EMUL_HIT_MASK ((1 << EMUL_HIT_SHIFT) - 1)
1813 /* small hit bitmap cache */
1814 static mono_byte emul_opcode_hit_cache [(OP_LAST>>EMUL_HIT_SHIFT) + 1] = {0};
1815 static short emul_opcode_num = 0;
1816 static short emul_opcode_alloced = 0;
1817 static short *emul_opcode_opcodes;
1818 static MonoJitICallInfo **emul_opcode_map;
/*
 * mono_find_jit_opcode_emulation:
 *
 * Look up the emulation icall registered for OPCODE. The bitmap check is only a
 * fast-path filter: several opcodes can share a bit, so a set bit may be a false
 * positive (resolved by the linear scan), but the setter in
 * mini_register_opcode_emulation () uses the same indexing, so a clear bit is a
 * definitive miss.
 */
1821 mono_find_jit_opcode_emulation (int opcode)
1823 g_assert (opcode >= 0 && opcode <= OP_LAST);
1824 if (emul_opcode_hit_cache [opcode >> (EMUL_HIT_SHIFT + 3)] & (1 << (opcode & EMUL_HIT_MASK))) {
1826 for (i = 0; i < emul_opcode_num; ++i) {
1827 if (emul_opcode_opcodes [i] == opcode)
1828 return emul_opcode_map [i];
/*
 * mini_register_opcode_emulation:
 *
 * Register FUNC as the icall emulating OPCODE. SIGSTR describes the icall
 * signature (no 'this', at most 2 parameters). The parallel opcode/map arrays
 * grow by 50% (initially 16 entries) when full, and the hit-bitmap bit for
 * OPCODE is set so lookups take the fast path.
 */
1835 mini_register_opcode_emulation (int opcode, const char *name, const char *sigstr, gpointer func, const char *symbol, gboolean no_throw)
1837 MonoJitICallInfo *info;
1838 MonoMethodSignature *sig = mono_create_icall_signature (sigstr);
1840 g_assert (!sig->hasthis);
1841 g_assert (sig->param_count < 3);
1843 /* Opcode emulation functions are assumed not to call mono_raise_exception () */
1844 info = mono_register_jit_icall_full (func, name, sig, no_throw, TRUE, symbol);
1846 if (emul_opcode_num >= emul_opcode_alloced) {
1847 int incr = emul_opcode_alloced? emul_opcode_alloced/2: 16;
1848 emul_opcode_alloced += incr;
1849 emul_opcode_map = g_realloc (emul_opcode_map, sizeof (emul_opcode_map [0]) * emul_opcode_alloced);
1850 emul_opcode_opcodes = g_realloc (emul_opcode_opcodes, sizeof (emul_opcode_opcodes [0]) * emul_opcode_alloced);
1852 emul_opcode_map [emul_opcode_num] = info;
1853 emul_opcode_opcodes [emul_opcode_num] = opcode;
1855 emul_opcode_hit_cache [opcode >> (EMUL_HIT_SHIFT + 3)] |= (1 << (opcode & EMUL_HIT_MASK));
/*
 * print_dfn:
 *
 * Debug dump of the method IR: for each basic block print its instructions,
 * predecessor/successor edges, immediate dominator, dominator set and
 * dominance frontier.
 */
1859 print_dfn (MonoCompile *cfg) {
1866 char *method_name = mono_method_full_name (cfg->method, TRUE);
1867 g_print ("IR code for method %s\n", method_name);
1868 g_free (method_name);
1871 for (i = 0; i < cfg->num_bblocks; ++i) {
1872 bb = cfg->bblocks [i];
1873 /*if (bb->cil_code) {
1874 char* code1, *code2;
1875 code1 = mono_disasm_code_one (NULL, cfg->method, bb->cil_code, NULL);
1876 if (bb->last_ins->cil_code)
1877 code2 = mono_disasm_code_one (NULL, cfg->method, bb->last_ins->cil_code, NULL);
1879 code2 = g_strdup ("");
1881 code1 [strlen (code1) - 1] = 0;
1882 code = g_strdup_printf ("%s -> %s", code1, code2);
1886 code = g_strdup ("\n");
1887 g_print ("\nBB%d (%d) (len: %d): %s", bb->block_num, i, bb->cil_length, code);
1888 MONO_BB_FOR_EACH_INS (bb, c) {
1889 mono_print_ins_index (-1, c);
1892 g_print ("\tprev:");
1893 for (j = 0; j < bb->in_count; ++j) {
1894 g_print (" BB%d", bb->in_bb [j]->block_num);
1896 g_print ("\t\tsucc:");
1897 for (j = 0; j < bb->out_count; ++j) {
1898 g_print (" BB%d", bb->out_bb [j]->block_num);
1900 g_print ("\n\tidom: BB%d\n", bb->idom? bb->idom->block_num: -1);
/* Sanity: a block's idom must be in its dominator set. */
1903 g_assert (mono_bitset_test_fast (bb->dominators, bb->idom->dfn));
1906 mono_blockset_print (cfg, bb->dominators, "\tdominators", bb->idom? bb->idom->dfn: -1);
1908 mono_blockset_print (cfg, bb->dfrontier, "\tdfrontier", -1);
/* Append INST at the end of BB (thin wrapper around the MONO_ADD_INS macro). */
1916 mono_bblock_add_inst (MonoBasicBlock *bb, MonoInst *inst)
1918 MONO_ADD_INS (bb, inst);
/*
 * mono_bblock_insert_after_ins:
 *
 * Insert INS_TO_INSERT into BB's doubly-linked instruction list right after INS,
 * keeping bb->code / bb->last_ins consistent. The first branch presumably
 * handles INS == NULL (insert at the head) — TODO confirm against the elided
 * condition.
 */
1922 mono_bblock_insert_after_ins (MonoBasicBlock *bb, MonoInst *ins, MonoInst *ins_to_insert)
1926 bb->code = ins_to_insert;
1928 /* Link with next */
1929 ins_to_insert->next = ins;
1931 ins->prev = ins_to_insert;
1933 if (bb->last_ins == NULL)
1934 bb->last_ins = ins_to_insert;
1936 /* Link with next */
1937 ins_to_insert->next = ins->next;
1939 ins->next->prev = ins_to_insert;
1941 /* Link with previous */
1942 ins->next = ins_to_insert;
1943 ins_to_insert->prev = ins;
/* Inserting after the tail moves the last_ins pointer. */
1945 if (bb->last_ins == ins)
1946 bb->last_ins = ins_to_insert;
/*
 * mono_bblock_insert_before_ins:
 *
 * Insert INS_TO_INSERT into BB's instruction list right before INS, updating
 * bb->code (and bb->last_ins when the block was empty) as needed.
 */
1951 mono_bblock_insert_before_ins (MonoBasicBlock *bb, MonoInst *ins, MonoInst *ins_to_insert)
/* Head insertion: the new instruction becomes the first one in the block. */
1956 ins->prev = ins_to_insert;
1957 bb->code = ins_to_insert;
1958 ins_to_insert->next = ins;
1959 if (bb->last_ins == NULL)
1960 bb->last_ins = ins_to_insert;
1962 /* Link with previous */
1964 ins->prev->next = ins_to_insert;
1965 ins_to_insert->prev = ins->prev;
1967 /* Link with next */
1968 ins->prev = ins_to_insert;
1969 ins_to_insert->next = ins;
1971 if (bb->code == ins)
1972 bb->code = ins_to_insert;
1977 * mono_verify_bblock:
1979 * Verify that the next and prev pointers are consistent inside the instructions in BB.
1982 mono_verify_bblock (MonoBasicBlock *bb)
1984 MonoInst *ins, *prev;
/* Walk the list once, checking every back-pointer and that the tail has no successor. */
1987 for (ins = bb->code; ins; ins = ins->next) {
1988 g_assert (ins->prev == prev);
1992 g_assert (!bb->last_ins->next);
1998 * Perform consistency checks on the JIT data structures and the IR
2001 mono_verify_cfg (MonoCompile *cfg)
/* Currently just verifies every basic block's instruction list. */
2005 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
2006 mono_verify_bblock (bb);
/*
 * mono_destroy_compile:
 *
 * Free all memory owned by CFG: register state, hash tables, extra method
 * headers, the method-local mempool and auxiliary buffers. CFG itself is
 * presumably freed by an elided g_free at the end — TODO confirm.
 */
2010 mono_destroy_compile (MonoCompile *cfg)
2015 mono_metadata_free_mh (cfg->header);
2016 //mono_mempool_stats (cfg->mempool);
2017 mono_free_loop_info (cfg);
2019 mono_regstate_free (cfg->rs);
2021 g_hash_table_destroy (cfg->spvars);
2023 g_hash_table_destroy (cfg->exvars);
/* Headers of inlined methods are collected on this list during compilation. */
2024 for (l = cfg->headers_to_free; l; l = l->next)
2025 mono_metadata_free_mh (l->data);
2026 g_list_free (cfg->ldstr_list);
2027 g_hash_table_destroy (cfg->token_info_hash);
2028 if (cfg->abs_patches)
2029 g_hash_table_destroy (cfg->abs_patches);
2030 mono_mempool_destroy (cfg->mempool);
2032 mono_debug_free_method (cfg);
2034 g_free (cfg->varinfo);
2036 g_free (cfg->exception_message);
/*
 * mono_create_tls_get_offset:
 *
 * Create an OP_TLS_GET instruction loading the TLS slot at OFFSET into a fresh
 * integer vreg, or bail out when the architecture lacks a TLS-read opcode
 * (presumably returning NULL — the return statement is elided here).
 */
2043 mono_create_tls_get_offset (MonoCompile *cfg, int offset)
2047 if (!MONO_ARCH_HAVE_TLS_GET)
2053 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
2054 ins->dreg = mono_alloc_preg (cfg);
2055 ins->inst_offset = offset;
/*
 * mini_tls_get_supported:
 *
 * Return whether a TLS read for KEY can be emitted inline: requires an
 * architecture TLS-get opcode; under AOT the offset is not known at compile
 * time, so it additionally requires the register-based variant
 * (ARCH_HAVE_TLS_GET_REG); otherwise the key must have a resolved offset.
 */
2060 mini_tls_get_supported (MonoCompile *cfg, MonoTlsKey key)
2062 if (!MONO_ARCH_HAVE_TLS_GET)
2065 if (cfg->compile_aot)
2066 return ARCH_HAVE_TLS_GET_REG;
2068 return mini_get_tls_offset (key) != -1;
/*
 * mono_create_tls_get:
 *
 * Create an instruction reading the TLS slot identified by KEY. Under AOT the
 * offset is loaded indirectly (GOT slot via EMIT_NEW_TLS_OFFSETCONST) and fed
 * to OP_TLS_GET_REG; otherwise the known offset is baked into OP_TLS_GET.
 */
2072 mono_create_tls_get (MonoCompile *cfg, MonoTlsKey key)
2075 * TLS offsets might be different at AOT time, so load them from a GOT slot and
2076 * use a different opcode.
2078 if (cfg->compile_aot) {
2079 if (MONO_ARCH_HAVE_TLS_GET && ARCH_HAVE_TLS_GET_REG) {
2082 EMIT_NEW_TLS_OFFSETCONST (cfg, c, key);
2083 MONO_INST_NEW (cfg, ins, OP_TLS_GET_REG);
2084 ins->dreg = mono_alloc_preg (cfg);
2085 ins->sreg1 = c->dreg;
2092 return mono_create_tls_get_offset (cfg, mini_get_tls_offset (key));
/* TLS read of the per-thread MonoJitTlsData. */
2096 mono_get_jit_tls_intrinsic (MonoCompile *cfg)
2098 return mono_create_tls_get (cfg, TLS_KEY_JIT_TLS);
/* TLS read of the current MonoDomain. */
2102 mono_get_domain_intrinsic (MonoCompile* cfg)
2104 return mono_create_tls_get (cfg, TLS_KEY_DOMAIN);
/* TLS read of the current thread object. */
2108 mono_get_thread_intrinsic (MonoCompile* cfg)
2110 return mono_create_tls_get (cfg, TLS_KEY_THREAD);
/* TLS read of the current LMF (Last Managed Frame). */
2114 mono_get_lmf_intrinsic (MonoCompile* cfg)
2116 return mono_create_tls_get (cfg, TLS_KEY_LMF);
/* TLS read of the address of the LMF pointer. */
2120 mono_get_lmf_addr_intrinsic (MonoCompile* cfg)
2122 return mono_create_tls_get (cfg, TLS_KEY_LMF_ADDR);
2125 #endif /* !DISABLE_JIT */
/*
 * mono_add_patch_info:
 *
 * Prepend a new MonoJumpInfo patch of TYPE targeting TARGET at code offset IP
 * onto cfg->patch_info (mempool-allocated, so no explicit free is needed).
 */
2129 mono_add_patch_info (MonoCompile *cfg, int ip, MonoJumpInfoType type, gconstpointer target)
2131 MonoJumpInfo *ji = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfo));
2135 ji->data.target = target;
2136 ji->next = cfg->patch_info;
2138 cfg->patch_info = ji;
/*
 * mono_add_patch_info_rel:
 *
 * Like mono_add_patch_info (), but also records the RELOCATION kind to apply
 * when the patch is resolved.
 */
2142 mono_add_patch_info_rel (MonoCompile *cfg, int ip, MonoJumpInfoType type, gconstpointer target, int relocation)
2144 MonoJumpInfo *ji = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfo));
2148 ji->relocation = relocation;
2149 ji->data.target = target;
2150 ji->next = cfg->patch_info;
2152 cfg->patch_info = ji;
/*
 * mono_remove_patch_info:
 *
 * Unlink from cfg->patch_info every patch whose code offset equals IP,
 * using a pointer-to-pointer walk over the singly linked list.
 */
2156 mono_remove_patch_info (MonoCompile *cfg, int ip)
2158 MonoJumpInfo **ji = &cfg->patch_info;
2161 if ((*ji)->ip.i == ip)
2164 ji = &((*ji)->next);
/*
 * mono_add_seq_point:
 *
 * Record sequence-point instruction INS at NATIVE_OFFSET: append it to the
 * method-wide cfg->seq_points array and to BB's per-block list, and remember
 * it as the block's last sequence point.
 */
2169 mono_add_seq_point (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst *ins, int native_offset)
2171 ins->inst_offset = native_offset;
2172 g_ptr_array_add (cfg->seq_points, ins);
2174 bb->seq_points = g_slist_prepend_mempool (cfg->mempool, bb->seq_points, ins);
2175 bb->last_seq_point = ins;
/*
 * mono_add_var_location:
 *
 * Record a DWARF location-list entry for VAR over native code range [FROM, TO):
 * either register REG (OFFSET must be 0) or a stack offset. Only the 'this'
 * argument and the rgctx variable are tracked (appended to their loclists).
 */
2180 mono_add_var_location (MonoCompile *cfg, MonoInst *var, gboolean is_reg, int reg, int offset, int from, int to)
2182 MonoDwarfLocListEntry *entry = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoDwarfLocListEntry));
2185 g_assert (offset == 0);
2187 entry->is_reg = is_reg;
2189 entry->offset = offset;
2193 if (var == cfg->args [0])
2194 cfg->this_loclist = g_slist_append_mempool (cfg->mempool, cfg->this_loclist, entry);
2195 else if (var == cfg->rgctx_var)
2196 cfg->rgctx_loclist = g_slist_append_mempool (cfg->mempool, cfg->rgctx_loclist, entry);
/*
 * mono_compile_create_vars:
 *
 * Create the MonoInst variables for the method being compiled: the return
 * value (if non-void), 'this' plus the formal arguments, and the IL locals.
 * Also gives the architecture backend a chance to add its own vars and,
 * when requested, creates the LMF variable.
 */
2202 mono_compile_create_vars (MonoCompile *cfg)
2204 MonoMethodSignature *sig;
2205 MonoMethodHeader *header;
2208 header = cfg->header;
2210 sig = mono_method_signature (cfg->method);
2212 if (!MONO_TYPE_IS_VOID (sig->ret)) {
2213 cfg->ret = mono_compile_create_var (cfg, sig->ret, OP_ARG);
2214 /* Inhibit optimizations */
2215 cfg->ret->flags |= MONO_INST_VOLATILE;
2217 if (cfg->verbose_level > 2)
2218 g_print ("creating vars\n");
2220 cfg->args = mono_mempool_alloc0 (cfg->mempool, (sig->param_count + sig->hasthis) * sizeof (MonoInst*));
/* args [0] is 'this' when the method has an instance receiver. */
2223 cfg->args [0] = mono_compile_create_var (cfg, &cfg->method->klass->this_arg, OP_ARG);
2225 for (i = 0; i < sig->param_count; ++i) {
2226 cfg->args [i + sig->hasthis] = mono_compile_create_var (cfg, sig->params [i], OP_ARG);
/* Verbose dump of the freshly created return/this/argument vars. */
2229 if (cfg->verbose_level > 2) {
2231 printf ("\treturn : ");
2232 mono_print_ins (cfg->ret);
2236 printf ("\tthis: ");
2237 mono_print_ins (cfg->args [0]);
2240 for (i = 0; i < sig->param_count; ++i) {
2241 printf ("\targ [%d]: ", i);
2242 mono_print_ins (cfg->args [i + sig->hasthis]);
/* Locals start after the args in the varinfo array. */
2246 cfg->locals_start = cfg->num_varinfo;
2247 cfg->locals = mono_mempool_alloc0 (cfg->mempool, header->num_locals * sizeof (MonoInst*));
2249 if (cfg->verbose_level > 2)
2250 g_print ("creating locals\n");
2252 for (i = 0; i < header->num_locals; ++i)
2253 cfg->locals [i] = mono_compile_create_var (cfg, header->locals [i], OP_LOCAL);
2255 if (cfg->verbose_level > 2)
2256 g_print ("locals done\n");
2258 mono_arch_create_vars (cfg);
2260 if (cfg->method->save_lmf && cfg->create_lmf_var) {
/* The LMF var has no CLR type; it is typed as native int and kept on the stack. */
2261 MonoInst *lmf_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2262 lmf_var->flags |= MONO_INST_VOLATILE;
2263 lmf_var->flags |= MONO_INST_LMF;
2264 cfg->lmf_var = lmf_var;
/* Dump every basic block of CFG, labelling the output with MSG. */
2269 mono_print_code (MonoCompile *cfg, const char* msg)
2273 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
2274 mono_print_bb (bb, msg);
/*
 * mono_postprocess_patches:
 *
 * Normalize cfg->patch_info before patching the generated code:
 * - ABS patches whose target is a known JIT icall become INTERNAL_METHOD
 *   patches (except the vararg array helpers, which share a function pointer
 *   and would resolve ambiguously), or are rewritten via cfg->abs_patches;
 * - SWITCH patches get their jump table allocated in executable memory and
 *   filled with the blocks' native offsets;
 * - METHOD_JUMP patches are recorded in the domain's jump_target_hash so the
 *   jump sites can be re-patched when the target method is (re)compiled.
 */
2278 mono_postprocess_patches (MonoCompile *cfg)
2280 MonoJumpInfo *patch_info;
2283 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
2284 switch (patch_info->type) {
2285 case MONO_PATCH_INFO_ABS: {
2286 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (patch_info->data.target);
2289 * Change patches of type MONO_PATCH_INFO_ABS into patches describing the
2293 //printf ("TEST %s %p\n", info->name, patch_info->data.target);
2294 /* for these array methods we currently register the same function pointer
2295 * since it's a vararg function. But this means that mono_find_jit_icall_by_addr ()
2296 * will return the incorrect one depending on the order they are registered.
2297 * See tests/test-arr.cs
2299 if (strstr (info->name, "ves_array_new_va_") == NULL && strstr (info->name, "ves_array_element_address_") == NULL) {
2300 patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
2301 patch_info->data.name = info->name;
/* Still ABS (no icall matched): try the per-compile abs_patches table. */
2305 if (patch_info->type == MONO_PATCH_INFO_ABS) {
2306 if (cfg->abs_patches) {
2307 MonoJumpInfo *abs_ji = g_hash_table_lookup (cfg->abs_patches, patch_info->data.target);
2309 patch_info->type = abs_ji->type;
2310 patch_info->data.target = abs_ji->data.target;
2317 case MONO_PATCH_INFO_SWITCH: {
2319 #if defined(__native_client__) && defined(__native_client_codegen__)
2320 /* This memory will leak. */
2321 /* TODO: can we free this when */
2322 /* making the final jump table? */
2323 table = g_malloc0 (sizeof(gpointer) * patch_info->data.table->table_size);
/* Dynamic methods own their code manager; everything else uses the domain's. */
2325 if (cfg->method->dynamic) {
2326 table = mono_code_manager_reserve (cfg->dynamic_info->code_mp, sizeof (gpointer) * patch_info->data.table->table_size);
2328 table = mono_domain_code_reserve (cfg->domain, sizeof (gpointer) * patch_info->data.table->table_size);
2332 for (i = 0; i < patch_info->data.table->table_size; i++) {
2333 /* Might be NULL if the switch is eliminated */
2334 if (patch_info->data.table->table [i]) {
2335 g_assert (patch_info->data.table->table [i]->native_offset);
2336 table [i] = GINT_TO_POINTER (patch_info->data.table->table [i]->native_offset);
2341 patch_info->data.table->table = (MonoBasicBlock**)table;
2344 case MONO_PATCH_INFO_METHOD_JUMP: {
2345 MonoJumpList *jlist;
2346 MonoDomain *domain = cfg->domain;
2347 unsigned char *ip = cfg->native_code + patch_info->ip.i;
2348 #if defined(__native_client__) && defined(__native_client_codegen__)
2349 /* When this jump target gets evaluated, the method */
2350 /* will be installed in the dynamic code section, */
2351 /* not at the location of cfg->native_code. */
2352 ip = nacl_inverse_modify_patch_target (cfg->native_code) + patch_info->ip.i;
2355 mono_domain_lock (domain);
2356 jlist = g_hash_table_lookup (domain_jit_info (domain)->jump_target_hash, patch_info->data.method);
2358 jlist = mono_domain_alloc0 (domain, sizeof (MonoJumpList));
2359 g_hash_table_insert (domain_jit_info (domain)->jump_target_hash, patch_info->data.method, jlist);
2361 jlist->list = g_slist_prepend (jlist->list, ip);
2362 mono_domain_unlock (domain);
/*
 * mono_codegen:
 *
 * Final native code generation for CFG: per-block lowering, peephole passes
 * and local register allocation; prolog/body/epilog emission into a temporary
 * buffer; then copying into executable memory reserved from the appropriate
 * code manager, patching, profiler/debugger notification, icache flush and
 * unwind-info installation.
 */
2373 mono_codegen (MonoCompile *cfg)
2376 int max_epilog_size;
2378 MonoDomain *code_domain;
2380 if (mono_using_xdebug)
2382 * Recent gdb versions have trouble processing symbol files containing
2383 * overlapping address ranges, so allocate all code from the code manager
2384 * of the root domain. (#666152).
2386 code_domain = mono_get_root_domain ();
2388 code_domain = cfg->domain;
2390 #if defined(__native_client_codegen__) && defined(__native_client__)
2393 /* This keeps patch targets from being transformed during
2394 * ordinary method compilation, for local branches and jumps.
2396 nacl_allow_target_modification (FALSE);
/* Per-block backend passes before emission. */
2399 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
2400 cfg->spill_count = 0;
2401 /* we reuse dfn here */
2402 /* bb->dfn = bb_count++; */
2404 mono_arch_lowering_pass (cfg, bb);
2406 if (cfg->opt & MONO_OPT_PEEPHOLE)
2407 mono_arch_peephole_pass_1 (cfg, bb);
2410 mono_local_regalloc (cfg, bb);
2412 if (cfg->opt & MONO_OPT_PEEPHOLE)
2413 mono_arch_peephole_pass_2 (cfg, bb);
2415 if (cfg->gen_seq_points && !cfg->gen_seq_points_debug_data)
2416 bb_deduplicate_op_il_seq_points (cfg, bb);
2419 if (cfg->prof_options & MONO_PROFILE_COVERAGE)
2420 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, cfg->num_bblocks);
2422 code = mono_arch_emit_prolog (cfg);
2424 cfg->code_len = code - cfg->native_code;
2425 cfg->prolog_end = cfg->code_len;
2426 cfg->cfa_reg = cfg->cur_cfa_reg;
2427 cfg->cfa_offset = cfg->cur_cfa_offset;
2429 mono_debug_open_method (cfg);
2431 /* emit code all basic blocks */
2432 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
2433 bb->native_offset = cfg->code_len;
2434 bb->real_native_offset = cfg->code_len;
2435 //if ((bb == cfg->bb_entry) || !(bb->region == -1 && !bb->dfn))
2436 mono_arch_output_basic_block (cfg, bb);
2437 bb->native_length = cfg->code_len - bb->native_offset;
2439 if (bb == cfg->bb_exit) {
2440 cfg->epilog_begin = cfg->code_len;
2441 mono_arch_emit_epilog (cfg);
2442 cfg->epilog_end = cfg->code_len;
2446 #ifdef __native_client_codegen__
2447 mono_nacl_fix_patches (cfg->native_code, cfg->patch_info);
2449 mono_arch_emit_exceptions (cfg);
2451 max_epilog_size = 0;
2453 /* we always allocate code in cfg->domain->code_mp to increase locality */
2454 cfg->code_size = cfg->code_len + max_epilog_size;
2455 #ifdef __native_client_codegen__
2456 cfg->code_size = NACL_BUNDLE_ALIGN_UP (cfg->code_size);
2458 /* fixme: align to MONO_ARCH_CODE_ALIGNMENT */
2460 if (cfg->method->dynamic) {
2461 guint unwindlen = 0;
2462 #ifdef MONO_ARCH_HAVE_UNWIND_TABLE
2463 unwindlen = mono_arch_unwindinfo_get_size (cfg->arch.unwindinfo);
2465 /* Allocate the code into a separate memory pool so it can be freed */
2466 cfg->dynamic_info = g_new0 (MonoJitDynamicMethodInfo, 1);
2467 cfg->dynamic_info->code_mp = mono_code_manager_new_dynamic ();
2468 mono_domain_lock (cfg->domain);
2469 mono_dynamic_code_hash_insert (cfg->domain, cfg->method, cfg->dynamic_info);
2470 mono_domain_unlock (cfg->domain);
2472 if (mono_using_xdebug)
2473 /* See the comment for cfg->code_domain */
2474 code = mono_domain_code_reserve (code_domain, cfg->code_size + unwindlen);
2476 code = mono_code_manager_reserve (cfg->dynamic_info->code_mp, cfg->code_size + unwindlen);
2478 guint unwindlen = 0;
2479 #ifdef MONO_ARCH_HAVE_UNWIND_TABLE
2480 unwindlen = mono_arch_unwindinfo_get_size (cfg->arch.unwindinfo);
2482 code = mono_domain_code_reserve (code_domain, cfg->code_size + unwindlen);
2484 #if defined(__native_client_codegen__) && defined(__native_client__)
2485 nacl_allow_target_modification (TRUE);
/* Copy the temporary buffer into the reserved executable region and free it. */
2489 memcpy (code, cfg->native_code, cfg->code_len);
2490 #if defined(__default_codegen__)
2491 g_free (cfg->native_code);
2492 #elif defined(__native_client_codegen__)
2493 if (cfg->native_code_alloc) {
2494 g_free (cfg->native_code_alloc);
2495 cfg->native_code_alloc = 0;
2497 else if (cfg->native_code) {
2498 g_free (cfg->native_code);
2500 #endif /* __native_client_codegen__ */
2501 cfg->native_code = code;
2502 code = cfg->native_code + cfg->code_len;
2504 /* g_assert (((int)cfg->native_code & (MONO_ARCH_CODE_ALIGNMENT - 1)) == 0); */
2505 mono_postprocess_patches (cfg);
2507 #ifdef VALGRIND_JIT_REGISTER_MAP
2508 if (valgrind_register){
2509 char* nm = mono_method_full_name (cfg->method, TRUE);
2510 VALGRIND_JIT_REGISTER_MAP (nm, cfg->native_code, cfg->native_code + cfg->code_len);
2515 if (cfg->verbose_level > 0) {
2516 char* nm = mono_method_full_name (cfg->method, TRUE);
2517 g_print ("Method %s emitted at %p to %p (code length %d) [%s]\n",
2519 cfg->native_code, cfg->native_code + cfg->code_len, cfg->code_len, cfg->domain->friendly_name);
/* Sanity: a generic-sharing context implies the method really is generic. */
2524 gboolean is_generic = FALSE;
2526 if (cfg->method->is_inflated || mono_method_get_generic_container (cfg->method) ||
2527 cfg->method->klass->generic_container || cfg->method->klass->generic_class) {
2531 if (cfg->generic_sharing_context)
2532 g_assert (is_generic);
2535 #ifdef MONO_ARCH_HAVE_SAVE_UNWIND_INFO
2536 mono_arch_save_unwind_info (cfg);
2539 #if defined(__native_client_codegen__) && defined(__native_client__)
2540 if (!cfg->compile_aot) {
2541 if (cfg->method->dynamic) {
2542 code_dest = nacl_code_manager_get_code_dest(cfg->dynamic_info->code_mp, cfg->native_code);
2544 code_dest = nacl_domain_get_code_dest(cfg->domain, cfg->native_code);
2549 #if defined(__native_client_codegen__)
2550 mono_nacl_fix_patches (cfg->native_code, cfg->patch_info);
2553 mono_arch_patch_code (cfg->method, cfg->domain, cfg->native_code, cfg->patch_info, cfg->dynamic_info ? cfg->dynamic_info->code_mp : NULL, cfg->run_cctors);
2555 if (cfg->method->dynamic) {
2556 if (mono_using_xdebug)
2557 mono_domain_code_commit (code_domain, cfg->native_code, cfg->code_size, cfg->code_len);
2559 mono_code_manager_commit (cfg->dynamic_info->code_mp, cfg->native_code, cfg->code_size, cfg->code_len);
2561 mono_domain_code_commit (code_domain, cfg->native_code, cfg->code_size, cfg->code_len);
2563 #if defined(__native_client_codegen__) && defined(__native_client__)
2564 cfg->native_code = code_dest;
2566 mono_profiler_code_buffer_new (cfg->native_code, cfg->code_len, MONO_PROFILER_CODE_BUFFER_METHOD, cfg->method);
/* Required on architectures with incoherent I/D caches before executing new code. */
2568 mono_arch_flush_icache (cfg->native_code, cfg->code_len);
2570 mono_debug_close_method (cfg);
2572 #ifdef MONO_ARCH_HAVE_UNWIND_TABLE
2573 mono_arch_unwindinfo_install_unwind_info (&cfg->arch.unwindinfo, cfg->native_code, cfg->code_len);
/*
 * compute_reachable:
 *
 * Depth-first walk marking BB and every block reachable from it with
 * BB_VISITED; the flag also serves as the recursion guard.
 */
2578 compute_reachable (MonoBasicBlock *bb)
2582 if (!(bb->flags & BB_VISITED)) {
2583 bb->flags |= BB_VISITED;
2584 for (i = 0; i < bb->out_count; ++i)
2585 compute_reachable (bb->out_bb [i]);
/*
 * mono_handle_out_of_line_bblock:
 *
 * When a block falls through into an out-of-line successor, append an explicit
 * OP_BR so the fall-through still works after the successor is moved.
 */
2590 mono_handle_out_of_line_bblock (MonoCompile *cfg)
2593 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
2594 if (bb->next_bb && bb->next_bb->out_of_line && bb->last_ins && !MONO_IS_BRANCH_OP (bb->last_ins)) {
2596 MONO_INST_NEW (cfg, ins, OP_BR);
2597 MONO_ADD_INS (bb, ins);
2598 ins->inst_target_bb = bb->next_bb;
2603 #endif /* #ifndef DISABLE_JIT */
/*
 * create_jit_info_for_trampoline:
 *
 * Build a MonoJitInfo (allocated in the root domain) describing the trampoline
 * in INFO, attributed to WRAPPER, with cached unwind info taken from INFO's
 * pre-encoded uw_info or freshly encoded from its unwind ops.
 */
2606 create_jit_info_for_trampoline (MonoMethod *wrapper, MonoTrampInfo *info)
2608 MonoDomain *domain = mono_get_root_domain ();
2613 if (info->uw_info) {
2614 uw_info = info->uw_info;
2615 info_len = info->uw_info_len;
2617 uw_info = mono_unwind_ops_encode (info->unwind_ops, &info_len);
2620 jinfo = mono_domain_alloc0 (domain, MONO_SIZEOF_JIT_INFO);
2621 jinfo->d.method = wrapper;
2622 jinfo->code_start = info->code;
2623 jinfo->code_size = info->code_size;
2624 jinfo->unwind_info = mono_cache_unwind_info (uw_info, info_len);
/*
 * create_jit_info:
 *
 *   Build the MonoJitInfo for the method just compiled in CFG: decide which
 * optional sections are needed (generic sharing info, arch EH info,
 * try-block-hole table), allocate the jit info, translate the IL exception
 * clauses into native code ranges, and attach unwind information.
 * NOTE(review): extraction dropped lines throughout this function; the
 * visible lines are fragments and several declarations/braces are missing.
 */
2635 create_jit_info (MonoCompile *cfg, MonoMethod *method_to_compile)
2638 MonoMethodHeader *header;
2640 MonoJitInfoFlags flags = JIT_INFO_NONE;
2641 int num_clauses, num_holes = 0;
2642 guint32 stack_size = 0;
2644 g_assert (method_to_compile == cfg->method);
2645 header = cfg->header;
/* Decide which optional data sections the jit info needs; the flags
 * determine the allocation size computed below. */
2647 if (cfg->generic_sharing_context)
2648 flags |= JIT_INFO_HAS_GENERIC_JIT_INFO;
2650 if (cfg->arch_eh_jit_info) {
2651 MonoJitArgumentInfo *arg_info;
2652 MonoMethodSignature *sig = mono_method_signature (cfg->method_to_register);
2655 * This cannot be computed during stack walking, as
2656 * mono_arch_get_argument_info () is not signal safe.
2658 arg_info = g_newa (MonoJitArgumentInfo, sig->param_count + 1);
2659 stack_size = mono_arch_get_argument_info (cfg->generic_sharing_context, sig, sig->param_count, arg_info);
2662 flags |= JIT_INFO_HAS_ARCH_EH_INFO;
2665 if (cfg->has_unwind_info_for_epilog && !(flags & JIT_INFO_HAS_ARCH_EH_INFO))
2666 flags |= JIT_INFO_HAS_ARCH_EH_INFO;
/* Count the try-block holes which need their own table entries. */
2668 if (cfg->try_block_holes) {
2669 for (tmp = cfg->try_block_holes; tmp; tmp = tmp->next) {
2670 TryBlockHole *hole = tmp->data;
2671 MonoExceptionClause *ec = hole->clause;
2672 int hole_end = hole->basic_block->native_offset + hole->basic_block->native_length;
2673 MonoBasicBlock *clause_last_bb = cfg->cil_offset_to_bb [ec->try_offset + ec->try_len];
2674 g_assert (clause_last_bb);
2676 /* Holes at the end of a try region can be represented by simply reducing the size of the block itself.*/
2677 if (clause_last_bb->native_offset != hole_end)
2681 flags |= JIT_INFO_HAS_TRY_BLOCK_HOLES;
2682 if (G_UNLIKELY (cfg->verbose_level >= 4))
2683 printf ("Number of try block holes %d\n", num_holes);
2686 if (mono_security_method_has_declsec (cfg->method_to_register))
2687 flags |= JIT_INFO_HAS_ARCH_EH_INFO;
2689 if (COMPILE_LLVM (cfg))
2690 num_clauses = cfg->llvm_ex_info_len;
2692 num_clauses = header->num_clauses;
/* Dynamic methods get malloc'ed jit info (freed with the method);
 * everything else is allocated from the domain mempool. */
2694 if (cfg->method->dynamic)
2695 jinfo = g_malloc0 (mono_jit_info_size (flags, num_clauses, num_holes));
2697 jinfo = mono_domain_alloc0 (cfg->domain, mono_jit_info_size (flags, num_clauses, num_holes));
2698 mono_jit_info_init (jinfo, cfg->method_to_register, cfg->native_code, cfg->code_len, flags, num_clauses, num_holes);
2699 jinfo->domain_neutral = (cfg->opt & MONO_OPT_SHARED) != 0;
2701 if (COMPILE_LLVM (cfg))
2702 jinfo->from_llvm = TRUE;
/* Fill in the generic sharing section: the sharing context plus the
 * runtime location of the rgctx/'this' variable used to recover it. */
2704 if (cfg->generic_sharing_context) {
2706 MonoGenericJitInfo *gi;
2707 GSList *loclist = NULL;
2709 gi = mono_jit_info_get_generic_jit_info (jinfo);
2712 if (cfg->method->dynamic)
2713 gi->generic_sharing_context = g_new0 (MonoGenericSharingContext, 1);
2715 gi->generic_sharing_context = mono_domain_alloc0 (cfg->domain, sizeof (MonoGenericSharingContext));
2716 mini_init_gsctx (cfg->method->dynamic ? NULL : cfg->domain, NULL, cfg->gsctx_context, gi->generic_sharing_context);
2718 if ((method_to_compile->flags & METHOD_ATTRIBUTE_STATIC) ||
2719 mini_method_get_context (method_to_compile)->method_inst ||
2720 method_to_compile->klass->valuetype) {
2721 g_assert (cfg->rgctx_var);
/* Static, valuetype and generic-method code receives the context through
 * the rgctx variable; instance code receives it through 'this' (args [0]). */
2726 if ((method_to_compile->flags & METHOD_ATTRIBUTE_STATIC) ||
2727 mini_method_get_context (method_to_compile)->method_inst ||
2728 method_to_compile->klass->valuetype) {
2729 inst = cfg->rgctx_var;
2730 if (!COMPILE_LLVM (cfg))
2731 g_assert (inst->opcode == OP_REGOFFSET);
2732 loclist = cfg->rgctx_loclist;
2734 inst = cfg->args [0];
2735 loclist = cfg->this_loclist;
2739 /* Needed to handle async exceptions */
/* Record the per-native-offset location list of the context variable. */
2743 gi->nlocs = g_slist_length (loclist);
2744 if (cfg->method->dynamic)
2745 gi->locations = g_malloc0 (gi->nlocs * sizeof (MonoDwarfLocListEntry));
2747 gi->locations = mono_domain_alloc0 (cfg->domain, gi->nlocs * sizeof (MonoDwarfLocListEntry));
2749 for (l = loclist; l; l = l->next) {
2750 memcpy (&(gi->locations [i]), l->data, sizeof (MonoDwarfLocListEntry));
2755 if (COMPILE_LLVM (cfg)) {
2756 g_assert (cfg->llvm_this_reg != -1);
2757 gi->this_in_reg = 0;
2758 gi->this_reg = cfg->llvm_this_reg;
2759 gi->this_offset = cfg->llvm_this_offset;
2760 } else if (inst->opcode == OP_REGVAR) {
2761 gi->this_in_reg = 1;
2762 gi->this_reg = inst->dreg;
2764 g_assert (inst->opcode == OP_REGOFFSET);
2766 g_assert (inst->inst_basereg == X86_EBP);
2767 #elif defined(TARGET_AMD64)
2768 g_assert (inst->inst_basereg == X86_EBP || inst->inst_basereg == X86_ESP);
2770 g_assert (inst->inst_offset >= G_MININT32 && inst->inst_offset <= G_MAXINT32);
2772 gi->this_in_reg = 0;
2773 gi->this_reg = inst->inst_basereg;
2774 gi->this_offset = inst->inst_offset;
/* Emit the try-block-hole table entries counted above. */
2779 MonoTryBlockHoleTableJitInfo *table;
2782 table = mono_jit_info_get_try_block_hole_table_info (jinfo);
2783 table->num_holes = (guint16)num_holes;
2785 for (tmp = cfg->try_block_holes; tmp; tmp = tmp->next) {
2786 guint32 start_bb_offset;
2787 MonoTryBlockHoleJitInfo *hole;
2788 TryBlockHole *hole_data = tmp->data;
2789 MonoExceptionClause *ec = hole_data->clause;
2790 int hole_end = hole_data->basic_block->native_offset + hole_data->basic_block->native_length;
2791 MonoBasicBlock *clause_last_bb = cfg->cil_offset_to_bb [ec->try_offset + ec->try_len];
2792 g_assert (clause_last_bb);
2794 /* Holes at the end of a try region can be represented by simply reducing the size of the block itself.*/
2795 if (clause_last_bb->native_offset == hole_end)
2798 start_bb_offset = hole_data->start_offset - hole_data->basic_block->native_offset;
2799 hole = &table->holes [i++];
/* Store the clause index, not the pointer. */
2800 hole->clause = hole_data->clause - &header->clauses [0];
2801 hole->offset = (guint32)hole_data->start_offset;
2802 hole->length = (guint16)(hole_data->basic_block->native_length - start_bb_offset);
2804 if (G_UNLIKELY (cfg->verbose_level >= 4))
2805 printf ("\tTry block hole at eh clause %d offset %x length %x\n", hole->clause, hole->offset, hole->length);
2807 g_assert (i == num_holes);
2810 if (jinfo->has_arch_eh_info) {
2811 MonoArchEHJitInfo *info;
2813 info = mono_jit_info_get_arch_eh_info (jinfo);
2815 info->stack_size = stack_size;
/* Translate the IL exception clauses into native-offset ranges; LLVM
 * already produced them in native form. */
2818 if (COMPILE_LLVM (cfg)) {
2820 memcpy (&jinfo->clauses [0], &cfg->llvm_ex_info [0], num_clauses * sizeof (MonoJitExceptionInfo));
2821 } else if (header->num_clauses) {
2824 for (i = 0; i < header->num_clauses; i++) {
2825 MonoExceptionClause *ec = &header->clauses [i];
2826 MonoJitExceptionInfo *ei = &jinfo->clauses [i];
2827 MonoBasicBlock *tblock;
2828 MonoInst *exvar, *spvar;
2830 ei->flags = ec->flags;
2833 * The spvars are needed by mono_arch_install_handler_block_guard ().
2835 if (ei->flags == MONO_EXCEPTION_CLAUSE_FINALLY) {
2838 region = ((i + 1) << 8) | MONO_REGION_FINALLY | ec->flags;
2839 spvar = mono_find_spvar_for_region (cfg, region);
2841 ei->exvar_offset = spvar->inst_offset;
2843 exvar = mono_find_exvar_for_offset (cfg, ec->handler_offset);
2844 ei->exvar_offset = exvar ? exvar->inst_offset : 0;
2847 if (ei->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
2848 tblock = cfg->cil_offset_to_bb [ec->data.filter_offset];
2850 ei->data.filter = cfg->native_code + tblock->native_offset;
2852 ei->data.catch_class = ec->data.catch_class;
2855 tblock = cfg->cil_offset_to_bb [ec->try_offset];
2857 g_assert (tblock->native_offset);
2858 ei->try_start = cfg->native_code + tblock->native_offset;
2859 if (tblock->extend_try_block) {
2861 * Extend the try block backwards to include parts of the previous call
2864 ei->try_start = (guint8*)ei->try_start - MONO_ARCH_MONITOR_ENTER_ADJUSTMENT;
2866 tblock = cfg->cil_offset_to_bb [ec->try_offset + ec->try_len];
/* The bblock at try-end may have been eliminated; search backwards
 * for the last surviving bblock inside the try range. */
2868 if (!tblock->native_offset) {
2870 for (j = ec->try_offset + ec->try_len, end = ec->try_offset; j >= end; --j) {
2871 MonoBasicBlock *bb = cfg->cil_offset_to_bb [j];
2872 if (bb && bb->native_offset) {
2878 ei->try_end = cfg->native_code + tblock->native_offset;
2879 g_assert (tblock->native_offset);
2880 tblock = cfg->cil_offset_to_bb [ec->handler_offset];
2882 ei->handler_start = cfg->native_code + tblock->native_offset;
/* A hole reaching the end of the try range shortens try_end instead
 * of getting its own table entry (matches the counting loop above). */
2884 for (tmp = cfg->try_block_holes; tmp; tmp = tmp->next) {
2885 TryBlockHole *hole = tmp->data;
2886 gpointer hole_end = cfg->native_code + (hole->basic_block->native_offset + hole->basic_block->native_length);
2887 if (hole->clause == ec && hole_end == ei->try_end) {
2888 if (G_UNLIKELY (cfg->verbose_level >= 4))
2889 printf ("\tShortening try block %d from %x to %x\n", i, (int)((guint8*)ei->try_end - cfg->native_code), hole->start_offset);
2891 ei->try_end = cfg->native_code + hole->start_offset;
/* For finally clauses, also compute where the handler ends. */
2896 if (ec->flags == MONO_EXCEPTION_CLAUSE_FINALLY) {
2898 if (ec->handler_offset + ec->handler_len < header->code_size) {
2899 tblock = cfg->cil_offset_to_bb [ec->handler_offset + ec->handler_len];
2900 if (tblock->native_offset) {
2901 end_offset = tblock->native_offset;
/* Handler-end bblock eliminated: search backwards, as for try_end. */
2905 for (j = ec->handler_offset + ec->handler_len, end = ec->handler_offset; j >= end; --j) {
2906 MonoBasicBlock *bb = cfg->cil_offset_to_bb [j];
2907 if (bb && bb->native_offset) {
2912 end_offset = tblock->native_offset + tblock->native_length;
2915 end_offset = cfg->epilog_begin;
2917 ei->data.handler_end = cfg->native_code + end_offset;
/* Diagnostic dump of the final clause table. */
2922 if (G_UNLIKELY (cfg->verbose_level >= 4)) {
2924 for (i = 0; i < jinfo->num_clauses; i++) {
2925 MonoJitExceptionInfo *ei = &jinfo->clauses [i];
2926 int start = (guint8*)ei->try_start - cfg->native_code;
2927 int end = (guint8*)ei->try_end - cfg->native_code;
2928 int handler = (guint8*)ei->handler_start - cfg->native_code;
2929 int handler_end = (guint8*)ei->data.handler_end - cfg->native_code;
2931 printf ("JitInfo EH clause %d flags %x try %x-%x handler %x-%x\n", i, ei->flags, start, end, handler, handler_end);
/* Attach unwind info: pre-encoded (from LLVM), encoded here from the
 * collected unwind ops, or (fallback) just the used-register mask. */
2935 if (cfg->encoded_unwind_ops) {
2936 /* Generated by LLVM */
2937 jinfo->unwind_info = mono_cache_unwind_info (cfg->encoded_unwind_ops, cfg->encoded_unwind_ops_len);
2938 g_free (cfg->encoded_unwind_ops);
2939 } else if (cfg->unwind_ops) {
2941 guint8 *unwind_info = mono_unwind_ops_encode (cfg->unwind_ops, &info_len);
2942 guint32 unwind_desc;
2944 unwind_desc = mono_cache_unwind_info (unwind_info, info_len);
2946 if (cfg->has_unwind_info_for_epilog) {
2947 MonoArchEHJitInfo *info;
2949 info = mono_jit_info_get_arch_eh_info (jinfo);
2951 info->epilog_size = cfg->code_len - cfg->epilog_begin;
2953 jinfo->unwind_info = unwind_desc;
/* mono_cache_unwind_info copied the encoding; free the temporary. */
2954 g_free (unwind_info);
2956 jinfo->unwind_info = cfg->used_int_regs;
/*
 * is_gsharedvt_type:
 *
 *   Return whether T is a type variable (VAR/MVAR) whose gshared constraint
 * is MONO_TYPE_VALUETYPE, i.e. a gsharedvt type parameter.
 */
2964 is_gsharedvt_type (MonoType *t)
2966 return (t->type == MONO_TYPE_VAR || t->type == MONO_TYPE_MVAR) && t->data.generic_param->gshared_constraint == MONO_TYPE_VALUETYPE;
2969 /* Return whether METHOD is a gsharedvt method */
/*
 * is_gsharedvt_method:
 *
 *   Return whether METHOD is a gsharedvt method, i.e. an inflated method
 * where at least one type argument in its class or method instantiation is a
 * gsharedvt type parameter (see is_gsharedvt_type ()).
 */
2971 is_gsharedvt_method (MonoMethod *method)
2973 MonoGenericContext *context;
2974 MonoGenericInst *inst;
/* Non-inflated methods can never be gsharedvt. */
2977 if (!method->is_inflated)
2979 context = mono_method_get_context (method);
/* Check the class type arguments first ... */
2980 inst = context->class_inst;
2982 for (i = 0; i < inst->type_argc; ++i)
2983 if (is_gsharedvt_type (inst->type_argv [i]))
/* ... then the method type arguments. */
2986 inst = context->method_inst;
2988 for (i = 0; i < inst->type_argc; ++i)
2989 if (is_gsharedvt_type (inst->type_argv [i]))
/*
 * is_open_method:
 *
 *   Return whether METHOD is an open constructed method, i.e. an inflated
 * method whose class or method instantiation still contains unbound type
 * variables.
 */
2996 is_open_method (MonoMethod *method)
2998 MonoGenericContext *context;
3000 if (!method->is_inflated)
3002 context = mono_method_get_context (method);
3003 if (context->class_inst && context->class_inst->is_open)
3005 if (context->method_inst && context->method_inst->is_open)
3012 * mini_method_compile:
3013 * @method: the method to compile
3014 * @opts: the optimization flags to use
3015 * @domain: the domain where the method will be compiled in
3016 * @flags: compilation flags
3017 * @parts: debug flag
3019 * Returns: a MonoCompile* pointer. Caller must check the exception_type
3020 * field in the returned struct to see if compilation succeeded.
3023 mini_method_compile (MonoMethod *method, guint32 opts, MonoDomain *domain, JitFlags flags, int parts)
3025 MonoMethodHeader *header;
3026 MonoMethodSignature *sig;
3029 int dfn, i, code_size_ratio;
3030 gboolean try_generic_shared, try_llvm = FALSE;
3031 MonoMethod *method_to_compile, *method_to_register;
3032 gboolean method_is_gshared = FALSE;
3033 gboolean run_cctors = (flags & JIT_FLAG_RUN_CCTORS) ? 1 : 0;
3034 gboolean compile_aot = (flags & JIT_FLAG_AOT) ? 1 : 0;
3035 gboolean full_aot = (flags & JIT_FLAG_FULL_AOT) ? 1 : 0;
3036 gboolean gsharedvt_method = FALSE;
3038 gboolean llvm = (flags & JIT_FLAG_LLVM) ? 1 : 0;
3040 static gboolean verbose_method_inited;
3041 static const char *verbose_method_name;
3043 InterlockedIncrement (&mono_jit_stats.methods_compiled);
3044 if (mono_profiler_get_events () & MONO_PROFILE_JIT_COMPILATION)
3045 mono_profiler_method_jit (method);
3046 if (MONO_METHOD_COMPILE_BEGIN_ENABLED ())
3047 MONO_PROBE_METHOD_COMPILE_BEGIN (method);
3050 * In AOT mode, method can be the following:
3051 * - the generic method definition. In this case, we are compiling the fully shared
3052 * version of the method, i.e. the version where all the type parameters are
3054 * - a gsharedvt method.
3055 * - a method inflated with type parameters. This is for partial sharing.
3056 * - a method inflated with concrete types.
3059 try_generic_shared = mono_class_generic_sharing_enabled (method->klass) &&
3060 (opts & MONO_OPT_GSHARED) && ((method->is_generic || method->klass->generic_container) || (!method->klass->generic_class && mono_method_is_generic_sharable_full (method, TRUE, FALSE, FALSE)));
3062 try_generic_shared = mono_class_generic_sharing_enabled (method->klass) &&
3063 (opts & MONO_OPT_GSHARED) && mono_method_is_generic_sharable (method, FALSE);
3066 if (try_generic_shared && !mono_debug_count ())
3067 try_generic_shared = FALSE;
3070 if (opts & MONO_OPT_GSHARED) {
3071 if (try_generic_shared)
3072 mono_stats.generics_sharable_methods++;
3073 else if (mono_method_is_generic_impl (method))
3074 mono_stats.generics_unsharable_methods++;
3077 if (mini_is_gsharedvt_sharable_method (method)) {
3078 if (!mono_debug_count ())
3079 try_generic_shared = FALSE;
3081 try_generic_shared = FALSE;
3084 gsharedvt_method = is_gsharedvt_method (method);
3085 if (gsharedvt_method || (compile_aot && is_open_method (method))) {
3086 /* We are AOTing a gshared method directly */
3087 method_is_gshared = TRUE;
3088 g_assert (compile_aot);
3089 try_generic_shared = TRUE;
3093 try_llvm = mono_use_llvm || llvm;
3097 if (method_is_gshared) {
3098 method_to_compile = method;
3100 if (try_generic_shared) {
3101 method_to_compile = mini_get_shared_method (method);
3102 g_assert (method_to_compile);
3104 method_to_compile = method;
3108 cfg = g_new0 (MonoCompile, 1);
3109 cfg->method = method_to_compile;
3110 cfg->header = mono_method_get_header (cfg->method);
3111 cfg->mempool = mono_mempool_new ();
3113 cfg->prof_options = mono_profiler_get_events ();
3114 cfg->run_cctors = run_cctors;
3115 cfg->domain = domain;
3116 cfg->verbose_level = mini_verbose;
3117 cfg->compile_aot = compile_aot;
3118 cfg->full_aot = full_aot;
3119 cfg->skip_visibility = method->skip_visibility;
3120 cfg->orig_method = method;
3121 cfg->gen_seq_points = debug_options.gen_seq_points_compact_data || debug_options.gen_seq_points_debug_data;
3122 cfg->gen_seq_points_debug_data = debug_options.gen_seq_points_debug_data;
3124 cfg->explicit_null_checks = debug_options.explicit_null_checks;
3125 cfg->soft_breakpoints = debug_options.soft_breakpoints;
3126 cfg->check_pinvoke_callconv = debug_options.check_pinvoke_callconv;
3127 if (try_generic_shared)
3128 cfg->generic_sharing_context = (MonoGenericSharingContext*)&cfg->gsctx;
3129 cfg->compile_llvm = try_llvm;
3130 cfg->token_info_hash = g_hash_table_new (NULL, NULL);
3132 if (!mono_debug_count ())
3133 cfg->opt &= ~MONO_OPT_FLOAT32;
3134 cfg->r4fp = (cfg->opt & MONO_OPT_FLOAT32) ? 1 : 0;
3135 cfg->r4_stack_type = cfg->r4fp ? STACK_R4 : STACK_R8;
3137 if (cfg->gen_seq_points)
3138 cfg->seq_points = g_ptr_array_new ();
3139 mono_error_init (&cfg->error);
3141 if (cfg->compile_aot && !try_generic_shared && (method->is_generic || method->klass->generic_container || method_is_gshared)) {
3142 cfg->exception_type = MONO_EXCEPTION_GENERIC_SHARING_FAILED;
3146 if (cfg->generic_sharing_context && (gsharedvt_method || mini_is_gsharedvt_sharable_method (method))) {
3147 MonoMethodInflated *inflated;
3148 MonoGenericContext *context;
3150 if (gsharedvt_method) {
3151 g_assert (method->is_inflated);
3152 inflated = (MonoMethodInflated*)method;
3153 context = &inflated->context;
3155 /* We are compiling a gsharedvt method directly */
3156 g_assert (compile_aot);
3158 g_assert (method_to_compile->is_inflated);
3159 inflated = (MonoMethodInflated*)method_to_compile;
3160 context = &inflated->context;
3163 mini_init_gsctx (NULL, cfg->mempool, context, &cfg->gsctx);
3164 cfg->gsctx_context = context;
3166 cfg->gsharedvt = TRUE;
3168 cfg->disable_llvm = TRUE;
3171 if (cfg->generic_sharing_context) {
3172 method_to_register = method_to_compile;
3173 cfg->gshared = TRUE;
3175 g_assert (method == method_to_compile);
3176 method_to_register = method;
3178 cfg->method_to_register = method_to_register;
3180 mono_error_init (&err);
3181 sig = mono_method_signature_checked (cfg->method, &err);
3183 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
3184 cfg->exception_message = g_strdup (mono_error_get_message (&err));
3185 mono_error_cleanup (&err);
3186 if (MONO_METHOD_COMPILE_END_ENABLED ())
3187 MONO_PROBE_METHOD_COMPILE_END (method, FALSE);
3191 header = cfg->header;
3193 MonoLoaderError *error;
3195 if ((error = mono_loader_get_last_error ())) {
3196 cfg->exception_type = error->exception_type;
3198 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
3199 cfg->exception_message = g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name);
3201 if (MONO_METHOD_COMPILE_END_ENABLED ())
3202 MONO_PROBE_METHOD_COMPILE_END (method, FALSE);
3208 static gboolean inited;
3214 * Check for methods which cannot be compiled by LLVM early, to avoid
3215 * the extra compilation pass.
3217 if (COMPILE_LLVM (cfg)) {
3218 mono_llvm_check_method_supported (cfg);
3219 if (cfg->disable_llvm) {
3220 if (cfg->verbose_level >= 1) {
3221 //nm = mono_method_full_name (cfg->method, TRUE);
3222 printf ("LLVM failed for '%s': %s\n", method->name, cfg->exception_message);
3225 mono_destroy_compile (cfg);
3227 goto restart_compile;
3233 /* The debugger has no liveness information, so avoid sharing registers/stack slots */
3234 if (debug_options.mdb_optimizations) {
3235 cfg->disable_reuse_registers = TRUE;
3236 cfg->disable_reuse_stack_slots = TRUE;
3238 * This decreases the chance the debugger will read registers/stack slots which are
3239 * not yet initialized.
3241 cfg->disable_initlocals_opt = TRUE;
3243 cfg->extend_live_ranges = TRUE;
3245 /* Temporarily disable this when running in the debugger until we have support
3246 * for this in the debugger. */
3247 /* This is no longer needed with sdb */
3248 //cfg->disable_omit_fp = TRUE;
3250 /* The debugger needs all locals to be on the stack or in a global register */
3251 cfg->disable_vreg_to_lvreg = TRUE;
3253 /* Don't remove unused variables when running inside the debugger since the user
3254 * may still want to view them. */
3255 cfg->disable_deadce_vars = TRUE;
3257 // cfg->opt |= MONO_OPT_SHARED;
3258 cfg->opt &= ~MONO_OPT_DEADCE;
3259 cfg->opt &= ~MONO_OPT_INLINE;
3260 cfg->opt &= ~MONO_OPT_COPYPROP;
3261 cfg->opt &= ~MONO_OPT_CONSPROP;
3262 /* This is no longer needed with sdb */
3263 //cfg->opt &= ~MONO_OPT_GSHARED;
3265 /* This is needed for the soft debugger, which doesn't like code after the epilog */
3266 cfg->disable_out_of_line_bblocks = TRUE;
3269 if (mono_using_xdebug) {
3271 * Make each variable use its own register/stack slot and extend
3272 * their liveness to cover the whole method, making them displayable
3273 * in gdb even after they are dead.
3275 cfg->disable_reuse_registers = TRUE;
3276 cfg->disable_reuse_stack_slots = TRUE;
3277 cfg->extend_live_ranges = TRUE;
3278 cfg->compute_precise_live_ranges = TRUE;
3281 mini_gc_init_cfg (cfg);
3283 if (COMPILE_LLVM (cfg)) {
3284 cfg->opt |= MONO_OPT_ABCREM;
3287 if (!verbose_method_inited) {
3288 verbose_method_name = g_getenv ("MONO_VERBOSE_METHOD");
3289 verbose_method_inited = TRUE;
3291 if (verbose_method_name) {
3292 const char *name = verbose_method_name;
3294 if ((strchr (name, '.') > name) || strchr (name, ':')) {
3295 MonoMethodDesc *desc;
3297 desc = mono_method_desc_new (name, TRUE);
3298 if (mono_method_desc_full_match (desc, cfg->method)) {
3299 cfg->verbose_level = 4;
3301 mono_method_desc_free (desc);
3303 if (strcmp (cfg->method->name, name) == 0)
3304 cfg->verbose_level = 4;
3308 cfg->intvars = mono_mempool_alloc0 (cfg->mempool, sizeof (guint16) * STACK_MAX * header->max_stack);
3310 if (cfg->verbose_level > 0) {
3313 method_name = mono_method_full_name (method, TRUE);
3314 g_print ("converting %s%s%smethod %s\n", COMPILE_LLVM (cfg) ? "llvm " : "", cfg->gsharedvt ? "gsharedvt " : "", (cfg->generic_sharing_context && !cfg->gsharedvt) ? "gshared " : "", method_name);
3316 if (COMPILE_LLVM (cfg))
3317 g_print ("converting llvm method %s\n", method_name = mono_method_full_name (method, TRUE));
3318 else if (cfg->gsharedvt)
3319 g_print ("converting gsharedvt method %s\n", method_name = mono_method_full_name (method_to_compile, TRUE));
3320 else if (cfg->generic_sharing_context)
3321 g_print ("converting shared method %s\n", method_name = mono_method_full_name (method_to_compile, TRUE));
3323 g_print ("converting method %s\n", method_name = mono_method_full_name (method, TRUE));
3325 g_free (method_name);
3328 if (cfg->opt & (MONO_OPT_ABCREM | MONO_OPT_SSAPRE))
3329 cfg->opt |= MONO_OPT_SSA;
3332 if ((cfg->method->klass->image != mono_defaults.corlib) || (strstr (cfg->method->klass->name, "StackOverflowException") && strstr (cfg->method->name, ".ctor")) || (strstr (cfg->method->klass->name, "OutOfMemoryException") && strstr (cfg->method->name, ".ctor")))
3333 cfg->globalra = TRUE;
3336 //cfg->globalra = TRUE;
3338 //if (!strcmp (cfg->method->klass->name, "Tests") && !cfg->method->wrapper_type)
3339 // cfg->globalra = TRUE;
3342 static int count = 0;
3346 if (g_getenv ("COUNT2")) {
3347 cfg->globalra = TRUE;
3348 if (count == atoi (g_getenv ("COUNT2")))
3349 printf ("LAST: %s\n", mono_method_full_name (cfg->method, TRUE));
3350 if (count > atoi (g_getenv ("COUNT2")))
3351 cfg->globalra = FALSE;
3356 if (header->clauses)
3357 cfg->globalra = FALSE;
3359 if (cfg->method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
3360 /* The code in the prolog clobbers caller saved registers */
3361 cfg->globalra = FALSE;
3363 // FIXME: Disable globalra in case of tracing/profiling
3365 if (cfg->method->save_lmf)
3366 /* The LMF saving code might clobber caller saved registers */
3367 cfg->globalra = FALSE;
3369 if (header->code_size > 5000)
3371 /* Too large bblocks could overflow the ins positions */
3372 cfg->globalra = FALSE;
3374 cfg->rs = mono_regstate_new ();
3376 cfg->rs->next_vreg = MONO_MAX_IREGS + MONO_MAX_FREGS;
3377 cfg->next_vreg = cfg->rs->next_vreg;
3379 /* FIXME: Fix SSA to handle branches inside bblocks */
3380 if (cfg->opt & MONO_OPT_SSA)
3381 cfg->enable_extended_bblocks = FALSE;
3384 * FIXME: This confuses liveness analysis because variables which are assigned after
3385 * a branch inside a bblock become part of the kill set, even though the assignment
3386 * might not get executed. This causes the optimize_initlocals pass to delete some
3387 * assignments which are needed.
3388 * Also, the mono_if_conversion pass needs to be modified to recognize the code
3391 //cfg->enable_extended_bblocks = TRUE;
3393 /*We must verify the method before doing any IR generation as mono_compile_create_vars can assert.*/
3394 if (mono_compile_is_broken (cfg, cfg->method, TRUE)) {
3395 if (mini_get_debug_options ()->break_on_unverified)
3401 * create MonoInst* which represents arguments and local variables
3403 mono_compile_create_vars (cfg);
3405 /* SSAPRE is not supported on linear IR */
3406 cfg->opt &= ~MONO_OPT_SSAPRE;
3408 i = mono_method_to_ir (cfg, method_to_compile, NULL, NULL, NULL, NULL, 0, FALSE);
3411 if (try_generic_shared && cfg->exception_type == MONO_EXCEPTION_GENERIC_SHARING_FAILED) {
3413 if (MONO_METHOD_COMPILE_END_ENABLED ())
3414 MONO_PROBE_METHOD_COMPILE_END (method, FALSE);
3417 mono_destroy_compile (cfg);
3418 try_generic_shared = FALSE;
3419 goto restart_compile;
3421 g_assert (cfg->exception_type != MONO_EXCEPTION_GENERIC_SHARING_FAILED);
3423 if (MONO_METHOD_COMPILE_END_ENABLED ())
3424 MONO_PROBE_METHOD_COMPILE_END (method, FALSE);
3425 /* cfg contains the details of the failure, so let the caller cleanup */
3429 cfg->stat_basic_blocks += cfg->num_bblocks;
3431 if (COMPILE_LLVM (cfg)) {
3434 /* The IR has to be in SSA form for LLVM */
3435 cfg->opt |= MONO_OPT_SSA;
3439 // Allow SSA on the result value
3440 cfg->ret->flags &= ~MONO_INST_VOLATILE;
3442 // Add an explicit return instruction referencing the return value
3443 MONO_INST_NEW (cfg, ins, OP_SETRET);
3444 ins->sreg1 = cfg->ret->dreg;
3446 MONO_ADD_INS (cfg->bb_exit, ins);
3449 cfg->opt &= ~MONO_OPT_LINEARS;
3452 cfg->opt &= ~MONO_OPT_BRANCH;
3455 /* todo: remove code when we have verified that the liveness for try/catch blocks
3459 * Currently, this can't be commented out since exception blocks are not
3460 * processed during liveness analysis.
3461 * It is also needed, because otherwise the local optimization passes would
3462 * delete assignments in cases like this:
3464 * <something which throws>
3466 * This also allows SSA to be run on methods containing exception clauses, since
3467 * SSA will ignore variables marked VOLATILE.
3469 mono_liveness_handle_exception_clauses (cfg);
3471 mono_handle_out_of_line_bblock (cfg);
3473 /*g_print ("numblocks = %d\n", cfg->num_bblocks);*/
3475 if (!COMPILE_LLVM (cfg))
3476 mono_decompose_long_opts (cfg);
3478 /* Should be done before branch opts */
3479 if (cfg->opt & (MONO_OPT_CONSPROP | MONO_OPT_COPYPROP))
3480 mono_local_cprop (cfg);
3482 if (cfg->opt & MONO_OPT_BRANCH)
3483 mono_optimize_branches (cfg);
3485 /* This must be done _before_ global reg alloc and _after_ decompose */
3486 mono_handle_global_vregs (cfg);
3487 if (cfg->opt & MONO_OPT_DEADCE)
3488 mono_local_deadce (cfg);
3489 if (cfg->opt & MONO_OPT_ALIAS_ANALYSIS)
3490 mono_local_alias_analysis (cfg);
3491 /* Disable this for LLVM to make the IR easier to handle */
3492 if (!COMPILE_LLVM (cfg))
3493 mono_if_conversion (cfg);
3495 if ((cfg->opt & MONO_OPT_SSAPRE) || cfg->globalra)
3496 mono_remove_critical_edges (cfg);
3498 /* Depth-first ordering on basic blocks */
3499 cfg->bblocks = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * (cfg->num_bblocks + 1));
3501 cfg->max_block_num = cfg->num_bblocks;
3504 df_visit (cfg->bb_entry, &dfn, cfg->bblocks);
3505 if (cfg->num_bblocks != dfn + 1) {
3508 cfg->num_bblocks = dfn + 1;
3510 /* remove unreachable code, because the code in them may be
3511 * inconsistent (access to dead variables for example) */
3512 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
3513 bb->flags &= ~BB_VISITED;
3514 compute_reachable (cfg->bb_entry);
3515 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
3516 if (bb->flags & BB_EXCEPTION_HANDLER)
3517 compute_reachable (bb);
3518 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
3519 if (!(bb->flags & BB_VISITED)) {
3520 if (cfg->verbose_level > 1)
3521 g_print ("found unreachable code in BB%d\n", bb->block_num);
3522 bb->code = bb->last_ins = NULL;
3523 while (bb->out_count)
3524 mono_unlink_bblock (cfg, bb, bb->out_bb [0]);
3527 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
3528 bb->flags &= ~BB_VISITED;
3531 if (((cfg->num_varinfo > 2000) || (cfg->num_bblocks > 1000)) && !cfg->compile_aot) {
3533 * we disable some optimizations if there are too many variables
3534 * because JIT time may become too expensive. The actual number needs
3535 * to be tweaked and eventually the non-linear algorithms should be fixed.
3537 cfg->opt &= ~ (MONO_OPT_LINEARS | MONO_OPT_COPYPROP | MONO_OPT_CONSPROP);
3538 cfg->disable_ssa = TRUE;
3541 if (cfg->opt & MONO_OPT_LOOP) {
3542 mono_compile_dominator_info (cfg, MONO_COMP_DOM | MONO_COMP_IDOM);
3543 mono_compute_natural_loops (cfg);
3546 /* after method_to_ir */
3548 if (MONO_METHOD_COMPILE_END_ENABLED ())
3549 MONO_PROBE_METHOD_COMPILE_END (method, TRUE);
3554 if (header->num_clauses)
3555 cfg->disable_ssa = TRUE;
3558 //#define DEBUGSSA "logic_run"
3559 //#define DEBUGSSA_CLASS "Tests"
3562 if (!cfg->disable_ssa) {
3563 mono_local_cprop (cfg);
3566 mono_ssa_compute (cfg);
3570 if (cfg->opt & MONO_OPT_SSA) {
3571 if (!(cfg->comp_done & MONO_COMP_SSA) && !cfg->disable_ssa) {
3573 mono_ssa_compute (cfg);
3576 if (cfg->verbose_level >= 2) {
3583 /* after SSA translation */
3585 if (MONO_METHOD_COMPILE_END_ENABLED ())
3586 MONO_PROBE_METHOD_COMPILE_END (method, TRUE);
3590 if ((cfg->opt & MONO_OPT_CONSPROP) || (cfg->opt & MONO_OPT_COPYPROP)) {
3591 if (cfg->comp_done & MONO_COMP_SSA && !COMPILE_LLVM (cfg)) {
3593 mono_ssa_cprop (cfg);
3599 if (cfg->comp_done & MONO_COMP_SSA && !COMPILE_LLVM (cfg)) {
3600 //mono_ssa_strength_reduction (cfg);
3602 if (cfg->opt & MONO_OPT_SSAPRE) {
3603 mono_perform_ssapre (cfg);
3604 //mono_local_cprop (cfg);
3607 if (cfg->opt & MONO_OPT_DEADCE)
3608 mono_ssa_deadce (cfg);
3610 if ((cfg->flags & (MONO_CFG_HAS_LDELEMA|MONO_CFG_HAS_CHECK_THIS)) && (cfg->opt & MONO_OPT_ABCREM))
3611 mono_perform_abc_removal (cfg);
3613 mono_ssa_remove (cfg);
3614 mono_local_cprop (cfg);
3615 mono_handle_global_vregs (cfg);
3616 if (cfg->opt & MONO_OPT_DEADCE)
3617 mono_local_deadce (cfg);
3619 if (cfg->opt & MONO_OPT_BRANCH) {
3622 mono_optimize_branches (cfg);
3624 /* Have to recompute cfg->bblocks and bb->dfn */
3625 if (cfg->globalra) {
3626 mono_remove_critical_edges (cfg);
3628 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
3631 /* Depth-first ordering on basic blocks */
3632 cfg->bblocks = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * (cfg->num_bblocks + 1));
3635 df_visit (cfg->bb_entry, &dfn, cfg->bblocks);
3636 cfg->num_bblocks = dfn + 1;
3642 if (cfg->comp_done & MONO_COMP_SSA && COMPILE_LLVM (cfg)) {
3643 mono_ssa_loop_invariant_code_motion (cfg);
3644 /* This removes MONO_INST_FAULT flags too so perform it unconditionally */
3645 if (cfg->opt & MONO_OPT_ABCREM)
3646 mono_perform_abc_removal (cfg);
3649 /* after SSA removal */
3651 if (MONO_METHOD_COMPILE_END_ENABLED ())
3652 MONO_PROBE_METHOD_COMPILE_END (method, TRUE);
3656 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
3657 if (COMPILE_SOFT_FLOAT (cfg))
3658 mono_decompose_soft_float (cfg);
3660 if (COMPILE_LLVM (cfg))
3661 mono_decompose_vtype_opts_llvm (cfg);
3663 mono_decompose_vtype_opts (cfg);
3664 if (cfg->flags & MONO_CFG_HAS_ARRAY_ACCESS)
3665 mono_decompose_array_access_opts (cfg);
3668 #ifndef MONO_ARCH_GOT_REG
3673 g_assert (cfg->got_var_allocated);
3676 * Allways allocate the GOT var to a register, because keeping it
3677 * in memory will increase the number of live temporaries in some
3678 * code created by inssel.brg, leading to the well known spills+
3679 * branches problem. Testcase: mcs crash in
3680 * System.MonoCustomAttrs:GetCustomAttributes.
3682 #ifdef MONO_ARCH_GOT_REG
3683 got_reg = MONO_ARCH_GOT_REG;
3685 regs = mono_arch_get_global_int_regs (cfg);
3687 got_reg = GPOINTER_TO_INT (regs->data);
3690 cfg->got_var->opcode = OP_REGVAR;
3691 cfg->got_var->dreg = got_reg;
3692 cfg->used_int_regs |= 1LL << cfg->got_var->dreg;
3696 * Have to call this again to process variables added since the first call.
3698 mono_liveness_handle_exception_clauses (cfg);
3700 if (cfg->globalra) {
3703 /* Have to do this before regalloc since it can create vregs */
3704 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
3705 mono_arch_lowering_pass (cfg, bb);
3707 mono_global_regalloc (cfg);
3710 if ((cfg->opt & MONO_OPT_LINEARS) && !cfg->globalra) {
3711 GList *vars, *regs, *l;
3713 /* fixme: maybe we can avoid computing liveness here if already computed ? */
3714 cfg->comp_done &= ~MONO_COMP_LIVENESS;
3715 if (!(cfg->comp_done & MONO_COMP_LIVENESS))
3716 mono_analyze_liveness (cfg);
3718 if ((vars = mono_arch_get_allocatable_int_vars (cfg))) {
3719 regs = mono_arch_get_global_int_regs (cfg);
3720 /* Remove the reg reserved for holding the GOT address */
3722 for (l = regs; l; l = l->next) {
3723 if (GPOINTER_TO_UINT (l->data) == cfg->got_var->dreg) {
3724 regs = g_list_delete_link (regs, l);
3729 mono_linear_scan (cfg, vars, regs, &cfg->used_int_regs);
3733 //mono_print_code (cfg, "");
3737 /* variables are allocated after decompose, since decompose could create temps */
3738 if (!cfg->globalra && !COMPILE_LLVM (cfg)) {
3739 mono_arch_allocate_vars (cfg);
3740 if (cfg->exception_type)
3746 gboolean need_local_opts;
3748 if (!cfg->globalra && !COMPILE_LLVM (cfg)) {
3749 mono_spill_global_vars (cfg, &need_local_opts);
3751 if (need_local_opts || cfg->compile_aot) {
3752 /* To optimize code created by spill_global_vars */
3753 mono_local_cprop (cfg);
3754 if (cfg->opt & MONO_OPT_DEADCE)
3755 mono_local_deadce (cfg);
3759 /* Add branches between non-consecutive bblocks */
3760 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
3761 if (bb->last_ins && MONO_IS_COND_BRANCH_OP (bb->last_ins) &&
3762 bb->last_ins->inst_false_bb && bb->next_bb != bb->last_ins->inst_false_bb) {
3763 /* we are careful when inverting, since bugs like #59580
3764 * could show up when dealing with NaNs.
3766 if (MONO_IS_COND_BRANCH_NOFP(bb->last_ins) && bb->next_bb == bb->last_ins->inst_true_bb) {
3767 MonoBasicBlock *tmp = bb->last_ins->inst_true_bb;
3768 bb->last_ins->inst_true_bb = bb->last_ins->inst_false_bb;
3769 bb->last_ins->inst_false_bb = tmp;
3771 bb->last_ins->opcode = mono_reverse_branch_op (bb->last_ins->opcode);
3773 MonoInst *inst = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst));
3774 inst->opcode = OP_BR;
3775 inst->inst_target_bb = bb->last_ins->inst_false_bb;
3776 mono_bblock_add_inst (bb, inst);
3781 if (cfg->verbose_level >= 4 && !cfg->globalra) {
3782 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
3783 MonoInst *tree = bb->code;
3784 g_print ("DUMP BLOCK %d:\n", bb->block_num);
3787 for (; tree; tree = tree->next) {
3788 mono_print_ins_index (-1, tree);
3794 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
3795 bb->max_vreg = cfg->next_vreg;
3799 if (COMPILE_LLVM (cfg)) {
3803 /* The IR has to be in SSA form for LLVM */
3804 if (!(cfg->comp_done & MONO_COMP_SSA)) {
3805 cfg->exception_message = g_strdup ("SSA disabled.");
3806 cfg->disable_llvm = TRUE;
3809 if (cfg->flags & MONO_CFG_HAS_ARRAY_ACCESS)
3810 mono_decompose_array_access_opts (cfg);
3812 if (!cfg->disable_llvm)
3813 mono_llvm_emit_method (cfg);
3814 if (cfg->disable_llvm) {
3815 if (cfg->verbose_level >= 1) {
3816 //nm = mono_method_full_name (cfg->method, TRUE);
3817 printf ("LLVM failed for '%s': %s\n", method->name, cfg->exception_message);
3820 mono_destroy_compile (cfg);
3822 goto restart_compile;
3825 if (cfg->verbose_level > 0 && !cfg->compile_aot) {
3826 nm = mono_method_full_name (cfg->method, TRUE);
3827 g_print ("LLVM Method %s emitted at %p to %p (code length %d) [%s]\n",
3829 cfg->native_code, cfg->native_code + cfg->code_len, cfg->code_len, cfg->domain->friendly_name);
3837 if (COMPILE_LLVM (cfg))
3838 InterlockedIncrement (&mono_jit_stats.methods_with_llvm);
3840 InterlockedIncrement (&mono_jit_stats.methods_without_llvm);
3842 cfg->jit_info = create_jit_info (cfg, method_to_compile);
3844 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
3845 if (cfg->extend_live_ranges) {
3846 /* Extend live ranges to cover the whole method */
3847 for (i = 0; i < cfg->num_varinfo; ++i)
3848 MONO_VARINFO (cfg, i)->live_range_end = cfg->code_len;
3852 if (!cfg->compile_aot)
3853 mono_save_xdebug_info (cfg);
3855 mini_gc_create_gc_map (cfg);
3857 mono_save_seq_point_info (cfg);
3859 if (cfg->verbose_level >= 2) {
3860 char *id = mono_method_full_name (cfg->method, FALSE);
3861 mono_disassemble_code (cfg, cfg->native_code, cfg->code_len, id + 3);
3865 if (!cfg->compile_aot) {
3866 mono_domain_lock (cfg->domain);
3867 mono_jit_info_table_add (cfg->domain, cfg->jit_info);
3869 if (cfg->method->dynamic)
3870 mono_dynamic_code_hash_lookup (cfg->domain, cfg->method)->ji = cfg->jit_info;
3871 mono_domain_unlock (cfg->domain);
3876 printf ("GSHAREDVT: %s\n", mono_method_full_name (cfg->method, TRUE));
3879 /* collect statistics */
3880 #ifndef DISABLE_PERFCOUNTERS
3881 mono_perfcounters->jit_methods++;
3882 mono_perfcounters->jit_bytes += header->code_size;
3884 mono_jit_stats.allocated_code_size += cfg->code_len;
3885 code_size_ratio = cfg->code_len;
3886 if (code_size_ratio > mono_jit_stats.biggest_method_size && mono_jit_stats.enabled) {
3887 mono_jit_stats.biggest_method_size = code_size_ratio;
3888 g_free (mono_jit_stats.biggest_method);
3889 mono_jit_stats.biggest_method = g_strdup_printf ("%s::%s)", method->klass->name, method->name);
3891 code_size_ratio = (code_size_ratio * 100) / header->code_size;
3892 if (code_size_ratio > mono_jit_stats.max_code_size_ratio && mono_jit_stats.enabled) {
3893 mono_jit_stats.max_code_size_ratio = code_size_ratio;
3894 g_free (mono_jit_stats.max_ratio_method);
3895 mono_jit_stats.max_ratio_method = g_strdup_printf ("%s::%s)", method->klass->name, method->name);
3897 mono_jit_stats.native_code_size += cfg->code_len;
3899 if (MONO_METHOD_COMPILE_END_ENABLED ())
3900 MONO_PROBE_METHOD_COMPILE_END (method, TRUE);
3908 mini_method_compile (MonoMethod *method, guint32 opts, MonoDomain *domain, JitFlags flags, int parts)
3910 g_assert_not_reached ();
3914 #endif /* DISABLE_JIT */
/*
 * mono_jit_compile_method_inner:
 *
 *   Produce (or look up) native code for METHOD in TARGET_DOMAIN with
 * optimization flags OPT.  On failure, *JIT_EX receives the managed exception
 * describing the problem.
 * NOTE(review): this extract has the original file's line numbers fused into
 * each line and some lines missing; the code below is kept byte-identical and
 * only comments were added.
 */
3917 mono_jit_compile_method_inner (MonoMethod *method, MonoDomain *target_domain, int opt, MonoException **jit_ex)
3920 gpointer code = NULL;
3921 MonoJitInfo *jinfo, *info;
3923 MonoException *ex = NULL;
3924 guint32 prof_options;
3926 MonoMethod *prof_method, *shared;
/* Internal calls and P/Invokes are not JITted directly: resolve the native
 * entry point, then compile a marshalling wrapper in place of the method. */
3928 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3929 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
3931 MonoMethodPInvoke* piinfo = (MonoMethodPInvoke *) method;
3933 if (!piinfo->addr) {
3934 if (method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL)
3935 piinfo->addr = mono_lookup_internal_call (method);
3936 else if (method->iflags & METHOD_IMPL_ATTRIBUTE_NATIVE)
/* Native code in byte-array-loaded or wrong-platform modules cannot run. */
3938 g_warning ("Method '%s' in assembly '%s' contains native code that cannot be executed by Mono in modules loaded from byte arrays. The assembly was probably created using C++/CLI.\n", mono_method_full_name (method, TRUE), method->klass->image->name);
3940 g_warning ("Method '%s' in assembly '%s' contains native code that cannot be executed by Mono on this platform. The assembly was probably created using C++/CLI.\n", mono_method_full_name (method, TRUE), method->klass->image->name);
3943 mono_lookup_pinvoke_call (method, NULL, NULL);
/* Compile the marshalling wrapper and return its code instead. */
3945 nm = mono_marshal_get_native_wrapper (method, check_for_pending_exc, mono_aot_only);
3946 code = mono_get_addr_from_ftnptr (mono_compile_method (nm));
3947 jinfo = mono_jit_info_table_find (target_domain, code);
3949 jinfo = mono_jit_info_table_find (mono_domain_get (), code);
3951 mono_profiler_method_end_jit (method, jinfo, MONO_PROFILE_OK);
/* Runtime-implemented methods: delegate ctor/Invoke/BeginInvoke/EndInvoke. */
3953 } else if ((method->iflags & METHOD_IMPL_ATTRIBUTE_RUNTIME)) {
3954 const char *name = method->name;
3955 char *full_name, *msg;
3958 if (method->klass->parent == mono_defaults.multicastdelegate_class) {
3959 if (*name == '.' && (strcmp (name, ".ctor") == 0)) {
3960 MonoJitICallInfo *mi = mono_find_jit_icall_by_name ("mono_delegate_ctor");
3963 * We need to make sure this wrapper
3964 * is compiled because it might end up
3965 * in an (M)RGCTX if generic sharing
3966 * is enabled, and would be called
3967 * indirectly. If it were a
3968 * trampoline we'd try to patch that
3969 * indirect call, which is not
3972 return mono_get_addr_from_ftnptr ((gpointer)mono_icall_get_wrapper_full (mi, TRUE));
3973 } else if (*name == 'I' && (strcmp (name, "Invoke") == 0)) {
3974 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
3975 return mono_create_delegate_trampoline (target_domain, method->klass);
3977 nm = mono_marshal_get_delegate_invoke (method, NULL);
3978 return mono_get_addr_from_ftnptr (mono_compile_method (nm));
3980 } else if (*name == 'B' && (strcmp (name, "BeginInvoke") == 0)) {
3981 nm = mono_marshal_get_delegate_begin_invoke (method);
3982 return mono_get_addr_from_ftnptr (mono_compile_method (nm));
3983 } else if (*name == 'E' && (strcmp (name, "EndInvoke") == 0)) {
3984 nm = mono_marshal_get_delegate_end_invoke (method);
3985 return mono_get_addr_from_ftnptr (mono_compile_method (nm));
/* Any other runtime-implemented method is an invalid program. */
3989 full_name = mono_method_full_name (method, TRUE);
3990 msg = g_strdup_printf ("Unrecognizable runtime implemented method '%s'", full_name);
3991 *jit_ex = mono_exception_from_name_msg (mono_defaults.corlib, "System", "InvalidProgramException", msg);
/* Gsharedvt in/out wrappers: their bodies are implemented in assembly like a
 * trampoline; register JIT info once and cache it in the static tinfos. */
3997 if (method->wrapper_type == MONO_WRAPPER_UNKNOWN) {
3998 WrapperInfo *info = mono_marshal_get_wrapper_info (method);
4000 if (info->subtype == WRAPPER_SUBTYPE_GSHAREDVT_IN || info->subtype == WRAPPER_SUBTYPE_GSHAREDVT_OUT) {
4001 static MonoTrampInfo *in_tinfo, *out_tinfo;
4002 MonoTrampInfo *tinfo;
4004 gboolean is_in = info->subtype == WRAPPER_SUBTYPE_GSHAREDVT_IN;
4006 if (is_in && in_tinfo)
4007 return in_tinfo->code;
4008 else if (!is_in && out_tinfo)
4009 return out_tinfo->code;
4012 * This is a special wrapper whose body is implemented in assembly, like a trampoline. We use a wrapper so EH
4014 * FIXME: The caller signature doesn't match the callee, which might cause problems on some platforms
4017 mono_aot_get_trampoline_full (is_in ? "gsharedvt_trampoline" : "gsharedvt_out_trampoline", &tinfo);
4019 mono_arch_get_gsharedvt_trampoline (&tinfo, FALSE);
4020 jinfo = create_jit_info_for_trampoline (method, tinfo);
4021 mono_jit_info_table_add (mono_get_root_domain (), jinfo);
/* Running with --aot-only: JITting is forbidden, report it as an error. */
4030 if (mono_aot_only) {
4031 char *fullname = mono_method_full_name (method, TRUE);
4032 char *msg = g_strdup_printf ("Attempting to JIT compile method '%s' while running with --aot-only. See http://docs.xamarin.com/ios/about/limitations for more information.\n", fullname);
4034 *jit_ex = mono_get_exception_execution_engine (msg);
/* Actual compilation, timed for the global JIT statistics. */
4041 jit_timer = g_timer_new ();
4043 cfg = mini_method_compile (method, opt, target_domain, JIT_FLAG_RUN_CCTORS, 0);
4044 prof_method = cfg->method;
4046 g_timer_stop (jit_timer);
4047 mono_jit_stats.jit_time += g_timer_elapsed (jit_timer, NULL);
4048 g_timer_destroy (jit_timer);
/* Map a recorded compilation failure onto the managed exception the caller
 * should throw. */
4050 switch (cfg->exception_type) {
4051 case MONO_EXCEPTION_NONE:
4053 case MONO_EXCEPTION_TYPE_LOAD:
4054 case MONO_EXCEPTION_MISSING_FIELD:
4055 case MONO_EXCEPTION_MISSING_METHOD:
4056 case MONO_EXCEPTION_FILE_NOT_FOUND:
4057 case MONO_EXCEPTION_BAD_IMAGE: {
4058 /* Throw a type load exception if needed */
4059 MonoLoaderError *error = mono_loader_get_last_error ();
4062 ex = mono_loader_error_prepare_exception (error);
4064 if (cfg->exception_ptr) {
4065 ex = mono_class_get_exception_for_failure (cfg->exception_ptr);
4067 if (cfg->exception_type == MONO_EXCEPTION_MISSING_FIELD)
4068 ex = mono_exception_from_name_msg (mono_defaults.corlib, "System", "MissingFieldException", cfg->exception_message);
4069 else if (cfg->exception_type == MONO_EXCEPTION_MISSING_METHOD)
4070 ex = mono_exception_from_name_msg (mono_defaults.corlib, "System", "MissingMethodException", cfg->exception_message);
4071 else if (cfg->exception_type == MONO_EXCEPTION_TYPE_LOAD)
4072 ex = mono_exception_from_name_msg (mono_defaults.corlib, "System", "TypeLoadException", cfg->exception_message);
4073 else if (cfg->exception_type == MONO_EXCEPTION_FILE_NOT_FOUND)
4074 ex = mono_exception_from_name_msg (mono_defaults.corlib, "System", "FileNotFoundException", cfg->exception_message);
4075 else if (cfg->exception_type == MONO_EXCEPTION_BAD_IMAGE)
4076 ex = mono_get_exception_bad_image_format (cfg->exception_message);
4078 g_assert_not_reached ();
4083 case MONO_EXCEPTION_INVALID_PROGRAM:
4084 ex = mono_exception_from_name_msg (mono_defaults.corlib, "System", "InvalidProgramException", cfg->exception_message);
4086 case MONO_EXCEPTION_UNVERIFIABLE_IL:
4087 ex = mono_exception_from_name_msg (mono_defaults.corlib, "System.Security", "VerificationException", cfg->exception_message);
4089 case MONO_EXCEPTION_METHOD_ACCESS:
4090 ex = mono_exception_from_name_msg (mono_defaults.corlib, "System", "MethodAccessException", cfg->exception_message);
4092 case MONO_EXCEPTION_FIELD_ACCESS:
4093 ex = mono_exception_from_name_msg (mono_defaults.corlib, "System", "FieldAccessException", cfg->exception_message);
4095 #ifndef DISABLE_SECURITY
4096 /* this can only be set if the security manager is active */
4097 case MONO_EXCEPTION_SECURITY_LINKDEMAND: {
4098 MonoSecurityManager* secman = mono_security_manager_get_methods ();
4099 MonoObject *exc = NULL;
4102 args [0] = &cfg->exception_data;
4104 mono_runtime_invoke (secman->linkdemandsecurityexception, NULL, args, &exc);
4106 ex = (MonoException*)exc;
4110 case MONO_EXCEPTION_OBJECT_SUPPLIED: {
4111 MonoException *exp = cfg->exception_ptr;
4112 MONO_GC_UNREGISTER_ROOT (cfg->exception_ptr);
4117 case MONO_EXCEPTION_OUT_OF_MEMORY:
4118 ex = mono_domain_get ()->out_of_memory_ex;
4120 case MONO_EXCEPTION_MONO_ERROR:
4121 g_assert (!mono_error_ok (&cfg->error));
4122 ex = mono_error_convert_to_exception (&cfg->error);
4125 g_assert_not_reached ();
/* On failure: notify the profiler and free the MonoCompile before bailing. */
4129 if (cfg->prof_options & MONO_PROFILE_JIT_COMPILATION)
4130 mono_profiler_method_end_jit (method, NULL, MONO_PROFILE_FAILED);
4132 mono_destroy_compile (cfg);
4138 if (mono_method_is_generic_sharable (method, FALSE))
4139 shared = mini_get_shared_method (method);
4143 mono_domain_lock (target_domain);
4145 /* Check if some other thread already did the job. In this case, we can
4146 discard the code this thread generated. */
4148 info = mini_lookup_method (target_domain, method, shared);
4150 /* We can't use a domain specific method in another domain */
4151 if ((target_domain == mono_domain_get ()) || info->domain_neutral) {
4152 code = info->code_start;
4153 // printf("Discarding code for method %s\n", method->name);
4157 /* The lookup + insert is atomic since this is done inside the domain lock */
4158 mono_domain_jit_code_hash_lock (target_domain);
4159 mono_internal_hash_table_insert (&target_domain->jit_code_hash, cfg->jit_info->d.method, cfg->jit_info);
4160 mono_domain_jit_code_hash_unlock (target_domain);
4162 code = cfg->native_code;
4164 if (cfg->generic_sharing_context && mono_method_is_generic_sharable (method, FALSE))
4165 mono_stats.generics_shared_methods++;
4167 mono_stats.gsharedvt_methods++;
4170 jinfo = cfg->jit_info;
4172 prof_options = cfg->prof_options;
4175 * Update global stats while holding a lock, instead of doing many
4176 * InterlockedIncrement operations during JITting.
4178 mono_jit_stats.allocate_var += cfg->stat_allocate_var;
4179 mono_jit_stats.locals_stack_size += cfg->stat_locals_stack_size;
4180 mono_jit_stats.basic_blocks += cfg->stat_basic_blocks;
4181 mono_jit_stats.max_basic_blocks = MAX (cfg->stat_basic_blocks, mono_jit_stats.max_basic_blocks);
4182 mono_jit_stats.cil_code_size += cfg->stat_cil_code_size;
4183 mono_jit_stats.regvars += cfg->stat_n_regvars;
4184 mono_jit_stats.inlineable_methods += cfg->stat_inlineable_methods;
4185 mono_jit_stats.inlined_methods += cfg->stat_inlined_methods;
4186 mono_jit_stats.cas_demand_generation += cfg->stat_cas_demand_generation;
4187 mono_jit_stats.code_reallocs += cfg->stat_code_reallocs;
4189 mono_destroy_compile (cfg);
/* Back-patch call sites which jumped to this method before its code existed. */
4192 if (domain_jit_info (target_domain)->jump_target_hash) {
4193 MonoJumpInfo patch_info;
4194 MonoJumpList *jlist;
4196 jlist = g_hash_table_lookup (domain_jit_info (target_domain)->jump_target_hash, method);
4198 patch_info.next = NULL;
4199 patch_info.ip.i = 0;
4200 patch_info.type = MONO_PATCH_INFO_METHOD_JUMP;
4201 patch_info.data.method = method;
4202 g_hash_table_remove (domain_jit_info (target_domain)->jump_target_hash, method);
4204 #if defined(__native_client_codegen__) && defined(__native_client__)
4205 /* These patches are applied after a method has been installed, no target munging is needed. */
4206 nacl_allow_target_modification (FALSE);
4208 for (tmp = jlist->list; tmp; tmp = tmp->next)
4209 mono_arch_patch_code (NULL, target_domain, tmp->data, &patch_info, NULL, TRUE);
4210 #if defined(__native_client_codegen__) && defined(__native_client__)
4211 nacl_allow_target_modification (TRUE);
4216 mono_emit_jit_map (jinfo);
4218 mono_domain_unlock (target_domain);
/* Initialize the vtable / run the class constructor for the method's class. */
4220 vtable = mono_class_vtable (target_domain, method->klass);
4222 ex = mono_class_get_exception_for_failure (method->klass);
4228 if (prof_options & MONO_PROFILE_JIT_COMPILATION) {
4229 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
4230 if (mono_marshal_method_from_wrapper (method)) {
4231 /* Native func wrappers have no method */
4232 /* The profiler doesn't know about wrappers, so pass the original icall method */
4233 mono_profiler_method_end_jit (mono_marshal_method_from_wrapper (method), jinfo, MONO_PROFILE_OK);
4236 mono_profiler_method_end_jit (method, jinfo, MONO_PROFILE_OK);
4237 if (prof_method != method) {
4238 mono_profiler_method_end_jit (prof_method, jinfo, MONO_PROFILE_OK);
4242 ex = mono_runtime_class_init_full (vtable, FALSE);
4253 mono_arch_instrument_epilog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments) {
4254 return mono_arch_instrument_epilog_full (cfg, func, p, enable_arguments, FALSE);
4258 mono_cfg_add_try_hole (MonoCompile *cfg, MonoExceptionClause *clause, guint8 *start, MonoBasicBlock *bb)
4260 TryBlockHole *hole = mono_mempool_alloc (cfg->mempool, sizeof (TryBlockHole));
4261 hole->clause = clause;
4262 hole->start_offset = start - cfg->native_code;
4263 hole->basic_block = bb;
4265 cfg->try_block_holes = g_slist_append_mempool (cfg->mempool, cfg->try_block_holes, hole);
4269 mono_cfg_set_exception (MonoCompile *cfg, int type)
4271 cfg->exception_type = type;
4276 /* Dummy versions of some arch specific functions to avoid ifdefs at call sites */
4278 #ifndef MONO_ARCH_GSHAREDVT_SUPPORTED
4281 mono_arch_gsharedvt_sig_supported (MonoMethodSignature *sig)
4287 mono_arch_get_gsharedvt_call_info (gpointer addr, MonoMethodSignature *normal_sig, MonoMethodSignature *gsharedvt_sig, MonoGenericSharingContext *gsctx, gboolean gsharedvt_in, gint32 vcall_offset, gboolean calli)
4289 g_assert_not_reached ();
4294 mono_arch_get_gsharedvt_arg_trampoline (MonoDomain *domain, gpointer arg, gpointer addr)
4296 g_assert_not_reached ();
4301 mono_arch_get_gsharedvt_trampoline (MonoTrampInfo **info, gboolean aot)
4303 g_assert_not_reached ();
/* Arch support exists, but gsharedvt was disabled at configure time. */
#if defined(MONO_ARCH_GSHAREDVT_SUPPORTED) && !defined(ENABLE_GSHAREDVT)

gboolean
mono_arch_gsharedvt_sig_supported (MonoMethodSignature *sig)
{
	return FALSE;
}

/* NOTE(review): body was truncated in this copy; restored as an unreachable
 * stub returning NULL — confirm against upstream. */
gpointer
mono_arch_get_gsharedvt_call_info (gpointer addr, MonoMethodSignature *normal_sig, MonoMethodSignature *gsharedvt_sig, MonoGenericSharingContext *gsctx, gboolean gsharedvt_in, gint32 vcall_offset, gboolean calli)
{
	g_assert_not_reached ();
	return NULL;
}

#endif
4326 #ifdef USE_JUMP_TABLES
4327 #define DEFAULT_JUMPTABLE_CHUNK_ELEMENTS 128
4329 typedef struct MonoJumpTableChunk {
4332 struct MonoJumpTableChunk *previous;
4333 /* gpointer entries[total]; */
4334 } MonoJumpTableChunk;
4336 static MonoJumpTableChunk* g_jumptable;
4337 #define mono_jumptable_lock() mono_mutex_lock (&jumptable_mutex)
4338 #define mono_jumptable_unlock() mono_mutex_unlock (&jumptable_mutex)
4339 static mono_mutex_t jumptable_mutex;
4341 static MonoJumpTableChunk*
4342 mono_create_jumptable_chunk (guint32 max_entries)
4344 guint32 size = sizeof (MonoJumpTableChunk) + max_entries * sizeof(gpointer);
4345 MonoJumpTableChunk *chunk = (MonoJumpTableChunk*) g_new0 (guchar, size);
4346 chunk->total = max_entries;
4351 mono_jumptable_init (void)
4353 if (g_jumptable == NULL) {
4354 mono_mutex_init_recursive (&jumptable_mutex);
4355 g_jumptable = mono_create_jumptable_chunk (DEFAULT_JUMPTABLE_CHUNK_ELEMENTS);
4360 mono_jumptable_add_entry (void)
4362 return mono_jumptable_add_entries (1);
4366 mono_jumptable_add_entries (guint32 entries)
4371 mono_jumptable_init ();
4372 mono_jumptable_lock ();
4373 index = g_jumptable->active;
4374 if (index + entries >= g_jumptable->total) {
4376 * Grow jumptable, by adding one more chunk.
4377 * We cannot realloc jumptable, as there could be pointers
4378 * to existing jump table entries in the code, so instead
4379 * we just add one more chunk.
4381 guint32 max_entries = entries;
4382 MonoJumpTableChunk *new_chunk;
4384 if (max_entries < DEFAULT_JUMPTABLE_CHUNK_ELEMENTS)
4385 max_entries = DEFAULT_JUMPTABLE_CHUNK_ELEMENTS;
4386 new_chunk = mono_create_jumptable_chunk (max_entries);
4387 /* Link old jumptable, so that we could free it up later. */
4388 new_chunk->previous = g_jumptable;
4389 g_jumptable = new_chunk;
4392 g_jumptable->active = index + entries;
4393 result = (gpointer*)((guchar*)g_jumptable + sizeof(MonoJumpTableChunk)) + index;
4394 mono_jumptable_unlock();
4400 mono_jumptable_cleanup (void)
4403 MonoJumpTableChunk *current = g_jumptable, *prev;
4404 while (current != NULL) {
4405 prev = current->previous;
4410 mono_mutex_destroy (&jumptable_mutex);
4415 mono_jumptable_get_entry (guint8 *code_ptr)
4417 return mono_arch_jumptable_entry_from_code (code_ptr);
4422 * mini_replace_type:
4424 * Replace the type used in the metadata stream with what the JIT will actually use during compilation.
4427 mini_replace_type (MonoType *type)
4429 type = mono_type_get_underlying_type (type);
4430 return mini_native_type_replace_type (type);
4434 * mini_get_underlying_type:
4436 * Return the type the JIT will use during compilation.
4437 * Handles: byref, enums, native types, generic sharing.
4438 * For gsharedvt types, it will return the original VAR/MVAR.
4441 mini_get_underlying_type (MonoCompile *cfg, MonoType *type)
4443 type = mini_type_get_underlying_type (cfg->generic_sharing_context, type);
4444 return mini_native_type_replace_type (type);
4448 mini_jit_init (void)
4450 mono_mutex_init_recursive (&jit_mutex);
4454 mini_jit_cleanup (void)
4456 g_free (emul_opcode_map);
4457 g_free (emul_opcode_opcodes);