2 * mini.c: The new Mono code generator.
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * Copyright 2002-2003 Ximian, Inc.
9 * Copyright 2003-2010 Novell, Inc.
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
11 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
22 #ifdef HAVE_SYS_TIME_H
26 #include <mono/utils/memcheck.h>
28 #include <mono/metadata/assembly.h>
29 #include <mono/metadata/loader.h>
30 #include <mono/metadata/tabledefs.h>
31 #include <mono/metadata/class.h>
32 #include <mono/metadata/object.h>
33 #include <mono/metadata/tokentype.h>
34 #include <mono/metadata/tabledefs.h>
35 #include <mono/metadata/threads.h>
36 #include <mono/metadata/appdomain.h>
37 #include <mono/metadata/debug-helpers.h>
38 #include "mono/metadata/profiler.h"
39 #include <mono/metadata/profiler-private.h>
40 #include <mono/metadata/mono-config.h>
41 #include <mono/metadata/environment.h>
42 #include <mono/metadata/mono-debug.h>
43 #include <mono/metadata/gc-internals.h>
44 #include <mono/metadata/threads-types.h>
45 #include <mono/metadata/verify.h>
46 #include <mono/metadata/verify-internals.h>
47 #include <mono/metadata/mempool-internals.h>
48 #include <mono/metadata/attach.h>
49 #include <mono/metadata/runtime.h>
50 #include <mono/utils/mono-math.h>
51 #include <mono/utils/mono-compiler.h>
52 #include <mono/utils/mono-counters.h>
53 #include <mono/utils/mono-error-internals.h>
54 #include <mono/utils/mono-logger-internals.h>
55 #include <mono/utils/mono-mmap.h>
56 #include <mono/utils/mono-path.h>
57 #include <mono/utils/mono-tls.h>
58 #include <mono/utils/mono-hwcap.h>
59 #include <mono/utils/dtrace.h>
60 #include <mono/utils/mono-threads.h>
61 #include <mono/io-layer/io-layer.h>
64 #include "seq-points.h"
72 #include "jit-icalls.h"
75 #include "debugger-agent.h"
76 #include "llvm-runtime.h"
77 #include "mini-llvm.h"
/* JIT-wide globals: call-trace specification, async-exception injection target,
 * basic-block breakpoint target, x86 stack-alignment and xdebug flags.
 * NOTE(review): presumably initialized by command-line/option parsing elsewhere
 * in the JIT — confirm against mini-runtime option handling. */
79 MonoTraceSpec *mono_jit_trace_calls;
80 MonoMethodDesc *mono_inject_async_exc_method;
81 int mono_inject_async_exc_pos;
82 MonoMethodDesc *mono_break_at_bb_method;
83 int mono_break_at_bb_bb_num;
84 gboolean mono_do_x86_stack_align = TRUE;
85 gboolean mono_using_xdebug;
/* Global JIT lock; guards shared JIT state via jit_mutex below. */
87 #define mono_jit_lock() mono_os_mutex_lock (&jit_mutex)
88 #define mono_jit_unlock() mono_os_mutex_unlock (&jit_mutex)
89 static mono_mutex_t jit_mutex;
/* Backend capability descriptor for the architecture we are JITting for. */
91 MonoBackend *current_backend;
/*
 * mono_realloc_native_code:
 *
 *   Grow CFG->native_code to CFG->code_size bytes. On the default code
 * generator this is a plain g_realloc; on Native Client the buffer is
 * over-allocated by kNaClAlignment and the code is re-aligned (and memmoved)
 * to the next bundle boundary after the realloc.
 */
96 mono_realloc_native_code (MonoCompile *cfg)
98 #if defined(__default_codegen__)
99 	return g_realloc (cfg->native_code, cfg->code_size);
100 #elif defined(__native_client_codegen__)
102 	gpointer native_code;
103 	guint alignment_check;
105 	/* Save the old alignment offset so we can re-align after the realloc. */
106 	old_padding = (guint)(cfg->native_code - cfg->native_code_alloc);
107 	cfg->code_size = NACL_BUNDLE_ALIGN_UP (cfg->code_size);
109 	cfg->native_code_alloc = g_realloc ( cfg->native_code_alloc,
110 										 cfg->code_size + kNaClAlignment );
112 	/* Align native_code to next nearest kNaClAlignment byte. */
/* NOTE(review): pointers are round-tripped through guint here — only valid on
 * 32-bit targets; NaCl codegen appears to assume that, but confirm. */
113 	native_code = (guint)cfg->native_code_alloc + kNaClAlignment;
114 	native_code = (guint)native_code & ~kNaClAlignmentMask;
116 	/* Shift the data to be 32-byte aligned again. */
117 	memmove (native_code, cfg->native_code_alloc + old_padding, cfg->code_size);
119 	alignment_check = (guint)native_code & kNaClAlignmentMask;
120 	g_assert (alignment_check == 0);
/* Unsupported codegen configuration: unreachable; the return below only
 * silences the compiler. */
123 	g_assert_not_reached ();
124 	return cfg->native_code;
128 #ifdef __native_client_codegen__
130 /* Prevent instructions from straddling a 32-byte alignment boundary. */
131 /* Instructions longer than 32 bytes must be aligned internally. */
132 /* IN: pcode, instlen */
/* Pad *pcode forward so an instruction of INSTLEN bytes does not straddle a
 * kNaClAlignment-byte bundle boundary. Instructions >= kNaClAlignment bytes
 * are not supported (asserted below). */
134 void mono_nacl_align_inst(guint8 **pcode, int instlen) {
137   space_in_block = kNaClAlignment - ((uintptr_t)(*pcode) & kNaClAlignmentMask);
139   if (G_UNLIKELY (instlen >= kNaClAlignment)) {
140     g_assert_not_reached();
141   } else if (instlen > space_in_block) {
/* Not enough room left in this bundle: fill it with padding and start fresh. */
142     *pcode = mono_arch_nacl_pad(*pcode, space_in_block);
146 /* Move emitted call sequence to the end of a kNaClAlignment-byte block. */
147 /* IN: start pointer to start of call sequence */
148 /* IN: pcode pointer to end of call sequence (current "IP") */
149 /* OUT: start pointer to the start of the call sequence after padding */
150 /* OUT: pcode pointer to the end of the call sequence after padding */
/* Move an already-emitted call sequence ([*start, *pcode)) so that it ends at
 * a bundle boundary, as NaCl requires for calls: the bytes are saved, padding
 * is emitted in their place, and the sequence is re-copied after the padding.
 * START/PCODE are updated to the relocated sequence. */
151 void mono_nacl_align_call(guint8 **start, guint8 **pcode) {
152   const size_t MAX_NACL_CALL_LENGTH = kNaClAlignment;
153   guint8 copy_of_call[MAX_NACL_CALL_LENGTH];
156   const size_t length = (size_t)((*pcode)-(*start));
157   g_assert(length < MAX_NACL_CALL_LENGTH);
/* Save the call bytes, emit padding over them, then replay the call. */
159   memcpy(copy_of_call, *start, length);
160   temp = mono_nacl_pad_call(*start, (guint8)length);
161   memcpy(temp, copy_of_call, length);
163   (*pcode) = temp + length;
166 /* mono_nacl_pad_call(): Insert padding for Native Client call instructions */
167 /* code pointer to buffer for emitting code */
168 /* ilength length of call instruction */
/* Emit padding at CODE so that a call instruction of ILENGTH bytes ends
 * exactly on a bundle boundary. Returns the address at which the call should
 * be (re-)emitted. */
169 guint8 *mono_nacl_pad_call(guint8 *code, guint8 ilength) {
170   int freeSpaceInBlock = kNaClAlignment - ((uintptr_t)code & kNaClAlignmentMask);
171   int padding = freeSpaceInBlock - ilength;
174     /* There isn't enough space in this block for the instruction. */
175     /* Fill this block and start a new one. */
176     code = mono_arch_nacl_pad(code, freeSpaceInBlock);
177     freeSpaceInBlock = kNaClAlignment;
178     padding = freeSpaceInBlock - ilength;
180   g_assert(ilength > 0);
181   g_assert(padding >= 0);
182   g_assert(padding < kNaClAlignment);
183   if (0 == padding) return code;
184   return mono_arch_nacl_pad(code, padding);
/* Round CODE up to the next bundle boundary, emitting padding if it is not
 * already aligned; a padding value equal to kNaClAlignment means "already
 * aligned" and is skipped. */
187 guint8 *mono_nacl_align(guint8 *code) {
188   int padding = kNaClAlignment - ((uintptr_t)code & kNaClAlignmentMask);
189   if (padding != kNaClAlignment) code = mono_arch_nacl_pad(code, padding);
/* After NaCl padding has been inserted, patch targets may now point at nops;
 * walk the patch list and advance each patch IP past any nops so it lands on
 * the real instruction. No-op when jump tables are in use. */
193 void mono_nacl_fix_patches(const guint8 *code, MonoJumpInfo *ji)
195 #ifndef USE_JUMP_TABLES
196   MonoJumpInfo *patch_info;
197   for (patch_info = ji; patch_info; patch_info = patch_info->next) {
198     unsigned char *ip = patch_info->ip.i + code;
199     ip = mono_arch_nacl_skip_nops(ip);
/* Store the offset back relative to CODE. */
200     patch_info->ip.i = ip - code;
204 #endif  /* __native_client_codegen__ */
206 #ifdef USE_JUMP_TABLES
208 #define DEFAULT_JUMPTABLE_CHUNK_ELEMENTS 128
/* A chunk of the global jump table. Entries are stored inline after the
 * header (see the commented-out flexible member below); chunks are chained
 * via 'previous' because existing chunks can never be moved/reallocated —
 * generated code holds raw pointers into them. */
210 typedef struct MonoJumpTableChunk {
213 	struct MonoJumpTableChunk *previous;
214 	/* gpointer entries [total]; */
215 } MonoJumpTableChunk;
/* Head of the chunk chain plus the lock that guards it. */
217 static MonoJumpTableChunk* g_jumptable;
218 #define mono_jumptable_lock() mono_os_mutex_lock (&jumptable_mutex)
219 #define mono_jumptable_unlock() mono_os_mutex_unlock (&jumptable_mutex)
220 static mono_mutex_t jumptable_mutex;
/* Allocate a zeroed chunk with room for MAX_ENTRIES inline entries. */
222 static MonoJumpTableChunk*
223 mono_create_jumptable_chunk (guint32 max_entries)
225 	guint32 size = sizeof (MonoJumpTableChunk) + max_entries * sizeof(gpointer);
226 	MonoJumpTableChunk *chunk = (MonoJumpTableChunk*) g_new0 (guchar, size);
227 	chunk->total = max_entries;
/* Lazily create the jumptable mutex and the first chunk. */
232 mono_jumptable_init (void)
234 	if (g_jumptable == NULL) {
235 		mono_os_mutex_init_recursive (&jumptable_mutex);
236 		g_jumptable = mono_create_jumptable_chunk (DEFAULT_JUMPTABLE_CHUNK_ELEMENTS);
/* Convenience wrapper: reserve a single jump-table entry. */
241 mono_jumptable_add_entry (void)
243 	return mono_jumptable_add_entries (1);
/* Reserve ENTRIES consecutive slots in the global jump table and return a
 * pointer to the first one. Thread-safe (takes the jumptable lock). */
247 mono_jumptable_add_entries (guint32 entries)
252 	mono_jumptable_init ();
253 	mono_jumptable_lock ();
254 	index = g_jumptable->active;
255 	if (index + entries >= g_jumptable->total) {
257 		 * Grow jumptable, by adding one more chunk.
258 		 * We cannot realloc jumptable, as there could be pointers
259 		 * to existing jump table entries in the code, so instead
260 		 * we just add one more chunk.
262 		guint32 max_entries = entries;
263 		MonoJumpTableChunk *new_chunk;
265 		if (max_entries < DEFAULT_JUMPTABLE_CHUNK_ELEMENTS)
266 			max_entries = DEFAULT_JUMPTABLE_CHUNK_ELEMENTS;
267 		new_chunk = mono_create_jumptable_chunk (max_entries);
268 		/* Link old jumptable, so that we could free it up later. */
269 		new_chunk->previous = g_jumptable;
270 		g_jumptable = new_chunk;
/* Slots live inline right after the chunk header. */
273 	g_jumptable->active = index + entries;
274 	result = (gpointer*)((guchar*)g_jumptable + sizeof(MonoJumpTableChunk)) + index;
275 	mono_jumptable_unlock();
/* Free the whole chunk chain (walking the 'previous' links) and destroy the
 * jumptable mutex. Only safe at runtime shutdown, when no generated code can
 * still reference the entries. */
281 mono_jumptable_cleanup (void)
284 		MonoJumpTableChunk *current = g_jumptable, *prev;
285 		while (current != NULL) {
286 			prev = current->previous;
291 		mono_os_mutex_destroy (&jumptable_mutex);
/* Map a code address back to its jump-table entry (arch-specific decode). */
296 mono_jumptable_get_entry (guint8 *code_ptr)
298 	return mono_arch_jumptable_entry_from_code (code_ptr);
301 #endif /* USE_JUMP_TABLES */
304 MonoExceptionClause *clause;
305 MonoBasicBlock *basic_block;
310 * mono_emit_unwind_op:
312 * Add an unwind op with the given parameters for the list of unwind ops stored in
/* Append a DWARF-style unwind op (TAG is a DW_CFA_* constant, WHEN is the
 * native-code offset it applies at) to CFG->unwind_ops, allocated from the
 * compile mempool. The printfs below are verbose-mode tracing only. */
316 mono_emit_unwind_op (MonoCompile *cfg, int when, int tag, int reg, int val)
318 	MonoUnwindOp *op = (MonoUnwindOp *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoUnwindOp));
325 	cfg->unwind_ops = g_slist_append_mempool (cfg->mempool, cfg->unwind_ops, op);
326 	if (cfg->verbose_level > 1) {
329 			printf ("CFA: [%x] def_cfa: %s+0x%x\n", when, mono_arch_regname (reg), val);
331 		case DW_CFA_def_cfa_register:
332 			printf ("CFA: [%x] def_cfa_reg: %s\n", when, mono_arch_regname (reg));
334 		case DW_CFA_def_cfa_offset:
335 			printf ("CFA: [%x] def_cfa_offset: 0x%x\n", when, val);
/* Register-save offsets are stored negated relative to the CFA. */
338 			printf ("CFA: [%x] offset: %s at cfa-0x%x\n", when, mono_arch_regname (reg), -val);
/* Initialize a MonoMethodVar; 0xffff marks the first-use position as "not
 * seen yet". */
344 #define MONO_INIT_VARINFO(vi,id) do { \
345 	(vi)->range.first_use.pos.bid = 0xffff; \
351  * mono_unlink_bblock:
353  *   Unlink two basic blocks: remove TO from FROM's out-edge list and FROM
 * from TO's in-edge list, compacting each array in place.
356 mono_unlink_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
/* First pass: check the edge actually exists in FROM's successors. */
362 	for (i = 0; i < from->out_count; ++i) {
363 		if (to == from->out_bb [i]) {
/* Compact out_bb, dropping TO; exactly one entry must disappear. */
370 		for (i = 0; i < from->out_count; ++i) {
371 			if (from->out_bb [i] != to)
372 				from->out_bb [pos ++] = from->out_bb [i];
374 		g_assert (pos == from->out_count - 1);
/* Mirror of the above for TO's predecessor list. */
379 	for (i = 0; i < to->in_count; ++i) {
380 		if (from == to->in_bb [i]) {
387 		for (i = 0; i < to->in_count; ++i) {
388 			if (to->in_bb [i] != from)
389 				to->in_bb [pos ++] = to->in_bb [i];
391 		g_assert (pos == to->in_count - 1);
397  * mono_bblocks_linked:
399  *   Return whether BB1 and BB2 are linked in the CFG, i.e. BB2 appears in
 * BB1's successor list.
402 mono_bblocks_linked (MonoBasicBlock *bb1, MonoBasicBlock *bb2)
406 	for (i = 0; i < bb1->out_count; ++i) {
407 		if (bb1->out_bb [i] == bb2)
/* Compute the exception-region id for IL OFFSET, ignoring try regions: the
 * result encodes the 1-based clause index in the high bits, a MONO_REGION_*
 * kind, and the clause flags. Filter/finally/fault/catch handler bodies are
 * each recognized explicitly. */
415 mono_find_block_region_notry (MonoCompile *cfg, int offset)
417 	MonoMethodHeader *header = cfg->header;
418 	MonoExceptionClause *clause;
421 	for (i = 0; i < header->num_clauses; ++i) {
422 		clause = &header->clauses [i];
/* A filter region runs from filter_offset up to the handler start. */
423 		if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
424 		    (offset < (clause->handler_offset)))
425 			return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
427 		if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
428 			if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
429 				return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
430 			else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
431 				return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
/* Anything else inside a handler is a catch region. */
433 				return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
441  * mono_get_block_region_notry:
443  *   Return the region corresponding to REGION, ignoring try clauses nested
 * inside it: if REGION is a try region, re-resolve it from the clause's
 * try_offset so the enclosing (non-try) region is returned instead.
447 mono_get_block_region_notry (MonoCompile *cfg, int region)
449 	if ((region & (0xf << 4)) == MONO_REGION_TRY) {
450 		MonoMethodHeader *header = cfg->header;
453 		 * This can happen if a try clause is nested inside a finally clause.
/* Recover the 0-based clause index from the encoded region id. */
455 		int clause_index = (region >> 8) - 1;
456 		g_assert (clause_index >= 0 && clause_index < header->num_clauses);
458 		region = mono_find_block_region_notry (cfg, header->clauses [clause_index].try_offset);
/* Look up the stack-pointer variable for REGION (try regions are first
 * normalized to their enclosing region). */
465 mono_find_spvar_for_region (MonoCompile *cfg, int region)
467 	region = mono_get_block_region_notry (cfg, region);
469 	return (MonoInst *)g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
/* Recursive depth-first numbering of the CFG starting at START: assigns each
 * reachable block a dfn, records its DFS parent, and stores it into ARRAY
 * indexed by dfn. Blocks with a nonzero dfn are treated as already visited. */
473 df_visit (MonoBasicBlock *start, int *dfn, MonoBasicBlock **array)
477 	array [*dfn] = start;
478 	/* g_print ("visit %d at %p (BB%ld)\n", *dfn, start->cil_code, start->block_num); */
479 	for (i = 0; i < start->out_count; ++i) {
480 		if (start->out_bb [i]->dfn)
483 		start->out_bb [i]->dfn = *dfn;
484 		start->out_bb [i]->df_parent = start;
485 		array [*dfn] = start->out_bb [i];
486 		df_visit (start->out_bb [i], dfn, array);
/* Return the logically inverted conditional-branch opcode for OPCODE (e.g.
 * BEQ -> BNE_UN). Separate lookup tables cover the CIL, float (F), long (L)
 * and int (I) opcode families; each table is indexed by the offset from that
 * family's BEQ opcode, relying on the families sharing the same ordering. */
491 mono_reverse_branch_op (guint32 opcode)
493 	static const int reverse_map [] = {
494 		CEE_BNE_UN, CEE_BLT, CEE_BLE, CEE_BGT, CEE_BGE,
495 		CEE_BEQ, CEE_BLT_UN, CEE_BLE_UN, CEE_BGT_UN, CEE_BGE_UN
497 	static const int reverse_fmap [] = {
498 		OP_FBNE_UN, OP_FBLT, OP_FBLE, OP_FBGT, OP_FBGE,
499 		OP_FBEQ, OP_FBLT_UN, OP_FBLE_UN, OP_FBGT_UN, OP_FBGE_UN
501 	static const int reverse_lmap [] = {
502 		OP_LBNE_UN, OP_LBLT, OP_LBLE, OP_LBGT, OP_LBGE,
503 		OP_LBEQ, OP_LBLT_UN, OP_LBLE_UN, OP_LBGT_UN, OP_LBGE_UN
505 	static const int reverse_imap [] = {
506 		OP_IBNE_UN, OP_IBLT, OP_IBLE, OP_IBGT, OP_IBGE,
507 		OP_IBEQ, OP_IBLT_UN, OP_IBLE_UN, OP_IBGT_UN, OP_IBGE_UN
510 	if (opcode >= CEE_BEQ && opcode <= CEE_BLT_UN) {
511 		opcode = reverse_map [opcode - CEE_BEQ];
512 	} else if (opcode >= OP_FBEQ && opcode <= OP_FBLT_UN) {
513 		opcode = reverse_fmap [opcode - OP_FBEQ];
514 	} else if (opcode >= OP_LBEQ && opcode <= OP_LBLT_UN) {
515 		opcode = reverse_lmap [opcode - OP_LBEQ];
516 	} else if (opcode >= OP_IBEQ && opcode <= OP_IBLT_UN) {
517 		opcode = reverse_imap [opcode - OP_IBEQ];
/* Non-conditional-branch opcodes are a caller bug. */
519 		g_assert_not_reached ();
/* Map a MonoType to the OP_STORE*_MEMBASE_REG opcode used to store a value
 * of that type to memory: sized integer stores for small ints, pointer-sized
 * stores for references/pointers, R4/R8 for floats, and OP_STOREV_MEMBASE
 * (or OP_STOREX_MEMBASE for SIMD) for value types. */
525 mono_type_to_store_membase (MonoCompile *cfg, MonoType *type)
527 	type = mini_get_underlying_type (type);
530 	switch (type->type) {
533 		return OP_STOREI1_MEMBASE_REG;
536 		return OP_STOREI2_MEMBASE_REG;
539 		return OP_STOREI4_MEMBASE_REG;
543 	case MONO_TYPE_FNPTR:
544 		return OP_STORE_MEMBASE_REG;
545 	case MONO_TYPE_CLASS:
546 	case MONO_TYPE_STRING:
547 	case MONO_TYPE_OBJECT:
548 	case MONO_TYPE_SZARRAY:
549 	case MONO_TYPE_ARRAY:
550 		return OP_STORE_MEMBASE_REG;
553 		return OP_STOREI8_MEMBASE_REG;
555 		return OP_STORER4_MEMBASE_REG;
557 		return OP_STORER8_MEMBASE_REG;
558 	case MONO_TYPE_VALUETYPE:
/* Enums store as their underlying integral type. */
559 		if (type->data.klass->enumtype) {
560 			type = mono_class_enum_basetype (type->data.klass);
563 		if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
564 			return OP_STOREX_MEMBASE;
565 		return OP_STOREV_MEMBASE;
566 	case MONO_TYPE_TYPEDBYREF:
567 		return OP_STOREV_MEMBASE;
568 	case MONO_TYPE_GENERICINST:
/* Re-dispatch on the generic container's open type. */
569 		type = &type->data.generic_class->container_class->byval_arg;
/* VAR/MVAR only reach here for gsharedvt value types. */
573 		g_assert (mini_type_var_is_vt (type));
574 		return OP_STOREV_MEMBASE;
576 		g_error ("unknown type 0x%02x in type_to_store_membase", type->type);
/* Dual of mono_type_to_store_membase: map a MonoType to the OP_LOAD*_MEMBASE
 * opcode used to load a value of that type, distinguishing signed/unsigned
 * widths for sub-word integers. */
582 mono_type_to_load_membase (MonoCompile *cfg, MonoType *type)
584 	type = mini_get_underlying_type (type);
586 	switch (type->type) {
588 		return OP_LOADI1_MEMBASE;
590 		return OP_LOADU1_MEMBASE;
592 		return OP_LOADI2_MEMBASE;
594 		return OP_LOADU2_MEMBASE;
596 		return OP_LOADI4_MEMBASE;
598 		return OP_LOADU4_MEMBASE;
602 	case MONO_TYPE_FNPTR:
603 		return OP_LOAD_MEMBASE;
604 	case MONO_TYPE_CLASS:
605 	case MONO_TYPE_STRING:
606 	case MONO_TYPE_OBJECT:
607 	case MONO_TYPE_SZARRAY:
608 	case MONO_TYPE_ARRAY:
609 		return OP_LOAD_MEMBASE;
612 		return OP_LOADI8_MEMBASE;
614 		return OP_LOADR4_MEMBASE;
616 		return OP_LOADR8_MEMBASE;
617 	case MONO_TYPE_VALUETYPE:
618 		if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
619 			return OP_LOADX_MEMBASE;
/* Non-SIMD value types fall through to the OP_LOADV_MEMBASE below. */
620 	case MONO_TYPE_TYPEDBYREF:
621 		return OP_LOADV_MEMBASE;
622 	case MONO_TYPE_GENERICINST:
623 		if (mono_type_generic_inst_is_valuetype (type))
624 			return OP_LOADV_MEMBASE;
626 		return OP_LOAD_MEMBASE;
/* VAR/MVAR: only valid under gshared with a value-type constraint. */
630 		g_assert (cfg->gshared);
631 		g_assert (mini_type_var_is_vt (type));
632 		return OP_LOADV_MEMBASE;
634 		g_error ("unknown type 0x%02x in type_to_load_membase", type->type);
/* Return the CEE_STIND_* opcode for TYPE; under generic sharing, bare
 * VAR/MVAR types are only legal when they are gsharedvt value types. */
640 mini_type_to_stind (MonoCompile* cfg, MonoType *type)
642 	type = mini_get_underlying_type (type);
643 	if (cfg->gshared && !type->byref && (type->type == MONO_TYPE_VAR || type->type == MONO_TYPE_MVAR)) {
644 		g_assert (mini_type_var_is_vt (type));
647 	return mono_type_to_stind (type);
/* Map an OP_.._IMM opcode to its register-register (non-immediate) variant.
 * On 32-bit targets (SIZEOF_REGISTER == 4) several pointer-sized IMM opcodes
 * map to the I-variants instead of the L-variants. (Most of the opcode case
 * table is elided from this view.) */
651 mono_op_imm_to_op (int opcode)
655 #if SIZEOF_REGISTER == 4
673 #if SIZEOF_REGISTER == 4
679 #if SIZEOF_REGISTER == 4
685 #if SIZEOF_REGISTER == 4
731 #if SIZEOF_REGISTER == 4
737 #if SIZEOF_REGISTER == 4
756 	case OP_ICOMPARE_IMM:
758 	case OP_LOCALLOC_IMM:
766  * mono_decompose_op_imm:
768  *   Replace the OP_.._IMM INS with its non-IMM variant: materialize the
 * immediate into a fresh vreg (as two OP_ICONSTs for a 64-bit immediate on
 * 32-bit targets) inserted before INS, then rewrite INS's opcode.
771 mono_decompose_op_imm (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst *ins)
773 	int opcode2 = mono_op_imm_to_op (ins->opcode);
776 	const char *spec = INS_INFO (ins->opcode);
/* 'l' second source: the immediate is 64-bit, needs an lreg pair. */
778 	if (spec [MONO_INST_SRC2] == 'l') {
779 		dreg = mono_alloc_lreg (cfg);
781 		/* Load the 64bit constant using decomposed ops */
782 		MONO_INST_NEW (cfg, temp, OP_ICONST);
783 		temp->inst_c0 = ins->inst_ls_word;
784 		temp->dreg = MONO_LVREG_LS (dreg);
785 		mono_bblock_insert_before_ins (bb, ins, temp);
787 		MONO_INST_NEW (cfg, temp, OP_ICONST);
788 		temp->inst_c0 = ins->inst_ms_word;
789 		temp->dreg = MONO_LVREG_MS (dreg);
791 		dreg = mono_alloc_ireg (cfg);
793 		MONO_INST_NEW (cfg, temp, OP_ICONST);
794 		temp->inst_c0 = ins->inst_imm;
798 		mono_bblock_insert_before_ins (bb, ins, temp);
/* No non-IMM variant exists: this opcode cannot be decomposed. */
801 		g_error ("mono_op_imm_to_op failed for %s\n", mono_inst_name (ins->opcode));
802 	ins->opcode = opcode2;
804 	if (ins->opcode == OP_LOCALLOC)
/* Keep the bblock's max vreg in sync with the vregs just allocated. */
809 	bb->max_vreg = MAX (bb->max_vreg, cfg->next_vreg);
/* Record INST as the variable backing VREG in cfg->vreg_to_inst, growing the
 * mempool-allocated array (doubling, min 32) when VREG is out of range. */
813 set_vreg_to_inst (MonoCompile *cfg, int vreg, MonoInst *inst)
815 	if (vreg >= cfg->vreg_to_inst_len) {
816 		MonoInst **tmp = cfg->vreg_to_inst;
817 		int size = cfg->vreg_to_inst_len;
819 		while (vreg >= cfg->vreg_to_inst_len)
820 			cfg->vreg_to_inst_len = cfg->vreg_to_inst_len ? cfg->vreg_to_inst_len * 2 : 32;
/* Mempool memory is never freed individually, so the old array is simply
 * abandoned after the copy. */
821 		cfg->vreg_to_inst = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * cfg->vreg_to_inst_len);
823 			memcpy (cfg->vreg_to_inst, tmp, size * sizeof (MonoInst*));
825 	cfg->vreg_to_inst [vreg] = inst;
/* Type classification helpers for variable creation below (byref types are
 * never long/float for this purpose). */
828 #define mono_type_is_long(type) (!(type)->byref && ((mono_type_get_underlying_type (type)->type == MONO_TYPE_I8) || (mono_type_get_underlying_type (type)->type == MONO_TYPE_U8)))
829 #define mono_type_is_float(type) (!(type)->byref && (((type)->type == MONO_TYPE_R8) || ((type)->type == MONO_TYPE_R4)))
/* Create a new JIT variable of TYPE (OP_LOCAL/OP_ARG) backed by VREG:
 * grows cfg->varinfo/cfg->vars as needed, fills in the MonoInst, registers
 * GC-map tracking for reference/managed-pointer vregs, and on 32-bit
 * soft-float targets allocates the dummy component variables for register
 * pairs. Returns the new MonoInst. */
832 mono_compile_create_var_for_vreg (MonoCompile *cfg, MonoType *type, int opcode, int vreg)
835 	int num = cfg->num_varinfo;
838 	type = mini_get_underlying_type (type);
/* Grow the parallel varinfo/vars arrays (doubling, min 32). */
840 	if ((num + 1) >= cfg->varinfo_count) {
841 		int orig_count = cfg->varinfo_count;
842 		cfg->varinfo_count = cfg->varinfo_count ? (cfg->varinfo_count * 2) : 32;
843 		cfg->varinfo = (MonoInst **)g_realloc (cfg->varinfo, sizeof (MonoInst*) * cfg->varinfo_count);
844 		cfg->vars = (MonoMethodVar *)g_realloc (cfg->vars, sizeof (MonoMethodVar) * cfg->varinfo_count);
845 		memset (&cfg->vars [orig_count], 0, (cfg->varinfo_count - orig_count) * sizeof (MonoMethodVar));
848 	cfg->stat_allocate_var++;
850 	MONO_INST_NEW (cfg, inst, opcode);
852 	inst->inst_vtype = type;
853 	inst->klass = mono_class_from_mono_type (type);
854 	type_to_eval_stack_type (cfg, type, inst);
855 	/* if set to 1 the variable is native */
856 	inst->backend.is_pinvoke = 0;
859 	if (mono_class_has_failure (inst->klass))
860 		mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
/* GC maps: record which vregs hold managed pointers / references. */
862 	if (cfg->compute_gc_maps) {
864 			mono_mark_vreg_as_mp (cfg, vreg);
866 			if ((MONO_TYPE_ISSTRUCT (type) && inst->klass->has_references) || mini_type_is_reference (type)) {
867 				inst->flags |= MONO_INST_GC_TRACK;
868 				mono_mark_vreg_as_ref (cfg, vreg);
873 	cfg->varinfo [num] = inst;
875 	MONO_INIT_VARINFO (&cfg->vars [num], num);
876 	MONO_VARINFO (cfg, num)->vreg = vreg;
879 		set_vreg_to_inst (cfg, vreg, inst);
881 #if SIZEOF_REGISTER == 4
/* On 32-bit targets longs (and floats under soft-float) occupy a vreg pair. */
882 	if (mono_arch_is_soft_float ()) {
883 		regpair = mono_type_is_long (type) || mono_type_is_float (type);
885 		regpair = mono_type_is_long (type);
895 		 * These two cannot be allocated using create_var_for_vreg since that would
896 		 * put it into the cfg->varinfo array, confusing many parts of the JIT.
900 		 * Set flags to VOLATILE so SSA skips it.
903 		if (cfg->verbose_level >= 4) {
904 			printf ("  Create LVAR R%d (R%d, R%d)\n", inst->dreg, MONO_LVREG_LS (inst->dreg), MONO_LVREG_MS (inst->dreg));
907 		if (mono_arch_is_soft_float () && cfg->opt & MONO_OPT_SSA) {
908 			if (mono_type_is_float (type))
909 				inst->flags = MONO_INST_VOLATILE;
912 		/* Allocate a dummy MonoInst for the first vreg */
913 		MONO_INST_NEW (cfg, tree, OP_LOCAL);
914 		tree->dreg = MONO_LVREG_LS (inst->dreg);
915 		if (cfg->opt & MONO_OPT_SSA)
916 			tree->flags = MONO_INST_VOLATILE;
918 		tree->type = STACK_I4;
919 		tree->inst_vtype = &mono_defaults.int32_class->byval_arg;
920 		tree->klass = mono_class_from_mono_type (tree->inst_vtype);
922 		set_vreg_to_inst (cfg, MONO_LVREG_LS (inst->dreg), tree);
924 		/* Allocate a dummy MonoInst for the second vreg */
925 		MONO_INST_NEW (cfg, tree, OP_LOCAL);
926 		tree->dreg = MONO_LVREG_MS (inst->dreg);
927 		if (cfg->opt & MONO_OPT_SSA)
928 			tree->flags = MONO_INST_VOLATILE;
930 		tree->type = STACK_I4;
931 		tree->inst_vtype = &mono_defaults.int32_class->byval_arg;
932 		tree->klass = mono_class_from_mono_type (tree->inst_vtype);
934 		set_vreg_to_inst (cfg, MONO_LVREG_MS (inst->dreg), tree);
938 	if (cfg->verbose_level > 2)
939 		g_print ("created temp %d (R%d) of type %s\n", num, vreg, mono_type_get_name (type));
/* Create a new variable of TYPE, allocating an appropriate fresh vreg for it
 * (I8 dreg for longs, R8 under soft-float for floats, a unified preg for
 * everything else) and delegating to mono_compile_create_var_for_vreg. */
944 mono_compile_create_var (MonoCompile *cfg, MonoType *type, int opcode)
947 	type = mini_get_underlying_type (type);
949 	if (mono_type_is_long (type))
950 		dreg = mono_alloc_dreg (cfg, STACK_I8);
951 	else if (mono_arch_is_soft_float () && mono_type_is_float (type))
952 		dreg = mono_alloc_dreg (cfg, STACK_R8);
954 		/* All the others are unified */
955 		dreg = mono_alloc_preg (cfg);
957 	return mono_compile_create_var_for_vreg (cfg, type, opcode, dreg);
/* Lazily create (and cache on the cfg) the int32 stack slot used to spill
 * values during int->float conversions. Marked VOLATILE to keep it out of
 * the register allocator. */
961 mini_get_int_to_float_spill_area (MonoCompile *cfg)
964 	if (!cfg->iconv_raw_var) {
965 		cfg->iconv_raw_var = mono_compile_create_var (cfg, &mono_defaults.int32_class->byval_arg, OP_LOCAL);
966 		cfg->iconv_raw_var->flags |= MONO_INST_VOLATILE; /*FIXME, use the don't regalloc flag*/
968 	return cfg->iconv_raw_var;
/* Mark VREG as holding a GC reference (for precise GC map generation),
 * growing the mempool-allocated flag array (doubling, min 32) as needed. */
975 mono_mark_vreg_as_ref (MonoCompile *cfg, int vreg)
977 	if (vreg >= cfg->vreg_is_ref_len) {
978 		gboolean *tmp = cfg->vreg_is_ref;
979 		int size = cfg->vreg_is_ref_len;
981 		while (vreg >= cfg->vreg_is_ref_len)
982 			cfg->vreg_is_ref_len = cfg->vreg_is_ref_len ? cfg->vreg_is_ref_len * 2 : 32;
983 		cfg->vreg_is_ref = (gboolean *)mono_mempool_alloc0 (cfg->mempool, sizeof (gboolean) * cfg->vreg_is_ref_len);
985 			memcpy (cfg->vreg_is_ref, tmp, size * sizeof (gboolean));
987 	cfg->vreg_is_ref [vreg] = TRUE;
/* Mark VREG as holding a managed pointer (interior pointer) for GC maps;
 * same growth scheme as mono_mark_vreg_as_ref. */
991 mono_mark_vreg_as_mp (MonoCompile *cfg, int vreg)
993 	if (vreg >= cfg->vreg_is_mp_len) {
994 		gboolean *tmp = cfg->vreg_is_mp;
995 		int size = cfg->vreg_is_mp_len;
997 		while (vreg >= cfg->vreg_is_mp_len)
998 			cfg->vreg_is_mp_len = cfg->vreg_is_mp_len ? cfg->vreg_is_mp_len * 2 : 32;
999 		cfg->vreg_is_mp = (gboolean *)mono_mempool_alloc0 (cfg->mempool, sizeof (gboolean) * cfg->vreg_is_mp_len);
1001 			memcpy (cfg->vreg_is_mp, tmp, size * sizeof (gboolean));
1003 	cfg->vreg_is_mp [vreg] = TRUE;
/* Map an evaluation-stack type (STACK_*) back to a MonoType, using
 * INS->klass where the stack type alone is not enough (MP, OBJ, VTYPE). */
1007 type_from_stack_type (MonoInst *ins)
1009 	switch (ins->type) {
1010 	case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1011 	case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1012 	case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1013 	case STACK_R8: return &mono_defaults.double_class->byval_arg;
1016 		 * this used to be commented out without any specific reason, but
1017 		 * it breaks #80235 when commented
1020 			return &ins->klass->this_arg;
1022 			return &mono_defaults.object_class->this_arg;
1024 		/* ins->klass may not be set for ldnull.
1025 		 * Also, if we have a boxed valuetype, we want an object class,
1026 		 * not the valuetype class
1028 		if (ins->klass && !ins->klass->valuetype)
1029 			return &ins->klass->byval_arg;
1030 		return &mono_defaults.object_class->byval_arg;
1031 	case STACK_VTYPE: return &ins->klass->byval_arg;
1033 		g_error ("stack type %d to montype not handled\n", ins->type);
/* Public wrapper around the static type_from_stack_type helper. */
1039 mono_type_from_stack_type (MonoInst *ins)
1041 	return type_from_stack_type (ins);
1045  * mono_add_ins_to_end:
1047  *   Same as MONO_ADD_INS, but add INST before any branches at the end of BB —
 * and, for a conditional branch, also before the compare instruction that
 * feeds it, so the compare/branch pair stays adjacent.
1050 mono_add_ins_to_end (MonoBasicBlock *bb, MonoInst *inst)
/* Empty block: just append. */
1055 		MONO_ADD_INS (bb, inst);
1059 	switch (bb->last_ins->opcode) {
/* Unconditional branch/switch/return opcodes: insert right before it. */
1073 		mono_bblock_insert_before_ins (bb, bb->last_ins, inst);
1076 		if (MONO_IS_COND_BRANCH_OP (bb->last_ins)) {
1077 			/* Need to insert the ins before the compare */
1078 			if (bb->code == bb->last_ins) {
1079 				mono_bblock_insert_before_ins (bb, bb->last_ins, inst);
1083 			if (bb->code->next == bb->last_ins) {
1084 				/* Only two instructions */
1085 				opcode = bb->code->opcode;
1087 				if ((opcode == OP_COMPARE) || (opcode == OP_COMPARE_IMM) || (opcode == OP_ICOMPARE) || (opcode == OP_ICOMPARE_IMM) || (opcode == OP_FCOMPARE) || (opcode == OP_LCOMPARE) || (opcode == OP_LCOMPARE_IMM) || (opcode == OP_RCOMPARE)) {
/* compare + branch: insert before the compare so the pair stays intact. */
1089 					mono_bblock_insert_before_ins (bb, bb->code, inst);
1091 					mono_bblock_insert_before_ins (bb, bb->last_ins, inst);
1094 				opcode = bb->last_ins->prev->opcode;
1096 				if ((opcode == OP_COMPARE) || (opcode == OP_COMPARE_IMM) || (opcode == OP_ICOMPARE) || (opcode == OP_ICOMPARE_IMM) || (opcode == OP_FCOMPARE) || (opcode == OP_LCOMPARE) || (opcode == OP_LCOMPARE_IMM) || (opcode == OP_RCOMPARE)) {
1098 					mono_bblock_insert_before_ins (bb, bb->last_ins->prev, inst);
1100 					mono_bblock_insert_before_ins (bb, bb->last_ins, inst);
/* Last instruction is not a branch: plain append. */
1105 		MONO_ADD_INS (bb, inst);
/* Create a MONO_PATCH_INFO_SWITCH patch for a switch jump table over BBS
 * (NUM_BLOCKS targets), anchored at LABEL, and push it onto cfg->patch_info.
 * All memory comes from the compile mempool. */
1111 mono_create_jump_table (MonoCompile *cfg, MonoInst *label, MonoBasicBlock **bbs, int num_blocks)
1113 	MonoJumpInfo *ji = (MonoJumpInfo *)mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfo));
1114 	MonoJumpInfoBBTable *table;
1116 	table = (MonoJumpInfoBBTable *)mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
1118 	table->table_size = num_blocks;
1120 	ji->ip.label = label;
1121 	ji->type = MONO_PATCH_INFO_SWITCH;
1122 	ji->data.table = table;
/* Prepend to the cfg's patch list. */
1123 	ji->next = cfg->patch_info;
1124 	cfg->patch_info = ji;
/* Build (and cache, keyed by arity in a lazily-created hash table) the icall
 * signature for ves_array_new_va with ARITY dimension arguments: one leading
 * pointer-sized arg plus ARITY pointer-sized args, returning object.
 * NOTE(review): accesses to the static 'sighash' appear unsynchronized here —
 * presumably callers hold a JIT lock; confirm. */
1127 static MonoMethodSignature *
1128 mono_get_array_new_va_signature (int arity)
1130 	static GHashTable *sighash;
1131 	MonoMethodSignature *res;
1136 		sighash = g_hash_table_new (NULL, NULL);
1138 	else if ((res = (MonoMethodSignature *)g_hash_table_lookup (sighash, GINT_TO_POINTER (arity)))) {
1143 	res = mono_metadata_signature_alloc (mono_defaults.corlib, arity + 1);
1146 	if (ARCH_VARARG_ICALLS)
1147 		/* Only set this only some archs since not all backends can handle varargs+pinvoke */
1148 		res->call_convention = MONO_CALL_VARARG;
1151 	res->call_convention = MONO_CALL_C;
1154 	res->params [0] = &mono_defaults.int_class->byval_arg;
1155 	for (i = 0; i < arity; i++)
1156 		res->params [i + 1] = &mono_defaults.int_class->byval_arg;
1158 	res->ret = &mono_defaults.object_class->byval_arg;
1160 	g_hash_table_insert (sighash, GINT_TO_POINTER (arity), res);
/* Return (registering on first use) the JIT icall info for the vararg array
 * constructor of the given RANK, so it gets an icall wrapper. */
1167 mono_get_array_new_va_icall (int rank)
1169 	MonoMethodSignature *esig;
1170 	char icall_name [256];
1172 	MonoJitICallInfo *info;
1174 	/* Need to register the icall so it gets an icall wrapper */
/* sprintf into a fixed buffer: safe because rank is a small array rank and
 * the prefix is short, but snprintf would be more defensive. */
1175 	sprintf (icall_name, "ves_array_new_va_%d", rank);
1178 	info = mono_find_jit_icall_by_name (icall_name);
1180 		esig = mono_get_array_new_va_signature (rank);
1181 		name = g_strdup (icall_name);
1182 		info = mono_register_jit_icall (mono_array_new_va, name, esig, FALSE);
/* Return whether KLASS derives directly from System.Array. */
1190 mini_class_is_system_array (MonoClass *klass)
1192 	if (klass->parent == mono_defaults.array_class)
/* Return whether METHOD's assembly is allowed to skip IL verification:
 * non-dynamic wrappers never skip; GAC assemblies and corlib always may;
 * otherwise defer to the assembly's SkipVerification permission. */
1199 mini_assembly_can_skip_verification (MonoDomain *domain, MonoMethod *method)
1201 	MonoAssembly *assembly = method->klass->image->assembly;
1202 	if (method->wrapper_type != MONO_WRAPPER_NONE && method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
1204 	if (assembly->in_gac || assembly->image == mono_defaults.corlib)
1206 	return mono_assembly_has_skip_verification (assembly);
1210  * mini_method_verify:
1212  *   Verify the method using the verifier, recording failures on CFG (as a
 * MONO_ERROR exception or exception_type/exception_message) when
 * FAIL_COMPILE is set.
1214  * Returns true if the method is invalid.
1217 mini_method_verify (MonoCompile *cfg, MonoMethod *method, gboolean fail_compile)
1220 	gboolean is_fulltrust;
/* Fast path: already verified successfully, or verifier disabled. */
1222 	if (method->verification_success)
1225 	if (!mono_verifier_is_enabled_for_method (method))
1228 	/*skip verification implies the assembly must be */
1229 	is_fulltrust = mono_verifier_is_method_full_trust (method) ||  mini_assembly_can_skip_verification (cfg->domain, method);
1231 	res = mono_method_verify_with_current_settings (method, cfg->skip_visibility, is_fulltrust);
1233 	if (mono_loader_get_last_error ()) {
1235 			mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
1236 			mono_error_set_from_loader_error (&cfg->error);
1238 			mono_loader_clear_error ();
1241 			mono_free_verify_list (res);
/* Walk the verifier's findings; hard errors always fail the compile. */
1246 		for (tmp = res; tmp; tmp = tmp->next) {
1247 			MonoVerifyInfoExtended *info = (MonoVerifyInfoExtended *)tmp->data;
1248 			if (info->info.status == MONO_VERIFY_ERROR) {
1250 					char *method_name = mono_method_full_name (method, TRUE);
1251 					cfg->exception_type = info->exception_type;
1252 					cfg->exception_message = g_strdup_printf ("Error verifying %s: %s", method_name, info->info.message);
1253 					g_free (method_name);
1255 				mono_free_verify_list (res);
/* Non-verifiable code fails unless running full trust (access violations
 * still fail even then). */
1258 			if (info->info.status == MONO_VERIFY_NOT_VERIFIABLE && (!is_fulltrust || info->exception_type == MONO_EXCEPTION_METHOD_ACCESS || info->exception_type == MONO_EXCEPTION_FIELD_ACCESS)) {
1260 					char *method_name = mono_method_full_name (method, TRUE);
1261 					char *msg = g_strdup_printf ("Error verifying %s: %s", method_name, info->info.message);
1263 					if (info->exception_type == MONO_EXCEPTION_METHOD_ACCESS)
1264 						mono_error_set_generic_error (&cfg->error, "System", "MethodAccessException", "%s", msg);
1265 					else if (info->exception_type == MONO_EXCEPTION_FIELD_ACCESS)
1266 						mono_error_set_generic_error (&cfg->error, "System", "FieldAccessException", "%s", msg);
1267 					else if (info->exception_type == MONO_EXCEPTION_UNVERIFIABLE_IL)
/* NOTE(review): unlike the two calls above, this passes msg directly as the
 * format string (no "%s"). If msg contains '%' (method names can), this is a
 * format-string bug — should be (..., "VerificationException", "%s", msg). */
1268 						mono_error_set_generic_error (&cfg->error, "System.Security", "VerificationException", msg);
1269 					if (!mono_error_ok (&cfg->error)) {
1270 						mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
1273 						cfg->exception_type = info->exception_type;
1274 						cfg->exception_message = msg;
1276 					g_free (method_name);
1278 				mono_free_verify_list (res);
1282 		mono_free_verify_list (res);
/* Cache the positive result on the method. */
1284 	method->verification_success = 1;
1288 /* Returns true if something went wrong: runs the verifier on the generic
 * method definition (walking up the inflation chain), except for
 * corlib-internal assemblies which are never verified. */
1290 mono_compile_is_broken (MonoCompile *cfg, MonoMethod *method, gboolean fail_compile)
1292 	MonoMethod *method_definition = method;
1293 	gboolean dont_verify = method->klass->image->assembly->corlib_internal;
1295 	while (method_definition->is_inflated) {
1296 		MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
1297 		method_definition = imethod->declaring;
1300 	return !dont_verify && mini_method_verify (cfg, method_definition, fail_compile);
/* Register JI for METHOD in the domain's dynamic-method code hash, creating
 * the hash table on first use. */
1304 mono_dynamic_code_hash_insert (MonoDomain *domain, MonoMethod *method, MonoJitDynamicMethodInfo *ji)
1306 	if (!domain_jit_info (domain)->dynamic_code_hash)
1307 		domain_jit_info (domain)->dynamic_code_hash = g_hash_table_new (NULL, NULL);
1308 	g_hash_table_insert (domain_jit_info (domain)->dynamic_code_hash, method, ji);
/* Look up METHOD in the domain's dynamic-method code hash; NULL if the hash
 * does not exist yet or the method is absent. */
1311 static MonoJitDynamicMethodInfo*
1312 mono_dynamic_code_hash_lookup (MonoDomain *domain, MonoMethod *method)
1314 	MonoJitDynamicMethodInfo *res;
1316 	if (domain_jit_info (domain)->dynamic_code_hash)
1317 		res = (MonoJitDynamicMethodInfo *)g_hash_table_lookup (domain_jit_info (domain)->dynamic_code_hash, method);
1325 GList *active, *inactive;
/* GCompareFunc ordering MonoMethodVars by the start position of their live
 * interval; variables without a range sort relative to those with one. */
1330 compare_by_interval_start_pos_func (gconstpointer a, gconstpointer b)
1332 	MonoMethodVar *v1 = (MonoMethodVar*)a;
1333 	MonoMethodVar *v2 = (MonoMethodVar*)b;
1337 	else if (v1->interval->range && v2->interval->range)
1338 		return v1->interval->range->from - v2->interval->range->from;
1339 	else if (v1->interval->range)
/* Linear-scan debug tracing: expands to its argument when debugging is
 * enabled, to nothing otherwise. */
1346 #define LSCAN_DEBUG(a) do { a; } while (0)
1348 #define LSCAN_DEBUG(a)
1352 mono_allocate_stack_slots2 (MonoCompile *cfg, gboolean backward, guint32 *stack_size, guint32 *stack_align)
1354 int i, slot, offset, size;
1359 GList *vars = NULL, *l, *unhandled;
1360 StackSlotInfo *scalar_stack_slots, *vtype_stack_slots, *slot_info;
1363 gboolean reuse_slot;
1365 LSCAN_DEBUG (printf ("Allocate Stack Slots 2 for %s:\n", mono_method_full_name (cfg->method, TRUE)));
1367 scalar_stack_slots = (StackSlotInfo *)mono_mempool_alloc0 (cfg->mempool, sizeof (StackSlotInfo) * MONO_TYPE_PINNED);
1368 vtype_stack_slots = NULL;
1371 offsets = (gint32 *)mono_mempool_alloc (cfg->mempool, sizeof (gint32) * cfg->num_varinfo);
1372 for (i = 0; i < cfg->num_varinfo; ++i)
1375 for (i = cfg->locals_start; i < cfg->num_varinfo; i++) {
1376 inst = cfg->varinfo [i];
1377 vmv = MONO_VARINFO (cfg, i);
1379 if ((inst->flags & MONO_INST_IS_DEAD) || inst->opcode == OP_REGVAR || inst->opcode == OP_REGOFFSET)
1382 vars = g_list_prepend (vars, vmv);
1385 vars = g_list_sort (g_list_copy (vars), compare_by_interval_start_pos_func);
1390 for (unhandled = vars; unhandled; unhandled = unhandled->next) {
1391 MonoMethodVar *current = unhandled->data;
1393 if (current->interval->range) {
1394 g_assert (current->interval->range->from >= i);
1395 i = current->interval->range->from;
1402 for (unhandled = vars; unhandled; unhandled = unhandled->next) {
1403 MonoMethodVar *current = (MonoMethodVar *)unhandled->data;
1406 inst = cfg->varinfo [vmv->idx];
1408 t = mono_type_get_underlying_type (inst->inst_vtype);
1409 if (cfg->gsharedvt && mini_is_gsharedvt_variable_type (t))
1412 /* inst->backend.is_pinvoke indicates native sized value types, this is used by the
1413 * pinvoke wrappers when they call functions returning structures */
1414 if (inst->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (t) && t->type != MONO_TYPE_TYPEDBYREF) {
1415 size = mono_class_native_size (mono_class_from_mono_type (t), &align);
1420 size = mini_type_stack_size (t, &ialign);
1423 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (t)))
1428 if (cfg->disable_reuse_stack_slots)
1431 t = mini_get_underlying_type (t);
1433 case MONO_TYPE_GENERICINST:
1434 if (!mono_type_generic_inst_is_valuetype (t)) {
1435 slot_info = &scalar_stack_slots [t->type];
1439 case MONO_TYPE_VALUETYPE:
1440 if (!vtype_stack_slots)
1441 vtype_stack_slots = (StackSlotInfo *)mono_mempool_alloc0 (cfg->mempool, sizeof (StackSlotInfo) * 256);
1442 for (i = 0; i < nvtypes; ++i)
1443 if (t->data.klass == vtype_stack_slots [i].vtype)
1446 slot_info = &vtype_stack_slots [i];
1448 g_assert (nvtypes < 256);
1449 vtype_stack_slots [nvtypes].vtype = t->data.klass;
1450 slot_info = &vtype_stack_slots [nvtypes];
1453 if (cfg->disable_reuse_ref_stack_slots)
1460 #if SIZEOF_VOID_P == 4
1465 if (cfg->disable_ref_noref_stack_slot_share) {
1466 slot_info = &scalar_stack_slots [MONO_TYPE_I];
1471 case MONO_TYPE_CLASS:
1472 case MONO_TYPE_OBJECT:
1473 case MONO_TYPE_ARRAY:
1474 case MONO_TYPE_SZARRAY:
1475 case MONO_TYPE_STRING:
1476 /* Share non-float stack slots of the same size */
1477 slot_info = &scalar_stack_slots [MONO_TYPE_CLASS];
1478 if (cfg->disable_reuse_ref_stack_slots)
1483 slot_info = &scalar_stack_slots [t->type];
1487 if (cfg->comp_done & MONO_COMP_LIVENESS) {
1491 //printf ("START %2d %08x %08x\n", vmv->idx, vmv->range.first_use.abs_pos, vmv->range.last_use.abs_pos);
1493 if (!current->interval->range) {
1494 if (inst->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT))
1498 inst->flags |= MONO_INST_IS_DEAD;
1503 pos = current->interval->range->from;
1505 LSCAN_DEBUG (printf ("process R%d ", inst->dreg));
1506 if (current->interval->range)
1507 LSCAN_DEBUG (mono_linterval_print (current->interval));
1508 LSCAN_DEBUG (printf ("\n"));
1510 /* Check for intervals in active which expired or inactive */
1512 /* FIXME: Optimize this */
1515 for (l = slot_info->active; l != NULL; l = l->next) {
1516 MonoMethodVar *v = (MonoMethodVar*)l->data;
1518 if (v->interval->last_range->to < pos) {
1519 slot_info->active = g_list_delete_link (slot_info->active, l);
1520 slot_info->slots = g_slist_prepend_mempool (cfg->mempool, slot_info->slots, GINT_TO_POINTER (offsets [v->idx]));
1521 LSCAN_DEBUG (printf ("Interval R%d has expired, adding 0x%x to slots\n", cfg->varinfo [v->idx]->dreg, offsets [v->idx]));
1525 else if (!mono_linterval_covers (v->interval, pos)) {
1526 slot_info->inactive = g_list_append (slot_info->inactive, v);
1527 slot_info->active = g_list_delete_link (slot_info->active, l);
1528 LSCAN_DEBUG (printf ("Interval R%d became inactive\n", cfg->varinfo [v->idx]->dreg));
1535 /* Check for intervals in inactive which expired or active */
1537 /* FIXME: Optimize this */
1540 for (l = slot_info->inactive; l != NULL; l = l->next) {
1541 MonoMethodVar *v = (MonoMethodVar*)l->data;
1543 if (v->interval->last_range->to < pos) {
1544 slot_info->inactive = g_list_delete_link (slot_info->inactive, l);
1545 // FIXME: Enabling this seems to cause impossible to debug crashes
1546 //slot_info->slots = g_slist_prepend_mempool (cfg->mempool, slot_info->slots, GINT_TO_POINTER (offsets [v->idx]));
1547 LSCAN_DEBUG (printf ("Interval R%d has expired, adding 0x%x to slots\n", cfg->varinfo [v->idx]->dreg, offsets [v->idx]));
1551 else if (mono_linterval_covers (v->interval, pos)) {
1552 slot_info->active = g_list_append (slot_info->active, v);
1553 slot_info->inactive = g_list_delete_link (slot_info->inactive, l);
1554 LSCAN_DEBUG (printf ("\tInterval R%d became active\n", cfg->varinfo [v->idx]->dreg));
1562 * This also handles the case when the variable is used in an
1563 * exception region, as liveness info is not computed there.
1566 * FIXME: All valuetypes are marked as INDIRECT because of LDADDR
1569 if (! (inst->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT))) {
1570 if (slot_info->slots) {
1571 slot = GPOINTER_TO_INT (slot_info->slots->data);
1573 slot_info->slots = slot_info->slots->next;
1576 /* FIXME: We might want to consider the inactive intervals as well if slot_info->slots is empty */
1578 slot_info->active = mono_varlist_insert_sorted (cfg, slot_info->active, vmv, TRUE);
1584 static int count = 0;
1587 if (count == atoi (g_getenv ("COUNT3")))
1588 printf ("LAST: %s\n", mono_method_full_name (cfg->method, TRUE));
1589 if (count > atoi (g_getenv ("COUNT3")))
1592 mono_print_ins (inst);
1597 LSCAN_DEBUG (printf ("R%d %s -> 0x%x\n", inst->dreg, mono_type_full_name (t), slot));
1599 if (inst->flags & MONO_INST_LMF) {
1600 size = sizeof (MonoLMF);
1601 align = sizeof (mgreg_t);
1608 if (slot == 0xffffff) {
1610 * Allways allocate valuetypes to sizeof (gpointer) to allow more
1611 * efficient copying (and to work around the fact that OP_MEMCPY
1612 * and OP_MEMSET ignores alignment).
1614 if (MONO_TYPE_ISSTRUCT (t)) {
1615 align = MAX (align, sizeof (gpointer));
1616 align = MAX (align, mono_class_min_align (mono_class_from_mono_type (t)));
1621 offset += align - 1;
1622 offset &= ~(align - 1);
1626 offset += align - 1;
1627 offset &= ~(align - 1);
1632 if (*stack_align == 0)
1633 *stack_align = align;
1636 offsets [vmv->idx] = slot;
1639 for (i = 0; i < MONO_TYPE_PINNED; ++i) {
1640 if (scalar_stack_slots [i].active)
1641 g_list_free (scalar_stack_slots [i].active);
1643 for (i = 0; i < nvtypes; ++i) {
1644 if (vtype_stack_slots [i].active)
1645 g_list_free (vtype_stack_slots [i].active);
1648 cfg->stat_locals_stack_size += offset;
1650 *stack_size = offset;
1655 * mono_allocate_stack_slots:
1657 * Allocate stack slots for all non register allocated variables using a
1658 * linear scan algorithm.
1659 * Returns: an array of stack offsets.
1660 * STACK_SIZE is set to the amount of stack space needed.
1661 * STACK_ALIGN is set to the alignment needed by the locals area.
1664 mono_allocate_stack_slots (MonoCompile *cfg, gboolean backward, guint32 *stack_size, guint32 *stack_align)
1666 int i, slot, offset, size;
1671 GList *vars = NULL, *l;
1672 StackSlotInfo *scalar_stack_slots, *vtype_stack_slots, *slot_info;
1675 gboolean reuse_slot;
1677 if ((cfg->num_varinfo > 0) && MONO_VARINFO (cfg, 0)->interval)
1678 return mono_allocate_stack_slots2 (cfg, backward, stack_size, stack_align);
1680 scalar_stack_slots = (StackSlotInfo *)mono_mempool_alloc0 (cfg->mempool, sizeof (StackSlotInfo) * MONO_TYPE_PINNED);
1681 vtype_stack_slots = NULL;
1684 offsets = (gint32 *)mono_mempool_alloc (cfg->mempool, sizeof (gint32) * cfg->num_varinfo);
1685 for (i = 0; i < cfg->num_varinfo; ++i)
1688 for (i = cfg->locals_start; i < cfg->num_varinfo; i++) {
1689 inst = cfg->varinfo [i];
1690 vmv = MONO_VARINFO (cfg, i);
1692 if ((inst->flags & MONO_INST_IS_DEAD) || inst->opcode == OP_REGVAR || inst->opcode == OP_REGOFFSET)
1695 vars = g_list_prepend (vars, vmv);
1698 vars = mono_varlist_sort (cfg, vars, 0);
1700 *stack_align = sizeof(mgreg_t);
1701 for (l = vars; l; l = l->next) {
1702 vmv = (MonoMethodVar *)l->data;
1703 inst = cfg->varinfo [vmv->idx];
1705 t = mono_type_get_underlying_type (inst->inst_vtype);
1706 if (cfg->gsharedvt && mini_is_gsharedvt_variable_type (t))
1709 /* inst->backend.is_pinvoke indicates native sized value types, this is used by the
1710 * pinvoke wrappers when they call functions returning structures */
1711 if (inst->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (t) && t->type != MONO_TYPE_TYPEDBYREF) {
1712 size = mono_class_native_size (mono_class_from_mono_type (t), &align);
1716 size = mini_type_stack_size (t, &ialign);
1719 if (mono_class_has_failure (mono_class_from_mono_type (t)))
1720 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
1722 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (t)))
1727 if (cfg->disable_reuse_stack_slots)
1730 t = mini_get_underlying_type (t);
1732 case MONO_TYPE_GENERICINST:
1733 if (!mono_type_generic_inst_is_valuetype (t)) {
1734 slot_info = &scalar_stack_slots [t->type];
1738 case MONO_TYPE_VALUETYPE:
1739 if (!vtype_stack_slots)
1740 vtype_stack_slots = (StackSlotInfo *)mono_mempool_alloc0 (cfg->mempool, sizeof (StackSlotInfo) * 256);
1741 for (i = 0; i < nvtypes; ++i)
1742 if (t->data.klass == vtype_stack_slots [i].vtype)
1745 slot_info = &vtype_stack_slots [i];
1747 g_assert (nvtypes < 256);
1748 vtype_stack_slots [nvtypes].vtype = t->data.klass;
1749 slot_info = &vtype_stack_slots [nvtypes];
1752 if (cfg->disable_reuse_ref_stack_slots)
1759 #if SIZEOF_VOID_P == 4
1764 if (cfg->disable_ref_noref_stack_slot_share) {
1765 slot_info = &scalar_stack_slots [MONO_TYPE_I];
1770 case MONO_TYPE_CLASS:
1771 case MONO_TYPE_OBJECT:
1772 case MONO_TYPE_ARRAY:
1773 case MONO_TYPE_SZARRAY:
1774 case MONO_TYPE_STRING:
1775 /* Share non-float stack slots of the same size */
1776 slot_info = &scalar_stack_slots [MONO_TYPE_CLASS];
1777 if (cfg->disable_reuse_ref_stack_slots)
1781 case MONO_TYPE_MVAR:
1782 slot_info = &scalar_stack_slots [t->type];
1785 slot_info = &scalar_stack_slots [t->type];
1790 if (cfg->comp_done & MONO_COMP_LIVENESS) {
1791 //printf ("START %2d %08x %08x\n", vmv->idx, vmv->range.first_use.abs_pos, vmv->range.last_use.abs_pos);
1793 /* expire old intervals in active */
1794 while (slot_info->active) {
1795 MonoMethodVar *amv = (MonoMethodVar *)slot_info->active->data;
1797 if (amv->range.last_use.abs_pos > vmv->range.first_use.abs_pos)
1800 //printf ("EXPIR %2d %08x %08x C%d R%d\n", amv->idx, amv->range.first_use.abs_pos, amv->range.last_use.abs_pos, amv->spill_costs, amv->reg);
1802 slot_info->active = g_list_delete_link (slot_info->active, slot_info->active);
1803 slot_info->slots = g_slist_prepend_mempool (cfg->mempool, slot_info->slots, GINT_TO_POINTER (offsets [amv->idx]));
1807 * This also handles the case when the variable is used in an
1808 * exception region, as liveness info is not computed there.
1811 * FIXME: All valuetypes are marked as INDIRECT because of LDADDR
1814 if (! (inst->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT))) {
1815 if (slot_info->slots) {
1816 slot = GPOINTER_TO_INT (slot_info->slots->data);
1818 slot_info->slots = slot_info->slots->next;
1821 slot_info->active = mono_varlist_insert_sorted (cfg, slot_info->active, vmv, TRUE);
1826 static int count = 0;
1830 if (count == atoi (g_getenv ("COUNT")))
1831 printf ("LAST: %s\n", mono_method_full_name (cfg->method, TRUE));
1832 if (count > atoi (g_getenv ("COUNT")))
1835 mono_print_ins (inst);
1840 if (inst->flags & MONO_INST_LMF) {
1842 * This variable represents a MonoLMF structure, which has no corresponding
1843 * CLR type, so hard-code its size/alignment.
1845 size = sizeof (MonoLMF);
1846 align = sizeof (mgreg_t);
1853 if (slot == 0xffffff) {
1855 * Allways allocate valuetypes to sizeof (gpointer) to allow more
1856 * efficient copying (and to work around the fact that OP_MEMCPY
1857 * and OP_MEMSET ignores alignment).
1859 if (MONO_TYPE_ISSTRUCT (t)) {
1860 align = MAX (align, sizeof (gpointer));
1861 align = MAX (align, mono_class_min_align (mono_class_from_mono_type (t)));
1863 * Align the size too so the code generated for passing vtypes in
1864 * registers doesn't overwrite random locals.
1866 size = (size + (align - 1)) & ~(align -1);
1871 offset += align - 1;
1872 offset &= ~(align - 1);
1876 offset += align - 1;
1877 offset &= ~(align - 1);
1882 *stack_align = MAX (*stack_align, align);
1885 offsets [vmv->idx] = slot;
1888 for (i = 0; i < MONO_TYPE_PINNED; ++i) {
1889 if (scalar_stack_slots [i].active)
1890 g_list_free (scalar_stack_slots [i].active);
1892 for (i = 0; i < nvtypes; ++i) {
1893 if (vtype_stack_slots [i].active)
1894 g_list_free (vtype_stack_slots [i].active);
1897 cfg->stat_locals_stack_size += offset;
1899 *stack_size = offset;
1903 #define EMUL_HIT_SHIFT 3
1904 #define EMUL_HIT_MASK ((1 << EMUL_HIT_SHIFT) - 1)
1905 /* small hit bitmap cache */
1906 static mono_byte emul_opcode_hit_cache [(OP_LAST>>EMUL_HIT_SHIFT) + 1] = {0};
1907 static short emul_opcode_num = 0;
1908 static short emul_opcode_alloced = 0;
1909 static short *emul_opcode_opcodes;
1910 static MonoJitICallInfo **emul_opcode_map;
1913 mono_find_jit_opcode_emulation (int opcode)
1915 g_assert (opcode >= 0 && opcode <= OP_LAST);
1916 if (emul_opcode_hit_cache [opcode >> (EMUL_HIT_SHIFT + 3)] & (1 << (opcode & EMUL_HIT_MASK))) {
1918 for (i = 0; i < emul_opcode_num; ++i) {
1919 if (emul_opcode_opcodes [i] == opcode)
1920 return emul_opcode_map [i];
1927 mini_register_opcode_emulation (int opcode, const char *name, const char *sigstr, gpointer func, const char *symbol, gboolean no_throw)
1929 MonoJitICallInfo *info;
1930 MonoMethodSignature *sig = mono_create_icall_signature (sigstr);
1932 g_assert (!sig->hasthis);
1933 g_assert (sig->param_count < 3);
1935 /* Opcode emulation functions are assumed to don't call mono_raise_exception () */
1936 info = mono_register_jit_icall_full (func, name, sig, no_throw, TRUE, symbol);
1938 if (emul_opcode_num >= emul_opcode_alloced) {
1939 int incr = emul_opcode_alloced? emul_opcode_alloced/2: 16;
1940 emul_opcode_alloced += incr;
1941 emul_opcode_map = (MonoJitICallInfo **)g_realloc (emul_opcode_map, sizeof (emul_opcode_map [0]) * emul_opcode_alloced);
1942 emul_opcode_opcodes = (short *)g_realloc (emul_opcode_opcodes, sizeof (emul_opcode_opcodes [0]) * emul_opcode_alloced);
1944 emul_opcode_map [emul_opcode_num] = info;
1945 emul_opcode_opcodes [emul_opcode_num] = opcode;
1947 emul_opcode_hit_cache [opcode >> (EMUL_HIT_SHIFT + 3)] |= (1 << (opcode & EMUL_HIT_MASK));
1951 print_dfn (MonoCompile *cfg)
1959 char *method_name = mono_method_full_name (cfg->method, TRUE);
1960 g_print ("IR code for method %s\n", method_name);
1961 g_free (method_name);
1964 for (i = 0; i < cfg->num_bblocks; ++i) {
1965 bb = cfg->bblocks [i];
1966 /*if (bb->cil_code) {
1967 char* code1, *code2;
1968 code1 = mono_disasm_code_one (NULL, cfg->method, bb->cil_code, NULL);
1969 if (bb->last_ins->cil_code)
1970 code2 = mono_disasm_code_one (NULL, cfg->method, bb->last_ins->cil_code, NULL);
1972 code2 = g_strdup ("");
1974 code1 [strlen (code1) - 1] = 0;
1975 code = g_strdup_printf ("%s -> %s", code1, code2);
1979 code = g_strdup ("\n");
1980 g_print ("\nBB%d (%d) (len: %d): %s", bb->block_num, i, bb->cil_length, code);
1981 MONO_BB_FOR_EACH_INS (bb, c) {
1982 mono_print_ins_index (-1, c);
1985 g_print ("\tprev:");
1986 for (j = 0; j < bb->in_count; ++j) {
1987 g_print (" BB%d", bb->in_bb [j]->block_num);
1989 g_print ("\t\tsucc:");
1990 for (j = 0; j < bb->out_count; ++j) {
1991 g_print (" BB%d", bb->out_bb [j]->block_num);
1993 g_print ("\n\tidom: BB%d\n", bb->idom? bb->idom->block_num: -1);
1996 g_assert (mono_bitset_test_fast (bb->dominators, bb->idom->dfn));
1999 mono_blockset_print (cfg, bb->dominators, "\tdominators", bb->idom? bb->idom->dfn: -1);
2001 mono_blockset_print (cfg, bb->dfrontier, "\tdfrontier", -1);
2009 mono_bblock_add_inst (MonoBasicBlock *bb, MonoInst *inst)
2011 MONO_ADD_INS (bb, inst);
2015 mono_bblock_insert_after_ins (MonoBasicBlock *bb, MonoInst *ins, MonoInst *ins_to_insert)
2019 bb->code = ins_to_insert;
2021 /* Link with next */
2022 ins_to_insert->next = ins;
2024 ins->prev = ins_to_insert;
2026 if (bb->last_ins == NULL)
2027 bb->last_ins = ins_to_insert;
2029 /* Link with next */
2030 ins_to_insert->next = ins->next;
2032 ins->next->prev = ins_to_insert;
2034 /* Link with previous */
2035 ins->next = ins_to_insert;
2036 ins_to_insert->prev = ins;
2038 if (bb->last_ins == ins)
2039 bb->last_ins = ins_to_insert;
2044 mono_bblock_insert_before_ins (MonoBasicBlock *bb, MonoInst *ins, MonoInst *ins_to_insert)
2049 ins->prev = ins_to_insert;
2050 bb->code = ins_to_insert;
2051 ins_to_insert->next = ins;
2052 if (bb->last_ins == NULL)
2053 bb->last_ins = ins_to_insert;
2055 /* Link with previous */
2057 ins->prev->next = ins_to_insert;
2058 ins_to_insert->prev = ins->prev;
2060 /* Link with next */
2061 ins->prev = ins_to_insert;
2062 ins_to_insert->next = ins;
2064 if (bb->code == ins)
2065 bb->code = ins_to_insert;
2070 * mono_verify_bblock:
2072 * Verify that the next and prev pointers are consistent inside the instructions in BB.
2075 mono_verify_bblock (MonoBasicBlock *bb)
2077 MonoInst *ins, *prev;
2080 for (ins = bb->code; ins; ins = ins->next) {
2081 g_assert (ins->prev == prev);
2085 g_assert (!bb->last_ins->next);
2091 * Perform consistency checks on the JIT data structures and the IR
2094 mono_verify_cfg (MonoCompile *cfg)
2098 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
2099 mono_verify_bblock (bb);
2103 mono_destroy_compile (MonoCompile *cfg)
2108 mono_metadata_free_mh (cfg->header);
2109 //mono_mempool_stats (cfg->mempool);
2110 mono_free_loop_info (cfg);
2112 mono_regstate_free (cfg->rs);
2114 g_hash_table_destroy (cfg->spvars);
2116 g_hash_table_destroy (cfg->exvars);
2117 for (l = cfg->headers_to_free; l; l = l->next)
2118 mono_metadata_free_mh ((MonoMethodHeader *)l->data);
2119 g_list_free (cfg->ldstr_list);
2120 g_hash_table_destroy (cfg->token_info_hash);
2121 if (cfg->abs_patches)
2122 g_hash_table_destroy (cfg->abs_patches);
2123 mono_mempool_destroy (cfg->mempool);
2125 mono_debug_free_method (cfg);
2127 g_free (cfg->varinfo);
2129 g_free (cfg->exception_message);
2134 mono_create_tls_get_offset (MonoCompile *cfg, int offset)
2138 if (!cfg->backend->have_tls_get)
2144 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
2145 ins->dreg = mono_alloc_preg (cfg);
2146 ins->inst_offset = offset;
2151 mini_tls_get_supported (MonoCompile *cfg, MonoTlsKey key)
2153 if (!cfg->backend->have_tls_get)
2156 if (cfg->compile_aot)
2157 return cfg->backend->have_tls_get_reg;
2159 return mini_get_tls_offset (key) != -1;
2163 mono_create_tls_get (MonoCompile *cfg, MonoTlsKey key)
2165 if (!cfg->backend->have_tls_get)
2169 * TLS offsets might be different at AOT time, so load them from a GOT slot and
2170 * use a different opcode.
2172 if (cfg->compile_aot) {
2173 if (cfg->backend->have_tls_get_reg) {
2176 EMIT_NEW_TLS_OFFSETCONST (cfg, c, key);
2177 MONO_INST_NEW (cfg, ins, OP_TLS_GET_REG);
2178 ins->dreg = mono_alloc_preg (cfg);
2179 ins->sreg1 = c->dreg;
2186 return mono_create_tls_get_offset (cfg, mini_get_tls_offset (key));
2190 mono_get_jit_tls_intrinsic (MonoCompile *cfg)
2192 return mono_create_tls_get (cfg, TLS_KEY_JIT_TLS);
2196 mono_get_domain_intrinsic (MonoCompile* cfg)
2198 return mono_create_tls_get (cfg, TLS_KEY_DOMAIN);
2202 mono_get_thread_intrinsic (MonoCompile* cfg)
2204 return mono_create_tls_get (cfg, TLS_KEY_THREAD);
2208 mono_get_lmf_intrinsic (MonoCompile* cfg)
2210 return mono_create_tls_get (cfg, TLS_KEY_LMF);
2214 mono_get_lmf_addr_intrinsic (MonoCompile* cfg)
2216 return mono_create_tls_get (cfg, TLS_KEY_LMF_ADDR);
2220 mono_add_patch_info (MonoCompile *cfg, int ip, MonoJumpInfoType type, gconstpointer target)
2222 MonoJumpInfo *ji = (MonoJumpInfo *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfo));
2226 ji->data.target = target;
2227 ji->next = cfg->patch_info;
2229 cfg->patch_info = ji;
2233 mono_add_patch_info_rel (MonoCompile *cfg, int ip, MonoJumpInfoType type, gconstpointer target, int relocation)
2235 MonoJumpInfo *ji = (MonoJumpInfo *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfo));
2239 ji->relocation = relocation;
2240 ji->data.target = target;
2241 ji->next = cfg->patch_info;
2243 cfg->patch_info = ji;
2247 mono_remove_patch_info (MonoCompile *cfg, int ip)
2249 MonoJumpInfo **ji = &cfg->patch_info;
2252 if ((*ji)->ip.i == ip)
2255 ji = &((*ji)->next);
2260 mono_add_seq_point (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst *ins, int native_offset)
2262 ins->inst_offset = native_offset;
2263 g_ptr_array_add (cfg->seq_points, ins);
2265 bb->seq_points = g_slist_prepend_mempool (cfg->mempool, bb->seq_points, ins);
2266 bb->last_seq_point = ins;
2271 mono_add_var_location (MonoCompile *cfg, MonoInst *var, gboolean is_reg, int reg, int offset, int from, int to)
2273 MonoDwarfLocListEntry *entry = (MonoDwarfLocListEntry *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoDwarfLocListEntry));
2276 g_assert (offset == 0);
2278 entry->is_reg = is_reg;
2280 entry->offset = offset;
2284 if (var == cfg->args [0])
2285 cfg->this_loclist = g_slist_append_mempool (cfg->mempool, cfg->this_loclist, entry);
2286 else if (var == cfg->rgctx_var)
2287 cfg->rgctx_loclist = g_slist_append_mempool (cfg->mempool, cfg->rgctx_loclist, entry);
2291 mono_compile_create_vars (MonoCompile *cfg)
2293 MonoMethodSignature *sig;
2294 MonoMethodHeader *header;
2297 header = cfg->header;
2299 sig = mono_method_signature (cfg->method);
2301 if (!MONO_TYPE_IS_VOID (sig->ret)) {
2302 cfg->ret = mono_compile_create_var (cfg, sig->ret, OP_ARG);
2303 /* Inhibit optimizations */
2304 cfg->ret->flags |= MONO_INST_VOLATILE;
2306 if (cfg->verbose_level > 2)
2307 g_print ("creating vars\n");
2309 cfg->args = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, (sig->param_count + sig->hasthis) * sizeof (MonoInst*));
2312 cfg->args [0] = mono_compile_create_var (cfg, &cfg->method->klass->this_arg, OP_ARG);
2314 for (i = 0; i < sig->param_count; ++i) {
2315 cfg->args [i + sig->hasthis] = mono_compile_create_var (cfg, sig->params [i], OP_ARG);
2318 if (cfg->verbose_level > 2) {
2320 printf ("\treturn : ");
2321 mono_print_ins (cfg->ret);
2325 printf ("\tthis: ");
2326 mono_print_ins (cfg->args [0]);
2329 for (i = 0; i < sig->param_count; ++i) {
2330 printf ("\targ [%d]: ", i);
2331 mono_print_ins (cfg->args [i + sig->hasthis]);
2335 cfg->locals_start = cfg->num_varinfo;
2336 cfg->locals = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, header->num_locals * sizeof (MonoInst*));
2338 if (cfg->verbose_level > 2)
2339 g_print ("creating locals\n");
2341 for (i = 0; i < header->num_locals; ++i)
2342 cfg->locals [i] = mono_compile_create_var (cfg, header->locals [i], OP_LOCAL);
2344 if (cfg->verbose_level > 2)
2345 g_print ("locals done\n");
2348 if (COMPILE_LLVM (cfg))
2349 mono_llvm_create_vars (cfg);
2351 mono_arch_create_vars (cfg);
2353 mono_arch_create_vars (cfg);
2356 if (cfg->method->save_lmf && cfg->create_lmf_var) {
2357 MonoInst *lmf_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2358 lmf_var->flags |= MONO_INST_VOLATILE;
2359 lmf_var->flags |= MONO_INST_LMF;
2360 cfg->lmf_var = lmf_var;
2365 mono_print_code (MonoCompile *cfg, const char* msg)
2369 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
2370 mono_print_bb (bb, msg);
2374 mono_postprocess_patches (MonoCompile *cfg)
2376 MonoJumpInfo *patch_info;
2379 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
2380 switch (patch_info->type) {
2381 case MONO_PATCH_INFO_ABS: {
2382 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (patch_info->data.target);
2385 * Change patches of type MONO_PATCH_INFO_ABS into patches describing the
2389 //printf ("TEST %s %p\n", info->name, patch_info->data.target);
2390 /* for these array methods we currently register the same function pointer
2391 * since it's a vararg function. But this means that mono_find_jit_icall_by_addr ()
2392 * will return the incorrect one depending on the order they are registered.
2393 * See tests/test-arr.cs
2395 if (strstr (info->name, "ves_array_new_va_") == NULL && strstr (info->name, "ves_array_element_address_") == NULL) {
2396 patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
2397 patch_info->data.name = info->name;
2401 if (patch_info->type == MONO_PATCH_INFO_ABS) {
2402 if (cfg->abs_patches) {
2403 MonoJumpInfo *abs_ji = (MonoJumpInfo *)g_hash_table_lookup (cfg->abs_patches, patch_info->data.target);
2405 patch_info->type = abs_ji->type;
2406 patch_info->data.target = abs_ji->data.target;
2413 case MONO_PATCH_INFO_SWITCH: {
2415 #if defined(__native_client__) && defined(__native_client_codegen__)
2416 /* This memory will leak. */
2417 /* TODO: can we free this when */
2418 /* making the final jump table? */
2419 table = g_malloc0 (sizeof(gpointer) * patch_info->data.table->table_size);
2421 if (cfg->method->dynamic) {
2422 table = (void **)mono_code_manager_reserve (cfg->dynamic_info->code_mp, sizeof (gpointer) * patch_info->data.table->table_size);
2424 table = (void **)mono_domain_code_reserve (cfg->domain, sizeof (gpointer) * patch_info->data.table->table_size);
2428 for (i = 0; i < patch_info->data.table->table_size; i++) {
2429 /* Might be NULL if the switch is eliminated */
2430 if (patch_info->data.table->table [i]) {
2431 g_assert (patch_info->data.table->table [i]->native_offset);
2432 table [i] = GINT_TO_POINTER (patch_info->data.table->table [i]->native_offset);
2437 patch_info->data.table->table = (MonoBasicBlock**)table;
2440 case MONO_PATCH_INFO_METHOD_JUMP: {
2441 MonoJumpList *jlist;
2442 MonoDomain *domain = cfg->domain;
2443 unsigned char *ip = cfg->native_code + patch_info->ip.i;
2444 #if defined(__native_client__) && defined(__native_client_codegen__)
2445 /* When this jump target gets evaluated, the method */
2446 /* will be installed in the dynamic code section, */
2447 /* not at the location of cfg->native_code. */
2448 ip = nacl_inverse_modify_patch_target (cfg->native_code) + patch_info->ip.i;
2451 mono_domain_lock (domain);
2452 jlist = (MonoJumpList *)g_hash_table_lookup (domain_jit_info (domain)->jump_target_hash, patch_info->data.method);
2454 jlist = (MonoJumpList *)mono_domain_alloc0 (domain, sizeof (MonoJumpList));
2455 g_hash_table_insert (domain_jit_info (domain)->jump_target_hash, patch_info->data.method, jlist);
2457 jlist->list = g_slist_prepend (jlist->list, ip);
2458 mono_domain_unlock (domain);
2469 mono_codegen (MonoCompile *cfg)
2472 int max_epilog_size;
2474 MonoDomain *code_domain;
2475 guint unwindlen = 0;
2477 if (mono_using_xdebug)
2479 * Recent gdb versions have trouble processing symbol files containing
2480 * overlapping address ranges, so allocate all code from the code manager
2481 * of the root domain. (#666152).
2483 code_domain = mono_get_root_domain ();
2485 code_domain = cfg->domain;
2487 #if defined(__native_client_codegen__) && defined(__native_client__)
2490 /* This keeps patch targets from being transformed during
2491 * ordinary method compilation, for local branches and jumps.
2493 nacl_allow_target_modification (FALSE);
2496 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
2497 cfg->spill_count = 0;
2498 /* we reuse dfn here */
2499 /* bb->dfn = bb_count++; */
2501 mono_arch_lowering_pass (cfg, bb);
2503 if (cfg->opt & MONO_OPT_PEEPHOLE)
2504 mono_arch_peephole_pass_1 (cfg, bb);
2506 mono_local_regalloc (cfg, bb);
2508 if (cfg->opt & MONO_OPT_PEEPHOLE)
2509 mono_arch_peephole_pass_2 (cfg, bb);
2511 if (cfg->gen_seq_points && !cfg->gen_sdb_seq_points)
2512 mono_bb_deduplicate_op_il_seq_points (cfg, bb);
2515 if (cfg->prof_options & MONO_PROFILE_COVERAGE)
2516 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, cfg->num_bblocks);
2518 code = mono_arch_emit_prolog (cfg);
2520 cfg->code_len = code - cfg->native_code;
2521 cfg->prolog_end = cfg->code_len;
2522 cfg->cfa_reg = cfg->cur_cfa_reg;
2523 cfg->cfa_offset = cfg->cur_cfa_offset;
2525 mono_debug_open_method (cfg);
2527 /* emit code all basic blocks */
2528 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
2529 bb->native_offset = cfg->code_len;
2530 bb->real_native_offset = cfg->code_len;
2531 //if ((bb == cfg->bb_entry) || !(bb->region == -1 && !bb->dfn))
2532 mono_arch_output_basic_block (cfg, bb);
2533 bb->native_length = cfg->code_len - bb->native_offset;
2535 if (bb == cfg->bb_exit) {
2536 cfg->epilog_begin = cfg->code_len;
2537 mono_arch_emit_epilog (cfg);
2538 cfg->epilog_end = cfg->code_len;
2542 #ifdef __native_client_codegen__
2543 mono_nacl_fix_patches (cfg->native_code, cfg->patch_info);
2545 mono_arch_emit_exceptions (cfg);
2547 max_epilog_size = 0;
2549 /* we always allocate code in cfg->domain->code_mp to increase locality */
2550 cfg->code_size = cfg->code_len + max_epilog_size;
2551 #ifdef __native_client_codegen__
2552 cfg->code_size = NACL_BUNDLE_ALIGN_UP (cfg->code_size);
2554 /* fixme: align to MONO_ARCH_CODE_ALIGNMENT */
2556 #ifdef MONO_ARCH_HAVE_UNWIND_TABLE
2557 unwindlen = mono_arch_unwindinfo_get_size (cfg->arch.unwindinfo);
2560 if (cfg->method->dynamic) {
2561 /* Allocate the code into a separate memory pool so it can be freed */
2562 cfg->dynamic_info = g_new0 (MonoJitDynamicMethodInfo, 1);
2563 cfg->dynamic_info->code_mp = mono_code_manager_new_dynamic ();
2564 mono_domain_lock (cfg->domain);
2565 mono_dynamic_code_hash_insert (cfg->domain, cfg->method, cfg->dynamic_info);
2566 mono_domain_unlock (cfg->domain);
2568 if (mono_using_xdebug)
2569 /* See the comment for cfg->code_domain */
2570 code = (guint8 *)mono_domain_code_reserve (code_domain, cfg->code_size + cfg->thunk_area + unwindlen);
2572 code = (guint8 *)mono_code_manager_reserve (cfg->dynamic_info->code_mp, cfg->code_size + cfg->thunk_area + unwindlen);
2574 code = (guint8 *)mono_domain_code_reserve (code_domain, cfg->code_size + cfg->thunk_area + unwindlen);
2576 #if defined(__native_client_codegen__) && defined(__native_client__)
2577 nacl_allow_target_modification (TRUE);
2579 if (cfg->thunk_area) {
2580 cfg->thunks_offset = cfg->code_size + unwindlen;
2581 cfg->thunks = code + cfg->thunks_offset;
2582 memset (cfg->thunks, 0, cfg->thunk_area);
2586 memcpy (code, cfg->native_code, cfg->code_len);
2587 #if defined(__default_codegen__)
2588 g_free (cfg->native_code);
2589 #elif defined(__native_client_codegen__)
2590 if (cfg->native_code_alloc) {
2591 g_free (cfg->native_code_alloc);
2592 cfg->native_code_alloc = 0;
2594 else if (cfg->native_code) {
2595 g_free (cfg->native_code);
2597 #endif /* __native_client_codegen__ */
2598 cfg->native_code = code;
2599 code = cfg->native_code + cfg->code_len;
2601 /* g_assert (((int)cfg->native_code & (MONO_ARCH_CODE_ALIGNMENT - 1)) == 0); */
2602 mono_postprocess_patches (cfg);
2604 #ifdef VALGRIND_JIT_REGISTER_MAP
2605 if (valgrind_register){
2606 char* nm = mono_method_full_name (cfg->method, TRUE);
2607 VALGRIND_JIT_REGISTER_MAP (nm, cfg->native_code, cfg->native_code + cfg->code_len);
2612 if (cfg->verbose_level > 0) {
2613 char* nm = mono_method_get_full_name (cfg->method);
2614 char *opt_descr = mono_opt_descr (cfg->opt);
2615 g_print ("Method %s emitted at %p to %p (code length %d) [%s] with opts %s\n",
2617 cfg->native_code, cfg->native_code + cfg->code_len, cfg->code_len, cfg->domain->friendly_name,
2624 gboolean is_generic = FALSE;
2626 if (cfg->method->is_inflated || mono_method_get_generic_container (cfg->method) ||
2627 cfg->method->klass->generic_container || cfg->method->klass->generic_class) {
2632 g_assert (is_generic);
2635 #ifdef MONO_ARCH_HAVE_SAVE_UNWIND_INFO
2636 mono_arch_save_unwind_info (cfg);
2639 #if defined(__native_client_codegen__) && defined(__native_client__)
2640 if (!cfg->compile_aot) {
2641 if (cfg->method->dynamic) {
2642 code_dest = nacl_code_manager_get_code_dest(cfg->dynamic_info->code_mp, cfg->native_code);
2644 code_dest = nacl_domain_get_code_dest(cfg->domain, cfg->native_code);
2649 #if defined(__native_client_codegen__)
2650 mono_nacl_fix_patches (cfg->native_code, cfg->patch_info);
2653 #ifdef MONO_ARCH_HAVE_PATCH_CODE_NEW
2658 for (ji = cfg->patch_info; ji; ji = ji->next) {
2659 if (cfg->compile_aot) {
2661 case MONO_PATCH_INFO_BB:
2662 case MONO_PATCH_INFO_LABEL:
2665 /* No need to patch these */
2670 if (ji->type == MONO_PATCH_INFO_NONE)
2673 target = mono_resolve_patch_target (cfg->method, cfg->domain, cfg->native_code, ji, cfg->run_cctors, &cfg->error);
2674 if (!mono_error_ok (&cfg->error)) {
2675 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
2678 mono_arch_patch_code_new (cfg, cfg->domain, cfg->native_code, ji, target);
2682 mono_arch_patch_code (cfg, cfg->method, cfg->domain, cfg->native_code, cfg->patch_info, cfg->run_cctors);
2685 if (cfg->method->dynamic) {
2686 if (mono_using_xdebug)
2687 mono_domain_code_commit (code_domain, cfg->native_code, cfg->code_size, cfg->code_len);
2689 mono_code_manager_commit (cfg->dynamic_info->code_mp, cfg->native_code, cfg->code_size, cfg->code_len);
2691 mono_domain_code_commit (code_domain, cfg->native_code, cfg->code_size, cfg->code_len);
2693 #if defined(__native_client_codegen__) && defined(__native_client__)
2694 cfg->native_code = code_dest;
2696 mono_profiler_code_buffer_new (cfg->native_code, cfg->code_len, MONO_PROFILER_CODE_BUFFER_METHOD, cfg->method);
2698 mono_arch_flush_icache (cfg->native_code, cfg->code_len);
2700 mono_debug_close_method (cfg);
2702 #ifdef MONO_ARCH_HAVE_UNWIND_TABLE
2703 mono_arch_unwindinfo_install_unwind_info (&cfg->arch.unwindinfo, cfg->native_code, cfg->code_len);
/*
 * compute_reachable:
 *
 *   Depth-first marking pass: sets BB_VISITED on BB and, recursively, on
 * every basic block reachable from it through the out_bb edge array.
 * Blocks already carrying BB_VISITED are not revisited, so the recursion
 * terminates even on cyclic control-flow graphs.
 *
 * NOTE(review): this view of the file is elided (original lines 2709-2711
 * and the closing braces are not shown); the declaration of `i` and the
 * static return type live in the hidden lines.
 */
2708 compute_reachable (MonoBasicBlock *bb)
2712 if (!(bb->flags & BB_VISITED)) {
2713 bb->flags |= BB_VISITED;
2714 for (i = 0; i < bb->out_count; ++i)
2715 compute_reachable (bb->out_bb [i]);
/*
 * mono_bb_ordering:
 *
 *   Computes a depth-first ordering of the basic blocks of CFG (filling
 * cfg->bblocks via df_visit) and then removes blocks that the DFS proved
 * unreachable: their instruction lists are cleared and their outgoing
 * edges unlinked. Exception-handler landing blocks are marked reachable
 * explicitly, since they are entered via EH dispatch rather than normal
 * CFG edges.
 *
 * NOTE(review): several original lines (2720-2721, 2724, 2726, 2729-2730,
 * 2732, braces) are elided from this view; `dfn` and `bb` are declared in
 * the hidden lines.
 */
2719 static void mono_bb_ordering (MonoCompile *cfg)
2722 /* Depth-first ordering on basic blocks */
2723 cfg->bblocks = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * (cfg->num_bblocks + 1));
2725 cfg->max_block_num = cfg->num_bblocks;
2727 df_visit (cfg->bb_entry, &dfn, cfg->bblocks);
/* dfn counts the blocks actually visited; a mismatch means some blocks were unreachable */
2728 if (cfg->num_bblocks != dfn + 1) {
2731 cfg->num_bblocks = dfn + 1;
2733 /* remove unreachable code, because the code in them may be
2734 * inconsistent (access to dead variables for example) */
2735 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
2736 bb->flags &= ~BB_VISITED;
2737 compute_reachable (cfg->bb_entry);
/* EH handler blocks are reached through exception dispatch, not CFG edges */
2738 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
2739 if (bb->flags & BB_EXCEPTION_HANDLER)
2740 compute_reachable (bb);
2741 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
2742 if (!(bb->flags & BB_VISITED)) {
2743 if (cfg->verbose_level > 1)
2744 g_print ("found unreachable code in BB%d\n", bb->block_num);
2745 bb->code = bb->last_ins = NULL;
2746 while (bb->out_count)
2747 mono_unlink_bblock (cfg, bb, bb->out_bb [0]);
/* leave BB_VISITED clean for later passes */
2750 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
2751 bb->flags &= ~BB_VISITED;
/*
 * mono_handle_out_of_line_bblock:
 *
 *   When a basic block is followed (in emission order) by an out-of-line
 * block and does not already end in a branch, append an explicit OP_BR to
 * its own successor so that falling through into the out-of-line block is
 * impossible once blocks are reordered.
 *
 * NOTE(review): lines 2757-2758, 2761 and the closing braces are elided
 * from this view; `bb` and `ins` are declared in the hidden lines.
 */
2756 mono_handle_out_of_line_bblock (MonoCompile *cfg)
2759 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
2760 if (bb->next_bb && bb->next_bb->out_of_line && bb->last_ins && !MONO_IS_BRANCH_OP (bb->last_ins)) {
2762 MONO_INST_NEW (cfg, ins, OP_BR);
2763 MONO_ADD_INS (bb, ins);
2764 ins->inst_target_bb = bb->next_bb;
/*
 * create_jit_info:
 *
 *   Builds the MonoJitInfo descriptor for a freshly compiled method:
 * computes the optional-info flags (generic sharing, arch EH info, thunk
 * info, try-block holes), allocates the jinfo (malloc'ed for dynamic
 * methods, domain-allocated otherwise), and fills in the EH clause table,
 * the generic-sharing `this`/rgctx location data, the try-block hole
 * table and the unwind information.
 *
 * NOTE(review): this view of the file is elided — many original lines
 * (braces, #if/#else/#endif arms, local declarations such as `jinfo`,
 * `tmp`, `i`, `l`, `inst`, `region`, `end_offset`, `info_len`) are not
 * visible here. Comments below only describe what the visible lines show.
 */
2770 create_jit_info (MonoCompile *cfg, MonoMethod *method_to_compile)
2773 MonoMethodHeader *header;
2775 MonoJitInfoFlags flags = JIT_INFO_NONE;
2776 int num_clauses, num_holes = 0;
2777 guint32 stack_size = 0;
2779 g_assert (method_to_compile == cfg->method);
2780 header = cfg->header;
/* --- flag computation: decide which optional jinfo sections are present --- */
2783 flags = (MonoJitInfoFlags)(flags | JIT_INFO_HAS_GENERIC_JIT_INFO);
2785 if (cfg->arch_eh_jit_info) {
2786 MonoJitArgumentInfo *arg_info;
2787 MonoMethodSignature *sig = mono_method_signature (cfg->method_to_register);
2790 * This cannot be computed during stack walking, as
2791 * mono_arch_get_argument_info () is not signal safe.
2793 arg_info = g_newa (MonoJitArgumentInfo, sig->param_count + 1);
2794 stack_size = mono_arch_get_argument_info (sig, sig->param_count, arg_info);
2797 flags = (MonoJitInfoFlags)(flags | JIT_INFO_HAS_ARCH_EH_INFO);
2800 if (cfg->has_unwind_info_for_epilog && !(flags & JIT_INFO_HAS_ARCH_EH_INFO))
2801 flags = (MonoJitInfoFlags)(flags | JIT_INFO_HAS_ARCH_EH_INFO);
2803 if (cfg->thunk_area)
2804 flags = (MonoJitInfoFlags)(flags | JIT_INFO_HAS_THUNK_INFO);
/* count the try-block holes that cannot be folded into the block size */
2806 if (cfg->try_block_holes) {
2807 for (tmp = cfg->try_block_holes; tmp; tmp = tmp->next) {
2808 TryBlockHole *hole = (TryBlockHole *)tmp->data;
2809 MonoExceptionClause *ec = hole->clause;
2810 int hole_end = hole->basic_block->native_offset + hole->basic_block->native_length;
2811 MonoBasicBlock *clause_last_bb = cfg->cil_offset_to_bb [ec->try_offset + ec->try_len];
2812 g_assert (clause_last_bb);
2814 /* Holes at the end of a try region can be represented by simply reducing the size of the block itself.*/
2815 if (clause_last_bb->native_offset != hole_end)
2819 flags = (MonoJitInfoFlags)(flags | JIT_INFO_HAS_TRY_BLOCK_HOLES);
2820 if (G_UNLIKELY (cfg->verbose_level >= 4))
2821 printf ("Number of try block holes %d\n", num_holes);
/* LLVM-compiled methods carry their own clause table */
2824 if (COMPILE_LLVM (cfg))
2825 num_clauses = cfg->llvm_ex_info_len;
2827 num_clauses = header->num_clauses;
/* --- allocation: dynamic methods are malloc'ed (freed with the method),
 * everything else lives in the domain --- */
2829 if (cfg->method->dynamic)
2830 jinfo = (MonoJitInfo *)g_malloc0 (mono_jit_info_size (flags, num_clauses, num_holes));
2832 jinfo = (MonoJitInfo *)mono_domain_alloc0 (cfg->domain, mono_jit_info_size (flags, num_clauses, num_holes));
2833 mono_jit_info_init (jinfo, cfg->method_to_register, cfg->native_code, cfg->code_len, flags, num_clauses, num_holes);
2834 jinfo->domain_neutral = (cfg->opt & MONO_OPT_SHARED) != 0;
2836 if (COMPILE_LLVM (cfg))
2837 jinfo->from_llvm = TRUE;
/* --- generic sharing info: where the runtime can find `this`/rgctx --- */
2841 MonoGenericJitInfo *gi;
2842 GSList *loclist = NULL;
2844 gi = mono_jit_info_get_generic_jit_info (jinfo);
2847 if (cfg->method->dynamic)
2848 gi->generic_sharing_context = g_new0 (MonoGenericSharingContext, 1);
2850 gi->generic_sharing_context = (MonoGenericSharingContext *)mono_domain_alloc0 (cfg->domain, sizeof (MonoGenericSharingContext));
2851 mini_init_gsctx (cfg->method->dynamic ? NULL : cfg->domain, NULL, cfg->gsctx_context, gi->generic_sharing_context);
/* static methods, methods with a method_inst and valuetype methods use an
 * rgctx variable; instance methods use the `this` argument instead */
2853 if ((method_to_compile->flags & METHOD_ATTRIBUTE_STATIC) ||
2854 mini_method_get_context (method_to_compile)->method_inst ||
2855 method_to_compile->klass->valuetype) {
2856 g_assert (cfg->rgctx_var);
2861 if ((method_to_compile->flags & METHOD_ATTRIBUTE_STATIC) ||
2862 mini_method_get_context (method_to_compile)->method_inst ||
2863 method_to_compile->klass->valuetype) {
2864 inst = cfg->rgctx_var;
2865 if (!COMPILE_LLVM (cfg))
2866 g_assert (inst->opcode == OP_REGOFFSET);
2867 loclist = cfg->rgctx_loclist;
2869 inst = cfg->args [0];
2870 loclist = cfg->this_loclist;
2874 /* Needed to handle async exceptions */
/* copy the per-PC location list (DWARF-style entries) into the jinfo */
2878 gi->nlocs = g_slist_length (loclist);
2879 if (cfg->method->dynamic)
2880 gi->locations = (MonoDwarfLocListEntry *)g_malloc0 (gi->nlocs * sizeof (MonoDwarfLocListEntry));
2882 gi->locations = (MonoDwarfLocListEntry *)mono_domain_alloc0 (cfg->domain, gi->nlocs * sizeof (MonoDwarfLocListEntry));
2884 for (l = loclist; l; l = l->next) {
2885 memcpy (&(gi->locations [i]), l->data, sizeof (MonoDwarfLocListEntry));
2890 if (COMPILE_LLVM (cfg)) {
2891 g_assert (cfg->llvm_this_reg != -1);
2892 gi->this_in_reg = 0;
2893 gi->this_reg = cfg->llvm_this_reg;
2894 gi->this_offset = cfg->llvm_this_offset;
2895 } else if (inst->opcode == OP_REGVAR) {
2896 gi->this_in_reg = 1;
2897 gi->this_reg = inst->dreg;
2899 g_assert (inst->opcode == OP_REGOFFSET);
2901 g_assert (inst->inst_basereg == X86_EBP);
2902 #elif defined(TARGET_AMD64)
2903 g_assert (inst->inst_basereg == X86_EBP || inst->inst_basereg == X86_ESP);
2905 g_assert (inst->inst_offset >= G_MININT32 && inst->inst_offset <= G_MAXINT32);
2907 gi->this_in_reg = 0;
2908 gi->this_reg = inst->inst_basereg;
2909 gi->this_offset = inst->inst_offset;
/* --- try-block hole table (mirrors the counting loop above) --- */
2914 MonoTryBlockHoleTableJitInfo *table;
2917 table = mono_jit_info_get_try_block_hole_table_info (jinfo);
2918 table->num_holes = (guint16)num_holes;
2920 for (tmp = cfg->try_block_holes; tmp; tmp = tmp->next) {
2921 guint32 start_bb_offset;
2922 MonoTryBlockHoleJitInfo *hole;
2923 TryBlockHole *hole_data = (TryBlockHole *)tmp->data;
2924 MonoExceptionClause *ec = hole_data->clause;
2925 int hole_end = hole_data->basic_block->native_offset + hole_data->basic_block->native_length;
2926 MonoBasicBlock *clause_last_bb = cfg->cil_offset_to_bb [ec->try_offset + ec->try_len];
2927 g_assert (clause_last_bb);
2929 /* Holes at the end of a try region can be represented by simply reducing the size of the block itself.*/
2930 if (clause_last_bb->native_offset == hole_end)
2933 start_bb_offset = hole_data->start_offset - hole_data->basic_block->native_offset;
2934 hole = &table->holes [i++];
2935 hole->clause = hole_data->clause - &header->clauses [0];
2936 hole->offset = (guint32)hole_data->start_offset;
2937 hole->length = (guint16)(hole_data->basic_block->native_length - start_bb_offset);
2939 if (G_UNLIKELY (cfg->verbose_level >= 4))
2940 printf ("\tTry block hole at eh clause %d offset %x length %x\n", hole->clause, hole->offset, hole->length);
2942 g_assert (i == num_holes);
/* --- arch EH info (frame size for stack walking) --- */
2945 if (jinfo->has_arch_eh_info) {
2946 MonoArchEHJitInfo *info;
2948 info = mono_jit_info_get_arch_eh_info (jinfo);
2950 info->stack_size = stack_size;
/* --- thunk area descriptor --- */
2953 if (cfg->thunk_area) {
2954 MonoThunkJitInfo *info;
2956 info = mono_jit_info_get_thunk_info (jinfo);
2957 info->thunks_offset = cfg->thunks_offset;
2958 info->thunks_size = cfg->thunk_area;
/* --- EH clause table: native start/end addresses for each IL clause --- */
2961 if (COMPILE_LLVM (cfg)) {
2963 memcpy (&jinfo->clauses [0], &cfg->llvm_ex_info [0], num_clauses * sizeof (MonoJitExceptionInfo));
2964 } else if (header->num_clauses) {
2967 for (i = 0; i < header->num_clauses; i++) {
2968 MonoExceptionClause *ec = &header->clauses [i];
2969 MonoJitExceptionInfo *ei = &jinfo->clauses [i];
2970 MonoBasicBlock *tblock;
2971 MonoInst *exvar, *spvar;
2973 ei->flags = ec->flags;
2975 if (G_UNLIKELY (cfg->verbose_level >= 4))
2976 printf ("IL clause: try 0x%x-0x%x handler 0x%x-0x%x filter 0x%x\n", ec->try_offset, ec->try_offset + ec->try_len, ec->handler_offset, ec->handler_offset + ec->handler_len, ec->flags == MONO_EXCEPTION_CLAUSE_FILTER ? ec->data.filter_offset : 0);
2979 * The spvars are needed by mono_arch_install_handler_block_guard ().
2981 if (ei->flags == MONO_EXCEPTION_CLAUSE_FINALLY) {
2984 region = ((i + 1) << 8) | MONO_REGION_FINALLY | ec->flags;
2985 spvar = mono_find_spvar_for_region (cfg, region);
2987 ei->exvar_offset = spvar->inst_offset;
2989 exvar = mono_find_exvar_for_offset (cfg, ec->handler_offset);
2990 ei->exvar_offset = exvar ? exvar->inst_offset : 0;
2993 if (ei->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
2994 tblock = cfg->cil_offset_to_bb [ec->data.filter_offset];
2996 ei->data.filter = cfg->native_code + tblock->native_offset;
2998 ei->data.catch_class = ec->data.catch_class;
3001 tblock = cfg->cil_offset_to_bb [ec->try_offset];
3003 g_assert (tblock->native_offset);
3004 ei->try_start = cfg->native_code + tblock->native_offset;
3005 if (tblock->extend_try_block) {
3007 * Extend the try block backwards to include parts of the previous call
3010 ei->try_start = (guint8*)ei->try_start - cfg->backend->monitor_enter_adjustment;
3012 if (ec->try_offset + ec->try_len < header->code_size)
3013 tblock = cfg->cil_offset_to_bb [ec->try_offset + ec->try_len];
3015 tblock = cfg->bb_exit;
3016 if (G_UNLIKELY (cfg->verbose_level >= 4))
3017 printf ("looking for end of try [%d, %d] -> %p (code size %d)\n", ec->try_offset, ec->try_len, tblock, header->code_size);
/* the end-of-try block may have been eliminated; scan backwards for a live one */
3019 if (!tblock->native_offset) {
3021 for (j = ec->try_offset + ec->try_len, end = ec->try_offset; j >= end; --j) {
3022 MonoBasicBlock *bb = cfg->cil_offset_to_bb [j];
3023 if (bb && bb->native_offset) {
3029 ei->try_end = cfg->native_code + tblock->native_offset;
3030 g_assert (tblock->native_offset);
3031 tblock = cfg->cil_offset_to_bb [ec->handler_offset];
3033 ei->handler_start = cfg->native_code + tblock->native_offset;
/* a hole at the very end of the try region shortens try_end instead of
 * being recorded in the hole table */
3035 for (tmp = cfg->try_block_holes; tmp; tmp = tmp->next) {
3036 TryBlockHole *hole = (TryBlockHole *)tmp->data;
3037 gpointer hole_end = cfg->native_code + (hole->basic_block->native_offset + hole->basic_block->native_length);
3038 if (hole->clause == ec && hole_end == ei->try_end) {
3039 if (G_UNLIKELY (cfg->verbose_level >= 4))
3040 printf ("\tShortening try block %d from %x to %x\n", i, (int)((guint8*)ei->try_end - cfg->native_code), hole->start_offset);
3042 ei->try_end = cfg->native_code + hole->start_offset;
/* for finally clauses, record where the handler ends as well */
3047 if (ec->flags == MONO_EXCEPTION_CLAUSE_FINALLY) {
3049 if (ec->handler_offset + ec->handler_len < header->code_size) {
3050 tblock = cfg->cil_offset_to_bb [ec->handler_offset + ec->handler_len];
3051 if (tblock->native_offset) {
3052 end_offset = tblock->native_offset;
3056 for (j = ec->handler_offset + ec->handler_len, end = ec->handler_offset; j >= end; --j) {
3057 MonoBasicBlock *bb = cfg->cil_offset_to_bb [j];
3058 if (bb && bb->native_offset) {
3063 end_offset = tblock->native_offset + tblock->native_length;
3066 end_offset = cfg->epilog_begin;
3068 ei->data.handler_end = cfg->native_code + end_offset;
/* dump the final clause table when very verbose */
3073 if (G_UNLIKELY (cfg->verbose_level >= 4)) {
3075 for (i = 0; i < jinfo->num_clauses; i++) {
3076 MonoJitExceptionInfo *ei = &jinfo->clauses [i];
3077 int start = (guint8*)ei->try_start - cfg->native_code;
3078 int end = (guint8*)ei->try_end - cfg->native_code;
3079 int handler = (guint8*)ei->handler_start - cfg->native_code;
3080 int handler_end = (guint8*)ei->data.handler_end - cfg->native_code;
3082 printf ("JitInfo EH clause %d flags %x try %x-%x handler %x-%x\n", i, ei->flags, start, end, handler, handler_end);
/* --- unwind info: pre-encoded (LLVM), freshly encoded, or just the
 * used-registers mask as a fallback --- */
3086 if (cfg->encoded_unwind_ops) {
3087 /* Generated by LLVM */
3088 jinfo->unwind_info = mono_cache_unwind_info (cfg->encoded_unwind_ops, cfg->encoded_unwind_ops_len);
3089 g_free (cfg->encoded_unwind_ops);
3090 } else if (cfg->unwind_ops) {
3092 guint8 *unwind_info = mono_unwind_ops_encode (cfg->unwind_ops, &info_len);
3093 guint32 unwind_desc;
3095 unwind_desc = mono_cache_unwind_info (unwind_info, info_len);
3097 if (cfg->has_unwind_info_for_epilog) {
3098 MonoArchEHJitInfo *info;
3100 info = mono_jit_info_get_arch_eh_info (jinfo);
3102 info->epilog_size = cfg->code_len - cfg->epilog_begin;
3104 jinfo->unwind_info = unwind_desc;
3105 g_free (unwind_info);
3107 jinfo->unwind_info = cfg->used_int_regs;
3113 /* Return whenever METHOD is a gsharedvt method */
/*
 * is_gsharedvt_method:
 *
 *   A method is gsharedvt if it is inflated and any type argument in its
 * class or method instantiation is a gsharedvt generic parameter.
 * Non-inflated methods return FALSE immediately.
 *
 * NOTE(review): the return type line, braces, early returns and the
 * declaration of `i` are elided from this view.
 */
3115 is_gsharedvt_method (MonoMethod *method)
3117 MonoGenericContext *context;
3118 MonoGenericInst *inst;
3121 if (!method->is_inflated)
3123 context = mono_method_get_context (method);
3124 inst = context->class_inst;
/* check the class-level type arguments first ... */
3126 for (i = 0; i < inst->type_argc; ++i)
3127 if (mini_is_gsharedvt_gparam (inst->type_argv [i]))
3130 inst = context->method_inst;
/* ... then the method-level ones */
3132 for (i = 0; i < inst->type_argc; ++i)
3133 if (mini_is_gsharedvt_gparam (inst->type_argv [i]))
/*
 * is_open_method:
 *
 *   Returns whether METHOD is inflated with an open generic context,
 * i.e. either its class or method instantiation still contains
 * unassigned generic parameters.
 *
 * NOTE(review): the return type line, braces and return statements are
 * elided from this view.
 */
3140 is_open_method (MonoMethod *method)
3142 MonoGenericContext *context;
3144 if (!method->is_inflated)
3146 context = mono_method_get_context (method);
3147 if (context->class_inst && context->class_inst->is_open)
3149 if (context->method_inst && context->method_inst->is_open)
/*
 * mono_create_gc_safepoint:
 *
 *   Inserts a GC safepoint (an OP_GC_SAFE_POINT testing a polling flag)
 * at the start of BBLOCK. Three placement cases are handled:
 *   - exception-handler blocks: the poll is placed after the leading
 *     EH bookkeeping ops (OP_START_HANDLER / OP_GET_EX_OBJ);
 *   - the entry block: the poll goes after the last existing instruction;
 *   - everything else: the poll is inserted at the very start.
 * The address polled is &__nacl_thread_suspension_needed under NaCl,
 * otherwise &mono_polling_required (cooperative suspend only, per the
 * assertion).
 *
 * NOTE(review): several lines (#else/#endif arms, braces) are elided
 * from this view.
 */
3155 mono_create_gc_safepoint (MonoCompile *cfg, MonoBasicBlock *bblock)
3157 MonoInst *poll_addr, *ins;
3158 if (cfg->verbose_level > 1)
3159 printf ("ADDING SAFE POINT TO BB %d\n", bblock->block_num);
3161 #if defined(__native_client_codegen__)
3162 NEW_AOTCONST (cfg, poll_addr, MONO_PATCH_INFO_GC_SAFE_POINT_FLAG, (gpointer)&__nacl_thread_suspension_needed);
3164 g_assert (mono_threads_is_coop_enabled ());
3165 NEW_AOTCONST (cfg, poll_addr, MONO_PATCH_INFO_GC_SAFE_POINT_FLAG, (gpointer)&mono_polling_required);
3168 MONO_INST_NEW (cfg, ins, OP_GC_SAFE_POINT);
3169 ins->sreg1 = poll_addr->dreg;
3171 if (bblock->flags & BB_EXCEPTION_HANDLER) {
3172 MonoInst *eh_op = bblock->code;
3174 if (eh_op && eh_op->opcode != OP_START_HANDLER && eh_op->opcode != OP_GET_EX_OBJ) {
3177 MonoInst *next_eh_op = eh_op ? eh_op->next : NULL;
3178 // skip all EH relateds ops
3179 while (next_eh_op && (next_eh_op->opcode == OP_START_HANDLER || next_eh_op->opcode == OP_GET_EX_OBJ)) {
3181 next_eh_op = eh_op->next;
3185 mono_bblock_insert_after_ins (bblock, eh_op, poll_addr);
3186 mono_bblock_insert_after_ins (bblock, poll_addr, ins);
3187 } else if (bblock == cfg->bb_entry) {
3188 mono_bblock_insert_after_ins (bblock, bblock->last_ins, poll_addr);
3189 mono_bblock_insert_after_ins (bblock, poll_addr, ins);
3192 mono_bblock_insert_before_ins (bblock, NULL, poll_addr);
3193 mono_bblock_insert_after_ins (bblock, poll_addr, ins);
3198 This code inserts safepoints into managed code at important code paths.
3201 -the first basic block
3202 -landing BB for exception handlers
/*
 * mono_insert_safepoints:
 *
 *   Walks all basic blocks of CFG and calls mono_create_gc_safepoint ()
 * on loop-body starts, the entry block and exception-handler blocks.
 * Several wrapper kinds are skipped to avoid recursion or stack overflow:
 * the polling-function icall wrapper itself, native-to-managed wrappers,
 * and icall wrappers around the thread-interruption/blocking helpers
 * (which are invoked from the polling path).
 *
 * NOTE(review): some lines (#else/#endif arms, early returns, braces,
 * the declaration of `bb`) are elided from this view; also nothing is
 * done unless cooperative suspend (or NaCl) is enabled.
 */
3207 mono_insert_safepoints (MonoCompile *cfg)
3211 #if !defined(__native_client_codegen__)
3212 if (!mono_threads_is_coop_enabled ())
3216 if (cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
3217 WrapperInfo *info = mono_marshal_get_wrapper_info (cfg->method);
3218 #if defined(__native_client__) || defined(__native_client_codegen__)
3219 gpointer poll_func = &mono_nacl_gc;
3221 g_assert (mono_threads_is_coop_enabled ());
3222 gpointer poll_func = &mono_threads_state_poll;
/* don't instrument the polling function's own wrapper — it would recurse */
3225 if (info && info->subtype == WRAPPER_SUBTYPE_ICALL_WRAPPER && info->d.icall.func == poll_func) {
3226 if (cfg->verbose_level > 1)
3227 printf ("SKIPPING SAFEPOINTS for the polling function icall\n");
3232 if (cfg->method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
3233 if (cfg->verbose_level > 1)
3234 printf ("SKIPPING SAFEPOINTS for native-to-managed wrappers.\n");
3238 if (cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
3239 WrapperInfo *info = mono_marshal_get_wrapper_info (cfg->method);
3241 if (info && info->subtype == WRAPPER_SUBTYPE_ICALL_WRAPPER &&
3242 (info->d.icall.func == mono_thread_interruption_checkpoint ||
3243 info->d.icall.func == mono_threads_finish_blocking ||
3244 info->d.icall.func == mono_threads_reset_blocking_start)) {
3245 /* These wrappers are called from the wrapper for the polling function, leading to potential stack overflow */
3246 if (cfg->verbose_level > 1)
3247 printf ("SKIPPING SAFEPOINTS for wrapper %s\n", cfg->method->name);
3252 if (cfg->verbose_level > 1)
3253 printf ("INSERTING SAFEPOINTS\n");
3254 if (cfg->verbose_level > 2)
3255 mono_print_code (cfg, "BEFORE SAFEPOINTS");
3257 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
3258 if (bb->loop_body_start || bb == cfg->bb_entry || bb->flags & BB_EXCEPTION_HANDLER)
3259 mono_create_gc_safepoint (cfg, bb);
3262 if (cfg->verbose_level > 2)
3263 mono_print_code (cfg, "AFTER SAFEPOINTS");
/*
 * mono_insert_branches_between_bblocks:
 *
 *   Makes fall-through between non-consecutive basic blocks explicit.
 * For a block ending in a conditional branch whose false target is not
 * the next block: if the true target IS the next block (and the branch is
 * a non-FP one — inverting FP compares is unsafe around NaNs, see bug
 * #59580), the branch is inverted; otherwise an explicit OP_BR to the
 * false target is appended. Finally each block's max_vreg is synced to
 * cfg->next_vreg, and the IR is dumped at verbosity >= 4.
 *
 * NOTE(review): braces, #endif lines and the declaration of `bb` are
 * elided from this view.
 */
3269 mono_insert_branches_between_bblocks (MonoCompile *cfg)
3273 /* Add branches between non-consecutive bblocks */
3274 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
3275 if (bb->last_ins && MONO_IS_COND_BRANCH_OP (bb->last_ins) &&
3276 bb->last_ins->inst_false_bb && bb->next_bb != bb->last_ins->inst_false_bb) {
3277 /* we are careful when inverting, since bugs like #59580
3278 * could show up when dealing with NaNs.
3280 if (MONO_IS_COND_BRANCH_NOFP(bb->last_ins) && bb->next_bb == bb->last_ins->inst_true_bb) {
3281 MonoBasicBlock *tmp = bb->last_ins->inst_true_bb;
3282 bb->last_ins->inst_true_bb = bb->last_ins->inst_false_bb;
3283 bb->last_ins->inst_false_bb = tmp;
3285 bb->last_ins->opcode = mono_reverse_branch_op (bb->last_ins->opcode);
3287 MonoInst *inst = (MonoInst *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst));
3288 inst->opcode = OP_BR;
3289 inst->inst_target_bb = bb->last_ins->inst_false_bb;
3290 mono_bblock_add_inst (bb, inst);
3295 if (cfg->verbose_level >= 4) {
3296 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
3297 MonoInst *tree = bb->code;
3298 g_print ("DUMP BLOCK %d:\n", bb->block_num);
3301 for (; tree; tree = tree->next) {
3302 mono_print_ins_index (-1, tree);
3308 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
3309 bb->max_vreg = cfg->next_vreg;
/*
 * init_backend:
 *
 *   Fills in the MonoBackend capability structure from the per-arch
 * MONO_ARCH_* feature macros, translating compile-time configuration
 * into runtime-queryable flags (card-table write barriers, tail calls,
 * TLS access, fp-stack usage, IMT thunks, etc.).
 *
 * NOTE(review): the #endif lines and closing brace are elided from this
 * view; each #ifdef shown guards exactly the assignment that follows it.
 */
3314 init_backend (MonoBackend *backend)
3316 #ifdef MONO_ARCH_NEED_GOT_VAR
3317 backend->need_got_var = 1;
3319 #ifdef MONO_ARCH_HAVE_CARD_TABLE_WBARRIER
3320 backend->have_card_table_wb = 1;
3322 #ifdef MONO_ARCH_HAVE_OP_GENERIC_CLASS_INIT
3323 backend->have_op_generic_class_init = 1;
3325 #ifdef MONO_ARCH_EMULATE_MUL_DIV
3326 backend->emulate_mul_div = 1;
3328 #ifdef MONO_ARCH_EMULATE_DIV
3329 backend->emulate_div = 1;
3331 #if !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
3332 backend->emulate_long_shift_opts = 1;
3334 #ifdef MONO_ARCH_HAVE_OBJC_GET_SELECTOR
3335 backend->have_objc_get_selector = 1;
3337 #ifdef MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK
3338 backend->have_generalized_imt_thunk = 1;
3340 #ifdef MONO_ARCH_GSHARED_SUPPORTED
3341 backend->gshared_supported = 1;
3343 if (MONO_ARCH_HAVE_TLS_GET)
3344 backend->have_tls_get = 1;
3345 #ifdef MONO_ARCH_HAVE_TLS_GET_REG
3346 backend->have_tls_get_reg = 1;
3348 if (MONO_ARCH_USE_FPSTACK)
3349 backend->use_fpstack = 1;
3350 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
3351 backend->have_liverange_ops = 1;
3353 #ifdef MONO_ARCH_HAVE_OP_TAIL_CALL
3354 backend->have_op_tail_call = 1;
/* default monitor-enter adjustment is 1 unless the arch overrides it */
3356 #ifndef MONO_ARCH_MONITOR_ENTER_ADJUSTMENT
3357 backend->monitor_enter_adjustment = 1;
3359 backend->monitor_enter_adjustment = MONO_ARCH_MONITOR_ENTER_ADJUSTMENT;
3361 #if defined(__mono_ilp32__)
3364 #ifdef MONO_ARCH_HAVE_DUMMY_INIT
3365 backend->have_dummy_init = 1;
3367 #ifdef MONO_ARCH_NEED_DIV_CHECK
3368 backend->need_div_check = 1;
3370 #ifdef NO_UNALIGNED_ACCESS
3371 backend->no_unaligned_access = 1;
3373 #ifdef MONO_ARCH_DYN_CALL_PARAM_AREA
3374 backend->dyn_call_param_area = MONO_ARCH_DYN_CALL_PARAM_AREA;
3379 * mini_method_compile:
3380 * @method: the method to compile
3381 * @opts: the optimization flags to use
3382 * @domain: the domain where the method will be compiled in
3383 * @flags: compilation flags
3384 * @parts: debug flag
3386 * Returns: a MonoCompile* pointer. Caller must check the exception_type
3387 * field in the returned struct to see if compilation succeded.
3390 mini_method_compile (MonoMethod *method, guint32 opts, MonoDomain *domain, JitFlags flags, int parts, int aot_method_index)
3392 MonoMethodHeader *header;
3393 MonoMethodSignature *sig;
3396 int i, code_size_ratio;
3397 gboolean try_generic_shared, try_llvm = FALSE;
3398 MonoMethod *method_to_compile, *method_to_register;
3399 gboolean method_is_gshared = FALSE;
3400 gboolean run_cctors = (flags & JIT_FLAG_RUN_CCTORS) ? 1 : 0;
3401 gboolean compile_aot = (flags & JIT_FLAG_AOT) ? 1 : 0;
3402 gboolean full_aot = (flags & JIT_FLAG_FULL_AOT) ? 1 : 0;
3403 gboolean disable_direct_icalls = (flags & JIT_FLAG_NO_DIRECT_ICALLS) ? 1 : 0;
3404 gboolean gsharedvt_method = FALSE;
3406 gboolean llvm = (flags & JIT_FLAG_LLVM) ? 1 : 0;
3408 static gboolean verbose_method_inited;
3409 static const char *verbose_method_name;
3411 InterlockedIncrement (&mono_jit_stats.methods_compiled);
3412 if (mono_profiler_get_events () & MONO_PROFILE_JIT_COMPILATION)
3413 mono_profiler_method_jit (method);
3414 if (MONO_METHOD_COMPILE_BEGIN_ENABLED ())
3415 MONO_PROBE_METHOD_COMPILE_BEGIN (method);
3417 gsharedvt_method = is_gsharedvt_method (method);
3420 * In AOT mode, method can be the following:
3421 * - a gsharedvt method.
3422 * - a method inflated with type parameters. This is for ref/partial sharing.
3423 * - a method inflated with concrete types.
3426 if (is_open_method (method)) {
3427 try_generic_shared = TRUE;
3428 method_is_gshared = TRUE;
3430 try_generic_shared = FALSE;
3432 g_assert (opts & MONO_OPT_GSHARED);
3434 try_generic_shared = mono_class_generic_sharing_enabled (method->klass) &&
3435 (opts & MONO_OPT_GSHARED) && mono_method_is_generic_sharable (method, FALSE);
3436 if (mini_is_gsharedvt_sharable_method (method)) {
3438 if (!mono_debug_count ())
3439 try_generic_shared = FALSE;
3445 if (try_generic_shared && !mono_debug_count ())
3446 try_generic_shared = FALSE;
3449 if (opts & MONO_OPT_GSHARED) {
3450 if (try_generic_shared)
3451 mono_stats.generics_sharable_methods++;
3452 else if (mono_method_is_generic_impl (method))
3453 mono_stats.generics_unsharable_methods++;
3457 try_llvm = mono_use_llvm || llvm;
3461 if (method_is_gshared) {
3462 method_to_compile = method;
3464 if (try_generic_shared) {
3465 method_to_compile = mini_get_shared_method (method);
3466 g_assert (method_to_compile);
3468 method_to_compile = method;
3472 cfg = g_new0 (MonoCompile, 1);
3473 cfg->method = method_to_compile;
3474 cfg->mempool = mono_mempool_new ();
3476 cfg->prof_options = mono_profiler_get_events ();
3477 cfg->run_cctors = run_cctors;
3478 cfg->domain = domain;
3479 cfg->verbose_level = mini_verbose;
3480 cfg->compile_aot = compile_aot;
3481 cfg->full_aot = full_aot;
3482 cfg->disable_omit_fp = debug_options.disable_omit_fp;
3483 cfg->skip_visibility = method->skip_visibility;
3484 cfg->orig_method = method;
3485 cfg->gen_seq_points = debug_options.gen_seq_points_compact_data || debug_options.gen_sdb_seq_points;
3486 cfg->gen_sdb_seq_points = debug_options.gen_sdb_seq_points;
3487 cfg->llvm_only = (flags & JIT_FLAG_LLVM_ONLY) != 0;
3488 cfg->backend = current_backend;
3490 #ifdef PLATFORM_ANDROID
3491 if (cfg->method->wrapper_type != MONO_WRAPPER_NONE) {
3492 /* FIXME: Why is this needed */
3493 cfg->gen_seq_points = FALSE;
3494 cfg->gen_sdb_seq_points = FALSE;
3497 /* coop / nacl requires loop detection to happen */
3498 #if defined(__native_client_codegen__)
3499 cfg->opt |= MONO_OPT_LOOP;
3501 if (mono_threads_is_coop_enabled ())
3502 cfg->opt |= MONO_OPT_LOOP;
3504 cfg->explicit_null_checks = debug_options.explicit_null_checks || (flags & JIT_FLAG_EXPLICIT_NULL_CHECKS);
3505 cfg->soft_breakpoints = debug_options.soft_breakpoints;
3506 cfg->check_pinvoke_callconv = debug_options.check_pinvoke_callconv;
3507 cfg->disable_direct_icalls = disable_direct_icalls;
3508 if (try_generic_shared)
3509 cfg->gshared = TRUE;
3510 cfg->compile_llvm = try_llvm;
3511 cfg->token_info_hash = g_hash_table_new (NULL, NULL);
3512 if (cfg->compile_aot)
3513 cfg->method_index = aot_method_index;
3516 if (!mono_debug_count ())
3517 cfg->opt &= ~MONO_OPT_FLOAT32;
3520 cfg->opt &= ~MONO_OPT_SIMD;
3521 cfg->r4fp = (cfg->opt & MONO_OPT_FLOAT32) ? 1 : 0;
3522 cfg->r4_stack_type = cfg->r4fp ? STACK_R4 : STACK_R8;
3524 if (cfg->gen_seq_points)
3525 cfg->seq_points = g_ptr_array_new ();
3526 mono_error_init (&cfg->error);
3528 if (cfg->compile_aot && !try_generic_shared && (method->is_generic || method->klass->generic_container || method_is_gshared)) {
3529 cfg->exception_type = MONO_EXCEPTION_GENERIC_SHARING_FAILED;
3533 if (cfg->gshared && (gsharedvt_method || mini_is_gsharedvt_sharable_method (method))) {
3534 MonoMethodInflated *inflated;
3535 MonoGenericContext *context;
3537 if (gsharedvt_method) {
3538 g_assert (method->is_inflated);
3539 inflated = (MonoMethodInflated*)method;
3540 context = &inflated->context;
3542 /* We are compiling a gsharedvt method directly */
3543 g_assert (compile_aot);
3545 g_assert (method_to_compile->is_inflated);
3546 inflated = (MonoMethodInflated*)method_to_compile;
3547 context = &inflated->context;
3550 mini_init_gsctx (NULL, cfg->mempool, context, &cfg->gsctx);
3551 cfg->gsctx_context = context;
3553 cfg->gsharedvt = TRUE;
3554 if (!cfg->llvm_only) {
3555 cfg->disable_llvm = TRUE;
3556 cfg->exception_message = g_strdup ("gsharedvt");
3561 method_to_register = method_to_compile;
3563 g_assert (method == method_to_compile);
3564 method_to_register = method;
3566 cfg->method_to_register = method_to_register;
3568 mono_error_init (&err);
3569 sig = mono_method_signature_checked (cfg->method, &err);
3571 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
3572 cfg->exception_message = g_strdup (mono_error_get_message (&err));
3573 mono_error_cleanup (&err);
3574 if (MONO_METHOD_COMPILE_END_ENABLED ())
3575 MONO_PROBE_METHOD_COMPILE_END (method, FALSE);
3579 header = cfg->header = mono_method_get_header_checked (cfg->method, &cfg->error);
3581 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
3582 if (MONO_METHOD_COMPILE_END_ENABLED ())
3583 MONO_PROBE_METHOD_COMPILE_END (method, FALSE);
3589 static gboolean inited;
3595 * Check for methods which cannot be compiled by LLVM early, to avoid
3596 * the extra compilation pass.
3598 if (COMPILE_LLVM (cfg)) {
3599 mono_llvm_check_method_supported (cfg);
3600 if (cfg->disable_llvm) {
3601 if (cfg->verbose_level >= (cfg->llvm_only ? 0 : 1)) {
3602 //nm = mono_method_full_name (cfg->method, TRUE);
3603 printf ("LLVM failed for '%s': %s\n", method->name, cfg->exception_message);
3606 if (cfg->llvm_only) {
3607 cfg->disable_aot = TRUE;
3610 mono_destroy_compile (cfg);
3612 goto restart_compile;
3618 /* The debugger has no liveness information, so avoid sharing registers/stack slots */
3619 if (debug_options.mdb_optimizations) {
3620 cfg->disable_reuse_registers = TRUE;
3621 cfg->disable_reuse_stack_slots = TRUE;
3623 * This decreases the change the debugger will read registers/stack slots which are
3624 * not yet initialized.
3626 cfg->disable_initlocals_opt = TRUE;
3628 cfg->extend_live_ranges = TRUE;
3630 /* The debugger needs all locals to be on the stack or in a global register */
3631 cfg->disable_vreg_to_lvreg = TRUE;
3633 /* Don't remove unused variables when running inside the debugger since the user
3634 * may still want to view them. */
3635 cfg->disable_deadce_vars = TRUE;
3637 cfg->opt &= ~MONO_OPT_DEADCE;
3638 cfg->opt &= ~MONO_OPT_INLINE;
3639 cfg->opt &= ~MONO_OPT_COPYPROP;
3640 cfg->opt &= ~MONO_OPT_CONSPROP;
3642 /* This is needed for the soft debugger, which doesn't like code after the epilog */
3643 cfg->disable_out_of_line_bblocks = TRUE;
3646 if (mono_using_xdebug) {
3648 * Make each variable use its own register/stack slot and extend
3649 * their liveness to cover the whole method, making them displayable
3650 * in gdb even after they are dead.
3652 cfg->disable_reuse_registers = TRUE;
3653 cfg->disable_reuse_stack_slots = TRUE;
3654 cfg->extend_live_ranges = TRUE;
3655 cfg->compute_precise_live_ranges = TRUE;
3658 mini_gc_init_cfg (cfg);
3660 if (COMPILE_LLVM (cfg)) {
3661 cfg->opt |= MONO_OPT_ABCREM;
3664 if (!verbose_method_inited) {
3665 verbose_method_name = g_getenv ("MONO_VERBOSE_METHOD");
3666 verbose_method_inited = TRUE;
3668 if (verbose_method_name) {
3669 const char *name = verbose_method_name;
3671 if ((strchr (name, '.') > name) || strchr (name, ':')) {
3672 MonoMethodDesc *desc;
3674 desc = mono_method_desc_new (name, TRUE);
3675 if (mono_method_desc_full_match (desc, cfg->method)) {
3676 cfg->verbose_level = 4;
3678 mono_method_desc_free (desc);
3680 if (strcmp (cfg->method->name, name) == 0)
3681 cfg->verbose_level = 4;
3685 cfg->intvars = (guint16 *)mono_mempool_alloc0 (cfg->mempool, sizeof (guint16) * STACK_MAX * header->max_stack);
3687 if (cfg->verbose_level > 0) {
3690 method_name = mono_method_get_full_name (method);
3691 g_print ("converting %s%s%smethod %s\n", COMPILE_LLVM (cfg) ? "llvm " : "", cfg->gsharedvt ? "gsharedvt " : "", (cfg->gshared && !cfg->gsharedvt) ? "gshared " : "", method_name);
3693 if (COMPILE_LLVM (cfg))
3694 g_print ("converting llvm method %s\n", method_name = mono_method_full_name (method, TRUE));
3695 else if (cfg->gsharedvt)
3696 g_print ("converting gsharedvt method %s\n", method_name = mono_method_full_name (method_to_compile, TRUE));
3697 else if (cfg->gshared)
3698 g_print ("converting shared method %s\n", method_name = mono_method_full_name (method_to_compile, TRUE));
3700 g_print ("converting method %s\n", method_name = mono_method_full_name (method, TRUE));
3702 g_free (method_name);
3705 if (cfg->opt & MONO_OPT_ABCREM)
3706 cfg->opt |= MONO_OPT_SSA;
3708 cfg->rs = mono_regstate_new ();
3709 cfg->next_vreg = cfg->rs->next_vreg;
3711 /* FIXME: Fix SSA to handle branches inside bblocks */
3712 if (cfg->opt & MONO_OPT_SSA)
3713 cfg->enable_extended_bblocks = FALSE;
3716 * FIXME: This confuses liveness analysis because variables which are assigned after
3717 * a branch inside a bblock become part of the kill set, even though the assignment
3718 * might not get executed. This causes the optimize_initlocals pass to delete some
3719 * assignments which are needed.
3720 * Also, the mono_if_conversion pass needs to be modified to recognize the code
3723 //cfg->enable_extended_bblocks = TRUE;
3725 /*We must verify the method before doing any IR generation as mono_compile_create_vars can assert.*/
3726 if (mono_compile_is_broken (cfg, cfg->method, TRUE)) {
3727 if (mini_get_debug_options ()->break_on_unverified)
3733 * create MonoInst* which represents arguments and local variables
3735 mono_compile_create_vars (cfg);
3737 MONO_TIME_TRACK (mono_jit_stats.jit_method_to_ir, i = mono_method_to_ir (cfg, method_to_compile, NULL, NULL, NULL, NULL, 0, FALSE));
3740 if (try_generic_shared && cfg->exception_type == MONO_EXCEPTION_GENERIC_SHARING_FAILED) {
3742 if (MONO_METHOD_COMPILE_END_ENABLED ())
3743 MONO_PROBE_METHOD_COMPILE_END (method, FALSE);
3746 mono_destroy_compile (cfg);
3747 try_generic_shared = FALSE;
3748 goto restart_compile;
3750 g_assert (cfg->exception_type != MONO_EXCEPTION_GENERIC_SHARING_FAILED);
3752 if (MONO_METHOD_COMPILE_END_ENABLED ())
3753 MONO_PROBE_METHOD_COMPILE_END (method, FALSE);
3754 /* cfg contains the details of the failure, so let the caller cleanup */
3758 cfg->stat_basic_blocks += cfg->num_bblocks;
3760 if (COMPILE_LLVM (cfg)) {
3763 /* The IR has to be in SSA form for LLVM */
3764 cfg->opt |= MONO_OPT_SSA;
3768 // Allow SSA on the result value
3769 cfg->ret->flags &= ~MONO_INST_VOLATILE;
3771 // Add an explicit return instruction referencing the return value
3772 MONO_INST_NEW (cfg, ins, OP_SETRET);
3773 ins->sreg1 = cfg->ret->dreg;
3775 MONO_ADD_INS (cfg->bb_exit, ins);
3778 cfg->opt &= ~MONO_OPT_LINEARS;
3781 cfg->opt &= ~MONO_OPT_BRANCH;
3784 /* todo: remove code when we have verified that the liveness for try/catch blocks
3788 * Currently, this can't be commented out since exception blocks are not
3789 * processed during liveness analysis.
3790 * It is also needed, because otherwise the local optimization passes would
3791 * delete assignments in cases like this:
3793 * <something which throws>
3795 * This also allows SSA to be run on methods containing exception clauses, since
3796 * SSA will ignore variables marked VOLATILE.
3798 MONO_TIME_TRACK (mono_jit_stats.jit_liveness_handle_exception_clauses, mono_liveness_handle_exception_clauses (cfg));
3800 MONO_TIME_TRACK (mono_jit_stats.jit_handle_out_of_line_bblock, mono_handle_out_of_line_bblock (cfg));
3802 /*g_print ("numblocks = %d\n", cfg->num_bblocks);*/
3804 if (!COMPILE_LLVM (cfg)) {
3805 MONO_TIME_TRACK (mono_jit_stats.jit_decompose_long_opts, mono_decompose_long_opts (cfg));
3808 /* Should be done before branch opts */
3809 if (cfg->opt & (MONO_OPT_CONSPROP | MONO_OPT_COPYPROP))
3810 MONO_TIME_TRACK (mono_jit_stats.jit_local_cprop, mono_local_cprop (cfg));
3813 * Should be done after cprop which can do strength reduction on
3814 * some of these ops, after propagating immediates.
3816 if (cfg->has_emulated_ops)
3817 MONO_TIME_TRACK (mono_jit_stats.jit_local_emulate_ops, mono_local_emulate_ops (cfg));
3819 if (cfg->opt & MONO_OPT_BRANCH)
3820 MONO_TIME_TRACK (mono_jit_stats.jit_optimize_branches, mono_optimize_branches (cfg));
3822 /* This must be done _before_ global reg alloc and _after_ decompose */
3823 MONO_TIME_TRACK (mono_jit_stats.jit_handle_global_vregs, mono_handle_global_vregs (cfg));
3824 if (cfg->opt & MONO_OPT_DEADCE)
3825 MONO_TIME_TRACK (mono_jit_stats.jit_local_deadce, mono_local_deadce (cfg));
3826 if (cfg->opt & MONO_OPT_ALIAS_ANALYSIS)
3827 MONO_TIME_TRACK (mono_jit_stats.jit_local_alias_analysis, mono_local_alias_analysis (cfg));
3828 /* Disable this for LLVM to make the IR easier to handle */
3829 if (!COMPILE_LLVM (cfg))
3830 MONO_TIME_TRACK (mono_jit_stats.jit_if_conversion, mono_if_conversion (cfg));
3832 mono_threads_safepoint ();
3834 MONO_TIME_TRACK (mono_jit_stats.jit_bb_ordering, mono_bb_ordering (cfg));
3836 if (((cfg->num_varinfo > 2000) || (cfg->num_bblocks > 1000)) && !cfg->compile_aot) {
3838 * we disable some optimizations if there are too many variables
3839 * because JIT time may become too expensive. The actual number needs
3840 * to be tweaked and eventually the non-linear algorithms should be fixed.
3842 cfg->opt &= ~ (MONO_OPT_LINEARS | MONO_OPT_COPYPROP | MONO_OPT_CONSPROP);
3843 cfg->disable_ssa = TRUE;
3846 if (cfg->opt & MONO_OPT_LOOP) {
3847 MONO_TIME_TRACK (mono_jit_stats.jit_compile_dominator_info, mono_compile_dominator_info (cfg, MONO_COMP_DOM | MONO_COMP_IDOM));
3848 MONO_TIME_TRACK (mono_jit_stats.jit_compute_natural_loops, mono_compute_natural_loops (cfg));
3851 MONO_TIME_TRACK (mono_jit_stats.jit_insert_safepoints, mono_insert_safepoints (cfg));
3853 /* after method_to_ir */
3855 if (MONO_METHOD_COMPILE_END_ENABLED ())
3856 MONO_PROBE_METHOD_COMPILE_END (method, TRUE);
3861 if (header->num_clauses)
3862 cfg->disable_ssa = TRUE;
3865 //#define DEBUGSSA "logic_run"
3866 //#define DEBUGSSA_CLASS "Tests"
3869 if (!cfg->disable_ssa) {
3870 mono_local_cprop (cfg);
3873 mono_ssa_compute (cfg);
3877 if (cfg->opt & MONO_OPT_SSA) {
3878 if (!(cfg->comp_done & MONO_COMP_SSA) && !cfg->disable_ssa) {
3880 MONO_TIME_TRACK (mono_jit_stats.jit_ssa_compute, mono_ssa_compute (cfg));
3883 if (cfg->verbose_level >= 2) {
3890 /* after SSA translation */
3892 if (MONO_METHOD_COMPILE_END_ENABLED ())
3893 MONO_PROBE_METHOD_COMPILE_END (method, TRUE);
3897 if ((cfg->opt & MONO_OPT_CONSPROP) || (cfg->opt & MONO_OPT_COPYPROP)) {
3898 if (cfg->comp_done & MONO_COMP_SSA && !COMPILE_LLVM (cfg)) {
3900 MONO_TIME_TRACK (mono_jit_stats.jit_ssa_cprop, mono_ssa_cprop (cfg));
3906 if (cfg->comp_done & MONO_COMP_SSA && !COMPILE_LLVM (cfg)) {
3907 //mono_ssa_strength_reduction (cfg);
3909 if (cfg->opt & MONO_OPT_DEADCE)
3910 MONO_TIME_TRACK (mono_jit_stats.jit_ssa_deadce, mono_ssa_deadce (cfg));
3912 if ((cfg->flags & (MONO_CFG_HAS_LDELEMA|MONO_CFG_HAS_CHECK_THIS)) && (cfg->opt & MONO_OPT_ABCREM))
3913 MONO_TIME_TRACK (mono_jit_stats.jit_perform_abc_removal, mono_perform_abc_removal (cfg));
3915 MONO_TIME_TRACK (mono_jit_stats.jit_ssa_remove, mono_ssa_remove (cfg));
3916 MONO_TIME_TRACK (mono_jit_stats.jit_local_cprop2, mono_local_cprop (cfg));
3917 MONO_TIME_TRACK (mono_jit_stats.jit_handle_global_vregs2, mono_handle_global_vregs (cfg));
3918 if (cfg->opt & MONO_OPT_DEADCE)
3919 MONO_TIME_TRACK (mono_jit_stats.jit_local_deadce2, mono_local_deadce (cfg));
3921 if (cfg->opt & MONO_OPT_BRANCH)
3922 MONO_TIME_TRACK (mono_jit_stats.jit_optimize_branches2, mono_optimize_branches (cfg));
3926 if (cfg->comp_done & MONO_COMP_SSA && COMPILE_LLVM (cfg)) {
3927 mono_ssa_loop_invariant_code_motion (cfg);
3928 /* This removes MONO_INST_FAULT flags too so perform it unconditionally */
3929 if (cfg->opt & MONO_OPT_ABCREM)
3930 mono_perform_abc_removal (cfg);
3933 /* after SSA removal */
3935 if (MONO_METHOD_COMPILE_END_ENABLED ())
3936 MONO_PROBE_METHOD_COMPILE_END (method, TRUE);
3940 if (cfg->llvm_only && cfg->gsharedvt)
3941 mono_ssa_remove_gsharedvt (cfg);
3943 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
3944 if (COMPILE_SOFT_FLOAT (cfg))
3945 mono_decompose_soft_float (cfg);
3947 MONO_TIME_TRACK (mono_jit_stats.jit_decompose_vtype_opts, mono_decompose_vtype_opts (cfg));
3948 if (cfg->flags & MONO_CFG_HAS_ARRAY_ACCESS)
3949 MONO_TIME_TRACK (mono_jit_stats.jit_decompose_array_access_opts, mono_decompose_array_access_opts (cfg));
3952 #ifndef MONO_ARCH_GOT_REG
3957 g_assert (cfg->got_var_allocated);
3960 * Allways allocate the GOT var to a register, because keeping it
3961 * in memory will increase the number of live temporaries in some
3962 * code created by inssel.brg, leading to the well known spills+
3963 * branches problem. Testcase: mcs crash in
3964 * System.MonoCustomAttrs:GetCustomAttributes.
3966 #ifdef MONO_ARCH_GOT_REG
3967 got_reg = MONO_ARCH_GOT_REG;
3969 regs = mono_arch_get_global_int_regs (cfg);
3971 got_reg = GPOINTER_TO_INT (regs->data);
3974 cfg->got_var->opcode = OP_REGVAR;
3975 cfg->got_var->dreg = got_reg;
3976 cfg->used_int_regs |= 1LL << cfg->got_var->dreg;
3980 * Have to call this again to process variables added since the first call.
3982 MONO_TIME_TRACK(mono_jit_stats.jit_liveness_handle_exception_clauses2, mono_liveness_handle_exception_clauses (cfg));
3984 if (cfg->opt & MONO_OPT_LINEARS) {
3985 GList *vars, *regs, *l;
3987 /* fixme: maybe we can avoid to compute livenesss here if already computed ? */
3988 cfg->comp_done &= ~MONO_COMP_LIVENESS;
3989 if (!(cfg->comp_done & MONO_COMP_LIVENESS))
3990 MONO_TIME_TRACK (mono_jit_stats.jit_analyze_liveness, mono_analyze_liveness (cfg));
3992 if ((vars = mono_arch_get_allocatable_int_vars (cfg))) {
3993 regs = mono_arch_get_global_int_regs (cfg);
3994 /* Remove the reg reserved for holding the GOT address */
3996 for (l = regs; l; l = l->next) {
3997 if (GPOINTER_TO_UINT (l->data) == cfg->got_var->dreg) {
3998 regs = g_list_delete_link (regs, l);
4003 MONO_TIME_TRACK (mono_jit_stats.jit_linear_scan, mono_linear_scan (cfg, vars, regs, &cfg->used_int_regs));
4007 //mono_print_code (cfg, "");
4011 /* variables are allocated after decompose, since decompose could create temps */
4012 if (!COMPILE_LLVM (cfg)) {
4013 MONO_TIME_TRACK (mono_jit_stats.jit_arch_allocate_vars, mono_arch_allocate_vars (cfg));
4014 if (cfg->exception_type)
4019 mono_allocate_gsharedvt_vars (cfg);
4021 if (!COMPILE_LLVM (cfg)) {
4022 gboolean need_local_opts;
4023 MONO_TIME_TRACK (mono_jit_stats.jit_spill_global_vars, mono_spill_global_vars (cfg, &need_local_opts));
4025 if (need_local_opts || cfg->compile_aot) {
4026 /* To optimize code created by spill_global_vars */
4027 MONO_TIME_TRACK (mono_jit_stats.jit_local_cprop3, mono_local_cprop (cfg));
4028 if (cfg->opt & MONO_OPT_DEADCE)
4029 MONO_TIME_TRACK (mono_jit_stats.jit_local_deadce3, mono_local_deadce (cfg));
4033 mono_insert_branches_between_bblocks (cfg);
4035 if (COMPILE_LLVM (cfg)) {
4039 /* The IR has to be in SSA form for LLVM */
4040 if (!(cfg->comp_done & MONO_COMP_SSA)) {
4041 cfg->exception_message = g_strdup ("SSA disabled.");
4042 cfg->disable_llvm = TRUE;
4045 if (cfg->flags & MONO_CFG_HAS_ARRAY_ACCESS)
4046 mono_decompose_array_access_opts (cfg);
4048 if (!cfg->disable_llvm)
4049 mono_llvm_emit_method (cfg);
4050 if (cfg->disable_llvm) {
4051 if (cfg->verbose_level >= (cfg->llvm_only ? 0 : 1)) {
4052 //nm = mono_method_full_name (cfg->method, TRUE);
4053 printf ("LLVM failed for '%s': %s\n", method->name, cfg->exception_message);
4056 if (cfg->llvm_only) {
4057 cfg->disable_aot = TRUE;
4060 mono_destroy_compile (cfg);
4062 goto restart_compile;
4065 if (cfg->verbose_level > 0 && !cfg->compile_aot) {
4066 nm = mono_method_full_name (cfg->method, TRUE);
4067 g_print ("LLVM Method %s emitted at %p to %p (code length %d) [%s]\n",
4069 cfg->native_code, cfg->native_code + cfg->code_len, cfg->code_len, cfg->domain->friendly_name);
4074 MONO_TIME_TRACK (mono_jit_stats.jit_codegen, mono_codegen (cfg));
4075 if (cfg->exception_type)
4079 if (COMPILE_LLVM (cfg))
4080 InterlockedIncrement (&mono_jit_stats.methods_with_llvm);
4082 InterlockedIncrement (&mono_jit_stats.methods_without_llvm);
4084 MONO_TIME_TRACK (mono_jit_stats.jit_create_jit_info, cfg->jit_info = create_jit_info (cfg, method_to_compile));
4086 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
4087 if (cfg->extend_live_ranges) {
4088 /* Extend live ranges to cover the whole method */
4089 for (i = 0; i < cfg->num_varinfo; ++i)
4090 MONO_VARINFO (cfg, i)->live_range_end = cfg->code_len;
4094 if (!cfg->compile_aot)
4095 mono_save_xdebug_info (cfg);
4097 MONO_TIME_TRACK (mono_jit_stats.jit_gc_create_gc_map, mini_gc_create_gc_map (cfg));
4099 MONO_TIME_TRACK (mono_jit_stats.jit_save_seq_point_info, mono_save_seq_point_info (cfg));
4101 if (cfg->verbose_level >= 2) {
4102 char *id = mono_method_full_name (cfg->method, FALSE);
4103 mono_disassemble_code (cfg, cfg->native_code, cfg->code_len, id + 3);
4107 if (!cfg->compile_aot) {
4108 mono_domain_lock (cfg->domain);
4109 mono_jit_info_table_add (cfg->domain, cfg->jit_info);
4111 if (cfg->method->dynamic)
4112 mono_dynamic_code_hash_lookup (cfg->domain, cfg->method)->ji = cfg->jit_info;
4113 mono_domain_unlock (cfg->domain);
4118 printf ("GSHAREDVT: %s\n", mono_method_full_name (cfg->method, TRUE));
4121 /* collect statistics */
4122 #ifndef DISABLE_PERFCOUNTERS
4123 mono_perfcounters->jit_methods++;
4124 mono_perfcounters->jit_bytes += header->code_size;
4126 mono_jit_stats.allocated_code_size += cfg->code_len;
4127 code_size_ratio = cfg->code_len;
4128 if (code_size_ratio > mono_jit_stats.biggest_method_size && mono_jit_stats.enabled) {
4129 mono_jit_stats.biggest_method_size = code_size_ratio;
4130 g_free (mono_jit_stats.biggest_method);
4131 mono_jit_stats.biggest_method = g_strdup_printf ("%s::%s)", method->klass->name, method->name);
4133 code_size_ratio = (code_size_ratio * 100) / header->code_size;
4134 if (code_size_ratio > mono_jit_stats.max_code_size_ratio && mono_jit_stats.enabled) {
4135 mono_jit_stats.max_code_size_ratio = code_size_ratio;
4136 g_free (mono_jit_stats.max_ratio_method);
4137 mono_jit_stats.max_ratio_method = g_strdup_printf ("%s::%s)", method->klass->name, method->name);
4139 mono_jit_stats.native_code_size += cfg->code_len;
4141 if (MONO_METHOD_COMPILE_END_ENABLED ())
4142 MONO_PROBE_METHOD_COMPILE_END (method, TRUE);
/*
 * mono_arch_instrument_epilog:
 * Convenience wrapper around mono_arch_instrument_epilog_full, passing
 * FALSE for the final (preserve-argument-registers) flag.
 * NOTE(review): the return type line is not visible in this view of the
 * file -- confirm against the full source.
 */
4148 mono_arch_instrument_epilog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
4150 return mono_arch_instrument_epilog_full (cfg, func, p, enable_arguments, FALSE);
/*
 * mono_cfg_add_try_hole:
 * Record a "hole" in a try region: a native-code range belonging to CLAUSE
 * that starts at START (inside basic block BB) but is emitted out of line.
 * The record is allocated from the compile mempool and appended to
 * cfg->try_block_holes, so it needs no explicit freeing.
 */
4154 mono_cfg_add_try_hole (MonoCompile *cfg, MonoExceptionClause *clause, guint8 *start, MonoBasicBlock *bb)
4156 TryBlockHole *hole = (TryBlockHole *)mono_mempool_alloc (cfg->mempool, sizeof (TryBlockHole));
4157 hole->clause = clause;
/* Offset is stored relative to the start of the method's native code. */
4158 hole->start_offset = start - cfg->native_code;
4159 hole->basic_block = bb;
4161 cfg->try_block_holes = g_slist_append_mempool (cfg->mempool, cfg->try_block_holes, hole);
/*
 * mono_cfg_set_exception:
 * Mark the compilation in CFG as failed with exception TYPE
 * (a MONO_EXCEPTION_* constant); the caller inspects cfg->exception_type.
 */
4165 mono_cfg_set_exception (MonoCompile *cfg, int type)
4167 cfg->exception_type = type;
4170 /* Assumes ownership of the MSG argument */
/*
 * mono_cfg_set_exception_invalid_program:
 * Fail the compilation with a System.InvalidProgramException whose message
 * is MSG. Ownership of MSG transfers to cfg->error (per the note above);
 * the caller must not free it.
 */
4172 mono_cfg_set_exception_invalid_program (MonoCompile *cfg, char *msg)
4174 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
4175 mono_error_set_generic_error (&cfg->error, "System", "InvalidProgramException", msg);
4178 #endif /* DISABLE_JIT */
/*
 * create_jit_info_for_trampoline:
 * Build a MonoJitInfo describing the trampoline in INFO, attributed to
 * WRAPPER, so the runtime (EH, stack walks) can resolve addresses inside it.
 * The jit info is allocated from the root domain's pool.
 * NOTE(review): several lines (local declarations, the final return) are
 * missing from this view of the file; see the full source.
 */
4181 create_jit_info_for_trampoline (MonoMethod *wrapper, MonoTrampInfo *info)
4183 MonoDomain *domain = mono_get_root_domain ();
/* Use pre-encoded unwind info if available, otherwise encode the ops now. */
4188 if (info->uw_info) {
4189 uw_info = info->uw_info;
4190 info_len = info->uw_info_len;
4192 uw_info = mono_unwind_ops_encode (info->unwind_ops, &info_len);
4195 jinfo = (MonoJitInfo *)mono_domain_alloc0 (domain, MONO_SIZEOF_JIT_INFO);
4196 jinfo->d.method = wrapper;
4197 jinfo->code_start = info->code;
4198 jinfo->code_size = info->code_size;
/* The encoded unwind info is interned/cached by the runtime. */
4199 jinfo->unwind_info = mono_cache_unwind_info (uw_info, info_len);
/*
 * mono_time_track_start:
 * Start a wall-clock timer for JIT-phase time accounting; pair with
 * mono_time_track_end ().
 */
4207 GTimer *mono_time_track_start ()
4209 return g_timer_new ();
/*
 * mono_time_track_end:
 * Stop TIMER, add the elapsed seconds to the accumulator *TIME, and
 * destroy the timer (it must not be reused afterwards).
 */
4212 void mono_time_track_end (double *time, GTimer *timer)
4214 g_timer_stop (timer);
4215 *time += g_timer_elapsed (timer, NULL);
4216 g_timer_destroy (timer);
/*
 * mono_update_jit_stats:
 * Fold the per-compilation counters collected in CFG into the global
 * mono_jit_stats. The additions here are plain (non-atomic); the caller
 * performs them while holding a lock (see the call site in
 * mono_jit_compile_method_inner) instead of using InterlockedIncrement
 * for each counter during JITting.
 */
4219 void mono_update_jit_stats (MonoCompile *cfg)
4221 mono_jit_stats.allocate_var += cfg->stat_allocate_var;
4222 mono_jit_stats.locals_stack_size += cfg->stat_locals_stack_size;
4223 mono_jit_stats.basic_blocks += cfg->stat_basic_blocks;
4224 mono_jit_stats.max_basic_blocks = MAX (cfg->stat_basic_blocks, mono_jit_stats.max_basic_blocks);
4225 mono_jit_stats.cil_code_size += cfg->stat_cil_code_size;
4226 mono_jit_stats.regvars += cfg->stat_n_regvars;
4227 mono_jit_stats.inlineable_methods += cfg->stat_inlineable_methods;
4228 mono_jit_stats.inlined_methods += cfg->stat_inlined_methods;
4229 mono_jit_stats.code_reallocs += cfg->stat_code_reallocs;
4233 * mono_jit_compile_method_inner:
4235 * Main entry point for the JIT.
/*
 * Compiles METHOD for TARGET_DOMAIN with optimization flags OPT, installing
 * the result in the domain's JIT code hash. On failure the error is
 * reported through ERROR. Special-cases icalls/pinvokes, runtime-implemented
 * delegate methods, and gsharedvt wrappers before falling through to the
 * normal mini_method_compile path.
 * NOTE(review): this view of the file is missing many interior lines
 * (declarations, else-branches, goto labels, the final return); comments
 * below describe only what the visible lines establish.
 */
4238 mono_jit_compile_method_inner (MonoMethod *method, MonoDomain *target_domain, int opt, MonoError *error)
4241 gpointer code = NULL;
4242 MonoJitInfo *jinfo, *info;
4244 MonoException *ex = NULL;
4245 guint32 prof_options;
4247 MonoMethod *prof_method, *shared;
4249 mono_error_init (error);
/* Case 1: internal calls and P/Invokes -- resolve the native address and
 * compile a managed-to-native marshalling wrapper instead of the method. */
4251 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
4252 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
4254 MonoMethodPInvoke* piinfo = (MonoMethodPInvoke *) method;
4256 if (!piinfo->addr) {
4257 if (method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL)
4258 piinfo->addr = mono_lookup_internal_call (method);
4259 else if (method->iflags & METHOD_IMPL_ATTRIBUTE_NATIVE)
4261 g_warning ("Method '%s' in assembly '%s' contains native code that cannot be executed by Mono in modules loaded from byte arrays. The assembly was probably created using C++/CLI.\n", mono_method_full_name (method, TRUE), method->klass->image->name);
4263 g_warning ("Method '%s' in assembly '%s' contains native code that cannot be executed by Mono on this platform. The assembly was probably created using C++/CLI.\n", mono_method_full_name (method, TRUE), method->klass->image->name);
4266 mono_lookup_pinvoke_call (method, NULL, NULL);
4268 nm = mono_marshal_get_native_wrapper (method, TRUE, mono_aot_only);
4269 code = mono_get_addr_from_ftnptr (mono_compile_method (nm));
4270 jinfo = mono_jit_info_table_find (target_domain, (char *)code);
4272 jinfo = mono_jit_info_table_find (mono_domain_get (), (char *)code);
4274 mono_profiler_method_end_jit (method, jinfo, MONO_PROFILE_OK);
/* Case 2: runtime-implemented methods -- only delegate .ctor/Invoke/
 * BeginInvoke/EndInvoke are recognized; anything else is an error. */
4276 } else if ((method->iflags & METHOD_IMPL_ATTRIBUTE_RUNTIME)) {
4277 const char *name = method->name;
4278 char *full_name, *msg;
4281 if (method->klass->parent == mono_defaults.multicastdelegate_class) {
4282 if (*name == '.' && (strcmp (name, ".ctor") == 0)) {
4283 MonoJitICallInfo *mi = mono_find_jit_icall_by_name ("mono_delegate_ctor");
4286 * We need to make sure this wrapper
4287 * is compiled because it might end up
4288 * in an (M)RGCTX if generic sharing
4289 * is enabled, and would be called
4290 * indirectly. If it were a
4291 * trampoline we'd try to patch that
4292 * indirect call, which is not
4295 return mono_get_addr_from_ftnptr ((gpointer)mono_icall_get_wrapper_full (mi, TRUE));
4296 } else if (*name == 'I' && (strcmp (name, "Invoke") == 0)) {
4297 if (mono_llvm_only) {
4298 nm = mono_marshal_get_delegate_invoke (method, NULL);
4299 return mono_get_addr_from_ftnptr (mono_compile_method (nm));
4301 return mono_create_delegate_trampoline (target_domain, method->klass);
4302 } else if (*name == 'B' && (strcmp (name, "BeginInvoke") == 0)) {
4303 nm = mono_marshal_get_delegate_begin_invoke (method);
4304 return mono_get_addr_from_ftnptr (mono_compile_method (nm));
4305 } else if (*name == 'E' && (strcmp (name, "EndInvoke") == 0)) {
4306 nm = mono_marshal_get_delegate_end_invoke (method);
4307 return mono_get_addr_from_ftnptr (mono_compile_method (nm));
/* Unrecognized runtime-implemented method: report InvalidProgramException. */
4311 full_name = mono_method_full_name (method, TRUE);
4312 msg = g_strdup_printf ("Unrecognizable runtime implemented method '%s'", full_name);
4313 ex = mono_exception_from_name_msg (mono_defaults.corlib, "System", "InvalidProgramException", msg);
4314 mono_error_set_exception_instance (error, ex);
/* Case 3: gsharedvt in/out wrappers are backed by a shared trampoline;
 * cache the trampoline info in the function-local statics below. */
4320 if (method->wrapper_type == MONO_WRAPPER_UNKNOWN) {
4321 WrapperInfo *info = mono_marshal_get_wrapper_info (method);
4323 if (info->subtype == WRAPPER_SUBTYPE_GSHAREDVT_IN || info->subtype == WRAPPER_SUBTYPE_GSHAREDVT_OUT) {
4324 static MonoTrampInfo *in_tinfo, *out_tinfo;
4325 MonoTrampInfo *tinfo;
4327 gboolean is_in = info->subtype == WRAPPER_SUBTYPE_GSHAREDVT_IN;
4329 if (is_in && in_tinfo)
4330 return in_tinfo->code;
4331 else if (!is_in && out_tinfo)
4332 return out_tinfo->code;
4335 * This is a special wrapper whose body is implemented in assembly, like a trampoline. We use a wrapper so EH
4337 * FIXME: The caller signature doesn't match the callee, which might cause problems on some platforms
4340 mono_aot_get_trampoline_full (is_in ? "gsharedvt_trampoline" : "gsharedvt_out_trampoline", &tinfo);
4342 mono_arch_get_gsharedvt_trampoline (&tinfo, FALSE);
4343 jinfo = create_jit_info_for_trampoline (method, tinfo);
4344 mono_jit_info_table_add (mono_get_root_domain (), jinfo);
/* JITting is forbidden under --aot-only: fail with ExecutionEngineException. */
4353 if (mono_aot_only) {
4354 char *fullname = mono_method_full_name (method, TRUE);
4355 char *msg = g_strdup_printf ("Attempting to JIT compile method '%s' while running with --aot-only. See http://docs.xamarin.com/ios/about/limitations for more information.\n", fullname);
4357 ex = mono_get_exception_execution_engine (msg);
4358 mono_error_set_exception_instance (error, ex);
/* Normal path: compile the method, timing the whole compilation. */
4365 jit_timer = mono_time_track_start ();
4366 cfg = mini_method_compile (method, opt, target_domain, JIT_FLAG_RUN_CCTORS, 0, -1);
4367 mono_time_track_end (&mono_jit_stats.jit_time, jit_timer);
4369 prof_method = cfg->method;
/* Translate a compilation failure recorded in cfg into a managed exception. */
4371 switch (cfg->exception_type) {
4372 case MONO_EXCEPTION_NONE:
4374 case MONO_EXCEPTION_TYPE_LOAD:
4375 case MONO_EXCEPTION_MISSING_FIELD:
4376 case MONO_EXCEPTION_MISSING_METHOD:
4377 case MONO_EXCEPTION_FILE_NOT_FOUND:
4378 case MONO_EXCEPTION_BAD_IMAGE: {
4379 /* Throw a type load exception if needed */
4380 if (cfg->exception_ptr) {
4381 ex = mono_class_get_exception_for_failure ((MonoClass *)cfg->exception_ptr);
4383 if (cfg->exception_type == MONO_EXCEPTION_MISSING_FIELD)
4384 ex = mono_exception_from_name_msg (mono_defaults.corlib, "System", "MissingFieldException", cfg->exception_message);
4385 else if (cfg->exception_type == MONO_EXCEPTION_MISSING_METHOD)
4386 ex = mono_exception_from_name_msg (mono_defaults.corlib, "System", "MissingMethodException", cfg->exception_message);
4387 else if (cfg->exception_type == MONO_EXCEPTION_TYPE_LOAD)
4388 ex = mono_exception_from_name_msg (mono_defaults.corlib, "System", "TypeLoadException", cfg->exception_message);
4389 else if (cfg->exception_type == MONO_EXCEPTION_FILE_NOT_FOUND)
4390 ex = mono_exception_from_name_msg (mono_defaults.corlib, "System.IO", "FileNotFoundException", cfg->exception_message);
4391 else if (cfg->exception_type == MONO_EXCEPTION_BAD_IMAGE)
4392 ex = mono_get_exception_bad_image_format (cfg->exception_message);
4394 g_assert_not_reached ();
4398 case MONO_EXCEPTION_MONO_ERROR:
4399 // FIXME: MonoError has no copy ctor
4400 g_assert (!mono_error_ok (&cfg->error));
4401 ex = mono_error_convert_to_exception (&cfg->error);
4404 g_assert_not_reached ();
4408 if (cfg->prof_options & MONO_PROFILE_JIT_COMPILATION)
4409 mono_profiler_method_end_jit (method, NULL, MONO_PROFILE_FAILED);
4411 mono_destroy_compile (cfg);
4412 mono_error_set_exception_instance (error, ex);
4417 if (mono_method_is_generic_sharable (method, FALSE))
4418 shared = mini_get_shared_method (method);
/* Installation happens under the domain lock so lookup + insert is atomic. */
4422 mono_domain_lock (target_domain);
4424 /* Check if some other thread already did the job. In this case, we can
4425 discard the code this thread generated. */
4427 info = mini_lookup_method (target_domain, method, shared);
4429 /* We can't use a domain specific method in another domain */
4430 if ((target_domain == mono_domain_get ()) || info->domain_neutral) {
4431 code = info->code_start;
4432 // printf("Discarding code for method %s\n", method->name);
4436 /* The lookup + insert is atomic since this is done inside the domain lock */
4437 mono_domain_jit_code_hash_lock (target_domain);
4438 mono_internal_hash_table_insert (&target_domain->jit_code_hash, cfg->jit_info->d.method, cfg->jit_info);
4439 mono_domain_jit_code_hash_unlock (target_domain);
4441 code = cfg->native_code;
4443 if (cfg->gshared && mono_method_is_generic_sharable (method, FALSE))
4444 mono_stats.generics_shared_methods++;
4446 mono_stats.gsharedvt_methods++;
4449 jinfo = cfg->jit_info;
/* Snapshot prof_options before cfg is destroyed below. */
4451 prof_options = cfg->prof_options;
4454 * Update global stats while holding a lock, instead of doing many
4455 * InterlockedIncrement operations during JITting.
4457 mono_update_jit_stats (cfg);
4459 mono_destroy_compile (cfg);
/* Back-patch pending jump sites that targeted this method. */
4462 if (domain_jit_info (target_domain)->jump_target_hash) {
4463 MonoJumpInfo patch_info;
4464 MonoJumpList *jlist;
4466 jlist = (MonoJumpList *)g_hash_table_lookup (domain_jit_info (target_domain)->jump_target_hash, method);
4468 patch_info.next = NULL;
4469 patch_info.ip.i = 0;
4470 patch_info.type = MONO_PATCH_INFO_METHOD_JUMP;
4471 patch_info.data.method = method;
4472 g_hash_table_remove (domain_jit_info (target_domain)->jump_target_hash, method);
4474 #if defined(__native_client_codegen__) && defined(__native_client__)
4475 /* These patches are applied after a method has been installed, no target munging is needed. */
4476 nacl_allow_target_modification (FALSE);
4478 #ifdef MONO_ARCH_HAVE_PATCH_CODE_NEW
4479 for (tmp = jlist->list; tmp; tmp = tmp->next) {
4480 gpointer target = mono_resolve_patch_target (NULL, target_domain, (guint8 *)tmp->data, &patch_info, TRUE, error);
4481 if (!mono_error_ok (error))
4483 mono_arch_patch_code_new (NULL, target_domain, (guint8 *)tmp->data, &patch_info, target);
4486 for (tmp = jlist->list; tmp; tmp = tmp->next)
4487 mono_arch_patch_code (NULL, NULL, target_domain, tmp->data, &patch_info, TRUE);
4489 #if defined(__native_client_codegen__) && defined(__native_client__)
4490 nacl_allow_target_modification (TRUE);
4495 /* Update llvm callees */
4496 if (domain_jit_info (target_domain)->llvm_jit_callees) {
4497 GSList *callees = g_hash_table_lookup (domain_jit_info (target_domain)->llvm_jit_callees, method);
4500 for (l = callees; l; l = l->next) {
4501 gpointer *addr = (gpointer*)l->data;
4507 mono_emit_jit_map (jinfo);
4509 mono_domain_unlock (target_domain);
4511 if (!mono_error_ok (error))
/* Ensure the declaring class has a vtable; surface class-load failures. */
4514 vtable = mono_class_vtable (target_domain, method->klass);
4516 ex = mono_class_get_exception_for_failure (method->klass);
4518 mono_error_set_exception_instance (error, ex);
4522 if (prof_options & MONO_PROFILE_JIT_COMPILATION) {
4523 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
4524 if (mono_marshal_method_from_wrapper (method)) {
4525 /* Native func wrappers have no method */
4526 /* The profiler doesn't know about wrappers, so pass the original icall method */
4527 mono_profiler_method_end_jit (mono_marshal_method_from_wrapper (method), jinfo, MONO_PROFILE_OK);
4530 mono_profiler_method_end_jit (method, jinfo, MONO_PROFILE_OK);
4531 if (prof_method != method) {
4532 mono_profiler_method_end_jit (prof_method, jinfo, MONO_PROFILE_OK);
/* Run the class's static constructor (JIT_FLAG_RUN_CCTORS). */
4536 if (!mono_runtime_class_init_full (vtable, error))
4542 * mini_get_underlying_type:
4544 * Return the type the JIT will use during compilation.
4545 * Handles: byref, enums, native types, generic sharing.
4546 * For gsharedvt types, it will return the original VAR/MVAR.
/* Simple forwarder to mini_type_get_underlying_type (). */
4549 mini_get_underlying_type (MonoType *type)
4551 return mini_type_get_underlying_type (type);
/*
 * mini_jit_init:
 * One-time initialization of JIT-global state: the recursive jit_mutex
 * and the zero-initialized current_backend descriptor.
 */
4555 mini_jit_init (void)
4557 mono_os_mutex_init_recursive (&jit_mutex);
4559 current_backend = g_new0 (MonoBackend, 1);
4560 init_backend (current_backend);
/*
 * mini_jit_cleanup:
 * Free JIT-global tables (the emulated-opcode maps) at shutdown.
 * g_free () accepts NULL, so this is safe even if they were never built.
 */
4565 mini_jit_cleanup (void)
4568 g_free (emul_opcode_map);
4569 g_free (emul_opcode_opcodes);
/*
 * mono_llvm_emit_aot_file_info:
 * Stub: must never be called in this build configuration -- presumably
 * compiled only when LLVM support is disabled (guard not visible here).
 */
4575 mono_llvm_emit_aot_file_info (MonoAotFileInfo *info, gboolean has_jitted_code)
4577 g_assert_not_reached ();
/*
 * mono_llvm_emit_aot_data:
 * Stub: must never be called in this build configuration -- presumably
 * compiled only when LLVM support is disabled (guard not visible here).
 */
4580 void mono_llvm_emit_aot_data (const char *symbol, guint8 *data, int data_len)
4582 g_assert_not_reached ();
4587 #if !defined(ENABLE_LLVM_RUNTIME) && !defined(ENABLE_LLVM)
/*
 * mono_llvm_cpp_throw_exception:
 * Stub for builds without ENABLE_LLVM_RUNTIME/ENABLE_LLVM (see the
 * surrounding #if): must never be reached at runtime.
 */
4590 mono_llvm_cpp_throw_exception (void)
4592 g_assert_not_reached ();
/*
 * mini_method_compile (DISABLE_JIT stub):
 * Placeholder keeping the symbol defined when the JIT is compiled out;
 * must never be called.
 */
4600 mini_method_compile (MonoMethod *method, guint32 opts, MonoDomain *domain, JitFlags flags, int parts, int aot_method_index)
4602 g_assert_not_reached ();
/*
 * mono_destroy_compile (DISABLE_JIT stub):
 * Placeholder keeping the symbol defined when the JIT is compiled out;
 * must never be called.
 */
4607 mono_destroy_compile (MonoCompile *cfg)
4609 g_assert_not_reached ();
/*
 * mono_add_patch_info (DISABLE_JIT stub):
 * Placeholder keeping the symbol defined when the JIT is compiled out;
 * must never be called.
 */
4613 mono_add_patch_info (MonoCompile *cfg, int ip, MonoJumpInfoType type, gconstpointer target)
4615 g_assert_not_reached ();
4618 #endif /* DISABLE_JIT */