3 * The new Mono code generator.
6 * Paolo Molaro (lupus@ximian.com)
7 * Dietmar Maurer (dietmar@ximian.com)
9 * Copyright 2002-2003 Ximian, Inc.
10 * Copyright 2003-2010 Novell, Inc.
11 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
12 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
23 #ifdef HAVE_SYS_TIME_H
27 #include <mono/utils/memcheck.h>
29 #include <mono/metadata/assembly.h>
30 #include <mono/metadata/loader.h>
31 #include <mono/metadata/tabledefs.h>
32 #include <mono/metadata/class.h>
33 #include <mono/metadata/object.h>
34 #include <mono/metadata/tokentype.h>
35 #include <mono/metadata/tabledefs.h>
36 #include <mono/metadata/threads.h>
37 #include <mono/metadata/appdomain.h>
38 #include <mono/metadata/debug-helpers.h>
39 #include <mono/metadata/profiler-private.h>
40 #include <mono/metadata/mono-config.h>
41 #include <mono/metadata/environment.h>
42 #include <mono/metadata/mono-debug.h>
43 #include <mono/metadata/gc-internals.h>
44 #include <mono/metadata/threads-types.h>
45 #include <mono/metadata/verify.h>
46 #include <mono/metadata/verify-internals.h>
47 #include <mono/metadata/mempool-internals.h>
48 #include <mono/metadata/attach.h>
49 #include <mono/metadata/runtime.h>
50 #include <mono/metadata/attrdefs.h>
51 #include <mono/utils/mono-math.h>
52 #include <mono/utils/mono-compiler.h>
53 #include <mono/utils/mono-counters.h>
54 #include <mono/utils/mono-error-internals.h>
55 #include <mono/utils/mono-logger-internals.h>
56 #include <mono/utils/mono-mmap.h>
57 #include <mono/utils/mono-path.h>
58 #include <mono/utils/mono-tls.h>
59 #include <mono/utils/mono-hwcap.h>
60 #include <mono/utils/dtrace.h>
61 #include <mono/utils/mono-threads.h>
62 #include <mono/utils/mono-threads-coop.h>
65 #include "seq-points.h"
73 #include "jit-icalls.h"
76 #include "debugger-agent.h"
77 #include "llvm-runtime.h"
78 #include "mini-llvm.h"
/* Trace specification for -trace=...; non-NULL enables call tracing. */
81 MonoTraceSpec *mono_jit_trace_calls;
/* Debugging aid: method (and position) at which to inject an async exception. */
82 MonoMethodDesc *mono_inject_async_exc_method;
83 int mono_inject_async_exc_pos;
/* Debugging aid: method/basic-block number at which to break into the debugger. */
84 MonoMethodDesc *mono_break_at_bb_method;
85 int mono_break_at_bb_bb_num;
86 gboolean mono_do_x86_stack_align = TRUE;
/* TRUE when running under a native debugger with xdebug support enabled. */
87 gboolean mono_using_xdebug;
/* Statistics: machine code bytes and JIT time spent on compilations that were discarded. */
90 static guint32 discarded_code;
91 static double discarded_jit_time;
/* jit_mutex protects the JIT's process-wide shared state; take it via these macros. */
93 #define mono_jit_lock() mono_os_mutex_lock (&jit_mutex)
94 #define mono_jit_unlock() mono_os_mutex_unlock (&jit_mutex)
95 static mono_mutex_t jit_mutex;
/* Description of the backend (architecture) the JIT is generating code for. */
97 MonoBackend *current_backend;
/*
 * mono_realloc_native_code:
 *
 *   Resize the buffer holding the method's generated machine code to the
 * current cfg->code_size. NOTE(review): this extract is missing interleaved
 * lines (return type, braces); code left byte-identical.
 */
102 mono_realloc_native_code (MonoCompile *cfg)
104 return g_realloc (cfg->native_code, cfg->code_size);
108 MonoExceptionClause *clause;
109 MonoBasicBlock *basic_block;
114 * mono_emit_unwind_op:
116 * Add an unwind op with the given parameters for the list of unwind ops stored in
 *   cfg->unwind_ops. WHEN is the native code offset the op applies at, TAG is the
 *   DW_CFA_* opcode, REG/VAL its operands.
120 mono_emit_unwind_op (MonoCompile *cfg, int when, int tag, int reg, int val)
122 MonoUnwindOp *op = (MonoUnwindOp *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoUnwindOp));
/* Ops are appended so they stay sorted by code offset. */
129 cfg->unwind_ops = g_slist_append_mempool (cfg->mempool, cfg->unwind_ops, op);
/* Verbose mode: print a human readable form of the CFA directive being recorded. */
130 if (cfg->verbose_level > 1) {
133 printf ("CFA: [%x] def_cfa: %s+0x%x\n", when, mono_arch_regname (reg), val);
135 case DW_CFA_def_cfa_register:
136 printf ("CFA: [%x] def_cfa_reg: %s\n", when, mono_arch_regname (reg));
138 case DW_CFA_def_cfa_offset:
139 printf ("CFA: [%x] def_cfa_offset: 0x%x\n", when, val);
/* DW_CFA_offset stores VAL as a negative offset from the CFA. */
142 printf ("CFA: [%x] offset: %s at cfa-0x%x\n", when, mono_arch_regname (reg), -val);
149 * mono_unlink_bblock:
151 * Unlink two basic blocks: remove TO from FROM's out-edge list and FROM from
 *   TO's in-edge list. The counts are adjusted elsewhere in lines not shown here.
154 mono_unlink_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
/* First pass: verify the edge actually exists in from->out_bb. */
160 for (i = 0; i < from->out_count; ++i) {
161 if (to == from->out_bb [i]) {
/* Second pass: compact out_bb in place, dropping the TO entry. */
168 for (i = 0; i < from->out_count; ++i) {
169 if (from->out_bb [i] != to)
170 from->out_bb [pos ++] = from->out_bb [i];
172 g_assert (pos == from->out_count - 1);
/* Mirror the same two passes on TO's in-edge list. */
177 for (i = 0; i < to->in_count; ++i) {
178 if (from == to->in_bb [i]) {
185 for (i = 0; i < to->in_count; ++i) {
186 if (to->in_bb [i] != from)
187 to->in_bb [pos ++] = to->in_bb [i];
189 g_assert (pos == to->in_count - 1);
195 * mono_bblocks_linked:
197 * Return whether BB1 and BB2 are linked in the CFG, i.e. BB2 appears in
 *   BB1's out-edge list.
200 mono_bblocks_linked (MonoBasicBlock *bb1, MonoBasicBlock *bb2)
204 for (i = 0; i < bb1->out_count; ++i) {
205 if (bb1->out_bb [i] == bb2)
/*
 * mono_find_block_region_notry:
 *
 *   Compute the exception-region id for IL offset OFFSET, ignoring try
 * regions. The encoding packs (clause index + 1) into the high bits and
 * ORs in a MONO_REGION_* kind plus the raw clause flags.
 */
213 mono_find_block_region_notry (MonoCompile *cfg, int offset)
215 MonoMethodHeader *header = cfg->header;
216 MonoExceptionClause *clause;
219 for (i = 0; i < header->num_clauses; ++i) {
220 clause = &header->clauses [i];
/* Filter blocks run from filter_offset up to the handler start. */
221 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
222 (offset < (clause->handler_offset)))
223 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
225 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
226 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
227 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
228 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
229 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
/* Anything else inside a handler is a catch region. */
231 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
239 * mono_get_block_region_notry:
241 * Return the region corresponding to REGION, ignoring try clauses nested inside
 *   finally/handler clauses: a try region is re-resolved at its try_offset.
245 mono_get_block_region_notry (MonoCompile *cfg, int region)
247 if ((region & (0xf << 4)) == MONO_REGION_TRY) {
248 MonoMethodHeader *header = cfg->header;
251 * This can happen if a try clause is nested inside a finally clause.
/* The clause index is stored biased by one in the high bits of REGION. */
253 int clause_index = (region >> 8) - 1;
254 g_assert (clause_index >= 0 && clause_index < header->num_clauses);
256 region = mono_find_block_region_notry (cfg, header->clauses [clause_index].try_offset);
/*
 * mono_find_spvar_for_region:
 *
 *   Look up the stack-pointer save variable associated with the (normalized)
 * exception REGION in cfg->spvars, or NULL if none was created.
 */
263 mono_find_spvar_for_region (MonoCompile *cfg, int region)
265 region = mono_get_block_region_notry (cfg, region);
267 return (MonoInst *)g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
/*
 * df_visit:
 *
 *   Depth-first traversal of the CFG starting at START; records each block's
 * depth-first number (dfn), its df_parent, and stores the block into ARRAY
 * indexed by dfn. Blocks with a non-zero dfn are treated as already visited.
 * NOTE(review): the *dfn increments occur on lines not shown in this extract.
 */
271 df_visit (MonoBasicBlock *start, int *dfn, MonoBasicBlock **array)
275 array [*dfn] = start;
276 /* g_print ("visit %d at %p (BB%ld)\n", *dfn, start->cil_code, start->block_num); */
277 for (i = 0; i < start->out_count; ++i) {
/* Skip successors that already have a depth-first number assigned. */
278 if (start->out_bb [i]->dfn)
281 start->out_bb [i]->dfn = *dfn;
282 start->out_bb [i]->df_parent = start;
283 array [*dfn] = start->out_bb [i];
284 df_visit (start->out_bb [i], dfn, array);
/*
 * mono_reverse_branch_op:
 *
 *   Return the opcode of the branch with the inverted condition of OPCODE
 * (e.g. BEQ -> BNE_UN). Separate lookup tables cover the CIL integer
 * branches and the JIT's float (FB*), long (LB*) and int (IB*) variants;
 * each table is indexed by the offset from the family's BEQ opcode.
 */
289 mono_reverse_branch_op (guint32 opcode)
291 static const int reverse_map [] = {
292 CEE_BNE_UN, CEE_BLT, CEE_BLE, CEE_BGT, CEE_BGE,
293 CEE_BEQ, CEE_BLT_UN, CEE_BLE_UN, CEE_BGT_UN, CEE_BGE_UN
295 static const int reverse_fmap [] = {
296 OP_FBNE_UN, OP_FBLT, OP_FBLE, OP_FBGT, OP_FBGE,
297 OP_FBEQ, OP_FBLT_UN, OP_FBLE_UN, OP_FBGT_UN, OP_FBGE_UN
299 static const int reverse_lmap [] = {
300 OP_LBNE_UN, OP_LBLT, OP_LBLE, OP_LBGT, OP_LBGE,
301 OP_LBEQ, OP_LBLT_UN, OP_LBLE_UN, OP_LBGT_UN, OP_LBGE_UN
303 static const int reverse_imap [] = {
304 OP_IBNE_UN, OP_IBLT, OP_IBLE, OP_IBGT, OP_IBGE,
305 OP_IBEQ, OP_IBLT_UN, OP_IBLE_UN, OP_IBGT_UN, OP_IBGE_UN
/* Pick the table matching the opcode's family; the ranges are contiguous. */
308 if (opcode >= CEE_BEQ && opcode <= CEE_BLT_UN) {
309 opcode = reverse_map [opcode - CEE_BEQ];
310 } else if (opcode >= OP_FBEQ && opcode <= OP_FBLT_UN) {
311 opcode = reverse_fmap [opcode - OP_FBEQ];
312 } else if (opcode >= OP_LBEQ && opcode <= OP_LBLT_UN) {
313 opcode = reverse_lmap [opcode - OP_LBEQ];
314 } else if (opcode >= OP_IBEQ && opcode <= OP_IBLT_UN) {
315 opcode = reverse_imap [opcode - OP_IBEQ];
/* Non-conditional-branch opcodes are a caller bug. */
317 g_assert_not_reached ();
/*
 * mono_type_to_store_membase:
 *
 *   Map the managed type TYPE to the OP_STORE*_MEMBASE_REG opcode used to
 * store a value of that type to memory. Enums/gshared types are first
 * reduced to their underlying representation.
 */
323 mono_type_to_store_membase (MonoCompile *cfg, MonoType *type)
325 type = mini_get_underlying_type (type);
328 switch (type->type) {
331 return OP_STOREI1_MEMBASE_REG;
334 return OP_STOREI2_MEMBASE_REG;
337 return OP_STOREI4_MEMBASE_REG;
/* Pointer-sized values use the native-width store. */
341 case MONO_TYPE_FNPTR:
342 return OP_STORE_MEMBASE_REG;
343 case MONO_TYPE_CLASS:
344 case MONO_TYPE_STRING:
345 case MONO_TYPE_OBJECT:
346 case MONO_TYPE_SZARRAY:
347 case MONO_TYPE_ARRAY:
348 return OP_STORE_MEMBASE_REG;
351 return OP_STOREI8_MEMBASE_REG;
353 return OP_STORER4_MEMBASE_REG;
355 return OP_STORER8_MEMBASE_REG;
356 case MONO_TYPE_VALUETYPE:
/* Enums store as their base type; loop back through the switch. */
357 if (type->data.klass->enumtype) {
358 type = mono_class_enum_basetype (type->data.klass);
/* SIMD value types get the vector store, everything else a vtype copy. */
361 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
362 return OP_STOREX_MEMBASE;
363 return OP_STOREV_MEMBASE;
364 case MONO_TYPE_TYPEDBYREF:
365 return OP_STOREV_MEMBASE;
366 case MONO_TYPE_GENERICINST:
367 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
368 return OP_STOREX_MEMBASE;
/* Re-dispatch on the generic container's open type. */
369 type = &type->data.generic_class->container_class->byval_arg;
/* Type variables only reach here for gsharedvt value types. */
373 g_assert (mini_type_var_is_vt (type));
374 return OP_STOREV_MEMBASE;
376 g_error ("unknown type 0x%02x in type_to_store_membase", type->type);
/*
 * mono_type_to_load_membase:
 *
 *   Map the managed type TYPE to the OP_LOAD*_MEMBASE opcode used to load a
 * value of that type from memory; the mirror of mono_type_to_store_membase.
 * Note that loads distinguish signed/unsigned (LOADI1 vs LOADU1 etc.).
 */
382 mono_type_to_load_membase (MonoCompile *cfg, MonoType *type)
384 type = mini_get_underlying_type (type);
386 switch (type->type) {
388 return OP_LOADI1_MEMBASE;
390 return OP_LOADU1_MEMBASE;
392 return OP_LOADI2_MEMBASE;
394 return OP_LOADU2_MEMBASE;
396 return OP_LOADI4_MEMBASE;
398 return OP_LOADU4_MEMBASE;
/* Pointer-sized values use the native-width load. */
402 case MONO_TYPE_FNPTR:
403 return OP_LOAD_MEMBASE;
404 case MONO_TYPE_CLASS:
405 case MONO_TYPE_STRING:
406 case MONO_TYPE_OBJECT:
407 case MONO_TYPE_SZARRAY:
408 case MONO_TYPE_ARRAY:
409 return OP_LOAD_MEMBASE;
412 return OP_LOADI8_MEMBASE;
414 return OP_LOADR4_MEMBASE;
416 return OP_LOADR8_MEMBASE;
417 case MONO_TYPE_VALUETYPE:
/* SIMD value types get the vector load; otherwise fall through to LOADV. */
418 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
419 return OP_LOADX_MEMBASE;
420 case MONO_TYPE_TYPEDBYREF:
421 return OP_LOADV_MEMBASE;
422 case MONO_TYPE_GENERICINST:
423 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
424 return OP_LOADX_MEMBASE;
425 if (mono_type_generic_inst_is_valuetype (type))
426 return OP_LOADV_MEMBASE;
428 return OP_LOAD_MEMBASE;
/* Type variables only reach here under generic sharing, for vtypes. */
432 g_assert (cfg->gshared);
433 g_assert (mini_type_var_is_vt (type));
434 return OP_LOADV_MEMBASE;
436 g_error ("unknown type 0x%02x in type_to_load_membase", type->type);
/*
 * mini_type_to_stind:
 *
 *   Return the CEE_STIND_* opcode for storing a value of TYPE indirectly.
 * Under generic sharing, bare type variables are only legal for value types.
 */
442 mini_type_to_stind (MonoCompile* cfg, MonoType *type)
444 type = mini_get_underlying_type (type);
445 if (cfg->gshared && !type->byref && (type->type == MONO_TYPE_VAR || type->type == MONO_TYPE_MVAR)) {
446 g_assert (mini_type_var_is_vt (type));
449 return mono_type_to_stind (type);
/*
 * mono_op_imm_to_op:
 *
 *   Map an OP_.._IMM opcode to its non-immediate (register) variant.
 * NOTE(review): this extract shows only a few of the switch cases; the
 * 32-bit (#if SIZEOF_REGISTER == 4) guards select different long-opcode
 * mappings on 32-bit targets. Code left byte-identical.
 */
453 mono_op_imm_to_op (int opcode)
457 #if SIZEOF_REGISTER == 4
475 #if SIZEOF_REGISTER == 4
481 #if SIZEOF_REGISTER == 4
487 #if SIZEOF_REGISTER == 4
533 #if SIZEOF_REGISTER == 4
539 #if SIZEOF_REGISTER == 4
558 case OP_ICOMPARE_IMM:
560 case OP_LOCALLOC_IMM:
568 * mono_decompose_op_imm:
570 * Replace the OP_.._IMM INS with its non IMM variant, materializing the
 *   immediate into a fresh vreg with one (or, for 64-bit immediates on
 *   32-bit targets, two) OP_ICONST instructions inserted before INS.
573 mono_decompose_op_imm (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst *ins)
575 int opcode2 = mono_op_imm_to_op (ins->opcode);
578 const char *spec = INS_INFO (ins->opcode);
/* 'l' second source operand: the immediate is 64-bit, load it as a lo/hi pair. */
580 if (spec [MONO_INST_SRC2] == 'l') {
581 dreg = mono_alloc_lreg (cfg);
583 /* Load the 64bit constant using decomposed ops */
584 MONO_INST_NEW (cfg, temp, OP_ICONST);
585 temp->inst_c0 = ins->inst_ls_word;
586 temp->dreg = MONO_LVREG_LS (dreg);
587 mono_bblock_insert_before_ins (bb, ins, temp);
589 MONO_INST_NEW (cfg, temp, OP_ICONST);
590 temp->inst_c0 = ins->inst_ms_word;
591 temp->dreg = MONO_LVREG_MS (dreg);
/* Ordinary case: a single int-sized immediate. */
593 dreg = mono_alloc_ireg (cfg);
595 MONO_INST_NEW (cfg, temp, OP_ICONST);
596 temp->inst_c0 = ins->inst_imm;
600 mono_bblock_insert_before_ins (bb, ins, temp);
/* mono_op_imm_to_op returning failure means this opcode has no reg variant. */
603 g_error ("mono_op_imm_to_op failed for %s\n", mono_inst_name (ins->opcode));
604 ins->opcode = opcode2;
606 if (ins->opcode == OP_LOCALLOC)
/* New vregs were allocated above, so the bblock's max must be refreshed. */
611 bb->max_vreg = MAX (bb->max_vreg, cfg->next_vreg);
/*
 * set_vreg_to_inst:
 *
 *   Record INST as the variable backing virtual register VREG in
 * cfg->vreg_to_inst, growing the mempool-allocated table (doubling,
 * minimum 32 entries) and copying the old contents when needed.
 */
615 set_vreg_to_inst (MonoCompile *cfg, int vreg, MonoInst *inst)
617 if (vreg >= cfg->vreg_to_inst_len) {
618 MonoInst **tmp = cfg->vreg_to_inst;
619 int size = cfg->vreg_to_inst_len;
621 while (vreg >= cfg->vreg_to_inst_len)
622 cfg->vreg_to_inst_len = cfg->vreg_to_inst_len ? cfg->vreg_to_inst_len * 2 : 32;
/* Mempool memory is never freed individually, so the old table is just abandoned. */
623 cfg->vreg_to_inst = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * cfg->vreg_to_inst_len);
625 memcpy (cfg->vreg_to_inst, tmp, size * sizeof (MonoInst*));
627 cfg->vreg_to_inst [vreg] = inst;
/* TRUE if TYPE is a non-byref 64-bit integer / floating point type, respectively.
 * Note mono_type_is_long resolves enums via mono_type_get_underlying_type first. */
630 #define mono_type_is_long(type) (!(type)->byref && ((mono_type_get_underlying_type (type)->type == MONO_TYPE_I8) || (mono_type_get_underlying_type (type)->type == MONO_TYPE_U8)))
631 #define mono_type_is_float(type) (!(type)->byref && (((type)->type == MONO_TYPE_R8) || ((type)->type == MONO_TYPE_R4)))
/*
 * mono_compile_create_var_for_vreg:
 *
 *   Create a new variable of TYPE (OP_LOCAL/OP_ARG) backed by the given
 * virtual register VREG, registering it in cfg->varinfo/cfg->vars. On
 * 32-bit targets, long (and, with soft-float, float) variables get two
 * extra dummy OP_LOCALs for their low/high component vregs.
 */
634 mono_compile_create_var_for_vreg (MonoCompile *cfg, MonoType *type, int opcode, int vreg)
637 int num = cfg->num_varinfo;
640 type = mini_get_underlying_type (type);
/* Grow the varinfo/vars arrays (doubling, minimum 32) when full. */
642 if ((num + 1) >= cfg->varinfo_count) {
643 int orig_count = cfg->varinfo_count;
644 cfg->varinfo_count = cfg->varinfo_count ? (cfg->varinfo_count * 2) : 32;
645 cfg->varinfo = (MonoInst **)g_realloc (cfg->varinfo, sizeof (MonoInst*) * cfg->varinfo_count);
646 cfg->vars = (MonoMethodVar *)g_realloc (cfg->vars, sizeof (MonoMethodVar) * cfg->varinfo_count);
647 memset (&cfg->vars [orig_count], 0, (cfg->varinfo_count - orig_count) * sizeof (MonoMethodVar));
650 cfg->stat_allocate_var++;
652 MONO_INST_NEW (cfg, inst, opcode);
654 inst->inst_vtype = type;
655 inst->klass = mono_class_from_mono_type (type);
656 type_to_eval_stack_type (cfg, type, inst);
657 /* if set to 1 the variable is native */
658 inst->backend.is_pinvoke = 0;
/* A broken class makes the whole compilation fail with a TypeLoadException. */
661 if (mono_class_has_failure (inst->klass))
662 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
/* Precise GC maps need to know which vregs hold managed pointers/references. */
664 if (cfg->compute_gc_maps) {
666 mono_mark_vreg_as_mp (cfg, vreg);
668 if ((MONO_TYPE_ISSTRUCT (type) && inst->klass->has_references) || mini_type_is_reference (type)) {
669 inst->flags |= MONO_INST_GC_TRACK;
670 mono_mark_vreg_as_ref (cfg, vreg);
675 cfg->varinfo [num] = inst;
677 cfg->vars [num].idx = num;
678 cfg->vars [num].vreg = vreg;
/* 0xffff marks "not yet used" for liveness range computation. */
679 cfg->vars [num].range.first_use.pos.bid = 0xffff;
680 cfg->vars [num].reg = -1;
683 set_vreg_to_inst (cfg, vreg, inst);
685 #if SIZEOF_REGISTER == 4
/* On 32-bit, longs (and floats under soft-float) occupy a register pair. */
686 if (mono_arch_is_soft_float ()) {
687 regpair = mono_type_is_long (type) || mono_type_is_float (type);
689 regpair = mono_type_is_long (type);
699 * These two cannot be allocated using create_var_for_vreg since that would
700 * put it into the cfg->varinfo array, confusing many parts of the JIT.
704 * Set flags to VOLATILE so SSA skips it.
707 if (cfg->verbose_level >= 4) {
708 printf (" Create LVAR R%d (R%d, R%d)\n", inst->dreg, MONO_LVREG_LS (inst->dreg), MONO_LVREG_MS (inst->dreg));
711 if (mono_arch_is_soft_float () && cfg->opt & MONO_OPT_SSA) {
712 if (mono_type_is_float (type))
713 inst->flags = MONO_INST_VOLATILE;
716 /* Allocate a dummy MonoInst for the first vreg */
717 MONO_INST_NEW (cfg, tree, OP_LOCAL);
718 tree->dreg = MONO_LVREG_LS (inst->dreg);
719 if (cfg->opt & MONO_OPT_SSA)
720 tree->flags = MONO_INST_VOLATILE;
722 tree->type = STACK_I4;
723 tree->inst_vtype = &mono_defaults.int32_class->byval_arg;
724 tree->klass = mono_class_from_mono_type (tree->inst_vtype);
726 set_vreg_to_inst (cfg, MONO_LVREG_LS (inst->dreg), tree);
728 /* Allocate a dummy MonoInst for the second vreg */
729 MONO_INST_NEW (cfg, tree, OP_LOCAL);
730 tree->dreg = MONO_LVREG_MS (inst->dreg);
731 if (cfg->opt & MONO_OPT_SSA)
732 tree->flags = MONO_INST_VOLATILE;
734 tree->type = STACK_I4;
735 tree->inst_vtype = &mono_defaults.int32_class->byval_arg;
736 tree->klass = mono_class_from_mono_type (tree->inst_vtype);
738 set_vreg_to_inst (cfg, MONO_LVREG_MS (inst->dreg), tree);
742 if (cfg->verbose_level > 2)
743 g_print ("created temp %d (R%d) of type %s\n", num, vreg, mono_type_get_name (type));
/*
 * mono_compile_create_var:
 *
 *   Create a new variable of TYPE with a freshly allocated virtual register:
 * an I8 dreg for longs, an R8 dreg for floats under soft-float, and a
 * unified pointer-sized dreg for everything else.
 */
748 mono_compile_create_var (MonoCompile *cfg, MonoType *type, int opcode)
751 type = mini_get_underlying_type (type);
753 if (mono_type_is_long (type))
754 dreg = mono_alloc_dreg (cfg, STACK_I8);
755 else if (mono_arch_is_soft_float () && mono_type_is_float (type))
756 dreg = mono_alloc_dreg (cfg, STACK_R8);
758 /* All the others are unified */
759 dreg = mono_alloc_preg (cfg);
761 return mono_compile_create_var_for_vreg (cfg, type, opcode, dreg);
/*
 * mini_get_int_to_float_spill_area:
 *
 *   Lazily create and return the int32 local used as a spill slot for
 * int<->float conversions; marked VOLATILE to keep it out of regalloc.
 */
765 mini_get_int_to_float_spill_area (MonoCompile *cfg)
768 if (!cfg->iconv_raw_var) {
769 cfg->iconv_raw_var = mono_compile_create_var (cfg, &mono_defaults.int32_class->byval_arg, OP_LOCAL);
770 cfg->iconv_raw_var->flags |= MONO_INST_VOLATILE; /*FIXME, use the don't regalloc flag*/
772 return cfg->iconv_raw_var;
/*
 * mono_mark_vreg_as_ref:
 *
 *   Mark VREG as holding a GC reference for precise stack scanning, growing
 * the cfg->vreg_is_ref table (doubling, minimum 32) as needed.
 */
779 mono_mark_vreg_as_ref (MonoCompile *cfg, int vreg)
781 if (vreg >= cfg->vreg_is_ref_len) {
782 gboolean *tmp = cfg->vreg_is_ref;
783 int size = cfg->vreg_is_ref_len;
785 while (vreg >= cfg->vreg_is_ref_len)
786 cfg->vreg_is_ref_len = cfg->vreg_is_ref_len ? cfg->vreg_is_ref_len * 2 : 32;
787 cfg->vreg_is_ref = (gboolean *)mono_mempool_alloc0 (cfg->mempool, sizeof (gboolean) * cfg->vreg_is_ref_len);
789 memcpy (cfg->vreg_is_ref, tmp, size * sizeof (gboolean));
791 cfg->vreg_is_ref [vreg] = TRUE;
/*
 * mono_mark_vreg_as_mp:
 *
 *   Mark VREG as holding a managed pointer (interior pointer) for precise
 * stack scanning; same growth strategy as mono_mark_vreg_as_ref.
 */
795 mono_mark_vreg_as_mp (MonoCompile *cfg, int vreg)
797 if (vreg >= cfg->vreg_is_mp_len) {
798 gboolean *tmp = cfg->vreg_is_mp;
799 int size = cfg->vreg_is_mp_len;
801 while (vreg >= cfg->vreg_is_mp_len)
802 cfg->vreg_is_mp_len = cfg->vreg_is_mp_len ? cfg->vreg_is_mp_len * 2 : 32;
803 cfg->vreg_is_mp = (gboolean *)mono_mempool_alloc0 (cfg->mempool, sizeof (gboolean) * cfg->vreg_is_mp_len);
805 memcpy (cfg->vreg_is_mp, tmp, size * sizeof (gboolean));
807 cfg->vreg_is_mp [vreg] = TRUE;
/*
 * type_from_stack_type:
 *
 *   Map INS's evaluation-stack type (STACK_*) back to a MonoType*,
 * consulting ins->klass for managed pointers, objects and value types.
 */
811 type_from_stack_type (MonoInst *ins)
814 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
815 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
816 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
817 case STACK_R8: return &mono_defaults.double_class->byval_arg;
820 * NOTE(review): this used to be commented out for no specific reason, but
821 * commenting it out breaks bug #80235.
824 return &ins->klass->this_arg;
826 return &mono_defaults.object_class->this_arg;
828 /* ins->klass may not be set for ldnull.
829 * Also, if we have a boxed valuetype, we want an object class,
830 * not the valuetype class
832 if (ins->klass && !ins->klass->valuetype)
833 return &ins->klass->byval_arg;
834 return &mono_defaults.object_class->byval_arg;
835 case STACK_VTYPE: return &ins->klass->byval_arg;
837 g_error ("stack type %d to montype not handled\n", ins->type);
/*
 * mono_type_from_stack_type:
 *
 *   Public wrapper around the static type_from_stack_type helper.
 */
843 mono_type_from_stack_type (MonoInst *ins)
845 return type_from_stack_type (ins);
849 * mono_add_ins_to_end:
851 * Same as MONO_ADD_INS, but add INST before any branches at the end of BB,
 *   and — for conditional branches — before the compare instruction that
 *   feeds them, so the compare/branch pair is not split.
854 mono_add_ins_to_end (MonoBasicBlock *bb, MonoInst *inst)
/* Empty bblock: just append. */
859 MONO_ADD_INS (bb, inst);
863 switch (bb->last_ins->opcode) {
/* Unconditional branch/switch at the end: insert right before it. */
877 mono_bblock_insert_before_ins (bb, bb->last_ins, inst);
880 if (MONO_IS_COND_BRANCH_OP (bb->last_ins)) {
881 /* Need to insert the ins before the compare */
882 if (bb->code == bb->last_ins) {
/* The branch is the only instruction in the bblock. */
883 mono_bblock_insert_before_ins (bb, bb->last_ins, inst);
887 if (bb->code->next == bb->last_ins) {
888 /* Only two instructions */
889 opcode = bb->code->opcode;
891 if ((opcode == OP_COMPARE) || (opcode == OP_COMPARE_IMM) || (opcode == OP_ICOMPARE) || (opcode == OP_ICOMPARE_IMM) || (opcode == OP_FCOMPARE) || (opcode == OP_LCOMPARE) || (opcode == OP_LCOMPARE_IMM) || (opcode == OP_RCOMPARE)) {
/* compare + branch pair: go before the compare. */
893 mono_bblock_insert_before_ins (bb, bb->code, inst);
895 mono_bblock_insert_before_ins (bb, bb->last_ins, inst);
/* General case: inspect the instruction just before the branch. */
898 opcode = bb->last_ins->prev->opcode;
900 if ((opcode == OP_COMPARE) || (opcode == OP_COMPARE_IMM) || (opcode == OP_ICOMPARE) || (opcode == OP_ICOMPARE_IMM) || (opcode == OP_FCOMPARE) || (opcode == OP_LCOMPARE) || (opcode == OP_LCOMPARE_IMM) || (opcode == OP_RCOMPARE)) {
902 mono_bblock_insert_before_ins (bb, bb->last_ins->prev, inst);
904 mono_bblock_insert_before_ins (bb, bb->last_ins, inst);
/* No branch at the end: plain append. */
909 MONO_ADD_INS (bb, inst);
/*
 * mono_create_jump_table:
 *
 *   Register a SWITCH patch for LABEL: allocate a MonoJumpInfoBBTable
 * describing the NUM_BLOCKS target bblocks in BBS and prepend the patch
 * onto cfg->patch_info so the backend can emit/patch the jump table.
 */
915 mono_create_jump_table (MonoCompile *cfg, MonoInst *label, MonoBasicBlock **bbs, int num_blocks)
917 MonoJumpInfo *ji = (MonoJumpInfo *)mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfo));
918 MonoJumpInfoBBTable *table;
920 table = (MonoJumpInfoBBTable *)mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
922 table->table_size = num_blocks;
924 ji->ip.label = label;
925 ji->type = MONO_PATCH_INFO_SWITCH;
926 ji->data.table = table;
/* Patches form a singly linked list headed at cfg->patch_info. */
927 ji->next = cfg->patch_info;
928 cfg->patch_info = ji;
/*
 * mono_get_array_new_va_signature:
 *
 *   Return (and cache per-arity in a static hash table) the icall signature
 * for ves_array_new_va with ARITY dimension arguments: an intptr method
 * token parameter followed by ARITY intptr lengths, returning object.
 * NOTE(review): callers are expected to hold the appropriate lock for the
 * static cache — the locking lines are not visible in this extract.
 */
931 static MonoMethodSignature *
932 mono_get_array_new_va_signature (int arity)
934 static GHashTable *sighash;
935 MonoMethodSignature *res;
940 sighash = g_hash_table_new (NULL, NULL);
942 else if ((res = (MonoMethodSignature *)g_hash_table_lookup (sighash, GINT_TO_POINTER (arity)))) {
947 res = mono_metadata_signature_alloc (mono_defaults.corlib, arity + 1);
950 if (ARCH_VARARG_ICALLS)
951 /* Only set this only some archs since not all backends can handle varargs+pinvoke */
952 res->call_convention = MONO_CALL_VARARG;
955 res->call_convention = MONO_CALL_C;
/* params [0] is the method token; the rest are the per-dimension lengths. */
958 res->params [0] = &mono_defaults.int_class->byval_arg;
959 for (i = 0; i < arity; i++)
960 res->params [i + 1] = &mono_defaults.int_class->byval_arg;
962 res->ret = &mono_defaults.object_class->byval_arg;
964 g_hash_table_insert (sighash, GINT_TO_POINTER (arity), res);
/*
 * mono_get_array_new_va_icall:
 *
 *   Return the JIT icall info for ves_array_new_va_<RANK>, registering the
 * icall (with a varargs-style signature of RANK length arguments) on first
 * use so it gets an icall wrapper.
 */
971 mono_get_array_new_va_icall (int rank)
973 MonoMethodSignature *esig;
974 char icall_name [256];
976 MonoJitICallInfo *info;
978 /* Need to register the icall so it gets an icall wrapper */
979 sprintf (icall_name, "ves_array_new_va_%d", rank);
/* Fast path: already registered under this per-rank name. */
982 info = mono_find_jit_icall_by_name (icall_name);
984 esig = mono_get_array_new_va_signature (rank);
985 name = g_strdup (icall_name);
986 info = mono_register_jit_icall (mono_array_new_va, name, esig, FALSE);
/*
 * mini_class_is_system_array:
 *
 *   TRUE if KLASS derives directly from System.Array, i.e. it is an array
 * type. NOTE(review): the return statements are on lines not shown here.
 */
994 mini_class_is_system_array (MonoClass *klass)
996 if (klass->parent == mono_defaults.array_class)
/*
 * mini_assembly_can_skip_verification:
 *
 *   Decide whether METHOD's assembly may skip IL verification: trusted
 * assemblies (GAC or corlib) qualify, as do assemblies carrying the
 * SkipVerification permission; most wrapper methods do not.
 */
1003 mini_assembly_can_skip_verification (MonoDomain *domain, MonoMethod *method)
1005 MonoAssembly *assembly = method->klass->image->assembly;
1006 if (method->wrapper_type != MONO_WRAPPER_NONE && method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
1008 if (assembly->in_gac || assembly->image == mono_defaults.corlib)
1010 return mono_assembly_has_skip_verification (assembly);
1014 * mini_method_verify:
1016 * Verify the method using the verifier.
1018 * Returns true if the method is invalid (sets the appropriate exception
 *   state on CFG when FAIL_COMPILE is requested).
1021 mini_method_verify (MonoCompile *cfg, MonoMethod *method, gboolean fail_compile)
1024 gboolean is_fulltrust;
/* Cached result: verified once per method. */
1026 if (method->verification_success)
1029 if (!mono_verifier_is_enabled_for_method (method))
1032 /*skip verification implies the assembly must be fully trusted */
1033 is_fulltrust = mono_verifier_is_method_full_trust (method) || mini_assembly_can_skip_verification (cfg->domain, method);
1035 res = mono_method_verify_with_current_settings (method, cfg->skip_visibility, is_fulltrust);
1038 for (tmp = res; tmp; tmp = tmp->next) {
1039 MonoVerifyInfoExtended *info = (MonoVerifyInfoExtended *)tmp->data;
/* Hard verifier errors always fail the compile. */
1040 if (info->info.status == MONO_VERIFY_ERROR) {
1042 char *method_name = mono_method_full_name (method, TRUE);
1043 cfg->exception_type = info->exception_type;
1044 cfg->exception_message = g_strdup_printf ("Error verifying %s: %s", method_name, info->info.message);
1045 g_free (method_name);
1047 mono_free_verify_list (res);
/* Non-verifiable IL only fails when not fully trusted, except access
 * violations which fail regardless of trust level. */
1050 if (info->info.status == MONO_VERIFY_NOT_VERIFIABLE && (!is_fulltrust || info->exception_type == MONO_EXCEPTION_METHOD_ACCESS || info->exception_type == MONO_EXCEPTION_FIELD_ACCESS)) {
1052 char *method_name = mono_method_full_name (method, TRUE);
1053 char *msg = g_strdup_printf ("Error verifying %s: %s", method_name, info->info.message);
1055 if (info->exception_type == MONO_EXCEPTION_METHOD_ACCESS)
1056 mono_error_set_generic_error (&cfg->error, "System", "MethodAccessException", "%s", msg);
1057 else if (info->exception_type == MONO_EXCEPTION_FIELD_ACCESS)
1058 mono_error_set_generic_error (&cfg->error, "System", "FieldAccessException", "%s", msg);
1059 else if (info->exception_type == MONO_EXCEPTION_UNVERIFIABLE_IL)
1060 mono_error_set_generic_error (&cfg->error, "System.Security", "VerificationException", "%s", msg);
1061 if (!mono_error_ok (&cfg->error)) {
1062 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
1065 cfg->exception_type = info->exception_type;
1066 cfg->exception_message = msg;
1068 g_free (method_name);
1070 mono_free_verify_list (res);
1074 mono_free_verify_list (res);
/* Cache success so subsequent compiles skip verification. */
1076 method->verification_success = 1;
1080 /*Returns true if something went wrong*/
/*
 * mono_compile_is_broken:
 *
 *   Run IL verification on METHOD's generic definition (walking up the
 * inflation chain first); corlib-internal assemblies are never verified.
 */
1082 mono_compile_is_broken (MonoCompile *cfg, MonoMethod *method, gboolean fail_compile)
1084 MonoMethod *method_definition = method;
1085 gboolean dont_verify = method->klass->image->assembly->corlib_internal;
/* Verify the open generic definition, not each inflated instance. */
1087 while (method_definition->is_inflated) {
1088 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
1089 method_definition = imethod->declaring;
1092 return !dont_verify && mini_method_verify (cfg, method_definition, fail_compile);
/*
 * mono_dynamic_code_hash_insert:
 *
 *   Record JI as the JIT info for the dynamic METHOD in the per-domain
 * dynamic_code_hash, creating the table lazily on first use.
 */
1096 mono_dynamic_code_hash_insert (MonoDomain *domain, MonoMethod *method, MonoJitDynamicMethodInfo *ji)
1098 if (!domain_jit_info (domain)->dynamic_code_hash)
1099 domain_jit_info (domain)->dynamic_code_hash = g_hash_table_new (NULL, NULL);
1100 g_hash_table_insert (domain_jit_info (domain)->dynamic_code_hash, method, ji)
/*
 * mono_dynamic_code_hash_lookup:
 *
 *   Return the dynamic-method JIT info recorded for METHOD in DOMAIN, or
 * NULL if the table does not exist or has no entry.
 */
1103 static MonoJitDynamicMethodInfo*
1104 mono_dynamic_code_hash_lookup (MonoDomain *domain, MonoMethod *method)
1106 MonoJitDynamicMethodInfo *res;
1108 if (domain_jit_info (domain)->dynamic_code_hash)
1109 res = (MonoJitDynamicMethodInfo *)g_hash_table_lookup (domain_jit_info (domain)->dynamic_code_hash, method);
1117 GList *active, *inactive;
/*
 * compare_by_interval_start_pos_func:
 *
 *   GCompareFunc ordering MonoMethodVar-s by the start position of their
 * live interval; variables without a range sort relative to those with one.
 */
1122 compare_by_interval_start_pos_func (gconstpointer a, gconstpointer b)
1124 MonoMethodVar *v1 = (MonoMethodVar*)a;
1125 MonoMethodVar *v2 = (MonoMethodVar*)b;
1129 else if (v1->interval->range && v2->interval->range)
1130 return v1->interval->range->from - v2->interval->range->from;
1131 else if (v1->interval->range)
/* Linear-scan debug tracing: compiled in only when enabled, otherwise a no-op. */
1138 #define LSCAN_DEBUG(a) do { a; } while (0)
1140 #define LSCAN_DEBUG(a)
/*
 * mono_allocate_stack_slots2:
 *
 *   Allocate stack slots for non register-allocated variables using precise
 * live intervals (linear scan with active/inactive sets), reusing slots
 * between variables whose intervals do not overlap. Returns the per-variable
 * offsets array; *STACK_SIZE / *STACK_ALIGN receive the locals area size and
 * alignment. NOTE(review): many interleaved lines are missing from this
 * extract; code left byte-identical, comments added only.
 */
1144 mono_allocate_stack_slots2 (MonoCompile *cfg, gboolean backward, guint32 *stack_size, guint32 *stack_align)
1146 int i, slot, offset, size;
1151 GList *vars = NULL, *l, *unhandled;
1152 StackSlotInfo *scalar_stack_slots, *vtype_stack_slots, *slot_info;
1155 gboolean reuse_slot;
1157 LSCAN_DEBUG (printf ("Allocate Stack Slots 2 for %s:\n", mono_method_full_name (cfg->method, TRUE)));
/* One slot-sharing bucket per scalar MonoType kind; vtype buckets are lazy. */
1159 scalar_stack_slots = (StackSlotInfo *)mono_mempool_alloc0 (cfg->mempool, sizeof (StackSlotInfo) * MONO_TYPE_PINNED);
1160 vtype_stack_slots = NULL;
1163 offsets = (gint32 *)mono_mempool_alloc (cfg->mempool, sizeof (gint32) * cfg->num_varinfo);
1164 for (i = 0; i < cfg->num_varinfo; ++i)
/* Collect the variables that actually need a stack slot. */
1167 for (i = cfg->locals_start; i < cfg->num_varinfo; i++) {
1168 inst = cfg->varinfo [i];
1169 vmv = MONO_VARINFO (cfg, i);
1171 if ((inst->flags & MONO_INST_IS_DEAD) || inst->opcode == OP_REGVAR || inst->opcode == OP_REGOFFSET)
1174 vars = g_list_prepend (vars, vmv);
/* Process variables in order of increasing interval start position. */
1177 vars = g_list_sort (vars, compare_by_interval_start_pos_func);
/* Sanity pass: starts must be monotonically non-decreasing. */
1182 for (unhandled = vars; unhandled; unhandled = unhandled->next) {
1183 MonoMethodVar *current = unhandled->data;
1185 if (current->interval->range) {
1186 g_assert (current->interval->range->from >= i);
1187 i = current->interval->range->from;
/* Main linear-scan loop. */
1194 for (unhandled = vars; unhandled; unhandled = unhandled->next) {
1195 MonoMethodVar *current = (MonoMethodVar *)unhandled->data;
1198 inst = cfg->varinfo [vmv->idx];
1200 t = mono_type_get_underlying_type (inst->inst_vtype);
1201 if (cfg->gsharedvt && mini_is_gsharedvt_variable_type (t))
1204 /* inst->backend.is_pinvoke indicates native sized value types, this is used by the
1205 * pinvoke wrappers when they call functions returning structures */
1206 if (inst->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (t) && t->type != MONO_TYPE_TYPEDBYREF) {
1207 size = mono_class_native_size (mono_class_from_mono_type (t), &align);
1212 size = mini_type_stack_size (t, &ialign);
1215 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (t)))
1220 if (cfg->disable_reuse_stack_slots)
/* Pick the slot-sharing bucket based on the variable's type. */
1223 t = mini_get_underlying_type (t);
1225 case MONO_TYPE_GENERICINST:
1226 if (!mono_type_generic_inst_is_valuetype (t)) {
1227 slot_info = &scalar_stack_slots [t->type];
1231 case MONO_TYPE_VALUETYPE:
/* Value types only share slots with the exact same class. */
1232 if (!vtype_stack_slots)
1233 vtype_stack_slots = (StackSlotInfo *)mono_mempool_alloc0 (cfg->mempool, sizeof (StackSlotInfo) * 256);
1234 for (i = 0; i < nvtypes; ++i)
1235 if (t->data.klass == vtype_stack_slots [i].vtype)
1238 slot_info = &vtype_stack_slots [i];
1240 g_assert (nvtypes < 256);
1241 vtype_stack_slots [nvtypes].vtype = t->data.klass;
1242 slot_info = &vtype_stack_slots [nvtypes];
1245 if (cfg->disable_reuse_ref_stack_slots)
1252 #if SIZEOF_VOID_P == 4
1257 if (cfg->disable_ref_noref_stack_slot_share) {
1258 slot_info = &scalar_stack_slots [MONO_TYPE_I];
1263 case MONO_TYPE_CLASS:
1264 case MONO_TYPE_OBJECT:
1265 case MONO_TYPE_ARRAY:
1266 case MONO_TYPE_SZARRAY:
1267 case MONO_TYPE_STRING:
1268 /* Share non-float stack slots of the same size */
1269 slot_info = &scalar_stack_slots [MONO_TYPE_CLASS];
1270 if (cfg->disable_reuse_ref_stack_slots)
1275 slot_info = &scalar_stack_slots [t->type];
1279 if (cfg->comp_done & MONO_COMP_LIVENESS) {
1283 //printf ("START %2d %08x %08x\n", vmv->idx, vmv->range.first_use.abs_pos, vmv->range.last_use.abs_pos);
/* No live range at all: either address-taken/volatile, or truly dead. */
1285 if (!current->interval->range) {
1286 if (inst->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT))
1290 inst->flags |= MONO_INST_IS_DEAD;
1295 pos = current->interval->range->from;
1297 LSCAN_DEBUG (printf ("process R%d ", inst->dreg));
1298 if (current->interval->range)
1299 LSCAN_DEBUG (mono_linterval_print (current->interval));
1300 LSCAN_DEBUG (printf ("\n"));
1302 /* Check for intervals in active which expired or inactive */
1304 /* FIXME: Optimize this */
1307 for (l = slot_info->active; l != NULL; l = l->next) {
1308 MonoMethodVar *v = (MonoMethodVar*)l->data;
1310 if (v->interval->last_range->to < pos) {
1311 slot_info->active = g_list_delete_link (slot_info->active, l);
/* Expired interval: its slot becomes available for reuse. */
1312 slot_info->slots = g_slist_prepend_mempool (cfg->mempool, slot_info->slots, GINT_TO_POINTER (offsets [v->idx]));
1313 LSCAN_DEBUG (printf ("Interval R%d has expired, adding 0x%x to slots\n", cfg->varinfo [v->idx]->dreg, offsets [v->idx]));
1317 else if (!mono_linterval_covers (v->interval, pos)) {
1318 slot_info->inactive = g_list_append (slot_info->inactive, v);
1319 slot_info->active = g_list_delete_link (slot_info->active, l);
1320 LSCAN_DEBUG (printf ("Interval R%d became inactive\n", cfg->varinfo [v->idx]->dreg));
1327 /* Check for intervals in inactive which expired or active */
1329 /* FIXME: Optimize this */
1332 for (l = slot_info->inactive; l != NULL; l = l->next) {
1333 MonoMethodVar *v = (MonoMethodVar*)l->data;
1335 if (v->interval->last_range->to < pos) {
1336 slot_info->inactive = g_list_delete_link (slot_info->inactive, l);
1337 // FIXME: Enabling this seems to cause impossible to debug crashes
1338 //slot_info->slots = g_slist_prepend_mempool (cfg->mempool, slot_info->slots, GINT_TO_POINTER (offsets [v->idx]));
1339 LSCAN_DEBUG (printf ("Interval R%d has expired, adding 0x%x to slots\n", cfg->varinfo [v->idx]->dreg, offsets [v->idx]));
1343 else if (mono_linterval_covers (v->interval, pos)) {
1344 slot_info->active = g_list_append (slot_info->active, v);
1345 slot_info->inactive = g_list_delete_link (slot_info->inactive, l);
1346 LSCAN_DEBUG (printf ("\tInterval R%d became active\n", cfg->varinfo [v->idx]->dreg));
1354 * This also handles the case when the variable is used in an
1355 * exception region, as liveness info is not computed there.
1358 * FIXME: All valuetypes are marked as INDIRECT because of LDADDR
/* Only non-volatile, non-address-taken variables may reuse a freed slot. */
1361 if (! (inst->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT))) {
1362 if (slot_info->slots) {
1363 slot = GPOINTER_TO_INT (slot_info->slots->data);
1365 slot_info->slots = slot_info->slots->next;
1368 /* FIXME: We might want to consider the inactive intervals as well if slot_info->slots is empty */
1370 slot_info->active = mono_varlist_insert_sorted (cfg, slot_info->active, vmv, TRUE);
/* Debug bisection hook driven by the COUNT3 environment variable. */
1376 static int count = 0;
1379 if (count == atoi (g_getenv ("COUNT3")))
1380 printf ("LAST: %s\n", mono_method_full_name (cfg->method, TRUE));
1381 if (count > atoi (g_getenv ("COUNT3")))
1384 mono_print_ins (inst);
1389 LSCAN_DEBUG (printf ("R%d %s -> 0x%x\n", inst->dreg, mono_type_full_name (t), slot));
/* LMF variables get the fixed LMF size/alignment regardless of type. */
1391 if (inst->flags & MONO_INST_LMF) {
1392 size = sizeof (MonoLMF);
1393 align = sizeof (mgreg_t);
/* 0xffffff == no reusable slot found: carve a fresh one from the frame. */
1400 if (slot == 0xffffff) {
1402 * Always allocate valuetypes to sizeof (gpointer) to allow more
1403 * efficient copying (and to work around the fact that OP_MEMCPY
1404 * and OP_MEMSET ignore alignment).
1406 if (MONO_TYPE_ISSTRUCT (t)) {
1407 align = MAX (align, sizeof (gpointer));
1408 align = MAX (align, mono_class_min_align (mono_class_from_mono_type (t)));
/* Round the running offset up to the required alignment. */
1413 offset += align - 1;
1414 offset &= ~(align - 1);
1418 offset += align - 1;
1419 offset &= ~(align - 1);
1424 if (*stack_align == 0)
1425 *stack_align = align;
1428 offsets [vmv->idx] = slot;
/* Free the temporary active lists; the slot free-lists are mempool-backed. */
1431 for (i = 0; i < MONO_TYPE_PINNED; ++i) {
1432 if (scalar_stack_slots [i].active)
1433 g_list_free (scalar_stack_slots [i].active);
1435 for (i = 0; i < nvtypes; ++i) {
1436 if (vtype_stack_slots [i].active)
1437 g_list_free (vtype_stack_slots [i].active);
1440 cfg->stat_locals_stack_size += offset;
1442 *stack_size = offset;
1447 * mono_allocate_stack_slots:
1449 * Allocate stack slots for all non register allocated variables using a
1450 * linear scan algorithm.
1451 * Returns: an array of stack offsets.
1452 * STACK_SIZE is set to the amount of stack space needed.
1453 * STACK_ALIGN is set to the alignment needed by the locals area.
1456 mono_allocate_stack_slots (MonoCompile *cfg, gboolean backward, guint32 *stack_size, guint32 *stack_align)
1458 int i, slot, offset, size;
1463 GList *vars = NULL, *l;
1464 StackSlotInfo *scalar_stack_slots, *vtype_stack_slots, *slot_info;
1467 gboolean reuse_slot;
/* When precise liveness intervals are available, defer to the interval-based allocator. */
1469 if ((cfg->num_varinfo > 0) && MONO_VARINFO (cfg, 0)->interval)
1470 return mono_allocate_stack_slots2 (cfg, backward, stack_size, stack_align);
/* One free-slot pool per scalar type; vtype pools are allocated lazily below. */
1472 scalar_stack_slots = (StackSlotInfo *)mono_mempool_alloc0 (cfg->mempool, sizeof (StackSlotInfo) * MONO_TYPE_PINNED);
1473 vtype_stack_slots = NULL;
1476 offsets = (gint32 *)mono_mempool_alloc (cfg->mempool, sizeof (gint32) * cfg->num_varinfo);
1477 for (i = 0; i < cfg->num_varinfo; ++i)
/* Collect locals still needing a stack slot; dead or register-allocated vars are skipped. */
1480 for (i = cfg->locals_start; i < cfg->num_varinfo; i++) {
1481 inst = cfg->varinfo [i];
1482 vmv = MONO_VARINFO (cfg, i);
1484 if ((inst->flags & (MONO_INST_IS_DEAD)) || inst->opcode == OP_REGVAR || inst->opcode == OP_REGOFFSET)
1487 vars = g_list_prepend (vars, vmv);
1490 vars = mono_varlist_sort (cfg, vars, 0);
1492 *stack_align = sizeof(mgreg_t);
/* Main linear-scan loop: pick a (possibly reused) slot for each variable. */
1493 for (l = vars; l; l = l->next) {
1494 vmv = (MonoMethodVar *)l->data;
1495 inst = cfg->varinfo [vmv->idx];
1497 t = mono_type_get_underlying_type (inst->inst_vtype);
1498 if (cfg->gsharedvt && mini_is_gsharedvt_variable_type (t))
1501 /* inst->backend.is_pinvoke indicates native sized value types, this is used by the
1502 * pinvoke wrappers when they call functions returning structures */
1503 if (inst->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (t) && t->type != MONO_TYPE_TYPEDBYREF) {
1504 size = mono_class_native_size (mono_class_from_mono_type (t), &align);
1508 size = mini_type_stack_size (t, &ialign);
1511 if (mono_class_has_failure (mono_class_from_mono_type (t)))
1512 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
1514 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (t)))
1519 if (cfg->disable_reuse_stack_slots)
/* Choose the slot pool based on the variable's (underlying) type. */
1522 t = mini_get_underlying_type (t);
1524 case MONO_TYPE_GENERICINST:
1525 if (!mono_type_generic_inst_is_valuetype (t)) {
1526 slot_info = &scalar_stack_slots [t->type];
1530 case MONO_TYPE_VALUETYPE:
1531 if (!vtype_stack_slots)
1532 vtype_stack_slots = (StackSlotInfo *)mono_mempool_alloc0 (cfg->mempool, sizeof (StackSlotInfo) * 256);
/* Linear search for an existing pool for this exact vtype class. */
1533 for (i = 0; i < nvtypes; ++i)
1534 if (t->data.klass == vtype_stack_slots [i].vtype)
1537 slot_info = &vtype_stack_slots [i];
1539 g_assert (nvtypes < 256);
1540 vtype_stack_slots [nvtypes].vtype = t->data.klass;
1541 slot_info = &vtype_stack_slots [nvtypes];
1544 if (cfg->disable_reuse_ref_stack_slots)
1551 #if SIZEOF_VOID_P == 4
1556 if (cfg->disable_ref_noref_stack_slot_share) {
1557 slot_info = &scalar_stack_slots [MONO_TYPE_I];
1562 case MONO_TYPE_CLASS:
1563 case MONO_TYPE_OBJECT:
1564 case MONO_TYPE_ARRAY:
1565 case MONO_TYPE_SZARRAY:
1566 case MONO_TYPE_STRING:
1567 /* Share non-float stack slots of the same size */
1568 slot_info = &scalar_stack_slots [MONO_TYPE_CLASS];
1569 if (cfg->disable_reuse_ref_stack_slots)
1573 case MONO_TYPE_MVAR:
1574 slot_info = &scalar_stack_slots [t->type];
1577 slot_info = &scalar_stack_slots [t->type];
1582 if (cfg->comp_done & MONO_COMP_LIVENESS) {
1583 //printf ("START %2d %08x %08x\n", vmv->idx, vmv->range.first_use.abs_pos, vmv->range.last_use.abs_pos);
1585 /* expire old intervals in active */
1586 while (slot_info->active) {
1587 MonoMethodVar *amv = (MonoMethodVar *)slot_info->active->data;
1589 if (amv->range.last_use.abs_pos > vmv->range.first_use.abs_pos)
1592 //printf ("EXPIR %2d %08x %08x C%d R%d\n", amv->idx, amv->range.first_use.abs_pos, amv->range.last_use.abs_pos, amv->spill_costs, amv->reg);
/* Expired variable: recycle its slot into the free list for this pool. */
1594 slot_info->active = g_list_delete_link (slot_info->active, slot_info->active);
1595 slot_info->slots = g_slist_prepend_mempool (cfg->mempool, slot_info->slots, GINT_TO_POINTER (offsets [amv->idx]));
1599 * This also handles the case when the variable is used in an
1600 * exception region, as liveness info is not computed there.
1603 * FIXME: All valuetypes are marked as INDIRECT because of LDADDR
/* Only non-volatile, non-indirect variables may reuse a freed slot. */
1606 if (! (inst->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT))) {
1607 if (slot_info->slots) {
1608 slot = GPOINTER_TO_INT (slot_info->slots->data);
1610 slot_info->slots = slot_info->slots->next;
1613 slot_info->active = mono_varlist_insert_sorted (cfg, slot_info->active, vmv, TRUE);
/* Debug-only bisection aid driven by the COUNT environment variable. */
1618 static int count = 0;
1622 if (count == atoi (g_getenv ("COUNT")))
1623 printf ("LAST: %s\n", mono_method_full_name (cfg->method, TRUE));
1624 if (count > atoi (g_getenv ("COUNT")))
1627 mono_print_ins (inst);
1632 if (inst->flags & MONO_INST_LMF) {
1634 * This variable represents a MonoLMF structure, which has no corresponding
1635 * CLR type, so hard-code its size/alignment.
1637 size = sizeof (MonoLMF);
1638 align = sizeof (mgreg_t);
/* 0xffffff is the "no reusable slot" sentinel: carve a fresh slot out of the frame. */
1645 if (slot == 0xffffff) {
1647 * Always allocate valuetypes to sizeof (gpointer) to allow more
1648 * efficient copying (and to work around the fact that OP_MEMCPY
1649 * and OP_MEMSET ignore alignment).
1651 if (MONO_TYPE_ISSTRUCT (t)) {
1652 align = MAX (align, sizeof (gpointer));
1653 align = MAX (align, mono_class_min_align (mono_class_from_mono_type (t)));
1655 * Align the size too so the code generated for passing vtypes in
1656 * registers doesn't overwrite random locals.
1658 size = (size + (align - 1)) & ~(align -1);
/* Round the running offset up to the required alignment. */
1663 offset += align - 1;
1664 offset &= ~(align - 1);
1668 offset += align - 1;
1669 offset &= ~(align - 1);
1674 *stack_align = MAX (*stack_align, align);
1677 offsets [vmv->idx] = slot;
/* Free the GLists (heap allocated); the slot pools themselves live in the mempool. */
1680 for (i = 0; i < MONO_TYPE_PINNED; ++i) {
1681 if (scalar_stack_slots [i].active)
1682 g_list_free (scalar_stack_slots [i].active);
1684 for (i = 0; i < nvtypes; ++i) {
1685 if (vtype_stack_slots [i].active)
1686 g_list_free (vtype_stack_slots [i].active);
1689 cfg->stat_locals_stack_size += offset;
1691 *stack_size = offset;
1695 #define EMUL_HIT_SHIFT 3
1696 #define EMUL_HIT_MASK ((1 << EMUL_HIT_SHIFT) - 1)
1697 /* small hit bitmap cache */
/*
 * Bitmap used by mono_find_jit_opcode_emulation () to cheaply reject opcodes
 * with no registered emulation before doing the linear search below.
 * NOTE(review): lookups index this array with opcode >> (EMUL_HIT_SHIFT + 3)
 * while the array is sized with OP_LAST >> EMUL_HIT_SHIFT, so the array is
 * larger than strictly needed and nearby opcodes share bits (the filter is
 * conservative: false positives fall through to the search) — confirm intended.
 */
1698 static mono_byte emul_opcode_hit_cache [(OP_LAST>>EMUL_HIT_SHIFT) + 1] = {0};
/* Parallel arrays mapping opcode -> MonoJitICallInfo, grown by mini_register_opcode_emulation (). */
1699 static short emul_opcode_num = 0;
1700 static short emul_opcode_alloced = 0;
1701 static short *emul_opcode_opcodes;
1702 static MonoJitICallInfo **emul_opcode_map;
/*
 * mono_find_jit_opcode_emulation:
 *
 *   Return the icall registered as the emulation for OPCODE, or NULL if none.
 * The hit-bitmap check avoids the linear scan for opcodes that were never
 * registered; registered entries are added by mini_register_opcode_emulation ().
 */
1705 mono_find_jit_opcode_emulation (int opcode)
1707 g_assert (opcode >= 0 && opcode <= OP_LAST);
1708 if (emul_opcode_hit_cache [opcode >> (EMUL_HIT_SHIFT + 3)] & (1 << (opcode & EMUL_HIT_MASK))) {
/* Bitmap hit: do the linear search over the registered opcodes. */
1710 for (i = 0; i < emul_opcode_num; ++i) {
1711 if (emul_opcode_opcodes [i] == opcode)
1712 return emul_opcode_map [i];
/*
 * mini_register_opcode_emulation:
 *
 *   Register FUNC (with icall signature SIGSTR) as the software emulation for
 * OPCODE. The emulation must take at most 2 arguments and have no 'this'.
 * Not thread safe from what is visible here: the map arrays and the hit cache
 * are plain statics — presumably only called during runtime init; confirm.
 */
1719 mini_register_opcode_emulation (int opcode, const char *name, const char *sigstr, gpointer func, const char *symbol, gboolean no_throw)
1721 MonoJitICallInfo *info;
1722 MonoMethodSignature *sig = mono_create_icall_signature (sigstr);
1724 g_assert (!sig->hasthis);
1725 g_assert (sig->param_count < 3);
1727 /* Opcode emulation functions are assumed not to call mono_raise_exception () */
1728 info = mono_register_jit_icall_full (func, name, sig, no_throw, TRUE, symbol);
/* Grow the parallel arrays geometrically (start at 16, then +50%). */
1730 if (emul_opcode_num >= emul_opcode_alloced) {
1731 int incr = emul_opcode_alloced? emul_opcode_alloced/2: 16;
1732 emul_opcode_alloced += incr;
1733 emul_opcode_map = (MonoJitICallInfo **)g_realloc (emul_opcode_map, sizeof (emul_opcode_map [0]) * emul_opcode_alloced);
1734 emul_opcode_opcodes = (short *)g_realloc (emul_opcode_opcodes, sizeof (emul_opcode_opcodes [0]) * emul_opcode_alloced);
1736 emul_opcode_map [emul_opcode_num] = info;
1737 emul_opcode_opcodes [emul_opcode_num] = opcode;
/* Mark the opcode in the hit bitmap so lookups know to search the arrays. */
1739 emul_opcode_hit_cache [opcode >> (EMUL_HIT_SHIFT + 3)] |= (1 << (opcode & EMUL_HIT_MASK));
/*
 * print_dfn:
 *
 *   Debug helper: dump the IR of CFG in depth-first (dfn) order — for each
 * basic block its instructions, predecessor/successor edges, immediate
 * dominator, dominator set and dominance frontier.
 */
1743 print_dfn (MonoCompile *cfg)
1751 char *method_name = mono_method_full_name (cfg->method, TRUE);
1752 g_print ("IR code for method %s\n", method_name);
1753 g_free (method_name);
1756 for (i = 0; i < cfg->num_bblocks; ++i) {
1757 bb = cfg->bblocks [i];
1758 /*if (bb->cil_code) {
1759 char* code1, *code2;
1760 code1 = mono_disasm_code_one (NULL, cfg->method, bb->cil_code, NULL);
1761 if (bb->last_ins->cil_code)
1762 code2 = mono_disasm_code_one (NULL, cfg->method, bb->last_ins->cil_code, NULL);
1764 code2 = g_strdup ("");
1766 code1 [strlen (code1) - 1] = 0;
1767 code = g_strdup_printf ("%s -> %s", code1, code2);
1771 code = g_strdup ("\n");
1772 g_print ("\nBB%d (%d) (len: %d): %s", bb->block_num, i, bb->cil_length, code);
1773 MONO_BB_FOR_EACH_INS (bb, c) {
1774 mono_print_ins_index (-1, c);
/* Print CFG edges: predecessors then successors. */
1777 g_print ("\tprev:");
1778 for (j = 0; j < bb->in_count; ++j) {
1779 g_print (" BB%d", bb->in_bb [j]->block_num);
1781 g_print ("\t\tsucc:");
1782 for (j = 0; j < bb->out_count; ++j) {
1783 g_print (" BB%d", bb->out_bb [j]->block_num);
1785 g_print ("\n\tidom: BB%d\n", bb->idom? bb->idom->block_num: -1);
/* Sanity check: the idom must be in the block's dominator set. */
1788 g_assert (mono_bitset_test_fast (bb->dominators, bb->idom->dfn));
1791 mono_blockset_print (cfg, bb->dominators, "\tdominators", bb->idom? bb->idom->dfn: -1);
1793 mono_blockset_print (cfg, bb->dfrontier, "\tdfrontier", -1);
/* Append INST to the end of BB (thin wrapper around MONO_ADD_INS). */
1801 mono_bblock_add_inst (MonoBasicBlock *bb, MonoInst *inst)
1803 MONO_ADD_INS (bb, inst);
/*
 * mono_bblock_insert_after_ins:
 *
 *   Insert INS_TO_INSERT into BB's doubly-linked instruction list immediately
 * after INS, updating bb->code/bb->last_ins as needed. The visible branch
 * handles the head-of-list case (presumably INS == NULL — confirm at the
 * elided condition) separately from the general case.
 */
1807 mono_bblock_insert_after_ins (MonoBasicBlock *bb, MonoInst *ins, MonoInst *ins_to_insert)
/* Head case: new instruction becomes the first in the block. */
1811 bb->code = ins_to_insert;
1813 /* Link with next */
1814 ins_to_insert->next = ins;
1816 ins->prev = ins_to_insert;
1818 if (bb->last_ins == NULL)
1819 bb->last_ins = ins_to_insert;
/* General case: splice between INS and its successor. */
1821 /* Link with next */
1822 ins_to_insert->next = ins->next;
1824 ins->next->prev = ins_to_insert;
1826 /* Link with previous */
1827 ins->next = ins_to_insert;
1828 ins_to_insert->prev = ins;
1830 if (bb->last_ins == ins)
1831 bb->last_ins = ins_to_insert;
/*
 * mono_bblock_insert_before_ins:
 *
 *   Insert INS_TO_INSERT into BB's instruction list immediately before INS,
 * updating bb->code/bb->last_ins as needed. The first visible branch handles
 * insertion at the head of the block; the second splices before INS in the
 * middle of the list.
 */
1836 mono_bblock_insert_before_ins (MonoBasicBlock *bb, MonoInst *ins, MonoInst *ins_to_insert)
/* Head case: new instruction becomes bb->code. */
1841 ins->prev = ins_to_insert;
1842 bb->code = ins_to_insert;
1843 ins_to_insert->next = ins;
1844 if (bb->last_ins == NULL)
1845 bb->last_ins = ins_to_insert;
1847 /* Link with previous */
1849 ins->prev->next = ins_to_insert;
1850 ins_to_insert->prev = ins->prev;
1852 /* Link with next */
1853 ins->prev = ins_to_insert;
1854 ins_to_insert->next = ins;
1856 if (bb->code == ins)
1857 bb->code = ins_to_insert;
1862 * mono_verify_bblock:
1864 * Verify that the next and prev pointers are consistent inside the instructions in BB.
1867 mono_verify_bblock (MonoBasicBlock *bb)
1869 MonoInst *ins, *prev;
/* Walk forward, checking every instruction's prev link matches the walk. */
1872 for (ins = bb->code; ins; ins = ins->next) {
1873 g_assert (ins->prev == prev);
/* The recorded last instruction must really be the tail of the list. */
1877 g_assert (!bb->last_ins->next);
1883 * Perform consistency checks on the JIT data structures and the IR
/* Currently just verifies every basic block's instruction list linkage. */
1886 mono_verify_cfg (MonoCompile *cfg)
1890 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
1891 mono_verify_bblock (bb);
1894 // This will free many fields in cfg to save
1895 // memory. Note that this must be safe to call
1896 // multiple times. It must be idempotent.
1898 mono_empty_compile (MonoCompile *cfg)
1900 mono_free_loop_info (cfg);
1902 // These live in the mempool, and so must be freed
// before the mempool itself is destroyed below.
1904 for (GSList *l = cfg->headers_to_free; l; l = l->next) {
1905 mono_metadata_free_mh ((MonoMethodHeader *)l->data);
// NULL the list so a second call does not double-free the headers (idempotence).
1907 cfg->headers_to_free = NULL;
1910 //mono_mempool_stats (cfg->mempool);
1911 mono_mempool_destroy (cfg->mempool);
1912 cfg->mempool = NULL;
1915 g_free (cfg->varinfo);
1916 cfg->varinfo = NULL;
1922 mono_regstate_free (cfg->rs);
/*
 * mono_destroy_compile:
 *
 *   Free all memory owned by CFG: first the bulk data via mono_empty_compile (),
 * then the remaining per-compile hash tables, lists and strings.
 */
1928 mono_destroy_compile (MonoCompile *cfg)
1930 mono_empty_compile (cfg);
1933 mono_metadata_free_mh (cfg->header);
1936 g_hash_table_destroy (cfg->spvars);
1938 g_hash_table_destroy (cfg->exvars);
1940 g_list_free (cfg->ldstr_list);
1942 if (cfg->token_info_hash)
1943 g_hash_table_destroy (cfg->token_info_hash);
1945 if (cfg->abs_patches)
1946 g_hash_table_destroy (cfg->abs_patches);
1948 mono_debug_free_method (cfg);
/* NOTE(review): cfg->varinfo is also freed (and NULLed) in mono_empty_compile ();
 * this g_free is safe only because that call set it to NULL — confirm. */
1950 g_free (cfg->varinfo);
1952 g_free (cfg->exception_message);
/*
 * mono_add_patch_info:
 *
 *   Prepend a new patch record (TYPE/TARGET at native ip offset IP) to
 * cfg->patch_info. The record is mempool-allocated, so it needs no explicit free.
 */
1957 mono_add_patch_info (MonoCompile *cfg, int ip, MonoJumpInfoType type, gconstpointer target)
1959 MonoJumpInfo *ji = (MonoJumpInfo *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfo));
1963 ji->data.target = target;
1964 ji->next = cfg->patch_info;
1966 cfg->patch_info = ji;
/*
 * mono_add_patch_info_rel:
 *
 *   Same as mono_add_patch_info () but also records the RELOCATION kind
 * to apply when the patch is resolved.
 */
1970 mono_add_patch_info_rel (MonoCompile *cfg, int ip, MonoJumpInfoType type, gconstpointer target, int relocation)
1972 MonoJumpInfo *ji = (MonoJumpInfo *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfo));
1976 ji->relocation = relocation;
1977 ji->data.target = target;
1978 ji->next = cfg->patch_info;
1980 cfg->patch_info = ji;
/*
 * mono_remove_patch_info:
 *
 *   Unlink from cfg->patch_info every patch record whose ip offset equals IP,
 * walking the list with a pointer-to-pointer so removal needs no prev tracking.
 */
1984 mono_remove_patch_info (MonoCompile *cfg, int ip)
1986 MonoJumpInfo **ji = &cfg->patch_info;
1989 if ((*ji)->ip.i == ip)
1992 ji = &((*ji)->next);
/*
 * mono_add_seq_point:
 *
 *   Record INS as a sequence point at NATIVE_OFFSET: append it to the
 * per-method list (cfg->seq_points) and to BB's own list, and remember it
 * as the block's last sequence point.
 */
1997 mono_add_seq_point (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst *ins, int native_offset)
1999 ins->inst_offset = native_offset;
2000 g_ptr_array_add (cfg->seq_points, ins);
2002 bb->seq_points = g_slist_prepend_mempool (cfg->mempool, bb->seq_points, ins);
2003 bb->last_seq_point = ins;
/*
 * mono_add_var_location:
 *
 *   Append a DWARF-style location-list entry for VAR, valid for native code
 * range [FROM, TO): either register REG (IS_REG, in which case OFFSET must be 0)
 * or OFFSET from a base. Only the 'this' argument and the rgctx variable are
 * tracked (cfg->this_loclist / cfg->rgctx_loclist).
 */
2008 mono_add_var_location (MonoCompile *cfg, MonoInst *var, gboolean is_reg, int reg, int offset, int from, int to)
2010 MonoDwarfLocListEntry *entry = (MonoDwarfLocListEntry *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoDwarfLocListEntry));
2013 g_assert (offset == 0);
2015 entry->is_reg = is_reg;
2017 entry->offset = offset;
2021 if (var == cfg->args [0])
2022 cfg->this_loclist = g_slist_append_mempool (cfg->mempool, cfg->this_loclist, entry);
2023 else if (var == cfg->rgctx_var)
2024 cfg->rgctx_loclist = g_slist_append_mempool (cfg->mempool, cfg->rgctx_loclist, entry);
/*
 * mono_compile_create_vars:
 *
 *   Create the MonoInst variables for the method being compiled: the return
 * value, 'this', the arguments, the IL locals, any backend-specific vars
 * (mono_arch_create_vars / mono_llvm_create_vars) and, when needed, the LMF var.
 */
2028 mono_compile_create_vars (MonoCompile *cfg)
2030 MonoMethodSignature *sig;
2031 MonoMethodHeader *header;
2034 header = cfg->header;
2036 sig = mono_method_signature (cfg->method);
2038 if (!MONO_TYPE_IS_VOID (sig->ret)) {
2039 cfg->ret = mono_compile_create_var (cfg, sig->ret, OP_ARG);
2040 /* Inhibit optimizations */
2041 cfg->ret->flags |= MONO_INST_VOLATILE;
2043 if (cfg->verbose_level > 2)
2044 g_print ("creating vars\n");
/* args [] holds 'this' (if any) followed by the declared parameters. */
2046 cfg->args = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, (sig->param_count + sig->hasthis) * sizeof (MonoInst*));
2049 cfg->args [0] = mono_compile_create_var (cfg, &cfg->method->klass->this_arg, OP_ARG);
2050 cfg->this_arg = cfg->args [0];
2053 for (i = 0; i < sig->param_count; ++i) {
2054 cfg->args [i + sig->hasthis] = mono_compile_create_var (cfg, sig->params [i], OP_ARG);
/* Verbose dump of the variables just created. */
2057 if (cfg->verbose_level > 2) {
2059 printf ("\treturn : ");
2060 mono_print_ins (cfg->ret);
2064 printf ("\tthis: ");
2065 mono_print_ins (cfg->args [0]);
2068 for (i = 0; i < sig->param_count; ++i) {
2069 printf ("\targ [%d]: ", i);
2070 mono_print_ins (cfg->args [i + sig->hasthis]);
2074 cfg->locals_start = cfg->num_varinfo;
2075 cfg->locals = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, header->num_locals * sizeof (MonoInst*));
2077 if (cfg->verbose_level > 2)
2078 g_print ("creating locals\n");
2080 for (i = 0; i < header->num_locals; ++i) {
2081 if (cfg->verbose_level > 2)
2082 g_print ("\tlocal [%d]: ", i);
2083 cfg->locals [i] = mono_compile_create_var (cfg, header->locals [i], OP_LOCAL);
2086 if (cfg->verbose_level > 2)
2087 g_print ("locals done\n");
/* Backend-specific variables (LLVM path gets its own hook first). */
2090 if (COMPILE_LLVM (cfg))
2091 mono_llvm_create_vars (cfg);
2093 mono_arch_create_vars (cfg);
2095 mono_arch_create_vars (cfg);
/* LMF variable: volatile so it always has a stack slot the unwinder can find. */
2098 if (cfg->method->save_lmf && cfg->create_lmf_var) {
2099 MonoInst *lmf_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2100 lmf_var->flags |= MONO_INST_VOLATILE;
2101 lmf_var->flags |= MONO_INST_LMF;
2102 cfg->lmf_var = lmf_var;
/* Debug helper: print every basic block of CFG, prefixed with MSG. */
2107 mono_print_code (MonoCompile *cfg, const char* msg)
2111 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
2112 mono_print_bb (bb, msg);
/*
 * mono_postprocess_patches:
 *
 *   Walk cfg->patch_info after code emission and normalize patches that need
 * extra handling: resolve ABS (raw address) patches into symbolic ones,
 * materialize switch tables into executable memory, and register jump sites
 * in the domain's jump_target_hash.
 */
2116 mono_postprocess_patches (MonoCompile *cfg)
2118 MonoJumpInfo *patch_info;
2121 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
2122 switch (patch_info->type) {
2123 case MONO_PATCH_INFO_ABS: {
2124 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (patch_info->data.target);
2127 * Change patches of type MONO_PATCH_INFO_ABS into patches describing the
2131 //printf ("TEST %s %p\n", info->name, patch_info->data.target);
2132 /* for these array methods we currently register the same function pointer
2133 * since it's a vararg function. But this means that mono_find_jit_icall_by_addr ()
2134 * will return the incorrect one depending on the order they are registered.
2135 * See tests/test-arr.cs
2137 if (strstr (info->name, "ves_array_new_va_") == NULL && strstr (info->name, "ves_array_element_address_") == NULL) {
2138 patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
2139 patch_info->data.name = info->name;
/* Still ABS (no icall found): try the per-compile abs_patches table. */
2143 if (patch_info->type == MONO_PATCH_INFO_ABS) {
2144 if (cfg->abs_patches) {
2145 MonoJumpInfo *abs_ji = (MonoJumpInfo *)g_hash_table_lookup (cfg->abs_patches, patch_info->data.target);
2147 patch_info->type = abs_ji->type;
2148 patch_info->data.target = abs_ji->data.target;
2155 case MONO_PATCH_INFO_SWITCH: {
/* Allocate the jump table from the same code manager the method's code uses. */
2157 if (cfg->method->dynamic) {
2158 table = (void **)mono_code_manager_reserve (cfg->dynamic_info->code_mp, sizeof (gpointer) * patch_info->data.table->table_size);
2160 table = (void **)mono_domain_code_reserve (cfg->domain, sizeof (gpointer) * patch_info->data.table->table_size);
2163 for (i = 0; i < patch_info->data.table->table_size; i++) {
2164 /* Might be NULL if the switch is eliminated */
2165 if (patch_info->data.table->table [i]) {
2166 g_assert (patch_info->data.table->table [i]->native_offset);
2167 table [i] = GINT_TO_POINTER (patch_info->data.table->table [i]->native_offset);
2172 patch_info->data.table->table = (MonoBasicBlock**)table;
2175 case MONO_PATCH_INFO_METHOD_JUMP: {
2176 MonoJumpList *jlist;
2177 MonoDomain *domain = cfg->domain;
2178 unsigned char *ip = cfg->native_code + patch_info->ip.i;
/* Record this jump site so the target can repatch it later; guarded by the domain lock. */
2180 mono_domain_lock (domain);
2181 jlist = (MonoJumpList *)g_hash_table_lookup (domain_jit_info (domain)->jump_target_hash, patch_info->data.method);
2183 jlist = (MonoJumpList *)mono_domain_alloc0 (domain, sizeof (MonoJumpList));
2184 g_hash_table_insert (domain_jit_info (domain)->jump_target_hash, patch_info->data.method, jlist);
2186 jlist->list = g_slist_prepend (jlist->list, ip);
2187 mono_domain_unlock (domain);
/*
 * mono_codegen:
 *
 *   Final native code generation for CFG: per-block lowering, peephole passes
 * and local register allocation; prolog/body/epilog emission; allocation of
 * the final code buffer (per-domain, per-dynamic-method, or root-domain under
 * xdebug); patch resolution; commit and icache flush.
 */
2198 mono_codegen (MonoCompile *cfg)
2201 int max_epilog_size;
2203 MonoDomain *code_domain;
2204 guint unwindlen = 0;
2206 if (mono_using_xdebug)
2208 * Recent gdb versions have trouble processing symbol files containing
2209 * overlapping address ranges, so allocate all code from the code manager
2210 * of the root domain. (#666152).
2212 code_domain = mono_get_root_domain ();
2214 code_domain = cfg->domain;
/* Per-block backend passes before any code is emitted. */
2216 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
2217 cfg->spill_count = 0;
2218 /* we reuse dfn here */
2219 /* bb->dfn = bb_count++; */
2221 mono_arch_lowering_pass (cfg, bb);
2223 if (cfg->opt & MONO_OPT_PEEPHOLE)
2224 mono_arch_peephole_pass_1 (cfg, bb);
2226 mono_local_regalloc (cfg, bb);
2228 if (cfg->opt & MONO_OPT_PEEPHOLE)
2229 mono_arch_peephole_pass_2 (cfg, bb);
2231 if (cfg->gen_seq_points && !cfg->gen_sdb_seq_points)
2232 mono_bb_deduplicate_op_il_seq_points (cfg, bb);
/* Emit prolog into the temporary cfg->native_code buffer. */
2235 code = mono_arch_emit_prolog (cfg);
2237 cfg->code_len = code - cfg->native_code;
2238 cfg->prolog_end = cfg->code_len;
2239 cfg->cfa_reg = cfg->cur_cfa_reg;
2240 cfg->cfa_offset = cfg->cur_cfa_offset;
2242 mono_debug_open_method (cfg);
2244 /* emit code all basic blocks */
2245 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
2246 bb->native_offset = cfg->code_len;
2247 bb->real_native_offset = cfg->code_len;
2248 //if ((bb == cfg->bb_entry) || !(bb->region == -1 && !bb->dfn))
2249 mono_arch_output_basic_block (cfg, bb);
2250 bb->native_length = cfg->code_len - bb->native_offset;
2252 if (bb == cfg->bb_exit) {
2253 cfg->epilog_begin = cfg->code_len;
2254 mono_arch_emit_epilog (cfg);
2255 cfg->epilog_end = cfg->code_len;
2258 if (bb->clause_hole)
2259 mono_cfg_add_try_hole (cfg, bb->clause_hole, cfg->native_code + bb->native_offset, bb);
2262 mono_arch_emit_exceptions (cfg);
2264 max_epilog_size = 0;
2266 /* we always allocate code in cfg->domain->code_mp to increase locality */
2267 cfg->code_size = cfg->code_len + max_epilog_size;
2269 /* fixme: align to MONO_ARCH_CODE_ALIGNMENT */
2271 #ifdef MONO_ARCH_HAVE_UNWIND_TABLE
2272 unwindlen = mono_arch_unwindinfo_init_method_unwind_info (cfg);
/* Reserve the final code buffer (layout: code | [unwind] | thunks). */
2275 if (cfg->method->dynamic) {
2276 /* Allocate the code into a separate memory pool so it can be freed */
2277 cfg->dynamic_info = g_new0 (MonoJitDynamicMethodInfo, 1);
2278 cfg->dynamic_info->code_mp = mono_code_manager_new_dynamic ();
2279 mono_domain_lock (cfg->domain);
2280 mono_dynamic_code_hash_insert (cfg->domain, cfg->method, cfg->dynamic_info);
2281 mono_domain_unlock (cfg->domain);
2283 if (mono_using_xdebug)
2284 /* See the comment for cfg->code_domain */
2285 code = (guint8 *)mono_domain_code_reserve (code_domain, cfg->code_size + cfg->thunk_area + unwindlen);
2287 code = (guint8 *)mono_code_manager_reserve (cfg->dynamic_info->code_mp, cfg->code_size + cfg->thunk_area + unwindlen);
2289 code = (guint8 *)mono_domain_code_reserve (code_domain, cfg->code_size + cfg->thunk_area + unwindlen);
2292 if (cfg->thunk_area) {
2293 cfg->thunks_offset = cfg->code_size + unwindlen;
2294 cfg->thunks = code + cfg->thunks_offset;
2295 memset (cfg->thunks, 0, cfg->thunk_area);
/* Copy from the temporary emission buffer into the final location and free it. */
2299 memcpy (code, cfg->native_code, cfg->code_len);
2300 g_free (cfg->native_code);
2301 cfg->native_code = code;
2302 code = cfg->native_code + cfg->code_len;
2304 /* g_assert (((int)cfg->native_code & (MONO_ARCH_CODE_ALIGNMENT - 1)) == 0); */
2305 mono_postprocess_patches (cfg);
2307 #ifdef VALGRIND_JIT_REGISTER_MAP
2308 if (valgrind_register){
2309 char* nm = mono_method_full_name (cfg->method, TRUE);
2310 VALGRIND_JIT_REGISTER_MAP (nm, cfg->native_code, cfg->native_code + cfg->code_len);
2315 if (cfg->verbose_level > 0) {
2316 char* nm = mono_method_get_full_name (cfg->method);
2317 g_print ("Method %s emitted at %p to %p (code length %d) [%s]\n",
2319 cfg->native_code, cfg->native_code + cfg->code_len, cfg->code_len, cfg->domain->friendly_name);
2324 gboolean is_generic = FALSE;
2326 if (cfg->method->is_inflated || mono_method_get_generic_container (cfg->method) ||
2327 mono_class_is_gtd (cfg->method->klass) || mono_class_is_ginst (cfg->method->klass)) {
2332 g_assert (is_generic);
2335 #ifdef MONO_ARCH_HAVE_SAVE_UNWIND_INFO
2336 mono_arch_save_unwind_info (cfg);
/* Apply patches: newer per-patch API when available, legacy bulk API otherwise. */
2339 #ifdef MONO_ARCH_HAVE_PATCH_CODE_NEW
2344 for (ji = cfg->patch_info; ji; ji = ji->next) {
2345 if (cfg->compile_aot) {
2347 case MONO_PATCH_INFO_BB:
2348 case MONO_PATCH_INFO_LABEL:
2351 /* No need to patch these */
2356 if (ji->type == MONO_PATCH_INFO_NONE)
2359 target = mono_resolve_patch_target (cfg->method, cfg->domain, cfg->native_code, ji, cfg->run_cctors, &cfg->error);
2360 if (!mono_error_ok (&cfg->error)) {
2361 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
2364 mono_arch_patch_code_new (cfg, cfg->domain, cfg->native_code, ji, target);
2368 mono_arch_patch_code (cfg, cfg->method, cfg->domain, cfg->native_code, cfg->patch_info, cfg->run_cctors, &cfg->error);
2369 if (!is_ok (&cfg->error)) {
2370 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
/* Commit the reserved buffer down to what was actually used. */
2375 if (cfg->method->dynamic) {
2376 if (mono_using_xdebug)
2377 mono_domain_code_commit (code_domain, cfg->native_code, cfg->code_size, cfg->code_len);
2379 mono_code_manager_commit (cfg->dynamic_info->code_mp, cfg->native_code, cfg->code_size, cfg->code_len);
2381 mono_domain_code_commit (code_domain, cfg->native_code, cfg->code_size, cfg->code_len);
2383 MONO_PROFILER_RAISE (jit_code_buffer, (cfg->native_code, cfg->code_len, MONO_PROFILER_CODE_BUFFER_METHOD, cfg->method));
2385 mono_arch_flush_icache (cfg->native_code, cfg->code_len);
2387 mono_debug_close_method (cfg);
2389 #ifdef MONO_ARCH_HAVE_UNWIND_TABLE
2390 mono_arch_unwindinfo_install_method_unwind_info (&cfg->arch.unwindinfo, cfg->native_code, cfg->code_len);
/*
 * compute_reachable:
 *
 *   Depth-first flood fill: mark BB and everything reachable from it with
 * BB_VISITED. Callers clear the flag beforehand (see mono_bb_ordering).
 */
2395 compute_reachable (MonoBasicBlock *bb)
2399 if (!(bb->flags & BB_VISITED)) {
2400 bb->flags |= BB_VISITED;
2401 for (i = 0; i < bb->out_count; ++i)
2402 compute_reachable (bb->out_bb [i]);
/*
 * mono_bb_ordering:
 *
 *   Number the basic blocks in depth-first order (df_visit), then detect and
 * empty unreachable blocks so later passes don't see inconsistent code
 * (e.g. uses of dead variables). Exception handler blocks are treated as
 * additional roots since they are not reached via normal CFG edges.
 */
2406 static void mono_bb_ordering (MonoCompile *cfg)
2409 /* Depth-first ordering on basic blocks */
2410 cfg->bblocks = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * (cfg->num_bblocks + 1));
2412 cfg->max_block_num = cfg->num_bblocks;
2414 df_visit (cfg->bb_entry, &dfn, cfg->bblocks);
/* dfn + 1 < num_bblocks means some blocks were not reached by the DFS. */
2415 if (cfg->num_bblocks != dfn + 1) {
2418 cfg->num_bblocks = dfn + 1;
2420 /* remove unreachable code, because the code in them may be
2421 * inconsistent (access to dead variables for example) */
2422 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
2423 bb->flags &= ~BB_VISITED;
2424 compute_reachable (cfg->bb_entry);
2425 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
2426 if (bb->flags & BB_EXCEPTION_HANDLER)
2427 compute_reachable (bb);
2428 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
2429 if (!(bb->flags & BB_VISITED)) {
2430 if (cfg->verbose_level > 1)
2431 g_print ("found unreachable code in BB%d\n", bb->block_num);
/* Unreachable: drop the block's instructions and disconnect its out edges. */
2432 bb->code = bb->last_ins = NULL;
2433 while (bb->out_count)
2434 mono_unlink_bblock (cfg, bb, bb->out_bb [0]);
/* Leave BB_VISITED clear for the next pass that wants to use it. */
2437 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
2438 bb->flags &= ~BB_VISITED;
/*
 * mono_handle_out_of_line_bblock:
 *
 *   When a block falls through into an out-of-line successor, append an
 * explicit OP_BR so the fall-through still works after the successor is
 * moved out of line.
 */
2443 mono_handle_out_of_line_bblock (MonoCompile *cfg)
2446 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
2447 if (bb->next_bb && bb->next_bb->out_of_line && bb->last_ins && !MONO_IS_BRANCH_OP (bb->last_ins)) {
2449 MONO_INST_NEW (cfg, ins, OP_BR);
2450 MONO_ADD_INS (bb, ins);
2451 ins->inst_target_bb = bb->next_bb;
2457 create_jit_info (MonoCompile *cfg, MonoMethod *method_to_compile)
2460 MonoMethodHeader *header;
2462 MonoJitInfoFlags flags = JIT_INFO_NONE;
2463 int num_clauses, num_holes = 0;
2464 guint32 stack_size = 0;
2466 g_assert (method_to_compile == cfg->method);
2467 header = cfg->header;
2470 flags = (MonoJitInfoFlags)(flags | JIT_INFO_HAS_GENERIC_JIT_INFO);
2472 if (cfg->arch_eh_jit_info) {
2473 MonoJitArgumentInfo *arg_info;
2474 MonoMethodSignature *sig = mono_method_signature (cfg->method_to_register);
2477 * This cannot be computed during stack walking, as
2478 * mono_arch_get_argument_info () is not signal safe.
2480 arg_info = g_newa (MonoJitArgumentInfo, sig->param_count + 1);
2481 stack_size = mono_arch_get_argument_info (sig, sig->param_count, arg_info);
2484 flags = (MonoJitInfoFlags)(flags | JIT_INFO_HAS_ARCH_EH_INFO);
2487 if (cfg->has_unwind_info_for_epilog && !(flags & JIT_INFO_HAS_ARCH_EH_INFO))
2488 flags = (MonoJitInfoFlags)(flags | JIT_INFO_HAS_ARCH_EH_INFO);
2490 if (cfg->thunk_area)
2491 flags = (MonoJitInfoFlags)(flags | JIT_INFO_HAS_THUNK_INFO);
2493 if (cfg->try_block_holes) {
2494 for (tmp = cfg->try_block_holes; tmp; tmp = tmp->next) {
2495 TryBlockHole *hole = (TryBlockHole *)tmp->data;
2496 MonoExceptionClause *ec = hole->clause;
2497 int hole_end = hole->basic_block->native_offset + hole->basic_block->native_length;
2498 MonoBasicBlock *clause_last_bb = cfg->cil_offset_to_bb [ec->try_offset + ec->try_len];
2499 g_assert (clause_last_bb);
2501 /* Holes at the end of a try region can be represented by simply reducing the size of the block itself.*/
2502 if (clause_last_bb->native_offset != hole_end)
2506 flags = (MonoJitInfoFlags)(flags | JIT_INFO_HAS_TRY_BLOCK_HOLES);
2507 if (G_UNLIKELY (cfg->verbose_level >= 4))
2508 printf ("Number of try block holes %d\n", num_holes);
2511 if (COMPILE_LLVM (cfg))
2512 num_clauses = cfg->llvm_ex_info_len;
2514 num_clauses = header->num_clauses;
2516 if (cfg->method->dynamic)
2517 jinfo = (MonoJitInfo *)g_malloc0 (mono_jit_info_size (flags, num_clauses, num_holes));
2519 jinfo = (MonoJitInfo *)mono_domain_alloc0 (cfg->domain, mono_jit_info_size (flags, num_clauses, num_holes));
2520 mono_jit_info_init (jinfo, cfg->method_to_register, cfg->native_code, cfg->code_len, flags, num_clauses, num_holes);
2521 jinfo->domain_neutral = (cfg->opt & MONO_OPT_SHARED) != 0;
2523 if (COMPILE_LLVM (cfg))
2524 jinfo->from_llvm = TRUE;
2528 MonoGenericJitInfo *gi;
2529 GSList *loclist = NULL;
2531 gi = mono_jit_info_get_generic_jit_info (jinfo);
2534 if (cfg->method->dynamic)
2535 gi->generic_sharing_context = g_new0 (MonoGenericSharingContext, 1);
2537 gi->generic_sharing_context = (MonoGenericSharingContext *)mono_domain_alloc0 (cfg->domain, sizeof (MonoGenericSharingContext));
2538 mini_init_gsctx (cfg->method->dynamic ? NULL : cfg->domain, NULL, cfg->gsctx_context, gi->generic_sharing_context);
2540 if ((method_to_compile->flags & METHOD_ATTRIBUTE_STATIC) ||
2541 mini_method_get_context (method_to_compile)->method_inst ||
2542 method_to_compile->klass->valuetype) {
2543 g_assert (cfg->rgctx_var);
2548 if ((method_to_compile->flags & METHOD_ATTRIBUTE_STATIC) ||
2549 mini_method_get_context (method_to_compile)->method_inst ||
2550 method_to_compile->klass->valuetype) {
2551 inst = cfg->rgctx_var;
2552 if (!COMPILE_LLVM (cfg))
2553 g_assert (inst->opcode == OP_REGOFFSET);
2554 loclist = cfg->rgctx_loclist;
2556 inst = cfg->args [0];
2557 loclist = cfg->this_loclist;
2561 /* Needed to handle async exceptions */
2565 gi->nlocs = g_slist_length (loclist);
2566 if (cfg->method->dynamic)
2567 gi->locations = (MonoDwarfLocListEntry *)g_malloc0 (gi->nlocs * sizeof (MonoDwarfLocListEntry));
2569 gi->locations = (MonoDwarfLocListEntry *)mono_domain_alloc0 (cfg->domain, gi->nlocs * sizeof (MonoDwarfLocListEntry));
2571 for (l = loclist; l; l = l->next) {
2572 memcpy (&(gi->locations [i]), l->data, sizeof (MonoDwarfLocListEntry));
2577 if (COMPILE_LLVM (cfg)) {
2578 g_assert (cfg->llvm_this_reg != -1);
2579 gi->this_in_reg = 0;
2580 gi->this_reg = cfg->llvm_this_reg;
2581 gi->this_offset = cfg->llvm_this_offset;
2582 } else if (inst->opcode == OP_REGVAR) {
2583 gi->this_in_reg = 1;
2584 gi->this_reg = inst->dreg;
2586 g_assert (inst->opcode == OP_REGOFFSET);
2588 g_assert (inst->inst_basereg == X86_EBP);
2589 #elif defined(TARGET_AMD64)
2590 g_assert (inst->inst_basereg == X86_EBP || inst->inst_basereg == X86_ESP);
2592 g_assert (inst->inst_offset >= G_MININT32 && inst->inst_offset <= G_MAXINT32);
2594 gi->this_in_reg = 0;
2595 gi->this_reg = inst->inst_basereg;
2596 gi->this_offset = inst->inst_offset;
2601 MonoTryBlockHoleTableJitInfo *table;
2604 table = mono_jit_info_get_try_block_hole_table_info (jinfo);
2605 table->num_holes = (guint16)num_holes;
2607 for (tmp = cfg->try_block_holes; tmp; tmp = tmp->next) {
2608 guint32 start_bb_offset;
2609 MonoTryBlockHoleJitInfo *hole;
2610 TryBlockHole *hole_data = (TryBlockHole *)tmp->data;
2611 MonoExceptionClause *ec = hole_data->clause;
2612 int hole_end = hole_data->basic_block->native_offset + hole_data->basic_block->native_length;
2613 MonoBasicBlock *clause_last_bb = cfg->cil_offset_to_bb [ec->try_offset + ec->try_len];
2614 g_assert (clause_last_bb);
2616 /* Holes at the end of a try region can be represented by simply reducing the size of the block itself.*/
2617 if (clause_last_bb->native_offset == hole_end)
2620 start_bb_offset = hole_data->start_offset - hole_data->basic_block->native_offset;
2621 hole = &table->holes [i++];
2622 hole->clause = hole_data->clause - &header->clauses [0];
2623 hole->offset = (guint32)hole_data->start_offset;
2624 hole->length = (guint16)(hole_data->basic_block->native_length - start_bb_offset);
2626 if (G_UNLIKELY (cfg->verbose_level >= 4))
2627 printf ("\tTry block hole at eh clause %d offset %x length %x\n", hole->clause, hole->offset, hole->length);
2629 g_assert (i == num_holes);
2632 if (jinfo->has_arch_eh_info) {
2633 MonoArchEHJitInfo *info;
2635 info = mono_jit_info_get_arch_eh_info (jinfo);
2637 info->stack_size = stack_size;
2640 if (cfg->thunk_area) {
2641 MonoThunkJitInfo *info;
2643 info = mono_jit_info_get_thunk_info (jinfo);
2644 info->thunks_offset = cfg->thunks_offset;
2645 info->thunks_size = cfg->thunk_area;
2648 if (COMPILE_LLVM (cfg)) {
2650 memcpy (&jinfo->clauses [0], &cfg->llvm_ex_info [0], num_clauses * sizeof (MonoJitExceptionInfo));
2651 } else if (header->num_clauses) {
2654 for (i = 0; i < header->num_clauses; i++) {
2655 MonoExceptionClause *ec = &header->clauses [i];
2656 MonoJitExceptionInfo *ei = &jinfo->clauses [i];
2657 MonoBasicBlock *tblock;
2660 ei->flags = ec->flags;
2662 if (G_UNLIKELY (cfg->verbose_level >= 4))
2663 printf ("IL clause: try 0x%x-0x%x handler 0x%x-0x%x filter 0x%x\n", ec->try_offset, ec->try_offset + ec->try_len, ec->handler_offset, ec->handler_offset + ec->handler_len, ec->flags == MONO_EXCEPTION_CLAUSE_FILTER ? ec->data.filter_offset : 0);
2665 exvar = mono_find_exvar_for_offset (cfg, ec->handler_offset);
2666 ei->exvar_offset = exvar ? exvar->inst_offset : 0;
2668 if (ei->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
2669 tblock = cfg->cil_offset_to_bb [ec->data.filter_offset];
2671 ei->data.filter = cfg->native_code + tblock->native_offset;
2673 ei->data.catch_class = ec->data.catch_class;
2676 tblock = cfg->cil_offset_to_bb [ec->try_offset];
2678 g_assert (tblock->native_offset);
2679 ei->try_start = cfg->native_code + tblock->native_offset;
2680 if (tblock->extend_try_block) {
2682 * Extend the try block backwards to include parts of the previous call
2685 ei->try_start = (guint8*)ei->try_start - cfg->backend->monitor_enter_adjustment;
2687 if (ec->try_offset + ec->try_len < header->code_size)
2688 tblock = cfg->cil_offset_to_bb [ec->try_offset + ec->try_len];
2690 tblock = cfg->bb_exit;
2691 if (G_UNLIKELY (cfg->verbose_level >= 4))
2692 printf ("looking for end of try [%d, %d] -> %p (code size %d)\n", ec->try_offset, ec->try_len, tblock, header->code_size);
2694 if (!tblock->native_offset) {
2696 for (j = ec->try_offset + ec->try_len, end = ec->try_offset; j >= end; --j) {
2697 MonoBasicBlock *bb = cfg->cil_offset_to_bb [j];
2698 if (bb && bb->native_offset) {
2704 ei->try_end = cfg->native_code + tblock->native_offset;
2705 g_assert (tblock->native_offset);
2706 tblock = cfg->cil_offset_to_bb [ec->handler_offset];
2708 ei->handler_start = cfg->native_code + tblock->native_offset;
2710 for (tmp = cfg->try_block_holes; tmp; tmp = tmp->next) {
2711 TryBlockHole *hole = (TryBlockHole *)tmp->data;
2712 gpointer hole_end = cfg->native_code + (hole->basic_block->native_offset + hole->basic_block->native_length);
2713 if (hole->clause == ec && hole_end == ei->try_end) {
2714 if (G_UNLIKELY (cfg->verbose_level >= 4))
2715 printf ("\tShortening try block %d from %x to %x\n", i, (int)((guint8*)ei->try_end - cfg->native_code), hole->start_offset);
2717 ei->try_end = cfg->native_code + hole->start_offset;
2722 if (ec->flags == MONO_EXCEPTION_CLAUSE_FINALLY) {
2724 if (ec->handler_offset + ec->handler_len < header->code_size) {
2725 tblock = cfg->cil_offset_to_bb [ec->handler_offset + ec->handler_len];
2726 if (tblock->native_offset) {
2727 end_offset = tblock->native_offset;
2731 for (j = ec->handler_offset + ec->handler_len, end = ec->handler_offset; j >= end; --j) {
2732 MonoBasicBlock *bb = cfg->cil_offset_to_bb [j];
2733 if (bb && bb->native_offset) {
2738 end_offset = tblock->native_offset + tblock->native_length;
2741 end_offset = cfg->epilog_begin;
2743 ei->data.handler_end = cfg->native_code + end_offset;
2748 if (G_UNLIKELY (cfg->verbose_level >= 4)) {
2750 for (i = 0; i < jinfo->num_clauses; i++) {
2751 MonoJitExceptionInfo *ei = &jinfo->clauses [i];
2752 int start = (guint8*)ei->try_start - cfg->native_code;
2753 int end = (guint8*)ei->try_end - cfg->native_code;
2754 int handler = (guint8*)ei->handler_start - cfg->native_code;
2755 int handler_end = (guint8*)ei->data.handler_end - cfg->native_code;
2757 printf ("JitInfo EH clause %d flags %x try %x-%x handler %x-%x\n", i, ei->flags, start, end, handler, handler_end);
2761 if (cfg->encoded_unwind_ops) {
2762 /* Generated by LLVM */
2763 jinfo->unwind_info = mono_cache_unwind_info (cfg->encoded_unwind_ops, cfg->encoded_unwind_ops_len);
2764 g_free (cfg->encoded_unwind_ops);
2765 } else if (cfg->unwind_ops) {
2767 guint8 *unwind_info = mono_unwind_ops_encode (cfg->unwind_ops, &info_len);
2768 guint32 unwind_desc;
2770 unwind_desc = mono_cache_unwind_info (unwind_info, info_len);
2772 if (cfg->has_unwind_info_for_epilog) {
2773 MonoArchEHJitInfo *info;
2775 info = mono_jit_info_get_arch_eh_info (jinfo);
2777 info->epilog_size = cfg->code_len - cfg->epilog_begin;
2779 jinfo->unwind_info = unwind_desc;
2780 g_free (unwind_info);
2782 jinfo->unwind_info = cfg->used_int_regs;
2788 /* Return whenever METHOD is a gsharedvt method */
2790 is_gsharedvt_method (MonoMethod *method)
2792 MonoGenericContext *context;
2793 MonoGenericInst *inst;
2796 if (!method->is_inflated)
2798 context = mono_method_get_context (method);
2799 inst = context->class_inst;
2801 for (i = 0; i < inst->type_argc; ++i)
2802 if (mini_is_gsharedvt_gparam (inst->type_argv [i]))
2805 inst = context->method_inst;
2807 for (i = 0; i < inst->type_argc; ++i)
2808 if (mini_is_gsharedvt_gparam (inst->type_argv [i]))
2815 is_open_method (MonoMethod *method)
2817 MonoGenericContext *context;
2819 if (!method->is_inflated)
2821 context = mono_method_get_context (method);
2822 if (context->class_inst && context->class_inst->is_open)
2824 if (context->method_inst && context->method_inst->is_open)
2830 mono_insert_nop_in_empty_bb (MonoCompile *cfg)
2833 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
2837 MONO_INST_NEW (cfg, nop, OP_NOP);
2838 MONO_ADD_INS (bb, nop);
2842 mono_create_gc_safepoint (MonoCompile *cfg, MonoBasicBlock *bblock)
2844 MonoInst *poll_addr, *ins;
2846 if (cfg->disable_gc_safe_points)
2849 if (cfg->verbose_level > 1)
2850 printf ("ADDING SAFE POINT TO BB %d\n", bblock->block_num);
2852 g_assert (mono_threads_is_coop_enabled ());
2853 NEW_AOTCONST (cfg, poll_addr, MONO_PATCH_INFO_GC_SAFE_POINT_FLAG, (gpointer)&mono_polling_required);
2855 MONO_INST_NEW (cfg, ins, OP_GC_SAFE_POINT);
2856 ins->sreg1 = poll_addr->dreg;
2858 if (bblock->flags & BB_EXCEPTION_HANDLER) {
2859 MonoInst *eh_op = bblock->code;
2861 if (eh_op && eh_op->opcode != OP_START_HANDLER && eh_op->opcode != OP_GET_EX_OBJ) {
2864 MonoInst *next_eh_op = eh_op ? eh_op->next : NULL;
2865 // skip all EH relateds ops
2866 while (next_eh_op && (next_eh_op->opcode == OP_START_HANDLER || next_eh_op->opcode == OP_GET_EX_OBJ)) {
2868 next_eh_op = eh_op->next;
2872 mono_bblock_insert_after_ins (bblock, eh_op, poll_addr);
2873 mono_bblock_insert_after_ins (bblock, poll_addr, ins);
2874 } else if (bblock == cfg->bb_entry) {
2875 mono_bblock_insert_after_ins (bblock, bblock->last_ins, poll_addr);
2876 mono_bblock_insert_after_ins (bblock, poll_addr, ins);
2879 mono_bblock_insert_before_ins (bblock, NULL, poll_addr);
2880 mono_bblock_insert_after_ins (bblock, poll_addr, ins);
2885 This code inserts safepoints into managed code at important code paths.
2888 -the first basic block
2889 -landing BB for exception handlers
2894 mono_insert_safepoints (MonoCompile *cfg)
2898 if (!mono_threads_is_coop_enabled ())
2901 if (cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
2902 WrapperInfo *info = mono_marshal_get_wrapper_info (cfg->method);
2903 g_assert (mono_threads_is_coop_enabled ());
2904 gpointer poll_func = &mono_threads_state_poll;
2906 if (info && info->subtype == WRAPPER_SUBTYPE_ICALL_WRAPPER && info->d.icall.func == poll_func) {
2907 if (cfg->verbose_level > 1)
2908 printf ("SKIPPING SAFEPOINTS for the polling function icall\n");
2913 if (cfg->method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
2914 if (cfg->verbose_level > 1)
2915 printf ("SKIPPING SAFEPOINTS for native-to-managed wrappers.\n");
2919 if (cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
2920 WrapperInfo *info = mono_marshal_get_wrapper_info (cfg->method);
2922 if (info && info->subtype == WRAPPER_SUBTYPE_ICALL_WRAPPER &&
2923 (info->d.icall.func == mono_thread_interruption_checkpoint ||
2924 info->d.icall.func == mono_threads_exit_gc_safe_region_unbalanced)) {
2925 /* These wrappers are called from the wrapper for the polling function, leading to potential stack overflow */
2926 if (cfg->verbose_level > 1)
2927 printf ("SKIPPING SAFEPOINTS for wrapper %s\n", cfg->method->name);
2932 if (cfg->verbose_level > 1)
2933 printf ("INSERTING SAFEPOINTS\n");
2934 if (cfg->verbose_level > 2)
2935 mono_print_code (cfg, "BEFORE SAFEPOINTS");
2937 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
2938 if (bb->loop_body_start || bb == cfg->bb_entry || bb->flags & BB_EXCEPTION_HANDLER)
2939 mono_create_gc_safepoint (cfg, bb);
2942 if (cfg->verbose_level > 2)
2943 mono_print_code (cfg, "AFTER SAFEPOINTS");
2949 mono_insert_branches_between_bblocks (MonoCompile *cfg)
2953 /* Add branches between non-consecutive bblocks */
2954 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
2955 if (bb->last_ins && MONO_IS_COND_BRANCH_OP (bb->last_ins) &&
2956 bb->last_ins->inst_false_bb && bb->next_bb != bb->last_ins->inst_false_bb) {
2957 /* we are careful when inverting, since bugs like #59580
2958 * could show up when dealing with NaNs.
2960 if (MONO_IS_COND_BRANCH_NOFP(bb->last_ins) && bb->next_bb == bb->last_ins->inst_true_bb) {
2961 MonoBasicBlock *tmp = bb->last_ins->inst_true_bb;
2962 bb->last_ins->inst_true_bb = bb->last_ins->inst_false_bb;
2963 bb->last_ins->inst_false_bb = tmp;
2965 bb->last_ins->opcode = mono_reverse_branch_op (bb->last_ins->opcode);
2967 MonoInst *inst = (MonoInst *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst));
2968 inst->opcode = OP_BR;
2969 inst->inst_target_bb = bb->last_ins->inst_false_bb;
2970 mono_bblock_add_inst (bb, inst);
2975 if (cfg->verbose_level >= 4) {
2976 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
2977 MonoInst *tree = bb->code;
2978 g_print ("DUMP BLOCK %d:\n", bb->block_num);
2981 for (; tree; tree = tree->next) {
2982 mono_print_ins_index (-1, tree);
2988 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
2989 bb->max_vreg = cfg->next_vreg;
2994 init_backend (MonoBackend *backend)
2996 #ifdef MONO_ARCH_NEED_GOT_VAR
2997 backend->need_got_var = 1;
2999 #ifdef MONO_ARCH_HAVE_CARD_TABLE_WBARRIER
3000 backend->have_card_table_wb = 1;
3002 #ifdef MONO_ARCH_HAVE_OP_GENERIC_CLASS_INIT
3003 backend->have_op_generic_class_init = 1;
3005 #ifdef MONO_ARCH_EMULATE_MUL_DIV
3006 backend->emulate_mul_div = 1;
3008 #ifdef MONO_ARCH_EMULATE_DIV
3009 backend->emulate_div = 1;
3011 #if !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
3012 backend->emulate_long_shift_opts = 1;
3014 #ifdef MONO_ARCH_HAVE_OBJC_GET_SELECTOR
3015 backend->have_objc_get_selector = 1;
3017 #ifdef MONO_ARCH_HAVE_GENERALIZED_IMT_TRAMPOLINE
3018 backend->have_generalized_imt_trampoline = 1;
3020 #ifdef MONO_ARCH_GSHARED_SUPPORTED
3021 backend->gshared_supported = 1;
3023 if (MONO_ARCH_USE_FPSTACK)
3024 backend->use_fpstack = 1;
3025 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
3026 backend->have_liverange_ops = 1;
3028 #ifdef MONO_ARCH_HAVE_OP_TAIL_CALL
3029 backend->have_op_tail_call = 1;
3031 #ifndef MONO_ARCH_MONITOR_ENTER_ADJUSTMENT
3032 backend->monitor_enter_adjustment = 1;
3034 backend->monitor_enter_adjustment = MONO_ARCH_MONITOR_ENTER_ADJUSTMENT;
3036 #if defined(__mono_ilp32__)
3039 #ifdef MONO_ARCH_HAVE_DUMMY_INIT
3040 backend->have_dummy_init = 1;
3042 #ifdef MONO_ARCH_NEED_DIV_CHECK
3043 backend->need_div_check = 1;
3045 #ifdef NO_UNALIGNED_ACCESS
3046 backend->no_unaligned_access = 1;
3048 #ifdef MONO_ARCH_DYN_CALL_PARAM_AREA
3049 backend->dyn_call_param_area = MONO_ARCH_DYN_CALL_PARAM_AREA;
3051 #ifdef MONO_ARCH_NO_DIV_WITH_MUL
3052 backend->disable_div_with_mul = 1;
3057 * mini_method_compile:
3058 * @method: the method to compile
3059 * @opts: the optimization flags to use
3060 * @domain: the domain where the method will be compiled in
3061 * @flags: compilation flags
3062 * @parts: debug flag
3064 * Returns: a MonoCompile* pointer. Caller must check the exception_type
3065 * field in the returned struct to see if compilation succeded.
3068 mini_method_compile (MonoMethod *method, guint32 opts, MonoDomain *domain, JitFlags flags, int parts, int aot_method_index)
3070 MonoMethodHeader *header;
3071 MonoMethodSignature *sig;
3074 int i, code_size_ratio;
3075 gboolean try_generic_shared, try_llvm = FALSE;
3076 MonoMethod *method_to_compile, *method_to_register;
3077 gboolean method_is_gshared = FALSE;
3078 gboolean run_cctors = (flags & JIT_FLAG_RUN_CCTORS) ? 1 : 0;
3079 gboolean compile_aot = (flags & JIT_FLAG_AOT) ? 1 : 0;
3080 gboolean full_aot = (flags & JIT_FLAG_FULL_AOT) ? 1 : 0;
3081 gboolean disable_direct_icalls = (flags & JIT_FLAG_NO_DIRECT_ICALLS) ? 1 : 0;
3082 gboolean gsharedvt_method = FALSE;
3084 gboolean llvm = (flags & JIT_FLAG_LLVM) ? 1 : 0;
3086 static gboolean verbose_method_inited;
3087 static char *verbose_method_name;
3089 InterlockedIncrement (&mono_jit_stats.methods_compiled);
3090 MONO_PROFILER_RAISE (jit_begin, (method));
3091 if (MONO_METHOD_COMPILE_BEGIN_ENABLED ())
3092 MONO_PROBE_METHOD_COMPILE_BEGIN (method);
3094 gsharedvt_method = is_gsharedvt_method (method);
3097 * In AOT mode, method can be the following:
3098 * - a gsharedvt method.
3099 * - a method inflated with type parameters. This is for ref/partial sharing.
3100 * - a method inflated with concrete types.
3103 if (is_open_method (method)) {
3104 try_generic_shared = TRUE;
3105 method_is_gshared = TRUE;
3107 try_generic_shared = FALSE;
3109 g_assert (opts & MONO_OPT_GSHARED);
3111 try_generic_shared = mono_class_generic_sharing_enabled (method->klass) &&
3112 (opts & MONO_OPT_GSHARED) && mono_method_is_generic_sharable (method, FALSE);
3113 if (mini_is_gsharedvt_sharable_method (method)) {
3115 if (!mono_debug_count ())
3116 try_generic_shared = FALSE;
3122 if (try_generic_shared && !mono_debug_count ())
3123 try_generic_shared = FALSE;
3126 if (opts & MONO_OPT_GSHARED) {
3127 if (try_generic_shared)
3128 mono_stats.generics_sharable_methods++;
3129 else if (mono_method_is_generic_impl (method))
3130 mono_stats.generics_unsharable_methods++;
3134 try_llvm = mono_use_llvm || llvm;
3138 if (method_is_gshared) {
3139 method_to_compile = method;
3141 if (try_generic_shared) {
3142 method_to_compile = mini_get_shared_method (method);
3143 g_assert (method_to_compile);
3145 method_to_compile = method;
3149 cfg = g_new0 (MonoCompile, 1);
3150 cfg->method = method_to_compile;
3151 cfg->mempool = mono_mempool_new ();
3153 cfg->run_cctors = run_cctors;
3154 cfg->domain = domain;
3155 cfg->verbose_level = mini_verbose;
3156 cfg->compile_aot = compile_aot;
3157 cfg->full_aot = full_aot;
3158 cfg->disable_omit_fp = debug_options.disable_omit_fp;
3159 cfg->skip_visibility = method->skip_visibility;
3160 cfg->orig_method = method;
3161 cfg->gen_seq_points = !debug_options.no_seq_points_compact_data || debug_options.gen_sdb_seq_points;
3162 cfg->gen_sdb_seq_points = debug_options.gen_sdb_seq_points;
3163 cfg->llvm_only = (flags & JIT_FLAG_LLVM_ONLY) != 0;
3164 cfg->backend = current_backend;
3166 #ifdef PLATFORM_ANDROID
3167 if (cfg->method->wrapper_type != MONO_WRAPPER_NONE) {
3168 /* FIXME: Why is this needed */
3169 cfg->gen_seq_points = FALSE;
3170 cfg->gen_sdb_seq_points = FALSE;
3173 if (cfg->method->wrapper_type == MONO_WRAPPER_ALLOC) {
3174 /* We can't have seq points inside gc critical regions */
3175 cfg->gen_seq_points = FALSE;
3176 cfg->gen_sdb_seq_points = FALSE;
3178 /* coop requires loop detection to happen */
3179 if (mono_threads_is_coop_enabled ())
3180 cfg->opt |= MONO_OPT_LOOP;
3181 cfg->explicit_null_checks = debug_options.explicit_null_checks || (flags & JIT_FLAG_EXPLICIT_NULL_CHECKS);
3182 cfg->soft_breakpoints = debug_options.soft_breakpoints;
3183 cfg->check_pinvoke_callconv = debug_options.check_pinvoke_callconv;
3184 cfg->disable_direct_icalls = disable_direct_icalls;
3185 cfg->direct_pinvoke = (flags & JIT_FLAG_DIRECT_PINVOKE) != 0;
3186 if (try_generic_shared)
3187 cfg->gshared = TRUE;
3188 cfg->compile_llvm = try_llvm;
3189 cfg->token_info_hash = g_hash_table_new (NULL, NULL);
3190 if (cfg->compile_aot)
3191 cfg->method_index = aot_method_index;
3194 if (!mono_debug_count ())
3195 cfg->opt &= ~MONO_OPT_FLOAT32;
3198 cfg->opt &= ~MONO_OPT_SIMD;
3199 cfg->r4fp = (cfg->opt & MONO_OPT_FLOAT32) ? 1 : 0;
3200 cfg->r4_stack_type = cfg->r4fp ? STACK_R4 : STACK_R8;
3202 if (cfg->gen_seq_points)
3203 cfg->seq_points = g_ptr_array_new ();
3204 error_init (&cfg->error);
3206 if (cfg->compile_aot && !try_generic_shared && (method->is_generic || mono_class_is_gtd (method->klass) || method_is_gshared)) {
3207 cfg->exception_type = MONO_EXCEPTION_GENERIC_SHARING_FAILED;
3211 if (cfg->gshared && (gsharedvt_method || mini_is_gsharedvt_sharable_method (method))) {
3212 MonoMethodInflated *inflated;
3213 MonoGenericContext *context;
3215 if (gsharedvt_method) {
3216 g_assert (method->is_inflated);
3217 inflated = (MonoMethodInflated*)method;
3218 context = &inflated->context;
3220 /* We are compiling a gsharedvt method directly */
3221 g_assert (compile_aot);
3223 g_assert (method_to_compile->is_inflated);
3224 inflated = (MonoMethodInflated*)method_to_compile;
3225 context = &inflated->context;
3228 mini_init_gsctx (NULL, cfg->mempool, context, &cfg->gsctx);
3229 cfg->gsctx_context = context;
3231 cfg->gsharedvt = TRUE;
3232 if (!cfg->llvm_only) {
3233 cfg->disable_llvm = TRUE;
3234 cfg->exception_message = g_strdup ("gsharedvt");
3239 method_to_register = method_to_compile;
3241 g_assert (method == method_to_compile);
3242 method_to_register = method;
3244 cfg->method_to_register = method_to_register;
3247 sig = mono_method_signature_checked (cfg->method, &err);
3249 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
3250 cfg->exception_message = g_strdup (mono_error_get_message (&err));
3251 mono_error_cleanup (&err);
3252 if (MONO_METHOD_COMPILE_END_ENABLED ())
3253 MONO_PROBE_METHOD_COMPILE_END (method, FALSE);
3257 header = cfg->header = mono_method_get_header_checked (cfg->method, &cfg->error);
3259 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
3260 if (MONO_METHOD_COMPILE_END_ENABLED ())
3261 MONO_PROBE_METHOD_COMPILE_END (method, FALSE);
3267 static gboolean inited;
3273 * Check for methods which cannot be compiled by LLVM early, to avoid
3274 * the extra compilation pass.
3276 if (COMPILE_LLVM (cfg)) {
3277 mono_llvm_check_method_supported (cfg);
3278 if (cfg->disable_llvm) {
3279 if (cfg->verbose_level >= (cfg->llvm_only ? 0 : 1)) {
3280 //nm = mono_method_full_name (cfg->method, TRUE);
3281 printf ("LLVM failed for '%s': %s\n", method->name, cfg->exception_message);
3284 if (cfg->llvm_only) {
3285 g_free (cfg->exception_message);
3286 cfg->disable_aot = TRUE;
3289 mono_destroy_compile (cfg);
3291 goto restart_compile;
3297 /* The debugger has no liveness information, so avoid sharing registers/stack slots */
3298 if (debug_options.mdb_optimizations) {
3299 cfg->disable_reuse_registers = TRUE;
3300 cfg->disable_reuse_stack_slots = TRUE;
3302 * This decreases the change the debugger will read registers/stack slots which are
3303 * not yet initialized.
3305 cfg->disable_initlocals_opt = TRUE;
3307 cfg->extend_live_ranges = TRUE;
3309 /* The debugger needs all locals to be on the stack or in a global register */
3310 cfg->disable_vreg_to_lvreg = TRUE;
3312 /* Don't remove unused variables when running inside the debugger since the user
3313 * may still want to view them. */
3314 cfg->disable_deadce_vars = TRUE;
3316 cfg->opt &= ~MONO_OPT_DEADCE;
3317 cfg->opt &= ~MONO_OPT_INLINE;
3318 cfg->opt &= ~MONO_OPT_COPYPROP;
3319 cfg->opt &= ~MONO_OPT_CONSPROP;
3321 /* This is needed for the soft debugger, which doesn't like code after the epilog */
3322 cfg->disable_out_of_line_bblocks = TRUE;
3325 if (mono_using_xdebug) {
3327 * Make each variable use its own register/stack slot and extend
3328 * their liveness to cover the whole method, making them displayable
3329 * in gdb even after they are dead.
3331 cfg->disable_reuse_registers = TRUE;
3332 cfg->disable_reuse_stack_slots = TRUE;
3333 cfg->extend_live_ranges = TRUE;
3334 cfg->compute_precise_live_ranges = TRUE;
3337 mini_gc_init_cfg (cfg);
3339 if (method->wrapper_type == MONO_WRAPPER_UNKNOWN) {
3340 WrapperInfo *info = mono_marshal_get_wrapper_info (method);
3342 /* These wrappers are using linkonce linkage, so they can't access GOT slots */
3343 if ((info && (info->subtype == WRAPPER_SUBTYPE_GSHAREDVT_IN_SIG || info->subtype == WRAPPER_SUBTYPE_GSHAREDVT_OUT_SIG))) {
3344 cfg->disable_gc_safe_points = TRUE;
3345 /* This is safe, these wrappers only store to the stack */
3346 cfg->gen_write_barriers = FALSE;
3350 if (COMPILE_LLVM (cfg)) {
3351 cfg->opt |= MONO_OPT_ABCREM;
3354 if (!verbose_method_inited) {
3355 verbose_method_name = g_getenv ("MONO_VERBOSE_METHOD");
3356 verbose_method_inited = TRUE;
3358 if (verbose_method_name) {
3359 const char *name = verbose_method_name;
3361 if ((strchr (name, '.') > name) || strchr (name, ':')) {
3362 MonoMethodDesc *desc;
3364 desc = mono_method_desc_new (name, TRUE);
3365 if (mono_method_desc_full_match (desc, cfg->method)) {
3366 cfg->verbose_level = 4;
3368 mono_method_desc_free (desc);
3370 if (strcmp (cfg->method->name, name) == 0)
3371 cfg->verbose_level = 4;
3375 cfg->intvars = (guint16 *)mono_mempool_alloc0 (cfg->mempool, sizeof (guint16) * STACK_MAX * header->max_stack);
3377 if (cfg->verbose_level > 0) {
3380 method_name = mono_method_get_full_name (method);
3381 g_print ("converting %s%s%smethod %s\n", COMPILE_LLVM (cfg) ? "llvm " : "", cfg->gsharedvt ? "gsharedvt " : "", (cfg->gshared && !cfg->gsharedvt) ? "gshared " : "", method_name);
3383 if (COMPILE_LLVM (cfg))
3384 g_print ("converting llvm method %s\n", method_name = mono_method_full_name (method, TRUE));
3385 else if (cfg->gsharedvt)
3386 g_print ("converting gsharedvt method %s\n", method_name = mono_method_full_name (method_to_compile, TRUE));
3387 else if (cfg->gshared)
3388 g_print ("converting shared method %s\n", method_name = mono_method_full_name (method_to_compile, TRUE));
3390 g_print ("converting method %s\n", method_name = mono_method_full_name (method, TRUE));
3392 g_free (method_name);
3395 if (cfg->opt & MONO_OPT_ABCREM)
3396 cfg->opt |= MONO_OPT_SSA;
3398 cfg->rs = mono_regstate_new ();
3399 cfg->next_vreg = cfg->rs->next_vreg;
3401 /* FIXME: Fix SSA to handle branches inside bblocks */
3402 if (cfg->opt & MONO_OPT_SSA)
3403 cfg->enable_extended_bblocks = FALSE;
3406 * FIXME: This confuses liveness analysis because variables which are assigned after
3407 * a branch inside a bblock become part of the kill set, even though the assignment
3408 * might not get executed. This causes the optimize_initlocals pass to delete some
3409 * assignments which are needed.
3410 * Also, the mono_if_conversion pass needs to be modified to recognize the code
3413 //cfg->enable_extended_bblocks = TRUE;
3415 /*We must verify the method before doing any IR generation as mono_compile_create_vars can assert.*/
3416 if (mono_compile_is_broken (cfg, cfg->method, TRUE)) {
3417 if (mini_get_debug_options ()->break_on_unverified)
3423 * create MonoInst* which represents arguments and local variables
3425 mono_compile_create_vars (cfg);
3427 mono_cfg_dump_create_context (cfg);
3428 mono_cfg_dump_begin_group (cfg);
3430 MONO_TIME_TRACK (mono_jit_stats.jit_method_to_ir, i = mono_method_to_ir (cfg, method_to_compile, NULL, NULL, NULL, NULL, 0, FALSE));
3431 mono_cfg_dump_ir (cfg, "method-to-ir");
3433 if (cfg->gdump_ctx != NULL) {
3434 /* workaround for graph visualization, as it doesn't handle empty basic blocks properly */
3435 mono_insert_nop_in_empty_bb (cfg);
3436 mono_cfg_dump_ir (cfg, "mono_insert_nop_in_empty_bb");
3440 if (try_generic_shared && cfg->exception_type == MONO_EXCEPTION_GENERIC_SHARING_FAILED) {
3442 if (MONO_METHOD_COMPILE_END_ENABLED ())
3443 MONO_PROBE_METHOD_COMPILE_END (method, FALSE);
3446 mono_destroy_compile (cfg);
3447 try_generic_shared = FALSE;
3448 goto restart_compile;
3450 g_assert (cfg->exception_type != MONO_EXCEPTION_GENERIC_SHARING_FAILED);
3452 if (MONO_METHOD_COMPILE_END_ENABLED ())
3453 MONO_PROBE_METHOD_COMPILE_END (method, FALSE);
3454 /* cfg contains the details of the failure, so let the caller cleanup */
3458 cfg->stat_basic_blocks += cfg->num_bblocks;
3460 if (COMPILE_LLVM (cfg)) {
3463 /* The IR has to be in SSA form for LLVM */
3464 cfg->opt |= MONO_OPT_SSA;
3468 // Allow SSA on the result value
3469 cfg->ret->flags &= ~MONO_INST_VOLATILE;
3471 // Add an explicit return instruction referencing the return value
3472 MONO_INST_NEW (cfg, ins, OP_SETRET);
3473 ins->sreg1 = cfg->ret->dreg;
3475 MONO_ADD_INS (cfg->bb_exit, ins);
3478 cfg->opt &= ~MONO_OPT_LINEARS;
3481 cfg->opt &= ~MONO_OPT_BRANCH;
3484 /* todo: remove code when we have verified that the liveness for try/catch blocks
3488 * Currently, this can't be commented out since exception blocks are not
3489 * processed during liveness analysis.
3490 * It is also needed, because otherwise the local optimization passes would
3491 * delete assignments in cases like this:
3493 * <something which throws>
3495 * This also allows SSA to be run on methods containing exception clauses, since
3496 * SSA will ignore variables marked VOLATILE.
3498 MONO_TIME_TRACK (mono_jit_stats.jit_liveness_handle_exception_clauses, mono_liveness_handle_exception_clauses (cfg));
3499 mono_cfg_dump_ir (cfg, "liveness_handle_exception_clauses");
3501 MONO_TIME_TRACK (mono_jit_stats.jit_handle_out_of_line_bblock, mono_handle_out_of_line_bblock (cfg));
3502 mono_cfg_dump_ir (cfg, "handle_out_of_line_bblock");
3504 /*g_print ("numblocks = %d\n", cfg->num_bblocks);*/
3506 if (!COMPILE_LLVM (cfg)) {
3507 MONO_TIME_TRACK (mono_jit_stats.jit_decompose_long_opts, mono_decompose_long_opts (cfg));
3508 mono_cfg_dump_ir (cfg, "decompose_long_opts");
3511 /* Should be done before branch opts */
3512 if (cfg->opt & (MONO_OPT_CONSPROP | MONO_OPT_COPYPROP)) {
3513 MONO_TIME_TRACK (mono_jit_stats.jit_local_cprop, mono_local_cprop (cfg));
3514 mono_cfg_dump_ir (cfg, "local_cprop");
3517 if (cfg->flags & MONO_CFG_HAS_TYPE_CHECK) {
3518 MONO_TIME_TRACK (mono_jit_stats.jit_decompose_typechecks, mono_decompose_typechecks (cfg));
3519 if (cfg->gdump_ctx != NULL) {
3520 /* workaround for graph visualization, as it doesn't handle empty basic blocks properly */
3521 mono_insert_nop_in_empty_bb (cfg);
3523 mono_cfg_dump_ir (cfg, "decompose_typechecks");
3527 * Should be done after cprop which can do strength reduction on
3528 * some of these ops, after propagating immediates.
3530 if (cfg->has_emulated_ops) {
3531 MONO_TIME_TRACK (mono_jit_stats.jit_local_emulate_ops, mono_local_emulate_ops (cfg));
3532 mono_cfg_dump_ir (cfg, "local_emulate_ops");
3535 if (cfg->opt & MONO_OPT_BRANCH) {
3536 MONO_TIME_TRACK (mono_jit_stats.jit_optimize_branches, mono_optimize_branches (cfg));
3537 mono_cfg_dump_ir (cfg, "optimize_branches");
3540 /* This must be done _before_ global reg alloc and _after_ decompose */
3541 MONO_TIME_TRACK (mono_jit_stats.jit_handle_global_vregs, mono_handle_global_vregs (cfg));
3542 mono_cfg_dump_ir (cfg, "handle_global_vregs");
3543 if (cfg->opt & MONO_OPT_DEADCE) {
3544 MONO_TIME_TRACK (mono_jit_stats.jit_local_deadce, mono_local_deadce (cfg));
3545 mono_cfg_dump_ir (cfg, "local_deadce");
3547 if (cfg->opt & MONO_OPT_ALIAS_ANALYSIS) {
3548 MONO_TIME_TRACK (mono_jit_stats.jit_local_alias_analysis, mono_local_alias_analysis (cfg));
3549 mono_cfg_dump_ir (cfg, "local_alias_analysis");
3551 /* Disable this for LLVM to make the IR easier to handle */
3552 if (!COMPILE_LLVM (cfg)) {
3553 MONO_TIME_TRACK (mono_jit_stats.jit_if_conversion, mono_if_conversion (cfg));
3554 mono_cfg_dump_ir (cfg, "if_conversion");
3557 mono_threads_safepoint ();
3559 MONO_TIME_TRACK (mono_jit_stats.jit_bb_ordering, mono_bb_ordering (cfg));
3560 mono_cfg_dump_ir (cfg, "bb_ordering");
3562 if (((cfg->num_varinfo > 2000) || (cfg->num_bblocks > 1000)) && !cfg->compile_aot) {
3564 * we disable some optimizations if there are too many variables
3565 * because JIT time may become too expensive. The actual number needs
3566 * to be tweaked and eventually the non-linear algorithms should be fixed.
3568 cfg->opt &= ~ (MONO_OPT_LINEARS | MONO_OPT_COPYPROP | MONO_OPT_CONSPROP);
3569 cfg->disable_ssa = TRUE;
3572 if (cfg->num_varinfo > 10000 && !cfg->llvm_only)
3573 /* Disable llvm for overly complex methods */
3574 cfg->disable_ssa = TRUE;
3576 if (cfg->opt & MONO_OPT_LOOP) {
3577 MONO_TIME_TRACK (mono_jit_stats.jit_compile_dominator_info, mono_compile_dominator_info (cfg, MONO_COMP_DOM | MONO_COMP_IDOM));
3578 MONO_TIME_TRACK (mono_jit_stats.jit_compute_natural_loops, mono_compute_natural_loops (cfg));
3581 MONO_TIME_TRACK (mono_jit_stats.jit_insert_safepoints, mono_insert_safepoints (cfg));
3582 mono_cfg_dump_ir (cfg, "insert_safepoints");
3584 /* after method_to_ir */
3586 if (MONO_METHOD_COMPILE_END_ENABLED ())
3587 MONO_PROBE_METHOD_COMPILE_END (method, TRUE);
3592 if (header->num_clauses)
3593 cfg->disable_ssa = TRUE;
3596 //#define DEBUGSSA "logic_run"
3597 //#define DEBUGSSA_CLASS "Tests"
3600 if (!cfg->disable_ssa) {
3601 mono_local_cprop (cfg);
3604 mono_ssa_compute (cfg);
3608 if (cfg->opt & MONO_OPT_SSA) {
3609 if (!(cfg->comp_done & MONO_COMP_SSA) && !cfg->disable_ssa) {
3611 MONO_TIME_TRACK (mono_jit_stats.jit_ssa_compute, mono_ssa_compute (cfg));
3612 mono_cfg_dump_ir (cfg, "ssa_compute");
3615 if (cfg->verbose_level >= 2) {
3622 /* after SSA translation */
3624 if (MONO_METHOD_COMPILE_END_ENABLED ())
3625 MONO_PROBE_METHOD_COMPILE_END (method, TRUE);
3629 if ((cfg->opt & MONO_OPT_CONSPROP) || (cfg->opt & MONO_OPT_COPYPROP)) {
3630 if (cfg->comp_done & MONO_COMP_SSA && !COMPILE_LLVM (cfg)) {
3632 MONO_TIME_TRACK (mono_jit_stats.jit_ssa_cprop, mono_ssa_cprop (cfg));
3633 mono_cfg_dump_ir (cfg, "ssa_cprop");
3639 if (cfg->comp_done & MONO_COMP_SSA && !COMPILE_LLVM (cfg)) {
3640 //mono_ssa_strength_reduction (cfg);
3642 if (cfg->opt & MONO_OPT_DEADCE) {
3643 MONO_TIME_TRACK (mono_jit_stats.jit_ssa_deadce, mono_ssa_deadce (cfg));
3644 mono_cfg_dump_ir (cfg, "ssa_deadce");
3647 if ((cfg->flags & (MONO_CFG_HAS_LDELEMA|MONO_CFG_HAS_CHECK_THIS)) && (cfg->opt & MONO_OPT_ABCREM)) {
3648 MONO_TIME_TRACK (mono_jit_stats.jit_perform_abc_removal, mono_perform_abc_removal (cfg));
3649 mono_cfg_dump_ir (cfg, "perform_abc_removal");
3652 MONO_TIME_TRACK (mono_jit_stats.jit_ssa_remove, mono_ssa_remove (cfg));
3653 mono_cfg_dump_ir (cfg, "ssa_remove");
3654 MONO_TIME_TRACK (mono_jit_stats.jit_local_cprop2, mono_local_cprop (cfg));
3655 mono_cfg_dump_ir (cfg, "local_cprop2");
3656 MONO_TIME_TRACK (mono_jit_stats.jit_handle_global_vregs2, mono_handle_global_vregs (cfg));
3657 mono_cfg_dump_ir (cfg, "handle_global_vregs2");
3658 if (cfg->opt & MONO_OPT_DEADCE) {
3659 MONO_TIME_TRACK (mono_jit_stats.jit_local_deadce2, mono_local_deadce (cfg));
3660 mono_cfg_dump_ir (cfg, "local_deadce2");
3663 if (cfg->opt & MONO_OPT_BRANCH) {
3664 MONO_TIME_TRACK (mono_jit_stats.jit_optimize_branches2, mono_optimize_branches (cfg));
3665 mono_cfg_dump_ir (cfg, "optimize_branches2");
3670 if (cfg->comp_done & MONO_COMP_SSA && COMPILE_LLVM (cfg)) {
3671 mono_ssa_loop_invariant_code_motion (cfg);
3672 mono_cfg_dump_ir (cfg, "loop_invariant_code_motion");
3673 /* This removes MONO_INST_FAULT flags too so perform it unconditionally */
3674 if (cfg->opt & MONO_OPT_ABCREM) {
3675 mono_perform_abc_removal (cfg);
3676 mono_cfg_dump_ir (cfg, "abc_removal");
3680 /* after SSA removal */
3682 if (MONO_METHOD_COMPILE_END_ENABLED ())
3683 MONO_PROBE_METHOD_COMPILE_END (method, TRUE);
3687 if (cfg->llvm_only && cfg->gsharedvt)
3688 mono_ssa_remove_gsharedvt (cfg);
3690 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
3691 if (COMPILE_SOFT_FLOAT (cfg))
3692 mono_decompose_soft_float (cfg);
3694 MONO_TIME_TRACK (mono_jit_stats.jit_decompose_vtype_opts, mono_decompose_vtype_opts (cfg));
3695 if (cfg->flags & MONO_CFG_HAS_ARRAY_ACCESS) {
3696 MONO_TIME_TRACK (mono_jit_stats.jit_decompose_array_access_opts, mono_decompose_array_access_opts (cfg));
3697 mono_cfg_dump_ir (cfg, "decompose_array_access_opts");
3701 #ifndef MONO_ARCH_GOT_REG
3706 g_assert (cfg->got_var_allocated);
3709 * Allways allocate the GOT var to a register, because keeping it
3710 * in memory will increase the number of live temporaries in some
3711 * code created by inssel.brg, leading to the well known spills+
3712 * branches problem. Testcase: mcs crash in
3713 * System.MonoCustomAttrs:GetCustomAttributes.
3715 #ifdef MONO_ARCH_GOT_REG
3716 got_reg = MONO_ARCH_GOT_REG;
3718 regs = mono_arch_get_global_int_regs (cfg);
3720 got_reg = GPOINTER_TO_INT (regs->data);
3723 cfg->got_var->opcode = OP_REGVAR;
3724 cfg->got_var->dreg = got_reg;
3725 cfg->used_int_regs |= 1LL << cfg->got_var->dreg;
3729 * Have to call this again to process variables added since the first call.
3731 MONO_TIME_TRACK(mono_jit_stats.jit_liveness_handle_exception_clauses2, mono_liveness_handle_exception_clauses (cfg));
3733 if (cfg->opt & MONO_OPT_LINEARS) {
3734 GList *vars, *regs, *l;
3736 /* fixme: maybe we can avoid to compute livenesss here if already computed ? */
3737 cfg->comp_done &= ~MONO_COMP_LIVENESS;
3738 if (!(cfg->comp_done & MONO_COMP_LIVENESS))
3739 MONO_TIME_TRACK (mono_jit_stats.jit_analyze_liveness, mono_analyze_liveness (cfg));
3741 if ((vars = mono_arch_get_allocatable_int_vars (cfg))) {
3742 regs = mono_arch_get_global_int_regs (cfg);
3743 /* Remove the reg reserved for holding the GOT address */
3745 for (l = regs; l; l = l->next) {
3746 if (GPOINTER_TO_UINT (l->data) == cfg->got_var->dreg) {
3747 regs = g_list_delete_link (regs, l);
3752 MONO_TIME_TRACK (mono_jit_stats.jit_linear_scan, mono_linear_scan (cfg, vars, regs, &cfg->used_int_regs));
3753 mono_cfg_dump_ir (cfg, "linear_scan");
3757 //mono_print_code (cfg, "");
3761 /* variables are allocated after decompose, since decompose could create temps */
3762 if (!COMPILE_LLVM (cfg)) {
3763 MONO_TIME_TRACK (mono_jit_stats.jit_arch_allocate_vars, mono_arch_allocate_vars (cfg));
3764 mono_cfg_dump_ir (cfg, "arch_allocate_vars");
3765 if (cfg->exception_type)
3770 mono_allocate_gsharedvt_vars (cfg);
3772 if (!COMPILE_LLVM (cfg)) {
3773 gboolean need_local_opts;
3774 MONO_TIME_TRACK (mono_jit_stats.jit_spill_global_vars, mono_spill_global_vars (cfg, &need_local_opts));
3775 mono_cfg_dump_ir (cfg, "spill_global_vars");
3777 if (need_local_opts || cfg->compile_aot) {
3778 /* To optimize code created by spill_global_vars */
3779 MONO_TIME_TRACK (mono_jit_stats.jit_local_cprop3, mono_local_cprop (cfg));
3780 if (cfg->opt & MONO_OPT_DEADCE)
3781 MONO_TIME_TRACK (mono_jit_stats.jit_local_deadce3, mono_local_deadce (cfg));
3782 mono_cfg_dump_ir (cfg, "needs_local_opts");
3786 mono_insert_branches_between_bblocks (cfg);
3788 if (COMPILE_LLVM (cfg)) {
3792 /* The IR has to be in SSA form for LLVM */
3793 if (!(cfg->comp_done & MONO_COMP_SSA)) {
3794 cfg->exception_message = g_strdup ("SSA disabled.");
3795 cfg->disable_llvm = TRUE;
3798 if (cfg->flags & MONO_CFG_HAS_ARRAY_ACCESS)
3799 mono_decompose_array_access_opts (cfg);
3801 if (!cfg->disable_llvm)
3802 mono_llvm_emit_method (cfg);
3803 if (cfg->disable_llvm) {
3804 if (cfg->verbose_level >= (cfg->llvm_only ? 0 : 1)) {
3805 //nm = mono_method_full_name (cfg->method, TRUE);
3806 printf ("LLVM failed for '%s': %s\n", method->name, cfg->exception_message);
3809 if (cfg->llvm_only) {
3810 cfg->disable_aot = TRUE;
3813 mono_destroy_compile (cfg);
3815 goto restart_compile;
3818 if (cfg->verbose_level > 0 && !cfg->compile_aot) {
3819 nm = mono_method_full_name (cfg->method, TRUE);
3820 g_print ("LLVM Method %s emitted at %p to %p (code length %d) [%s]\n",
3822 cfg->native_code, cfg->native_code + cfg->code_len, cfg->code_len, cfg->domain->friendly_name);
3827 MONO_TIME_TRACK (mono_jit_stats.jit_codegen, mono_codegen (cfg));
3828 mono_cfg_dump_ir (cfg, "codegen");
3829 if (cfg->exception_type)
3833 if (COMPILE_LLVM (cfg))
3834 InterlockedIncrement (&mono_jit_stats.methods_with_llvm);
3836 InterlockedIncrement (&mono_jit_stats.methods_without_llvm);
3838 MONO_TIME_TRACK (mono_jit_stats.jit_create_jit_info, cfg->jit_info = create_jit_info (cfg, method_to_compile));
3840 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
3841 if (cfg->extend_live_ranges) {
3842 /* Extend live ranges to cover the whole method */
3843 for (i = 0; i < cfg->num_varinfo; ++i)
3844 MONO_VARINFO (cfg, i)->live_range_end = cfg->code_len;
3848 MONO_TIME_TRACK (mono_jit_stats.jit_gc_create_gc_map, mini_gc_create_gc_map (cfg));
3849 MONO_TIME_TRACK (mono_jit_stats.jit_save_seq_point_info, mono_save_seq_point_info (cfg));
3851 if (!cfg->compile_aot) {
3852 mono_save_xdebug_info (cfg);
3853 mono_lldb_save_method_info (cfg);
3856 if (cfg->verbose_level >= 2) {
3857 char *id = mono_method_full_name (cfg->method, FALSE);
3858 mono_disassemble_code (cfg, cfg->native_code, cfg->code_len, id + 3);
3862 if (!cfg->compile_aot && !(flags & JIT_FLAG_DISCARD_RESULTS)) {
3863 mono_domain_lock (cfg->domain);
3864 mono_jit_info_table_add (cfg->domain, cfg->jit_info);
3866 if (cfg->method->dynamic)
3867 mono_dynamic_code_hash_lookup (cfg->domain, cfg->method)->ji = cfg->jit_info;
3868 mono_domain_unlock (cfg->domain);
3873 printf ("GSHAREDVT: %s\n", mono_method_full_name (cfg->method, TRUE));
3876 /* collect statistics */
3877 #ifndef DISABLE_PERFCOUNTERS
3878 mono_perfcounters->jit_methods++;
3879 mono_perfcounters->jit_bytes += header->code_size;
3881 mono_jit_stats.allocated_code_size += cfg->code_len;
3882 code_size_ratio = cfg->code_len;
3883 if (code_size_ratio > mono_jit_stats.biggest_method_size && mono_jit_stats.enabled) {
3884 mono_jit_stats.biggest_method_size = code_size_ratio;
3885 g_free (mono_jit_stats.biggest_method);
3886 mono_jit_stats.biggest_method = g_strdup_printf ("%s::%s)", method->klass->name, method->name);
3888 code_size_ratio = (code_size_ratio * 100) / header->code_size;
3889 if (code_size_ratio > mono_jit_stats.max_code_size_ratio && mono_jit_stats.enabled) {
3890 mono_jit_stats.max_code_size_ratio = code_size_ratio;
3891 g_free (mono_jit_stats.max_ratio_method);
3892 mono_jit_stats.max_ratio_method = g_strdup_printf ("%s::%s)", method->klass->name, method->name);
3894 mono_jit_stats.native_code_size += cfg->code_len;
3896 if (MONO_METHOD_COMPILE_END_ENABLED ())
3897 MONO_PROBE_METHOD_COMPILE_END (method, TRUE);
3899 mono_cfg_dump_close_group (cfg);
3905 mini_class_has_reference_variant_generic_argument (MonoCompile *cfg, MonoClass *klass, int context_used)
3908 MonoGenericContainer *container;
3909 MonoGenericInst *ginst;
3911 if (mono_class_is_ginst (klass)) {
3912 container = mono_class_get_generic_container (mono_class_get_generic_class (klass)->container_class);
3913 ginst = mono_class_get_generic_class (klass)->context.class_inst;
3914 } else if (mono_class_is_gtd (klass) && context_used) {
3915 container = mono_class_get_generic_container (klass);
3916 ginst = container->context.class_inst;
3921 for (i = 0; i < container->type_argc; ++i) {
3923 if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))
3925 type = ginst->type_argv [i];
3926 if (mini_type_is_reference (type))
3933 mono_arch_instrument_epilog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
3935 return mono_arch_instrument_epilog_full (cfg, func, p, enable_arguments, FALSE);
3939 mono_cfg_add_try_hole (MonoCompile *cfg, MonoExceptionClause *clause, guint8 *start, MonoBasicBlock *bb)
3941 TryBlockHole *hole = (TryBlockHole *)mono_mempool_alloc (cfg->mempool, sizeof (TryBlockHole));
3942 hole->clause = clause;
3943 hole->start_offset = start - cfg->native_code;
3944 hole->basic_block = bb;
3946 cfg->try_block_holes = g_slist_append_mempool (cfg->mempool, cfg->try_block_holes, hole);
3950 mono_cfg_set_exception (MonoCompile *cfg, int type)
3952 cfg->exception_type = type;
3955 /* Assumes ownership of the MSG argument */
3957 mono_cfg_set_exception_invalid_program (MonoCompile *cfg, char *msg)
3959 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
3960 mono_error_set_generic_error (&cfg->error, "System", "InvalidProgramException", "%s", msg);
3963 #endif /* DISABLE_JIT */
3966 create_jit_info_for_trampoline (MonoMethod *wrapper, MonoTrampInfo *info)
3968 MonoDomain *domain = mono_get_root_domain ();
3973 if (info->uw_info) {
3974 uw_info = info->uw_info;
3975 info_len = info->uw_info_len;
3977 uw_info = mono_unwind_ops_encode (info->unwind_ops, &info_len);
3980 jinfo = (MonoJitInfo *)mono_domain_alloc0 (domain, MONO_SIZEOF_JIT_INFO);
3981 jinfo->d.method = wrapper;
3982 jinfo->code_start = info->code;
3983 jinfo->code_size = info->code_size;
3984 jinfo->unwind_info = mono_cache_unwind_info (uw_info, info_len);
3992 GTimer *mono_time_track_start ()
3994 return g_timer_new ();
3997 void mono_time_track_end (double *time, GTimer *timer)
3999 g_timer_stop (timer);
4000 *time += g_timer_elapsed (timer, NULL);
4001 g_timer_destroy (timer);
4004 void mono_update_jit_stats (MonoCompile *cfg)
4006 mono_jit_stats.allocate_var += cfg->stat_allocate_var;
4007 mono_jit_stats.locals_stack_size += cfg->stat_locals_stack_size;
4008 mono_jit_stats.basic_blocks += cfg->stat_basic_blocks;
4009 mono_jit_stats.max_basic_blocks = MAX (cfg->stat_basic_blocks, mono_jit_stats.max_basic_blocks);
4010 mono_jit_stats.cil_code_size += cfg->stat_cil_code_size;
4011 mono_jit_stats.regvars += cfg->stat_n_regvars;
4012 mono_jit_stats.inlineable_methods += cfg->stat_inlineable_methods;
4013 mono_jit_stats.inlined_methods += cfg->stat_inlined_methods;
4014 mono_jit_stats.code_reallocs += cfg->stat_code_reallocs;
4018 * mono_jit_compile_method_inner:
4020 * Main entry point for the JIT.
4023 mono_jit_compile_method_inner (MonoMethod *method, MonoDomain *target_domain, int opt, MonoError *error)
4026 gpointer code = NULL;
4027 MonoJitInfo *jinfo, *info;
4029 MonoException *ex = NULL;
4031 MonoMethod *prof_method, *shared;
/* Internal calls and P/Invokes are never JITted directly: resolve the native
 * address, then compile a managed->native wrapper and return its code. */
4035 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
4036 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
4038 MonoMethodPInvoke* piinfo = (MonoMethodPInvoke *) method;
4040 if (!piinfo->addr) {
4041 if (method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL)
4042 piinfo->addr = mono_lookup_internal_call (method);
4043 else if (method->iflags & METHOD_IMPL_ATTRIBUTE_NATIVE)
4045 g_warning ("Method '%s' in assembly '%s' contains native code that cannot be executed by Mono in modules loaded from byte arrays. The assembly was probably created using C++/CLI.\n", mono_method_full_name (method, TRUE), method->klass->image->name);
4047 g_warning ("Method '%s' in assembly '%s' contains native code that cannot be executed by Mono on this platform. The assembly was probably created using C++/CLI.\n", mono_method_full_name (method, TRUE), method->klass->image->name);
4050 mono_lookup_pinvoke_call (method, NULL, NULL);
4052 nm = mono_marshal_get_native_wrapper (method, TRUE, mono_aot_only);
4053 gpointer compiled_method = mono_compile_method_checked (nm, error);
4054 return_val_if_nok (error, NULL);
4055 code = mono_get_addr_from_ftnptr (compiled_method);
/* NOTE(review): the second lookup below presumably only runs when the first
 * one fails — confirm against the full source. */
4056 jinfo = mono_jit_info_table_find (target_domain, (char *)code);
4058 jinfo = mono_jit_info_table_find (mono_domain_get (), (char *)code);
4060 MONO_PROFILER_RAISE (jit_done, (method, jinfo));
/* Runtime-implemented methods: only the delegate methods handled below are
 * recognized; anything else is reported as an invalid program. */
4062 } else if ((method->iflags & METHOD_IMPL_ATTRIBUTE_RUNTIME)) {
4063 const char *name = method->name;
4064 char *full_name, *msg;
4067 if (method->klass->parent == mono_defaults.multicastdelegate_class) {
4068 if (*name == '.' && (strcmp (name, ".ctor") == 0)) {
4069 MonoJitICallInfo *mi = mono_find_jit_icall_by_name ("ves_icall_mono_delegate_ctor");
4072 * We need to make sure this wrapper
4073 * is compiled because it might end up
4074 * in an (M)RGCTX if generic sharing
4075 * is enabled, and would be called
4076 * indirectly. If it were a
4077 * trampoline we'd try to patch that
4078 * indirect call, which is not
4081 return mono_get_addr_from_ftnptr ((gpointer)mono_icall_get_wrapper_full (mi, TRUE));
4082 } else if (*name == 'I' && (strcmp (name, "Invoke") == 0)) {
/* In llvm-only mode there are no trampolines, so compile the invoke wrapper */
4083 if (mono_llvm_only) {
4084 nm = mono_marshal_get_delegate_invoke (method, NULL);
4085 gpointer compiled_ptr = mono_compile_method_checked (nm, error);
4086 mono_error_assert_ok (error);
4087 return mono_get_addr_from_ftnptr (compiled_ptr);
4089 return mono_create_delegate_trampoline (target_domain, method->klass);
4090 } else if (*name == 'B' && (strcmp (name, "BeginInvoke") == 0)) {
4091 nm = mono_marshal_get_delegate_begin_invoke (method);
4092 gpointer compiled_ptr = mono_compile_method_checked (nm, error);
4093 mono_error_assert_ok (error);
4094 return mono_get_addr_from_ftnptr (compiled_ptr);
4095 } else if (*name == 'E' && (strcmp (name, "EndInvoke") == 0)) {
4096 nm = mono_marshal_get_delegate_end_invoke (method);
4097 gpointer compiled_ptr = mono_compile_method_checked (nm, error);
4098 mono_error_assert_ok (error);
4099 return mono_get_addr_from_ftnptr (compiled_ptr);
/* Unrecognized runtime-implemented method: fail with InvalidProgramException */
4103 full_name = mono_method_full_name (method, TRUE);
4104 msg = g_strdup_printf ("Unrecognizable runtime implemented method '%s'", full_name);
4105 ex = mono_exception_from_name_msg (mono_defaults.corlib, "System", "InvalidProgramException", msg);
4106 mono_error_set_exception_instance (error, ex);
/* gsharedvt in/out wrappers: their bodies are implemented in assembly like a
 * trampoline, so return (and cache in a static) the trampoline code instead
 * of compiling anything. */
4112 if (method->wrapper_type == MONO_WRAPPER_UNKNOWN) {
4113 WrapperInfo *info = mono_marshal_get_wrapper_info (method);
4115 if (info->subtype == WRAPPER_SUBTYPE_GSHAREDVT_IN || info->subtype == WRAPPER_SUBTYPE_GSHAREDVT_OUT) {
4116 static MonoTrampInfo *in_tinfo, *out_tinfo;
4117 MonoTrampInfo *tinfo;
4119 gboolean is_in = info->subtype == WRAPPER_SUBTYPE_GSHAREDVT_IN;
4121 if (is_in && in_tinfo)
4122 return in_tinfo->code;
4123 else if (!is_in && out_tinfo)
4124 return out_tinfo->code;
4127 * This is a special wrapper whose body is implemented in assembly, like a trampoline. We use a wrapper so EH
4129 * FIXME: The caller signature doesn't match the callee, which might cause problems on some platforms
4132 mono_aot_get_trampoline_full (is_in ? "gsharedvt_trampoline" : "gsharedvt_out_trampoline", &tinfo);
4134 mono_arch_get_gsharedvt_trampoline (&tinfo, FALSE);
/* Register a jit info so the unwinder can walk through the trampoline */
4135 jinfo = create_jit_info_for_trampoline (method, tinfo);
4136 mono_jit_info_table_add (mono_get_root_domain (), jinfo);
/* JITting is forbidden in aot-only mode; fail with an execution engine error */
4145 if (mono_aot_only) {
4146 char *fullname = mono_method_full_name (method, TRUE);
4147 mono_error_set_execution_engine (error, "Attempting to JIT compile method '%s' while running in aot-only mode. See https://developer.xamarin.com/guides/ios/advanced_topics/limitations/ for more information.\n", fullname);
/* Run the actual compilation, accounting the elapsed wall-clock time */
4153 jit_timer = mono_time_track_start ();
4154 cfg = mini_method_compile (method, opt, target_domain, JIT_FLAG_RUN_CCTORS, 0, -1);
4155 double jit_time = 0.0;
4156 mono_time_track_end (&jit_time, jit_timer);
4157 mono_jit_stats.jit_time += jit_time;
4159 prof_method = cfg->method;
/* Map a failed compilation to a managed exception stored in ERROR */
4161 switch (cfg->exception_type) {
4162 case MONO_EXCEPTION_NONE:
4164 case MONO_EXCEPTION_TYPE_LOAD:
4165 case MONO_EXCEPTION_MISSING_FIELD:
4166 case MONO_EXCEPTION_MISSING_METHOD:
4167 case MONO_EXCEPTION_FILE_NOT_FOUND:
4168 case MONO_EXCEPTION_BAD_IMAGE:
4169 case MONO_EXCEPTION_INVALID_PROGRAM: {
4170 /* Throw a type load exception if needed */
4171 if (cfg->exception_ptr) {
4172 ex = mono_class_get_exception_for_failure ((MonoClass *)cfg->exception_ptr);
4174 if (cfg->exception_type == MONO_EXCEPTION_MISSING_FIELD)
4175 ex = mono_exception_from_name_msg (mono_defaults.corlib, "System", "MissingFieldException", cfg->exception_message);
4176 else if (cfg->exception_type == MONO_EXCEPTION_MISSING_METHOD)
4177 ex = mono_exception_from_name_msg (mono_defaults.corlib, "System", "MissingMethodException", cfg->exception_message);
4178 else if (cfg->exception_type == MONO_EXCEPTION_TYPE_LOAD)
4179 ex = mono_exception_from_name_msg (mono_defaults.corlib, "System", "TypeLoadException", cfg->exception_message);
4180 else if (cfg->exception_type == MONO_EXCEPTION_FILE_NOT_FOUND)
4181 ex = mono_exception_from_name_msg (mono_defaults.corlib, "System.IO", "FileNotFoundException", cfg->exception_message);
4182 else if (cfg->exception_type == MONO_EXCEPTION_BAD_IMAGE)
4183 ex = mono_get_exception_bad_image_format (cfg->exception_message);
4184 else if (cfg->exception_type == MONO_EXCEPTION_INVALID_PROGRAM)
4185 ex = mono_exception_from_name_msg (mono_defaults.corlib, "System", "InvalidProgramException", cfg->exception_message);
4187 g_assert_not_reached ();
4191 case MONO_EXCEPTION_MONO_ERROR:
4192 // FIXME: MonoError has no copy ctor
4193 g_assert (!mono_error_ok (&cfg->error));
4194 ex = mono_error_convert_to_exception (&cfg->error);
4197 g_assert_not_reached ();
4201 MONO_PROFILER_RAISE (jit_failed, (method));
4203 mono_destroy_compile (cfg);
4204 mono_error_set_exception_instance (error, ex);
/* Generic-sharable methods are registered under their shared counterpart */
4209 if (mono_method_is_generic_sharable (method, FALSE))
4210 shared = mini_get_shared_method (method);
4214 mono_domain_lock (target_domain);
4216 /* Check if some other thread already did the job. In this case, we can
4217 discard the code this thread generated. */
4219 info = mini_lookup_method (target_domain, method, shared);
4221 /* We can't use a domain specific method in another domain */
4222 if ((target_domain == mono_domain_get ()) || info->domain_neutral) {
4223 code = info->code_start;
4225 discarded_jit_time += jit_time;
4229 /* The lookup + insert is atomic since this is done inside the domain lock */
4230 mono_domain_jit_code_hash_lock (target_domain);
4231 mono_internal_hash_table_insert (&target_domain->jit_code_hash, cfg->jit_info->d.method, cfg->jit_info);
4232 mono_domain_jit_code_hash_unlock (target_domain);
4234 code = cfg->native_code;
4236 if (cfg->gshared && mono_method_is_generic_sharable (method, FALSE))
4237 mono_stats.generics_shared_methods++;
4239 mono_stats.gsharedvt_methods++;
4242 jinfo = cfg->jit_info;
4245 * Update global stats while holding a lock, instead of doing many
4246 * InterlockedIncrement operations during JITting.
4248 mono_update_jit_stats (cfg);
4250 mono_destroy_compile (cfg);
/* Patch pending direct jumps to this method now that its code exists */
4253 if (domain_jit_info (target_domain)->jump_target_hash) {
4254 MonoJumpInfo patch_info;
4255 MonoJumpList *jlist;
4257 jlist = (MonoJumpList *)g_hash_table_lookup (domain_jit_info (target_domain)->jump_target_hash, method);
4259 patch_info.next = NULL;
4260 patch_info.ip.i = 0;
4261 patch_info.type = MONO_PATCH_INFO_METHOD_JUMP;
4262 patch_info.data.method = method;
4263 g_hash_table_remove (domain_jit_info (target_domain)->jump_target_hash, method);
4265 #ifdef MONO_ARCH_HAVE_PATCH_CODE_NEW
4266 for (tmp = jlist->list; tmp; tmp = tmp->next) {
4267 gpointer target = mono_resolve_patch_target (NULL, target_domain, (guint8 *)tmp->data, &patch_info, TRUE, error);
4268 if (!mono_error_ok (error))
4270 mono_arch_patch_code_new (NULL, target_domain, (guint8 *)tmp->data, &patch_info, target);
4273 for (tmp = jlist->list; tmp; tmp = tmp->next) {
4274 mono_arch_patch_code (NULL, NULL, target_domain, tmp->data, &patch_info, TRUE, error);
4282 /* Update llvm callees */
4283 if (domain_jit_info (target_domain)->llvm_jit_callees) {
4284 GSList *callees = g_hash_table_lookup (domain_jit_info (target_domain)->llvm_jit_callees, method);
4287 for (l = callees; l; l = l->next) {
4288 gpointer *addr = (gpointer*)l->data;
4294 mono_emit_jit_map (jinfo);
4296 mono_domain_unlock (target_domain);
4298 if (!mono_error_ok (error))
/* Ensure the declaring class vtable exists; a failure here is recorded as a
 * class failure on ERROR */
4301 vtable = mono_class_vtable (target_domain, method->klass);
4303 g_assert (mono_class_has_failure (method->klass));
4304 mono_error_set_for_class_failure (error, method->klass);
4308 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
4309 if (mono_marshal_method_from_wrapper (method)) {
4310 /* Native func wrappers have no method */
4311 /* The profiler doesn't know about wrappers, so pass the original icall method */
4312 MONO_PROFILER_RAISE (jit_done, (mono_marshal_method_from_wrapper (method), jinfo));
4315 MONO_PROFILER_RAISE (jit_done, (method, jinfo));
4316 if (prof_method != method)
4317 MONO_PROFILER_RAISE (jit_done, (prof_method, jinfo));
/* Run runtime class initialization except for remoting invoke wrappers */
4319 if (!(method->wrapper_type == MONO_WRAPPER_REMOTING_INVOKE ||
4320 method->wrapper_type == MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK ||
4321 method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE)) {
4322 if (!mono_runtime_class_init_full (vtable, error))
4329 * mini_get_underlying_type:
4331 * Return the type the JIT will use during compilation.
4332 * Handles: byref, enums, native types, bool/char, ref types, generic sharing.
4333 * For gsharedvt types, it will return the original VAR/MVAR.
4336 mini_get_underlying_type (MonoType *type)
4338 return mini_type_get_underlying_type (type);
4342 mini_jit_init (void)
4344 mono_counters_register ("Discarded method code", MONO_COUNTER_JIT | MONO_COUNTER_INT, &discarded_code);
4345 mono_counters_register ("Time spent JITting discarded code", MONO_COUNTER_JIT | MONO_COUNTER_DOUBLE, &discarded_jit_time);
4347 mono_os_mutex_init_recursive (&jit_mutex);
4349 current_backend = g_new0 (MonoBackend, 1);
4350 init_backend (current_backend);
4355 mini_jit_cleanup (void)
4358 g_free (emul_opcode_map);
4359 g_free (emul_opcode_opcodes);
4365 mono_llvm_emit_aot_file_info (MonoAotFileInfo *info, gboolean has_jitted_code)
4367 g_assert_not_reached ();
4370 void mono_llvm_emit_aot_data (const char *symbol, guint8 *data, int data_len)
4372 g_assert_not_reached ();
4377 #if !defined(ENABLE_LLVM_RUNTIME) && !defined(ENABLE_LLVM)
/* Stub: the C++ exception throwing helper only exists with LLVM enabled. */
void
mono_llvm_cpp_throw_exception (void)
{
	g_assert_not_reached ();
}
4390 mini_method_compile (MonoMethod *method, guint32 opts, MonoDomain *domain, JitFlags flags, int parts, int aot_method_index)
4392 g_assert_not_reached ();
4397 mono_destroy_compile (MonoCompile *cfg)
4399 g_assert_not_reached ();
4403 mono_add_patch_info (MonoCompile *cfg, int ip, MonoJumpInfoType type, gconstpointer target)
4405 g_assert_not_reached ();
4408 #endif /* DISABLE_JIT */