#define mono_jit_unlock() mono_mutex_unlock (&jit_mutex)
static mono_mutex_t jit_mutex;
-/* Whenever to check for pending exceptions in managed-to-native wrappers */
-gboolean check_for_pending_exc = TRUE;
-
gpointer
mono_realloc_native_code (MonoCompile *cfg)
{
guint
mono_type_to_store_membase (MonoCompile *cfg, MonoType *type)
{
- type = mini_get_underlying_type (cfg, type);
+ type = mini_get_underlying_type (type);
handle_enum:
switch (type->type) {
goto handle_enum;
case MONO_TYPE_VAR:
case MONO_TYPE_MVAR:
- g_assert (mini_type_var_is_vt (cfg, type));
+ g_assert (mini_type_var_is_vt (type));
return OP_STOREV_MEMBASE;
default:
g_error ("unknown type 0x%02x in type_to_store_membase", type->type);
guint
mono_type_to_load_membase (MonoCompile *cfg, MonoType *type)
{
- type = mini_get_underlying_type (cfg, type);
+ type = mini_get_underlying_type (type);
switch (type->type) {
case MONO_TYPE_I1:
break;
case MONO_TYPE_VAR:
case MONO_TYPE_MVAR:
- g_assert (cfg->generic_sharing_context);
- g_assert (mini_type_var_is_vt (cfg, type));
+ g_assert (cfg->gshared);
+ g_assert (mini_type_var_is_vt (type));
return OP_LOADV_MEMBASE;
default:
g_error ("unknown type 0x%02x in type_to_load_membase", type->type);
static guint
mini_type_to_ldind (MonoCompile* cfg, MonoType *type)
{
- type = mini_get_underlying_type (cfg, type);
- if (cfg->generic_sharing_context && !type->byref && (type->type == MONO_TYPE_VAR || type->type == MONO_TYPE_MVAR)) {
- g_assert (mini_type_var_is_vt (cfg, type));
+ type = mini_get_underlying_type (type);
+ if (cfg->gshared && !type->byref && (type->type == MONO_TYPE_VAR || type->type == MONO_TYPE_MVAR)) {
+ g_assert (mini_type_var_is_vt (type));
return CEE_LDOBJ;
}
return mono_type_to_ldind (type);
guint
mini_type_to_stind (MonoCompile* cfg, MonoType *type)
{
- type = mini_get_underlying_type (cfg, type);
- if (cfg->generic_sharing_context && !type->byref && (type->type == MONO_TYPE_VAR || type->type == MONO_TYPE_MVAR)) {
- g_assert (mini_type_var_is_vt (cfg, type));
+ type = mini_get_underlying_type (type);
+ if (cfg->gshared && !type->byref && (type->type == MONO_TYPE_VAR || type->type == MONO_TYPE_MVAR)) {
+ g_assert (mini_type_var_is_vt (type));
return CEE_STOBJ;
}
return mono_type_to_stind (type);
int num = cfg->num_varinfo;
gboolean regpair;
- type = mini_get_underlying_type (cfg, type);
+ type = mini_get_underlying_type (type);
if ((num + 1) >= cfg->varinfo_count) {
int orig_count = cfg->varinfo_count;
if (type->byref) {
mono_mark_vreg_as_mp (cfg, vreg);
} else {
- if ((MONO_TYPE_ISSTRUCT (type) && inst->klass->has_references) || mini_type_is_reference (cfg, type)) {
+ if ((MONO_TYPE_ISSTRUCT (type) && inst->klass->has_references) || mini_type_is_reference (type)) {
inst->flags |= MONO_INST_GC_TRACK;
mono_mark_vreg_as_ref (cfg, vreg);
}
mono_compile_create_var (MonoCompile *cfg, MonoType *type, int opcode)
{
int dreg;
- type = mini_get_underlying_type (cfg, type);
+ type = mini_get_underlying_type (type);
if (mono_type_is_long (type))
dreg = mono_alloc_dreg (cfg, STACK_I8);
return FALSE;
if (assembly->in_gac || assembly->image == mono_defaults.corlib)
return FALSE;
- if (mono_security_enabled ())
- return FALSE;
return mono_assembly_has_skip_verification (assembly);
}
inst = cfg->varinfo [vmv->idx];
t = mono_type_get_underlying_type (inst->inst_vtype);
- if (cfg->gsharedvt && mini_is_gsharedvt_variable_type (cfg, t))
+ if (cfg->gsharedvt && mini_is_gsharedvt_variable_type (t))
continue;
/* inst->backend.is_pinvoke indicates native sized value types, this is used by the
else {
int ialign;
- size = mini_type_stack_size (NULL, t, &ialign);
+ size = mini_type_stack_size (t, &ialign);
align = ialign;
if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (t)))
if (cfg->disable_reuse_stack_slots)
reuse_slot = FALSE;
- t = mini_get_underlying_type (cfg, t);
+ t = mini_get_underlying_type (t);
switch (t->type) {
case MONO_TYPE_GENERICINST:
if (!mono_type_generic_inst_is_valuetype (t)) {
inst = cfg->varinfo [vmv->idx];
t = mono_type_get_underlying_type (inst->inst_vtype);
- if (cfg->gsharedvt && mini_is_gsharedvt_variable_type (cfg, t))
+ if (cfg->gsharedvt && mini_is_gsharedvt_variable_type (t))
continue;
/* inst->backend.is_pinvoke indicates native sized value types, this is used by the
} else {
int ialign;
- size = mini_type_stack_size (NULL, t, &ialign);
+ size = mini_type_stack_size (t, &ialign);
align = ialign;
if (mono_class_from_mono_type (t)->exception_type)
if (cfg->disable_reuse_stack_slots)
reuse_slot = FALSE;
- t = mini_get_underlying_type (cfg, t);
+ t = mini_get_underlying_type (t);
switch (t->type) {
case MONO_TYPE_GENERICINST:
if (!mono_type_generic_inst_is_valuetype (t)) {
void
mono_destroy_compile (MonoCompile *cfg)
{
+#ifndef DISABLE_JIT
GSList *l;
if (cfg->header)
g_free (cfg->vars);
g_free (cfg->exception_message);
g_free (cfg);
+#endif
}
#ifndef DISABLE_JIT
MonoInst*
mono_create_tls_get (MonoCompile *cfg, MonoTlsKey key)
{
+ if (!MONO_ARCH_HAVE_TLS_GET)
+ return NULL;
+
/*
* TLS offsets might be different at AOT time, so load them from a GOT slot and
* use a different opcode.
*/
if (cfg->compile_aot) {
- if (MONO_ARCH_HAVE_TLS_GET && ARCH_HAVE_TLS_GET_REG) {
+ if (ARCH_HAVE_TLS_GET_REG) {
MonoInst *ins, *c;
EMIT_NEW_TLS_OFFSETCONST (cfg, c, key);
int max_epilog_size;
guint8 *code;
MonoDomain *code_domain;
+ guint unwindlen = 0;
if (mono_using_xdebug)
/*
if (cfg->opt & MONO_OPT_PEEPHOLE)
mono_arch_peephole_pass_1 (cfg, bb);
- if (!cfg->globalra)
- mono_local_regalloc (cfg, bb);
+ mono_local_regalloc (cfg, bb);
if (cfg->opt & MONO_OPT_PEEPHOLE)
mono_arch_peephole_pass_2 (cfg, bb);
if (cfg->gen_seq_points && !cfg->gen_sdb_seq_points)
- bb_deduplicate_op_il_seq_points (cfg, bb);
+ mono_bb_deduplicate_op_il_seq_points (cfg, bb);
}
if (cfg->prof_options & MONO_PROFILE_COVERAGE)
#endif
/* fixme: align to MONO_ARCH_CODE_ALIGNMENT */
- if (cfg->method->dynamic) {
- guint unwindlen = 0;
#ifdef MONO_ARCH_HAVE_UNWIND_TABLE
- unwindlen = mono_arch_unwindinfo_get_size (cfg->arch.unwindinfo);
+ unwindlen = mono_arch_unwindinfo_get_size (cfg->arch.unwindinfo);
#endif
+
+ if (cfg->method->dynamic) {
/* Allocate the code into a separate memory pool so it can be freed */
cfg->dynamic_info = g_new0 (MonoJitDynamicMethodInfo, 1);
- cfg->dynamic_info->code_mp = mono_code_manager_new_dynamic (cfg->thunk_area);
+ cfg->dynamic_info->code_mp = mono_code_manager_new_dynamic ();
mono_domain_lock (cfg->domain);
mono_dynamic_code_hash_insert (cfg->domain, cfg->method, cfg->dynamic_info);
mono_domain_unlock (cfg->domain);
if (mono_using_xdebug)
/* See the comment for cfg->code_domain */
- code = mono_domain_code_reserve (code_domain, cfg->code_size + unwindlen);
+ code = mono_domain_code_reserve (code_domain, cfg->code_size + cfg->thunk_area + unwindlen);
else
- code = mono_code_manager_reserve (cfg->dynamic_info->code_mp, cfg->code_size + unwindlen);
+ code = mono_code_manager_reserve (cfg->dynamic_info->code_mp, cfg->code_size + cfg->thunk_area + unwindlen);
} else {
- guint unwindlen = 0;
-#ifdef MONO_ARCH_HAVE_UNWIND_TABLE
- unwindlen = mono_arch_unwindinfo_get_size (cfg->arch.unwindinfo);
-#endif
- code = mono_domain_code_reserve (code_domain, cfg->code_size + unwindlen);
+ code = mono_domain_code_reserve (code_domain, cfg->code_size + cfg->thunk_area + unwindlen);
}
#if defined(__native_client_codegen__) && defined(__native_client__)
nacl_allow_target_modification (TRUE);
#endif
+ if (cfg->thunk_area) {
+ cfg->thunks_offset = cfg->code_size + unwindlen;
+ cfg->thunks = code + cfg->thunks_offset;
+ memset (cfg->thunks, 0, cfg->thunk_area);
+ }
g_assert (code);
memcpy (code, cfg->native_code, cfg->code_len);
is_generic = TRUE;
}
- if (cfg->generic_sharing_context)
+ if (cfg->gshared)
g_assert (is_generic);
}
mono_nacl_fix_patches (cfg->native_code, cfg->patch_info);
#endif
- mono_arch_patch_code (cfg->method, cfg->domain, cfg->native_code, cfg->patch_info, cfg->dynamic_info ? cfg->dynamic_info->code_mp : NULL, cfg->run_cctors);
+#ifdef MONO_ARCH_HAVE_PATCH_CODE_NEW
+ {
+ MonoJumpInfo *ji;
+ gpointer target;
+
+ for (ji = cfg->patch_info; ji; ji = ji->next) {
+ if (cfg->compile_aot) {
+ switch (ji->type) {
+ case MONO_PATCH_INFO_BB:
+ case MONO_PATCH_INFO_LABEL:
+ break;
+ default:
+ /* No need to patch these */
+ continue;
+ }
+ }
+
+ if (ji->type == MONO_PATCH_INFO_NONE)
+ continue;
+
+ target = mono_resolve_patch_target (cfg->method, cfg->domain, cfg->native_code, ji, cfg->run_cctors);
+ mono_arch_patch_code_new (cfg, cfg->domain, cfg->native_code, ji, target);
+ }
+ }
+#else
+ mono_arch_patch_code (cfg, cfg->method, cfg->domain, cfg->native_code, cfg->patch_info, cfg->run_cctors);
+#endif
if (cfg->method->dynamic) {
if (mono_using_xdebug)
g_assert (method_to_compile == cfg->method);
header = cfg->header;
- if (cfg->generic_sharing_context)
+ if (cfg->gshared)
flags |= JIT_INFO_HAS_GENERIC_JIT_INFO;
if (cfg->arch_eh_jit_info) {
* mono_arch_get_argument_info () is not signal safe.
*/
arg_info = g_newa (MonoJitArgumentInfo, sig->param_count + 1);
- stack_size = mono_arch_get_argument_info (cfg->generic_sharing_context, sig, sig->param_count, arg_info);
+ stack_size = mono_arch_get_argument_info (sig, sig->param_count, arg_info);
if (stack_size)
flags |= JIT_INFO_HAS_ARCH_EH_INFO;
if (cfg->has_unwind_info_for_epilog && !(flags & JIT_INFO_HAS_ARCH_EH_INFO))
flags |= JIT_INFO_HAS_ARCH_EH_INFO;
-
+
+ if (cfg->thunk_area)
+ flags |= JIT_INFO_HAS_THUNK_INFO;
+
if (cfg->try_block_holes) {
for (tmp = cfg->try_block_holes; tmp; tmp = tmp->next) {
TryBlockHole *hole = tmp->data;
printf ("Number of try block holes %d\n", num_holes);
}
- if (mono_security_method_has_declsec (cfg->method_to_register))
- flags |= JIT_INFO_HAS_ARCH_EH_INFO;
-
if (COMPILE_LLVM (cfg))
num_clauses = cfg->llvm_ex_info_len;
else
if (COMPILE_LLVM (cfg))
jinfo->from_llvm = TRUE;
- if (cfg->generic_sharing_context) {
+ if (cfg->gshared) {
MonoInst *inst;
MonoGenericJitInfo *gi;
GSList *loclist = NULL;
info->stack_size = stack_size;
}
+ if (cfg->thunk_area) {
+ MonoThunkJitInfo *info;
+
+ info = mono_jit_info_get_thunk_info (jinfo);
+ info->thunks_offset = cfg->thunks_offset;
+ info->thunks_size = cfg->thunk_area;
+ }
+
if (COMPILE_LLVM (cfg)) {
if (num_clauses)
memcpy (&jinfo->clauses [0], &cfg->llvm_ex_info [0], num_clauses * sizeof (MonoJitExceptionInfo));
#ifndef DISABLE_JIT
+#if defined(__native_client_codegen__) || USE_COOP_GC
+
static void
mono_create_gc_safepoint (MonoCompile *cfg, MonoBasicBlock *bblock)
{
-#if defined(__native_client_codegen__) || USE_COOP_GC
-
MonoInst *poll_addr, *ins;
- if (cfg->verbose_level)
+ if (cfg->verbose_level > 1)
printf ("ADDING SAFE POINT TO BB %d\n", bblock->block_num);
#if defined(__native_client_codegen__)
if (bblock->flags & BB_EXCEPTION_HANDLER) {
MonoInst *eh_op = bblock->code;
- // we only skip the ops that start EH blocks.
- if (eh_op && eh_op->opcode != OP_START_HANDLER && eh_op->opcode != OP_GET_EX_OBJ)
+ if (eh_op && eh_op->opcode != OP_START_HANDLER && eh_op->opcode != OP_GET_EX_OBJ) {
eh_op = NULL;
+ } else {
+ MonoInst *next_eh_op = eh_op ? eh_op->next : NULL;
+			// skip all EH related ops
+ while (next_eh_op && (next_eh_op->opcode == OP_START_HANDLER || next_eh_op->opcode == OP_GET_EX_OBJ)) {
+ eh_op = next_eh_op;
+ next_eh_op = eh_op->next;
+ }
+ }
mono_bblock_insert_after_ins (bblock, eh_op, poll_addr);
mono_bblock_insert_after_ins (bblock, poll_addr, ins);
mono_bblock_insert_before_ins (bblock, NULL, poll_addr);
mono_bblock_insert_after_ins (bblock, poll_addr, ins);
}
-
-#endif
}
/*
static void
mono_insert_safepoints (MonoCompile *cfg)
{
-#if defined(__native_client_codegen__) || defined(USE_COOP_GC)
MonoBasicBlock *bb;
+ if (cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
+ WrapperInfo *info = mono_marshal_get_wrapper_info (cfg->method);
+#if defined(__native_client__) || defined(__native_client_codegen__)
+ gpointer poll_func = &mono_nacl_gc;
+#elif defined(USE_COOP_GC)
+ gpointer poll_func = &mono_threads_state_poll;
+#else
+ gpointer poll_func = NULL;
+#endif
+
+ if (info && info->subtype == WRAPPER_SUBTYPE_ICALL_WRAPPER && info->d.icall.func == poll_func) {
+ if (cfg->verbose_level > 1)
+ printf ("SKIPPING SAFEPOINTS for the polling function icall\n");
+ return;
+ }
+ }
+
- if (cfg->verbose_level)
+ if (cfg->verbose_level > 1)
printf ("INSERTING SAFEPOINTS\n");
+ if (cfg->verbose_level > 2)
+ mono_print_code (cfg, "BEFORE SAFEPOINTS");
for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
if (bb->loop_body_start || bb == cfg->bb_entry || bb->flags & BB_EXCEPTION_HANDLER)
mono_create_gc_safepoint (cfg, bb);
}
-#endif
+
+ if (cfg->verbose_level > 2)
+ mono_print_code (cfg, "AFTER SAFEPOINTS");
+
+}
+
+#else
+
+/*
+ * Safepoint insertion is only needed for the NaCl and cooperative-GC
+ * configurations selected by the #if above; everywhere else the pass
+ * is compiled as this no-op.
+ */
+static void
+mono_insert_safepoints (MonoCompile *cfg)
+{
}
+#endif
+
/*
* mini_method_compile:
* @method: the method to compile
* field in the returned struct to see if compilation succeded.
*/
MonoCompile*
-mini_method_compile (MonoMethod *method, guint32 opts, MonoDomain *domain, JitFlags flags, int parts)
+mini_method_compile (MonoMethod *method, guint32 opts, MonoDomain *domain, JitFlags flags, int parts, int aot_method_index)
{
MonoMethodHeader *header;
MonoMethodSignature *sig;
if (MONO_METHOD_COMPILE_BEGIN_ENABLED ())
MONO_PROBE_METHOD_COMPILE_BEGIN (method);
+ gsharedvt_method = is_gsharedvt_method (method);
+
/*
* In AOT mode, method can be the following:
- * - the generic method definition. In this case, we are compiling the fully shared
- * version of the method, i.e. the version where all the type parameters are
- * reference types.
* - a gsharedvt method.
- * - a method inflated with type parameters. This is for partial sharing.
+ * - a method inflated with type parameters. This is for ref/partial sharing.
* - a method inflated with concrete types.
*/
- if (compile_aot)
- try_generic_shared = mono_class_generic_sharing_enabled (method->klass) &&
- (opts & MONO_OPT_GSHARED) && ((method->is_generic || method->klass->generic_container) || (!method->klass->generic_class && mono_method_is_generic_sharable_full (method, TRUE, FALSE, FALSE)));
- else
+ if (compile_aot) {
+ if (is_open_method (method)) {
+ try_generic_shared = TRUE;
+ method_is_gshared = TRUE;
+ } else {
+ try_generic_shared = FALSE;
+ }
+ g_assert (opts & MONO_OPT_GSHARED);
+ } else {
try_generic_shared = mono_class_generic_sharing_enabled (method->klass) &&
(opts & MONO_OPT_GSHARED) && mono_method_is_generic_sharable (method, FALSE);
+ if (mini_is_gsharedvt_sharable_method (method)) {
+ if (!mono_debug_count ())
+ try_generic_shared = FALSE;
+ }
+ }
/*
if (try_generic_shared && !mono_debug_count ())
mono_stats.generics_unsharable_methods++;
}
- if (mini_is_gsharedvt_sharable_method (method)) {
- if (!mono_debug_count ())
- try_generic_shared = FALSE;
- if (compile_aot)
- try_generic_shared = FALSE;
- }
-
- gsharedvt_method = is_gsharedvt_method (method);
- if (gsharedvt_method || (compile_aot && is_open_method (method))) {
- /* We are AOTing a gshared method directly */
- method_is_gshared = TRUE;
- g_assert (compile_aot);
- try_generic_shared = TRUE;
- }
-
#ifdef ENABLE_LLVM
try_llvm = mono_use_llvm || llvm;
#endif
cfg->check_pinvoke_callconv = debug_options.check_pinvoke_callconv;
cfg->disable_direct_icalls = disable_direct_icalls;
if (try_generic_shared)
- cfg->generic_sharing_context = (MonoGenericSharingContext*)&cfg->gsctx;
+ cfg->gshared = TRUE;
cfg->compile_llvm = try_llvm;
cfg->token_info_hash = g_hash_table_new (NULL, NULL);
+ if (cfg->compile_aot)
+ cfg->method_index = aot_method_index;
if (!mono_debug_count ())
cfg->opt &= ~MONO_OPT_FLOAT32;
return cfg;
}
- if (cfg->generic_sharing_context && (gsharedvt_method || mini_is_gsharedvt_sharable_method (method))) {
+ if (cfg->gshared && (gsharedvt_method || mini_is_gsharedvt_sharable_method (method))) {
MonoMethodInflated *inflated;
MonoGenericContext *context;
cfg->gsharedvt = TRUE;
// FIXME:
cfg->disable_llvm = TRUE;
+ cfg->exception_message = g_strdup ("gsharedvt");
}
- if (cfg->generic_sharing_context) {
+ if (cfg->gshared) {
method_to_register = method_to_compile;
- cfg->gshared = TRUE;
} else {
g_assert (method == method_to_compile);
method_to_register = method;
char *method_name;
method_name = mono_method_full_name (method, TRUE);
- g_print ("converting %s%s%smethod %s\n", COMPILE_LLVM (cfg) ? "llvm " : "", cfg->gsharedvt ? "gsharedvt " : "", (cfg->generic_sharing_context && !cfg->gsharedvt) ? "gshared " : "", method_name);
+ g_print ("converting %s%s%smethod %s\n", COMPILE_LLVM (cfg) ? "llvm " : "", cfg->gsharedvt ? "gsharedvt " : "", (cfg->gshared && !cfg->gsharedvt) ? "gshared " : "", method_name);
/*
if (COMPILE_LLVM (cfg))
g_print ("converting llvm method %s\n", method_name = mono_method_full_name (method, TRUE));
else if (cfg->gsharedvt)
g_print ("converting gsharedvt method %s\n", method_name = mono_method_full_name (method_to_compile, TRUE));
- else if (cfg->generic_sharing_context)
+ else if (cfg->gshared)
g_print ("converting shared method %s\n", method_name = mono_method_full_name (method_to_compile, TRUE));
else
g_print ("converting method %s\n", method_name = mono_method_full_name (method, TRUE));
g_free (method_name);
}
- if (cfg->opt & (MONO_OPT_ABCREM | MONO_OPT_SSAPRE))
+ if (cfg->opt & MONO_OPT_ABCREM)
cfg->opt |= MONO_OPT_SSA;
- /*
- if ((cfg->method->klass->image != mono_defaults.corlib) || (strstr (cfg->method->klass->name, "StackOverflowException") && strstr (cfg->method->name, ".ctor")) || (strstr (cfg->method->klass->name, "OutOfMemoryException") && strstr (cfg->method->name, ".ctor")))
- cfg->globalra = TRUE;
- */
-
- //cfg->globalra = TRUE;
-
- //if (!strcmp (cfg->method->klass->name, "Tests") && !cfg->method->wrapper_type)
- // cfg->globalra = TRUE;
-
- {
- static int count = 0;
- count ++;
-
- /*
- if (g_getenv ("COUNT2")) {
- cfg->globalra = TRUE;
- if (count == atoi (g_getenv ("COUNT2")))
- printf ("LAST: %s\n", mono_method_full_name (cfg->method, TRUE));
- if (count > atoi (g_getenv ("COUNT2")))
- cfg->globalra = FALSE;
- }
- */
- }
-
- if (header->clauses)
- cfg->globalra = FALSE;
-
- if (cfg->method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
- /* The code in the prolog clobbers caller saved registers */
- cfg->globalra = FALSE;
-
- // FIXME: Disable globalra in case of tracing/profiling
-
- if (cfg->method->save_lmf)
- /* The LMF saving code might clobber caller saved registers */
- cfg->globalra = FALSE;
-
- if (header->code_size > 5000)
- // FIXME:
- /* Too large bblocks could overflow the ins positions */
- cfg->globalra = FALSE;
-
cfg->rs = mono_regstate_new ();
- if (cfg->globalra)
- cfg->rs->next_vreg = MONO_MAX_IREGS + MONO_MAX_FREGS;
cfg->next_vreg = cfg->rs->next_vreg;
/* FIXME: Fix SSA to handle branches inside bblocks */
*/
mono_compile_create_vars (cfg);
- /* SSAPRE is not supported on linear IR */
- cfg->opt &= ~MONO_OPT_SSAPRE;
-
i = mono_method_to_ir (cfg, method_to_compile, NULL, NULL, NULL, NULL, 0, FALSE);
if (i < 0) {
if (!COMPILE_LLVM (cfg))
mono_if_conversion (cfg);
- if ((cfg->opt & MONO_OPT_SSAPRE) || cfg->globalra)
- mono_remove_critical_edges (cfg);
+ MONO_SUSPEND_CHECK ();
/* Depth-first ordering on basic blocks */
cfg->bblocks = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * (cfg->num_bblocks + 1));
if (cfg->comp_done & MONO_COMP_SSA && !COMPILE_LLVM (cfg)) {
//mono_ssa_strength_reduction (cfg);
- if (cfg->opt & MONO_OPT_SSAPRE) {
- mono_perform_ssapre (cfg);
- //mono_local_cprop (cfg);
- }
-
if (cfg->opt & MONO_OPT_DEADCE)
mono_ssa_deadce (cfg);
if (cfg->opt & MONO_OPT_DEADCE)
mono_local_deadce (cfg);
- if (cfg->opt & MONO_OPT_BRANCH) {
- MonoBasicBlock *bb;
-
+ if (cfg->opt & MONO_OPT_BRANCH)
mono_optimize_branches (cfg);
-
- /* Have to recompute cfg->bblocks and bb->dfn */
- if (cfg->globalra) {
- mono_remove_critical_edges (cfg);
-
- for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
- bb->dfn = 0;
-
- /* Depth-first ordering on basic blocks */
- cfg->bblocks = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * (cfg->num_bblocks + 1));
-
- dfn = 0;
- df_visit (cfg->bb_entry, &dfn, cfg->bblocks);
- cfg->num_bblocks = dfn + 1;
- }
- }
}
#endif
*/
mono_liveness_handle_exception_clauses (cfg);
- if (cfg->globalra) {
- MonoBasicBlock *bb;
-
- /* Have to do this before regalloc since it can create vregs */
- for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
- mono_arch_lowering_pass (cfg, bb);
-
- mono_global_regalloc (cfg);
- }
-
- if ((cfg->opt & MONO_OPT_LINEARS) && !cfg->globalra) {
+ if (cfg->opt & MONO_OPT_LINEARS) {
GList *vars, *regs, *l;
/* fixme: maybe we can avoid to compute livenesss here if already computed ? */
//print_dfn (cfg);
/* variables are allocated after decompose, since decompose could create temps */
- if (!cfg->globalra && !COMPILE_LLVM (cfg)) {
+ if (!COMPILE_LLVM (cfg)) {
mono_arch_allocate_vars (cfg);
if (cfg->exception_type)
return cfg;
MonoBasicBlock *bb;
gboolean need_local_opts;
- if (!cfg->globalra && !COMPILE_LLVM (cfg)) {
+ if (!COMPILE_LLVM (cfg)) {
mono_spill_global_vars (cfg, &need_local_opts);
if (need_local_opts || cfg->compile_aot) {
}
}
- if (cfg->verbose_level >= 4 && !cfg->globalra) {
+ if (cfg->verbose_level >= 4) {
for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
MonoInst *tree = bb->code;
g_print ("DUMP BLOCK %d:\n", bb->block_num);
#else
MonoCompile*
-mini_method_compile (MonoMethod *method, guint32 opts, MonoDomain *domain, JitFlags flags, int parts)
+mini_method_compile (MonoMethod *method, guint32 opts, MonoDomain *domain, JitFlags flags, int parts, int aot_method_index)
{
g_assert_not_reached ();
return NULL;
else
mono_lookup_pinvoke_call (method, NULL, NULL);
}
- nm = mono_marshal_get_native_wrapper (method, check_for_pending_exc, mono_aot_only);
+ nm = mono_marshal_get_native_wrapper (method, TRUE, mono_aot_only);
code = mono_get_addr_from_ftnptr (mono_compile_method (nm));
jinfo = mono_jit_info_table_find (target_domain, code);
if (!jinfo)
*/
return mono_get_addr_from_ftnptr ((gpointer)mono_icall_get_wrapper_full (mi, TRUE));
} else if (*name == 'I' && (strcmp (name, "Invoke") == 0)) {
-#ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
return mono_create_delegate_trampoline (target_domain, method->klass);
-#else
- nm = mono_marshal_get_delegate_invoke (method, NULL);
- return mono_get_addr_from_ftnptr (mono_compile_method (nm));
-#endif
} else if (*name == 'B' && (strcmp (name, "BeginInvoke") == 0)) {
nm = mono_marshal_get_delegate_begin_invoke (method);
return mono_get_addr_from_ftnptr (mono_compile_method (nm));
jit_timer = g_timer_new ();
- cfg = mini_method_compile (method, opt, target_domain, JIT_FLAG_RUN_CCTORS, 0);
+ cfg = mini_method_compile (method, opt, target_domain, JIT_FLAG_RUN_CCTORS, 0, -1);
prof_method = cfg->method;
g_timer_stop (jit_timer);
else if (cfg->exception_type == MONO_EXCEPTION_TYPE_LOAD)
ex = mono_exception_from_name_msg (mono_defaults.corlib, "System", "TypeLoadException", cfg->exception_message);
else if (cfg->exception_type == MONO_EXCEPTION_FILE_NOT_FOUND)
- ex = mono_exception_from_name_msg (mono_defaults.corlib, "System", "FileNotFoundException", cfg->exception_message);
+ ex = mono_exception_from_name_msg (mono_defaults.corlib, "System.IO", "FileNotFoundException", cfg->exception_message);
else if (cfg->exception_type == MONO_EXCEPTION_BAD_IMAGE)
ex = mono_get_exception_bad_image_format (cfg->exception_message);
else
case MONO_EXCEPTION_FIELD_ACCESS:
ex = mono_exception_from_name_msg (mono_defaults.corlib, "System", "FieldAccessException", cfg->exception_message);
break;
-#ifndef DISABLE_SECURITY
- /* this can only be set if the security manager is active */
- case MONO_EXCEPTION_SECURITY_LINKDEMAND: {
- MonoSecurityManager* secman = mono_security_manager_get_methods ();
- MonoObject *exc = NULL;
- gpointer args [2];
-
- args [0] = &cfg->exception_data;
- args [1] = &method;
- mono_runtime_invoke (secman->linkdemandsecurityexception, NULL, args, &exc);
-
- ex = (MonoException*)exc;
- break;
- }
-#endif
case MONO_EXCEPTION_OBJECT_SUPPLIED: {
MonoException *exp = cfg->exception_ptr;
MONO_GC_UNREGISTER_ROOT (cfg->exception_ptr);
code = cfg->native_code;
- if (cfg->generic_sharing_context && mono_method_is_generic_sharable (method, FALSE))
+ if (cfg->gshared && mono_method_is_generic_sharable (method, FALSE))
mono_stats.generics_shared_methods++;
if (cfg->gsharedvt)
mono_stats.gsharedvt_methods++;
mono_jit_stats.regvars += cfg->stat_n_regvars;
mono_jit_stats.inlineable_methods += cfg->stat_inlineable_methods;
mono_jit_stats.inlined_methods += cfg->stat_inlined_methods;
- mono_jit_stats.cas_demand_generation += cfg->stat_cas_demand_generation;
mono_jit_stats.code_reallocs += cfg->stat_code_reallocs;
mono_destroy_compile (cfg);
/* These patches are applied after a method has been installed, no target munging is needed. */
nacl_allow_target_modification (FALSE);
#endif
+#ifdef MONO_ARCH_HAVE_PATCH_CODE_NEW
+ for (tmp = jlist->list; tmp; tmp = tmp->next) {
+ gpointer target = mono_resolve_patch_target (NULL, target_domain, tmp->data, &patch_info, TRUE);
+ mono_arch_patch_code_new (NULL, target_domain, tmp->data, &patch_info, target);
+ }
+#else
for (tmp = jlist->list; tmp; tmp = tmp->next)
- mono_arch_patch_code (NULL, target_domain, tmp->data, &patch_info, NULL, TRUE);
+ mono_arch_patch_code (NULL, NULL, target_domain, tmp->data, &patch_info, TRUE);
+#endif
#if defined(__native_client_codegen__) && defined(__native_client__)
nacl_allow_target_modification (TRUE);
#endif
}
+/* Stub implementation: must never be reached (aborts via g_assert_not_reached). */
gpointer
-mono_arch_get_gsharedvt_call_info (gpointer addr, MonoMethodSignature *normal_sig, MonoMethodSignature *gsharedvt_sig, MonoGenericSharingContext *gsctx, gboolean gsharedvt_in, gint32 vcall_offset, gboolean calli)
+mono_arch_get_gsharedvt_call_info (gpointer addr, MonoMethodSignature *normal_sig, MonoMethodSignature *gsharedvt_sig, gboolean gsharedvt_in, gint32 vcall_offset, gboolean calli)
{
g_assert_not_reached ();
return NULL;
}
gpointer
-mono_arch_get_gsharedvt_call_info (gpointer addr, MonoMethodSignature *normal_sig, MonoMethodSignature *gsharedvt_sig, MonoGenericSharingContext *gsctx, gboolean gsharedvt_in, gint32 vcall_offset, gboolean calli)
+mono_arch_get_gsharedvt_call_info (gpointer addr, MonoMethodSignature *normal_sig, MonoMethodSignature *gsharedvt_sig, gboolean gsharedvt_in, gint32 vcall_offset, gboolean calli)
{
NOT_IMPLEMENTED;
return NULL;
#endif
+#ifndef ENABLE_LLVM
+/*
+ * Stubs for the LLVM AOT-emission entry points, built when LLVM support
+ * is compiled out.  Neither may ever be called: both abort via
+ * g_assert_not_reached ().
+ */
+void
+mono_llvm_emit_aot_file_info (MonoAotFileInfo *info, gboolean has_jitted_code)
+{
+	g_assert_not_reached ();
+}
+
+void
+mono_llvm_emit_aot_data (const char *symbol, guint8 *data, int data_len)
+{
+	g_assert_not_reached ();
+}
+#endif
+
#ifdef USE_JUMP_TABLES
#define DEFAULT_JUMPTABLE_CHUNK_ELEMENTS 128
}
#endif
-/*
- * mini_replace_type:
- *
- * Replace the type used in the metadata stream with what the JIT will actually use during compilation.
-*/
-MonoType*
-mini_replace_type (MonoType *type)
-{
- type = mono_type_get_underlying_type (type);
- return mini_native_type_replace_type (type);
-}
-
/*
* mini_get_underlying_type:
*
* For gsharedvt types, it will return the original VAR/MVAR.
*/
MonoType*
-mini_get_underlying_type (MonoCompile *cfg, MonoType *type)
+mini_get_underlying_type (MonoType *type)
{
-	type = mini_type_get_underlying_type (cfg->generic_sharing_context, type);
-	return mini_native_type_replace_type (type);
+	/* NOTE(review): the mini_native_type_replace_type () call was dropped
+	 * here; presumably its behavior is now folded into
+	 * mini_type_get_underlying_type () — confirm against its definition. */
+	return mini_type_get_underlying_type (type);
}
void