switch (type->type) {
case MONO_TYPE_I1:
case MONO_TYPE_U1:
- case MONO_TYPE_BOOLEAN:
return OP_STOREI1_MEMBASE_REG;
case MONO_TYPE_I2:
case MONO_TYPE_U2:
- case MONO_TYPE_CHAR:
return OP_STOREI2_MEMBASE_REG;
case MONO_TYPE_I4:
case MONO_TYPE_U4:
case MONO_TYPE_I1:
return OP_LOADI1_MEMBASE;
case MONO_TYPE_U1:
- case MONO_TYPE_BOOLEAN:
return OP_LOADU1_MEMBASE;
case MONO_TYPE_I2:
return OP_LOADI2_MEMBASE;
case MONO_TYPE_U2:
- case MONO_TYPE_CHAR:
return OP_LOADU2_MEMBASE;
case MONO_TYPE_I4:
return OP_LOADI4_MEMBASE;
return FALSE;
if (assembly->in_gac || assembly->image == mono_defaults.corlib)
return FALSE;
- if (mono_security_enabled ())
- return FALSE;
return mono_assembly_has_skip_verification (assembly);
}
if (cfg->disable_reuse_stack_slots)
reuse_slot = FALSE;
+ t = mini_get_underlying_type (cfg, t);
switch (t->type) {
case MONO_TYPE_GENERICINST:
if (!mono_type_generic_inst_is_valuetype (t)) {
if (cfg->disable_reuse_stack_slots)
reuse_slot = FALSE;
- if (t->byref) {
- slot_info = &scalar_stack_slots [MONO_TYPE_I];
- } else {
- switch (t->type) {
- case MONO_TYPE_GENERICINST:
- if (!mono_type_generic_inst_is_valuetype (t)) {
- slot_info = &scalar_stack_slots [t->type];
- break;
- }
- /* Fall through */
- case MONO_TYPE_VALUETYPE:
- if (!vtype_stack_slots)
- vtype_stack_slots = mono_mempool_alloc0 (cfg->mempool, sizeof (StackSlotInfo) * 256);
- for (i = 0; i < nvtypes; ++i)
- if (t->data.klass == vtype_stack_slots [i].vtype)
- break;
- if (i < nvtypes)
- slot_info = &vtype_stack_slots [i];
- else {
- g_assert (nvtypes < 256);
- vtype_stack_slots [nvtypes].vtype = t->data.klass;
- slot_info = &vtype_stack_slots [nvtypes];
- nvtypes ++;
- }
- if (cfg->disable_reuse_ref_stack_slots)
- reuse_slot = FALSE;
+ t = mini_get_underlying_type (cfg, t);
+ switch (t->type) {
+ case MONO_TYPE_GENERICINST:
+ if (!mono_type_generic_inst_is_valuetype (t)) {
+ slot_info = &scalar_stack_slots [t->type];
break;
+ }
+ /* Fall through */
+ case MONO_TYPE_VALUETYPE:
+ if (!vtype_stack_slots)
+ vtype_stack_slots = mono_mempool_alloc0 (cfg->mempool, sizeof (StackSlotInfo) * 256);
+ for (i = 0; i < nvtypes; ++i)
+ if (t->data.klass == vtype_stack_slots [i].vtype)
+ break;
+ if (i < nvtypes)
+ slot_info = &vtype_stack_slots [i];
+ else {
+ g_assert (nvtypes < 256);
+ vtype_stack_slots [nvtypes].vtype = t->data.klass;
+ slot_info = &vtype_stack_slots [nvtypes];
+ nvtypes ++;
+ }
+ if (cfg->disable_reuse_ref_stack_slots)
+ reuse_slot = FALSE;
+ break;
- case MONO_TYPE_PTR:
- case MONO_TYPE_I:
- case MONO_TYPE_U:
+ case MONO_TYPE_PTR:
+ case MONO_TYPE_I:
+ case MONO_TYPE_U:
#if SIZEOF_VOID_P == 4
- case MONO_TYPE_I4:
+ case MONO_TYPE_I4:
#else
- case MONO_TYPE_I8:
+ case MONO_TYPE_I8:
#endif
- if (cfg->disable_ref_noref_stack_slot_share) {
- slot_info = &scalar_stack_slots [MONO_TYPE_I];
- break;
- }
- /* Fall through */
-
- case MONO_TYPE_CLASS:
- case MONO_TYPE_OBJECT:
- case MONO_TYPE_ARRAY:
- case MONO_TYPE_SZARRAY:
- case MONO_TYPE_STRING:
- /* Share non-float stack slots of the same size */
- slot_info = &scalar_stack_slots [MONO_TYPE_CLASS];
- if (cfg->disable_reuse_ref_stack_slots)
- reuse_slot = FALSE;
- break;
- case MONO_TYPE_VAR:
- case MONO_TYPE_MVAR:
- slot_info = &scalar_stack_slots [t->type];
- break;
- default:
- slot_info = &scalar_stack_slots [t->type];
+ if (cfg->disable_ref_noref_stack_slot_share) {
+ slot_info = &scalar_stack_slots [MONO_TYPE_I];
break;
}
+ /* Fall through */
+
+ case MONO_TYPE_CLASS:
+ case MONO_TYPE_OBJECT:
+ case MONO_TYPE_ARRAY:
+ case MONO_TYPE_SZARRAY:
+ case MONO_TYPE_STRING:
+ /* Share non-float stack slots of the same size */
+ slot_info = &scalar_stack_slots [MONO_TYPE_CLASS];
+ if (cfg->disable_reuse_ref_stack_slots)
+ reuse_slot = FALSE;
+ break;
+ case MONO_TYPE_VAR:
+ case MONO_TYPE_MVAR:
+ slot_info = &scalar_stack_slots [t->type];
+ break;
+ default:
+ slot_info = &scalar_stack_slots [t->type];
+ break;
}
slot = 0xffffff;
emul_opcode_hit_cache [opcode >> (EMUL_HIT_SHIFT + 3)] |= (1 << (opcode & EMUL_HIT_MASK));
}
-/*
- * For JIT icalls implemented in C.
- * NAME should be the same as the name of the C function whose address is FUNC.
- * If SAVE is TRUE, no wrapper is generated. This is for perf critical icalls which
- * can't throw exceptions.
- */
-static void
-register_icall (gpointer func, const char *name, const char *sigstr, gboolean save)
-{
- MonoMethodSignature *sig;
-
- if (sigstr)
- sig = mono_create_icall_signature (sigstr);
- else
- sig = NULL;
-
- mono_register_jit_icall_full (func, name, sig, save, FALSE, save ? name : NULL);
-}
-
-/* Register a jit icall which doesn't throw exceptions through mono_raise_exception () */
-static void
-register_icall_noraise (gpointer func, const char *name, const char *sigstr)
-{
- MonoMethodSignature *sig;
-
- if (sigstr)
- sig = mono_create_icall_signature (sigstr);
- else
- sig = NULL;
-
- mono_register_jit_icall_full (func, name, sig, TRUE, TRUE, name);
-}
-
-static void
-register_dyn_icall (gpointer func, const char *name, const char *sigstr, gboolean save)
-{
- MonoMethodSignature *sig;
-
- if (sigstr)
- sig = mono_create_icall_signature (sigstr);
- else
- sig = NULL;
-
- mono_register_jit_icall (func, name, sig, save);
-}
-
static void
print_dfn (MonoCompile *cfg) {
int i, j;
void
mono_destroy_compile (MonoCompile *cfg)
{
+#ifndef DISABLE_JIT
GSList *l;
if (cfg->header)
g_free (cfg->vars);
g_free (cfg->exception_message);
g_free (cfg);
+#endif
}
#ifndef DISABLE_JIT
#endif /* !DISABLE_JIT */
-static gboolean
-mini_tls_key_supported (MonoTlsKey key)
-{
- if (!MONO_ARCH_HAVE_TLS_GET)
- return FALSE;
-
- return mini_get_tls_offset (key) != -1;
-}
-
void
mono_add_patch_info (MonoCompile *cfg, int ip, MonoJumpInfoType type, gconstpointer target)
{
int max_epilog_size;
guint8 *code;
MonoDomain *code_domain;
+ guint unwindlen = 0;
if (mono_using_xdebug)
/*
if (cfg->opt & MONO_OPT_PEEPHOLE)
mono_arch_peephole_pass_1 (cfg, bb);
- if (!cfg->globalra)
- mono_local_regalloc (cfg, bb);
+ mono_local_regalloc (cfg, bb);
if (cfg->opt & MONO_OPT_PEEPHOLE)
mono_arch_peephole_pass_2 (cfg, bb);
- if (cfg->gen_seq_points && !cfg->gen_seq_points_debug_data)
- bb_deduplicate_op_il_seq_points (cfg, bb);
+ if (cfg->gen_seq_points && !cfg->gen_sdb_seq_points)
+ mono_bb_deduplicate_op_il_seq_points (cfg, bb);
}
if (cfg->prof_options & MONO_PROFILE_COVERAGE)
#endif
/* fixme: align to MONO_ARCH_CODE_ALIGNMENT */
- if (cfg->method->dynamic) {
- guint unwindlen = 0;
#ifdef MONO_ARCH_HAVE_UNWIND_TABLE
- unwindlen = mono_arch_unwindinfo_get_size (cfg->arch.unwindinfo);
+ unwindlen = mono_arch_unwindinfo_get_size (cfg->arch.unwindinfo);
#endif
+
+ if (cfg->method->dynamic) {
/* Allocate the code into a separate memory pool so it can be freed */
cfg->dynamic_info = g_new0 (MonoJitDynamicMethodInfo, 1);
cfg->dynamic_info->code_mp = mono_code_manager_new_dynamic ();
if (mono_using_xdebug)
/* See the comment for cfg->code_domain */
- code = mono_domain_code_reserve (code_domain, cfg->code_size + unwindlen);
+ code = mono_domain_code_reserve (code_domain, cfg->code_size + cfg->thunk_area + unwindlen);
else
- code = mono_code_manager_reserve (cfg->dynamic_info->code_mp, cfg->code_size + unwindlen);
+ code = mono_code_manager_reserve (cfg->dynamic_info->code_mp, cfg->code_size + cfg->thunk_area + unwindlen);
} else {
- guint unwindlen = 0;
-#ifdef MONO_ARCH_HAVE_UNWIND_TABLE
- unwindlen = mono_arch_unwindinfo_get_size (cfg->arch.unwindinfo);
-#endif
- code = mono_domain_code_reserve (code_domain, cfg->code_size + unwindlen);
+ code = mono_domain_code_reserve (code_domain, cfg->code_size + cfg->thunk_area + unwindlen);
}
#if defined(__native_client_codegen__) && defined(__native_client__)
nacl_allow_target_modification (TRUE);
#endif
+ if (cfg->thunk_area) {
+ cfg->thunks_offset = cfg->code_size + unwindlen;
+ cfg->thunks = code + cfg->thunks_offset;
+ memset (cfg->thunks, 0, cfg->thunk_area);
+ }
g_assert (code);
memcpy (code, cfg->native_code, cfg->code_len);
mono_nacl_fix_patches (cfg->native_code, cfg->patch_info);
#endif
- mono_arch_patch_code (cfg->method, cfg->domain, cfg->native_code, cfg->patch_info, cfg->dynamic_info ? cfg->dynamic_info->code_mp : NULL, cfg->run_cctors);
+#ifdef MONO_ARCH_HAVE_PATCH_CODE_NEW
+ {
+ MonoJumpInfo *ji;
+ gpointer target;
+
+ for (ji = cfg->patch_info; ji; ji = ji->next) {
+ if (cfg->compile_aot) {
+ switch (ji->type) {
+ case MONO_PATCH_INFO_BB:
+ case MONO_PATCH_INFO_LABEL:
+ break;
+ default:
+ /* No need to patch these */
+ continue;
+ }
+ }
+
+ if (ji->type == MONO_PATCH_INFO_NONE)
+ continue;
+
+ target = mono_resolve_patch_target (cfg->method, cfg->domain, cfg->native_code, ji, cfg->run_cctors);
+ mono_arch_patch_code_new (cfg, cfg->domain, cfg->native_code, ji, target);
+ }
+ }
+#else
+ mono_arch_patch_code (cfg, cfg->method, cfg->domain, cfg->native_code, cfg->patch_info, cfg->run_cctors);
+#endif
if (cfg->method->dynamic) {
if (mono_using_xdebug)
if (cfg->has_unwind_info_for_epilog && !(flags & JIT_INFO_HAS_ARCH_EH_INFO))
flags |= JIT_INFO_HAS_ARCH_EH_INFO;
-
+
+ if (cfg->thunk_area)
+ flags |= JIT_INFO_HAS_THUNK_INFO;
+
if (cfg->try_block_holes) {
for (tmp = cfg->try_block_holes; tmp; tmp = tmp->next) {
TryBlockHole *hole = tmp->data;
printf ("Number of try block holes %d\n", num_holes);
}
- if (mono_security_method_has_declsec (cfg->method_to_register))
- flags |= JIT_INFO_HAS_ARCH_EH_INFO;
-
if (COMPILE_LLVM (cfg))
num_clauses = cfg->llvm_ex_info_len;
else
info->stack_size = stack_size;
}
+ if (cfg->thunk_area) {
+ MonoThunkJitInfo *info;
+
+ info = mono_jit_info_get_thunk_info (jinfo);
+ info->thunks_offset = cfg->thunks_offset;
+ info->thunks_size = cfg->thunk_area;
+ }
+
if (COMPILE_LLVM (cfg)) {
if (num_clauses)
memcpy (&jinfo->clauses [0], &cfg->llvm_ex_info [0], num_clauses * sizeof (MonoJitExceptionInfo));
ei->flags = ec->flags;
+ if (G_UNLIKELY (cfg->verbose_level >= 4))
+ printf ("IL clause: try 0x%x-0x%x handler 0x%x-0x%x filter 0x%x\n", ec->try_offset, ec->try_offset + ec->try_len, ec->handler_offset, ec->handler_offset + ec->handler_len, ec->flags == MONO_EXCEPTION_CLAUSE_FILTER ? ec->data.filter_offset : 0);
+
/*
* The spvars are needed by mono_arch_install_handler_block_guard ().
*/
}
#endif
-static MonoType*
-get_gsharedvt_type (MonoType *t)
-{
- MonoGenericParam *par = t->data.generic_param;
- MonoGenericParam *copy;
- MonoType *res;
- MonoImage *image = NULL;
-
- /*
- * Create an anonymous gparam with a different serial so normal gshared and gsharedvt methods have
- * a different instantiation.
- */
- g_assert (mono_generic_param_info (par));
- if (par->owner) {
- image = par->owner->image;
-
- mono_image_lock (image);
- if (!image->gsharedvt_types)
- image->gsharedvt_types = g_hash_table_new (NULL, NULL);
- res = g_hash_table_lookup (image->gsharedvt_types, par);
- mono_image_unlock (image);
- if (res)
- return res;
- copy = mono_image_alloc0 (image, sizeof (MonoGenericParamFull));
- memcpy (copy, par, sizeof (MonoGenericParamFull));
- } else {
- copy = g_memdup (par, sizeof (MonoGenericParam));
- }
- copy->owner = NULL;
- // FIXME:
- copy->image = mono_defaults.corlib;
- copy->serial = 1;
- res = mono_metadata_type_dup (NULL, t);
- res->data.generic_param = copy;
-
- if (par->owner) {
- mono_image_lock (image);
- /* Duplicates are ok */
- g_hash_table_insert (image->gsharedvt_types, par, res);
- mono_image_unlock (image);
- }
-
- return res;
-}
-
-static gboolean
-is_gsharedvt_type (MonoType *t)
-{
- return (t->type == MONO_TYPE_VAR || t->type == MONO_TYPE_MVAR) && t->data.generic_param->serial == 1;
-}
-
/* Return whenever METHOD is a gsharedvt method */
static gboolean
is_gsharedvt_method (MonoMethod *method)
inst = context->class_inst;
if (inst) {
for (i = 0; i < inst->type_argc; ++i)
- if (is_gsharedvt_type (inst->type_argv [i]))
+ if (mini_is_gsharedvt_gparam (inst->type_argv [i]))
return TRUE;
}
inst = context->method_inst;
if (inst) {
for (i = 0; i < inst->type_argc; ++i)
- if (is_gsharedvt_type (inst->type_argv [i]))
+ if (mini_is_gsharedvt_gparam (inst->type_argv [i]))
return TRUE;
}
return FALSE;
}
#ifndef DISABLE_JIT
+
+/*
+ * GC safepoint insertion.
+ *
+ * Use defined(USE_COOP_GC) rather than the bare macro: elsewhere in this
+ * patch the flag is tested with defined() (see the MONO_OPT_LOOP hunk), and
+ * a bare USE_COOP_GC is a preprocessor error if the macro is defined empty.
+ */
+#if defined(__native_client_codegen__) || defined(USE_COOP_GC)
+
+/*
+ * mono_create_gc_safepoint:
+ *
+ *   Emit an OP_GC_SAFE_POINT at the start of BBLOCK. The instruction polls
+ * the runtime's suspension flag (__nacl_thread_suspension_needed under NaCl,
+ * mono_polling_required otherwise), loaded through an AOT-patchable const so
+ * AOT images can relocate the flag's address.
+ */
+static void
+mono_create_gc_safepoint (MonoCompile *cfg, MonoBasicBlock *bblock)
+{
+	MonoInst *poll_addr, *ins;
+
+	if (cfg->verbose_level)
+		printf ("ADDING SAFE POINT TO BB %d\n", bblock->block_num);
+
+#if defined(__native_client_codegen__)
+	NEW_AOTCONST (cfg, poll_addr, MONO_PATCH_INFO_GC_SAFE_POINT_FLAG, (gpointer)&__nacl_thread_suspension_needed);
+#else
+	NEW_AOTCONST (cfg, poll_addr, MONO_PATCH_INFO_GC_SAFE_POINT_FLAG, (gpointer)&mono_polling_required);
+#endif
+
+	MONO_INST_NEW (cfg, ins, OP_GC_SAFE_POINT);
+	ins->sreg1 = poll_addr->dreg;
+
+	if (bblock->flags & BB_EXCEPTION_HANDLER) {
+		MonoInst *eh_op = bblock->code;
+
+		/* we only skip the ops that start EH blocks. */
+		if (eh_op && eh_op->opcode != OP_START_HANDLER && eh_op->opcode != OP_GET_EX_OBJ)
+			eh_op = NULL;
+
+		/* the poll must come after the handler-entry op, if any */
+		mono_bblock_insert_after_ins (bblock, eh_op, poll_addr);
+		mono_bblock_insert_after_ins (bblock, poll_addr, ins);
+	} else if (bblock == cfg->bb_entry) {
+		mono_bblock_insert_after_ins (bblock, bblock->last_ins, poll_addr);
+		mono_bblock_insert_after_ins (bblock, poll_addr, ins);
+
+	} else {
+		mono_bblock_insert_before_ins (bblock, NULL, poll_addr);
+		mono_bblock_insert_after_ins (bblock, poll_addr, ins);
+	}
+}
+
+/*
+ * mono_insert_safepoints:
+ *
+ *   Insert safepoints into managed code at the important code paths:
+ *
+ *   - the first basic block
+ *   - landing BBs of exception handlers
+ *   - loop body starts (requires MONO_OPT_LOOP, forced on for coop/nacl)
+ */
+static void
+mono_insert_safepoints (MonoCompile *cfg)
+{
+	MonoBasicBlock *bb;
+
+	if (cfg->verbose_level)
+		printf ("INSERTING SAFEPOINTS\n");
+
+	for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
+		if (bb->loop_body_start || bb == cfg->bb_entry || bb->flags & BB_EXCEPTION_HANDLER)
+			mono_create_gc_safepoint (cfg, bb);
+	}
+}
+
+#else
+
+/* No-op stub when safepoint polling is not needed on this configuration. */
+static void
+mono_insert_safepoints (MonoCompile *cfg)
+{
+}
+
+#endif
+
/*
* mini_method_compile:
* @method: the method to compile
MonoMethodHeader *header;
MonoMethodSignature *sig;
MonoError err;
- guint8 *ip;
MonoCompile *cfg;
int dfn, i, code_size_ratio;
-#ifndef DISABLE_SSA
- gboolean deadce_has_run = FALSE;
-#endif
gboolean try_generic_shared, try_llvm = FALSE;
MonoMethod *method_to_compile, *method_to_register;
gboolean method_is_gshared = FALSE;
gboolean run_cctors = (flags & JIT_FLAG_RUN_CCTORS) ? 1 : 0;
gboolean compile_aot = (flags & JIT_FLAG_AOT) ? 1 : 0;
gboolean full_aot = (flags & JIT_FLAG_FULL_AOT) ? 1 : 0;
+ gboolean disable_direct_icalls = (flags & JIT_FLAG_NO_DIRECT_ICALLS) ? 1 : 0;
+ gboolean gsharedvt_method = FALSE;
#ifdef ENABLE_LLVM
gboolean llvm = (flags & JIT_FLAG_LLVM) ? 1 : 0;
#endif
if (MONO_METHOD_COMPILE_BEGIN_ENABLED ())
MONO_PROBE_METHOD_COMPILE_BEGIN (method);
+ /*
+ * In AOT mode, method can be the following:
+ * - the generic method definition. In this case, we are compiling the fully shared
+ * version of the method, i.e. the version where all the type parameters are
+ * reference types.
+ * - a gsharedvt method.
+ * - a method inflated with type parameters. This is for partial sharing.
+ * - a method inflated with concrete types.
+ */
if (compile_aot)
- /*
- * We might get passed the original generic method definition or
- * instances with type parameters.
- * FIXME: Remove the method->klass->generic_class limitation.
- */
try_generic_shared = mono_class_generic_sharing_enabled (method->klass) &&
(opts & MONO_OPT_GSHARED) && ((method->is_generic || method->klass->generic_container) || (!method->klass->generic_class && mono_method_is_generic_sharable_full (method, TRUE, FALSE, FALSE)));
else
try_generic_shared = mono_class_generic_sharing_enabled (method->klass) &&
(opts & MONO_OPT_GSHARED) && mono_method_is_generic_sharable (method, FALSE);
+ /*
+ if (try_generic_shared && !mono_debug_count ())
+ try_generic_shared = FALSE;
+ */
+
if (opts & MONO_OPT_GSHARED) {
if (try_generic_shared)
mono_stats.generics_sharable_methods++;
try_generic_shared = FALSE;
}
- if (is_gsharedvt_method (method) || (compile_aot && is_open_method (method))) {
+ gsharedvt_method = is_gsharedvt_method (method);
+ if (gsharedvt_method || (compile_aot && is_open_method (method))) {
/* We are AOTing a gshared method directly */
method_is_gshared = TRUE;
g_assert (compile_aot);
cfg->full_aot = full_aot;
cfg->skip_visibility = method->skip_visibility;
cfg->orig_method = method;
- cfg->gen_seq_points = debug_options.gen_seq_points_compact_data || debug_options.gen_seq_points_debug_data;
- cfg->gen_seq_points_debug_data = debug_options.gen_seq_points_debug_data;
+ cfg->gen_seq_points = debug_options.gen_seq_points_compact_data || debug_options.gen_sdb_seq_points;
+ cfg->gen_sdb_seq_points = debug_options.gen_sdb_seq_points;
+
+#ifdef PLATFORM_ANDROID
+ if (cfg->method->wrapper_type != MONO_WRAPPER_NONE) {
+ /* FIXME: Why is this needed */
+ cfg->gen_seq_points = FALSE;
+ cfg->gen_sdb_seq_points = FALSE;
+ }
+#endif
+ /* coop / nacl requires loop detection to happen */
+#if defined(__native_client_codegen__) || defined(USE_COOP_GC)
+ cfg->opt |= MONO_OPT_LOOP;
+#endif
cfg->explicit_null_checks = debug_options.explicit_null_checks;
cfg->soft_breakpoints = debug_options.soft_breakpoints;
cfg->check_pinvoke_callconv = debug_options.check_pinvoke_callconv;
+ cfg->disable_direct_icalls = disable_direct_icalls;
if (try_generic_shared)
cfg->generic_sharing_context = (MonoGenericSharingContext*)&cfg->gsctx;
cfg->compile_llvm = try_llvm;
return cfg;
}
- if (cfg->generic_sharing_context && (mini_is_gsharedvt_sharable_method (method) || method_is_gshared)) {
+ if (cfg->generic_sharing_context && (gsharedvt_method || mini_is_gsharedvt_sharable_method (method))) {
MonoMethodInflated *inflated;
MonoGenericContext *context;
- if (method_is_gshared) {
+ if (gsharedvt_method) {
g_assert (method->is_inflated);
inflated = (MonoMethodInflated*)method;
context = &inflated->context;
}
}
- ip = (guint8 *)header->code;
-
cfg->intvars = mono_mempool_alloc0 (cfg->mempool, sizeof (guint16) * STACK_MAX * header->max_stack);
if (cfg->verbose_level > 0) {
g_free (method_name);
}
- if (cfg->opt & (MONO_OPT_ABCREM | MONO_OPT_SSAPRE))
+ if (cfg->opt & MONO_OPT_ABCREM)
cfg->opt |= MONO_OPT_SSA;
- /*
- if ((cfg->method->klass->image != mono_defaults.corlib) || (strstr (cfg->method->klass->name, "StackOverflowException") && strstr (cfg->method->name, ".ctor")) || (strstr (cfg->method->klass->name, "OutOfMemoryException") && strstr (cfg->method->name, ".ctor")))
- cfg->globalra = TRUE;
- */
-
- //cfg->globalra = TRUE;
-
- //if (!strcmp (cfg->method->klass->name, "Tests") && !cfg->method->wrapper_type)
- // cfg->globalra = TRUE;
-
- {
- static int count = 0;
- count ++;
-
- /*
- if (g_getenv ("COUNT2")) {
- cfg->globalra = TRUE;
- if (count == atoi (g_getenv ("COUNT2")))
- printf ("LAST: %s\n", mono_method_full_name (cfg->method, TRUE));
- if (count > atoi (g_getenv ("COUNT2")))
- cfg->globalra = FALSE;
- }
- */
- }
-
- if (header->clauses)
- cfg->globalra = FALSE;
-
- if (cfg->method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
- /* The code in the prolog clobbers caller saved registers */
- cfg->globalra = FALSE;
-
- // FIXME: Disable globalra in case of tracing/profiling
-
- if (cfg->method->save_lmf)
- /* The LMF saving code might clobber caller saved registers */
- cfg->globalra = FALSE;
-
- if (header->code_size > 5000)
- // FIXME:
- /* Too large bblocks could overflow the ins positions */
- cfg->globalra = FALSE;
-
cfg->rs = mono_regstate_new ();
- if (cfg->globalra)
- cfg->rs->next_vreg = MONO_MAX_IREGS + MONO_MAX_FREGS;
cfg->next_vreg = cfg->rs->next_vreg;
/* FIXME: Fix SSA to handle branches inside bblocks */
*/
mono_compile_create_vars (cfg);
- /* SSAPRE is not supported on linear IR */
- cfg->opt &= ~MONO_OPT_SSAPRE;
-
i = mono_method_to_ir (cfg, method_to_compile, NULL, NULL, NULL, NULL, 0, FALSE);
if (i < 0) {
if (!COMPILE_LLVM (cfg))
mono_if_conversion (cfg);
- if ((cfg->opt & MONO_OPT_SSAPRE) || cfg->globalra)
- mono_remove_critical_edges (cfg);
-
/* Depth-first ordering on basic blocks */
cfg->bblocks = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * (cfg->num_bblocks + 1));
mono_compute_natural_loops (cfg);
}
+ mono_insert_safepoints (cfg);
+
/* after method_to_ir */
if (parts == 1) {
if (MONO_METHOD_COMPILE_END_ENABLED ())
if (cfg->comp_done & MONO_COMP_SSA && !COMPILE_LLVM (cfg)) {
//mono_ssa_strength_reduction (cfg);
- if (cfg->opt & MONO_OPT_SSAPRE) {
- mono_perform_ssapre (cfg);
- //mono_local_cprop (cfg);
- }
-
- if (cfg->opt & MONO_OPT_DEADCE) {
+ if (cfg->opt & MONO_OPT_DEADCE)
mono_ssa_deadce (cfg);
- deadce_has_run = TRUE;
- }
if ((cfg->flags & (MONO_CFG_HAS_LDELEMA|MONO_CFG_HAS_CHECK_THIS)) && (cfg->opt & MONO_OPT_ABCREM))
mono_perform_abc_removal (cfg);
if (cfg->opt & MONO_OPT_DEADCE)
mono_local_deadce (cfg);
- if (cfg->opt & MONO_OPT_BRANCH) {
- MonoBasicBlock *bb;
-
+ if (cfg->opt & MONO_OPT_BRANCH)
mono_optimize_branches (cfg);
-
- /* Have to recompute cfg->bblocks and bb->dfn */
- if (cfg->globalra) {
- mono_remove_critical_edges (cfg);
-
- for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
- bb->dfn = 0;
-
- /* Depth-first ordering on basic blocks */
- cfg->bblocks = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * (cfg->num_bblocks + 1));
-
- dfn = 0;
- df_visit (cfg->bb_entry, &dfn, cfg->bblocks);
- cfg->num_bblocks = dfn + 1;
- }
- }
}
#endif
*/
mono_liveness_handle_exception_clauses (cfg);
- if (cfg->globalra) {
- MonoBasicBlock *bb;
-
- /* Have to do this before regalloc since it can create vregs */
- for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
- mono_arch_lowering_pass (cfg, bb);
-
- mono_global_regalloc (cfg);
- }
-
- if ((cfg->opt & MONO_OPT_LINEARS) && !cfg->globalra) {
+ if (cfg->opt & MONO_OPT_LINEARS) {
GList *vars, *regs, *l;
/* fixme: maybe we can avoid to compute livenesss here if already computed ? */
//print_dfn (cfg);
/* variables are allocated after decompose, since decompose could create temps */
- if (!cfg->globalra && !COMPILE_LLVM (cfg)) {
+ if (!COMPILE_LLVM (cfg)) {
mono_arch_allocate_vars (cfg);
if (cfg->exception_type)
return cfg;
MonoBasicBlock *bb;
gboolean need_local_opts;
- if (!cfg->globalra && !COMPILE_LLVM (cfg)) {
+ if (!COMPILE_LLVM (cfg)) {
mono_spill_global_vars (cfg, &need_local_opts);
if (need_local_opts || cfg->compile_aot) {
}
}
- if (cfg->verbose_level >= 4 && !cfg->globalra) {
+ if (cfg->verbose_level >= 4) {
for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
MonoInst *tree = bb->code;
g_print ("DUMP BLOCK %d:\n", bb->block_num);
*/
return mono_get_addr_from_ftnptr ((gpointer)mono_icall_get_wrapper_full (mi, TRUE));
} else if (*name == 'I' && (strcmp (name, "Invoke") == 0)) {
-#ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
return mono_create_delegate_trampoline (target_domain, method->klass);
-#else
- nm = mono_marshal_get_delegate_invoke (method, NULL);
- return mono_get_addr_from_ftnptr (mono_compile_method (nm));
-#endif
} else if (*name == 'B' && (strcmp (name, "BeginInvoke") == 0)) {
nm = mono_marshal_get_delegate_begin_invoke (method);
return mono_get_addr_from_ftnptr (mono_compile_method (nm));
else if (cfg->exception_type == MONO_EXCEPTION_TYPE_LOAD)
ex = mono_exception_from_name_msg (mono_defaults.corlib, "System", "TypeLoadException", cfg->exception_message);
else if (cfg->exception_type == MONO_EXCEPTION_FILE_NOT_FOUND)
- ex = mono_exception_from_name_msg (mono_defaults.corlib, "System", "FileNotFoundException", cfg->exception_message);
+ ex = mono_exception_from_name_msg (mono_defaults.corlib, "System.IO", "FileNotFoundException", cfg->exception_message);
else if (cfg->exception_type == MONO_EXCEPTION_BAD_IMAGE)
ex = mono_get_exception_bad_image_format (cfg->exception_message);
else
case MONO_EXCEPTION_FIELD_ACCESS:
ex = mono_exception_from_name_msg (mono_defaults.corlib, "System", "FieldAccessException", cfg->exception_message);
break;
-#ifndef DISABLE_SECURITY
- /* this can only be set if the security manager is active */
- case MONO_EXCEPTION_SECURITY_LINKDEMAND: {
- MonoSecurityManager* secman = mono_security_manager_get_methods ();
- MonoObject *exc = NULL;
- gpointer args [2];
-
- args [0] = &cfg->exception_data;
- args [1] = &method;
- mono_runtime_invoke (secman->linkdemandsecurityexception, NULL, args, &exc);
-
- ex = (MonoException*)exc;
- break;
- }
-#endif
case MONO_EXCEPTION_OBJECT_SUPPLIED: {
MonoException *exp = cfg->exception_ptr;
MONO_GC_UNREGISTER_ROOT (cfg->exception_ptr);
mono_jit_stats.regvars += cfg->stat_n_regvars;
mono_jit_stats.inlineable_methods += cfg->stat_inlineable_methods;
mono_jit_stats.inlined_methods += cfg->stat_inlined_methods;
- mono_jit_stats.cas_demand_generation += cfg->stat_cas_demand_generation;
mono_jit_stats.code_reallocs += cfg->stat_code_reallocs;
mono_destroy_compile (cfg);
/* These patches are applied after a method has been installed, no target munging is needed. */
nacl_allow_target_modification (FALSE);
#endif
+#ifdef MONO_ARCH_HAVE_PATCH_CODE_NEW
+ for (tmp = jlist->list; tmp; tmp = tmp->next) {
+ gpointer target = mono_resolve_patch_target (NULL, target_domain, tmp->data, &patch_info, TRUE);
+ mono_arch_patch_code_new (NULL, target_domain, tmp->data, &patch_info, target);
+ }
+#else
for (tmp = jlist->list; tmp; tmp = tmp->next)
- mono_arch_patch_code (NULL, target_domain, tmp->data, &patch_info, NULL, TRUE);
+ mono_arch_patch_code (NULL, NULL, target_domain, tmp->data, &patch_info, TRUE);
+#endif
#if defined(__native_client_codegen__) && defined(__native_client__)
nacl_allow_target_modification (TRUE);
#endif