switch (type->type) {
case MONO_TYPE_I1:
case MONO_TYPE_U1:
- case MONO_TYPE_BOOLEAN:
return OP_STOREI1_MEMBASE_REG;
case MONO_TYPE_I2:
case MONO_TYPE_U2:
- case MONO_TYPE_CHAR:
return OP_STOREI2_MEMBASE_REG;
case MONO_TYPE_I4:
case MONO_TYPE_U4:
case MONO_TYPE_I1:
return OP_LOADI1_MEMBASE;
case MONO_TYPE_U1:
- case MONO_TYPE_BOOLEAN:
return OP_LOADU1_MEMBASE;
case MONO_TYPE_I2:
return OP_LOADI2_MEMBASE;
case MONO_TYPE_U2:
- case MONO_TYPE_CHAR:
return OP_LOADU2_MEMBASE;
case MONO_TYPE_I4:
return OP_LOADI4_MEMBASE;
if (cfg->disable_reuse_stack_slots)
reuse_slot = FALSE;
+ t = mini_get_underlying_type (cfg, t);
switch (t->type) {
case MONO_TYPE_GENERICINST:
if (!mono_type_generic_inst_is_valuetype (t)) {
if (cfg->disable_reuse_stack_slots)
reuse_slot = FALSE;
- if (t->byref) {
- slot_info = &scalar_stack_slots [MONO_TYPE_I];
- } else {
- switch (t->type) {
- case MONO_TYPE_GENERICINST:
- if (!mono_type_generic_inst_is_valuetype (t)) {
- slot_info = &scalar_stack_slots [t->type];
- break;
- }
- /* Fall through */
- case MONO_TYPE_VALUETYPE:
- if (!vtype_stack_slots)
- vtype_stack_slots = mono_mempool_alloc0 (cfg->mempool, sizeof (StackSlotInfo) * 256);
- for (i = 0; i < nvtypes; ++i)
- if (t->data.klass == vtype_stack_slots [i].vtype)
- break;
- if (i < nvtypes)
- slot_info = &vtype_stack_slots [i];
- else {
- g_assert (nvtypes < 256);
- vtype_stack_slots [nvtypes].vtype = t->data.klass;
- slot_info = &vtype_stack_slots [nvtypes];
- nvtypes ++;
- }
- if (cfg->disable_reuse_ref_stack_slots)
- reuse_slot = FALSE;
+ t = mini_get_underlying_type (cfg, t);
+ switch (t->type) {
+ case MONO_TYPE_GENERICINST:
+ if (!mono_type_generic_inst_is_valuetype (t)) {
+ slot_info = &scalar_stack_slots [t->type];
break;
+ }
+ /* Fall through */
+ case MONO_TYPE_VALUETYPE:
+ if (!vtype_stack_slots)
+ vtype_stack_slots = mono_mempool_alloc0 (cfg->mempool, sizeof (StackSlotInfo) * 256);
+ for (i = 0; i < nvtypes; ++i)
+ if (t->data.klass == vtype_stack_slots [i].vtype)
+ break;
+ if (i < nvtypes)
+ slot_info = &vtype_stack_slots [i];
+ else {
+ g_assert (nvtypes < 256);
+ vtype_stack_slots [nvtypes].vtype = t->data.klass;
+ slot_info = &vtype_stack_slots [nvtypes];
+ nvtypes ++;
+ }
+ if (cfg->disable_reuse_ref_stack_slots)
+ reuse_slot = FALSE;
+ break;
- case MONO_TYPE_PTR:
- case MONO_TYPE_I:
- case MONO_TYPE_U:
+ case MONO_TYPE_PTR:
+ case MONO_TYPE_I:
+ case MONO_TYPE_U:
#if SIZEOF_VOID_P == 4
- case MONO_TYPE_I4:
+ case MONO_TYPE_I4:
#else
- case MONO_TYPE_I8:
+ case MONO_TYPE_I8:
#endif
- if (cfg->disable_ref_noref_stack_slot_share) {
- slot_info = &scalar_stack_slots [MONO_TYPE_I];
- break;
- }
- /* Fall through */
-
- case MONO_TYPE_CLASS:
- case MONO_TYPE_OBJECT:
- case MONO_TYPE_ARRAY:
- case MONO_TYPE_SZARRAY:
- case MONO_TYPE_STRING:
- /* Share non-float stack slots of the same size */
- slot_info = &scalar_stack_slots [MONO_TYPE_CLASS];
- if (cfg->disable_reuse_ref_stack_slots)
- reuse_slot = FALSE;
- break;
- case MONO_TYPE_VAR:
- case MONO_TYPE_MVAR:
- slot_info = &scalar_stack_slots [t->type];
- break;
- default:
- slot_info = &scalar_stack_slots [t->type];
+ if (cfg->disable_ref_noref_stack_slot_share) {
+ slot_info = &scalar_stack_slots [MONO_TYPE_I];
break;
}
+ /* Fall through */
+
+ case MONO_TYPE_CLASS:
+ case MONO_TYPE_OBJECT:
+ case MONO_TYPE_ARRAY:
+ case MONO_TYPE_SZARRAY:
+ case MONO_TYPE_STRING:
+ /* Share non-float stack slots of the same size */
+ slot_info = &scalar_stack_slots [MONO_TYPE_CLASS];
+ if (cfg->disable_reuse_ref_stack_slots)
+ reuse_slot = FALSE;
+ break;
+ case MONO_TYPE_VAR:
+ case MONO_TYPE_MVAR:
+ slot_info = &scalar_stack_slots [t->type];
+ break;
+ default:
+ slot_info = &scalar_stack_slots [t->type];
+ break;
}
slot = 0xffffff;
emul_opcode_hit_cache [opcode >> (EMUL_HIT_SHIFT + 3)] |= (1 << (opcode & EMUL_HIT_MASK));
}
-/*
- * For JIT icalls implemented in C.
- * NAME should be the same as the name of the C function whose address is FUNC.
- * If SAVE is TRUE, no wrapper is generated. This is for perf critical icalls which
- * can't throw exceptions.
- */
-static void
-register_icall (gpointer func, const char *name, const char *sigstr, gboolean save)
-{
- MonoMethodSignature *sig;
-
- if (sigstr)
- sig = mono_create_icall_signature (sigstr);
- else
- sig = NULL;
-
- mono_register_jit_icall_full (func, name, sig, save, FALSE, save ? name : NULL);
-}
-
-/* Register a jit icall which doesn't throw exceptions through mono_raise_exception () */
-static void
-register_icall_noraise (gpointer func, const char *name, const char *sigstr)
-{
- MonoMethodSignature *sig;
-
- if (sigstr)
- sig = mono_create_icall_signature (sigstr);
- else
- sig = NULL;
-
- mono_register_jit_icall_full (func, name, sig, TRUE, TRUE, name);
-}
-
-static void
-register_dyn_icall (gpointer func, const char *name, const char *sigstr, gboolean save)
-{
- MonoMethodSignature *sig;
-
- if (sigstr)
- sig = mono_create_icall_signature (sigstr);
- else
- sig = NULL;
-
- mono_register_jit_icall (func, name, sig, save);
-}
-
static void
print_dfn (MonoCompile *cfg) {
int i, j;
#endif /* !DISABLE_JIT */
-static gboolean
-mini_tls_key_supported (MonoTlsKey key)
-{
- if (!MONO_ARCH_HAVE_TLS_GET)
- return FALSE;
-
- return mini_get_tls_offset (key) != -1;
-}
-
void
mono_add_patch_info (MonoCompile *cfg, int ip, MonoJumpInfoType type, gconstpointer target)
{
if (cfg->opt & MONO_OPT_PEEPHOLE)
mono_arch_peephole_pass_2 (cfg, bb);
- if (cfg->gen_seq_points && !cfg->gen_seq_points_debug_data)
+ if (cfg->gen_seq_points && !cfg->gen_sdb_seq_points)
bb_deduplicate_op_il_seq_points (cfg, bb);
}
#endif
/* Allocate the code into a separate memory pool so it can be freed */
cfg->dynamic_info = g_new0 (MonoJitDynamicMethodInfo, 1);
- cfg->dynamic_info->code_mp = mono_code_manager_new_dynamic ();
+ cfg->dynamic_info->code_mp = mono_code_manager_new_dynamic (cfg->thunk_area);
mono_domain_lock (cfg->domain);
mono_dynamic_code_hash_insert (cfg->domain, cfg->method, cfg->dynamic_info);
mono_domain_unlock (cfg->domain);
ei->flags = ec->flags;
+ if (G_UNLIKELY (cfg->verbose_level >= 4))
+ printf ("IL clause: try 0x%x-0x%x handler 0x%x-0x%x filter 0x%x\n", ec->try_offset, ec->try_offset + ec->try_len, ec->handler_offset, ec->handler_offset + ec->handler_len, ec->flags == MONO_EXCEPTION_CLAUSE_FILTER ? ec->data.filter_offset : 0);
+
/*
* The spvars are needed by mono_arch_install_handler_block_guard ().
*/
}
#endif
-static MonoType*
-get_gsharedvt_type (MonoType *t)
-{
- MonoGenericParam *par = t->data.generic_param;
- MonoGenericParam *copy;
- MonoType *res;
- MonoImage *image = NULL;
-
- /*
- * Create an anonymous gparam with a different serial so normal gshared and gsharedvt methods have
- * a different instantiation.
- */
- g_assert (mono_generic_param_info (par));
- if (par->owner) {
- image = par->owner->image;
-
- mono_image_lock (image);
- if (!image->gsharedvt_types)
- image->gsharedvt_types = g_hash_table_new (NULL, NULL);
- res = g_hash_table_lookup (image->gsharedvt_types, par);
- mono_image_unlock (image);
- if (res)
- return res;
- copy = mono_image_alloc0 (image, sizeof (MonoGenericParamFull));
- memcpy (copy, par, sizeof (MonoGenericParamFull));
- } else {
- copy = g_memdup (par, sizeof (MonoGenericParam));
- }
- copy->owner = NULL;
- // FIXME:
- copy->image = mono_defaults.corlib;
- copy->serial = 1;
- res = mono_metadata_type_dup (NULL, t);
- res->data.generic_param = copy;
-
- if (par->owner) {
- mono_image_lock (image);
- /* Duplicates are ok */
- g_hash_table_insert (image->gsharedvt_types, par, res);
- mono_image_unlock (image);
- }
-
- return res;
-}
-
-static gboolean
-is_gsharedvt_type (MonoType *t)
-{
- return (t->type == MONO_TYPE_VAR || t->type == MONO_TYPE_MVAR) && t->data.generic_param->serial == 1;
-}
-
/* Return whenever METHOD is a gsharedvt method */
static gboolean
is_gsharedvt_method (MonoMethod *method)
inst = context->class_inst;
if (inst) {
for (i = 0; i < inst->type_argc; ++i)
- if (is_gsharedvt_type (inst->type_argv [i]))
+ if (mini_is_gsharedvt_gparam (inst->type_argv [i]))
return TRUE;
}
inst = context->method_inst;
if (inst) {
for (i = 0; i < inst->type_argc; ++i)
- if (is_gsharedvt_type (inst->type_argv [i]))
+ if (mini_is_gsharedvt_gparam (inst->type_argv [i]))
return TRUE;
}
return FALSE;
}
#ifndef DISABLE_JIT
+
+#if defined(__native_client_codegen__) || USE_COOP_GC
+
+/*
+ * mono_create_gc_safepoint:
+ *
+ *   Insert a GC polling sequence at the start of BBLOCK: load the address of
+ * the suspension flag (mono_polling_required, or NaCl's
+ * __nacl_thread_suspension_needed) as an AOT-patchable constant, then emit an
+ * OP_GC_SAFE_POINT instruction that reads it through sreg1.
+ *
+ *   Placement rules:
+ *   - exception-handler blocks: insert after the handler-entry op
+ *     (OP_START_HANDLER / OP_GET_EX_OBJ) if it is the first instruction,
+ *     otherwise at the head of the block;
+ *   - the entry basic block: append after its last instruction
+ *     (presumably so argument/this setup runs first — NOTE(review): confirm);
+ *   - any other block: insert at the head of the block.
+ */
+static void
+mono_create_gc_safepoint (MonoCompile *cfg, MonoBasicBlock *bblock)
+{
+	MonoInst *poll_addr, *ins;
+	if (cfg->verbose_level)
+		printf ("ADDING SAFE POINT TO BB %d\n", bblock->block_num);
+
+#if defined(__native_client_codegen__)
+	NEW_AOTCONST (cfg, poll_addr, MONO_PATCH_INFO_GC_SAFE_POINT_FLAG, (gpointer)&__nacl_thread_suspension_needed);
+#else
+	NEW_AOTCONST (cfg, poll_addr, MONO_PATCH_INFO_GC_SAFE_POINT_FLAG, (gpointer)&mono_polling_required);
+#endif
+
+	MONO_INST_NEW (cfg, ins, OP_GC_SAFE_POINT);
+	ins->sreg1 = poll_addr->dreg;
+
+	if (bblock->flags & BB_EXCEPTION_HANDLER) {
+		MonoInst *eh_op = bblock->code;
+
+		// we only skip the ops that start EH blocks.
+		// (if the first instruction is not a handler-entry op, eh_op is
+		// cleared so the safepoint goes at the head of the block instead)
+		if (eh_op && eh_op->opcode != OP_START_HANDLER && eh_op->opcode != OP_GET_EX_OBJ)
+			eh_op = NULL;
+
+		mono_bblock_insert_after_ins (bblock, eh_op, poll_addr);
+		mono_bblock_insert_after_ins (bblock, poll_addr, ins);
+	} else if (bblock == cfg->bb_entry) {
+		mono_bblock_insert_after_ins (bblock, bblock->last_ins, poll_addr);
+		mono_bblock_insert_after_ins (bblock, poll_addr, ins);
+
+	} else {
+		mono_bblock_insert_before_ins (bblock, NULL, poll_addr);
+		mono_bblock_insert_after_ins (bblock, poll_addr, ins);
+	}
+}
+
+/*
+ * mono_insert_safepoints:
+ *
+ *   Insert GC safepoints into managed code on the important code paths:
+ *
+ *   - the first basic block (method entry);
+ *   - landing basic blocks of exception handlers;
+ *   - loop body starts (requires loop detection, i.e. MONO_OPT_LOOP, to have
+ *     marked bb->loop_body_start).
+ *
+ *   Together these guarantee a thread executing managed code reaches a poll
+ * point in bounded time — TODO confirm against the suspend design docs.
+ */
+static void
+mono_insert_safepoints (MonoCompile *cfg)
+{
+	MonoBasicBlock *bb;
+
+	if (cfg->verbose_level)
+		printf ("INSERTING SAFEPOINTS\n");
+
+	for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
+		if (bb->loop_body_start || bb == cfg->bb_entry || bb->flags & BB_EXCEPTION_HANDLER)
+			mono_create_gc_safepoint (cfg, bb);
+	}
+}
+
+#else
+
+/* No-op variant: safepoint insertion is only needed for NaCl or the
+ * cooperative GC; on all other configurations this pass does nothing. */
+static void
+mono_insert_safepoints (MonoCompile *cfg)
+{
+}
+
+#endif
+
/*
* mini_method_compile:
* @method: the method to compile
MonoMethodHeader *header;
MonoMethodSignature *sig;
MonoError err;
- guint8 *ip;
MonoCompile *cfg;
int dfn, i, code_size_ratio;
-#ifndef DISABLE_SSA
- gboolean deadce_has_run = FALSE;
-#endif
gboolean try_generic_shared, try_llvm = FALSE;
MonoMethod *method_to_compile, *method_to_register;
gboolean method_is_gshared = FALSE;
gboolean run_cctors = (flags & JIT_FLAG_RUN_CCTORS) ? 1 : 0;
gboolean compile_aot = (flags & JIT_FLAG_AOT) ? 1 : 0;
gboolean full_aot = (flags & JIT_FLAG_FULL_AOT) ? 1 : 0;
+ gboolean disable_direct_icalls = (flags & JIT_FLAG_NO_DIRECT_ICALLS) ? 1 : 0;
+ gboolean gsharedvt_method = FALSE;
#ifdef ENABLE_LLVM
gboolean llvm = (flags & JIT_FLAG_LLVM) ? 1 : 0;
#endif
if (MONO_METHOD_COMPILE_BEGIN_ENABLED ())
MONO_PROBE_METHOD_COMPILE_BEGIN (method);
+ /*
+ * In AOT mode, method can be the following:
+ * - the generic method definition. In this case, we are compiling the fully shared
+ * version of the method, i.e. the version where all the type parameters are
+ * reference types.
+ * - a gsharedvt method.
+ * - a method inflated with type parameters. This is for partial sharing.
+ * - a method inflated with concrete types.
+ */
if (compile_aot)
- /*
- * We might get passed the original generic method definition or
- * instances with type parameters.
- * FIXME: Remove the method->klass->generic_class limitation.
- */
try_generic_shared = mono_class_generic_sharing_enabled (method->klass) &&
(opts & MONO_OPT_GSHARED) && ((method->is_generic || method->klass->generic_container) || (!method->klass->generic_class && mono_method_is_generic_sharable_full (method, TRUE, FALSE, FALSE)));
else
try_generic_shared = mono_class_generic_sharing_enabled (method->klass) &&
(opts & MONO_OPT_GSHARED) && mono_method_is_generic_sharable (method, FALSE);
+ /*
+ if (try_generic_shared && !mono_debug_count ())
+ try_generic_shared = FALSE;
+ */
+
if (opts & MONO_OPT_GSHARED) {
if (try_generic_shared)
mono_stats.generics_sharable_methods++;
try_generic_shared = FALSE;
}
- if (is_gsharedvt_method (method) || (compile_aot && is_open_method (method))) {
+ gsharedvt_method = is_gsharedvt_method (method);
+ if (gsharedvt_method || (compile_aot && is_open_method (method))) {
/* We are AOTing a gshared method directly */
method_is_gshared = TRUE;
g_assert (compile_aot);
cfg->full_aot = full_aot;
cfg->skip_visibility = method->skip_visibility;
cfg->orig_method = method;
- cfg->gen_seq_points = debug_options.gen_seq_points_compact_data || debug_options.gen_seq_points_debug_data;
- cfg->gen_seq_points_debug_data = debug_options.gen_seq_points_debug_data;
+ cfg->gen_seq_points = debug_options.gen_seq_points_compact_data || debug_options.gen_sdb_seq_points;
+ cfg->gen_sdb_seq_points = debug_options.gen_sdb_seq_points;
+
+#ifdef PLATFORM_ANDROID
+ if (cfg->method->wrapper_type != MONO_WRAPPER_NONE) {
+ /* FIXME: Why is this needed */
+ cfg->gen_seq_points = FALSE;
+ cfg->gen_sdb_seq_points = FALSE;
+ }
+#endif
+ /* coop / nacl requires loop detection to happen */
+#if defined(__native_client_codegen__) || defined(USE_COOP_GC)
+ cfg->opt |= MONO_OPT_LOOP;
+#endif
cfg->explicit_null_checks = debug_options.explicit_null_checks;
cfg->soft_breakpoints = debug_options.soft_breakpoints;
cfg->check_pinvoke_callconv = debug_options.check_pinvoke_callconv;
+ cfg->disable_direct_icalls = disable_direct_icalls;
if (try_generic_shared)
cfg->generic_sharing_context = (MonoGenericSharingContext*)&cfg->gsctx;
cfg->compile_llvm = try_llvm;
return cfg;
}
- if (cfg->generic_sharing_context && (mini_is_gsharedvt_sharable_method (method) || method_is_gshared)) {
+ if (cfg->generic_sharing_context && (gsharedvt_method || mini_is_gsharedvt_sharable_method (method))) {
MonoMethodInflated *inflated;
MonoGenericContext *context;
- if (method_is_gshared) {
+ if (gsharedvt_method) {
g_assert (method->is_inflated);
inflated = (MonoMethodInflated*)method;
context = &inflated->context;
}
}
- ip = (guint8 *)header->code;
-
cfg->intvars = mono_mempool_alloc0 (cfg->mempool, sizeof (guint16) * STACK_MAX * header->max_stack);
if (cfg->verbose_level > 0) {
g_free (method_name);
}
- if (cfg->opt & (MONO_OPT_ABCREM | MONO_OPT_SSAPRE))
+ if (cfg->opt & MONO_OPT_ABCREM)
cfg->opt |= MONO_OPT_SSA;
/*
*/
mono_compile_create_vars (cfg);
- /* SSAPRE is not supported on linear IR */
- cfg->opt &= ~MONO_OPT_SSAPRE;
-
i = mono_method_to_ir (cfg, method_to_compile, NULL, NULL, NULL, NULL, 0, FALSE);
if (i < 0) {
if (!COMPILE_LLVM (cfg))
mono_if_conversion (cfg);
- if ((cfg->opt & MONO_OPT_SSAPRE) || cfg->globalra)
+ if (cfg->globalra)
mono_remove_critical_edges (cfg);
/* Depth-first ordering on basic blocks */
mono_compute_natural_loops (cfg);
}
+ mono_insert_safepoints (cfg);
+
/* after method_to_ir */
if (parts == 1) {
if (MONO_METHOD_COMPILE_END_ENABLED ())
if (cfg->comp_done & MONO_COMP_SSA && !COMPILE_LLVM (cfg)) {
//mono_ssa_strength_reduction (cfg);
- if (cfg->opt & MONO_OPT_SSAPRE) {
- mono_perform_ssapre (cfg);
- //mono_local_cprop (cfg);
- }
-
- if (cfg->opt & MONO_OPT_DEADCE) {
+ if (cfg->opt & MONO_OPT_DEADCE)
mono_ssa_deadce (cfg);
- deadce_has_run = TRUE;
- }
if ((cfg->flags & (MONO_CFG_HAS_LDELEMA|MONO_CFG_HAS_CHECK_THIS)) && (cfg->opt & MONO_OPT_ABCREM))
mono_perform_abc_removal (cfg);