#define mono_jit_unlock() mono_mutex_unlock (&jit_mutex)
static mono_mutex_t jit_mutex;
-/* Whenever to check for pending exceptions in managed-to-native wrappers */
-gboolean check_for_pending_exc = TRUE;
+#ifndef DISABLE_JIT
gpointer
mono_realloc_native_code (MonoCompile *cfg)
}
#endif /* __native_client_codegen__ */
+#ifdef USE_JUMP_TABLES
+
+#define DEFAULT_JUMPTABLE_CHUNK_ELEMENTS 128
+
+/*
+ * One chunk of the global jump table.  The entry slots are allocated
+ * inline immediately after this header (see the commented-out flexible
+ * array below).  Chunks are chained newest-first through 'previous' so
+ * that pointers handed out into older chunks stay valid when the table
+ * grows.
+ */
+typedef struct MonoJumpTableChunk {
+ guint32 total; /* number of entry slots allocated in this chunk */
+ guint32 active; /* number of slots already handed out */
+ struct MonoJumpTableChunk *previous; /* older chunk, freed by mono_jumptable_cleanup () */
+ /* gpointer entries[total]; */
+} MonoJumpTableChunk;
+
+static MonoJumpTableChunk* g_jumptable;
+#define mono_jumptable_lock() mono_mutex_lock (&jumptable_mutex)
+#define mono_jumptable_unlock() mono_mutex_unlock (&jumptable_mutex)
+static mono_mutex_t jumptable_mutex;
+
+/*
+ * mono_create_jumptable_chunk:
+ *
+ * Allocate one zero-initialized chunk with room for MAX_ENTRIES inline
+ * entry slots after the header.  'active' and 'previous' are left zeroed
+ * by g_new0.  The caller owns the returned memory (released with g_free ()).
+ */
+static MonoJumpTableChunk*
+mono_create_jumptable_chunk (guint32 max_entries)
+{
+ guint32 size = sizeof (MonoJumpTableChunk) + max_entries * sizeof(gpointer);
+ MonoJumpTableChunk *chunk = (MonoJumpTableChunk*) g_new0 (guchar, size);
+ chunk->total = max_entries;
+ return chunk;
+}
+
+/*
+ * mono_jumptable_init:
+ *
+ * Lazily create the global jump table and its recursive mutex.
+ * Idempotent once initialized.
+ * NOTE(review): the NULL check itself is not synchronized, so this
+ * relies on the first call happening before concurrent use — confirm.
+ */
+void
+mono_jumptable_init (void)
+{
+ if (g_jumptable == NULL) {
+ mono_mutex_init_recursive (&jumptable_mutex);
+ g_jumptable = mono_create_jumptable_chunk (DEFAULT_JUMPTABLE_CHUNK_ELEMENTS);
+ }
+}
+
+/*
+ * mono_jumptable_add_entry:
+ *
+ * Reserve a single slot in the global jump table; convenience wrapper
+ * around mono_jumptable_add_entries ().
+ */
+gpointer*
+mono_jumptable_add_entry (void)
+{
+ return mono_jumptable_add_entries (1);
+}
+
+/*
+ * mono_jumptable_add_entries:
+ *
+ * Reserve ENTRIES consecutive slots in the global jump table and return
+ * a pointer to the first one.  Thread safe (takes the jumptable lock);
+ * initializes the table on first use.  Slots within one call are always
+ * contiguous; any slots left unused in the previous chunk are abandoned
+ * when the table grows.
+ */
+gpointer*
+mono_jumptable_add_entries (guint32 entries)
+{
+ guint32 index;
+ gpointer *result;
+
+ mono_jumptable_init ();
+ mono_jumptable_lock ();
+ index = g_jumptable->active;
+ /* NOTE(review): '>=' grows the table even when the request would fit
+ * exactly (index + entries == total), wasting the final slot — confirm
+ * the off-by-one is intentional. */
+ if (index + entries >= g_jumptable->total) {
+ /*
+ * Grow jumptable, by adding one more chunk.
+ * We cannot realloc jumptable, as there could be pointers
+ * to existing jump table entries in the code, so instead
+ * we just add one more chunk.
+ */
+ guint32 max_entries = entries;
+ MonoJumpTableChunk *new_chunk;
+
+ if (max_entries < DEFAULT_JUMPTABLE_CHUNK_ELEMENTS)
+ max_entries = DEFAULT_JUMPTABLE_CHUNK_ELEMENTS;
+ new_chunk = mono_create_jumptable_chunk (max_entries);
+ /* Keep the old chunk linked so mono_jumptable_cleanup () can free it. */
+ new_chunk->previous = g_jumptable;
+ g_jumptable = new_chunk;
+ index = 0;
+ }
+ g_jumptable->active = index + entries;
+ /* Entry slots live inline right after the chunk header. */
+ result = (gpointer*)((guchar*)g_jumptable + sizeof(MonoJumpTableChunk)) + index;
+ mono_jumptable_unlock();
+
+ return result;
+}
+
+/*
+ * mono_jumptable_cleanup:
+ *
+ * Free every chunk in the jump table chain and destroy the mutex.
+ * Must only be called at shutdown, after no generated code can still
+ * dereference jump table entries.
+ */
+void
+mono_jumptable_cleanup (void)
+{
+ if (g_jumptable) {
+ MonoJumpTableChunk *current = g_jumptable, *prev;
+ while (current != NULL) {
+ prev = current->previous;
+ g_free (current);
+ current = prev;
+ }
+ g_jumptable = NULL;
+ mono_mutex_destroy (&jumptable_mutex);
+ }
+}
+
+/*
+ * mono_jumptable_get_entry:
+ *
+ * Map a code address back to the jump table slot it references;
+ * delegates to the architecture backend, which knows how the entry
+ * address is encoded in the instruction stream.
+ */
+gpointer*
+mono_jumptable_get_entry (guint8 *code_ptr)
+{
+ return mono_arch_jumptable_entry_from_code (code_ptr);
+}
+
+#endif /* USE_JUMP_TABLES */
+
typedef struct {
MonoExceptionClause *clause;
MonoBasicBlock *basic_block;
guint
mono_type_to_store_membase (MonoCompile *cfg, MonoType *type)
{
- type = mini_get_underlying_type (cfg, type);
+ type = mini_get_underlying_type (type);
handle_enum:
switch (type->type) {
goto handle_enum;
case MONO_TYPE_VAR:
case MONO_TYPE_MVAR:
- g_assert (mini_type_var_is_vt (cfg, type));
+ g_assert (mini_type_var_is_vt (type));
return OP_STOREV_MEMBASE;
default:
g_error ("unknown type 0x%02x in type_to_store_membase", type->type);
guint
mono_type_to_load_membase (MonoCompile *cfg, MonoType *type)
{
- type = mini_get_underlying_type (cfg, type);
+ type = mini_get_underlying_type (type);
switch (type->type) {
case MONO_TYPE_I1:
break;
case MONO_TYPE_VAR:
case MONO_TYPE_MVAR:
- g_assert (cfg->generic_sharing_context);
- g_assert (mini_type_var_is_vt (cfg, type));
+ g_assert (cfg->gshared);
+ g_assert (mini_type_var_is_vt (type));
return OP_LOADV_MEMBASE;
default:
g_error ("unknown type 0x%02x in type_to_load_membase", type->type);
return -1;
}
-static guint
-mini_type_to_ldind (MonoCompile* cfg, MonoType *type)
-{
- type = mini_get_underlying_type (cfg, type);
- if (cfg->generic_sharing_context && !type->byref && (type->type == MONO_TYPE_VAR || type->type == MONO_TYPE_MVAR)) {
- g_assert (mini_type_var_is_vt (cfg, type));
- return CEE_LDOBJ;
- }
- return mono_type_to_ldind (type);
-}
-
-#ifndef DISABLE_JIT
-
guint
mini_type_to_stind (MonoCompile* cfg, MonoType *type)
{
- type = mini_get_underlying_type (cfg, type);
- if (cfg->generic_sharing_context && !type->byref && (type->type == MONO_TYPE_VAR || type->type == MONO_TYPE_MVAR)) {
- g_assert (mini_type_var_is_vt (cfg, type));
+ type = mini_get_underlying_type (type);
+ if (cfg->gshared && !type->byref && (type->type == MONO_TYPE_VAR || type->type == MONO_TYPE_MVAR)) {
+ g_assert (mini_type_var_is_vt (type));
return CEE_STOBJ;
}
return mono_type_to_stind (type);
bb->max_vreg = MAX (bb->max_vreg, cfg->next_vreg);
}
-#endif
-
static void
set_vreg_to_inst (MonoCompile *cfg, int vreg, MonoInst *inst)
{
#define mono_type_is_long(type) (!(type)->byref && ((mono_type_get_underlying_type (type)->type == MONO_TYPE_I8) || (mono_type_get_underlying_type (type)->type == MONO_TYPE_U8)))
#define mono_type_is_float(type) (!(type)->byref && (((type)->type == MONO_TYPE_R8) || ((type)->type == MONO_TYPE_R4)))
-#ifdef DISABLE_JIT
-
-MonoInst*
-mono_compile_create_var (MonoCompile *cfg, MonoType *type, int opcode)
-{
- return NULL;
-}
-
-#else
-
MonoInst*
mono_compile_create_var_for_vreg (MonoCompile *cfg, MonoType *type, int opcode, int vreg)
{
int num = cfg->num_varinfo;
gboolean regpair;
- type = mini_get_underlying_type (cfg, type);
+ type = mini_get_underlying_type (type);
if ((num + 1) >= cfg->varinfo_count) {
int orig_count = cfg->varinfo_count;
if (type->byref) {
mono_mark_vreg_as_mp (cfg, vreg);
} else {
- if ((MONO_TYPE_ISSTRUCT (type) && inst->klass->has_references) || mini_type_is_reference (cfg, type)) {
+ if ((MONO_TYPE_ISSTRUCT (type) && inst->klass->has_references) || mini_type_is_reference (type)) {
inst->flags |= MONO_INST_GC_TRACK;
mono_mark_vreg_as_ref (cfg, vreg);
}
mono_compile_create_var (MonoCompile *cfg, MonoType *type, int opcode)
{
int dreg;
- type = mini_get_underlying_type (cfg, type);
+ type = mini_get_underlying_type (type);
if (mono_type_is_long (type))
dreg = mono_alloc_dreg (cfg, STACK_I8);
return mono_compile_create_var_for_vreg (cfg, type, opcode, dreg);
}
-/*
- * Transform a MonoInst into a load from the variable of index var_index.
- */
-void
-mono_compile_make_var_load (MonoCompile *cfg, MonoInst *dest, gssize var_index)
-{
- memset (dest, 0, sizeof (MonoInst));
- dest->inst_i0 = cfg->varinfo [var_index];
- dest->opcode = mini_type_to_ldind (cfg, dest->inst_i0->inst_vtype);
- type_to_eval_stack_type (cfg, dest->inst_i0->inst_vtype, dest);
- dest->klass = dest->inst_i0->klass;
-}
-
MonoInst*
mini_get_int_to_float_spill_area (MonoCompile *cfg)
{
#endif
}
-#endif
-
void
mono_mark_vreg_as_ref (MonoCompile *cfg, int vreg)
{
}
static MonoType*
-type_from_stack_type (MonoInst *ins) {
+type_from_stack_type (MonoInst *ins)
+{
switch (ins->type) {
case STACK_I4: return &mono_defaults.int32_class->byval_arg;
case STACK_I8: return &mono_defaults.int64_class->byval_arg;
}
MonoType*
-mono_type_from_stack_type (MonoInst *ins) {
+mono_type_from_stack_type (MonoInst *ins)
+{
return type_from_stack_type (ins);
}
return FALSE;
if (assembly->in_gac || assembly->image == mono_defaults.corlib)
return FALSE;
- if (mono_security_enabled ())
- return FALSE;
return mono_assembly_has_skip_verification (assembly);
}
return 1;
}
-#ifndef DISABLE_JIT
-
#if 0
#define LSCAN_DEBUG(a) do { a; } while (0)
#else
inst = cfg->varinfo [vmv->idx];
t = mono_type_get_underlying_type (inst->inst_vtype);
- if (cfg->gsharedvt && mini_is_gsharedvt_variable_type (cfg, t))
+ if (cfg->gsharedvt && mini_is_gsharedvt_variable_type (t))
continue;
/* inst->backend.is_pinvoke indicates native sized value types, this is used by the
else {
int ialign;
- size = mini_type_stack_size (NULL, t, &ialign);
+ size = mini_type_stack_size (t, &ialign);
align = ialign;
if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (t)))
if (cfg->disable_reuse_stack_slots)
reuse_slot = FALSE;
- t = mini_get_underlying_type (cfg, t);
+ t = mini_get_underlying_type (t);
switch (t->type) {
case MONO_TYPE_GENERICINST:
if (!mono_type_generic_inst_is_valuetype (t)) {
inst = cfg->varinfo [vmv->idx];
t = mono_type_get_underlying_type (inst->inst_vtype);
- if (cfg->gsharedvt && mini_is_gsharedvt_variable_type (cfg, t))
+ if (cfg->gsharedvt && mini_is_gsharedvt_variable_type (t))
continue;
/* inst->backend.is_pinvoke indicates native sized value types, this is used by the
} else {
int ialign;
- size = mini_type_stack_size (NULL, t, &ialign);
+ size = mini_type_stack_size (t, &ialign);
align = ialign;
if (mono_class_from_mono_type (t)->exception_type)
if (cfg->disable_reuse_stack_slots)
reuse_slot = FALSE;
- t = mini_get_underlying_type (cfg, t);
+ t = mini_get_underlying_type (t);
switch (t->type) {
case MONO_TYPE_GENERICINST:
if (!mono_type_generic_inst_is_valuetype (t)) {
return offsets;
}
-#else
-
-gint32*
-mono_allocate_stack_slots (MonoCompile *cfg, gboolean backward, guint32 *stack_size, guint32 *stack_align)
-{
- g_assert_not_reached ();
- return NULL;
-}
-
-#endif /* DISABLE_JIT */
-
#define EMUL_HIT_SHIFT 3
#define EMUL_HIT_MASK ((1 << EMUL_HIT_SHIFT) - 1)
/* small hit bitmap cache */
}
static void
-print_dfn (MonoCompile *cfg) {
+print_dfn (MonoCompile *cfg)
+{
int i, j;
char *code;
MonoBasicBlock *bb;
g_free (cfg);
}
-#ifndef DISABLE_JIT
-
static MonoInst*
mono_create_tls_get_offset (MonoCompile *cfg, int offset)
{
MonoInst*
mono_create_tls_get (MonoCompile *cfg, MonoTlsKey key)
{
+ if (!MONO_ARCH_HAVE_TLS_GET)
+ return NULL;
+
/*
* TLS offsets might be different at AOT time, so load them from a GOT slot and
* use a different opcode.
*/
if (cfg->compile_aot) {
- if (MONO_ARCH_HAVE_TLS_GET && ARCH_HAVE_TLS_GET_REG) {
+ if (ARCH_HAVE_TLS_GET_REG) {
MonoInst *ins, *c;
EMIT_NEW_TLS_OFFSETCONST (cfg, c, key);
return mono_create_tls_get (cfg, TLS_KEY_LMF_ADDR);
}
-#endif /* !DISABLE_JIT */
-
-
void
mono_add_patch_info (MonoCompile *cfg, int ip, MonoJumpInfoType type, gconstpointer target)
{
cfg->rgctx_loclist = g_slist_append_mempool (cfg->mempool, cfg->rgctx_loclist, entry);
}
-#ifndef DISABLE_JIT
-
static void
mono_compile_create_vars (MonoCompile *cfg)
{
int max_epilog_size;
guint8 *code;
MonoDomain *code_domain;
+ guint unwindlen = 0;
if (mono_using_xdebug)
/*
if (cfg->opt & MONO_OPT_PEEPHOLE)
mono_arch_peephole_pass_1 (cfg, bb);
- if (!cfg->globalra)
- mono_local_regalloc (cfg, bb);
+ mono_local_regalloc (cfg, bb);
if (cfg->opt & MONO_OPT_PEEPHOLE)
mono_arch_peephole_pass_2 (cfg, bb);
if (cfg->gen_seq_points && !cfg->gen_sdb_seq_points)
- bb_deduplicate_op_il_seq_points (cfg, bb);
+ mono_bb_deduplicate_op_il_seq_points (cfg, bb);
}
if (cfg->prof_options & MONO_PROFILE_COVERAGE)
#endif
/* fixme: align to MONO_ARCH_CODE_ALIGNMENT */
- if (cfg->method->dynamic) {
- guint unwindlen = 0;
#ifdef MONO_ARCH_HAVE_UNWIND_TABLE
- unwindlen = mono_arch_unwindinfo_get_size (cfg->arch.unwindinfo);
+ unwindlen = mono_arch_unwindinfo_get_size (cfg->arch.unwindinfo);
#endif
+
+ if (cfg->method->dynamic) {
/* Allocate the code into a separate memory pool so it can be freed */
cfg->dynamic_info = g_new0 (MonoJitDynamicMethodInfo, 1);
- cfg->dynamic_info->code_mp = mono_code_manager_new_dynamic (cfg->thunk_area);
+ cfg->dynamic_info->code_mp = mono_code_manager_new_dynamic ();
mono_domain_lock (cfg->domain);
mono_dynamic_code_hash_insert (cfg->domain, cfg->method, cfg->dynamic_info);
mono_domain_unlock (cfg->domain);
if (mono_using_xdebug)
/* See the comment for cfg->code_domain */
- code = mono_domain_code_reserve (code_domain, cfg->code_size + unwindlen);
+ code = mono_domain_code_reserve (code_domain, cfg->code_size + cfg->thunk_area + unwindlen);
else
- code = mono_code_manager_reserve (cfg->dynamic_info->code_mp, cfg->code_size + unwindlen);
+ code = mono_code_manager_reserve (cfg->dynamic_info->code_mp, cfg->code_size + cfg->thunk_area + unwindlen);
} else {
- guint unwindlen = 0;
-#ifdef MONO_ARCH_HAVE_UNWIND_TABLE
- unwindlen = mono_arch_unwindinfo_get_size (cfg->arch.unwindinfo);
-#endif
- code = mono_domain_code_reserve (code_domain, cfg->code_size + unwindlen);
+ code = mono_domain_code_reserve (code_domain, cfg->code_size + cfg->thunk_area + unwindlen);
}
#if defined(__native_client_codegen__) && defined(__native_client__)
nacl_allow_target_modification (TRUE);
#endif
+ if (cfg->thunk_area) {
+ cfg->thunks_offset = cfg->code_size + unwindlen;
+ cfg->thunks = code + cfg->thunks_offset;
+ memset (cfg->thunks, 0, cfg->thunk_area);
+ }
g_assert (code);
memcpy (code, cfg->native_code, cfg->code_len);
is_generic = TRUE;
}
- if (cfg->generic_sharing_context)
+ if (cfg->gshared)
g_assert (is_generic);
}
mono_nacl_fix_patches (cfg->native_code, cfg->patch_info);
#endif
- mono_arch_patch_code (cfg->method, cfg->domain, cfg->native_code, cfg->patch_info, cfg->dynamic_info ? cfg->dynamic_info->code_mp : NULL, cfg->run_cctors);
+#ifdef MONO_ARCH_HAVE_PATCH_CODE_NEW
+ {
+ MonoJumpInfo *ji;
+ gpointer target;
+
+ for (ji = cfg->patch_info; ji; ji = ji->next) {
+ if (cfg->compile_aot) {
+ switch (ji->type) {
+ case MONO_PATCH_INFO_BB:
+ case MONO_PATCH_INFO_LABEL:
+ break;
+ default:
+ /* No need to patch these */
+ continue;
+ }
+ }
+
+ if (ji->type == MONO_PATCH_INFO_NONE)
+ continue;
+
+ target = mono_resolve_patch_target (cfg->method, cfg->domain, cfg->native_code, ji, cfg->run_cctors);
+ mono_arch_patch_code_new (cfg, cfg->domain, cfg->native_code, ji, target);
+ }
+ }
+#else
+ mono_arch_patch_code (cfg, cfg->method, cfg->domain, cfg->native_code, cfg->patch_info, cfg->run_cctors);
+#endif
if (cfg->method->dynamic) {
if (mono_using_xdebug)
}
}
-#endif /* #ifndef DISABLE_JIT */
-
-static MonoJitInfo*
-create_jit_info_for_trampoline (MonoMethod *wrapper, MonoTrampInfo *info)
-{
- MonoDomain *domain = mono_get_root_domain ();
- MonoJitInfo *jinfo;
- guint8 *uw_info;
- guint32 info_len;
-
- if (info->uw_info) {
- uw_info = info->uw_info;
- info_len = info->uw_info_len;
- } else {
- uw_info = mono_unwind_ops_encode (info->unwind_ops, &info_len);
- }
-
- jinfo = mono_domain_alloc0 (domain, MONO_SIZEOF_JIT_INFO);
- jinfo->d.method = wrapper;
- jinfo->code_start = info->code;
- jinfo->code_size = info->code_size;
- jinfo->unwind_info = mono_cache_unwind_info (uw_info, info_len);
-
- if (!info->uw_info)
- g_free (uw_info);
-
- return jinfo;
-}
-
-#ifndef DISABLE_JIT
-
static MonoJitInfo*
create_jit_info (MonoCompile *cfg, MonoMethod *method_to_compile)
{
g_assert (method_to_compile == cfg->method);
header = cfg->header;
- if (cfg->generic_sharing_context)
+ if (cfg->gshared)
flags |= JIT_INFO_HAS_GENERIC_JIT_INFO;
if (cfg->arch_eh_jit_info) {
* mono_arch_get_argument_info () is not signal safe.
*/
arg_info = g_newa (MonoJitArgumentInfo, sig->param_count + 1);
- stack_size = mono_arch_get_argument_info (cfg->generic_sharing_context, sig, sig->param_count, arg_info);
+ stack_size = mono_arch_get_argument_info (sig, sig->param_count, arg_info);
if (stack_size)
flags |= JIT_INFO_HAS_ARCH_EH_INFO;
if (cfg->has_unwind_info_for_epilog && !(flags & JIT_INFO_HAS_ARCH_EH_INFO))
flags |= JIT_INFO_HAS_ARCH_EH_INFO;
-
+
+ if (cfg->thunk_area)
+ flags |= JIT_INFO_HAS_THUNK_INFO;
+
if (cfg->try_block_holes) {
for (tmp = cfg->try_block_holes; tmp; tmp = tmp->next) {
TryBlockHole *hole = tmp->data;
printf ("Number of try block holes %d\n", num_holes);
}
- if (mono_security_method_has_declsec (cfg->method_to_register))
- flags |= JIT_INFO_HAS_ARCH_EH_INFO;
-
if (COMPILE_LLVM (cfg))
num_clauses = cfg->llvm_ex_info_len;
else
if (COMPILE_LLVM (cfg))
jinfo->from_llvm = TRUE;
- if (cfg->generic_sharing_context) {
+ if (cfg->gshared) {
MonoInst *inst;
MonoGenericJitInfo *gi;
GSList *loclist = NULL;
info->stack_size = stack_size;
}
+ if (cfg->thunk_area) {
+ MonoThunkJitInfo *info;
+
+ info = mono_jit_info_get_thunk_info (jinfo);
+ info->thunks_offset = cfg->thunks_offset;
+ info->thunks_size = cfg->thunk_area;
+ }
+
if (COMPILE_LLVM (cfg)) {
if (num_clauses)
memcpy (&jinfo->clauses [0], &cfg->llvm_ex_info [0], num_clauses * sizeof (MonoJitExceptionInfo));
return jinfo;
}
-#endif
/* Return whenever METHOD is a gsharedvt method */
static gboolean
return FALSE;
}
-#ifndef DISABLE_JIT
-/*
- * mini_method_compile:
- * @method: the method to compile
- * @opts: the optimization flags to use
- * @domain: the domain where the method will be compiled in
- * @flags: compilation flags
- * @parts: debug flag
- *
- * Returns: a MonoCompile* pointer. Caller must check the exception_type
- * field in the returned struct to see if compilation succeded.
- */
-MonoCompile*
-mini_method_compile (MonoMethod *method, guint32 opts, MonoDomain *domain, JitFlags flags, int parts)
+#if defined(__native_client_codegen__) || USE_COOP_GC
+
+/*
+ * mono_create_gc_safepoint:
+ *
+ * Emit a GC safepoint poll into BBLOCK: load the address of the global
+ * polling flag (via a MONO_PATCH_INFO_GC_SAFE_POINT_FLAG aotconst) and
+ * feed it to an OP_GC_SAFE_POINT instruction.  Placement rules:
+ * - exception-handler landing blocks: after any leading OP_START_HANDLER /
+ *   OP_GET_EX_OBJ instructions, which must stay first in the block;
+ * - the entry block: after its last instruction;
+ * - any other block: at the very start.
+ */
+static void
+mono_create_gc_safepoint (MonoCompile *cfg, MonoBasicBlock *bblock)
{
-	MonoMethodHeader *header;
-	MonoMethodSignature *sig;
-	MonoError err;
-	MonoCompile *cfg;
-	int dfn, i, code_size_ratio;
-	gboolean try_generic_shared, try_llvm = FALSE;
-	MonoMethod *method_to_compile, *method_to_register;
-	gboolean method_is_gshared = FALSE;
-	gboolean run_cctors = (flags & JIT_FLAG_RUN_CCTORS) ? 1 : 0;
-	gboolean compile_aot = (flags & JIT_FLAG_AOT) ? 1 : 0;
-	gboolean full_aot = (flags & JIT_FLAG_FULL_AOT) ? 1 : 0;
-	gboolean disable_direct_icalls = (flags & JIT_FLAG_NO_DIRECT_ICALLS) ? 1 : 0;
-	gboolean gsharedvt_method = FALSE;
-#ifdef ENABLE_LLVM
-	gboolean llvm = (flags & JIT_FLAG_LLVM) ? 1 : 0;
+ MonoInst *poll_addr, *ins;
+ if (cfg->verbose_level > 1)
+ printf ("ADDING SAFE POINT TO BB %d\n", bblock->block_num);
+
+#if defined(__native_client_codegen__)
+ NEW_AOTCONST (cfg, poll_addr, MONO_PATCH_INFO_GC_SAFE_POINT_FLAG, (gpointer)&__nacl_thread_suspension_needed);
+#else
+ NEW_AOTCONST (cfg, poll_addr, MONO_PATCH_INFO_GC_SAFE_POINT_FLAG, (gpointer)&mono_polling_required);
#endif
-	static gboolean verbose_method_inited;
-	static const char *verbose_method_name;
-	InterlockedIncrement (&mono_jit_stats.methods_compiled);
-	if (mono_profiler_get_events () & MONO_PROFILE_JIT_COMPILATION)
-		mono_profiler_method_jit (method);
-	if (MONO_METHOD_COMPILE_BEGIN_ENABLED ())
-		MONO_PROBE_METHOD_COMPILE_BEGIN (method);
+ MONO_INST_NEW (cfg, ins, OP_GC_SAFE_POINT);
+ ins->sreg1 = poll_addr->dreg;
-	/*
-	 * In AOT mode, method can be the following:
-	 * - the generic method definition. In this case, we are compiling the fully shared
-	 *   version of the method, i.e. the version where all the type parameters are
-	 *   reference types.
-	 * - a gsharedvt method.
-	 * - a method inflated with type parameters. This is for partial sharing.
-	 * - a method inflated with concrete types.
-	 */
-	if (compile_aot)
-		try_generic_shared = mono_class_generic_sharing_enabled (method->klass) &&
-			(opts & MONO_OPT_GSHARED) && ((method->is_generic || method->klass->generic_container) || (!method->klass->generic_class && mono_method_is_generic_sharable_full (method, TRUE, FALSE, FALSE)));
-	else
-		try_generic_shared = mono_class_generic_sharing_enabled (method->klass) &&
-			(opts & MONO_OPT_GSHARED) && mono_method_is_generic_sharable (method, FALSE);
+ if (bblock->flags & BB_EXCEPTION_HANDLER) {
+ MonoInst *eh_op = bblock->code;
-	/*
+ if (eh_op && eh_op->opcode != OP_START_HANDLER && eh_op->opcode != OP_GET_EX_OBJ) {
+ eh_op = NULL;
+ } else {
+ MonoInst *next_eh_op = eh_op ? eh_op->next : NULL;
+ // skip all EH-related ops, the safepoint must go after them
+ while (next_eh_op && (next_eh_op->opcode == OP_START_HANDLER || next_eh_op->opcode == OP_GET_EX_OBJ)) {
+ eh_op = next_eh_op;
+ next_eh_op = eh_op->next;
+ }
+ }
+
+ mono_bblock_insert_after_ins (bblock, eh_op, poll_addr);
+ mono_bblock_insert_after_ins (bblock, poll_addr, ins);
+ } else if (bblock == cfg->bb_entry) {
+ mono_bblock_insert_after_ins (bblock, bblock->last_ins, poll_addr);
+ mono_bblock_insert_after_ins (bblock, poll_addr, ins);
+
+ } else {
+ mono_bblock_insert_before_ins (bblock, NULL, poll_addr);
+ mono_bblock_insert_after_ins (bblock, poll_addr, ins);
+ }
+}
+
+/*
+ * mono_insert_safepoints:
+ *
+ * Insert GC safepoint polls into the method at the points where a
+ * suspend request must be noticed in bounded time:
+ * - the first basic block (method entry),
+ * - the landing basic block of every exception handler,
+ * - every loop body start (relies on loop detection having run).
+ *
+ * Wrappers that themselves implement the polling machinery are skipped
+ * to avoid recursion, and so are native-to-managed wrappers.
+ */
+static void
+mono_insert_safepoints (MonoCompile *cfg)
+{
+ MonoBasicBlock *bb;
+ /* Never instrument the wrapper around the polling function itself. */
+ if (cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
+ WrapperInfo *info = mono_marshal_get_wrapper_info (cfg->method);
+#if defined(__native_client__) || defined(__native_client_codegen__)
+ gpointer poll_func = &mono_nacl_gc;
+#elif defined(USE_COOP_GC)
+ gpointer poll_func = &mono_threads_state_poll;
+#else
+ gpointer poll_func = NULL;
+#endif
+
+ if (info && info->subtype == WRAPPER_SUBTYPE_ICALL_WRAPPER && info->d.icall.func == poll_func) {
+ if (cfg->verbose_level > 1)
+ printf ("SKIPPING SAFEPOINTS for the polling function icall\n");
+ return;
+ }
+ }
+
+ if (cfg->method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
+ if (cfg->verbose_level > 1)
+ printf ("SKIPPING SAFEPOINTS for native-to-managed wrappers.\n");
+ return;
+ }
+
+ if (cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
+ WrapperInfo *info = mono_marshal_get_wrapper_info (cfg->method);
+
+ if (info && info->subtype == WRAPPER_SUBTYPE_ICALL_WRAPPER && info->d.icall.func == mono_thread_interruption_checkpoint) {
+ /* These wrappers are called from the wrapper for the polling function, leading to potential stack overflow */
+ if (cfg->verbose_level > 1)
+ printf ("SKIPPING SAFEPOINTS for the interruption checkpoint icall\n");
+ return;
+ }
+ }
+
+ if (cfg->verbose_level > 1)
+ printf ("INSERTING SAFEPOINTS\n");
+ if (cfg->verbose_level > 2)
+ mono_print_code (cfg, "BEFORE SAFEPOINTS");
+
+ for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
+ if (bb->loop_body_start || bb == cfg->bb_entry || bb->flags & BB_EXCEPTION_HANDLER)
+ mono_create_gc_safepoint (cfg, bb);
+ }
+
+ if (cfg->verbose_level > 2)
+ mono_print_code (cfg, "AFTER SAFEPOINTS");
+
+}
+
+#else
+
+/* No-op stub: safepoint insertion is only needed for NaCl / cooperative
+ * suspend builds (see the #if above). */
+static void
+mono_insert_safepoints (MonoCompile *cfg)
+{
+}
+
+#endif
+
+/*
+ * mini_method_compile:
+ * @method: the method to compile
+ * @opts: the optimization flags to use
+ * @domain: the domain where the method will be compiled in
+ * @flags: compilation flags
+ * @parts: debug flag
+ *
+ * Returns: a MonoCompile* pointer. Caller must check the exception_type
+ * field in the returned struct to see if compilation succeded.
+ */
+MonoCompile*
+mini_method_compile (MonoMethod *method, guint32 opts, MonoDomain *domain, JitFlags flags, int parts, int aot_method_index)
+{
+ MonoMethodHeader *header;
+ MonoMethodSignature *sig;
+ MonoError err;
+ MonoCompile *cfg;
+ int dfn, i, code_size_ratio;
+ gboolean try_generic_shared, try_llvm = FALSE;
+ MonoMethod *method_to_compile, *method_to_register;
+ gboolean method_is_gshared = FALSE;
+ gboolean run_cctors = (flags & JIT_FLAG_RUN_CCTORS) ? 1 : 0;
+ gboolean compile_aot = (flags & JIT_FLAG_AOT) ? 1 : 0;
+ gboolean full_aot = (flags & JIT_FLAG_FULL_AOT) ? 1 : 0;
+ gboolean disable_direct_icalls = (flags & JIT_FLAG_NO_DIRECT_ICALLS) ? 1 : 0;
+ gboolean gsharedvt_method = FALSE;
+#ifdef ENABLE_LLVM
+ gboolean llvm = (flags & JIT_FLAG_LLVM) ? 1 : 0;
+#endif
+ static gboolean verbose_method_inited;
+ static const char *verbose_method_name;
+
+ InterlockedIncrement (&mono_jit_stats.methods_compiled);
+ if (mono_profiler_get_events () & MONO_PROFILE_JIT_COMPILATION)
+ mono_profiler_method_jit (method);
+ if (MONO_METHOD_COMPILE_BEGIN_ENABLED ())
+ MONO_PROBE_METHOD_COMPILE_BEGIN (method);
+
+ gsharedvt_method = is_gsharedvt_method (method);
+
+ /*
+ * In AOT mode, method can be the following:
+ * - a gsharedvt method.
+ * - a method inflated with type parameters. This is for ref/partial sharing.
+ * - a method inflated with concrete types.
+ */
+ if (compile_aot) {
+ if (is_open_method (method)) {
+ try_generic_shared = TRUE;
+ method_is_gshared = TRUE;
+ } else {
+ try_generic_shared = FALSE;
+ }
+ g_assert (opts & MONO_OPT_GSHARED);
+ } else {
+ try_generic_shared = mono_class_generic_sharing_enabled (method->klass) &&
+ (opts & MONO_OPT_GSHARED) && mono_method_is_generic_sharable (method, FALSE);
+ if (mini_is_gsharedvt_sharable_method (method)) {
+ if (!mono_debug_count ())
+ try_generic_shared = FALSE;
+ }
+ }
+
+ /*
if (try_generic_shared && !mono_debug_count ())
try_generic_shared = FALSE;
*/
mono_stats.generics_unsharable_methods++;
}
- if (mini_is_gsharedvt_sharable_method (method)) {
- if (!mono_debug_count ())
- try_generic_shared = FALSE;
- if (compile_aot)
- try_generic_shared = FALSE;
- }
-
- gsharedvt_method = is_gsharedvt_method (method);
- if (gsharedvt_method || (compile_aot && is_open_method (method))) {
- /* We are AOTing a gshared method directly */
- method_is_gshared = TRUE;
- g_assert (compile_aot);
- try_generic_shared = TRUE;
- }
-
#ifdef ENABLE_LLVM
try_llvm = mono_use_llvm || llvm;
#endif
cfg->gen_sdb_seq_points = FALSE;
}
#endif
+ /* coop / nacl requires loop detection to happen */
+#if defined(__native_client_codegen__) || defined(USE_COOP_GC)
+ cfg->opt |= MONO_OPT_LOOP;
+#endif
cfg->explicit_null_checks = debug_options.explicit_null_checks;
cfg->soft_breakpoints = debug_options.soft_breakpoints;
cfg->check_pinvoke_callconv = debug_options.check_pinvoke_callconv;
cfg->disable_direct_icalls = disable_direct_icalls;
if (try_generic_shared)
- cfg->generic_sharing_context = (MonoGenericSharingContext*)&cfg->gsctx;
+ cfg->gshared = TRUE;
cfg->compile_llvm = try_llvm;
cfg->token_info_hash = g_hash_table_new (NULL, NULL);
+ if (cfg->compile_aot)
+ cfg->method_index = aot_method_index;
if (!mono_debug_count ())
cfg->opt &= ~MONO_OPT_FLOAT32;
return cfg;
}
- if (cfg->generic_sharing_context && (gsharedvt_method || mini_is_gsharedvt_sharable_method (method))) {
+ if (cfg->gshared && (gsharedvt_method || mini_is_gsharedvt_sharable_method (method))) {
MonoMethodInflated *inflated;
MonoGenericContext *context;
cfg->gsharedvt = TRUE;
// FIXME:
cfg->disable_llvm = TRUE;
+ cfg->exception_message = g_strdup ("gsharedvt");
}
- if (cfg->generic_sharing_context) {
+ if (cfg->gshared) {
method_to_register = method_to_compile;
- cfg->gshared = TRUE;
} else {
g_assert (method == method_to_compile);
method_to_register = method;
char *method_name;
method_name = mono_method_full_name (method, TRUE);
- g_print ("converting %s%s%smethod %s\n", COMPILE_LLVM (cfg) ? "llvm " : "", cfg->gsharedvt ? "gsharedvt " : "", (cfg->generic_sharing_context && !cfg->gsharedvt) ? "gshared " : "", method_name);
+ g_print ("converting %s%s%smethod %s\n", COMPILE_LLVM (cfg) ? "llvm " : "", cfg->gsharedvt ? "gsharedvt " : "", (cfg->gshared && !cfg->gsharedvt) ? "gshared " : "", method_name);
/*
if (COMPILE_LLVM (cfg))
g_print ("converting llvm method %s\n", method_name = mono_method_full_name (method, TRUE));
else if (cfg->gsharedvt)
g_print ("converting gsharedvt method %s\n", method_name = mono_method_full_name (method_to_compile, TRUE));
- else if (cfg->generic_sharing_context)
+ else if (cfg->gshared)
g_print ("converting shared method %s\n", method_name = mono_method_full_name (method_to_compile, TRUE));
else
g_print ("converting method %s\n", method_name = mono_method_full_name (method, TRUE));
g_free (method_name);
}
- if (cfg->opt & (MONO_OPT_ABCREM | MONO_OPT_SSAPRE))
+ if (cfg->opt & MONO_OPT_ABCREM)
cfg->opt |= MONO_OPT_SSA;
- /*
- if ((cfg->method->klass->image != mono_defaults.corlib) || (strstr (cfg->method->klass->name, "StackOverflowException") && strstr (cfg->method->name, ".ctor")) || (strstr (cfg->method->klass->name, "OutOfMemoryException") && strstr (cfg->method->name, ".ctor")))
- cfg->globalra = TRUE;
- */
-
- //cfg->globalra = TRUE;
-
- //if (!strcmp (cfg->method->klass->name, "Tests") && !cfg->method->wrapper_type)
- // cfg->globalra = TRUE;
-
- {
- static int count = 0;
- count ++;
-
- /*
- if (g_getenv ("COUNT2")) {
- cfg->globalra = TRUE;
- if (count == atoi (g_getenv ("COUNT2")))
- printf ("LAST: %s\n", mono_method_full_name (cfg->method, TRUE));
- if (count > atoi (g_getenv ("COUNT2")))
- cfg->globalra = FALSE;
- }
- */
- }
-
- if (header->clauses)
- cfg->globalra = FALSE;
-
- if (cfg->method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
- /* The code in the prolog clobbers caller saved registers */
- cfg->globalra = FALSE;
-
- // FIXME: Disable globalra in case of tracing/profiling
-
- if (cfg->method->save_lmf)
- /* The LMF saving code might clobber caller saved registers */
- cfg->globalra = FALSE;
-
- if (header->code_size > 5000)
- // FIXME:
- /* Too large bblocks could overflow the ins positions */
- cfg->globalra = FALSE;
-
cfg->rs = mono_regstate_new ();
- if (cfg->globalra)
- cfg->rs->next_vreg = MONO_MAX_IREGS + MONO_MAX_FREGS;
cfg->next_vreg = cfg->rs->next_vreg;
/* FIXME: Fix SSA to handle branches inside bblocks */
*/
mono_compile_create_vars (cfg);
- /* SSAPRE is not supported on linear IR */
- cfg->opt &= ~MONO_OPT_SSAPRE;
-
i = mono_method_to_ir (cfg, method_to_compile, NULL, NULL, NULL, NULL, 0, FALSE);
if (i < 0) {
if (!COMPILE_LLVM (cfg))
mono_if_conversion (cfg);
- if ((cfg->opt & MONO_OPT_SSAPRE) || cfg->globalra)
- mono_remove_critical_edges (cfg);
+ MONO_SUSPEND_CHECK ();
/* Depth-first ordering on basic blocks */
cfg->bblocks = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * (cfg->num_bblocks + 1));
mono_compute_natural_loops (cfg);
}
+ mono_insert_safepoints (cfg);
+
/* after method_to_ir */
if (parts == 1) {
if (MONO_METHOD_COMPILE_END_ENABLED ())
if (cfg->comp_done & MONO_COMP_SSA && !COMPILE_LLVM (cfg)) {
//mono_ssa_strength_reduction (cfg);
- if (cfg->opt & MONO_OPT_SSAPRE) {
- mono_perform_ssapre (cfg);
- //mono_local_cprop (cfg);
- }
-
if (cfg->opt & MONO_OPT_DEADCE)
mono_ssa_deadce (cfg);
if (cfg->opt & MONO_OPT_DEADCE)
mono_local_deadce (cfg);
- if (cfg->opt & MONO_OPT_BRANCH) {
- MonoBasicBlock *bb;
-
+ if (cfg->opt & MONO_OPT_BRANCH)
mono_optimize_branches (cfg);
-
- /* Have to recompute cfg->bblocks and bb->dfn */
- if (cfg->globalra) {
- mono_remove_critical_edges (cfg);
-
- for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
- bb->dfn = 0;
-
- /* Depth-first ordering on basic blocks */
- cfg->bblocks = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * (cfg->num_bblocks + 1));
-
- dfn = 0;
- df_visit (cfg->bb_entry, &dfn, cfg->bblocks);
- cfg->num_bblocks = dfn + 1;
- }
- }
}
#endif
*/
mono_liveness_handle_exception_clauses (cfg);
- if (cfg->globalra) {
- MonoBasicBlock *bb;
-
- /* Have to do this before regalloc since it can create vregs */
- for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
- mono_arch_lowering_pass (cfg, bb);
-
- mono_global_regalloc (cfg);
- }
-
- if ((cfg->opt & MONO_OPT_LINEARS) && !cfg->globalra) {
+ if (cfg->opt & MONO_OPT_LINEARS) {
GList *vars, *regs, *l;
/* fixme: maybe we can avoid to compute livenesss here if already computed ? */
//print_dfn (cfg);
/* variables are allocated after decompose, since decompose could create temps */
- if (!cfg->globalra && !COMPILE_LLVM (cfg)) {
+ if (!COMPILE_LLVM (cfg)) {
mono_arch_allocate_vars (cfg);
if (cfg->exception_type)
return cfg;
MonoBasicBlock *bb;
gboolean need_local_opts;
- if (!cfg->globalra && !COMPILE_LLVM (cfg)) {
+ if (!COMPILE_LLVM (cfg)) {
mono_spill_global_vars (cfg, &need_local_opts);
if (need_local_opts || cfg->compile_aot) {
}
}
- if (cfg->verbose_level >= 4 && !cfg->globalra) {
+ if (cfg->verbose_level >= 4) {
for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
MonoInst *tree = bb->code;
g_print ("DUMP BLOCK %d:\n", bb->block_num);
return cfg;
}
-#else
+/*
+ * mono_arch_instrument_epilog:
+ *
+ * Compatibility wrapper: forwards to the _full variant with
+ * preserve_argument_registers == FALSE.
+ */
+void*
+mono_arch_instrument_epilog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
+{
+ return mono_arch_instrument_epilog_full (cfg, func, p, enable_arguments, FALSE);
+}
-MonoCompile*
-mini_method_compile (MonoMethod *method, guint32 opts, MonoDomain *domain, JitFlags flags, int parts)
+/*
+ * mono_cfg_add_try_hole:
+ *
+ * Record a "hole" in CLAUSE's try range beginning at native offset
+ * START - cfg->native_code, produced while emitting BB.  The recorded
+ * holes are consumed when the method's MonoJitInfo is created.
+ * The hole is allocated from the compile mempool, so it needs no
+ * explicit freeing.
+ */
+void
+mono_cfg_add_try_hole (MonoCompile *cfg, MonoExceptionClause *clause, guint8 *start, MonoBasicBlock *bb)
{
-	g_assert_not_reached ();
-	return NULL;
+ TryBlockHole *hole = mono_mempool_alloc (cfg->mempool, sizeof (TryBlockHole));
+ hole->clause = clause;
+ hole->start_offset = start - cfg->native_code;
+ hole->basic_block = bb;
+
+ cfg->try_block_holes = g_slist_append_mempool (cfg->mempool, cfg->try_block_holes, hole);
+}
+
+/* Mark the compilation as failed, recording the failure kind in
+ * cfg->exception_type for the caller to inspect. */
+void
+mono_cfg_set_exception (MonoCompile *cfg, int type)
+{
+ cfg->exception_type = type;
}
#endif /* DISABLE_JIT */
+/*
+ * create_jit_info_for_trampoline:
+ *
+ *   Build a MonoJitInfo in the root domain describing the trampoline whose
+ * code and unwind data are given by INFO, attributed to WRAPPER. If INFO
+ * already carries encoded unwind info (info->uw_info) it is used directly;
+ * otherwise the unwind ops are encoded here into a temporary buffer, which
+ * is freed once mono_cache_unwind_info () has interned a cached copy.
+ */
+static MonoJitInfo*
+create_jit_info_for_trampoline (MonoMethod *wrapper, MonoTrampInfo *info)
+{
+ MonoDomain *domain = mono_get_root_domain ();
+ MonoJitInfo *jinfo;
+ guint8 *uw_info;
+ guint32 info_len;
+
+ if (info->uw_info) {
+ uw_info = info->uw_info;
+ info_len = info->uw_info_len;
+ } else {
+ /* Encode on the fly; this allocates, see the matching g_free below. */
+ uw_info = mono_unwind_ops_encode (info->unwind_ops, &info_len);
+ }
+
+ jinfo = mono_domain_alloc0 (domain, MONO_SIZEOF_JIT_INFO);
+ jinfo->d.method = wrapper;
+ jinfo->code_start = info->code;
+ jinfo->code_size = info->code_size;
+ jinfo->unwind_info = mono_cache_unwind_info (uw_info, info_len);
+
+ /* Only free the buffer we encoded ourselves, never caller-owned data. */
+ if (!info->uw_info)
+ g_free (uw_info);
+
+ return jinfo;
+}
+
+/*
+ * mono_jit_compile_method_inner:
+ *
+ * Main entry point for the JIT.
+ */
gpointer
mono_jit_compile_method_inner (MonoMethod *method, MonoDomain *target_domain, int opt, MonoException **jit_ex)
{
else
mono_lookup_pinvoke_call (method, NULL, NULL);
}
- nm = mono_marshal_get_native_wrapper (method, check_for_pending_exc, mono_aot_only);
+ nm = mono_marshal_get_native_wrapper (method, TRUE, mono_aot_only);
code = mono_get_addr_from_ftnptr (mono_compile_method (nm));
jinfo = mono_jit_info_table_find (target_domain, code);
if (!jinfo)
*/
return mono_get_addr_from_ftnptr ((gpointer)mono_icall_get_wrapper_full (mi, TRUE));
} else if (*name == 'I' && (strcmp (name, "Invoke") == 0)) {
-#ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
return mono_create_delegate_trampoline (target_domain, method->klass);
-#else
- nm = mono_marshal_get_delegate_invoke (method, NULL);
- return mono_get_addr_from_ftnptr (mono_compile_method (nm));
-#endif
} else if (*name == 'B' && (strcmp (name, "BeginInvoke") == 0)) {
nm = mono_marshal_get_delegate_begin_invoke (method);
return mono_get_addr_from_ftnptr (mono_compile_method (nm));
jit_timer = g_timer_new ();
- cfg = mini_method_compile (method, opt, target_domain, JIT_FLAG_RUN_CCTORS, 0);
+ cfg = mini_method_compile (method, opt, target_domain, JIT_FLAG_RUN_CCTORS, 0, -1);
prof_method = cfg->method;
g_timer_stop (jit_timer);
else if (cfg->exception_type == MONO_EXCEPTION_TYPE_LOAD)
ex = mono_exception_from_name_msg (mono_defaults.corlib, "System", "TypeLoadException", cfg->exception_message);
else if (cfg->exception_type == MONO_EXCEPTION_FILE_NOT_FOUND)
- ex = mono_exception_from_name_msg (mono_defaults.corlib, "System", "FileNotFoundException", cfg->exception_message);
+ ex = mono_exception_from_name_msg (mono_defaults.corlib, "System.IO", "FileNotFoundException", cfg->exception_message);
else if (cfg->exception_type == MONO_EXCEPTION_BAD_IMAGE)
ex = mono_get_exception_bad_image_format (cfg->exception_message);
else
case MONO_EXCEPTION_FIELD_ACCESS:
ex = mono_exception_from_name_msg (mono_defaults.corlib, "System", "FieldAccessException", cfg->exception_message);
break;
-#ifndef DISABLE_SECURITY
- /* this can only be set if the security manager is active */
- case MONO_EXCEPTION_SECURITY_LINKDEMAND: {
- MonoSecurityManager* secman = mono_security_manager_get_methods ();
- MonoObject *exc = NULL;
- gpointer args [2];
-
- args [0] = &cfg->exception_data;
- args [1] = &method;
- mono_runtime_invoke (secman->linkdemandsecurityexception, NULL, args, &exc);
-
- ex = (MonoException*)exc;
- break;
- }
-#endif
case MONO_EXCEPTION_OBJECT_SUPPLIED: {
MonoException *exp = cfg->exception_ptr;
MONO_GC_UNREGISTER_ROOT (cfg->exception_ptr);
code = cfg->native_code;
- if (cfg->generic_sharing_context && mono_method_is_generic_sharable (method, FALSE))
+ if (cfg->gshared && mono_method_is_generic_sharable (method, FALSE))
mono_stats.generics_shared_methods++;
if (cfg->gsharedvt)
mono_stats.gsharedvt_methods++;
mono_jit_stats.regvars += cfg->stat_n_regvars;
mono_jit_stats.inlineable_methods += cfg->stat_inlineable_methods;
mono_jit_stats.inlined_methods += cfg->stat_inlined_methods;
- mono_jit_stats.cas_demand_generation += cfg->stat_cas_demand_generation;
mono_jit_stats.code_reallocs += cfg->stat_code_reallocs;
mono_destroy_compile (cfg);
/* These patches are applied after a method has been installed, no target munging is needed. */
nacl_allow_target_modification (FALSE);
#endif
+#ifdef MONO_ARCH_HAVE_PATCH_CODE_NEW
+ for (tmp = jlist->list; tmp; tmp = tmp->next) {
+ gpointer target = mono_resolve_patch_target (NULL, target_domain, tmp->data, &patch_info, TRUE);
+ mono_arch_patch_code_new (NULL, target_domain, tmp->data, &patch_info, target);
+ }
+#else
for (tmp = jlist->list; tmp; tmp = tmp->next)
- mono_arch_patch_code (NULL, target_domain, tmp->data, &patch_info, NULL, TRUE);
+ mono_arch_patch_code (NULL, NULL, target_domain, tmp->data, &patch_info, TRUE);
+#endif
#if defined(__native_client_codegen__) && defined(__native_client__)
nacl_allow_target_modification (TRUE);
#endif
return code;
}
-#ifndef DISABLE_JIT
-
-void*
-mono_arch_instrument_epilog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments) {
- return mono_arch_instrument_epilog_full (cfg, func, p, enable_arguments, FALSE);
-}
-
-void
-mono_cfg_add_try_hole (MonoCompile *cfg, MonoExceptionClause *clause, guint8 *start, MonoBasicBlock *bb)
-{
- TryBlockHole *hole = mono_mempool_alloc (cfg->mempool, sizeof (TryBlockHole));
- hole->clause = clause;
- hole->start_offset = start - cfg->native_code;
- hole->basic_block = bb;
-
- cfg->try_block_holes = g_slist_append_mempool (cfg->mempool, cfg->try_block_holes, hole);
-}
-
-void
-mono_cfg_set_exception (MonoCompile *cfg, int type)
-{
- cfg->exception_type = type;
-}
-
-#endif
-
/* Dummy versions of some arch specific functions to avoid ifdefs at call sites */
#ifndef MONO_ARCH_GSHAREDVT_SUPPORTED
}
gpointer
-mono_arch_get_gsharedvt_call_info (gpointer addr, MonoMethodSignature *normal_sig, MonoMethodSignature *gsharedvt_sig, MonoGenericSharingContext *gsctx, gboolean gsharedvt_in, gint32 vcall_offset, gboolean calli)
+mono_arch_get_gsharedvt_call_info (gpointer addr, MonoMethodSignature *normal_sig, MonoMethodSignature *gsharedvt_sig, gboolean gsharedvt_in, gint32 vcall_offset, gboolean calli)
{
g_assert_not_reached ();
return NULL;
}
gpointer
-mono_arch_get_gsharedvt_call_info (gpointer addr, MonoMethodSignature *normal_sig, MonoMethodSignature *gsharedvt_sig, MonoGenericSharingContext *gsctx, gboolean gsharedvt_in, gint32 vcall_offset, gboolean calli)
+mono_arch_get_gsharedvt_call_info (gpointer addr, MonoMethodSignature *normal_sig, MonoMethodSignature *gsharedvt_sig, gboolean gsharedvt_in, gint32 vcall_offset, gboolean calli)
{
NOT_IMPLEMENTED;
return NULL;
#endif
-#ifdef USE_JUMP_TABLES
-#define DEFAULT_JUMPTABLE_CHUNK_ELEMENTS 128
-
-typedef struct MonoJumpTableChunk {
- guint32 total;
- guint32 active;
- struct MonoJumpTableChunk *previous;
- /* gpointer entries[total]; */
-} MonoJumpTableChunk;
-
-static MonoJumpTableChunk* g_jumptable;
-#define mono_jumptable_lock() mono_mutex_lock (&jumptable_mutex)
-#define mono_jumptable_unlock() mono_mutex_unlock (&jumptable_mutex)
-static mono_mutex_t jumptable_mutex;
-
-static MonoJumpTableChunk*
-mono_create_jumptable_chunk (guint32 max_entries)
-{
- guint32 size = sizeof (MonoJumpTableChunk) + max_entries * sizeof(gpointer);
- MonoJumpTableChunk *chunk = (MonoJumpTableChunk*) g_new0 (guchar, size);
- chunk->total = max_entries;
- return chunk;
-}
-
+#ifndef ENABLE_LLVM
void
-mono_jumptable_init (void)
+mono_llvm_emit_aot_file_info (MonoAotFileInfo *info, gboolean has_jitted_code)
{
- if (g_jumptable == NULL) {
- mono_mutex_init_recursive (&jumptable_mutex);
- g_jumptable = mono_create_jumptable_chunk (DEFAULT_JUMPTABLE_CHUNK_ELEMENTS);
- }
-}
-
-gpointer*
-mono_jumptable_add_entry (void)
-{
- return mono_jumptable_add_entries (1);
-}
-
-gpointer*
-mono_jumptable_add_entries (guint32 entries)
-{
- guint32 index;
- gpointer *result;
-
- mono_jumptable_init ();
- mono_jumptable_lock ();
- index = g_jumptable->active;
- if (index + entries >= g_jumptable->total) {
- /*
- * Grow jumptable, by adding one more chunk.
- * We cannot realloc jumptable, as there could be pointers
- * to existing jump table entries in the code, so instead
- * we just add one more chunk.
- */
- guint32 max_entries = entries;
- MonoJumpTableChunk *new_chunk;
-
- if (max_entries < DEFAULT_JUMPTABLE_CHUNK_ELEMENTS)
- max_entries = DEFAULT_JUMPTABLE_CHUNK_ELEMENTS;
- new_chunk = mono_create_jumptable_chunk (max_entries);
- /* Link old jumptable, so that we could free it up later. */
- new_chunk->previous = g_jumptable;
- g_jumptable = new_chunk;
- index = 0;
- }
- g_jumptable->active = index + entries;
- result = (gpointer*)((guchar*)g_jumptable + sizeof(MonoJumpTableChunk)) + index;
- mono_jumptable_unlock();
-
- return result;
-}
-
-void
-mono_jumptable_cleanup (void)
-{
- if (g_jumptable) {
- MonoJumpTableChunk *current = g_jumptable, *prev;
- while (current != NULL) {
- prev = current->previous;
- g_free (current);
- current = prev;
- }
- g_jumptable = NULL;
- mono_mutex_destroy (&jumptable_mutex);
- }
+ g_assert_not_reached ();
}
-gpointer*
-mono_jumptable_get_entry (guint8 *code_ptr)
+void mono_llvm_emit_aot_data (const char *symbol, guint8 *data, int data_len)
{
- return mono_arch_jumptable_entry_from_code (code_ptr);
+ g_assert_not_reached ();
}
#endif
-/*
- * mini_replace_type:
- *
- * Replace the type used in the metadata stream with what the JIT will actually use during compilation.
-*/
-MonoType*
-mini_replace_type (MonoType *type)
-{
- type = mono_type_get_underlying_type (type);
- return mini_native_type_replace_type (type);
-}
-
/*
* mini_get_underlying_type:
*
* For gsharedvt types, it will return the original VAR/MVAR.
*/
MonoType*
-mini_get_underlying_type (MonoCompile *cfg, MonoType *type)
+mini_get_underlying_type (MonoType *type)
{
- type = mini_type_get_underlying_type (cfg->generic_sharing_context, type);
- return mini_native_type_replace_type (type);
+ return mini_type_get_underlying_type (type);
}
void
void
mini_jit_cleanup (void)
{
+#ifndef DISABLE_JIT
g_free (emul_opcode_map);
g_free (emul_opcode_opcodes);
+#endif
}
+
+#ifdef DISABLE_JIT
+
+/*
+ * Stub versions of the JIT entry points, compiled in when the JIT is
+ * disabled so the rest of the runtime still links. None of these should
+ * ever be reached at runtime, hence the asserts.
+ */
+
+MonoCompile*
+mini_method_compile (MonoMethod *method, guint32 opts, MonoDomain *domain, JitFlags flags, int parts, int aot_method_index)
+{
+ g_assert_not_reached ();
+ return NULL;
+}
+
+void
+mono_destroy_compile (MonoCompile *cfg)
+{
+ g_assert_not_reached ();
+}
+
+void
+mono_add_patch_info (MonoCompile *cfg, int ip, MonoJumpInfoType type, gconstpointer target)
+{
+ g_assert_not_reached ();
+}
+
+#endif /* DISABLE_JIT */