#include "mini-gc.h"
#include "debugger-agent.h"
+/*
+ * Fast TLS abstraction used for the runtime's hot thread-local variables
+ * (mono_jit_tls, mono_lmf, mono_lmf_addr). Two backends:
+ * - HAVE_KW_THREAD: use the compiler's __thread keyword; GET/SET are plain
+ *   variable accesses and no explicit init is needed.
+ * - Apple/x86 (no working __thread): fall back to pthread TLS keys; here
+ *   MINI_THREAD_VAR_OFFSET hands back the pthread key itself so generated
+ *   code can load the slot directly.
+ * If neither applies, only MINI_THREAD_VAR_OFFSET is defined and
+ * MINI_HAVE_FAST_TLS stays undefined (slow TlsGetValue paths are used).
+ */
+#if defined(HAVE_KW_THREAD)
+#define MINI_FAST_TLS_SET(x,y) x = y
+#define MINI_FAST_TLS_GET(x) x
+#define MINI_FAST_TLS_INIT(x)
+#define MINI_FAST_TLS_DECLARE(x) static __thread gpointer x MONO_TLS_FAST;
+#define MINI_HAVE_FAST_TLS
+#define MINI_THREAD_VAR_OFFSET(x,y) MONO_THREAD_VAR_OFFSET(x,y)
+#elif (defined(__APPLE__) && defined(__i386__))
+/* pthread-key backend: INIT must run before first use (see mini_init). */
+#define MINI_FAST_TLS_SET(x,y) pthread_setspecific(x, y)
+#define MINI_FAST_TLS_GET(x) pthread_getspecific(x)
+#define MINI_FAST_TLS_INIT(x) pthread_key_create(&x, NULL)
+#define MINI_FAST_TLS_DECLARE(x) static pthread_key_t x;
+#define MINI_HAVE_FAST_TLS
+#define MINI_THREAD_VAR_OFFSET(x,y) y = (gint32) x
+#else
+#define MINI_THREAD_VAR_OFFSET(x,y) MONO_THREAD_VAR_OFFSET(x,y)
+#endif
+
static gpointer mono_jit_compile_method_with_opt (MonoMethod *method, guint32 opt, MonoException **ex);
-/* helper methods signature */
-/* FIXME: Make these static again */
-MonoMethodSignature *helper_sig_class_init_trampoline = NULL;
-MonoMethodSignature *helper_sig_domain_get = NULL;
-MonoMethodSignature *helper_sig_generic_class_init_trampoline = NULL;
-MonoMethodSignature *helper_sig_generic_class_init_trampoline_llvm = NULL;
-MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline = NULL;
-MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline = NULL;
-MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm = NULL;
-
-#ifdef __native_client_codegen__
-/* Default alignment for Native Client is 32-byte. */
-guint8 nacl_align_byte = 0xe0;
-#endif
static guint32 default_opt = 0;
static gboolean default_opt_set = FALSE;
guint32 mono_jit_tls_id = -1;
-#ifdef HAVE_KW_THREAD
-static __thread gpointer mono_jit_tls MONO_TLS_FAST;
+#ifdef MINI_HAVE_FAST_TLS
+MINI_FAST_TLS_DECLARE(mono_jit_tls);
+#endif
+
+#ifndef MONO_ARCH_MONITOR_ENTER_ADJUSTMENT
+#define MONO_ARCH_MONITOR_ENTER_ADJUSTMENT 1
#endif
MonoTraceSpec *mono_jit_trace_calls = NULL;
gboolean mono_dont_free_global_codeman;
+/*
+ * mono_realloc_native_code:
+ *
+ *   Grow CFG's native code buffer to CFG->code_size bytes and return the
+ * new code pointer. Under Native Client, generated code must stay aligned
+ * to kNaClAlignment, so we over-allocate, re-align after the realloc, and
+ * shift the existing code back into alignment.
+ */
+gpointer
+mono_realloc_native_code (MonoCompile *cfg)
+{
+#if defined(__default_codegen__)
+	return g_realloc (cfg->native_code, cfg->code_size);
+#elif defined(__native_client_codegen__)
+	guint old_padding;
+	gpointer native_code;
+	guint alignment_check;
+
+	/* Save the old alignment offset so we can re-align after the realloc. */
+	old_padding = (guint)(cfg->native_code - cfg->native_code_alloc);
+
+	cfg->native_code_alloc = g_realloc ( cfg->native_code_alloc,
+										 cfg->code_size + kNaClAlignment );
+
+	/* Align native_code to next nearest kNaClAlignment byte. */
+	/* Use pointer-sized gsize, not guint, so the pointer is not */
+	/* truncated on 64-bit targets. */
+	native_code = (gpointer)(((gsize)cfg->native_code_alloc + kNaClAlignment) & ~(gsize)kNaClAlignmentMask);
+
+	/* Shift the data to be 32-byte aligned again. */
+	memmove (native_code, cfg->native_code_alloc + old_padding, cfg->code_size);
+
+	alignment_check = (guint)((gsize)native_code & kNaClAlignmentMask);
+	g_assert (alignment_check == 0);
+	return native_code;
+#else
+	g_assert_not_reached ();
+	return cfg->native_code;
+#endif
+}
+
#ifdef __native_client_codegen__
/* Prevent instructions from straddling a 32-byte alignment boundary. */
}
}
+#if defined(__native_client_codegen__) && defined(__native_client__)
+/* Given the temporary buffer (allocated by mono_global_codeman_reserve) into
+ * which we are generating code, return a pointer to the destination in the
+ * dynamic code segment into which the code will be copied when
+ * mono_global_codeman_commit is called.
+ * @data: pointer inside the temporary buffer returned by the reserve call.
+ * LOCKING: Acquires the jit lock.
+ */
+void*
+nacl_global_codeman_get_dest (void *data)
+{
+	void *dest;
+	mono_jit_lock ();
+	dest = nacl_code_manager_get_code_dest (global_codeman, data);
+	mono_jit_unlock ();
+	return dest;
+}
+
+/* Validate and copy SIZE bytes at DATA into the global code manager's code
+ * segment, trimming the reservation to NEWSIZE.
+ * LOCKING: Acquires the jit lock.
+ */
+void
+mono_global_codeman_commit (void *data, int size, int newsize)
+{
+	mono_jit_lock ();
+	mono_code_manager_commit (global_codeman, data, size, newsize);
+	mono_jit_unlock ();
+}
+
+/*
+ * Convenience function which calls mono_global_codeman_commit to validate and
+ * copy the code. The caller sets *buf_base and *buf_size to the start and size
+ * of the buffer (allocated by mono_global_codeman_reserve), and *code_end to
+ * the byte after the last instruction byte. On return, *buf_base will point to
+ * the start of the copied code in the code segment, and *code_end will point
+ * after the end of the copied code.
+ */
+void
+nacl_global_codeman_validate (guint8 **buf_base, int buf_size, guint8 **code_end)
+{
+	/* Resolve the final destination first, then commit the used bytes. */
+	guint8 *tmp = nacl_global_codeman_get_dest (*buf_base);
+	mono_global_codeman_commit (*buf_base, buf_size, *code_end - *buf_base);
+	/* Rebase both out-pointers onto the committed copy. */
+	*code_end = tmp + (*code_end - *buf_base);
+	*buf_base = tmp;
+}
+#else
+/* no-op versions of Native Client functions */
+/* Non-NaCl build: code is generated in place, so the destination is DATA. */
+void*
+nacl_global_codeman_get_dest (void *data)
+{
+	return data;
+}
+
+/* Non-NaCl build: nothing to validate or copy. */
+void
+mono_global_codeman_commit (void *data, int size, int newsize)
+{
+}
+
+/* Non-NaCl build: buffers are already final; leave the pointers untouched. */
+void
+nacl_global_codeman_validate (guint8 **buf_base, int buf_size, guint8 **code_end)
+{
+}
+
+#endif /* __native_client__ */
+
/**
* mono_create_unwind_op:
*
inst->backend.is_pinvoke = 0;
inst->dreg = vreg;
+ if (cfg->compute_gc_maps) {
+ if (type->byref) {
+ mono_mark_vreg_as_mp (cfg, vreg);
+ } else {
+ MonoType *t = mini_type_get_underlying_type (NULL, type);
+ if ((MONO_TYPE_ISSTRUCT (t) && inst->klass->has_references) || MONO_TYPE_IS_REFERENCE (t)) {
+ inst->flags |= MONO_INST_GC_TRACK;
+ mono_mark_vreg_as_ref (cfg, vreg);
+ }
+ }
+ }
+
cfg->varinfo [num] = inst;
MONO_INIT_VARINFO (&cfg->vars [num], num);
return mono_compile_create_var_for_vreg (cfg, type, opcode, dreg);
}
+/*
+ * mono_mark_vreg_as_ref:
+ *
+ *   Record in CFG's GC map information that virtual register VREG holds a
+ * GC-tracked object reference. The flag array is grown on demand (doubling,
+ * minimum 32 entries) from the compile mempool, so nothing is freed here.
+ */
+void
+mono_mark_vreg_as_ref (MonoCompile *cfg, int vreg)
+{
+	if (vreg >= cfg->vreg_is_ref_len) {
+		/* Keep the old array so existing flags can be copied over. */
+		gboolean *tmp = cfg->vreg_is_ref;
+		int size = cfg->vreg_is_ref_len;
+
+		while (vreg >= cfg->vreg_is_ref_len)
+			cfg->vreg_is_ref_len = cfg->vreg_is_ref_len ? cfg->vreg_is_ref_len * 2 : 32;
+		/* alloc0 zero-fills, so the newly added tail reads as FALSE. */
+		cfg->vreg_is_ref = mono_mempool_alloc0 (cfg->mempool, sizeof (gboolean) * cfg->vreg_is_ref_len);
+		if (size)
+			memcpy (cfg->vreg_is_ref, tmp, size * sizeof (gboolean));
+	}
+	cfg->vreg_is_ref [vreg] = TRUE;
+}
+
+/*
+ * mono_mark_vreg_as_mp:
+ *
+ *   Record that virtual register VREG holds a managed pointer (byref/interior
+ * pointer) for GC map purposes. Same demand-grown flag-array scheme as
+ * mono_mark_vreg_as_ref, backed by the compile mempool.
+ */
+void
+mono_mark_vreg_as_mp (MonoCompile *cfg, int vreg)
+{
+	if (vreg >= cfg->vreg_is_mp_len) {
+		/* Keep the old array so existing flags can be copied over. */
+		gboolean *tmp = cfg->vreg_is_mp;
+		int size = cfg->vreg_is_mp_len;
+
+		while (vreg >= cfg->vreg_is_mp_len)
+			cfg->vreg_is_mp_len = cfg->vreg_is_mp_len ? cfg->vreg_is_mp_len * 2 : 32;
+		/* alloc0 zero-fills, so the newly added tail reads as FALSE. */
+		cfg->vreg_is_mp = mono_mempool_alloc0 (cfg->mempool, sizeof (gboolean) * cfg->vreg_is_mp_len);
+		if (size)
+			memcpy (cfg->vreg_is_mp, tmp, size * sizeof (gboolean));
+	}
+	cfg->vreg_is_mp [vreg] = TRUE;
+}
+
/*
* Transform a MonoInst into a load from the variable of index var_index.
*/
mini_assembly_can_skip_verification (MonoDomain *domain, MonoMethod *method)
{
MonoAssembly *assembly = method->klass->image->assembly;
- if (method->wrapper_type != MONO_WRAPPER_NONE)
+ if (method->wrapper_type != MONO_WRAPPER_NONE && method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
return FALSE;
if (assembly->in_gac || assembly->image == mono_defaults.corlib)
return FALSE;
*
* Returns true if the method is invalid.
*/
-gboolean
-mini_method_verify (MonoCompile *cfg, MonoMethod *method)
+static gboolean
+mini_method_verify (MonoCompile *cfg, MonoMethod *method, gboolean fail_compile)
{
GSList *tmp, *res;
gboolean is_fulltrust;
res = mono_method_verify_with_current_settings (method, cfg->skip_visibility);
if ((error = mono_loader_get_last_error ())) {
- cfg->exception_type = error->exception_type;
+ if (fail_compile)
+ cfg->exception_type = error->exception_type;
+ else
+ mono_loader_clear_error ();
if (res)
mono_free_verify_list (res);
return TRUE;
for (tmp = res; tmp; tmp = tmp->next) {
MonoVerifyInfoExtended *info = (MonoVerifyInfoExtended *)tmp->data;
if (info->info.status == MONO_VERIFY_ERROR) {
+ if (fail_compile) {
char *method_name = mono_method_full_name (method, TRUE);
- cfg->exception_type = info->exception_type;
- cfg->exception_message = g_strdup_printf ("Error verifying %s: %s", method_name, info->info.message);
+ cfg->exception_type = info->exception_type;
+ cfg->exception_message = g_strdup_printf ("Error verifying %s: %s", method_name, info->info.message);
+ g_free (method_name);
+ }
mono_free_verify_list (res);
- g_free (method_name);
return TRUE;
}
if (info->info.status == MONO_VERIFY_NOT_VERIFIABLE && (!is_fulltrust || info->exception_type == MONO_EXCEPTION_METHOD_ACCESS || info->exception_type == MONO_EXCEPTION_FIELD_ACCESS)) {
- char *method_name = mono_method_full_name (method, TRUE);
- cfg->exception_type = info->exception_type;
- cfg->exception_message = g_strdup_printf ("Error verifying %s: %s", method_name, info->info.message);
+ if (fail_compile) {
+ char *method_name = mono_method_full_name (method, TRUE);
+ cfg->exception_type = info->exception_type;
+ cfg->exception_message = g_strdup_printf ("Error verifying %s: %s", method_name, info->info.message);
+ g_free (method_name);
+ }
mono_free_verify_list (res);
- g_free (method_name);
return TRUE;
}
}
return FALSE;
}
-/*Returns true is something went wrong*/
-static gboolean
-mono_compile_is_broken (MonoCompile *cfg)
+/*Returns true if something went wrong*/
+gboolean
+mono_compile_is_broken (MonoCompile *cfg, MonoMethod *method, gboolean fail_compile)
{
- MonoMethod *method = cfg->method;
MonoMethod *method_definition = method;
gboolean dont_verify = mini_assembly_can_skip_verification (cfg->domain, method);
dont_verify |= method->klass->image->assembly->corlib_internal;
method_definition = imethod->declaring;
}
- return !dont_verify && mini_method_verify (cfg, method_definition);
-}
-
-static void
-create_helper_signature (void)
-{
- helper_sig_domain_get = mono_create_icall_signature ("ptr");
- helper_sig_class_init_trampoline = mono_create_icall_signature ("void");
- helper_sig_generic_class_init_trampoline = mono_create_icall_signature ("void");
- helper_sig_generic_class_init_trampoline_llvm = mono_create_icall_signature ("void ptr");
- helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
- helper_sig_monitor_enter_exit_trampoline = mono_create_icall_signature ("void");
- helper_sig_monitor_enter_exit_trampoline_llvm = mono_create_icall_signature ("void object");
+ return !dont_verify && mini_method_verify (cfg, method_definition, fail_compile);
}
static gconstpointer
case MONO_TYPE_PTR:
case MONO_TYPE_I:
case MONO_TYPE_U:
-#if SIZEOF_REGISTER == 4
+#if SIZEOF_VOID_P == 4
case MONO_TYPE_I4:
#else
case MONO_TYPE_I8:
vars = mono_varlist_sort (cfg, vars, 0);
offset = 0;
- *stack_align = sizeof (gpointer);
+ *stack_align = sizeof(mgreg_t);
for (l = vars; l; l = l->next) {
vmv = l->data;
inst = cfg->varinfo [vmv->idx];
case MONO_TYPE_PTR:
case MONO_TYPE_I:
case MONO_TYPE_U:
-#if SIZEOF_REGISTER == 4
+#if SIZEOF_VOID_P == 4
case MONO_TYPE_I4:
#else
case MONO_TYPE_I8:
MonoBasicBlock *bb;
MonoInst *c;
- g_print ("IR code for method %s\n", mono_method_full_name (cfg->method, TRUE));
+ {
+ char *method_name = mono_method_full_name (cfg->method, TRUE);
+ g_print ("IR code for method %s\n", method_name);
+ g_free (method_name);
+ }
for (i = 0; i < cfg->num_bblocks; ++i) {
bb = cfg->bblocks [i];
{
if (ins == NULL) {
ins = bb->code;
+ if (ins)
+ ins->prev = ins_to_insert;
bb->code = ins_to_insert;
ins_to_insert->next = ins;
if (bb->last_ins == NULL)
g_hash_table_destroy (cfg->abs_patches);
mono_mempool_destroy (cfg->mempool);
+ mono_debug_free_method (cfg);
+
g_free (cfg->varinfo);
g_free (cfg->vars);
g_free (cfg->exception_message);
g_free (cfg);
}
-#ifdef HAVE_KW_THREAD
-static __thread gpointer mono_lmf_addr MONO_TLS_FAST;
+#ifdef MINI_HAVE_FAST_TLS
+MINI_FAST_TLS_DECLARE(mono_lmf_addr);
#ifdef MONO_ARCH_ENABLE_MONO_LMF_VAR
/*
* When this is defined, the current lmf is stored in this tls variable instead of in
* jit_tls->lmf.
*/
-static __thread gpointer mono_lmf MONO_TLS_FAST;
+MINI_FAST_TLS_DECLARE(mono_lmf);
#endif
#endif
gint32
mono_get_jit_tls_offset (void)
{
-#ifdef HAVE_KW_THREAD
+#ifdef MINI_HAVE_FAST_TLS
int offset;
- MONO_THREAD_VAR_OFFSET (mono_jit_tls, offset);
+ MINI_THREAD_VAR_OFFSET (mono_jit_tls, offset);
return offset;
#else
return -1;
gint32
mono_get_lmf_tls_offset (void)
{
-#if defined(HAVE_KW_THREAD) && defined(MONO_ARCH_ENABLE_MONO_LMF_VAR)
+#if defined(MINI_HAVE_FAST_TLS) && defined(MONO_ARCH_ENABLE_MONO_LMF_VAR)
int offset;
- MONO_THREAD_VAR_OFFSET(mono_lmf,offset);
+ MINI_THREAD_VAR_OFFSET(mono_lmf,offset);
return offset;
#else
return -1;
mono_get_lmf_addr_tls_offset (void)
{
int offset;
- MONO_THREAD_VAR_OFFSET(mono_lmf_addr,offset);
+ MINI_THREAD_VAR_OFFSET(mono_lmf_addr,offset);
return offset;
}
MonoLMF *
mono_get_lmf (void)
{
-#if defined(HAVE_KW_THREAD) && defined(MONO_ARCH_ENABLE_MONO_LMF_VAR)
- return mono_lmf;
+#if defined(MINI_HAVE_FAST_TLS) && defined(MONO_ARCH_ENABLE_MONO_LMF_VAR)
+ return MINI_FAST_TLS_GET (mono_lmf);
#else
MonoJitTlsData *jit_tls;
if ((jit_tls = TlsGetValue (mono_jit_tls_id)))
return jit_tls->lmf;
-
- g_assert_not_reached ();
+ /*
+ * We do not assert here because this function can be called from
+ * mini-gc.c on a thread that has not executed any managed code, yet
+ * (the thread object allocation can trigger a collection).
+ */
return NULL;
#endif
}
MonoLMF **
mono_get_lmf_addr (void)
{
-#ifdef HAVE_KW_THREAD
- return mono_lmf_addr;
+#ifdef MINI_HAVE_FAST_TLS
+ return MINI_FAST_TLS_GET (mono_lmf_addr);
#else
MonoJitTlsData *jit_tls;
void
mono_set_lmf (MonoLMF *lmf)
{
-#if defined(HAVE_KW_THREAD) && defined(MONO_ARCH_ENABLE_MONO_LMF_VAR)
- mono_lmf = lmf;
+#if defined(MINI_HAVE_FAST_TLS) && defined(MONO_ARCH_ENABLE_MONO_LMF_VAR)
+ MINI_FAST_TLS_SET (mono_lmf, lmf);
#endif
(*mono_get_lmf_addr ()) = lmf;
}
+/*
+ * mono_set_jit_tls:
+ *
+ *   Store JIT_TLS as the current thread's MonoJitTlsData. Always updates the
+ * TlsSetValue slot; when fast TLS is available, also mirrors the value into
+ * the fast-access variable so both lookup paths stay consistent.
+ */
+static void
+mono_set_jit_tls (MonoJitTlsData *jit_tls)
+{
+	TlsSetValue (mono_jit_tls_id, jit_tls);
+
+#ifdef MINI_HAVE_FAST_TLS
+	MINI_FAST_TLS_SET (mono_jit_tls, jit_tls);
+#endif
+}
+
+/*
+ * mono_set_lmf_addr:
+ *
+ *   Cache LMF_ADDR in the fast TLS variable for the current thread.
+ * A no-op when fast TLS is unavailable (callers then rely on jit_tls->lmf).
+ */
+static void
+mono_set_lmf_addr (gpointer lmf_addr)
+{
+#ifdef MINI_HAVE_FAST_TLS
+	MINI_FAST_TLS_SET (mono_lmf_addr, lmf_addr);
+#endif
+}
+
/* Called by native->managed wrappers */
void
mono_jit_thread_attach (MonoDomain *domain)
*/
domain = mono_get_root_domain ();
-#ifdef HAVE_KW_THREAD
- if (!mono_lmf_addr) {
+#ifdef MINI_HAVE_FAST_TLS
+ if (!MINI_FAST_TLS_GET (mono_lmf_addr)) {
mono_thread_attach (domain);
}
#else
jit_tls = g_new0 (MonoJitTlsData, 1);
- TlsSetValue (mono_jit_tls_id, jit_tls);
-
-#ifdef HAVE_KW_THREAD
- mono_jit_tls = jit_tls;
-#endif
-
jit_tls->abort_func = abort_func;
jit_tls->end_of_stack = stack_start;
+ mono_set_jit_tls (jit_tls);
+
lmf = g_new0 (MonoLMF, 1);
-#ifdef MONO_ARCH_INIT_TOP_LMF_ENTRY
MONO_ARCH_INIT_TOP_LMF_ENTRY (lmf);
-#else
- lmf->ebp = -1;
-#endif
jit_tls->first_lmf = lmf;
-#if defined(HAVE_KW_THREAD) && defined(MONO_ARCH_ENABLE_MONO_LMF_VAR)
+#if defined(MINI_HAVE_FAST_TLS) && defined(MONO_ARCH_ENABLE_MONO_LMF_VAR)
/* jit_tls->lmf is unused */
- mono_lmf = lmf;
- mono_lmf_addr = &mono_lmf;
+ MINI_FAST_TLS_SET (mono_lmf, lmf);
+ mono_set_lmf_addr (&mono_lmf);
#else
-#if defined(HAVE_KW_THREAD)
- mono_lmf_addr = &jit_tls->lmf;
-#endif
+ mono_set_lmf_addr (&jit_tls->lmf);
jit_tls->lmf = lmf;
#endif
return jit_tls;
}
+/*
+ * free_jit_tls_data:
+ *
+ *   Release all per-thread JIT state: arch-specific data, the altstack,
+ * the first (sentinel) LMF, and JIT_TLS itself. Callers must have already
+ * detached JIT_TLS from thread-local storage (see mini_thread_cleanup).
+ */
+static void
+free_jit_tls_data (MonoJitTlsData *jit_tls)
+{
+	mono_arch_free_jit_tls_data (jit_tls);
+	mono_free_altstack (jit_tls);
+
+	g_free (jit_tls->first_lmf);
+	g_free (jit_tls);
+}
+
static void
mono_thread_start_cb (intptr_t tid, gpointer stack_start, gpointer func)
{
mono_debugger_thread_created (tid, thread->root_domain_thread, jit_tls, func);
if (thread)
thread->jit_data = jit_tls;
+
+ mono_arch_cpu_init ();
}
void (*mono_thread_attach_aborted_cb ) (MonoObject *obj) = NULL;
thread->jit_data = jit_tls;
if (mono_profiler_get_events () & MONO_PROFILE_STATISTICAL)
mono_runtime_setup_stat_profiler ();
+
+ mono_arch_cpu_init ();
}
static void
-mini_thread_cleanup (MonoThread *thread)
+mini_thread_cleanup (MonoInternalThread *thread)
{
- MonoInternalThread *internal = thread->internal_thread;
- MonoJitTlsData *jit_tls = internal->jit_data;
+ MonoJitTlsData *jit_tls = thread->jit_data;
if (jit_tls) {
mono_debugger_thread_cleanup (jit_tls);
- mono_arch_free_jit_tls_data (jit_tls);
-
- mono_free_altstack (jit_tls);
- g_free (jit_tls->first_lmf);
- g_free (jit_tls);
- internal->jit_data = NULL;
/* We can't clean up tls information if we are on another thread, it will clean up the wrong stuff
* It would be nice to issue a warning when this happens outside of the shutdown sequence. but it's
*
* The current offender is mono_thread_manage which cleanup threads from the outside.
*/
- if (internal == mono_thread_internal_current ()) {
- TlsSetValue (mono_jit_tls_id, NULL);
-
-#ifdef HAVE_KW_THREAD
- mono_jit_tls = NULL;
- mono_lmf_addr = NULL;
-#if defined(MONO_ARCH_ENABLE_MONO_LMF_VAR)
- mono_lmf = NULL;
-#endif
-#endif
+ if (thread == mono_thread_internal_current ()) {
+ mono_set_lmf (NULL);
+ mono_set_jit_tls (NULL);
+ mono_set_lmf_addr (NULL);
}
+
+ free_jit_tls_data (jit_tls);
+
+ thread->jit_data = NULL;
}
}
target = patch_info->data.inst->inst_c0 + code;
break;
case MONO_PATCH_INFO_IP:
+#if defined(__native_client__) && defined(__native_client_codegen__)
+ /* Need to transform to the destination address, it's */
+ /* emitted as an immediate in the code. */
+ target = nacl_inverse_modify_patch_target(ip);
+#else
target = ip;
+#endif
break;
case MONO_PATCH_INFO_METHOD_REL:
target = code + patch_info->data.offset;
}
case MONO_PATCH_INFO_METHOD_JUMP:
target = mono_create_jump_trampoline (domain, patch_info->data.method, FALSE);
+#if defined(__native_client__) && defined(__native_client_codegen__)
+#if defined(TARGET_AMD64)
+ /* This target is an absolute address, not relative to the */
+ /* current code being emitted on AMD64. */
+ target = nacl_inverse_modify_patch_target(target);
+#endif
+#endif
break;
case MONO_PATCH_INFO_METHOD:
if (patch_info->data.method == method) {
gpointer *jump_table;
int i;
+#if defined(__native_client__) && defined(__native_client_codegen__)
+ /* This memory will leak, but we don't care if we're */
+ /* not deleting JIT'd methods anyway */
+ jump_table = g_malloc0 (sizeof(gpointer) * patch_info->data.table->table_size);
+#else
if (method && method->dynamic) {
jump_table = mono_code_manager_reserve (mono_dynamic_code_hash_lookup (domain, method)->code_mp, sizeof (gpointer) * patch_info->data.table->table_size);
} else {
jump_table = mono_domain_code_reserve (domain, sizeof (gpointer) * patch_info->data.table->table_size);
}
}
+#endif
- for (i = 0; i < patch_info->data.table->table_size; i++)
+ for (i = 0; i < patch_info->data.table->table_size; i++) {
+#if defined(__native_client__) && defined(__native_client_codegen__)
+ /* 'code' is relative to the current code blob, we */
+ /* need to do this transform on it to make the */
+ /* pointers in this table absolute */
+ jump_table [i] = nacl_inverse_modify_patch_target (code) + GPOINTER_TO_INT (patch_info->data.table->table [i]);
+#else
jump_table [i] = code + GPOINTER_TO_INT (patch_info->data.table->table [i]);
+#endif
+ }
+
+#if defined(__native_client__) && defined(__native_client_codegen__)
+ /* jump_table is in the data section, we need to transform */
+ /* it here so when it gets modified in amd64_patch it will */
+ /* then point back to the absolute data address */
+ target = nacl_inverse_modify_patch_target (jump_table);
+#else
target = jump_table;
+#endif
break;
}
case MONO_PATCH_INFO_METHODCONST:
g_assert_not_reached ();
#endif
break;
+ case MONO_PATCH_INFO_GC_CARD_TABLE_ADDR: {
+ int card_table_shift_bits;
+ gpointer card_table_mask;
+
+ target = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
+ break;
+ }
default:
g_assert_not_reached ();
}
}
case MONO_PATCH_INFO_SWITCH: {
gpointer *table;
+#if defined(__native_client__) && defined(__native_client_codegen__)
+ /* This memory will leak. */
+ /* TODO: can we free this when */
+ /* making the final jump table? */
+ table = g_malloc0 (sizeof(gpointer) * patch_info->data.table->table_size);
+#else
if (cfg->method->dynamic) {
table = mono_code_manager_reserve (cfg->dynamic_info->code_mp, sizeof (gpointer) * patch_info->data.table->table_size);
} else {
table = mono_domain_code_reserve (cfg->domain, sizeof (gpointer) * patch_info->data.table->table_size);
}
+#endif
for (i = 0; i < patch_info->data.table->table_size; i++) {
/* Might be NULL if the switch is eliminated */
GSList *list;
MonoDomain *domain = cfg->domain;
unsigned char *ip = cfg->native_code + patch_info->ip.i;
+#if defined(__native_client__) && defined(__native_client_codegen__)
+ /* When this jump target gets evaluated, the method */
+ /* will be installed in the dynamic code section, */
+ /* not at the location of cfg->native_code. */
+ ip = nacl_inverse_modify_patch_target (cfg->native_code) + patch_info->ip.i;
+#endif
mono_domain_lock (domain);
if (!domain_jit_info (domain)->jump_target_hash)
cfg->seq_point_info = info;
// FIXME: dynamic methods
- mono_domain_lock (domain);
- g_hash_table_insert (domain_jit_info (domain)->seq_points, cfg->method_to_register, info);
- mono_domain_unlock (domain);
+ if (!cfg->compile_aot) {
+ mono_domain_lock (domain);
+ // FIXME: How can the lookup succeed ?
+ if (!g_hash_table_lookup (domain_jit_info (domain)->seq_points, cfg->method_to_register))
+ g_hash_table_insert (domain_jit_info (domain)->seq_points, cfg->method_to_register, info);
+ mono_domain_unlock (domain);
+ }
g_ptr_array_free (cfg->seq_points, TRUE);
cfg->seq_points = NULL;
int max_epilog_size;
guint8 *code;
+#if defined(__native_client_codegen__) && defined(__native_client__)
+ void *code_dest;
+
+ /* This keeps patch targets from being transformed during
+ * ordinary method compilation, for local branches and jumps.
+ */
+ nacl_allow_target_modification (FALSE);
+#endif
+
for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
cfg->spill_count = 0;
/* we reuse dfn here */
/* emit code all basic blocks */
for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
bb->native_offset = cfg->code_len;
+ bb->real_native_offset = cfg->code_len;
//if ((bb == cfg->bb_entry) || !(bb->region == -1 && !bb->dfn))
mono_arch_output_basic_block (cfg, bb);
bb->native_length = cfg->code_len - bb->native_offset;
}
}
+#ifdef __native_client_codegen__
+ mono_nacl_fix_patches (cfg->native_code, cfg->patch_info);
+#endif
mono_arch_emit_exceptions (cfg);
max_epilog_size = 0;
#endif
code = mono_domain_code_reserve (cfg->domain, cfg->code_size + unwindlen);
}
+#if defined(__native_client_codegen__) && defined(__native_client__)
+ nacl_allow_target_modification (TRUE);
+#endif
memcpy (code, cfg->native_code, cfg->code_len);
-#ifdef __native_client_codegen__
+#if defined(__default_codegen__)
+ g_free (cfg->native_code);
+#elif defined(__native_client_codegen__)
if (cfg->native_code_alloc) {
g_free (cfg->native_code_alloc);
cfg->native_code_alloc = 0;
else if (cfg->native_code) {
g_free (cfg->native_code);
}
-#else
- g_free (cfg->native_code);
-#endif
+#endif /* __native_client_codegen__ */
cfg->native_code = code;
code = cfg->native_code + cfg->code_len;
#ifdef MONO_ARCH_HAVE_SAVE_UNWIND_INFO
mono_arch_save_unwind_info (cfg);
#endif
-
-#ifdef __native_client_codegen__
+
+#if defined(__native_client_codegen__) && defined(__native_client__)
+ if (!cfg->compile_aot) {
+ if (cfg->method->dynamic) {
+ code_dest = nacl_code_manager_get_code_dest(cfg->dynamic_info->code_mp, cfg->native_code);
+ } else {
+ code_dest = nacl_domain_get_code_dest(cfg->domain, cfg->native_code);
+ }
+ }
+#endif
+
+#if defined(__native_client_codegen__)
mono_nacl_fix_patches (cfg->native_code, cfg->patch_info);
#endif
} else {
mono_domain_code_commit (cfg->domain, cfg->native_code, cfg->code_size, cfg->code_len);
}
+#if defined(__native_client_codegen__) && defined(__native_client__)
+ cfg->native_code = code_dest;
+#endif
mono_profiler_code_buffer_new (cfg->native_code, cfg->code_len, MONO_PROFILER_CODE_BUFFER_METHOD, cfg->method);
mono_arch_flush_icache (cfg->native_code, cfg->code_len);
}
}
+/*
+ * mono_handle_out_of_line_bblock:
+ *
+ *   Insert an explicit OP_BR at the end of any bblock whose successor is
+ * marked out_of_line and which would otherwise fall through: once bblocks
+ * are laid out out-of-line, implicit fall-through is no longer valid.
+ * Blocks already ending in a branch are left alone.
+ */
+static void
+mono_handle_out_of_line_bblock (MonoCompile *cfg)
+{
+	MonoBasicBlock *bb;
+	for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
+		if (bb->next_bb && bb->next_bb->out_of_line && bb->last_ins && !MONO_IS_BRANCH_OP (bb->last_ins)) {
+			MonoInst *ins;
+			MONO_INST_NEW (cfg, ins, OP_BR);
+			MONO_ADD_INS (bb, ins);
+			ins->inst_target_bb = bb->next_bb;
+		}
+	}
+}
+
static MonoJitInfo*
create_jit_info (MonoCompile *cfg, MonoMethod *method_to_compile)
{
tblock = cfg->cil_offset_to_bb [ec->try_offset];
g_assert (tblock);
- ei->try_start = cfg->native_code + tblock->native_offset;
g_assert (tblock->native_offset);
+ ei->try_start = cfg->native_code + tblock->native_offset;
+ if (tblock->extend_try_block) {
+ /*
+ * Extend the try block backwards to include parts of the previous call
+ * instruction.
+ */
+ ei->try_start = (guint8*)ei->try_start - MONO_ARCH_MONITOR_ENTER_ADJUSTMENT;
+ }
tblock = cfg->cil_offset_to_bb [ec->try_offset + ec->try_len];
g_assert (tblock);
+ if (!tblock->native_offset) {
+ int j, end;
+ for (j = ec->try_offset + ec->try_len, end = ec->try_offset; j >= end; --j) {
+ MonoBasicBlock *bb = cfg->cil_offset_to_bb [j];
+ if (bb && bb->native_offset) {
+ tblock = bb;
+ break;
+ }
+ }
+ }
ei->try_end = cfg->native_code + tblock->native_offset;
g_assert (tblock->native_offset);
tblock = cfg->cil_offset_to_bb [ec->handler_offset];
mono_profiler_method_jit (method);
if (MONO_PROBE_METHOD_COMPILE_BEGIN_ENABLED ())
MONO_PROBE_METHOD_COMPILE_BEGIN (method);
-
+
if (compile_aot)
/*
* We might get passed the original generic method definition or
cfg->intvars = mono_mempool_alloc0 (cfg->mempool, sizeof (guint16) * STACK_MAX * header->max_stack);
if (cfg->verbose_level > 0) {
+ char *method_name;
if (COMPILE_LLVM (cfg))
- g_print ("converting llvm method %s\n", mono_method_full_name (method, TRUE));
+ g_print ("converting llvm method %s\n", method_name = mono_method_full_name (method, TRUE));
else if (cfg->generic_sharing_context)
- g_print ("converting shared method %s\n", mono_method_full_name (method_to_compile, TRUE));
+ g_print ("converting shared method %s\n", method_name = mono_method_full_name (method_to_compile, TRUE));
else
- g_print ("converting method %s\n", mono_method_full_name (method, TRUE));
+ g_print ("converting method %s\n", method_name = mono_method_full_name (method, TRUE));
+ g_free (method_name);
}
if (cfg->opt & (MONO_OPT_ABCREM | MONO_OPT_SSAPRE))
//cfg->enable_extended_bblocks = TRUE;
/*We must verify the method before doing any IR generation as mono_compile_create_vars can assert.*/
- if (mono_compile_is_broken (cfg))
+ if (mono_compile_is_broken (cfg, cfg->method, TRUE)) {
+ if (mini_get_debug_options ()->break_on_unverified)
+ G_BREAKPOINT ();
return cfg;
+ }
/*
* create MonoInst* which represents arguments and local variables
*/
mono_liveness_handle_exception_clauses (cfg);
+ mono_handle_out_of_line_bblock (cfg);
+
/*g_print ("numblocks = %d\n", cfg->num_bblocks);*/
if (!COMPILE_LLVM (cfg))
else
InterlockedIncrement (&mono_jit_stats.methods_without_llvm);
- if (cfg->verbose_level >= 2) {
- char *id = mono_method_full_name (cfg->method, FALSE);
- mono_disassemble_code (cfg, cfg->native_code, cfg->code_len, id + 3);
- g_free (id);
- }
-
cfg->jit_info = create_jit_info (cfg, method_to_compile);
#ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
}
#endif
- mono_save_xdebug_info (cfg);
+ if (!cfg->compile_aot)
+ mono_save_xdebug_info (cfg);
mini_gc_create_gc_map (cfg);
mono_save_seq_point_info (cfg);
+ if (cfg->verbose_level >= 2) {
+ char *id = mono_method_full_name (cfg->method, FALSE);
+ mono_disassemble_code (cfg, cfg->native_code, cfg->code_len, id + 3);
+ g_free (id);
+ }
+
if (!cfg->compile_aot) {
mono_domain_lock (cfg->domain);
mono_jit_info_table_add (cfg->domain, cfg->jit_info);
g_free (name);
}
}
+
+/* Whether the perf JIT map is active, i.e. the map file has been opened.
+ * NOTE(review): perf_map_file is defined elsewhere in this file. */
+gboolean
+mono_jit_map_is_enabled (void)
+{
+	return perf_map_file != NULL;
+}
+
#endif
static gpointer
MonoVTable *vtable;
MonoException *ex = NULL;
guint32 prof_options;
+ GTimer *jit_timer;
+ MonoMethod *prof_method;
#ifdef MONO_USE_AOT_COMPILER
if (opt & MONO_OPT_AOT) {
mono_lookup_pinvoke_call (method, NULL, NULL);
}
nm = mono_marshal_get_native_wrapper (method, check_for_pending_exc, FALSE);
- return mono_get_addr_from_ftnptr (mono_compile_method (nm));
+ code = mono_get_addr_from_ftnptr (mono_compile_method (nm));
+ jinfo = mono_jit_info_table_find (target_domain, code);
+ if (!jinfo)
+ jinfo = mono_jit_info_table_find (mono_domain_get (), code);
+ if (jinfo)
+ mono_profiler_method_end_jit (method, jinfo, MONO_PROFILE_OK);
+ return code;
//if (mono_debug_format != MONO_DEBUG_FORMAT_NONE)
//mono_debug_add_wrapper (method, nm);
return NULL;
}
+ jit_timer = g_timer_new ();
+
cfg = mini_method_compile (method, opt, target_domain, TRUE, FALSE, 0);
+ prof_method = cfg->method;
+
+ g_timer_stop (jit_timer);
+ mono_jit_stats.jit_time += g_timer_elapsed (jit_timer, NULL);
+ g_timer_destroy (jit_timer);
switch (cfg->exception_type) {
case MONO_EXCEPTION_NONE:
ex = exp;
break;
}
+ case MONO_EXCEPTION_OUT_OF_MEMORY:
+ ex = mono_domain_get ()->out_of_memory_ex;
+ break;
default:
g_assert_not_reached ();
}
/* The profiler doesn't know about wrappers, so pass the original icall method */
mono_profiler_method_end_jit (mono_marshal_method_from_wrapper (method), jinfo, MONO_PROFILE_OK);
}
- } else {
- mono_profiler_method_end_jit (method, jinfo, MONO_PROFILE_OK);
+ }
+ mono_profiler_method_end_jit (method, jinfo, MONO_PROFILE_OK);
+ if (prof_method != method) {
+ mono_profiler_method_end_jit (prof_method, jinfo, MONO_PROFILE_OK);
}
}
/* We can't use a domain specific method in another domain */
if (! ((domain != target_domain) && !info->domain_neutral)) {
MonoVTable *vtable;
+ MonoException *tmpEx;
mono_jit_stats.methods_lookups++;
vtable = mono_class_vtable (domain, method->klass);
g_assert (vtable);
- mono_runtime_class_init (vtable);
+ tmpEx = mono_runtime_class_init_full (vtable, ex == NULL);
+ if (tmpEx) {
+ *ex = tmpEx;
+ return NULL;
+ }
return mono_create_ftnptr (target_domain, info->code_start);
}
}
#if defined(MONO_ARCH_HAVE_IS_INT_OVERFLOW)
if (mono_arch_is_int_overflow (ctx, info))
- exc = mono_get_exception_arithmetic ();
+ /*
+ * The spec says this throws ArithmeticException, but MS throws the derived
+ * OverflowException.
+ */
+ exc = mono_get_exception_overflow ();
else
exc = mono_get_exception_divide_by_zero ();
#else
{
MonoJitInfo *ji;
MonoJitTlsData *jit_tls = TlsGetValue (mono_jit_tls_id);
+ gpointer fault_addr = NULL;
GET_CONTEXT;
#endif
#if !defined(HOST_WIN32) && defined(HAVE_SIG_INFO)
+ fault_addr = info->si_addr;
if (mono_aot_is_pagefault (info->si_addr)) {
mono_aot_handle_pagefault (info->si_addr);
return;
if (mono_handle_soft_stack_ovf (jit_tls, ji, ctx, (guint8*)info->si_addr))
return;
+#ifdef MONO_ARCH_HAVE_SIGCTX_TO_MONOCTX
+ /* info->si_addr seems to be NULL on some kernels when handling stack overflows */
+ fault_addr = info->si_addr;
+ if (fault_addr == NULL) {
+ MonoContext mctx;
+
+ mono_arch_sigctx_to_monoctx (ctx, &mctx);
+
+ fault_addr = MONO_CONTEXT_GET_SP (&mctx);
+ }
+#endif
+
/* The hard-guard page has been hit: there is not much we can do anymore
* Print a hopefully clear message and abort.
*/
if (jit_tls->stack_size &&
- ABS ((guint8*)info->si_addr - ((guint8*)jit_tls->end_of_stack - jit_tls->stack_size)) < 32768) {
+ ABS ((guint8*)fault_addr - ((guint8*)jit_tls->end_of_stack - jit_tls->stack_size)) < 8192 * sizeof (gpointer)) {
const char *method;
/* we don't do much now, but we can warn the user with a useful message */
fprintf (stderr, "Stack overflow: IP: %p, fault addr: %p\n", mono_arch_ip_from_context (ctx), (gpointer)info->si_addr);
static void
register_jit_stats (void)
{
+	/* methods_compiled/methods_aot are pointer-sized fields now, so they are
+	 * registered as MONO_COUNTER_WORD instead of MONO_COUNTER_LONG. */
-	mono_counters_register ("Compiled methods", MONO_COUNTER_JIT | MONO_COUNTER_LONG, &mono_jit_stats.methods_compiled);
-	mono_counters_register ("Methods from AOT", MONO_COUNTER_JIT | MONO_COUNTER_LONG, &mono_jit_stats.methods_aot);
+	mono_counters_register ("Compiled methods", MONO_COUNTER_JIT | MONO_COUNTER_WORD, &mono_jit_stats.methods_compiled);
+	mono_counters_register ("Methods from AOT", MONO_COUNTER_JIT | MONO_COUNTER_WORD, &mono_jit_stats.methods_aot);
	mono_counters_register ("Methods JITted using LLVM", MONO_COUNTER_JIT | MONO_COUNTER_INT, &mono_jit_stats.methods_with_llvm);
	mono_counters_register ("Methods JITted using mono JIT", MONO_COUNTER_JIT | MONO_COUNTER_INT, &mono_jit_stats.methods_without_llvm);
+	mono_counters_register ("Total time spent JITting (sec)", MONO_COUNTER_JIT | MONO_COUNTER_DOUBLE, &mono_jit_stats.jit_time);
}
static void runtime_invoke_info_free (gpointer value);
mini_debugger_init ();
#endif
+#ifdef MINI_HAVE_FAST_TLS
+ MINI_FAST_TLS_INIT (mono_jit_tls);
+ MINI_FAST_TLS_INIT (mono_lmf_addr);
+#ifdef MONO_ARCH_ENABLE_MONO_LMF_VAR
+ MINI_FAST_TLS_INIT (mono_lmf);
+#endif
+#endif
+
#ifdef MONO_ARCH_HAVE_TLS_GET
mono_runtime_set_has_tls_get (MONO_ARCH_HAVE_TLS_GET);
#else
#endif
mono_install_callbacks (&callbacks);
+
+ if (getenv ("MONO_DEBUG") != NULL)
+ mini_parse_debug_options ();
mono_arch_cpu_init ();
mini_gc_init ();
- if (getenv ("MONO_DEBUG") != NULL)
- mini_parse_debug_options ();
-
if (getenv ("MONO_XDEBUG")) {
char *xdebug_opts = getenv ("MONO_XDEBUG");
mono_xdebug_init (xdebug_opts);
}
#ifdef ENABLE_LLVM
- if (!mono_llvm_load (NULL)) {
- mono_use_llvm = FALSE;
- fprintf (stderr, "Mono Warning: llvm support could not be loaded.\n");
+ if (mono_use_llvm) {
+ if (!mono_llvm_load (NULL)) {
+ mono_use_llvm = FALSE;
+ fprintf (stderr, "Mono Warning: llvm support could not be loaded.\n");
+ }
}
if (mono_use_llvm)
mono_llvm_init ();
mono_icall_init ();
+ /* This should come after mono_init () too */
+ mini_gc_init ();
+
mono_add_internal_call ("System.Diagnostics.StackFrame::get_frame_info",
ves_icall_get_frame_info);
mono_add_internal_call ("System.Diagnostics.StackTrace::get_trace",
mono_add_internal_call ("Mono.Runtime::mono_runtime_install_handlers",
mono_runtime_install_handlers);
-
- create_helper_signature ();
+ mono_create_helper_signatures ();
register_jit_stats ();
register_icall (mono_get_throw_exception (), "mono_arch_throw_exception", "void object", TRUE);
register_icall (mono_get_rethrow_exception (), "mono_arch_rethrow_exception", "void object", TRUE);
-#if MONO_ARCH_HAVE_THROW_CORLIB_EXCEPTION
register_icall (mono_get_throw_corlib_exception (), "mono_arch_throw_corlib_exception",
"void ptr", TRUE);
-#endif
register_icall (mono_thread_get_undeniable_exception, "mono_thread_get_undeniable_exception", "object", FALSE);
register_icall (mono_thread_interruption_checkpoint, "mono_thread_interruption_checkpoint", "void", FALSE);
register_icall (mono_thread_force_interruption_checkpoint, "mono_thread_force_interruption_checkpoint", "void", FALSE);
register_icall (mono_load_remote_field_new, "mono_load_remote_field_new", "object object ptr ptr", FALSE);
register_icall (mono_store_remote_field_new, "mono_store_remote_field_new", "void object ptr ptr object", FALSE);
+#if defined(__native_client__) || defined(__native_client_codegen__)
+ register_icall (mono_nacl_gc, "mono_nacl_gc", "void", TRUE);
+#endif
/*
* NOTE, NOTE, NOTE, NOTE:
* when adding emulation for some opcodes, remember to also add a dummy
mono_register_opcode_emulation (OP_LCONV_TO_R_UN, "__emul_lconv_to_r8_un", "double long", mono_lconv_to_r8_un, FALSE);
#endif
#ifdef MONO_ARCH_EMULATE_FREM
+#if defined(__default_codegen__)
mono_register_opcode_emulation (OP_FREM, "__emul_frem", "double double double", fmod, FALSE);
+#elif defined(__native_client_codegen__)
+ mono_register_opcode_emulation (OP_FREM, "__emul_frem", "double double double", mono_fmod, FALSE);
+#endif
#endif
#ifdef MONO_ARCH_SOFT_FLOAT
register_icall (mono_gc_wbarrier_value_copy_bitmap, "mono_gc_wbarrier_value_copy_bitmap", "void ptr ptr int int", FALSE);
+ register_icall (mono_object_castclass_with_cache, "mono_object_castclass_with_cache", "object object ptr ptr", FALSE);
+ register_icall (mono_object_isinst_with_cache, "mono_object_isinst_with_cache", "object object ptr ptr", FALSE);
+
#endif
mono_generic_sharing_init ();
mono_runtime_cleanup (domain);
#endif
+ free_jit_tls_data (TlsGetValue (mono_jit_tls_id));
+
mono_icall_cleanup ();
mono_runtime_cleanup_handlers ();
mono_llvm_cleanup ();
#endif
+ mono_aot_cleanup ();
+
mono_trampolines_cleanup ();
mono_unwind_cleanup ();
mono_arch_cleanup ();
+ mono_generic_sharing_cleanup ();
+
mono_cleanup ();
mono_trace_cleanup ();
cfg->try_block_holes = g_slist_append_mempool (cfg->mempool, cfg->try_block_holes, hole);
}
+/*
+ * mono_cfg_set_exception:
+ * @cfg: the compilation context
+ * @type: one of the MONO_EXCEPTION_* codes
+ *
+ *   Record TYPE in cfg->exception_type. Only the code is stored here;
+ * presumably callers fill in cfg->exception_message and related fields
+ * separately — confirm at the call sites (not visible in this chunk).
+ */
+void
+mono_cfg_set_exception (MonoCompile *cfg, int type)
+{
+ cfg->exception_type = type;
+}
+
#endif