* Dietmar Maurer (dietmar@ximian.com)
*
* Copyright 2002-2003 Ximian, Inc.
- * Coprygith 2003-2010 Novell, Inc.
+ * Copyright 2003-2010 Novell, Inc.
*/
#define MONO_LLVM_IN_MINI 1
#include "mini-gc.h"
#include "debugger-agent.h"
+/*
+ * Fast TLS access macros.
+ * - HAVE_KW_THREAD: the compiler supports __thread, so a fast-TLS variable
+ *   is a plain thread-local and get/set are direct loads/stores.
+ * - Apple/x86: __thread is unavailable, so fall back to pthread keys; the
+ *   "offset" reported for a variable is its pthread key (see
+ *   MINI_THREAD_VAR_OFFSET below), which the backend knows how to consume.
+ * - Otherwise no fast TLS: only MINI_THREAD_VAR_OFFSET is defined and
+ *   MINI_HAVE_FAST_TLS stays undefined, so callers use the slow TlsGetValue
+ *   path.
+ */
+#if defined(HAVE_KW_THREAD)
+#define MINI_FAST_TLS_SET(x,y) x = y
+#define MINI_FAST_TLS_GET(x) x
+#define MINI_FAST_TLS_INIT(x)
+#define MINI_FAST_TLS_DECLARE(x) static __thread gpointer x MONO_TLS_FAST;
+#define MINI_HAVE_FAST_TLS
+#define MINI_THREAD_VAR_OFFSET(x,y) MONO_THREAD_VAR_OFFSET(x,y)
+#elif (defined(__APPLE__) && defined(__i386__))
+#define MINI_FAST_TLS_SET(x,y) pthread_setspecific(x, y)
+#define MINI_FAST_TLS_GET(x) pthread_getspecific(x)
+#define MINI_FAST_TLS_INIT(x) pthread_key_create(&x, NULL)
+#define MINI_FAST_TLS_DECLARE(x) static pthread_key_t x;
+#define MINI_HAVE_FAST_TLS
+/* Here the "offset" is actually the pthread key value, not a TLS offset. */
+#define MINI_THREAD_VAR_OFFSET(x,y) y = (gint32) x
+#else
+#define MINI_THREAD_VAR_OFFSET(x,y) MONO_THREAD_VAR_OFFSET(x,y)
+#endif
+
static gpointer mono_jit_compile_method_with_opt (MonoMethod *method, guint32 opt, MonoException **ex);
-#ifdef __native_client_codegen__
-/* Default alignment for Native Client is 32-byte. */
-guint8 nacl_align_byte = 0xe0;
-#endif
static guint32 default_opt = 0;
static gboolean default_opt_set = FALSE;
guint32 mono_jit_tls_id = -1;
-#ifdef HAVE_KW_THREAD
-static __thread gpointer mono_jit_tls MONO_TLS_FAST;
+#ifdef MINI_HAVE_FAST_TLS
+MINI_FAST_TLS_DECLARE(mono_jit_tls);
+#endif
+
+#ifndef MONO_ARCH_MONITOR_ENTER_ADJUSTMENT
+#define MONO_ARCH_MONITOR_ENTER_ADJUSTMENT 1
#endif
MonoTraceSpec *mono_jit_trace_calls = NULL;
gboolean mono_dont_free_global_codeman;
+/*
+ * mono_realloc_native_code:
+ *
+ *   Grow cfg->native_code to cfg->code_size bytes. Under Native Client the
+ * code buffer must stay kNaClAlignment-aligned, so the underlying allocation
+ * is over-sized by one alignment unit and the code bytes are shifted back
+ * into alignment after the realloc. Returns the (possibly moved) pointer to
+ * the aligned code.
+ */
+gpointer
+mono_realloc_native_code (MonoCompile *cfg)
+{
+#if defined(__default_codegen__)
+  return g_realloc (cfg->native_code, cfg->code_size);
+#elif defined(__native_client_codegen__)
+  guint old_padding;
+  gpointer native_code;
+  gsize alignment_check;
+
+  /* Save the old alignment offset so we can re-align after the realloc. */
+  old_padding = (guint)(cfg->native_code - cfg->native_code_alloc);
+
+  cfg->native_code_alloc = g_realloc ( cfg->native_code_alloc,
+                                       cfg->code_size + kNaClAlignment );
+
+  /* Align native_code to next nearest kNaClAlignment byte.
+   * Use gsize for the pointer<->integer round-trip: a guint cast truncates
+   * the pointer on 64-bit targets (NaCl also builds for TARGET_AMD64). */
+  native_code = (gpointer)(((gsize)cfg->native_code_alloc + kNaClAlignment)
+                           & ~(gsize)kNaClAlignmentMask);
+
+  /* Shift the data to be 32-byte aligned again. */
+  memmove (native_code, cfg->native_code_alloc + old_padding, cfg->code_size);
+
+  alignment_check = (gsize)native_code & kNaClAlignmentMask;
+  g_assert (alignment_check == 0);
+  return native_code;
+#else
+  g_assert_not_reached ();
+  return cfg->native_code;
+#endif
+}
+
#ifdef __native_client_codegen__
/* Prevent instructions from straddling a 32-byte alignment boundary. */
* output. Unlike mono_pmip which returns a string, this routine
* prints the value on the standard output.
*/
+#ifdef __GNUC__
+/* Prevent the linker from optimizing this away in embedding setups to help debugging */
+ __attribute__((used))
+#endif
void
mono_print_method_from_ip (void *ip)
{
}
}
+#if defined(__native_client_codegen__) && defined(__native_client__)
+/* Given the temporary buffer (allocated by mono_global_codeman_reserve) into
+ * which we are generating code, return a pointer to the destination in the
+ * dynamic code segment into which the code will be copied when
+ * mono_global_codeman_commit is called.
+ * LOCKING: Acquires the jit lock.
+ */
+void*
+nacl_global_codeman_get_dest (void *data)
+{
+  void *dest;
+  mono_jit_lock ();
+  dest = nacl_code_manager_get_code_dest (global_codeman, data);
+  mono_jit_unlock ();
+  return dest;
+}
+
+/* Validate and copy SIZE bytes of code at DATA (of which NEWSIZE are live)
+ * from the temporary buffer into the global code manager's code segment.
+ * LOCKING: Acquires the jit lock.
+ */
+void
+mono_global_codeman_commit (void *data, int size, int newsize)
+{
+  mono_jit_lock ();
+  mono_code_manager_commit (global_codeman, data, size, newsize);
+  mono_jit_unlock ();
+}
+
+/*
+ * Convenience function which calls mono_global_codeman_commit to validate and
+ * copy the code. The caller sets *buf_base and buf_size to the start and size
+ * of the buffer (allocated by mono_global_codeman_reserve), and *code_end to
+ * the byte after the last instruction byte. On return, *buf_base will point to
+ * the start of the copied code in the code segment, and *code_end will point
+ * after the end of the copied code.
+ */
+void
+nacl_global_codeman_validate (guint8 **buf_base, int buf_size, guint8 **code_end)
+{
+  guint8 *tmp = nacl_global_codeman_get_dest (*buf_base);
+  mono_global_codeman_commit (*buf_base, buf_size, *code_end - *buf_base);
+  *code_end = tmp + (*code_end - *buf_base);
+  *buf_base = tmp;
+}
+#else
+/* no-op versions of Native Client functions, used on every non-NaCl build:
+ * the destination is the buffer itself and commit/validate do nothing. */
+void*
+nacl_global_codeman_get_dest (void *data)
+{
+  return data;
+}
+
+void
+mono_global_codeman_commit (void *data, int size, int newsize)
+{
+}
+
+void
+nacl_global_codeman_validate (guint8 **buf_base, int buf_size, guint8 **code_end)
+{
+}
+
+#endif /* __native_client__ */
+
/**
* mono_create_unwind_op:
*
inst->backend.is_pinvoke = 0;
inst->dreg = vreg;
+ if (cfg->compute_gc_maps) {
+ if (type->byref) {
+ mono_mark_vreg_as_mp (cfg, vreg);
+ } else {
+ MonoType *t = mini_type_get_underlying_type (NULL, type);
+ if ((MONO_TYPE_ISSTRUCT (t) && inst->klass->has_references) || MONO_TYPE_IS_REFERENCE (t)) {
+ inst->flags |= MONO_INST_GC_TRACK;
+ mono_mark_vreg_as_ref (cfg, vreg);
+ }
+ }
+ }
+
cfg->varinfo [num] = inst;
MONO_INIT_VARINFO (&cfg->vars [num], num);
#endif
+/*
+ * mono_mark_vreg_as_ref:
+ *
+ *   Record in CFG that virtual register VREG holds a GC reference. The
+ * vreg_is_ref map is grown on demand by doubling (starting at 32 entries);
+ * since it lives in the cfg mempool, the old, smaller array is simply
+ * abandoned after its contents are copied into the new one.
+ */
+void
+mono_mark_vreg_as_ref (MonoCompile *cfg, int vreg)
+{
+	if (vreg >= cfg->vreg_is_ref_len) {
+		gboolean *tmp = cfg->vreg_is_ref;
+		int size = cfg->vreg_is_ref_len;
+
+		while (vreg >= cfg->vreg_is_ref_len)
+			cfg->vreg_is_ref_len = cfg->vreg_is_ref_len ? cfg->vreg_is_ref_len * 2 : 32;
+		cfg->vreg_is_ref = mono_mempool_alloc0 (cfg->mempool, sizeof (gboolean) * cfg->vreg_is_ref_len);
+		if (size)
+			memcpy (cfg->vreg_is_ref, tmp, size * sizeof (gboolean));
+	}
+	cfg->vreg_is_ref [vreg] = TRUE;
+}
+
+/*
+ * mono_mark_vreg_as_mp:
+ *
+ *   Record in CFG that virtual register VREG holds a managed pointer
+ * (byref). Same on-demand doubling growth scheme as mono_mark_vreg_as_ref;
+ * the old mempool-allocated array is abandoned after its contents are
+ * copied into the new one.
+ */
+void
+mono_mark_vreg_as_mp (MonoCompile *cfg, int vreg)
+{
+	if (vreg >= cfg->vreg_is_mp_len) {
+		gboolean *tmp = cfg->vreg_is_mp;
+		int size = cfg->vreg_is_mp_len;
+
+		while (vreg >= cfg->vreg_is_mp_len)
+			cfg->vreg_is_mp_len = cfg->vreg_is_mp_len ? cfg->vreg_is_mp_len * 2 : 32;
+		cfg->vreg_is_mp = mono_mempool_alloc0 (cfg->mempool, sizeof (gboolean) * cfg->vreg_is_mp_len);
+		if (size)
+			memcpy (cfg->vreg_is_mp, tmp, size * sizeof (gboolean));
+	}
+	cfg->vreg_is_mp [vreg] = TRUE;
+}
+
static MonoType*
type_from_stack_type (MonoInst *ins) {
switch (ins->type) {
mini_assembly_can_skip_verification (MonoDomain *domain, MonoMethod *method)
{
MonoAssembly *assembly = method->klass->image->assembly;
- if (method->wrapper_type != MONO_WRAPPER_NONE)
+ if (method->wrapper_type != MONO_WRAPPER_NONE && method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
return FALSE;
if (assembly->in_gac || assembly->image == mono_defaults.corlib)
return FALSE;
case MONO_TYPE_PTR:
case MONO_TYPE_I:
case MONO_TYPE_U:
-#if SIZEOF_REGISTER == 4
+#if SIZEOF_VOID_P == 4
case MONO_TYPE_I4:
#else
case MONO_TYPE_I8:
vars = mono_varlist_sort (cfg, vars, 0);
offset = 0;
- *stack_align = sizeof (gpointer);
+ *stack_align = sizeof(mgreg_t);
for (l = vars; l; l = l->next) {
vmv = l->data;
inst = cfg->varinfo [vmv->idx];
case MONO_TYPE_PTR:
case MONO_TYPE_I:
case MONO_TYPE_U:
-#if SIZEOF_REGISTER == 4
+#if SIZEOF_VOID_P == 4
case MONO_TYPE_I4:
#else
case MONO_TYPE_I8:
MonoBasicBlock *bb;
MonoInst *c;
- g_print ("IR code for method %s\n", mono_method_full_name (cfg->method, TRUE));
+ {
+ char *method_name = mono_method_full_name (cfg->method, TRUE);
+ g_print ("IR code for method %s\n", method_name);
+ g_free (method_name);
+ }
for (i = 0; i < cfg->num_bblocks; ++i) {
bb = cfg->bblocks [i];
{
if (ins == NULL) {
ins = bb->code;
+ if (ins)
+ ins->prev = ins_to_insert;
bb->code = ins_to_insert;
ins_to_insert->next = ins;
if (bb->last_ins == NULL)
g_hash_table_destroy (cfg->abs_patches);
mono_mempool_destroy (cfg->mempool);
+ mono_debug_free_method (cfg);
+
g_free (cfg->varinfo);
g_free (cfg->vars);
g_free (cfg->exception_message);
g_free (cfg);
}
-#ifdef HAVE_KW_THREAD
-static __thread gpointer mono_lmf_addr MONO_TLS_FAST;
+#ifdef MINI_HAVE_FAST_TLS
+MINI_FAST_TLS_DECLARE(mono_lmf_addr);
#ifdef MONO_ARCH_ENABLE_MONO_LMF_VAR
/*
* When this is defined, the current lmf is stored in this tls variable instead of in
* jit_tls->lmf.
*/
-static __thread gpointer mono_lmf MONO_TLS_FAST;
+MINI_FAST_TLS_DECLARE(mono_lmf);
#endif
#endif
gint32
mono_get_jit_tls_offset (void)
{
-#ifdef HAVE_KW_THREAD
+#ifdef MINI_HAVE_FAST_TLS
int offset;
- MONO_THREAD_VAR_OFFSET (mono_jit_tls, offset);
+ MINI_THREAD_VAR_OFFSET (mono_jit_tls, offset);
return offset;
#else
return -1;
gint32
mono_get_lmf_tls_offset (void)
{
-#if defined(HAVE_KW_THREAD) && defined(MONO_ARCH_ENABLE_MONO_LMF_VAR)
+#if defined(MINI_HAVE_FAST_TLS) && defined(MONO_ARCH_ENABLE_MONO_LMF_VAR)
int offset;
- MONO_THREAD_VAR_OFFSET(mono_lmf,offset);
+ MINI_THREAD_VAR_OFFSET(mono_lmf,offset);
return offset;
#else
return -1;
mono_get_lmf_addr_tls_offset (void)
{
int offset;
- MONO_THREAD_VAR_OFFSET(mono_lmf_addr,offset);
+ MINI_THREAD_VAR_OFFSET(mono_lmf_addr,offset);
return offset;
}
MonoLMF *
mono_get_lmf (void)
{
-#if defined(HAVE_KW_THREAD) && defined(MONO_ARCH_ENABLE_MONO_LMF_VAR)
- return mono_lmf;
+#if defined(MINI_HAVE_FAST_TLS) && defined(MONO_ARCH_ENABLE_MONO_LMF_VAR)
+ return MINI_FAST_TLS_GET (mono_lmf);
#else
MonoJitTlsData *jit_tls;
if ((jit_tls = TlsGetValue (mono_jit_tls_id)))
return jit_tls->lmf;
-
- g_assert_not_reached ();
+ /*
+ * We do not assert here because this function can be called from
+ * mini-gc.c on a thread that has not executed any managed code, yet
+ * (the thread object allocation can trigger a collection).
+ */
return NULL;
#endif
}
MonoLMF **
mono_get_lmf_addr (void)
{
-#ifdef HAVE_KW_THREAD
- return mono_lmf_addr;
+#ifdef MINI_HAVE_FAST_TLS
+ return MINI_FAST_TLS_GET (mono_lmf_addr);
#else
MonoJitTlsData *jit_tls;
void
mono_set_lmf (MonoLMF *lmf)
{
-#if defined(HAVE_KW_THREAD) && defined(MONO_ARCH_ENABLE_MONO_LMF_VAR)
- mono_lmf = lmf;
+#if defined(MINI_HAVE_FAST_TLS) && defined(MONO_ARCH_ENABLE_MONO_LMF_VAR)
+ MINI_FAST_TLS_SET (mono_lmf, lmf);
#endif
(*mono_get_lmf_addr ()) = lmf;
{
TlsSetValue (mono_jit_tls_id, jit_tls);
-#ifdef HAVE_KW_THREAD
- mono_jit_tls = jit_tls;
+#ifdef MINI_HAVE_FAST_TLS
+ MINI_FAST_TLS_SET (mono_jit_tls, jit_tls);
#endif
}
static void
mono_set_lmf_addr (gpointer lmf_addr)
{
-#ifdef HAVE_KW_THREAD
- mono_lmf_addr = lmf_addr;
+#ifdef MINI_HAVE_FAST_TLS
+ MINI_FAST_TLS_SET (mono_lmf_addr, lmf_addr);
#endif
}
*/
domain = mono_get_root_domain ();
-#ifdef HAVE_KW_THREAD
- if (!mono_lmf_addr) {
+#ifdef MINI_HAVE_FAST_TLS
+ if (!MINI_FAST_TLS_GET (mono_lmf_addr)) {
mono_thread_attach (domain);
}
#else
mono_set_jit_tls (jit_tls);
lmf = g_new0 (MonoLMF, 1);
-#ifdef MONO_ARCH_INIT_TOP_LMF_ENTRY
MONO_ARCH_INIT_TOP_LMF_ENTRY (lmf);
-#else
- lmf->ebp = -1;
-#endif
jit_tls->first_lmf = lmf;
-#if defined(HAVE_KW_THREAD) && defined(MONO_ARCH_ENABLE_MONO_LMF_VAR)
+#if defined(MINI_HAVE_FAST_TLS) && defined(MONO_ARCH_ENABLE_MONO_LMF_VAR)
/* jit_tls->lmf is unused */
- mono_lmf = lmf;
+ MINI_FAST_TLS_SET (mono_lmf, lmf);
mono_set_lmf_addr (&mono_lmf);
#else
mono_set_lmf_addr (&jit_tls->lmf);
return jit_tls;
}
+/*
+ * free_jit_tls_data:
+ *
+ *   Release the per-thread JIT data: arch-specific state, the altstack,
+ * the sentinel first_lmf, and the structure itself. Tolerates NULL, since
+ * callers pass TlsGetValue (mono_jit_tls_id), which is NULL on threads
+ * that never set up JIT TLS (e.g. during shutdown on a thread which
+ * executed no managed code).
+ */
+static void
+free_jit_tls_data (MonoJitTlsData *jit_tls)
+{
+	if (!jit_tls)
+		return;
+
+	mono_arch_free_jit_tls_data (jit_tls);
+	mono_free_altstack (jit_tls);
+
+	g_free (jit_tls->first_lmf);
+	g_free (jit_tls);
+}
+
static void
mono_thread_start_cb (intptr_t tid, gpointer stack_start, gpointer func)
{
mono_debugger_thread_created (tid, thread->root_domain_thread, jit_tls, func);
if (thread)
thread->jit_data = jit_tls;
+
+ mono_arch_cpu_init ();
}
void (*mono_thread_attach_aborted_cb ) (MonoObject *obj) = NULL;
thread->jit_data = jit_tls;
if (mono_profiler_get_events () & MONO_PROFILE_STATISTICAL)
mono_runtime_setup_stat_profiler ();
+
+ mono_arch_cpu_init ();
}
static void
if (jit_tls) {
mono_debugger_thread_cleanup (jit_tls);
- mono_arch_free_jit_tls_data (jit_tls);
-
- mono_free_altstack (jit_tls);
- g_free (jit_tls->first_lmf);
- g_free (jit_tls);
- thread->jit_data = NULL;
/* We can't clean up tls information if we are on another thread, it will clean up the wrong stuff
* It would be nice to issue a warning when this happens outside of the shutdown sequence. but it's
mono_set_jit_tls (NULL);
mono_set_lmf_addr (NULL);
}
+
+ free_jit_tls_data (jit_tls);
+
+ thread->jit_data = NULL;
}
}
switch (ji->type) {
case MONO_PATCH_INFO_RVA:
case MONO_PATCH_INFO_LDSTR:
- case MONO_PATCH_INFO_TYPE_FROM_HANDLE:
case MONO_PATCH_INFO_LDTOKEN:
case MONO_PATCH_INFO_DECLSEC:
return (ji->type << 8) | ji->data.token->token;
+ case MONO_PATCH_INFO_TYPE_FROM_HANDLE:
+ return (ji->type << 8) | ji->data.token->token | (ji->data.token->has_context ? (gsize)ji->data.token->context.class_inst : 0);
case MONO_PATCH_INFO_INTERNAL_METHOD:
return (ji->type << 8) | g_str_hash (ji->data.name);
case MONO_PATCH_INFO_VTABLE:
target = patch_info->data.inst->inst_c0 + code;
break;
case MONO_PATCH_INFO_IP:
+#if defined(__native_client__) && defined(__native_client_codegen__)
+ /* Need to transform to the destination address, it's */
+ /* emitted as an immediate in the code. */
+ target = nacl_inverse_modify_patch_target(ip);
+#else
target = ip;
+#endif
break;
case MONO_PATCH_INFO_METHOD_REL:
target = code + patch_info->data.offset;
}
case MONO_PATCH_INFO_METHOD_JUMP:
target = mono_create_jump_trampoline (domain, patch_info->data.method, FALSE);
+#if defined(__native_client__) && defined(__native_client_codegen__)
+#if defined(TARGET_AMD64)
+ /* This target is an absolute address, not relative to the */
+ /* current code being emitted on AMD64. */
+ target = nacl_inverse_modify_patch_target(target);
+#endif
+#endif
break;
case MONO_PATCH_INFO_METHOD:
if (patch_info->data.method == method) {
gpointer *jump_table;
int i;
+#if defined(__native_client__) && defined(__native_client_codegen__)
+ /* This memory will leak, but we don't care if we're */
+ /* not deleting JIT'd methods anyway */
+ jump_table = g_malloc0 (sizeof(gpointer) * patch_info->data.table->table_size);
+#else
if (method && method->dynamic) {
jump_table = mono_code_manager_reserve (mono_dynamic_code_hash_lookup (domain, method)->code_mp, sizeof (gpointer) * patch_info->data.table->table_size);
} else {
jump_table = mono_domain_code_reserve (domain, sizeof (gpointer) * patch_info->data.table->table_size);
}
}
+#endif
- for (i = 0; i < patch_info->data.table->table_size; i++)
+ for (i = 0; i < patch_info->data.table->table_size; i++) {
+#if defined(__native_client__) && defined(__native_client_codegen__)
+ /* 'code' is relative to the current code blob, we */
+ /* need to do this transform on it to make the */
+ /* pointers in this table absolute */
+ jump_table [i] = nacl_inverse_modify_patch_target (code) + GPOINTER_TO_INT (patch_info->data.table->table [i]);
+#else
jump_table [i] = code + GPOINTER_TO_INT (patch_info->data.table->table [i]);
+#endif
+ }
+
+#if defined(__native_client__) && defined(__native_client_codegen__)
+ /* jump_table is in the data section, we need to transform */
+ /* it here so when it gets modified in amd64_patch it will */
+ /* then point back to the absolute data address */
+ target = nacl_inverse_modify_patch_target (jump_table);
+#else
target = jump_table;
+#endif
break;
}
case MONO_PATCH_INFO_METHODCONST:
g_assert_not_reached ();
#endif
break;
-#ifdef HAVE_SGEN_GC
case MONO_PATCH_INFO_GC_CARD_TABLE_ADDR: {
int card_table_shift_bits;
gpointer card_table_mask;
target = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
break;
}
-#endif
+ case MONO_PATCH_INFO_CASTCLASS_CACHE: {
+ target = mono_domain_alloc0 (domain, sizeof (gpointer));
+ break;
+ }
default:
g_assert_not_reached ();
}
}
case MONO_PATCH_INFO_SWITCH: {
gpointer *table;
+#if defined(__native_client__) && defined(__native_client_codegen__)
+ /* This memory will leak. */
+ /* TODO: can we free this when */
+ /* making the final jump table? */
+ table = g_malloc0 (sizeof(gpointer) * patch_info->data.table->table_size);
+#else
if (cfg->method->dynamic) {
table = mono_code_manager_reserve (cfg->dynamic_info->code_mp, sizeof (gpointer) * patch_info->data.table->table_size);
} else {
table = mono_domain_code_reserve (cfg->domain, sizeof (gpointer) * patch_info->data.table->table_size);
}
+#endif
for (i = 0; i < patch_info->data.table->table_size; i++) {
/* Might be NULL if the switch is eliminated */
GSList *list;
MonoDomain *domain = cfg->domain;
unsigned char *ip = cfg->native_code + patch_info->ip.i;
+#if defined(__native_client__) && defined(__native_client_codegen__)
+ /* When this jump target gets evaluated, the method */
+ /* will be installed in the dynamic code section, */
+ /* not at the location of cfg->native_code. */
+ ip = nacl_inverse_modify_patch_target (cfg->native_code) + patch_info->ip.i;
+#endif
mono_domain_lock (domain);
if (!domain_jit_info (domain)->jump_target_hash)
cfg->seq_point_info = info;
// FIXME: dynamic methods
- mono_domain_lock (domain);
- g_hash_table_insert (domain_jit_info (domain)->seq_points, cfg->method_to_register, info);
- mono_domain_unlock (domain);
+ if (!cfg->compile_aot) {
+ mono_domain_lock (domain);
+ // FIXME: How can the lookup succeed ?
+ if (!g_hash_table_lookup (domain_jit_info (domain)->seq_points, cfg->method_to_register))
+ g_hash_table_insert (domain_jit_info (domain)->seq_points, cfg->method_to_register, info);
+ mono_domain_unlock (domain);
+ }
g_ptr_array_free (cfg->seq_points, TRUE);
cfg->seq_points = NULL;
MonoBasicBlock *bb;
int max_epilog_size;
guint8 *code;
+ MonoDomain *code_domain;
+
+ if (mono_using_xdebug)
+ /*
+ * Recent gdb versions have trouble processing symbol files containing
+ * overlapping address ranges, so allocate all code from the code manager
+ * of the root domain. (#666152).
+ */
+ code_domain = mono_get_root_domain ();
+ else
+ code_domain = cfg->domain;
+
+#if defined(__native_client_codegen__) && defined(__native_client__)
+ void *code_dest;
+
+ /* This keeps patch targets from being transformed during
+ * ordinary method compilation, for local branches and jumps.
+ */
+ nacl_allow_target_modification (FALSE);
+#endif
for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
cfg->spill_count = 0;
/* emit code all basic blocks */
for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
bb->native_offset = cfg->code_len;
+ bb->real_native_offset = cfg->code_len;
//if ((bb == cfg->bb_entry) || !(bb->region == -1 && !bb->dfn))
mono_arch_output_basic_block (cfg, bb);
bb->native_length = cfg->code_len - bb->native_offset;
}
}
+#ifdef __native_client_codegen__
+ mono_nacl_fix_patches (cfg->native_code, cfg->patch_info);
+#endif
mono_arch_emit_exceptions (cfg);
max_epilog_size = 0;
mono_dynamic_code_hash_insert (cfg->domain, cfg->method, cfg->dynamic_info);
mono_domain_unlock (cfg->domain);
- code = mono_code_manager_reserve (cfg->dynamic_info->code_mp, cfg->code_size + unwindlen);
+ if (mono_using_xdebug)
+ /* See the comment for cfg->code_domain */
+ code = mono_domain_code_reserve (code_domain, cfg->code_size + unwindlen);
+ else
+ code = mono_code_manager_reserve (cfg->dynamic_info->code_mp, cfg->code_size + unwindlen);
} else {
guint unwindlen = 0;
#ifdef MONO_ARCH_HAVE_UNWIND_TABLE
unwindlen = mono_arch_unwindinfo_get_size (cfg->arch.unwindinfo);
#endif
- code = mono_domain_code_reserve (cfg->domain, cfg->code_size + unwindlen);
+ code = mono_domain_code_reserve (code_domain, cfg->code_size + unwindlen);
}
+#if defined(__native_client_codegen__) && defined(__native_client__)
+ nacl_allow_target_modification (TRUE);
+#endif
memcpy (code, cfg->native_code, cfg->code_len);
-#ifdef __native_client_codegen__
+#if defined(__default_codegen__)
+ g_free (cfg->native_code);
+#elif defined(__native_client_codegen__)
if (cfg->native_code_alloc) {
g_free (cfg->native_code_alloc);
cfg->native_code_alloc = 0;
else if (cfg->native_code) {
g_free (cfg->native_code);
}
-#else
- g_free (cfg->native_code);
-#endif
+#endif /* __native_client_codegen__ */
cfg->native_code = code;
code = cfg->native_code + cfg->code_len;
#ifdef MONO_ARCH_HAVE_SAVE_UNWIND_INFO
mono_arch_save_unwind_info (cfg);
#endif
-
-#ifdef __native_client_codegen__
+
+#if defined(__native_client_codegen__) && defined(__native_client__)
+ if (!cfg->compile_aot) {
+ if (cfg->method->dynamic) {
+ code_dest = nacl_code_manager_get_code_dest(cfg->dynamic_info->code_mp, cfg->native_code);
+ } else {
+ code_dest = nacl_domain_get_code_dest(cfg->domain, cfg->native_code);
+ }
+ }
+#endif
+
+#if defined(__native_client_codegen__)
mono_nacl_fix_patches (cfg->native_code, cfg->patch_info);
#endif
mono_arch_patch_code (cfg->method, cfg->domain, cfg->native_code, cfg->patch_info, cfg->run_cctors);
if (cfg->method->dynamic) {
- mono_code_manager_commit (cfg->dynamic_info->code_mp, cfg->native_code, cfg->code_size, cfg->code_len);
+ if (mono_using_xdebug)
+ mono_domain_code_commit (code_domain, cfg->native_code, cfg->code_size, cfg->code_len);
+ else
+ mono_code_manager_commit (cfg->dynamic_info->code_mp, cfg->native_code, cfg->code_size, cfg->code_len);
} else {
- mono_domain_code_commit (cfg->domain, cfg->native_code, cfg->code_size, cfg->code_len);
+ mono_domain_code_commit (code_domain, cfg->native_code, cfg->code_size, cfg->code_len);
}
+#if defined(__native_client_codegen__) && defined(__native_client__)
+ cfg->native_code = code_dest;
+#endif
mono_profiler_code_buffer_new (cfg->native_code, cfg->code_len, MONO_PROFILER_CODE_BUFFER_METHOD, cfg->method);
mono_arch_flush_icache (cfg->native_code, cfg->code_len);
}
}
+/*
+ * mono_handle_out_of_line_bblock:
+ *
+ *   Ensure every basic block whose successor is marked out-of-line ends in
+ * an explicit branch (OP_BR) to that successor: since the successor will not
+ * be emitted immediately after this block, implicit fall-through would jump
+ * into whatever code happens to follow. Blocks already ending in a branch
+ * are left untouched.
+ */
+static void
+mono_handle_out_of_line_bblock (MonoCompile *cfg)
+{
+	MonoBasicBlock *bb;
+	for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
+		if (bb->next_bb && bb->next_bb->out_of_line && bb->last_ins && !MONO_IS_BRANCH_OP (bb->last_ins)) {
+			MonoInst *ins;
+			MONO_INST_NEW (cfg, ins, OP_BR);
+			MONO_ADD_INS (bb, ins);
+			ins->inst_target_bb = bb->next_bb;
+		}
+	}
+}
+
static MonoJitInfo*
create_jit_info (MonoCompile *cfg, MonoMethod *method_to_compile)
{
tblock = cfg->cil_offset_to_bb [ec->try_offset];
g_assert (tblock);
- ei->try_start = cfg->native_code + tblock->native_offset;
g_assert (tblock->native_offset);
+ ei->try_start = cfg->native_code + tblock->native_offset;
+ if (tblock->extend_try_block) {
+ /*
+ * Extend the try block backwards to include parts of the previous call
+ * instruction.
+ */
+ ei->try_start = (guint8*)ei->try_start - MONO_ARCH_MONITOR_ENTER_ADJUSTMENT;
+ }
tblock = cfg->cil_offset_to_bb [ec->try_offset + ec->try_len];
g_assert (tblock);
if (!tblock->native_offset) {
mono_profiler_method_jit (method);
if (MONO_PROBE_METHOD_COMPILE_BEGIN_ENABLED ())
MONO_PROBE_METHOD_COMPILE_BEGIN (method);
-
+
if (compile_aot)
/*
* We might get passed the original generic method definition or
cfg->intvars = mono_mempool_alloc0 (cfg->mempool, sizeof (guint16) * STACK_MAX * header->max_stack);
if (cfg->verbose_level > 0) {
+ char *method_name;
if (COMPILE_LLVM (cfg))
- g_print ("converting llvm method %s\n", mono_method_full_name (method, TRUE));
+ g_print ("converting llvm method %s\n", method_name = mono_method_full_name (method, TRUE));
else if (cfg->generic_sharing_context)
- g_print ("converting shared method %s\n", mono_method_full_name (method_to_compile, TRUE));
+ g_print ("converting shared method %s\n", method_name = mono_method_full_name (method_to_compile, TRUE));
else
- g_print ("converting method %s\n", mono_method_full_name (method, TRUE));
+ g_print ("converting method %s\n", method_name = mono_method_full_name (method, TRUE));
+ g_free (method_name);
}
if (cfg->opt & (MONO_OPT_ABCREM | MONO_OPT_SSAPRE))
*/
mono_liveness_handle_exception_clauses (cfg);
+ mono_handle_out_of_line_bblock (cfg);
+
/*g_print ("numblocks = %d\n", cfg->num_bblocks);*/
if (!COMPILE_LLVM (cfg))
else
InterlockedIncrement (&mono_jit_stats.methods_without_llvm);
- if (cfg->verbose_level >= 2) {
- char *id = mono_method_full_name (cfg->method, FALSE);
- mono_disassemble_code (cfg, cfg->native_code, cfg->code_len, id + 3);
- g_free (id);
- }
-
cfg->jit_info = create_jit_info (cfg, method_to_compile);
#ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
}
#endif
- mono_save_xdebug_info (cfg);
+ if (!cfg->compile_aot)
+ mono_save_xdebug_info (cfg);
mini_gc_create_gc_map (cfg);
mono_save_seq_point_info (cfg);
+ if (cfg->verbose_level >= 2) {
+ char *id = mono_method_full_name (cfg->method, FALSE);
+ mono_disassemble_code (cfg, cfg->native_code, cfg->code_len, id + 3);
+ g_free (id);
+ }
+
if (!cfg->compile_aot) {
mono_domain_lock (cfg->domain);
mono_jit_info_table_add (cfg->domain, cfg->jit_info);
MonoVTable *vtable;
MonoException *ex = NULL;
guint32 prof_options;
+ GTimer *jit_timer;
+ MonoMethod *prof_method;
#ifdef MONO_USE_AOT_COMPILER
if (opt & MONO_OPT_AOT) {
mono_lookup_pinvoke_call (method, NULL, NULL);
}
nm = mono_marshal_get_native_wrapper (method, check_for_pending_exc, FALSE);
- return mono_get_addr_from_ftnptr (mono_compile_method (nm));
+ code = mono_get_addr_from_ftnptr (mono_compile_method (nm));
+ jinfo = mono_jit_info_table_find (target_domain, code);
+ if (!jinfo)
+ jinfo = mono_jit_info_table_find (mono_domain_get (), code);
+ if (jinfo)
+ mono_profiler_method_end_jit (method, jinfo, MONO_PROFILE_OK);
+ return code;
//if (mono_debug_format != MONO_DEBUG_FORMAT_NONE)
//mono_debug_add_wrapper (method, nm);
return NULL;
}
+ jit_timer = g_timer_new ();
+
cfg = mini_method_compile (method, opt, target_domain, TRUE, FALSE, 0);
+ prof_method = cfg->method;
+
+ g_timer_stop (jit_timer);
+ mono_jit_stats.jit_time += g_timer_elapsed (jit_timer, NULL);
+ g_timer_destroy (jit_timer);
switch (cfg->exception_type) {
case MONO_EXCEPTION_NONE:
ex = exp;
break;
}
+ case MONO_EXCEPTION_OUT_OF_MEMORY:
+ ex = mono_domain_get ()->out_of_memory_ex;
+ break;
default:
g_assert_not_reached ();
}
/* The profiler doesn't know about wrappers, so pass the original icall method */
mono_profiler_method_end_jit (mono_marshal_method_from_wrapper (method), jinfo, MONO_PROFILE_OK);
}
- } else {
- mono_profiler_method_end_jit (method, jinfo, MONO_PROFILE_OK);
+ }
+ mono_profiler_method_end_jit (method, jinfo, MONO_PROFILE_OK);
+ if (prof_method != method) {
+ mono_profiler_method_end_jit (prof_method, jinfo, MONO_PROFILE_OK);
}
}
#if defined(MONO_ARCH_HAVE_IS_INT_OVERFLOW)
if (mono_arch_is_int_overflow (ctx, info))
- exc = mono_get_exception_arithmetic ();
+ /*
+ * The spec says this throws ArithmeticException, but MS throws the derived
+ * OverflowException.
+ */
+ exc = mono_get_exception_overflow ();
else
exc = mono_get_exception_divide_by_zero ();
#else
{
MonoJitInfo *ji;
MonoJitTlsData *jit_tls = TlsGetValue (mono_jit_tls_id);
+ gpointer fault_addr = NULL;
GET_CONTEXT;
#endif
#if !defined(HOST_WIN32) && defined(HAVE_SIG_INFO)
+ fault_addr = info->si_addr;
if (mono_aot_is_pagefault (info->si_addr)) {
mono_aot_handle_pagefault (info->si_addr);
return;
if (mono_handle_soft_stack_ovf (jit_tls, ji, ctx, (guint8*)info->si_addr))
return;
- /* The hard-guard page has been hit: there is not much we can do anymore
- * Print a hopefully clear message and abort.
- */
+#ifdef MONO_ARCH_HAVE_SIGCTX_TO_MONOCTX
+ /* info->si_addr seems to be NULL on some kernels when handling stack overflows */
+ fault_addr = info->si_addr;
+ if (fault_addr == NULL) {
+ MonoContext mctx;
+
+ mono_arch_sigctx_to_monoctx (ctx, &mctx);
+
+ fault_addr = MONO_CONTEXT_GET_SP (&mctx);
+ }
+#endif
+
if (jit_tls->stack_size &&
- ABS ((guint8*)info->si_addr - ((guint8*)jit_tls->end_of_stack - jit_tls->stack_size)) < 32768) {
- const char *method;
- /* we don't do much now, but we can warn the user with a useful message */
- fprintf (stderr, "Stack overflow: IP: %p, fault addr: %p\n", mono_arch_ip_from_context (ctx), (gpointer)info->si_addr);
- if (ji && ji->method)
- method = mono_method_full_name (ji->method, TRUE);
- else
- method = "Unmanaged";
- fprintf (stderr, "At %s\n", method);
- _exit (1);
+ ABS ((guint8*)fault_addr - ((guint8*)jit_tls->end_of_stack - jit_tls->stack_size)) < 8192 * sizeof (gpointer)) {
+ /*
+ * The hard-guard page has been hit: there is not much we can do anymore
+ * Print a hopefully clear message and abort.
+ */
+ mono_handle_hard_stack_ovf (jit_tls, ji, ctx, (guint8*)info->si_addr);
+ g_assert_not_reached ();
} else {
/* The original handler might not like that it is executed on an altstack... */
if (!ji && mono_chain_signal (SIG_HANDLER_PARAMS))
debug_options.no_gdb_backtrace = TRUE;
else if (!strcmp (arg, "suspend-on-sigsegv"))
debug_options.suspend_on_sigsegv = TRUE;
+ else if (!strcmp (arg, "suspend-on-unhandled"))
+ debug_options.suspend_on_unhandled = TRUE;
else if (!strcmp (arg, "dont-free-domains"))
mono_dont_free_domains = TRUE;
else if (!strcmp (arg, "dyn-runtime-invoke"))
debug_options.better_cast_details = TRUE;
else {
fprintf (stderr, "Invalid option for the MONO_DEBUG env variable: %s\n", arg);
- fprintf (stderr, "Available options: 'handle-sigint', 'keep-delegates', 'reverse-pinvoke-exceptions', 'collect-pagefault-stats', 'break-on-unverified', 'no-gdb-backtrace', 'dont-free-domains', 'suspend-on-sigsegv', 'dyn-runtime-invoke', 'gdb', 'explicit-null-checks', 'init-stacks'\n");
+ fprintf (stderr, "Available options: 'handle-sigint', 'keep-delegates', 'reverse-pinvoke-exceptions', 'collect-pagefault-stats', 'break-on-unverified', 'no-gdb-backtrace', 'dont-free-domains', 'suspend-on-sigsegv', 'suspend-on-unhandled', 'dyn-runtime-invoke', 'gdb', 'explicit-null-checks', 'init-stacks'\n");
exit (1);
}
}
mono_counters_register ("Methods from AOT", MONO_COUNTER_JIT | MONO_COUNTER_WORD, &mono_jit_stats.methods_aot);
mono_counters_register ("Methods JITted using LLVM", MONO_COUNTER_JIT | MONO_COUNTER_INT, &mono_jit_stats.methods_with_llvm);
mono_counters_register ("Methods JITted using mono JIT", MONO_COUNTER_JIT | MONO_COUNTER_INT, &mono_jit_stats.methods_without_llvm);
+ mono_counters_register ("Total time spent JITting (sec)", MONO_COUNTER_JIT | MONO_COUNTER_DOUBLE, &mono_jit_stats.jit_time);
}
static void runtime_invoke_info_free (gpointer value);
mini_debugger_init ();
#endif
+#ifdef MINI_HAVE_FAST_TLS
+ MINI_FAST_TLS_INIT (mono_jit_tls);
+ MINI_FAST_TLS_INIT (mono_lmf_addr);
+#ifdef MONO_ARCH_ENABLE_MONO_LMF_VAR
+ MINI_FAST_TLS_INIT (mono_lmf);
+#endif
+#endif
+
#ifdef MONO_ARCH_HAVE_TLS_GET
mono_runtime_set_has_tls_get (MONO_ARCH_HAVE_TLS_GET);
#else
callbacks.create_ftnptr = mini_create_ftnptr;
callbacks.get_addr_from_ftnptr = mini_get_addr_from_ftnptr;
callbacks.get_runtime_build_info = mono_get_runtime_build_info;
+ callbacks.set_cast_details = mono_set_cast_details;
#ifdef MONO_ARCH_HAVE_IMT
if (mono_use_imt) {
#endif
mono_install_callbacks (&callbacks);
+
+ if (getenv ("MONO_DEBUG") != NULL)
+ mini_parse_debug_options ();
mono_arch_cpu_init ();
mono_unwind_init ();
- mini_gc_init ();
-
- if (getenv ("MONO_DEBUG") != NULL)
- mini_parse_debug_options ();
-
if (getenv ("MONO_XDEBUG")) {
char *xdebug_opts = getenv ("MONO_XDEBUG");
mono_xdebug_init (xdebug_opts);
mono_icall_init ();
+ /* This should come after mono_init () too */
+ mini_gc_init ();
+
mono_add_internal_call ("System.Diagnostics.StackFrame::get_frame_info",
ves_icall_get_frame_info);
mono_add_internal_call ("System.Diagnostics.StackTrace::get_trace",
register_icall (mono_load_remote_field_new, "mono_load_remote_field_new", "object object ptr ptr", FALSE);
register_icall (mono_store_remote_field_new, "mono_store_remote_field_new", "void object ptr ptr object", FALSE);
+#if defined(__native_client__) || defined(__native_client_codegen__)
+ register_icall (mono_nacl_gc, "mono_nacl_gc", "void", TRUE);
+#endif
/*
* NOTE, NOTE, NOTE, NOTE:
* when adding emulation for some opcodes, remember to also add a dummy
mono_register_opcode_emulation (OP_LCONV_TO_R_UN, "__emul_lconv_to_r8_un", "double long", mono_lconv_to_r8_un, FALSE);
#endif
#ifdef MONO_ARCH_EMULATE_FREM
+#if defined(__default_codegen__)
mono_register_opcode_emulation (OP_FREM, "__emul_frem", "double double double", fmod, FALSE);
+#elif defined(__native_client_codegen__)
+ mono_register_opcode_emulation (OP_FREM, "__emul_frem", "double double double", mono_fmod, FALSE);
+#endif
#endif
#ifdef MONO_ARCH_SOFT_FLOAT
mono_runtime_cleanup (domain);
#endif
+ free_jit_tls_data (TlsGetValue (mono_jit_tls_id));
+
mono_icall_cleanup ();
mono_runtime_cleanup_handlers ();