#include <mono/metadata/gc-internal.h>
+#include <mono/utils/atomic.h>
#include <mono/utils/mono-compiler.h>
#include <mono/utils/mono-logger-internal.h>
#include <mono/utils/mono-membar.h>
#include <mono/utils/mono-counters.h>
#include <mono/utils/hazard-pointer.h>
#include <mono/utils/mono-tls.h>
+#include <mono/utils/mono-mmap.h>
+#include <mono/utils/mono-threads.h>
#include <mono/metadata/object.h>
#include <mono/metadata/object-internals.h>
#include <mono/metadata/domain-internals.h>
#include <metadata/profiler-private.h>
#include <mono/metadata/coree.h>
/* #define DEBUG_DOMAIN_UNLOAD 1 */
/* we need to use both the Tls* functions and __thread because
* some archs may generate faster jit code with one meachanism
#define GET_APPDOMAIN() ((MonoDomain*)MONO_FAST_TLS_GET(tls_appdomain))
/* Keep every copy of the current domain in sync: the fast TLS slot, the
 * native TLS slot, the GC's notion of the thread's domain, and the
 * MonoThreadInfo TLS slot (which may not exist yet during early startup,
 * hence the NULL check). */
#define SET_APPDOMAIN(x) do { \
	MonoThreadInfo *info; \
	MONO_FAST_TLS_SET (tls_appdomain,x); \
	mono_native_tls_set_value (appdomain_thread_id, x); \
	mono_gc_set_current_thread_appdomain (x); \
	info = mono_thread_info_current (); \
	if (info) \
		mono_thread_info_tls_set (info, TLS_KEY_DOMAIN, (x)); \
} while (FALSE)
#else /* !MONO_HAVE_FAST_TLS */
#define GET_APPDOMAIN() ((MonoDomain *)mono_native_tls_get_value (appdomain_thread_id))
/* Same as the fast-TLS variant above minus MONO_FAST_TLS_SET: keep the
 * native TLS slot, the GC's per-thread domain, and the MonoThreadInfo TLS
 * slot (may not exist during early startup, hence the NULL check) in sync. */
#define SET_APPDOMAIN(x) do { \
	MonoThreadInfo *info; \
	mono_native_tls_set_value (appdomain_thread_id, x); \
	mono_gc_set_current_thread_appdomain (x); \
	info = mono_thread_info_current (); \
	if (info) \
		mono_thread_info_tls_set (info, TLS_KEY_DOMAIN, (x)); \
} while (FALSE)
#endif
{"v4.0.30319","4.5", { {4,0,0,0}, {10,0,0,0}, {4,0,0,0}, {4,0,0,0} } },
{"v4.0.30128","4.0", { {4,0,0,0}, {10,0,0,0}, {4,0,0,0}, {4,0,0,0} } },
{"v4.0.20506","4.0", { {4,0,0,0}, {10,0,0,0}, {4,0,0,0}, {4,0,0,0} } },
+ {"mobile", "2.1", { {2,0,5,0}, {10,0,0,0}, {2,0,5,0}, {2,0,5,0} } },
{"moonlight", "2.1", { {2,0,5,0}, { 9,0,0,0}, {3,5,0,0}, {3,0,0,0} } },
};
*
* If TRY_AOT is FALSE, avoid loading information for missing methods from AOT images, which is currently not async safe.
* In this case, only those AOT methods will be found whose jit info is already loaded.
 * ASYNC SAFETY: When called in an async context (mono_thread_info_is_async_context ()), this is async safe.
 * In this case, the returned MonoJitInfo might not have metadata information, in particular,
 * mono_jit_info_get_method () could fail.
*/
MonoJitInfo*
mono_jit_info_table_find_internal (MonoDomain *domain, char *addr, gboolean try_aot)
/*
 * mono_jit_info_get_method:
 *
 * Return the method JI was compiled from. JIT info obtained in an async
 * context may lack metadata information, so the method is not available
 * there — the assert enforces that callers check for that case first.
 */
MonoMethod*
mono_jit_info_get_method (MonoJitInfo* ji)
{
	g_assert (!ji->async);
	return ji->d.method;
}
}
}
/* Round VAL up to a multiple of ALIGN (ALIGN must be a power of two).
 * All macro arguments are parenthesized so compound expressions expand
 * correctly (the previous (guint64)val cast bound only to val's first token). */
#define ALIGN_TO(val,align) ((((guint64)(val)) + ((align) - 1)) & ~((align) - 1))
#define ALIGN_PTR_TO(ptr,align) (gpointer)((((gssize)(ptr)) + ((align) - 1)) & (~((align) - 1)))

+static LockFreeMempool*
+lock_free_mempool_new (void)
+{
+ return g_new0 (LockFreeMempool, 1);
+}
+
+static void
+lock_free_mempool_free (LockFreeMempool *mp)
+{
+ LockFreeMempoolChunk *chunk, *next;
+
+ chunk = mp->chunks;
+ while (chunk) {
+ next = chunk->prev;
+ mono_vfree (chunk, mono_pagesize ());
+ chunk = next;
+ }
+ g_free (mp);
+}
+
+/*
+ * This is async safe
+ */
+static LockFreeMempoolChunk*
+lock_free_mempool_chunk_new (LockFreeMempool *mp, int len)
+{
+ LockFreeMempoolChunk *chunk, *prev;
+ int size;
+
+ size = mono_pagesize ();
+ while (size - sizeof (LockFreeMempoolChunk) < len)
+ size += mono_pagesize ();
+ chunk = mono_valloc (0, size, MONO_MMAP_READ|MONO_MMAP_WRITE);
+ g_assert (chunk);
+ chunk->mem = ALIGN_PTR_TO ((char*)chunk + sizeof (LockFreeMempoolChunk), 16);
+ chunk->size = ((char*)chunk + size) - (char*)chunk->mem;
+ chunk->pos = 0;
+
+ /* Add to list of chunks lock-free */
+ while (TRUE) {
+ prev = mp->chunks;
+ if (InterlockedCompareExchangePointer ((volatile gpointer*)&mp->chunks, chunk, prev) == prev)
+ break;
+ }
+ chunk->prev = prev;
+
+ return chunk;
+}
+
+/*
+ * This is async safe
+ */
+static gpointer
+lock_free_mempool_alloc0 (LockFreeMempool *mp, guint size)
+{
+ LockFreeMempoolChunk *chunk;
+ gpointer res;
+ int oldpos;
+
+ // FIXME: Free the allocator
+
+ size = ALIGN_TO (size, 8);
+ chunk = mp->current;
+ if (!chunk) {
+ chunk = lock_free_mempool_chunk_new (mp, size);
+ mono_memory_barrier ();
+ /* Publish */
+ mp->current = chunk;
+ }
+
+ /* The code below is lock-free, 'chunk' is shared state */
+ oldpos = InterlockedExchangeAdd (&chunk->pos, size);
+ if (oldpos + size > chunk->size) {
+ chunk = lock_free_mempool_chunk_new (mp, size);
+ g_assert (chunk->pos + size <= chunk->size);
+ res = chunk->mem;
+ chunk->pos += size;
+ mono_memory_barrier ();
+ mp->current = chunk;
+ } else {
+ res = (char*)chunk->mem + oldpos;
+ }
+
+ return res;
+}
+
void
mono_install_create_domain_hook (MonoCreateDomainFunc func)
{
domain->mp = mono_mempool_new ();
domain->code_mp = mono_code_manager_new ();
+ domain->lock_free_mp = lock_free_mempool_new ();
domain->env = mono_g_hash_table_new_type ((GHashFunc)mono_string_hash, (GCompareFunc)mono_string_equal, MONO_HASH_KEY_VALUE_GC);
domain->domain_assemblies = NULL;
domain->assembly_bindings = NULL;
mono_reflection_cleanup_domain (domain);
+ /* This must be done before type_hash is freed */
+ if (domain->class_vtable_array) {
+ int i;
+ for (i = 0; i < domain->class_vtable_array->len; ++i)
+ unregister_vtable_reflection_type (g_ptr_array_index (domain->class_vtable_array, i));
+ }
+
if (domain->type_hash) {
mono_g_hash_table_destroy (domain->type_hash);
domain->type_hash = NULL;
domain->type_init_exception_hash = NULL;
}
- if (domain->class_vtable_array) {
- int i;
- for (i = 0; i < domain->class_vtable_array->len; ++i)
- unregister_vtable_reflection_type (g_ptr_array_index (domain->class_vtable_array, i));
- }
-
for (tmp = domain->domain_assemblies; tmp; tmp = tmp->next) {
MonoAssembly *ass = tmp->data;
mono_assembly_release_gc_roots (ass);
mono_code_manager_destroy (domain->code_mp);
domain->code_mp = NULL;
#endif
+ lock_free_mempool_free (domain->lock_free_mp);
+ domain->lock_free_mp = NULL;
g_hash_table_destroy (domain->finalizable_objects_hash);
domain->finalizable_objects_hash = NULL;
return res;
}
+gpointer
+mono_domain_alloc0_lock_free (MonoDomain *domain, guint size)
+{
+ return lock_free_mempool_alloc0 (domain->lock_free_mp, size);
+}
+
/*
* mono_domain_code_reserve:
*
if (next >= size) {
/* 'data' is allocated by alloc_fixed */
gpointer *new_array = mono_gc_alloc_fixed (sizeof (gpointer) * (size * 2), MONO_GC_ROOT_DESCR_FOR_FIXED (size * 2));
- mono_gc_memmove (new_array, domain->static_data_array, sizeof (gpointer) * size);
+ mono_gc_memmove_aligned (new_array, domain->static_data_array, sizeof (gpointer) * size);
size *= 2;
new_array [1] = GINT_TO_POINTER (size);
mono_gc_free_fixed (domain->static_data_array);