-/*
- * sgen-mono.c: SGen features specific to Mono.
+/**
+ * \file
+ * SGen features specific to Mono.
*
* Copyright (C) 2014 Xamarin Inc
*
#include "sgen/sgen-client.h"
#include "sgen/sgen-cardtable.h"
#include "sgen/sgen-pinning.h"
+#include "sgen/sgen-workers.h"
#include "metadata/marshal.h"
#include "metadata/method-builder.h"
#include "metadata/abi-details.h"
#include "utils/mono-memory-model.h"
#include "utils/mono-logger-internals.h"
#include "utils/mono-threads-coop.h"
-#include "sgen/sgen-thread-pool.h"
+#include "utils/mono-threads.h"
+#include "metadata/w32handle.h"
#ifdef HEAVY_STATISTICS
static guint64 stat_wbarrier_set_arrayref = 0;
/* Functions supplied by the runtime to be called by the GC */
static MonoGCCallbacks gc_callbacks;
-#ifdef HAVE_KW_THREAD
-__thread SgenThreadInfo *sgen_thread_info;
-#else
-MonoNativeTlsKey thread_info_key;
-#endif
-
#define ALIGN_TO(val,align) ((((guint64)val) + ((align) - 1)) & ~((align) - 1))
#define OPDEF(a,b,c,d,e,f,g,h,i,j) \
/**
* mono_gc_wbarrier_object_copy:
*
- * Write barrier to call when obj is the result of a clone or copy of an object.
+ * Write barrier to call when \p obj is the result of a clone or copy of an object.
*/
void
mono_gc_wbarrier_object_copy (MonoObject* obj, MonoObject *src)
sgen_get_remset ()->wbarrier_object_copy (obj, src);
}
+/**
+ * mono_gc_wbarrier_set_arrayref:
+ */
void
mono_gc_wbarrier_set_arrayref (MonoArray *arr, gpointer slot_ptr, MonoObject* value)
{
sgen_get_remset ()->wbarrier_set_field ((GCObject*)arr, slot_ptr, value);
}
+/**
+ * mono_gc_wbarrier_set_field:
+ */
void
mono_gc_wbarrier_set_field (MonoObject *obj, gpointer field_ptr, MonoObject* value)
{
sgen_wbarrier_value_copy_bitmap (_dest, _src, size, bitmap);
}
/*
 * mono_gc_get_suspend_signal:
 *
 * Returns the signal number the threading layer uses to suspend threads.
 */
int
mono_gc_get_suspend_signal (void)
{
	return mono_threads_suspend_get_suspend_signal ();
}
+
/*
 * mono_gc_get_restart_signal:
 *
 * Returns the signal number the threading layer uses to resume suspended
 * threads.
 */
int
mono_gc_get_restart_signal (void)
{
	return mono_threads_suspend_get_restart_signal ();
}
+
static MonoMethod *write_barrier_conc_method;
static MonoMethod *write_barrier_noconc_method;
return sgen_has_managed_allocator ();
}
+static gboolean
+ip_in_critical_region (MonoDomain *domain, gpointer ip)
+{
+ MonoJitInfo *ji;
+ MonoMethod *method;
+
+ /*
+ * We pass false for 'try_aot' so this becomes async safe.
+ * It won't find aot methods whose jit info is not yet loaded,
+ * so we preload their jit info in the JIT.
+ */
+ ji = mono_jit_info_table_find_internal (domain, ip, FALSE, FALSE);
+ if (!ji)
+ return FALSE;
+
+ method = mono_jit_info_get_method (ji);
+
+ return mono_runtime_is_critical_method (method) || sgen_is_critical_method (method);
+}
+
+gboolean
+mono_gc_is_critical_method (MonoMethod *method)
+{
+ return sgen_is_critical_method (method);
+}
+
#ifndef DISABLE_JIT
static void
mono_gc_run_finalize (obj, NULL);
}
/**
 * mono_gc_invoke_finalizers:
 */
int
mono_gc_invoke_finalizers (void)
{
	/* Delegate straight to the SGen implementation. */
	return sgen_gc_invoke_finalizers ();
}
+/**
+ * mono_gc_pending_finalizers:
+ */
MonoBoolean
mono_gc_pending_finalizers (void)
{
/**
* mono_gc_finalizers_for_domain:
- * @domain: the unloading appdomain
- * @out_array: output array
- * @out_size: size of output array
- *
- * Enqueue for finalization all objects that belong to the unloading appdomain @domain
- * @suspend is used for early termination of the enqueuing process.
+ * \param domain the unloading appdomain
+ * \param out_array output array
+ * \param out_size size of output array
+ * Enqueue for finalization all objects that belong to the unloading appdomain \p domain.
+ * \p suspend is used for early termination of the enqueuing process.
*/
void
mono_gc_finalize_domain (MonoDomain *domain)
* Appdomain handling
*/
-void
-mono_gc_set_current_thread_appdomain (MonoDomain *domain)
-{
- SgenThreadInfo *info = mono_thread_info_current ();
-
- /* Could be called from sgen_thread_unregister () with a NULL info */
- if (domain) {
- g_assert (info);
- info->client_info.stopped_domain = domain;
- }
-}
-
static gboolean
need_remove_object_for_domain (GCObject *start, MonoDomain *domain)
{
* Allocation
*/
-static gboolean alloc_events = FALSE;
-
-void
-mono_gc_enable_alloc_events (void)
-{
- alloc_events = TRUE;
-}
-
void*
mono_gc_alloc_obj (MonoVTable *vtable, size_t size)
{
MonoObject *obj = sgen_alloc_obj (vtable, size);
- if (G_UNLIKELY (alloc_events)) {
+ if (G_UNLIKELY (mono_profiler_events & MONO_PROFILE_ALLOCATIONS)) {
if (obj)
mono_profiler_allocation (obj);
}
{
MonoObject *obj = sgen_alloc_obj_pinned (vtable, size);
- if (G_UNLIKELY (alloc_events)) {
+ if (G_UNLIKELY (mono_profiler_events & MONO_PROFILE_ALLOCATIONS)) {
if (obj)
mono_profiler_allocation (obj);
}
{
MonoObject *obj = sgen_alloc_obj_mature (vtable, size);
- if (G_UNLIKELY (alloc_events)) {
+ if (G_UNLIKELY (mono_profiler_events & MONO_PROFILE_ALLOCATIONS)) {
if (obj)
mono_profiler_allocation (obj);
}
return obj;
}
+/**
+ * mono_gc_alloc_fixed:
+ */
void*
mono_gc_alloc_fixed (size_t size, MonoGCDescriptor descr, MonoGCRootSource source, const char *msg)
{
return res;
}
+/**
+ * mono_gc_free_fixed:
+ */
void
mono_gc_free_fixed (void* addr)
{
static gboolean use_managed_allocator = TRUE;
#ifdef MANAGED_ALLOCATION
-
-#if defined(HAVE_KW_THREAD) || defined(TARGET_OSX) || defined(TARGET_WIN32) || defined(TARGET_ANDROID) || defined(TARGET_IOS)
-
// Cache the SgenThreadInfo pointer in a local 'var'.
#define EMIT_TLS_ACCESS_VAR(mb, var) \
do { \
mono_mb_emit_byte ((mb), CEE_LDIND_I); \
} while (0)
-#else
-#define EMIT_TLS_ACCESS_VAR(mb, _var) do { g_error ("sgen is not supported when using --with-tls=pthread.\n"); } while (0)
-#define EMIT_TLS_ACCESS_NEXT_ADDR(mb, _var) do { g_error ("sgen is not supported when using --with-tls=pthread.\n"); } while (0)
-#define EMIT_TLS_ACCESS_TEMP_END(mb, _var) do { g_error ("sgen is not supported when using --with-tls=pthread.\n"); } while (0)
-#define EMIT_TLS_ACCESS_IN_CRITICAL_REGION_ADDR(mb, _var) do { g_error ("sgen is not supported when using --with-tls=pthread.\n"); } while (0)
-
-#endif
-
/* FIXME: Do this in the JIT, where specialized allocation sequences can be created
* for each class. This is currently not easy to do, as it is hard to generate basic
* blocks + branches, but it is easy with the linear IL codebase.
static MonoMethod*
create_allocator (int atype, ManagedAllocatorVariant variant)
{
- int p_var, size_var, thread_var G_GNUC_UNUSED;
+ int p_var, size_var, real_size_var, thread_var G_GNUC_UNUSED;
gboolean slowpath = variant == MANAGED_ALLOCATOR_SLOW_PATH;
- guint32 slowpath_branch, max_size_branch;
+ guint32 fastpath_branch, max_size_branch, no_oom_branch;
MonoMethodBuilder *mb;
MonoMethod *res;
MonoMethodSignature *csig;
mono_mb_emit_i4 (mb, MONO_MEMORY_BARRIER_NONE);
#endif
+ if (nursery_canaries_enabled ()) {
+ real_size_var = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
+ mono_mb_emit_ldloc (mb, size_var);
+ mono_mb_emit_stloc(mb, real_size_var);
+ }
+ else
+ real_size_var = size_var;
+
/* size += ALLOC_ALIGN - 1; */
mono_mb_emit_ldloc (mb, size_var);
mono_mb_emit_icon (mb, SGEN_ALLOC_ALIGN - 1);
mono_mb_emit_ldloc (mb, size_var);
mono_mb_emit_byte (mb, CEE_CONV_I);
mono_mb_emit_byte (mb, CEE_ADD);
+
+ if (nursery_canaries_enabled ()) {
+ mono_mb_emit_icon (mb, CANARY_SIZE);
+ mono_mb_emit_byte (mb, CEE_ADD);
+ }
mono_mb_emit_stloc (mb, new_next_var);
/* if (G_LIKELY (new_next < tlab_temp_end)) */
mono_mb_emit_ldloc (mb, new_next_var);
EMIT_TLS_ACCESS_TEMP_END (mb, thread_var);
- slowpath_branch = mono_mb_emit_short_branch (mb, MONO_CEE_BLT_UN_S);
+ fastpath_branch = mono_mb_emit_short_branch (mb, MONO_CEE_BLT_UN_S);
/* Slowpath */
if (atype != ATYPE_SMALL)
/* FIXME: mono_gc_alloc_obj takes a 'size_t' as an argument, not an int32 */
mono_mb_emit_ldarg (mb, 0);
- mono_mb_emit_ldloc (mb, size_var);
+ mono_mb_emit_ldloc (mb, real_size_var);
if (atype == ATYPE_NORMAL || atype == ATYPE_SMALL) {
mono_mb_emit_icall (mb, mono_gc_alloc_obj);
} else if (atype == ATYPE_VECTOR) {
} else {
g_assert_not_reached ();
}
+
+ /* if (ret == NULL) throw OOM; */
+ mono_mb_emit_byte (mb, CEE_DUP);
+ no_oom_branch = mono_mb_emit_branch (mb, CEE_BRTRUE);
+ mono_mb_emit_exception (mb, "OutOfMemoryException", NULL);
+
+ mono_mb_patch_branch (mb, no_oom_branch);
mono_mb_emit_byte (mb, CEE_RET);
/* Fastpath */
- mono_mb_patch_short_branch (mb, slowpath_branch);
+ mono_mb_patch_short_branch (mb, fastpath_branch);
/* FIXME: Memory barrier */
mono_mb_emit_ldarg (mb, 0);
mono_mb_emit_byte (mb, CEE_STIND_I);
+ /* mark object end with nursery word */
+ if (nursery_canaries_enabled ()) {
+ mono_mb_emit_ldloc (mb, p_var);
+ mono_mb_emit_ldloc (mb, real_size_var);
+ mono_mb_emit_byte (mb, MONO_CEE_ADD);
+ mono_mb_emit_icon8 (mb, (mword) CANARY_STRING);
+ mono_mb_emit_icon (mb, CANARY_SIZE);
+ mono_mb_emit_byte (mb, MONO_CEE_PREFIX1);
+ mono_mb_emit_byte (mb, CEE_CPBLK);
+ }
+
if (atype == ATYPE_VECTOR) {
/* arr->max_length = max_length; */
mono_mb_emit_ldloc (mb, p_var);
#ifdef MANAGED_ALLOCATION
if (collect_before_allocs)
return NULL;
- if (!mono_runtime_has_tls_get ())
- return NULL;
if (klass->instance_size > tlab_size)
return NULL;
if (known_instance_size && ALIGN_TO (klass->instance_size, SGEN_ALLOC_ALIGN) >= SGEN_MAX_SMALL_OBJ_SIZE)
#ifdef MANAGED_ALLOCATION
if (klass->rank != 1)
return NULL;
- if (!mono_runtime_has_tls_get ())
- return NULL;
if (mono_profiler_get_events () & MONO_PROFILE_ALLOCATIONS)
return NULL;
if (has_per_allocation_action)
if (variant == MANAGED_ALLOCATOR_REGULAR && !use_managed_allocator)
return NULL;
- if (variant == MANAGED_ALLOCATOR_REGULAR && !mono_runtime_has_tls_get ())
- return NULL;
-
switch (variant) {
case MANAGED_ALLOCATOR_REGULAR: cache = alloc_method_cache; break;
case MANAGED_ALLOCATOR_SLOW_PATH: cache = slowpath_alloc_method_cache; break;
return FALSE;
}
-/*
- * Cardtable scanning
- */
-
-#define MWORD_MASK (sizeof (mword) - 1)
-
-static inline int
-find_card_offset (mword card)
-{
-/*XXX Use assembly as this generates some pretty bad code */
-#if defined(__i386__) && defined(__GNUC__)
- return (__builtin_ffs (card) - 1) / 8;
-#elif defined(__x86_64__) && defined(__GNUC__)
- return (__builtin_ffsll (card) - 1) / 8;
-#elif defined(__s390x__)
- return (__builtin_ffsll (GUINT64_TO_LE(card)) - 1) / 8;
-#else
- int i;
- guint8 *ptr = (guint8 *) &card;
- for (i = 0; i < sizeof (mword); ++i) {
- if (ptr[i])
- return i;
- }
- return 0;
-#endif
-}
-
-static guint8*
-find_next_card (guint8 *card_data, guint8 *end)
-{
- mword *cards, *cards_end;
- mword card;
-
- while ((((mword)card_data) & MWORD_MASK) && card_data < end) {
- if (*card_data)
- return card_data;
- ++card_data;
- }
-
- if (card_data == end)
- return end;
-
- cards = (mword*)card_data;
- cards_end = (mword*)((mword)end & ~MWORD_MASK);
- while (cards < cards_end) {
- card = *cards;
- if (card)
- return (guint8*)cards + find_card_offset (card);
- ++cards;
- }
-
- card_data = (guint8*)cards_end;
- while (card_data < end) {
- if (*card_data)
- return card_data;
- ++card_data;
- }
-
- return end;
-}
-
#define ARRAY_OBJ_INDEX(ptr,array,elem_size) (((char*)(ptr) - ((char*)(array) + G_STRUCT_OFFSET (MonoArray, vector))) / (elem_size))
gboolean
-sgen_client_cardtable_scan_object (GCObject *obj, mword block_obj_size, guint8 *cards, ScanCopyContext ctx)
+sgen_client_cardtable_scan_object (GCObject *obj, guint8 *cards, ScanCopyContext ctx)
{
MonoVTable *vt = SGEN_LOAD_VTABLE (obj);
MonoClass *klass = vt->klass;
LOOP_HEAD:
#endif
- card_data = find_next_card (card_data, card_data_end);
- for (; card_data < card_data_end; card_data = find_next_card (card_data + 1, card_data_end)) {
+ card_data = sgen_find_next_card (card_data, card_data_end);
+ for (; card_data < card_data_end; card_data = sgen_find_next_card (card_data + 1, card_data_end)) {
size_t index;
size_t idx = (card_data - card_base) + extra_idx;
char *start = (char*)(obj_start + idx * CARD_SIZE_IN_BYTES);
UNLOCK_GC;
done:
- if (G_UNLIKELY (alloc_events))
+ if (G_UNLIKELY (mono_profiler_events & MONO_PROFILE_ALLOCATIONS))
mono_profiler_allocation (&arr->obj);
SGEN_ASSERT (6, SGEN_ALIGN_UP (size) == SGEN_ALIGN_UP (sgen_client_par_object_get_size (vtable, (GCObject*)arr)), "Vector has incorrect size.");
UNLOCK_GC;
done:
- if (G_UNLIKELY (alloc_events))
+ if (G_UNLIKELY (mono_profiler_events & MONO_PROFILE_ALLOCATIONS))
mono_profiler_allocation (&arr->obj);
SGEN_ASSERT (6, SGEN_ALIGN_UP (size) == SGEN_ALIGN_UP (sgen_client_par_object_get_size (vtable, (GCObject*)arr)), "Array has incorrect size.");
UNLOCK_GC;
done:
- if (G_UNLIKELY (alloc_events))
+ if (G_UNLIKELY (mono_profiler_events & MONO_PROFILE_ALLOCATIONS))
mono_profiler_allocation (&str->object);
return str;
}
break;
}
+ case ROOT_DESC_VECTOR: {
+ void **p;
+
+ for (p = start_root; p < end_root; p++) {
+ if (*p)
+ add_profile_gc_root (report, *p, MONO_PROFILE_GC_ROOT_OTHER, 0);
+ }
+ break;
+ }
case ROOT_DESC_USER: {
MonoGCRootMarkFunc marker = (MonoGCRootMarkFunc)sgen_get_user_descriptor_func (desc);
root_report = report;
static void *moved_objects [MOVED_OBJECTS_NUM];
static int moved_objects_idx = 0;
+static SgenPointerQueue moved_objects_queue = SGEN_POINTER_QUEUE_INIT (INTERNAL_MEM_MOVED_OBJECT);
+
void
mono_sgen_register_moved_object (void *obj, void *destination)
{
- g_assert (mono_profiler_events & MONO_PROFILE_GC_MOVES);
+ /*
+ * This function can be called from SGen's worker threads. We want to try
+ * and avoid exposing those threads to the profiler API, so queue up move
+ * events and send them later when the main GC thread calls
+ * mono_sgen_gc_event_moves ().
+ *
+ * TODO: Once SGen has multiple worker threads, we need to switch to a
+ * lock-free data structure for the queue as multiple threads will be
+ * adding to it at the same time.
+ */
+ if (sgen_workers_is_worker_thread (mono_native_thread_id_get ())) {
+ sgen_pointer_queue_add (&moved_objects_queue, obj);
+ sgen_pointer_queue_add (&moved_objects_queue, destination);
+ } else {
+ if (moved_objects_idx == MOVED_OBJECTS_NUM) {
+ mono_profiler_gc_moves (moved_objects, moved_objects_idx);
+ moved_objects_idx = 0;
+ }
- if (moved_objects_idx == MOVED_OBJECTS_NUM) {
- mono_profiler_gc_moves (moved_objects, moved_objects_idx);
- moved_objects_idx = 0;
+ moved_objects [moved_objects_idx++] = obj;
+ moved_objects [moved_objects_idx++] = destination;
}
- moved_objects [moved_objects_idx++] = obj;
- moved_objects [moved_objects_idx++] = destination;
}
void
mono_sgen_gc_event_moves (void)
{
+ while (!sgen_pointer_queue_is_empty (&moved_objects_queue)) {
+ void *dst = sgen_pointer_queue_pop (&moved_objects_queue);
+ void *src = sgen_pointer_queue_pop (&moved_objects_queue);
+
+ mono_sgen_register_moved_object (src, dst);
+ }
+
if (moved_objects_idx) {
mono_profiler_gc_moves (moved_objects, moved_objects_idx);
moved_objects_idx = 0;
/**
* mono_gc_walk_heap:
- * @flags: flags for future use
- * @callback: a function pointer called for each object in the heap
- * @data: a user data pointer that is passed to callback
- *
- * This function can be used to iterate over all the live objects in the heap:
- * for each object, @callback is invoked, providing info about the object's
+ * \param flags flags for future use
+ * \param callback a function pointer called for each object in the heap
+ * \param data a user data pointer that is passed to callback
+ * This function can be used to iterate over all the live objects in the heap;
+ * for each object, \p callback is invoked, providing info about the object's
* location in memory, its class, its size and the objects it references.
- * For each referenced object it's offset from the object address is
+ * For each referenced object its offset from the object address is
* reported in the offsets array.
* The object references may be buffered, so the callback may be invoked
* multiple times for the same object: in all but the first call, the size
* argument will be zero.
- * Note that this function can be only called in the #MONO_GC_EVENT_PRE_START_WORLD
+ * Note that this function can be only called in the \c MONO_GC_EVENT_PRE_START_WORLD
* profiler event handler.
- *
- * Returns: a non-zero value if the GC doesn't support heap walking
+ * \returns a non-zero value if the GC doesn't support heap walking
*/
int
mono_gc_walk_heap (int flags, MonoGCReferences callback, void *data)
size_t stsize = 0;
guint8 *staddr = NULL;
-#ifndef HAVE_KW_THREAD
- g_assert (!mono_native_tls_get_value (thread_info_key));
- mono_native_tls_set_value (thread_info_key, info);
-#else
- sgen_thread_info = info;
-#endif
+ mono_tls_set_sgen_thread_info (info);
info->client_info.skip = 0;
- info->client_info.stopped_ip = NULL;
- info->client_info.stopped_domain = NULL;
info->client_info.stack_start = NULL;
info->client_info.signal = 0;
#endif
- /* On win32, stack_start_limit should be 0, since the stack can grow dynamically */
mono_thread_info_get_stack_bounds (&staddr, &stsize);
if (staddr) {
-#ifndef HOST_WIN32
info->client_info.stack_start_limit = staddr;
-#endif
info->client_info.stack_end = staddr + stsize;
} else {
gsize stack_bottom = (gsize)stack_bottom_fallback;
{
MonoNativeThreadId tid;
-#ifndef HAVE_KW_THREAD
- mono_native_tls_set_value (thread_info_key, NULL);
-#else
- sgen_thread_info = NULL;
-#endif
+ mono_tls_set_sgen_thread_info (NULL);
tid = mono_thread_info_get_tid (p);
LOCK_GC;
info->client_info.gc_disabled = skip;
UNLOCK_GC;
-}
-static gboolean
-is_critical_method (MonoMethod *method)
-{
- return mono_runtime_is_critical_method (method) || sgen_is_critical_method (method);
+ if (skip) {
+ /* If we skip scanning a thread with a non-empty handle stack, we may move an
+ * object but fail to update the reference in the handle.
+ */
+ HandleStack *stack = info->client_info.info.handle_stack;
+ g_assert (stack == NULL || mono_handle_stack_is_empty (stack));
+ }
}
static gboolean
mono_thread_detach_internal (mono_thread_internal_current ());
}
+/**
+ * mono_gc_register_thread:
+ */
gboolean
mono_gc_register_thread (void *baseptr)
{
return mono_thread_info_attach (baseptr) != NULL;
}
+/**
+ * mono_gc_is_gc_thread:
+ */
gboolean
mono_gc_is_gc_thread (void)
{
return obj;
}
+typedef struct {
+ void **start_nursery;
+ void **end_nursery;
+} PinHandleStackInteriorPtrData;
+
+/* Called when we're scanning the handle stack imprecisely and we encounter a pointer into the
+ middle of an object.
+ */
+static void
+pin_handle_stack_interior_ptrs (void **ptr_slot, void *user_data)
+{
+ PinHandleStackInteriorPtrData *ud = (PinHandleStackInteriorPtrData *)user_data;
+ sgen_conservatively_pin_objects_from (ptr_slot, ptr_slot+1, ud->start_nursery, ud->end_nursery, PIN_TYPE_STACK);
+}
+
+
/*
* Mark from thread stacks and registers.
*/
binary_protocol_scan_stack ((gpointer)mono_thread_info_get_tid (info), info->client_info.stack_start, info->client_info.stack_end, skip_reason);
- if (skip_reason)
+ if (skip_reason) {
+ if (precise) {
+ /* If we skip a thread with a non-empty handle stack and then it
+ * resumes running we may potentially move an object but fail to
+ * update the reference in the handle.
+ */
+ HandleStack *stack = info->client_info.info.handle_stack;
+ g_assert (stack == NULL || mono_handle_stack_is_empty (stack));
+ }
continue;
+ }
g_assert (info->client_info.stack_start);
g_assert (info->client_info.stack_end);
}
}
}
- if (precise && info->client_info.info.handle_stack) {
- mono_handle_stack_scan ((HandleStack*)info->client_info.info.handle_stack, (GcScanFunc)ctx.ops->copy_or_mark_object, ctx.queue);
+ if (info->client_info.info.handle_stack) {
+ /*
+ Make two passes over the handle stack. On the imprecise pass, pin all
+ objects where the handle points into the interior of the object. On the
+ precise pass, copy or mark all the objects that have handles to the
+ beginning of the object.
+ */
+ if (precise)
+ mono_handle_stack_scan ((HandleStack*)info->client_info.info.handle_stack, (GcScanFunc)ctx.ops->copy_or_mark_object, ctx.queue, precise);
+ else {
+ PinHandleStackInteriorPtrData ud = { .start_nursery = start_nursery,
+ .end_nursery = end_nursery,
+ };
+ mono_handle_stack_scan ((HandleStack*)info->client_info.info.handle_stack, pin_handle_stack_interior_ptrs, &ud, precise);
+ }
}
} FOREACH_THREAD_END
}
int
mono_gc_pthread_create (pthread_t *new_thread, const pthread_attr_t *attr, void *(*start_routine)(void *), void *arg)
{
	int result;

	/* Serialize thread creation with the runtime's join lock. */
	mono_threads_join_lock ();
	result = pthread_create (new_thread, attr, start_routine, arg);
	mono_threads_join_unlock ();

	return result;
}
#endif
return 1;
}
-void
-mono_gc_enable_events (void)
-{
-}
-
const char *
mono_gc_get_gc_name (void)
{
/**
* mono_gchandle_is_in_domain:
- * @gchandle: a GCHandle's handle.
- * @domain: An application domain.
- *
- * Returns: TRUE if the object wrapped by the @gchandle belongs to the specific @domain.
+ * \param gchandle a GCHandle's handle.
+ * \param domain An application domain.
+ * \returns TRUE if the object wrapped by the \p gchandle belongs to the specific \p domain.
*/
gboolean
mono_gchandle_is_in_domain (guint32 gchandle, MonoDomain *domain)
/**
* mono_gchandle_free_domain:
- * @unloading: domain that is unloading
+ * \param unloading domain that is unloading
*
* Function used internally to cleanup any GC handle for objects belonging
* to the specified domain during appdomain unload.
{
switch (type) {
case INTERNAL_MEM_EPHEMERON_LINK: return "ephemeron-link";
+ case INTERNAL_MEM_MOVED_OBJECT: return "moved-object";
default:
return NULL;
}
cb.thread_detach = sgen_thread_detach;
cb.thread_unregister = sgen_thread_unregister;
cb.thread_attach = sgen_thread_attach;
- cb.mono_method_is_critical = (gboolean (*)(void *))is_critical_method;
cb.mono_thread_in_critical_region = thread_in_critical_region;
+ cb.ip_in_critical_region = ip_in_critical_region;
mono_threads_init (&cb, sizeof (SgenThreadInfo));
mono_sgen_init_stw ();
-#ifndef HAVE_KW_THREAD
- mono_native_tls_alloc (&thread_info_key, NULL);
-#if defined(TARGET_OSX) || defined(TARGET_WIN32) || defined(TARGET_ANDROID) || defined(TARGET_IOS)
- /*
- * CEE_MONO_TLS requires the tls offset, not the key, so the code below only works on darwin,
- * where the two are the same.
- */
- mono_tls_key_set_offset (TLS_KEY_SGEN_THREAD_INFO, thread_info_key);
-#endif
-#else
- {
- int tls_offset = -1;
- MONO_THREAD_VAR_OFFSET (sgen_thread_info, tls_offset);
- mono_tls_key_set_offset (TLS_KEY_SGEN_THREAD_INFO, tls_offset);
- }
-#endif
-
- /*
- * This needs to happen before any internal allocations because
- * it inits the small id which is required for hazard pointer
- * operations.
- */
- sgen_os_init ();
+ mono_tls_init_gc_keys ();
mono_gc_register_thread (&dummy);
}
static gboolean gc_inited;
+/**
+ * mono_gc_base_init:
+ */
void
mono_gc_base_init (void)
{
mono_counters_init ();
+#ifndef HOST_WIN32
+ mono_w32handle_init ();
+#endif
+
#ifdef HEAVY_STATISTICS
mono_counters_register ("los marked cards", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &los_marked_cards);
mono_counters_register ("los array cards scanned ", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &los_array_cards);
sgen_gc_init ();
- if (nursery_canaries_enabled ())
- sgen_set_use_managed_allocator (FALSE);
-
-#if defined(HAVE_KW_THREAD)
- /* This can happen with using libmonosgen.so */
- if (mono_tls_key_get_offset (TLS_KEY_SGEN_THREAD_INFO) == -1)
- sgen_set_use_managed_allocator (FALSE);
-#endif
-
gc_inited = TRUE;
}
void
mono_gc_base_cleanup (void)
{
	sgen_thread_pool_shutdown (major_collector.get_sweep_pool ());
	sgen_workers_shutdown ();

	/* Every queued move event should have been consumed by now. */
	g_assert (sgen_pointer_queue_is_empty (&moved_objects_queue));
}
gboolean