-/*
- * sgen-mono.c: SGen features specific to Mono.
+/**
+ * \file
+ * SGen features specific to Mono.
*
* Copyright (C) 2014 Xamarin Inc
*
#include "sgen/sgen-client.h"
#include "sgen/sgen-cardtable.h"
#include "sgen/sgen-pinning.h"
-#include "sgen/sgen-thread-pool.h"
+#include "sgen/sgen-workers.h"
#include "metadata/marshal.h"
#include "metadata/method-builder.h"
#include "metadata/abi-details.h"
gpointer stack_start = &stack_start;
SgenThreadInfo *info = mono_thread_info_current ();
- if (ptr >= stack_start && ptr < (gpointer)info->client_info.stack_end)
+ if (ptr >= stack_start && ptr < (gpointer)info->client_info.info.stack_end)
return TRUE;
return FALSE;
}
/**
* mono_gc_wbarrier_object_copy:
*
- * Write barrier to call when obj is the result of a clone or copy of an object.
+ * Write barrier to call when \p obj is the result of a clone or copy of an object.
*/
void
mono_gc_wbarrier_object_copy (MonoObject* obj, MonoObject *src)
sgen_get_remset ()->wbarrier_object_copy (obj, src);
}
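/*
 * Illustrative sketch (not part of this change): a clone-style helper is
 * expected to copy the object body and then invoke the barrier above so the
 * remembered set / card table covers any references in the copy. The helper
 * name is hypothetical; mono_gc_memmove_aligned is the existing copy helper
 * assumed here.
 */
static void
clone_and_remember_example (MonoObject *dst, MonoObject *src, size_t size)
{
	/* Copy everything after the object header, then record the copy. */
	mono_gc_memmove_aligned ((char*)dst + sizeof (MonoObject), (char*)src + sizeof (MonoObject), size - sizeof (MonoObject));
	mono_gc_wbarrier_object_copy (dst, src);
}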
+/**
+ * mono_gc_wbarrier_set_arrayref:
+ */
void
mono_gc_wbarrier_set_arrayref (MonoArray *arr, gpointer slot_ptr, MonoObject* value)
{
sgen_get_remset ()->wbarrier_set_field ((GCObject*)arr, slot_ptr, value);
}
+/**
+ * mono_gc_wbarrier_set_field:
+ */
void
mono_gc_wbarrier_set_field (MonoObject *obj, gpointer field_ptr, MonoObject* value)
{
}
void
-mono_gc_wbarrier_value_copy_bitmap (gpointer _dest, gpointer _src, int size, unsigned bitmap)
+mono_gc_wbarrier_range_copy (gpointer _dest, gpointer _src, int size)
{
- sgen_wbarrier_value_copy_bitmap (_dest, _src, size, bitmap);
+ sgen_wbarrier_range_copy (_dest, _src, size);
+}
+
+void*
+mono_gc_get_range_copy_func (void)
+{
+ return sgen_get_remset ()->wbarrier_range_copy;
}
int
return sgen_has_managed_allocator ();
}
-static gboolean
-ip_in_critical_region (MonoDomain *domain, gpointer ip)
-{
- MonoJitInfo *ji;
- MonoMethod *method;
-
- /*
- * We pass false for 'try_aot' so this becomes async safe.
- * It won't find aot methods whose jit info is not yet loaded,
- * so we preload their jit info in the JIT.
- */
- ji = mono_jit_info_table_find_internal (domain, ip, FALSE, FALSE);
- if (!ji)
- return FALSE;
-
- method = mono_jit_info_get_method (ji);
-
- return mono_runtime_is_critical_method (method) || sgen_is_critical_method (method);
-}
-
gboolean
mono_gc_is_critical_method (MonoMethod *method)
{
return sgen_is_critical_method (method);
}
-#ifndef DISABLE_JIT
+#ifdef ENABLE_ILGEN
static void
emit_nursery_check (MonoMethodBuilder *mb, int *nursery_check_return_labels, gboolean is_concurrent)
else
mb = mono_mb_new (mono_defaults.object_class, "wbarrier_noconc", MONO_WRAPPER_WRITE_BARRIER);
-#ifndef DISABLE_JIT
+#ifdef ENABLE_ILGEN
#ifdef MANAGED_WBARRIER
emit_nursery_check (mb, nursery_check_labels, is_concurrent);
/*
mono_gc_run_finalize (obj, NULL);
}
+/**
+ * mono_gc_invoke_finalizers:
+ */
int
mono_gc_invoke_finalizers (void)
{
return sgen_gc_invoke_finalizers ();
}
+/**
+ * mono_gc_pending_finalizers:
+ */
MonoBoolean
mono_gc_pending_finalizers (void)
{
/**
* mono_gc_finalizers_for_domain:
- * @domain: the unloading appdomain
- * @out_array: output array
- * @out_size: size of output array
- *
- * Enqueue for finalization all objects that belong to the unloading appdomain @domain
- * @suspend is used for early termination of the enqueuing process.
+ * \param domain the unloading appdomain
+ * Enqueue for finalization all objects that belong to the unloading appdomain \p domain.
*/
void
mono_gc_finalize_domain (MonoDomain *domain)
sgen_clear_nursery_fragments ();
+ FOREACH_THREAD (info) {
+ mono_handle_stack_free_domain ((HandleStack*)info->client_info.info.handle_stack, domain);
+ } FOREACH_THREAD_END
+
if (sgen_mono_xdomain_checks && domain != mono_get_root_domain ()) {
sgen_scan_for_registered_roots_in_domain (domain, ROOT_TYPE_NORMAL);
sgen_scan_for_registered_roots_in_domain (domain, ROOT_TYPE_WBARRIER);
{
MonoObject *obj = sgen_alloc_obj (vtable, size);
- if (G_UNLIKELY (mono_profiler_events & MONO_PROFILE_ALLOCATIONS)) {
- if (obj)
- mono_profiler_allocation (obj);
- }
+ if (G_UNLIKELY (mono_profiler_allocations_enabled ()) && obj)
+ MONO_PROFILER_RAISE (gc_allocation, (obj));
return obj;
}
{
MonoObject *obj = sgen_alloc_obj_pinned (vtable, size);
- if (G_UNLIKELY (mono_profiler_events & MONO_PROFILE_ALLOCATIONS)) {
- if (obj)
- mono_profiler_allocation (obj);
- }
+ if (G_UNLIKELY (mono_profiler_allocations_enabled ()) && obj)
+ MONO_PROFILER_RAISE (gc_allocation, (obj));
return obj;
}
{
MonoObject *obj = sgen_alloc_obj_mature (vtable, size);
- if (G_UNLIKELY (mono_profiler_events & MONO_PROFILE_ALLOCATIONS)) {
- if (obj)
- mono_profiler_allocation (obj);
- }
+ if (G_UNLIKELY (mono_profiler_allocations_enabled ()) && obj)
+ MONO_PROFILER_RAISE (gc_allocation, (obj));
return obj;
}
+/**
+ * mono_gc_alloc_fixed:
+ */
void*
mono_gc_alloc_fixed (size_t size, MonoGCDescriptor descr, MonoGCRootSource source, const char *msg)
{
return res;
}
+/**
+ * mono_gc_free_fixed:
+ */
void
mono_gc_free_fixed (void* addr)
{
static MonoMethod* alloc_method_cache [ATYPE_NUM];
static MonoMethod* slowpath_alloc_method_cache [ATYPE_NUM];
+static MonoMethod* profiler_alloc_method_cache [ATYPE_NUM];
static gboolean use_managed_allocator = TRUE;
#ifdef MANAGED_ALLOCATION
{
int p_var, size_var, real_size_var, thread_var G_GNUC_UNUSED;
gboolean slowpath = variant == MANAGED_ALLOCATOR_SLOW_PATH;
+ gboolean profiler = variant == MANAGED_ALLOCATOR_PROFILER;
guint32 fastpath_branch, max_size_branch, no_oom_branch;
MonoMethodBuilder *mb;
MonoMethod *res;
mono_register_jit_icall (mono_gc_alloc_obj, "mono_gc_alloc_obj", mono_create_icall_signature ("object ptr int"), FALSE);
mono_register_jit_icall (mono_gc_alloc_vector, "mono_gc_alloc_vector", mono_create_icall_signature ("object ptr int int"), FALSE);
mono_register_jit_icall (mono_gc_alloc_string, "mono_gc_alloc_string", mono_create_icall_signature ("object ptr int int32"), FALSE);
+ mono_register_jit_icall (mono_profiler_raise_gc_allocation, "mono_profiler_raise_gc_allocation", mono_create_icall_signature ("void object"), FALSE);
registered = TRUE;
}
if (atype == ATYPE_SMALL) {
- name = slowpath ? "SlowAllocSmall" : "AllocSmall";
+ name = slowpath ? "SlowAllocSmall" : (profiler ? "ProfilerAllocSmall" : "AllocSmall");
} else if (atype == ATYPE_NORMAL) {
- name = slowpath ? "SlowAlloc" : "Alloc";
+ name = slowpath ? "SlowAlloc" : (profiler ? "ProfilerAlloc" : "Alloc");
} else if (atype == ATYPE_VECTOR) {
- name = slowpath ? "SlowAllocVector" : "AllocVector";
+ name = slowpath ? "SlowAllocVector" : (profiler ? "ProfilerAllocVector" : "AllocVector");
} else if (atype == ATYPE_STRING) {
- name = slowpath ? "SlowAllocString" : "AllocString";
+ name = slowpath ? "SlowAllocString" : (profiler ? "ProfilerAllocString" : "AllocString");
} else {
g_assert_not_reached ();
}
mb = mono_mb_new (mono_defaults.object_class, name, MONO_WRAPPER_ALLOC);
-#ifndef DISABLE_JIT
+#ifdef ENABLE_ILGEN
if (slowpath) {
switch (atype) {
case ATYPE_NORMAL:
mono_mb_emit_ldloc (mb, p_var);
done:
+
+ /*
+ * It's important that we do this outside of the critical region as we
+ * will be invoking arbitrary code.
+ */
+ if (profiler) {
+ /*
+ * if (G_UNLIKELY (*&mono_profiler_state.gc_allocation_count)) {
+ * mono_profiler_raise_gc_allocation (p);
+ * }
+ */
+
+ mono_mb_emit_byte (mb, MONO_CUSTOM_PREFIX);
+ mono_mb_emit_byte (mb, CEE_MONO_LDPTR_PROFILER_ALLOCATION_COUNT);
+ mono_mb_emit_byte (mb, CEE_LDIND_U4);
+
+ int prof_br = mono_mb_emit_short_branch (mb, CEE_BRFALSE_S);
+
+ mono_mb_emit_byte (mb, MONO_CUSTOM_PREFIX);
+ mono_mb_emit_byte (mb, CEE_MONO_NOT_TAKEN);
+ mono_mb_emit_byte (mb, CEE_DUP);
+ mono_mb_emit_icall (mb, mono_profiler_raise_gc_allocation);
+
+ mono_mb_patch_short_branch (mb, prof_br);
+ }
+
mono_mb_emit_byte (mb, CEE_RET);
#endif
info->d.alloc.gc_name = "sgen";
info->d.alloc.alloc_type = atype;
-#ifndef DISABLE_JIT
+#ifdef ENABLE_ILGEN
mb->init_locals = FALSE;
#endif
mono_gc_get_managed_allocator (MonoClass *klass, gboolean for_box, gboolean known_instance_size)
{
#ifdef MANAGED_ALLOCATION
+ ManagedAllocatorVariant variant = mono_profiler_allocations_enabled () ?
+ MANAGED_ALLOCATOR_PROFILER : MANAGED_ALLOCATOR_REGULAR;
+
if (collect_before_allocs)
return NULL;
if (klass->instance_size > tlab_size)
return NULL;
if (klass->rank)
return NULL;
- if (mono_profiler_get_events () & MONO_PROFILE_ALLOCATIONS)
- return NULL;
if (klass->byval_arg.type == MONO_TYPE_STRING)
- return mono_gc_get_managed_allocator_by_type (ATYPE_STRING, MANAGED_ALLOCATOR_REGULAR);
+ return mono_gc_get_managed_allocator_by_type (ATYPE_STRING, variant);
/* Generic classes have dynamic field and can go above MAX_SMALL_OBJ_SIZE. */
if (known_instance_size)
- return mono_gc_get_managed_allocator_by_type (ATYPE_SMALL, MANAGED_ALLOCATOR_REGULAR);
+ return mono_gc_get_managed_allocator_by_type (ATYPE_SMALL, variant);
else
- return mono_gc_get_managed_allocator_by_type (ATYPE_NORMAL, MANAGED_ALLOCATOR_REGULAR);
+ return mono_gc_get_managed_allocator_by_type (ATYPE_NORMAL, variant);
#else
return NULL;
#endif
#ifdef MANAGED_ALLOCATION
if (klass->rank != 1)
return NULL;
- if (mono_profiler_get_events () & MONO_PROFILE_ALLOCATIONS)
- return NULL;
if (has_per_allocation_action)
return NULL;
g_assert (!mono_class_has_finalizer (klass) && !mono_class_is_marshalbyref (klass));
- return mono_gc_get_managed_allocator_by_type (ATYPE_VECTOR, MANAGED_ALLOCATOR_REGULAR);
+ return mono_gc_get_managed_allocator_by_type (ATYPE_VECTOR, mono_profiler_allocations_enabled () ?
+ MANAGED_ALLOCATOR_PROFILER : MANAGED_ALLOCATOR_REGULAR);
#else
return NULL;
#endif
MonoMethod *res;
MonoMethod **cache;
- if (variant == MANAGED_ALLOCATOR_REGULAR && !use_managed_allocator)
+ if (variant != MANAGED_ALLOCATOR_SLOW_PATH && !use_managed_allocator)
return NULL;
switch (variant) {
case MANAGED_ALLOCATOR_REGULAR: cache = alloc_method_cache; break;
case MANAGED_ALLOCATOR_SLOW_PATH: cache = slowpath_alloc_method_cache; break;
+ case MANAGED_ALLOCATOR_PROFILER: cache = profiler_alloc_method_cache; break;
default: g_assert_not_reached (); break;
}
int i;
for (i = 0; i < ATYPE_NUM; ++i)
- if (method == alloc_method_cache [i] || method == slowpath_alloc_method_cache [i])
+ if (method == alloc_method_cache [i] || method == slowpath_alloc_method_cache [i] || method == profiler_alloc_method_cache [i])
return TRUE;
return FALSE;
}
int i;
for (i = 0; i < ATYPE_NUM; ++i)
- if (alloc_method_cache [i] || slowpath_alloc_method_cache [i])
+ if (alloc_method_cache [i] || slowpath_alloc_method_cache [i] || profiler_alloc_method_cache [i])
return TRUE;
return FALSE;
}
UNLOCK_GC;
done:
- if (G_UNLIKELY (mono_profiler_events & MONO_PROFILE_ALLOCATIONS))
- mono_profiler_allocation (&arr->obj);
+ if (G_UNLIKELY (mono_profiler_allocations_enabled ()))
+ MONO_PROFILER_RAISE (gc_allocation, (&arr->obj));
SGEN_ASSERT (6, SGEN_ALIGN_UP (size) == SGEN_ALIGN_UP (sgen_client_par_object_get_size (vtable, (GCObject*)arr)), "Vector has incorrect size.");
return arr;
UNLOCK_GC;
done:
- if (G_UNLIKELY (mono_profiler_events & MONO_PROFILE_ALLOCATIONS))
- mono_profiler_allocation (&arr->obj);
+ if (G_UNLIKELY (mono_profiler_allocations_enabled ()))
+ MONO_PROFILER_RAISE (gc_allocation, (&arr->obj));
SGEN_ASSERT (6, SGEN_ALIGN_UP (size) == SGEN_ALIGN_UP (sgen_client_par_object_get_size (vtable, (GCObject*)arr)), "Array has incorrect size.");
return arr;
UNLOCK_GC;
done:
- if (G_UNLIKELY (mono_profiler_events & MONO_PROFILE_ALLOCATIONS))
- mono_profiler_allocation (&str->object);
+ if (G_UNLIKELY (mono_profiler_allocations_enabled ()))
+ MONO_PROFILER_RAISE (gc_allocation, (&str->object));
return str;
}
{
if (!report->count)
return;
- mono_profiler_gc_roots (report->count, report->objects, report->root_types, report->extra_info);
+ MONO_PROFILER_RAISE (gc_roots, ((MonoObject **) report->objects, (MonoProfilerGCRootType *) report->root_types, report->extra_info, report->count));
report->count = 0;
}
void
sgen_client_nursery_objects_pinned (void **definitely_pinned, int count)
{
- if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS) {
+ if (MONO_PROFILER_ENABLED (gc_roots)) {
GCRootReport report;
int idx;
report.count = 0;
for (idx = 0; idx < count; ++idx)
- add_profile_gc_root (&report, definitely_pinned [idx], MONO_PROFILE_GC_ROOT_PINNING | MONO_PROFILE_GC_ROOT_MISC, 0);
+ add_profile_gc_root (&report, definitely_pinned [idx], MONO_PROFILER_GC_ROOT_PINNING | MONO_PROFILER_GC_ROOT_MISC, 0);
notify_gc_roots (&report);
}
}
void *obj = queue->data [i];
if (!obj)
continue;
- add_profile_gc_root (&report, obj, MONO_PROFILE_GC_ROOT_FINALIZER, 0);
+ add_profile_gc_root (&report, obj, MONO_PROFILER_GC_ROOT_FINALIZER, 0);
}
notify_gc_roots (&report);
}
single_arg_report_root (MonoObject **obj, void *gc_data)
{
if (*obj)
- add_profile_gc_root (root_report, *obj, MONO_PROFILE_GC_ROOT_OTHER, 0);
+ add_profile_gc_root (root_report, *obj, MONO_PROFILER_GC_ROOT_OTHER, 0);
}
static void
desc >>= ROOT_DESC_TYPE_SHIFT;
while (desc) {
if ((desc & 1) && *start_root) {
- add_profile_gc_root (report, *start_root, MONO_PROFILE_GC_ROOT_OTHER, 0);
+ add_profile_gc_root (report, *start_root, MONO_PROFILER_GC_ROOT_OTHER, 0);
}
desc >>= 1;
start_root++;
void **objptr = start_run;
while (bmap) {
if ((bmap & 1) && *objptr) {
- add_profile_gc_root (report, *objptr, MONO_PROFILE_GC_ROOT_OTHER, 0);
+ add_profile_gc_root (report, *objptr, MONO_PROFILER_GC_ROOT_OTHER, 0);
}
bmap >>= 1;
++objptr;
for (p = start_root; p < end_root; p++) {
if (*p)
- add_profile_gc_root (report, *p, MONO_PROFILE_GC_ROOT_OTHER, 0);
+ add_profile_gc_root (report, *p, MONO_PROFILER_GC_ROOT_OTHER, 0);
}
break;
}
void
sgen_client_collecting_minor (SgenPointerQueue *fin_ready_queue, SgenPointerQueue *critical_fin_queue)
{
- if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS)
+ if (MONO_PROFILER_ENABLED (gc_roots))
report_registered_roots ();
- if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS)
+
+ if (MONO_PROFILER_ENABLED (gc_roots))
report_finalizer_roots (fin_ready_queue, critical_fin_queue);
}
void
sgen_client_collecting_major_1 (void)
{
- profile_roots = mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS;
+ profile_roots = MONO_PROFILER_ENABLED (gc_roots);
memset (&major_root_report, 0, sizeof (GCRootReport));
}
sgen_client_pinned_los_object (GCObject *obj)
{
if (profile_roots)
- add_profile_gc_root (&major_root_report, (char*)obj, MONO_PROFILE_GC_ROOT_PINNING | MONO_PROFILE_GC_ROOT_MISC, 0);
+ add_profile_gc_root (&major_root_report, (char*)obj, MONO_PROFILER_GC_ROOT_PINNING | MONO_PROFILER_GC_ROOT_MISC, 0);
}
void
if (profile_roots)
notify_gc_roots (&major_root_report);
- if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS)
+ if (MONO_PROFILER_ENABLED (gc_roots))
report_registered_roots ();
}
void
sgen_client_collecting_major_3 (SgenPointerQueue *fin_ready_queue, SgenPointerQueue *critical_fin_queue)
{
- if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS)
+ if (MONO_PROFILER_ENABLED (gc_roots))
report_finalizer_roots (fin_ready_queue, critical_fin_queue);
}
* lock-free data structure for the queue as multiple threads will be
* adding to it at the same time.
*/
- if (sgen_thread_pool_is_thread_pool_thread (mono_native_thread_id_get ())) {
+ if (sgen_workers_is_worker_thread (mono_native_thread_id_get ())) {
sgen_pointer_queue_add (&moved_objects_queue, obj);
sgen_pointer_queue_add (&moved_objects_queue, destination);
} else {
if (moved_objects_idx == MOVED_OBJECTS_NUM) {
- mono_profiler_gc_moves (moved_objects, moved_objects_idx);
+ MONO_PROFILER_RAISE (gc_moves, ((MonoObject **) moved_objects, moved_objects_idx));
moved_objects_idx = 0;
}
}
if (moved_objects_idx) {
- mono_profiler_gc_moves (moved_objects, moved_objects_idx);
+ MONO_PROFILER_RAISE (gc_moves, ((MonoObject **) moved_objects, moved_objects_idx));
moved_objects_idx = 0;
}
}
/**
* mono_gc_walk_heap:
- * @flags: flags for future use
- * @callback: a function pointer called for each object in the heap
- * @data: a user data pointer that is passed to callback
- *
- * This function can be used to iterate over all the live objects in the heap:
- * for each object, @callback is invoked, providing info about the object's
+ * \param flags flags for future use
+ * \param callback a function pointer called for each object in the heap
+ * \param data a user data pointer that is passed to callback
+ * This function can be used to iterate over all the live objects in the heap;
+ * for each object, \p callback is invoked, providing info about the object's
* location in memory, its class, its size and the objects it references.
- * For each referenced object it's offset from the object address is
+ * For each referenced object its offset from the object address is
* reported in the offsets array.
* The object references may be buffered, so the callback may be invoked
* multiple times for the same object: in all but the first call, the size
* argument will be zero.
- * Note that this function can be only called in the #MONO_GC_EVENT_PRE_START_WORLD
+ * Note that this function can only be called in the \c MONO_GC_EVENT_PRE_START_WORLD
* profiler event handler.
- *
- * Returns: a non-zero value if the GC doesn't support heap walking
+ * \returns a non-zero value if the GC doesn't support heap walking
*/
int
mono_gc_walk_heap (int flags, MonoGCReferences callback, void *data)
return &gc_callbacks;
}
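/*
 * Illustrative sketch (not part of this change) of the mono_gc_walk_heap
 * contract documented above: the callback signature is the MonoGCReferences
 * typedef from mono/metadata/mono-gc.h, and 'size' is zero on the buffered
 * follow-up calls for the same object, so it can be used to avoid double
 * counting. The helper name is hypothetical.
 */
static int
count_live_objects_example (MonoObject *obj, MonoClass *klass, uintptr_t size, uintptr_t num, MonoObject **refs, uintptr_t *offsets, void *data)
{
	size_t *counter = (size_t *)data;
	if (size)
		(*counter)++;
	return 0;
}

/* From a MONO_GC_EVENT_PRE_START_WORLD profiler handler one would call:
 *	size_t live = 0;
 *	mono_gc_walk_heap (0, count_live_objects_example, &live);
 */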
-void
-sgen_client_thread_register (SgenThreadInfo* info, void *stack_bottom_fallback)
+gpointer
+mono_gc_thread_attach (SgenThreadInfo *info)
{
- size_t stsize = 0;
- guint8 *staddr = NULL;
+ return sgen_thread_attach (info);
+}
+void
+sgen_client_thread_attach (SgenThreadInfo* info)
+{
mono_tls_set_sgen_thread_info (info);
info->client_info.skip = 0;
info->client_info.signal = 0;
#endif
- mono_thread_info_get_stack_bounds (&staddr, &stsize);
- if (staddr) {
- info->client_info.stack_start_limit = staddr;
- info->client_info.stack_end = staddr + stsize;
- } else {
- gsize stack_bottom = (gsize)stack_bottom_fallback;
- stack_bottom += 4095;
- stack_bottom &= ~4095;
- info->client_info.stack_end = (char*)stack_bottom;
- }
-
memset (&info->client_info.ctx, 0, sizeof (MonoContext));
if (mono_gc_get_gc_callbacks ()->thread_attach_func)
binary_protocol_thread_register ((gpointer)mono_thread_info_get_tid (info));
- SGEN_LOG (3, "registered thread %p (%p) stack end %p", info, (gpointer)mono_thread_info_get_tid (info), info->client_info.stack_end);
+ SGEN_LOG (3, "registered thread %p (%p) stack end %p", info, (gpointer)mono_thread_info_get_tid (info), info->client_info.info.stack_end);
info->client_info.info.handle_stack = mono_handle_stack_alloc ();
}
void
-sgen_client_thread_unregister (SgenThreadInfo *p)
+mono_gc_thread_detach_with_lock (SgenThreadInfo *info)
+{
+ return sgen_thread_detach_with_lock (info);
+}
+
+void
+sgen_client_thread_detach_with_lock (SgenThreadInfo *p)
{
MonoNativeThreadId tid;
}
}
-static gboolean
-thread_in_critical_region (SgenThreadInfo *info)
-{
- return info->client_info.in_critical_region;
-}
-
-static void
-sgen_thread_attach (SgenThreadInfo *info)
-{
- if (mono_gc_get_gc_callbacks ()->thread_attach_func && !info->client_info.runtime_data)
- info->client_info.runtime_data = mono_gc_get_gc_callbacks ()->thread_attach_func ();
-}
-
-static void
-sgen_thread_detach (SgenThreadInfo *p)
-{
- /* If a delegate is passed to native code and invoked on a thread we dont
- * know about, marshal will register it with mono_threads_attach_coop, but
- * we have no way of knowing when that thread goes away. SGen has a TSD
- * so we assume that if the domain is still registered, we can detach
- * the thread
- */
- if (mono_thread_internal_current_is_attached ())
- mono_thread_detach_internal (mono_thread_internal_current ());
-}
-
gboolean
-mono_gc_register_thread (void *baseptr)
+mono_gc_thread_in_critical_region (SgenThreadInfo *info)
{
- return mono_thread_info_attach (baseptr) != NULL;
+ return info->client_info.in_critical_region;
}
+/**
+ * mono_gc_is_gc_thread:
+ */
gboolean
mono_gc_is_gc_thread (void)
{
return obj;
}
+typedef struct {
+ void **start_nursery;
+ void **end_nursery;
+} PinHandleStackInteriorPtrData;
+
+/* Called when we're scanning the handle stack imprecisely and we encounter a pointer into the
+ middle of an object.
+ */
+static void
+pin_handle_stack_interior_ptrs (void **ptr_slot, void *user_data)
+{
+ PinHandleStackInteriorPtrData *ud = (PinHandleStackInteriorPtrData *)user_data;
+ sgen_conservatively_pin_objects_from (ptr_slot, ptr_slot+1, ud->start_nursery, ud->end_nursery, PIN_TYPE_STACK);
+}
+
+
/*
* Mark from thread stacks and registers.
*/
void *aligned_stack_start;
if (info->client_info.skip) {
- SGEN_LOG (3, "Skipping dead thread %p, range: %p-%p, size: %zd", info, info->client_info.stack_start, info->client_info.stack_end, (char*)info->client_info.stack_end - (char*)info->client_info.stack_start);
+ SGEN_LOG (3, "Skipping dead thread %p, range: %p-%p, size: %zd", info, info->client_info.stack_start, info->client_info.info.stack_end, (char*)info->client_info.info.stack_end - (char*)info->client_info.stack_start);
skip_reason = 1;
} else if (info->client_info.gc_disabled) {
- SGEN_LOG (3, "GC disabled for thread %p, range: %p-%p, size: %zd", info, info->client_info.stack_start, info->client_info.stack_end, (char*)info->client_info.stack_end - (char*)info->client_info.stack_start);
+ SGEN_LOG (3, "GC disabled for thread %p, range: %p-%p, size: %zd", info, info->client_info.stack_start, info->client_info.info.stack_end, (char*)info->client_info.info.stack_end - (char*)info->client_info.stack_start);
skip_reason = 2;
} else if (!mono_thread_info_is_live (info)) {
- SGEN_LOG (3, "Skipping non-running thread %p, range: %p-%p, size: %zd (state %x)", info, info->client_info.stack_start, info->client_info.stack_end, (char*)info->client_info.stack_end - (char*)info->client_info.stack_start, info->client_info.info.thread_state);
+ SGEN_LOG (3, "Skipping non-running thread %p, range: %p-%p, size: %zd (state %x)", info, info->client_info.stack_start, info->client_info.info.stack_end, (char*)info->client_info.info.stack_end - (char*)info->client_info.stack_start, info->client_info.info.thread_state);
skip_reason = 3;
} else if (!info->client_info.stack_start) {
SGEN_LOG (3, "Skipping starting or detaching thread %p", info);
skip_reason = 4;
}
- binary_protocol_scan_stack ((gpointer)mono_thread_info_get_tid (info), info->client_info.stack_start, info->client_info.stack_end, skip_reason);
+ binary_protocol_scan_stack ((gpointer)mono_thread_info_get_tid (info), info->client_info.stack_start, info->client_info.info.stack_end, skip_reason);
if (skip_reason) {
if (precise) {
}
g_assert (info->client_info.stack_start);
- g_assert (info->client_info.stack_end);
+ g_assert (info->client_info.info.stack_end);
aligned_stack_start = (void*)(mword) ALIGN_TO ((mword)info->client_info.stack_start, SIZEOF_VOID_P);
#ifdef HOST_WIN32
#endif
g_assert (info->client_info.suspend_done);
- SGEN_LOG (3, "Scanning thread %p, range: %p-%p, size: %zd, pinned=%zd", info, info->client_info.stack_start, info->client_info.stack_end, (char*)info->client_info.stack_end - (char*)info->client_info.stack_start, sgen_get_pinned_count ());
+ SGEN_LOG (3, "Scanning thread %p, range: %p-%p, size: %zd, pinned=%zd", info, info->client_info.stack_start, info->client_info.info.stack_end, (char*)info->client_info.info.stack_end - (char*)info->client_info.stack_start, sgen_get_pinned_count ());
if (mono_gc_get_gc_callbacks ()->thread_mark_func && !conservative_stack_mark) {
- mono_gc_get_gc_callbacks ()->thread_mark_func (info->client_info.runtime_data, (guint8 *)aligned_stack_start, (guint8 *)info->client_info.stack_end, precise, &ctx);
+ mono_gc_get_gc_callbacks ()->thread_mark_func (info->client_info.runtime_data, (guint8 *)aligned_stack_start, (guint8 *)info->client_info.info.stack_end, precise, &ctx);
} else if (!precise) {
if (!conservative_stack_mark) {
fprintf (stderr, "Precise stack mark not supported - disabling.\n");
conservative_stack_mark = TRUE;
}
//FIXME we should eventually use the new stack_mark from coop
- sgen_conservatively_pin_objects_from ((void **)aligned_stack_start, (void **)info->client_info.stack_end, start_nursery, end_nursery, PIN_TYPE_STACK);
+ sgen_conservatively_pin_objects_from ((void **)aligned_stack_start, (void **)info->client_info.info.stack_end, start_nursery, end_nursery, PIN_TYPE_STACK);
}
if (!precise) {
{
// This is used on Coop GC for platforms where we cannot get the data for individual registers.
// We force a spill of all registers into the stack and pass a chunk of data into sgen.
- //FIXME under coop, for now, what we need to ensure is that we scan any extra memory from info->client_info.stack_end to stack_mark
+ //FIXME under coop, for now, what we need to ensure is that we scan any extra memory from info->client_info.info.stack_end to stack_mark
MonoThreadUnwindState *state = &info->client_info.info.thread_saved_state [SELF_SUSPEND_STATE_INDEX];
if (state && state->gc_stackdata) {
sgen_conservatively_pin_objects_from ((void **)state->gc_stackdata, (void**)((char*)state->gc_stackdata + state->gc_stackdata_size),
}
}
}
- if (precise && info->client_info.info.handle_stack) {
- mono_handle_stack_scan ((HandleStack*)info->client_info.info.handle_stack, (GcScanFunc)ctx.ops->copy_or_mark_object, ctx.queue);
+ if (info->client_info.info.handle_stack) {
+		/*
+		 * Make two passes over the handle stack. On the imprecise pass, pin all
+		 * objects where a handle points into the interior of an object. On the
+		 * precise pass, copy or mark all the objects whose handles point to the
+		 * beginning of an object.
+		 */
+ if (precise)
+ mono_handle_stack_scan ((HandleStack*)info->client_info.info.handle_stack, (GcScanFunc)ctx.ops->copy_or_mark_object, ctx.queue, precise);
+ else {
+ PinHandleStackInteriorPtrData ud = { .start_nursery = start_nursery,
+ .end_nursery = end_nursery,
+ };
+ mono_handle_stack_scan ((HandleStack*)info->client_info.info.handle_stack, pin_handle_stack_interior_ptrs, &ud, precise);
+ }
}
} FOREACH_THREAD_END
}
LOCK_GC;
info = mono_thread_info_current ();
if (info) {
- SGEN_ASSERT (0, stack_end < info->client_info.stack_end, "Can only lower stack end");
- info->client_info.stack_end = stack_end;
+ SGEN_ASSERT (0, stack_end < info->client_info.info.stack_end, "Can only lower stack end");
+ info->client_info.info.stack_end = stack_end;
}
UNLOCK_GC;
}
mono_gc_get_nursery (int *shift_bits, size_t *size)
{
*size = sgen_nursery_size;
- *shift_bits = DEFAULT_NURSERY_BITS;
+ *shift_bits = sgen_nursery_bits;
return sgen_get_nursery_start ();
}
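/*
 * Illustrative sketch (not part of this change): callers such as the JIT's
 * nursery check use the returned shift to test whether an address falls in
 * the nursery, which is why the dynamically computed sgen_nursery_bits must
 * be reported here rather than a compile-time constant. The helper below is
 * hypothetical and assumes the nursery is aligned to its size, as SGen lays
 * it out.
 */
static gboolean
ptr_in_nursery_example (gpointer ptr)
{
	int shift_bits;
	size_t size;
	char *start = (char*)mono_gc_get_nursery (&shift_bits, &size);
	/* An address is in the nursery iff its high bits match the nursery start. */
	return ((mword)ptr >> shift_bits) == ((mword)start >> shift_bits);
}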
/**
* mono_gchandle_is_in_domain:
- * @gchandle: a GCHandle's handle.
- * @domain: An application domain.
- *
- * Returns: TRUE if the object wrapped by the @gchandle belongs to the specific @domain.
+ * \param gchandle a GCHandle's handle.
+ * \param domain An application domain.
+ * \returns TRUE if the object wrapped by the \p gchandle belongs to the specific \p domain.
*/
gboolean
mono_gchandle_is_in_domain (guint32 gchandle, MonoDomain *domain)
/**
* mono_gchandle_free_domain:
- * @unloading: domain that is unloading
+ * \param unloading domain that is unloading
*
* Function used internally to cleanup any GC handle for objects belonging
* to the specified domain during appdomain unload.
#ifndef DISABLE_PERFCOUNTERS
mono_perfcounters->gc_num_handles++;
#endif
- mono_profiler_gc_handle (MONO_PROFILER_GC_HANDLE_CREATED, handle_type, handle, obj);
+
+ MONO_PROFILER_RAISE (gc_handle_created, (handle, handle_type, obj));
}
void
#ifndef DISABLE_PERFCOUNTERS
mono_perfcounters->gc_num_handles--;
#endif
- mono_profiler_gc_handle (MONO_PROFILER_GC_HANDLE_DESTROYED, handle_type, handle, NULL);
+
+ MONO_PROFILER_RAISE (gc_handle_deleted, (handle, handle_type));
}
void
*/
void
-sgen_client_degraded_allocation (size_t size)
+sgen_client_degraded_allocation (void)
{
- static int last_major_gc_warned = -1;
- static int num_degraded = 0;
+ static gint32 last_major_gc_warned = -1;
+ static gint32 num_degraded = 0;
- if (last_major_gc_warned < (int)gc_stats.major_gc_count) {
- ++num_degraded;
- if (num_degraded == 1 || num_degraded == 3)
+ gint32 major_gc_count = InterlockedRead (&gc_stats.major_gc_count);
+ if (InterlockedRead (&last_major_gc_warned) < major_gc_count) {
+ gint32 num = InterlockedIncrement (&num_degraded);
+ if (num == 1 || num == 3)
mono_trace (G_LOG_LEVEL_INFO, MONO_TRACE_GC, "Warning: Degraded allocation. Consider increasing nursery-size if the warning persists.");
- else if (num_degraded == 10)
+ else if (num == 10)
mono_trace (G_LOG_LEVEL_INFO, MONO_TRACE_GC, "Warning: Repeated degraded allocation. Consider increasing nursery-size.");
- last_major_gc_warned = gc_stats.major_gc_count;
+ InterlockedWrite (&last_major_gc_warned, major_gc_count);
}
}
void
sgen_client_init (void)
{
- int dummy;
- MonoThreadInfoCallbacks cb;
-
- cb.thread_register = sgen_thread_register;
- cb.thread_detach = sgen_thread_detach;
- cb.thread_unregister = sgen_thread_unregister;
- cb.thread_attach = sgen_thread_attach;
- cb.mono_thread_in_critical_region = thread_in_critical_region;
- cb.ip_in_critical_region = ip_in_critical_region;
-
- mono_threads_init (&cb, sizeof (SgenThreadInfo));
+ mono_thread_callbacks_init ();
+ mono_thread_info_init (sizeof (SgenThreadInfo));
///* Keep this the default for now */
/* Precise marking is broken on all supported targets. Disable until fixed. */
mono_tls_init_gc_keys ();
- mono_gc_register_thread (&dummy);
+ mono_thread_info_attach ();
}
gboolean
static gboolean gc_inited;
+/**
+ * mono_gc_base_init:
+ */
void
mono_gc_base_init (void)
{