*
* Copyright (C) 2014 Xamarin Inc
*
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Library General Public
- * License 2.0 as published by the Free Software Foundation;
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Library General Public License for more details.
- *
- * You should have received a copy of the GNU Library General Public
- * License 2.0 along with this library; if not, write to the Free
- * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * Licensed under the MIT license. See LICENSE file in the project root for full license information.
*/
#include "config.h"
#include "sgen/sgen-client.h"
#include "sgen/sgen-cardtable.h"
#include "sgen/sgen-pinning.h"
+#include "sgen/sgen-thread-pool.h"
#include "metadata/marshal.h"
#include "metadata/method-builder.h"
#include "metadata/abi-details.h"
#include "metadata/handle.h"
#include "utils/mono-memory-model.h"
#include "utils/mono-logger-internals.h"
+#include "utils/mono-threads-coop.h"
+#include "sgen/sgen-thread-pool.h"
+#include "utils/mono-threads.h"
#ifdef HEAVY_STATISTICS
static guint64 stat_wbarrier_set_arrayref = 0;
HEAVY_STAT (++stat_wbarrier_object_copy);
- if (sgen_ptr_in_nursery (obj) || ptr_on_stack (obj) || !SGEN_OBJECT_HAS_REFERENCES (src)) {
+ SGEN_ASSERT (6, !ptr_on_stack (obj), "Why is this called for a non-reference type?");
+ if (sgen_ptr_in_nursery (obj) || !SGEN_OBJECT_HAS_REFERENCES (src)) {
size = mono_object_class (obj)->instance_size;
mono_gc_memmove_aligned ((char*)obj + sizeof (MonoObject), (char*)src + sizeof (MonoObject),
size - sizeof (MonoObject));
sgen_wbarrier_value_copy_bitmap (_dest, _src, size, bitmap);
}
+/* Return the OS signal number used to suspend threads; thin wrapper over mono-threads. */
+int
+mono_gc_get_suspend_signal (void)
+{
+	return mono_threads_suspend_get_suspend_signal ();
+}
+
+/* Return the OS signal number used to resume suspended threads; thin wrapper over mono-threads. */
+int
+mono_gc_get_restart_signal (void)
+{
+	return mono_threads_suspend_get_restart_signal ();
+}
+
static MonoMethod *write_barrier_conc_method;
static MonoMethod *write_barrier_noconc_method;
+/*
+ * A method is "critical" when suspending a thread inside it is unsafe for the
+ * collector.  The write-barrier methods are no longer treated as critical here;
+ * only the managed allocators remain.
+ */
gboolean
sgen_is_critical_method (MonoMethod *method)
{
-	return (method == write_barrier_conc_method || method == write_barrier_noconc_method || sgen_is_managed_allocator (method));
+	return sgen_is_managed_allocator (method);
}
+/* TRUE if any critical method (i.e. a managed allocator) has been created yet. */
gboolean
sgen_has_critical_method (void)
{
-	return write_barrier_conc_method || write_barrier_noconc_method || sgen_has_managed_allocator ();
+	return sgen_has_managed_allocator ();
+}
+
+/*
+ * Suspend-time callback (installed below as cb.ip_in_critical_region): returns
+ * TRUE when the instruction pointer @ip of a suspended thread falls inside a
+ * runtime- or GC-critical method, in which case the suspension must be retried.
+ * Must be async-signal-safe, hence try_aot == FALSE in the lookup below.
+ */
+static gboolean
+ip_in_critical_region (MonoDomain *domain, gpointer ip)
+{
+	MonoJitInfo *ji;
+	MonoMethod *method;
+
+	/*
+	 * We pass false for 'try_aot' so this becomes async safe.
+	 * It won't find aot methods whose jit info is not yet loaded,
+	 * so we preload their jit info in the JIT.
+	 */
+	ji = mono_jit_info_table_find_internal (domain, ip, FALSE, FALSE);
+	if (!ji)
+		return FALSE;
+
+	method = mono_jit_info_get_method (ji);
+
+	return mono_runtime_is_critical_method (method) || sgen_is_critical_method (method);
+}
+
+/* Public entry point; delegates to sgen_is_critical_method (). */
+gboolean
+mono_gc_is_critical_method (MonoMethod *method)
+{
+	return sgen_is_critical_method (method);
}
#ifndef DISABLE_JIT
return sgen_gc_invoke_finalizers ();
}
-gboolean
+MonoBoolean
mono_gc_pending_finalizers (void)
{
return sgen_have_pending_finalizers ();
- * @out_array: output array
- * @out_size: size of output array
- *
- * Store inside @out_array up to @out_size objects that belong to the unloading
- * appdomain @domain. Returns the number of stored items. Can be called repeteadly
- * until it returns 0.
- * The items are removed from the finalizer data structure, so the caller is supposed
- * to finalize them.
- * @out_array should be on the stack to allow the GC to know the objects are still alive.
+ * Enqueue for finalization all objects that belong to the unloading appdomain @domain.
 */
-int
-mono_gc_finalizers_for_domain (MonoDomain *domain, MonoObject **out_array, int out_size)
+/* Enqueue for finalization every registered object that belongs to the unloading @domain. */
+void
+mono_gc_finalize_domain (MonoDomain *domain)
+{
+	sgen_finalize_if (object_in_domain_predicate, domain);
+}
+
+/* Flag finalizer processing as suspended; delegates to sgen_set_suspend_finalizers (). */
+void
+mono_gc_suspend_finalizers (void)
{
-	return sgen_gather_finalizers_if (object_in_domain_predicate, domain, out_array, out_size);
+	sgen_set_suspend_finalizers ();
}
/*
* Appdomain handling
*/
-void
-mono_gc_set_current_thread_appdomain (MonoDomain *domain)
-{
- SgenThreadInfo *info = mono_thread_info_current ();
-
- /* Could be called from sgen_thread_unregister () with a NULL info */
- if (domain) {
- g_assert (info);
- info->client_info.stopped_domain = domain;
- }
-}
-
static gboolean
need_remove_object_for_domain (GCObject *start, MonoDomain *domain)
{
sgen_stop_world (0);
if (sgen_concurrent_collection_in_progress ())
- sgen_perform_collection (0, GENERATION_OLD, "clear domain", TRUE);
+ sgen_perform_collection (0, GENERATION_OLD, "clear domain", TRUE, FALSE);
SGEN_ASSERT (0, !sgen_concurrent_collection_in_progress (), "We just ordered a synchronous collection. Why are we collecting concurrently?");
major_collector.finish_sweeping ();
major_collector.iterate_objects (ITERATE_OBJECTS_SWEEP_PINNED, (IterateObjectCallbackFunc)clear_domain_free_major_pinned_object_callback, domain);
if (domain == mono_get_root_domain ()) {
- sgen_pin_stats_print_class_stats ();
+ sgen_pin_stats_report ();
sgen_object_layout_dump (stdout);
}
- sgen_restart_world (0, NULL);
+ sgen_restart_world (0);
binary_protocol_domain_unload_end (domain);
binary_protocol_flush_buffers (FALSE);
* Allocation
*/
-static gboolean alloc_events = FALSE;
-
-void
-mono_gc_enable_alloc_events (void)
-{
- alloc_events = TRUE;
-}
-
void*
mono_gc_alloc_obj (MonoVTable *vtable, size_t size)
{
MonoObject *obj = sgen_alloc_obj (vtable, size);
- if (G_UNLIKELY (alloc_events)) {
+ if (G_UNLIKELY (mono_profiler_events & MONO_PROFILE_ALLOCATIONS)) {
if (obj)
mono_profiler_allocation (obj);
}
{
MonoObject *obj = sgen_alloc_obj_pinned (vtable, size);
- if (G_UNLIKELY (alloc_events)) {
+ if (G_UNLIKELY (mono_profiler_events & MONO_PROFILE_ALLOCATIONS)) {
if (obj)
mono_profiler_allocation (obj);
}
{
MonoObject *obj = sgen_alloc_obj_mature (vtable, size);
- if (G_UNLIKELY (alloc_events)) {
+ if (G_UNLIKELY (mono_profiler_events & MONO_PROFILE_ALLOCATIONS)) {
if (obj)
mono_profiler_allocation (obj);
}
mono_gc_alloc_fixed (size_t size, MonoGCDescriptor descr, MonoGCRootSource source, const char *msg)
{
/* FIXME: do a single allocation */
- void *res = calloc (1, size);
+ void *res = g_calloc (1, size);
if (!res)
return NULL;
if (!mono_gc_register_root ((char *)res, size, descr, source, msg)) {
- free (res);
+ g_free (res);
res = NULL;
}
return res;
mono_gc_free_fixed (void* addr)
{
mono_gc_deregister_root ((char *)addr);
- free (addr);
+ g_free (addr);
}
/*
#ifdef MANAGED_ALLOCATION
-#ifdef HAVE_KW_THREAD
+#if defined(HAVE_KW_THREAD) || defined(TARGET_OSX) || defined(TARGET_WIN32) || defined(TARGET_ANDROID) || defined(TARGET_IOS)
-#define EMIT_TLS_ACCESS_NEXT_ADDR(mb) do { \
- mono_mb_emit_byte ((mb), MONO_CUSTOM_PREFIX); \
- mono_mb_emit_byte ((mb), CEE_MONO_TLS); \
- mono_mb_emit_i4 ((mb), TLS_KEY_SGEN_TLAB_NEXT_ADDR); \
+// Cache the SgenThreadInfo pointer in a local 'var'.
+#define EMIT_TLS_ACCESS_VAR(mb, var) \
+ do { \
+ var = mono_mb_add_local ((mb), &mono_defaults.int_class->byval_arg); \
+ mono_mb_emit_byte ((mb), MONO_CUSTOM_PREFIX); \
+ mono_mb_emit_byte ((mb), CEE_MONO_TLS); \
+ mono_mb_emit_i4 ((mb), TLS_KEY_SGEN_THREAD_INFO); \
+ mono_mb_emit_stloc ((mb), (var)); \
} while (0)
-#define EMIT_TLS_ACCESS_TEMP_END(mb) do { \
- mono_mb_emit_byte ((mb), MONO_CUSTOM_PREFIX); \
- mono_mb_emit_byte ((mb), CEE_MONO_TLS); \
- mono_mb_emit_i4 ((mb), TLS_KEY_SGEN_TLAB_TEMP_END); \
+#define EMIT_TLS_ACCESS_IN_CRITICAL_REGION_ADDR(mb, var) \
+ do { \
+ mono_mb_emit_ldloc ((mb), (var)); \
+ mono_mb_emit_icon ((mb), MONO_STRUCT_OFFSET (SgenClientThreadInfo, in_critical_region)); \
+ mono_mb_emit_byte ((mb), CEE_ADD); \
} while (0)
-#else
-
-#if defined(TARGET_OSX) || defined(TARGET_WIN32) || defined(TARGET_ANDROID) || defined(TARGET_IOS)
-#define EMIT_TLS_ACCESS_NEXT_ADDR(mb) do { \
- mono_mb_emit_byte ((mb), MONO_CUSTOM_PREFIX); \
- mono_mb_emit_byte ((mb), CEE_MONO_TLS); \
- mono_mb_emit_i4 ((mb), TLS_KEY_SGEN_THREAD_INFO); \
- mono_mb_emit_icon ((mb), MONO_STRUCT_OFFSET (SgenThreadInfo, tlab_next_addr)); \
+#define EMIT_TLS_ACCESS_NEXT_ADDR(mb, var) do { \
+ mono_mb_emit_ldloc ((mb), (var)); \
+ mono_mb_emit_icon ((mb), MONO_STRUCT_OFFSET (SgenThreadInfo, tlab_next)); \
mono_mb_emit_byte ((mb), CEE_ADD); \
- mono_mb_emit_byte ((mb), CEE_LDIND_I); \
} while (0)
-#define EMIT_TLS_ACCESS_TEMP_END(mb) do { \
- mono_mb_emit_byte ((mb), MONO_CUSTOM_PREFIX); \
- mono_mb_emit_byte ((mb), CEE_MONO_TLS); \
- mono_mb_emit_i4 ((mb), TLS_KEY_SGEN_THREAD_INFO); \
+#define EMIT_TLS_ACCESS_TEMP_END(mb, var) do { \
+ mono_mb_emit_ldloc ((mb), (var)); \
mono_mb_emit_icon ((mb), MONO_STRUCT_OFFSET (SgenThreadInfo, tlab_temp_end)); \
mono_mb_emit_byte ((mb), CEE_ADD); \
mono_mb_emit_byte ((mb), CEE_LDIND_I); \
} while (0)
#else
-#define EMIT_TLS_ACCESS_NEXT_ADDR(mb) do { g_error ("sgen is not supported when using --with-tls=pthread.\n"); } while (0)
-#define EMIT_TLS_ACCESS_TEMP_END(mb) do { g_error ("sgen is not supported when using --with-tls=pthread.\n"); } while (0)
-#endif
+#define EMIT_TLS_ACCESS_VAR(mb, _var) do { g_error ("sgen is not supported when using --with-tls=pthread.\n"); } while (0)
+#define EMIT_TLS_ACCESS_NEXT_ADDR(mb, _var) do { g_error ("sgen is not supported when using --with-tls=pthread.\n"); } while (0)
+#define EMIT_TLS_ACCESS_TEMP_END(mb, _var) do { g_error ("sgen is not supported when using --with-tls=pthread.\n"); } while (0)
+#define EMIT_TLS_ACCESS_IN_CRITICAL_REGION_ADDR(mb, _var) do { g_error ("sgen is not supported when using --with-tls=pthread.\n"); } while (0)
#endif
* that they are executed atomically via the restart mechanism.
*/
static MonoMethod*
-create_allocator (int atype, gboolean slowpath)
+create_allocator (int atype, ManagedAllocatorVariant variant)
{
- int p_var, size_var, thread_var G_GNUC_UNUSED;
- int p_var, size_var, real_size_var;
++ int p_var, size_var, real_size_var, thread_var G_GNUC_UNUSED;
+ gboolean slowpath = variant == MANAGED_ALLOCATOR_SLOW_PATH;
guint32 slowpath_branch, max_size_branch;
MonoMethodBuilder *mb;
MonoMethod *res;
goto done;
}
+ /*
+ * Tls access might call foreign code or code without jinfo. This can
+ * only happen if we are outside of the critical region.
+ */
+ EMIT_TLS_ACCESS_VAR (mb, thread_var);
+
size_var = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
if (atype == ATYPE_SMALL) {
/* size_var = size_arg */
g_assert_not_reached ();
}
+#ifdef MANAGED_ALLOCATOR_CAN_USE_CRITICAL_REGION
+ EMIT_TLS_ACCESS_IN_CRITICAL_REGION_ADDR (mb, thread_var);
+ mono_mb_emit_byte (mb, CEE_LDC_I4_1);
+ mono_mb_emit_byte (mb, MONO_CUSTOM_PREFIX);
+ mono_mb_emit_byte (mb, CEE_MONO_ATOMIC_STORE_I4);
+ mono_mb_emit_i4 (mb, MONO_MEMORY_BARRIER_NONE);
+#endif
+
+ if (nursery_canaries_enabled ()) {
+ real_size_var = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
+ mono_mb_emit_ldloc (mb, size_var);
+ mono_mb_emit_stloc(mb, real_size_var);
+ }
+ else
+ real_size_var = size_var;
+
/* size += ALLOC_ALIGN - 1; */
mono_mb_emit_ldloc (mb, size_var);
mono_mb_emit_icon (mb, SGEN_ALLOC_ALIGN - 1);
/* tlab_next_addr (local) = tlab_next_addr (TLS var) */
tlab_next_addr_var = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
- EMIT_TLS_ACCESS_NEXT_ADDR (mb);
+ EMIT_TLS_ACCESS_NEXT_ADDR (mb, thread_var);
mono_mb_emit_stloc (mb, tlab_next_addr_var);
/* p = (void**)tlab_next; */
mono_mb_emit_ldloc (mb, size_var);
mono_mb_emit_byte (mb, CEE_CONV_I);
mono_mb_emit_byte (mb, CEE_ADD);
+
+ if (nursery_canaries_enabled ()) {
+ mono_mb_emit_icon (mb, CANARY_SIZE);
+ mono_mb_emit_byte (mb, CEE_ADD);
+ }
mono_mb_emit_stloc (mb, new_next_var);
/* if (G_LIKELY (new_next < tlab_temp_end)) */
mono_mb_emit_ldloc (mb, new_next_var);
- EMIT_TLS_ACCESS_TEMP_END (mb);
+ EMIT_TLS_ACCESS_TEMP_END (mb, thread_var);
slowpath_branch = mono_mb_emit_short_branch (mb, MONO_CEE_BLT_UN_S);
/* Slowpath */
mono_mb_emit_byte (mb, MONO_CUSTOM_PREFIX);
mono_mb_emit_byte (mb, CEE_MONO_NOT_TAKEN);
+ /*
+ * We are no longer in a critical section. We need to do this before calling
+ * to unmanaged land in order to avoid stw deadlocks since unmanaged code
+ * might take locks.
+ */
+#ifdef MANAGED_ALLOCATOR_CAN_USE_CRITICAL_REGION
+ EMIT_TLS_ACCESS_IN_CRITICAL_REGION_ADDR (mb, thread_var);
+ mono_mb_emit_byte (mb, CEE_LDC_I4_0);
+ mono_mb_emit_byte (mb, MONO_CUSTOM_PREFIX);
+ mono_mb_emit_byte (mb, CEE_MONO_ATOMIC_STORE_I4);
+ mono_mb_emit_i4 (mb, MONO_MEMORY_BARRIER_NONE);
+#endif
/* FIXME: mono_gc_alloc_obj takes a 'size_t' as an argument, not an int32 */
mono_mb_emit_ldarg (mb, 0);
- mono_mb_emit_ldloc (mb, size_var);
+ mono_mb_emit_ldloc (mb, real_size_var);
if (atype == ATYPE_NORMAL || atype == ATYPE_SMALL) {
mono_mb_emit_icall (mb, mono_gc_alloc_obj);
} else if (atype == ATYPE_VECTOR) {
mono_mb_emit_ldloc (mb, new_next_var);
mono_mb_emit_byte (mb, CEE_STIND_I);
- /*The tlab store must be visible before the the vtable store. This could be replaced with a DDS but doing it with IL would be tricky. */
- mono_mb_emit_byte (mb, MONO_CUSTOM_PREFIX);
- mono_mb_emit_byte (mb, CEE_MONO_MEMORY_BARRIER);
- mono_mb_emit_i4 (mb, MONO_MEMORY_BARRIER_REL);
-
/* *p = vtable; */
mono_mb_emit_ldloc (mb, p_var);
mono_mb_emit_ldarg (mb, 0);
mono_mb_emit_byte (mb, CEE_STIND_I);
+ /* mark object end with nursery word */
+ if (nursery_canaries_enabled ()) {
+ mono_mb_emit_ldloc (mb, p_var);
+ mono_mb_emit_ldloc (mb, real_size_var);
+ mono_mb_emit_byte (mb, MONO_CEE_ADD);
+ mono_mb_emit_icon8 (mb, (mword) CANARY_STRING);
+ mono_mb_emit_icon (mb, CANARY_SIZE);
+ mono_mb_emit_byte (mb, MONO_CEE_PREFIX1);
+ mono_mb_emit_byte (mb, CEE_CPBLK);
+ }
+
if (atype == ATYPE_VECTOR) {
/* arr->max_length = max_length; */
mono_mb_emit_ldloc (mb, p_var);
mono_mb_emit_byte (mb, MONO_CEE_ADD);
mono_mb_emit_ldarg (mb, 1);
mono_mb_emit_byte (mb, MONO_CEE_STIND_I4);
- /* s->chars [len] = 0; */
- mono_mb_emit_ldloc (mb, p_var);
- mono_mb_emit_ldloc (mb, size_var);
- mono_mb_emit_icon (mb, 2);
- mono_mb_emit_byte (mb, MONO_CEE_SUB);
- mono_mb_emit_byte (mb, MONO_CEE_ADD);
- mono_mb_emit_icon (mb, 0);
- mono_mb_emit_byte (mb, MONO_CEE_STIND_I2);
}
+#ifdef MANAGED_ALLOCATOR_CAN_USE_CRITICAL_REGION
+ EMIT_TLS_ACCESS_IN_CRITICAL_REGION_ADDR (mb, thread_var);
+ mono_mb_emit_byte (mb, CEE_LDC_I4_0);
+ mono_mb_emit_byte (mb, MONO_CUSTOM_PREFIX);
+ mono_mb_emit_byte (mb, CEE_MONO_ATOMIC_STORE_I4);
+#else
+ mono_mb_emit_byte (mb, MONO_CUSTOM_PREFIX);
+ mono_mb_emit_byte (mb, CEE_MONO_MEMORY_BARRIER);
+#endif
/*
We must make sure both vtable and max_length are globaly visible before returning to managed land.
*/
- mono_mb_emit_byte (mb, MONO_CUSTOM_PREFIX);
- mono_mb_emit_byte (mb, CEE_MONO_MEMORY_BARRIER);
mono_mb_emit_i4 (mb, MONO_MEMORY_BARRIER_REL);
/* return p */
return NULL;
if (known_instance_size && ALIGN_TO (klass->instance_size, SGEN_ALLOC_ALIGN) >= SGEN_MAX_SMALL_OBJ_SIZE)
return NULL;
- if (mono_class_has_finalizer (klass) || mono_class_is_marshalbyref (klass) || (mono_profiler_get_events () & MONO_PROFILE_ALLOCATIONS))
+ if (mono_class_has_finalizer (klass) || mono_class_is_marshalbyref (klass))
return NULL;
if (klass->rank)
return NULL;
+ if (mono_profiler_get_events () & MONO_PROFILE_ALLOCATIONS)
+ return NULL;
if (klass->byval_arg.type == MONO_TYPE_STRING)
- return mono_gc_get_managed_allocator_by_type (ATYPE_STRING, FALSE);
+ return mono_gc_get_managed_allocator_by_type (ATYPE_STRING, MANAGED_ALLOCATOR_REGULAR);
/* Generic classes have dynamic field and can go above MAX_SMALL_OBJ_SIZE. */
if (known_instance_size)
- return mono_gc_get_managed_allocator_by_type (ATYPE_SMALL, FALSE);
+ return mono_gc_get_managed_allocator_by_type (ATYPE_SMALL, MANAGED_ALLOCATOR_REGULAR);
else
- return mono_gc_get_managed_allocator_by_type (ATYPE_NORMAL, FALSE);
+ return mono_gc_get_managed_allocator_by_type (ATYPE_NORMAL, MANAGED_ALLOCATOR_REGULAR);
#else
return NULL;
#endif
return NULL;
g_assert (!mono_class_has_finalizer (klass) && !mono_class_is_marshalbyref (klass));
- return mono_gc_get_managed_allocator_by_type (ATYPE_VECTOR, FALSE);
+ return mono_gc_get_managed_allocator_by_type (ATYPE_VECTOR, MANAGED_ALLOCATOR_REGULAR);
#else
return NULL;
#endif
}
MonoMethod*
-mono_gc_get_managed_allocator_by_type (int atype, gboolean slowpath)
+mono_gc_get_managed_allocator_by_type (int atype, ManagedAllocatorVariant variant)
{
#ifdef MANAGED_ALLOCATION
MonoMethod *res;
- MonoMethod **cache = slowpath ? slowpath_alloc_method_cache : alloc_method_cache;
+ MonoMethod **cache;
- if (!use_managed_allocator)
+ if (variant == MANAGED_ALLOCATOR_REGULAR && !use_managed_allocator)
return NULL;
- if (!mono_runtime_has_tls_get ())
+ if (variant == MANAGED_ALLOCATOR_REGULAR && !mono_runtime_has_tls_get ())
return NULL;
+ switch (variant) {
+ case MANAGED_ALLOCATOR_REGULAR: cache = alloc_method_cache; break;
+ case MANAGED_ALLOCATOR_SLOW_PATH: cache = slowpath_alloc_method_cache; break;
+ default: g_assert_not_reached (); break;
+ }
+
res = cache [atype];
if (res)
return res;
- res = create_allocator (atype, slowpath);
+ res = create_allocator (atype, variant);
LOCK_GC;
if (cache [atype]) {
mono_free_method (res);
#define ARRAY_OBJ_INDEX(ptr,array,elem_size) (((char*)(ptr) - ((char*)(array) + G_STRUCT_OFFSET (MonoArray, vector))) / (elem_size))
gboolean
-sgen_client_cardtable_scan_object (GCObject *obj, mword block_obj_size, guint8 *cards, gboolean mod_union, ScanCopyContext ctx)
+sgen_client_cardtable_scan_object (GCObject *obj, mword block_obj_size, guint8 *cards, ScanCopyContext ctx)
{
MonoVTable *vt = SGEN_LOAD_VTABLE (obj);
MonoClass *klass = vt->klass;
for (; elem < card_end; elem += elem_size)
scan_vtype_func (obj, elem, desc, ctx.queue BINARY_PROTOCOL_ARG (elem_size));
} else {
- CopyOrMarkObjectFunc copy_func = ctx.ops->copy_or_mark_object;
+ ScanPtrFieldFunc scan_ptr_field_func = ctx.ops->scan_ptr_field;
HEAVY_STAT (++los_array_cards);
- for (; elem < card_end; elem += SIZEOF_VOID_P) {
- GCObject *new_;
- gpointer old = *(gpointer*)elem;
- if ((mod_union && old) || G_UNLIKELY (sgen_ptr_in_nursery (old))) {
- HEAVY_STAT (++los_array_remsets);
- copy_func ((GCObject**)elem, ctx.queue);
- new_ = *(GCObject **)elem;
- if (G_UNLIKELY (sgen_ptr_in_nursery (new_)))
- sgen_add_to_global_remset (elem, new_);
- }
- }
+ for (; elem < card_end; elem += SIZEOF_VOID_P)
+ scan_ptr_field_func (obj, (GCObject**)elem, ctx.queue);
}
binary_protocol_card_scan (first_elem, elem - first_elem);
UNLOCK_GC;
done:
- if (G_UNLIKELY (alloc_events))
+ if (G_UNLIKELY (mono_profiler_events & MONO_PROFILE_ALLOCATIONS))
mono_profiler_allocation (&arr->obj);
SGEN_ASSERT (6, SGEN_ALIGN_UP (size) == SGEN_ALIGN_UP (sgen_client_par_object_get_size (vtable, (GCObject*)arr)), "Vector has incorrect size.");
UNLOCK_GC;
done:
- if (G_UNLIKELY (alloc_events))
+ if (G_UNLIKELY (mono_profiler_events & MONO_PROFILE_ALLOCATIONS))
mono_profiler_allocation (&arr->obj);
SGEN_ASSERT (6, SGEN_ALIGN_UP (size) == SGEN_ALIGN_UP (sgen_client_par_object_get_size (vtable, (GCObject*)arr)), "Array has incorrect size.");
UNLOCK_GC;
done:
- if (G_UNLIKELY (alloc_events))
+ if (G_UNLIKELY (mono_profiler_events & MONO_PROFILE_ALLOCATIONS))
mono_profiler_allocation (&str->object);
return str;
static void *moved_objects [MOVED_OBJECTS_NUM];
static int moved_objects_idx = 0;
+static SgenPointerQueue moved_objects_queue = SGEN_POINTER_QUEUE_INIT (INTERNAL_MEM_MOVED_OBJECT);
+
void
mono_sgen_register_moved_object (void *obj, void *destination)
{
- g_assert (mono_profiler_events & MONO_PROFILE_GC_MOVES);
+ /*
+ * This function can be called from SGen's worker threads. We want to try
+ * and avoid exposing those threads to the profiler API, so queue up move
+ * events and send them later when the main GC thread calls
+ * mono_sgen_gc_event_moves ().
+ *
+ * TODO: Once SGen has multiple worker threads, we need to switch to a
+ * lock-free data structure for the queue as multiple threads will be
+ * adding to it at the same time.
+ */
+ if (sgen_thread_pool_is_thread_pool_thread (mono_native_thread_id_get ())) {
+ sgen_pointer_queue_add (&moved_objects_queue, obj);
+ sgen_pointer_queue_add (&moved_objects_queue, destination);
+ } else {
+ if (moved_objects_idx == MOVED_OBJECTS_NUM) {
+ mono_profiler_gc_moves (moved_objects, moved_objects_idx);
+ moved_objects_idx = 0;
+ }
- if (moved_objects_idx == MOVED_OBJECTS_NUM) {
- mono_profiler_gc_moves (moved_objects, moved_objects_idx);
- moved_objects_idx = 0;
+ moved_objects [moved_objects_idx++] = obj;
+ moved_objects [moved_objects_idx++] = destination;
}
- moved_objects [moved_objects_idx++] = obj;
- moved_objects [moved_objects_idx++] = destination;
}
void
mono_sgen_gc_event_moves (void)
{
+ while (!sgen_pointer_queue_is_empty (&moved_objects_queue)) {
+ void *dst = sgen_pointer_queue_pop (&moved_objects_queue);
+ void *src = sgen_pointer_queue_pop (&moved_objects_queue);
+
+ mono_sgen_register_moved_object (src, dst);
+ }
+
if (moved_objects_idx) {
mono_profiler_gc_moves (moved_objects, moved_objects_idx);
moved_objects_idx = 0;
#endif
info->client_info.skip = 0;
- info->client_info.stopped_ip = NULL;
- info->client_info.stopped_domain = NULL;
info->client_info.stack_start = NULL;
info->client_info.signal = 0;
#endif
- /* On win32, stack_start_limit should be 0, since the stack can grow dynamically */
mono_thread_info_get_stack_bounds (&staddr, &stsize);
if (staddr) {
-#ifndef HOST_WIN32
info->client_info.stack_start_limit = staddr;
-#endif
info->client_info.stack_end = staddr + stsize;
} else {
gsize stack_bottom = (gsize)stack_bottom_fallback;
info->client_info.stack_end = (char*)stack_bottom;
}
-#ifdef USE_MONO_CTX
memset (&info->client_info.ctx, 0, sizeof (MonoContext));
-#else
- memset (&info->client_info.regs, 0, sizeof (info->client_info.regs));
-#endif
if (mono_gc_get_gc_callbacks ()->thread_attach_func)
info->client_info.runtime_data = mono_gc_get_gc_callbacks ()->thread_attach_func ();
binary_protocol_thread_register ((gpointer)mono_thread_info_get_tid (info));
SGEN_LOG (3, "registered thread %p (%p) stack end %p", info, (gpointer)mono_thread_info_get_tid (info), info->client_info.stack_end);
+
+ info->client_info.info.handle_stack = mono_handle_stack_alloc ();
}
void
binary_protocol_thread_unregister ((gpointer)tid);
SGEN_LOG (3, "unregister thread %p (%p)", p, (gpointer)tid);
+
+ HandleStack *handles = (HandleStack*) p->client_info.info.handle_stack;
+ p->client_info.info.handle_stack = NULL;
+ mono_handle_stack_free (handles);
}
void
UNLOCK_GC;
}
-static gboolean
-is_critical_method (MonoMethod *method)
-{
- return mono_runtime_is_critical_method (method) || sgen_is_critical_method (method);
-}
-
static gboolean
thread_in_critical_region (SgenThreadInfo *info)
{
+/* Attach hook: lazily create the runtime's per-thread GC data via the registered callback. */
static void
sgen_thread_attach (SgenThreadInfo *info)
{
-	mono_handle_arena_init ((MonoHandleArena**) &info->client_info.info.handle_arena);
-
	if (mono_gc_get_gc_callbacks ()->thread_attach_func && !info->client_info.runtime_data)
		info->client_info.runtime_data = mono_gc_get_gc_callbacks ()->thread_attach_func ();
}
sgen_thread_detach (SgenThreadInfo *p)
{
/* If a delegate is passed to native code and invoked on a thread we dont
- * know about, the jit will register it with mono_jit_thread_attach, but
+ * know about, marshal will register it with mono_threads_attach_coop, but
* we have no way of knowing when that thread goes away. SGen has a TSD
* so we assume that if the domain is still registered, we can detach
* the thread
*/
- if (mono_domain_get ())
+ if (mono_thread_internal_current_is_attached ())
mono_thread_detach_internal (mono_thread_internal_current ());
-
- mono_handle_arena_cleanup ((MonoHandleArena**) &p->client_info.info.handle_arena);
}
gboolean
sgen_client_thread_register_worker (void)
{
	mono_thread_info_register_small_id ();
+	mono_native_thread_set_name (mono_native_thread_id_get (), "SGen worker");
+	/* NOTE(review): declared gboolean but control falls off the end without returning
+	 * a value — reading the result would be undefined behavior; confirm whether the
+	 * return type should be void or whether callers expect TRUE/FALSE here. */
}
/* Variables holding start/end nursery so it won't have to be passed at every call */
FOREACH_THREAD (info) {
int skip_reason = 0;
- void *aligned_stack_start = (void*)(mword) ALIGN_TO ((mword)info->client_info.stack_start, SIZEOF_VOID_P);
+ void *aligned_stack_start;
if (info->client_info.skip) {
SGEN_LOG (3, "Skipping dead thread %p, range: %p-%p, size: %zd", info, info->client_info.stack_start, info->client_info.stack_end, (char*)info->client_info.stack_end - (char*)info->client_info.stack_start);
} else if (!mono_thread_info_is_live (info)) {
SGEN_LOG (3, "Skipping non-running thread %p, range: %p-%p, size: %zd (state %x)", info, info->client_info.stack_start, info->client_info.stack_end, (char*)info->client_info.stack_end - (char*)info->client_info.stack_start, info->client_info.info.thread_state);
skip_reason = 3;
+ } else if (!info->client_info.stack_start) {
+ SGEN_LOG (3, "Skipping starting or detaching thread %p", info);
+ skip_reason = 4;
}
binary_protocol_scan_stack ((gpointer)mono_thread_info_get_tid (info), info->client_info.stack_start, info->client_info.stack_end, skip_reason);
if (skip_reason)
continue;
+ g_assert (info->client_info.stack_start);
+ g_assert (info->client_info.stack_end);
+
+ aligned_stack_start = (void*)(mword) ALIGN_TO ((mword)info->client_info.stack_start, SIZEOF_VOID_P);
+#ifdef HOST_WIN32
+ /* Windows uses a guard page before the committed stack memory pages to detect when the
+ stack needs to be grown. If we suspend a thread just after a function prolog has
+ decremented the stack pointer to point into the guard page but before the thread has
+ been able to read or write to that page, starting the stack scan at aligned_stack_start
+ will raise a STATUS_GUARD_PAGE_VIOLATION and the process will crash. This code uses
+ VirtualQuery() to determine whether stack_start points into the guard page and then
+ updates aligned_stack_start to point at the next non-guard page. */
+ MEMORY_BASIC_INFORMATION mem_info;
+ SIZE_T result = VirtualQuery(info->client_info.stack_start, &mem_info, sizeof(mem_info));
+ g_assert (result != 0);
+ if (mem_info.Protect & PAGE_GUARD) {
+ aligned_stack_start = ((char*) mem_info.BaseAddress) + mem_info.RegionSize;
+ }
+#endif
+
g_assert (info->client_info.suspend_done);
SGEN_LOG (3, "Scanning thread %p, range: %p-%p, size: %zd, pinned=%zd", info, info->client_info.stack_start, info->client_info.stack_end, (char*)info->client_info.stack_end - (char*)info->client_info.stack_start, sgen_get_pinned_count ());
if (mono_gc_get_gc_callbacks ()->thread_mark_func && !conservative_stack_mark) {
fprintf (stderr, "Precise stack mark not supported - disabling.\n");
conservative_stack_mark = TRUE;
}
+ //FIXME we should eventually use the new stack_mark from coop
sgen_conservatively_pin_objects_from ((void **)aligned_stack_start, (void **)info->client_info.stack_end, start_nursery, end_nursery, PIN_TYPE_STACK);
}
if (!precise) {
-#ifdef USE_MONO_CTX
sgen_conservatively_pin_objects_from ((void**)&info->client_info.ctx, (void**)(&info->client_info.ctx + 1),
start_nursery, end_nursery, PIN_TYPE_STACK);
-#else
- sgen_conservatively_pin_objects_from ((void**)&info->client_info.regs, (void**)&info->client_info.regs + ARCH_NUM_REGS,
- start_nursery, end_nursery, PIN_TYPE_STACK);
-#endif
+
{
// This is used on Coop GC for platforms where we cannot get the data for individual registers.
// We force a spill of all registers into the stack and pass a chunk of data into sgen.
+ //FIXME under coop, for now, what we need to ensure is that we scan any extra memory from info->client_info.stack_end to stack_mark
MonoThreadUnwindState *state = &info->client_info.info.thread_saved_state [SELF_SUSPEND_STATE_INDEX];
if (state && state->gc_stackdata) {
sgen_conservatively_pin_objects_from ((void **)state->gc_stackdata, (void**)((char*)state->gc_stackdata + state->gc_stackdata_size),
}
}
}
+ if (precise && info->client_info.info.handle_stack) {
+ mono_handle_stack_scan ((HandleStack*)info->client_info.info.handle_stack, (GcScanFunc)ctx.ops->copy_or_mark_object, ctx.queue);
+ }
} FOREACH_THREAD_END
}
return 1;
}
-void
-mono_gc_enable_events (void)
-{
-}
-
const char *
mono_gc_get_gc_name (void)
{
+/* Human-readable collector description; the result is g_strdup'd, so the caller must g_free it. */
char*
mono_gc_get_description (void)
{
+#ifdef HAVE_CONC_GC_AS_DEFAULT
+	return g_strdup ("sgen (concurrent by default)");
+#else
	return g_strdup ("sgen");
+#endif
}
void
}
}
-void
-sgen_client_log_timing (GGTimingInfo *info, mword last_major_num_sections, mword last_los_memory_usage)
-{
- SgenMajorCollector *major_collector = sgen_get_major_collector ();
- mword num_major_sections = major_collector->get_num_major_sections ();
- char full_timing_buff [1024];
- full_timing_buff [0] = '\0';
-
- if (!info->is_overflow)
- sprintf (full_timing_buff, "total %.2fms, bridge %.2fms", info->stw_time / 10000.0f, (int)info->bridge_time / 10000.0f);
- if (info->generation == GENERATION_OLD)
- mono_trace (G_LOG_LEVEL_INFO, MONO_TRACE_GC, "GC_MAJOR%s: (%s) pause %.2fms, %s major %dK/%dK los %dK/%dK",
- info->is_overflow ? "_OVERFLOW" : "",
- info->reason ? info->reason : "",
- (int)info->total_time / 10000.0f,
- full_timing_buff,
- major_collector->section_size * num_major_sections / 1024,
- major_collector->section_size * last_major_num_sections / 1024,
- los_memory_usage / 1024,
- last_los_memory_usage / 1024);
- else
- mono_trace (G_LOG_LEVEL_INFO, MONO_TRACE_GC, "GC_MINOR%s: (%s) pause %.2fms, %s promoted %dK major %dK los %dK",
- info->is_overflow ? "_OVERFLOW" : "",
- info->reason ? info->reason : "",
- (int)info->total_time / 10000.0f,
- full_timing_buff,
- (num_major_sections - last_major_num_sections) * major_collector->section_size / 1024,
- major_collector->section_size * num_major_sections / 1024,
- los_memory_usage / 1024);
-}
-
/*
* Debugging
*/
{
switch (type) {
case INTERNAL_MEM_EPHEMERON_LINK: return "ephemeron-link";
+ case INTERNAL_MEM_MOVED_OBJECT: return "moved-object";
default:
return NULL;
}
cb.thread_detach = sgen_thread_detach;
cb.thread_unregister = sgen_thread_unregister;
cb.thread_attach = sgen_thread_attach;
- cb.mono_method_is_critical = (gboolean (*)(void *))is_critical_method;
cb.mono_thread_in_critical_region = thread_in_critical_region;
+ cb.ip_in_critical_region = ip_in_critical_region;
mono_threads_init (&cb, sizeof (SgenThreadInfo));
}
#endif
- /*
- * This needs to happen before any internal allocations because
- * it inits the small id which is required for hazard pointer
- * operations.
- */
- sgen_os_init ();
-
mono_gc_register_thread (&dummy);
}
} else if (g_str_has_prefix (opt, "toggleref-test")) {
/* FIXME: This should probably in MONO_GC_DEBUG */
sgen_register_test_toggleref_callback ();
- } else {
+ } else if (!sgen_bridge_handle_gc_param (opt)) {
return FALSE;
}
return TRUE;
sgen_bridge_describe_pointer (ptr);
}
+static gboolean gc_inited;
+
void
mono_gc_base_init (void)
{
+ if (gc_inited)
+ return;
+
mono_counters_init ();
+#ifndef HOST_WIN32
+ mono_w32handle_init ();
+#endif
+
#ifdef HEAVY_STATISTICS
mono_counters_register ("los marked cards", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &los_marked_cards);
mono_counters_register ("los array cards scanned ", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &los_array_cards);
sgen_gc_init ();
- if (nursery_canaries_enabled ())
- sgen_set_use_managed_allocator (FALSE);
-
#if defined(HAVE_KW_THREAD)
/* This can happen with using libmonosgen.so */
- if (mono_tls_key_get_offset (TLS_KEY_SGEN_TLAB_NEXT_ADDR) == -1)
+ if (mono_tls_key_get_offset (TLS_KEY_SGEN_THREAD_INFO) == -1)
sgen_set_use_managed_allocator (FALSE);
#endif
+
+ gc_inited = TRUE;
}
void
mono_gc_base_cleanup (void)
{
+	/* Stop the SGen worker thread pool before inspecting queue state so
+	 * no worker can enqueue further moved-object entries concurrently.
+	 * NOTE(review): assumes all pending moves were consumed earlier in
+	 * shutdown — the assert below enforces that. */
+	sgen_thread_pool_shutdown ();
+
+	// We should have consumed any outstanding moves.
+	g_assert (sgen_pointer_queue_is_empty (&moved_objects_queue));
}
gboolean
* Copyright 2011 Xamarin Inc (http://www.xamarin.com)
* Copyright (C) 2012 Xamarin Inc
*
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Library General Public
- * License 2.0 as published by the Free Software Foundation;
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Library General Public License for more details.
- *
- * You should have received a copy of the GNU Library General Public
- * License 2.0 along with this library; if not, write to the Free
- * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * Licensed under the MIT license. See LICENSE file in the project root for full license information.
*/
#ifndef __MONO_SGENGC_H__
#define __MONO_SGENGC_H__
g_error (__VA_ARGS__); \
} } while (0)
+/* Fill the caller-declared logTime[] buffer with the current local time
+ * formatted as "YYYY-MM-DD HH:MM:SS".  The POSIX branch uses the
+ * reentrant localtime_r; the Windows branch uses localtime.
+ * NOTE(review): MSVC's localtime is documented to use a per-thread
+ * buffer — confirm for the targeted CRT before relying on it from
+ * multiple threads. */
+#ifndef HOST_WIN32
+# define LOG_TIMESTAMP \
+	do { \
+		time_t t; \
+		struct tm tod; \
+		time(&t); \
+		localtime_r(&t, &tod); \
+		strftime(logTime, sizeof(logTime), "%Y-%m-%d %H:%M:%S", &tod); \
+	} while (0)
+#else
+# define LOG_TIMESTAMP \
+	do { \
+		time_t t; \
+		struct tm *tod; \
+		time(&t); \
+		tod = localtime(&t); \
+		strftime(logTime, sizeof(logTime), "%Y-%m-%d %H:%M:%S", tod); \
+	} while (0)
+#endif
+/* Emit one timestamped debug line when `level` passes both the
+ * compile-time cap (SGEN_MAX_DEBUG_LEVEL) and the runtime cap
+ * (gc_debug_level).  logTime is filled by LOG_TIMESTAMP. */
#define SGEN_LOG(level, format, ...) do { \
	if (G_UNLIKELY ((level) <= SGEN_MAX_DEBUG_LEVEL && (level) <= gc_debug_level)) {	\
-		mono_gc_printf (gc_debug_file, format "\n", ##__VA_ARGS__);	\
+		char logTime[80];								\
+		LOG_TIMESTAMP;									\
+		mono_gc_printf (gc_debug_file, "%s " format "\n", logTime, ##__VA_ARGS__);	\
	} } while (0)
+/* Like SGEN_LOG, but additionally gated on the runtime condition `cond`;
+ * the timestamp is only computed when the line will actually print. */
#define SGEN_COND_LOG(level, cond, format, ...) do {	\
-	if (G_UNLIKELY ((level) <= SGEN_MAX_DEBUG_LEVEL && (level) <= gc_debug_level)) {	\
-		if (cond)	\
-			mono_gc_printf (gc_debug_file, format "\n", ##__VA_ARGS__);	\
+	if (G_UNLIKELY ((level) <= SGEN_MAX_DEBUG_LEVEL && (level) <= gc_debug_level)) {		\
+		if (cond) {										\
+			char logTime[80];								\
+			LOG_TIMESTAMP;									\
+			mono_gc_printf (gc_debug_file, "%s " format "\n", logTime, ##__VA_ARGS__);	\
+		}											\
	} } while (0)
extern int gc_debug_level;
return GPOINTER_TO_UINT (ptr) >> 3;
}
-#define SGEN_PTR_IN_NURSERY(p,bits,start,end) (((mword)(p) & ~((1 << (bits)) - 1)) == (mword)(start))
+#define SGEN_PTR_IN_NURSERY(p,bits,start,end) (((mword)(p) & ~(((mword)1 << (bits)) - 1)) == (mword)(start))
#ifdef USER_CONFIG
List of what each bit on of the vtable gc bits means.
*/
enum {
+ // When the Java bridge has determined an object is "bridged", it uses these two bits to cache that information.
SGEN_GC_BIT_BRIDGE_OBJECT = 1,
SGEN_GC_BIT_BRIDGE_OPAQUE_OBJECT = 2,
SGEN_GC_BIT_FINALIZER_AWARE = 4,
void sgen_conservatively_pin_objects_from (void **start, void **end, void *start_nursery, void *end_nursery, int pin_type);
+gboolean sgen_gc_initialized (void);
+
/* Keep in sync with description_for_type() in sgen-internal.c! */
enum {
INTERNAL_MEM_PIN_QUEUE,
INTERNAL_MEM_CARDTABLE_MOD_UNION,
INTERNAL_MEM_BINARY_PROTOCOL,
INTERNAL_MEM_TEMPORARY,
+ INTERNAL_MEM_LOG_ENTRY,
+ INTERNAL_MEM_COMPLEX_DESCRIPTORS,
INTERNAL_MEM_FIRST_CLIENT
};
struct _SgenThreadInfo {
SgenClientThreadInfo client_info;
- char **tlab_next_addr;
- char **tlab_start_addr;
- char **tlab_temp_end_addr;
- char **tlab_real_end_addr;
-
-#ifndef HAVE_KW_THREAD
char *tlab_start;
char *tlab_next;
char *tlab_temp_end;
char *tlab_real_end;
-#endif
};
gboolean sgen_is_worker_thread (MonoNativeThreadId thread);
typedef void (*CopyOrMarkObjectFunc) (GCObject**, SgenGrayQueue*);
typedef void (*ScanObjectFunc) (GCObject *obj, SgenDescriptor desc, SgenGrayQueue*);
typedef void (*ScanVTypeFunc) (GCObject *full_object, char *start, SgenDescriptor desc, SgenGrayQueue* BINARY_PROTOCOL_ARG (size_t size));
+typedef void (*ScanPtrFieldFunc) (GCObject *obj, GCObject **ptr, SgenGrayQueue* queue);
typedef gboolean (*DrainGrayStackFunc) (SgenGrayQueue *queue);
typedef struct {
CopyOrMarkObjectFunc copy_or_mark_object;
ScanObjectFunc scan_object;
ScanVTypeFunc scan_vtype;
+ ScanPtrFieldFunc scan_ptr_field;
/* Drain stack optimized for the above functions */
DrainGrayStackFunc drain_gray_stack;
/*FIXME add allocation function? */
void sgen_free_internal_dynamic (void *addr, size_t size, int type);
void sgen_pin_stats_enable (void);
-void sgen_pin_stats_register_object (GCObject *obj, size_t size);
+void sgen_pin_stats_register_object (GCObject *obj, int generation);
void sgen_pin_stats_register_global_remset (GCObject *obj);
-void sgen_pin_stats_print_class_stats (void);
+void sgen_pin_stats_report (void);
void sgen_sort_addresses (void **array, size_t size);
void sgen_add_to_global_remset (gpointer ptr, GCObject *obj);
GCObject* (*alloc_for_promotion) (GCVTable vtable, GCObject *obj, size_t objsize, gboolean has_references);
SgenObjectOperations serial_ops;
+ SgenObjectOperations serial_ops_with_concurrent_major;
void (*prepare_to_space) (char *to_space_bitmap, size_t space_bitmap_size);
void (*clear_fragments) (void);
/* Updating references */
#ifdef SGEN_CHECK_UPDATE_REFERENCE
-gboolean sgen_thread_pool_is_thread_pool_thread (MonoNativeThreadId some_thread) MONO_INTERNAL;
+gboolean sgen_thread_pool_is_thread_pool_thread (MonoNativeThreadId some_thread);
+
static inline void
sgen_update_reference (GCObject **p, GCObject *o, gboolean allow_null)
{
typedef void (*sgen_cardtable_block_callback) (mword start, mword size);
void sgen_major_collector_iterate_live_block_ranges (sgen_cardtable_block_callback callback);
+void sgen_major_collector_iterate_block_ranges (sgen_cardtable_block_callback callback);
typedef enum {
ITERATE_OBJECTS_SWEEP = 1,
ITERATE_OBJECTS_NON_PINNED = 2,
ITERATE_OBJECTS_PINNED = 4,
- ITERATE_OBJECTS_ALL = ITERATE_OBJECTS_NON_PINNED | ITERATE_OBJECTS_PINNED,
ITERATE_OBJECTS_SWEEP_NON_PINNED = ITERATE_OBJECTS_SWEEP | ITERATE_OBJECTS_NON_PINNED,
ITERATE_OBJECTS_SWEEP_PINNED = ITERATE_OBJECTS_SWEEP | ITERATE_OBJECTS_PINNED,
ITERATE_OBJECTS_SWEEP_ALL = ITERATE_OBJECTS_SWEEP | ITERATE_OBJECTS_NON_PINNED | ITERATE_OBJECTS_PINNED
size_t num_unique_scanned_objects;
} ScannedObjectCounts;
+typedef enum {
+ CARDTABLE_SCAN_GLOBAL = 0,
+ CARDTABLE_SCAN_MOD_UNION = 1,
+ CARDTABLE_SCAN_MOD_UNION_PRECLEAN = CARDTABLE_SCAN_MOD_UNION | 2,
+} CardTableScanType;
+
typedef struct _SgenMajorCollector SgenMajorCollector;
struct _SgenMajorCollector {
size_t section_size;
void (*free_non_pinned_object) (GCObject *obj, size_t size);
void (*pin_objects) (SgenGrayQueue *queue);
void (*pin_major_object) (GCObject *obj, SgenGrayQueue *queue);
- void (*scan_card_table) (gboolean mod_union, ScanCopyContext ctx);
+ void (*scan_card_table) (CardTableScanType scan_type, ScanCopyContext ctx);
void (*iterate_live_block_ranges) (sgen_cardtable_block_callback callback);
+ void (*iterate_block_ranges) (sgen_cardtable_block_callback callback);
void (*update_cardtable_mod_union) (void);
void (*init_to_space) (void);
void (*sweep) (void);
gboolean (*have_swept) (void);
void (*finish_sweeping) (void);
- void (*free_swept_blocks) (size_t allowance);
+ void (*free_swept_blocks) (size_t section_reserve);
void (*check_scan_starts) (void);
void (*dump_heap) (FILE *heap_dump_file);
gint64 (*get_used_size) (void);
gboolean sgen_have_pending_finalizers (void);
void sgen_object_register_for_finalization (GCObject *obj, void *user_data);
-int sgen_gather_finalizers_if (SgenObjectPredicateFunc predicate, void *user_data, GCObject **out_array, int out_size);
+void sgen_finalize_if (SgenObjectPredicateFunc predicate, void *user_data);
void sgen_remove_finalizers_if (SgenObjectPredicateFunc predicate, void *user_data, int generation);
+void sgen_set_suspend_finalizers (void);
void sgen_register_disappearing_link (GCObject *obj, void **link, gboolean track, gboolean in_gc);
void sgen_ensure_free_space (size_t size, int generation);
void sgen_gc_collect (int generation);
-void sgen_perform_collection (size_t requested_size, int generation_to_collect, const char *reason, gboolean wait_to_finish);
+void sgen_perform_collection (size_t requested_size, int generation_to_collect, const char *reason, gboolean wait_to_finish, gboolean stw);
int sgen_gc_collection_count (int generation);
/* FIXME: what exactly does this return? */
/* STW */
-typedef struct {
- int generation;
- const char *reason;
- gboolean is_overflow;
- gint64 total_time;
- gint64 stw_time;
- gint64 bridge_time;
-} GGTimingInfo;
-
void sgen_stop_world (int generation);
-void sgen_restart_world (int generation, GGTimingInfo *timing);
+void sgen_restart_world (int generation);
gboolean sgen_is_world_stopped (void);
gboolean sgen_set_allow_synchronous_major (gboolean flag);
extern LOSObject *los_object_list;
extern mword los_memory_usage;
+extern mword los_memory_usage_total;
void sgen_los_free_object (LOSObject *obj);
void* sgen_los_alloc_large_inner (GCVTable vtable, size_t size);
gboolean sgen_ptr_is_in_los (char *ptr, char **start);
void sgen_los_iterate_objects (IterateObjectCallbackFunc cb, void *user_data);
void sgen_los_iterate_live_block_ranges (sgen_cardtable_block_callback callback);
-void sgen_los_scan_card_table (gboolean mod_union, ScanCopyContext ctx);
+void sgen_los_scan_card_table (CardTableScanType scan_type, ScanCopyContext ctx);
void sgen_los_update_cardtable_mod_union (void);
void sgen_los_count_cards (long long *num_total_cards, long long *num_marked_cards);
gboolean sgen_los_is_valid_object (char *object);
extern guint32 tlab_size;
extern NurseryClearPolicy nursery_clear_policy;
extern gboolean sgen_try_free_some_memory;
-
+extern mword total_promoted_size;
+extern mword total_allocated_major;
+extern volatile gboolean sgen_suspend_finalizers;
extern MonoCoopMutex gc_mutex;
/* Nursery helpers. */
ATYPE_NUM
} SgenAllocatorType;
-void sgen_init_tlab_info (SgenThreadInfo* info);
void sgen_clear_tlabs (void);
GCObject* sgen_alloc_obj (GCVTable vtable, size_t size);
/* Debug support */
-void sgen_check_consistency (void);
+void sgen_check_remset_consistency (void);
void sgen_check_mod_union_consistency (void);
void sgen_check_major_refs (void);
void sgen_check_whole_heap (gboolean allow_missing_pinning);
/* Utilities */
-void sgen_qsort (void *base, size_t nel, size_t width, int (*compar) (const void*, const void*));
+void sgen_qsort (void *array, size_t count, size_t element_size, int (*compare) (const void*, const void*));
gint64 sgen_timestamp (void);
/*
#define CANARY_VALID(addr) (strncmp ((char*) (addr), CANARY_STRING, CANARY_SIZE) == 0)
+/* On canary corruption, dump the object bytes, the canary zone, and a
+ * surrounding nursery window to stderr, and scan +/-8 bytes around the
+ * expected canary location for a displaced (valid) canary — a displaced
+ * hit suggests the reported object size is wrong.
+ * NOTE(review): the `fail` argument is no longer consulted on this path
+ * (the old g_error/g_warning split was removed) — confirm callers do not
+ * rely on the aborting behavior. */
#define CHECK_CANARY_FOR_OBJECT(addr,fail) if (nursery_canaries_enabled ()) {	\
-		char* canary_ptr = (char*) (addr) + sgen_safe_object_get_size_unaligned ((GCObject *) (addr));	\
+		guint size = sgen_safe_object_get_size_unaligned ((GCObject *) (addr));		\
+		char* canary_ptr = (char*) (addr) + size;	\
		if (!CANARY_VALID(canary_ptr)) {	\
-			char canary_copy[CANARY_SIZE +1];	\
-			strncpy (canary_copy, canary_ptr, CANARY_SIZE);	\
-			canary_copy[CANARY_SIZE] = 0;	\
-			if ((fail))	\
-				g_error ("CORRUPT CANARY:\naddr->%p\ntype->%s\nexcepted->'%s'\nfound->'%s'\n", (char*) addr, sgen_client_vtable_get_name (SGEN_LOAD_VTABLE ((addr))), CANARY_STRING, canary_copy);	\
-			else	\
-				g_warning ("CORRUPT CANARY:\naddr->%p\ntype->%s\nexcepted->'%s'\nfound->'%s'\n", (char*) addr, sgen_client_vtable_get_name (SGEN_LOAD_VTABLE ((addr))), CANARY_STRING, canary_copy); \
+			char *window_start, *window_end; \
+			window_start = (char*)(addr) - 128; \
+			if (!sgen_ptr_in_nursery (window_start)) \
+				window_start = sgen_get_nursery_start (); \
+			window_end = (char*)(addr) + 128; \
+			if (!sgen_ptr_in_nursery (window_end)) \
+				window_end = sgen_get_nursery_end (); \
+			fprintf (stderr, "\nCANARY ERROR - Type:%s Size:%u Address:%p Data:\n", sgen_client_vtable_get_name (SGEN_LOAD_VTABLE ((addr))), size, (char*) addr); \
+			fwrite (addr, sizeof (char), size, stderr); \
+			fprintf (stderr, "\nCanary zone (next 12 chars):\n"); \
+			fwrite (canary_ptr, sizeof (char), 12, stderr); \
+			fprintf (stderr, "\nOriginal canary string:\n"); \
+			fwrite (CANARY_STRING, sizeof (char), 8, stderr); \
+			for (int x = -8; x <= 8; x++) { \
+				if (canary_ptr + x < (char*) addr) \
+					continue; \
+				if (CANARY_VALID(canary_ptr + x)) \
+					fprintf (stderr, "\nCANARY ERROR - canary found at offset %d\n", x); \
+			} \
+			fprintf (stderr, "\nSurrounding nursery (%p - %p):\n", window_start, window_end); \
+			fwrite (window_start, sizeof (char), window_end - window_start, stderr); \
		} }
/*