[MethodImplAttribute (MethodImplOptions.InternalCall)]
static extern string GetNativeStackTrace (Exception exception);
- [MethodImplAttribute (MethodImplOptions.InternalCall)]
- public static extern bool SetGCAllowSynchronousMajor (bool flag);
+ public static bool SetGCAllowSynchronousMajor (bool flag)
+ {
+ // No longer used
+ return true;
+ }
}
}
return NULL;
}
-gboolean
-mono_gc_set_allow_synchronous_major (gboolean flag)
-{
- return flag;
-}
/* Toggleref support */
void
void ves_icall_System_GC_register_ephemeron_array (MonoObject *array);
MonoObject *ves_icall_System_GC_get_ephemeron_tombstone (void);
-MonoBoolean ves_icall_Mono_Runtime_SetGCAllowSynchronousMajor (MonoBoolean flag);
extern void mono_gc_init (void);
extern void mono_gc_base_init (void);
/*Ephemeron functionality. Sgen only*/
gboolean mono_gc_ephemeron_array_add (MonoObject *obj);
-/* To disable synchronous, evacuating collections - concurrent SGen only */
-gboolean mono_gc_set_allow_synchronous_major (gboolean flag);
-
MonoBoolean
mono_gc_GCHandle_CheckCurrentDomain (guint32 gchandle);
return NULL;
}
-MonoBoolean
-ves_icall_Mono_Runtime_SetGCAllowSynchronousMajor (MonoBoolean flag)
-{
- return mono_gc_set_allow_synchronous_major (flag);
-}
-
MonoBoolean
mono_gc_GCHandle_CheckCurrentDomain (guint32 gchandle)
{
ICALL_TYPE(RUNTIME, "Mono.Runtime", RUNTIME_1)
ICALL(RUNTIME_1, "GetDisplayName", ves_icall_Mono_Runtime_GetDisplayName)
ICALL(RUNTIME_12, "GetNativeStackTrace", ves_icall_Mono_Runtime_GetNativeStackTrace)
-ICALL(RUNTIME_13, "SetGCAllowSynchronousMajor", ves_icall_Mono_Runtime_SetGCAllowSynchronousMajor)
#ifndef PLATFORM_RO_FS
ICALL_TYPE(KPAIR, "Mono.Security.Cryptography.KeyPairPersistence", KPAIR_1)
{
}
-gboolean
-mono_gc_set_allow_synchronous_major (gboolean flag)
-{
- return TRUE;
-}
-
gboolean
mono_gc_is_null (void)
{
#endif
}
+static void G_GNUC_UNUSED
+sgen_client_binary_protocol_mod_union_remset (gpointer obj, gpointer ptr, gpointer value, gpointer value_vtable)
+{
+}
+
static void G_GNUC_UNUSED
sgen_client_binary_protocol_ptr_update (gpointer ptr, gpointer old_value, gpointer new_value, gpointer vtable, size_t size)
{
mono_gc_wait_for_bridge_processing ();
}
-gboolean
-mono_gc_set_allow_synchronous_major (gboolean flag)
-{
- return sgen_set_allow_synchronous_major (flag);
-}
-
void*
mono_gc_invoke_with_gc_lock (MonoGCLockedCallbackFunc func, void *data)
{
update_mod_union (guint8 *dest, guint8 *start_card, size_t num_cards)
{
int i;
- for (i = 0; i < num_cards; ++i)
- dest [i] |= start_card [i];
+ /* Marking from another thread can happen while we mark here */
+ for (i = 0; i < num_cards; ++i) {
+ if (start_card [i])
+ dest [i] = 1;
+ }
}
guint8*
each collection */
static gboolean do_scan_starts_check = FALSE;
-/*
- * If the major collector is concurrent and this is FALSE, we will
- * never initiate a synchronous major collection, unless requested via
- * GC.Collect().
- */
-static gboolean allow_synchronous_major = TRUE;
static gboolean disable_minor_collections = FALSE;
static gboolean disable_major_collections = FALSE;
static gboolean do_verify_nursery = FALSE;
goto done;
}
- /*
- * If we've been asked to do a major collection, and the major collector wants to
- * run synchronously (to evacuate), we set the flag to do that.
- */
- if (generation_to_collect == GENERATION_OLD &&
- allow_synchronous_major &&
- major_collector.want_synchronous_collection &&
- *major_collector.want_synchronous_collection) {
- wait_to_finish = TRUE;
- }
-
SGEN_ASSERT (0, !concurrent_collection_in_progress, "Why did this not get handled above?");
/*
return tot;
}
-gboolean
-sgen_set_allow_synchronous_major (gboolean flag)
-{
- if (!major_collector.is_concurrent)
- return flag;
-
- allow_synchronous_major = flag;
- return TRUE;
-}
-
void
sgen_env_var_error (const char *env_var, const char *fallback, const char *description_format, ...)
{
}
continue;
}
- if (g_str_has_prefix (opt, "allow-synchronous-major=")) {
- if (!major_collector.is_concurrent) {
- sgen_env_var_error (MONO_GC_PARAMS_NAME, "Ignoring.", "`allow-synchronous-major` is only valid for the concurrent major collector.");
- continue;
- }
-
- opt = strchr (opt, '=') + 1;
-
- if (!strcmp (opt, "yes")) {
- allow_synchronous_major = TRUE;
- } else if (!strcmp (opt, "no")) {
- allow_synchronous_major = FALSE;
- } else {
- sgen_env_var_error (MONO_GC_PARAMS_NAME, "Using default value.", "`allow-synchronous-major` must be either `yes' or `no'.");
- continue;
- }
- }
if (!strcmp (opt, "cementing")) {
cement_enabled = TRUE;
fprintf (stderr, " minor=COLLECTOR (where COLLECTOR is `simple' or `split')\n");
fprintf (stderr, " wbarrier=WBARRIER (where WBARRIER is `remset' or `cardtable')\n");
fprintf (stderr, " [no-]cementing\n");
- if (major_collector.is_concurrent)
- fprintf (stderr, " allow-synchronous-major=FLAG (where FLAG is `yes' or `no')\n");
if (major_collector.print_gc_param_usage)
major_collector.print_gc_param_usage ();
if (sgen_minor_collector.print_gc_param_usage)
gboolean supports_cardtable;
gboolean sweeps_lazily;
- /*
- * This is set to TRUE by the sweep if the next major
- * collection should be synchronous (for evacuation). For
- * non-concurrent collectors, this should be NULL.
- */
- gboolean *want_synchronous_collection;
-
void* (*alloc_heap) (mword nursery_size, mword nursery_align, int nursery_bits);
gboolean (*is_object_live) (GCObject *obj);
GCObject* (*alloc_small_pinned_obj) (GCVTable vtable, size_t size, gboolean has_references);
block = MS_BLOCK_FOR_OBJ (obj);
+#ifdef COPY_OR_MARK_CONCURRENT
+ if (G_UNLIKELY (major_block_is_evacuating (block))) {
+ /*
+ * We don't copy within the concurrent phase. These objects will
+ * be handled below in the finishing pause, by scanning the mod-union
+ * card table.
+ */
+ return FALSE;
+ }
+#endif
+
#ifdef COPY_OR_MARK_WITH_EVACUATION
- {
- int size_index = block->obj_size_index;
-
- if (evacuate_block_obj_sizes [size_index] && !block->has_pinned) {
- HEAVY_STAT (++stat_optimized_copy_major_small_evacuate);
- if (block->is_to_space)
- return FALSE;
- goto do_copy_object;
- }
+ if (major_block_is_evacuating (block)) {
+ HEAVY_STAT (++stat_optimized_copy_major_small_evacuate);
+ goto do_copy_object;
}
#endif
#ifdef COPY_OR_MARK_CONCURRENT
#define HANDLE_PTR(ptr,obj) do { \
GCObject *__old = *(ptr); \
- binary_protocol_scan_process_reference ((obj), (ptr), __old); \
+ binary_protocol_scan_process_reference ((full_object), (ptr), __old); \
if (__old && !sgen_ptr_in_nursery (__old)) { \
- PREFETCH_READ (__old); \
- COPY_OR_MARK_FUNCTION_NAME ((ptr), __old, queue); \
+ MSBlockInfo *block = MS_BLOCK_FOR_OBJ (__old); \
+ if (G_UNLIKELY (!sgen_ptr_in_nursery (ptr) && \
+ sgen_safe_object_is_small (__old, sgen_obj_get_descriptor (__old) & DESC_TYPE_MASK) && \
+ major_block_is_evacuating (block))) { \
+ mark_mod_union_card ((full_object), (void**)(ptr), __old); \
+ } else { \
+ PREFETCH_READ (__old); \
+ COPY_OR_MARK_FUNCTION_NAME ((ptr), __old, queue); \
+ } \
} else { \
if (G_UNLIKELY (sgen_ptr_in_nursery (__old) && !sgen_ptr_in_nursery ((ptr)))) \
- mark_mod_union_card ((full_object), (void**)(ptr)); \
+ mark_mod_union_card ((full_object), (void**)(ptr), __old); \
} \
} while (0)
#else
#define HANDLE_PTR(ptr,obj) do { \
void *__old = *(ptr); \
- binary_protocol_scan_process_reference ((obj), (ptr), __old); \
+ binary_protocol_scan_process_reference ((full_object), (ptr), __old); \
if (__old) { \
gboolean __still_in_nursery = COPY_OR_MARK_FUNCTION_NAME ((ptr), __old, queue); \
if (G_UNLIKELY (__still_in_nursery && !sgen_ptr_in_nursery ((ptr)) && !SGEN_OBJECT_IS_CEMENTED (*(ptr)))) { \
#undef HANDLE_PTR
#define HANDLE_PTR(ptr,obj) do { \
void *__old = *(ptr); \
- binary_protocol_scan_process_reference ((obj), (ptr), __old); \
+ binary_protocol_scan_process_reference ((full_object), (ptr), __old); \
if (__old) { \
- gboolean __still_in_nursery = major_copy_or_mark_object_no_evacuation ((ptr), __old, queue); \
+ gboolean __still_in_nursery = major_copy_or_mark_object_with_evacuation ((ptr), __old, queue); \
if (G_UNLIKELY (__still_in_nursery && !sgen_ptr_in_nursery ((ptr)) && !SGEN_OBJECT_IS_CEMENTED (*(ptr)))) { \
void *__copy = *(ptr); \
sgen_add_to_global_remset ((ptr), __copy); \
static gboolean *evacuate_block_obj_sizes;
static float evacuation_threshold = 0.666f;
-static float concurrent_evacuation_threshold = 0.666f;
-static gboolean want_evacuation = FALSE;
static gboolean lazy_sweep = FALSE;
static guint64 stat_major_blocks_alloced = 0;
static guint64 stat_major_blocks_freed = 0;
static guint64 stat_major_blocks_lazy_swept = 0;
-static guint64 stat_major_objects_evacuated = 0;
#if SIZEOF_VOID_P != 8
static guint64 stat_major_blocks_freed_ideal = 0;
* Blocks that are to-space are not evacuated from. During a major collection
* blocks are allocated for two reasons: evacuating objects from the nursery and
* evacuating them from major blocks marked for evacuation. In both cases we don't
- * want further evacuation.
+ * want further evacuation. We also don't want to evacuate objects allocated during
+ * the concurrent mark since it would add pointless stress on the finishing pause.
*/
- info->is_to_space = (sgen_get_current_collection_generation () == GENERATION_OLD);
- info->state = (info->is_to_space || sgen_concurrent_collection_in_progress ()) ? BLOCK_STATE_MARKING : BLOCK_STATE_SWEPT;
+ info->is_to_space = (sgen_get_current_collection_generation () == GENERATION_OLD) || sgen_concurrent_collection_in_progress ();
+ info->state = info->is_to_space ? BLOCK_STATE_MARKING : BLOCK_STATE_SWEPT;
SGEN_ASSERT (6, !sweep_in_progress () || info->state == BLOCK_STATE_SWEPT, "How do we add a new block to be swept while sweeping?");
info->cardtable_mod_union = NULL;
* Mark the mod-union card for `ptr`, which must be a reference within the object `obj`.
*/
static void
-mark_mod_union_card (GCObject *obj, void **ptr)
+mark_mod_union_card (GCObject *obj, void **ptr, GCObject *value_obj)
{
int type = sgen_obj_get_descriptor (obj) & DESC_TYPE_MASK;
if (sgen_safe_object_is_small (obj, type)) {
} else {
sgen_los_mark_mod_union_card (obj, ptr);
}
+
+ binary_protocol_mod_union_remset (obj, ptr, value_obj, SGEN_LOAD_VTABLE (value_obj));
+}
+
+static inline gboolean
+major_block_is_evacuating (MSBlockInfo *block)
+{
+ if (evacuate_block_obj_sizes [block->obj_size_index] &&
+ !block->has_pinned &&
+ !block->is_to_space)
+ return TRUE;
+ return FALSE;
}
#define LOAD_VTABLE SGEN_LOAD_VTABLE
static guint64 stat_drain_loops;
#endif
-static void major_scan_object_with_evacuation (GCObject *start, mword desc, SgenGrayQueue *queue);
-
#define COPY_OR_MARK_FUNCTION_NAME major_copy_or_mark_object_no_evacuation
#define SCAN_OBJECT_FUNCTION_NAME major_scan_object_no_evacuation
#define DRAIN_GRAY_STACK_FUNCTION_NAME drain_gray_stack_no_evacuation
static void
major_copy_or_mark_object_concurrent_finish_canonical (GCObject **ptr, SgenGrayQueue *queue)
{
- major_copy_or_mark_object_no_evacuation (ptr, *ptr, queue);
+ major_copy_or_mark_object_with_evacuation (ptr, *ptr, queue);
}
static void
static void
sweep_finish (void)
{
- mword total_evacuate_heap = 0;
- mword total_evacuate_saved = 0;
int i;
for (i = 0; i < num_block_obj_sizes; ++i) {
} else {
evacuate_block_obj_sizes [i] = FALSE;
}
- {
- mword total_bytes = block_obj_sizes [i] * sweep_slots_available [i];
- total_evacuate_heap += total_bytes;
- if (evacuate_block_obj_sizes [i])
- total_evacuate_saved += total_bytes - block_obj_sizes [i] * sweep_slots_used [i];
- }
}
- want_evacuation = (float)total_evacuate_saved / (float)total_evacuate_heap > (1 - concurrent_evacuation_threshold);
-
set_sweep_state (SWEEP_STATE_SWEPT, SWEEP_STATE_COMPACTING);
}
mono_counters_register ("# major blocks allocated", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_major_blocks_alloced);
mono_counters_register ("# major blocks freed", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_major_blocks_freed);
mono_counters_register ("# major blocks lazy swept", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_major_blocks_lazy_swept);
- mono_counters_register ("# major objects evacuated", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_major_objects_evacuated);
#if SIZEOF_VOID_P != 8
mono_counters_register ("# major blocks freed ideally", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_major_blocks_freed_ideal);
mono_counters_register ("# major blocks freed less ideally", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_major_blocks_freed_less_ideal);
concurrent_mark = is_concurrent;
collector->is_concurrent = is_concurrent;
collector->needs_thread_pool = is_concurrent || concurrent_sweep;
- if (is_concurrent)
- collector->want_synchronous_collection = &want_evacuation;
- else
- collector->want_synchronous_collection = NULL;
collector->get_and_reset_num_major_objects_marked = major_get_and_reset_num_major_objects_marked;
collector->supports_cardtable = TRUE;
collector->major_ops_concurrent_start.drain_gray_stack = drain_gray_stack_concurrent;
collector->major_ops_concurrent_finish.copy_or_mark_object = major_copy_or_mark_object_concurrent_finish_canonical;
- collector->major_ops_concurrent_finish.scan_object = major_scan_object_no_evacuation;
+ collector->major_ops_concurrent_finish.scan_object = major_scan_object_with_evacuation;
collector->major_ops_concurrent_finish.scan_vtype = major_scan_vtype_concurrent_finish;
- collector->major_ops_concurrent_finish.drain_gray_stack = drain_gray_stack_no_evacuation;
+ collector->major_ops_concurrent_finish.drain_gray_stack = drain_gray_stack;
}
#ifdef HEAVY_STATISTICS
mono_counters_register ("Optimized copy major", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_optimized_copy_major);
mono_counters_register ("Optimized copy major small fast", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_optimized_copy_major_small_fast);
mono_counters_register ("Optimized copy major small slow", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_optimized_copy_major_small_slow);
+ mono_counters_register ("Optimized copy major small evacuate", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_optimized_copy_major_small_evacuate);
mono_counters_register ("Optimized copy major large", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_optimized_copy_major_large);
mono_counters_register ("Optimized major scan", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_optimized_major_scan);
mono_counters_register ("Optimized major scan no refs", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_optimized_major_scan_no_refs);
#define HANDLE_PTR(ptr,obj) do { \
void *__old = *(ptr); \
SGEN_OBJECT_LAYOUT_STATISTICS_MARK_BITMAP ((obj), (ptr)); \
- binary_protocol_scan_process_reference ((obj), (ptr), __old); \
+ binary_protocol_scan_process_reference ((full_object), (ptr), __old); \
if (__old) { \
SERIAL_COPY_OBJECT_FROM_OBJ ((ptr), queue); \
SGEN_COND_LOG (9, __old != *(ptr), "Overwrote field at %p with %p (was: %p)", (ptr), *(ptr), __old); \
} while (0)
static void
-SERIAL_SCAN_OBJECT (GCObject *object, SgenDescriptor desc, SgenGrayQueue *queue)
+SERIAL_SCAN_OBJECT (GCObject *full_object, SgenDescriptor desc, SgenGrayQueue *queue)
{
- char *start = (char*)object;
+ char *start = (char*)full_object;
SGEN_OBJECT_LAYOUT_STATISTICS_DECLARE_BITMAP;
IS_VTABLE_MATCH (FALSE)
END_PROTOCOL_ENTRY_HEAVY
+BEGIN_PROTOCOL_ENTRY_HEAVY4 (binary_protocol_mod_union_remset, TYPE_POINTER, obj, TYPE_POINTER, ptr, TYPE_POINTER, value, TYPE_POINTER, value_vtable)
+DEFAULT_PRINT ()
+IS_ALWAYS_MATCH (FALSE)
+MATCH_INDEX (ptr == entry->obj ? 0 : ptr == entry->ptr ? 1 : ptr == entry->value ? 2 : BINARY_PROTOCOL_NO_MATCH)
+IS_VTABLE_MATCH (ptr == entry->value_vtable)
+END_PROTOCOL_ENTRY_HEAVY
+
#undef BEGIN_PROTOCOL_ENTRY0
#undef BEGIN_PROTOCOL_ENTRY1
#undef BEGIN_PROTOCOL_ENTRY2