each collection */
static gboolean do_scan_starts_check = FALSE;
-/*
- * If the major collector is concurrent and this is FALSE, we will
- * never initiate a synchronous major collection, unless requested via
- * GC.Collect().
- */
-static gboolean allow_synchronous_major = TRUE;
static gboolean disable_minor_collections = FALSE;
static gboolean disable_major_collections = FALSE;
static gboolean do_verify_nursery = FALSE;
static gboolean do_dump_nursery_content = FALSE;
static gboolean enable_nursery_canaries = FALSE;
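+/*
+ * If enabled, the concurrent workers preclean the mod union cards while the
+ * mutator is running, leaving less card scanning for the finishing pause.
+ */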
+static gboolean precleaning_enabled = TRUE;
+
#ifdef HEAVY_STATISTICS
guint64 stat_objects_alloced_degraded = 0;
guint64 stat_bytes_alloced_degraded = 0;
* ######## Global data.
* ######################################################################
*/
-LOCK_DECLARE (gc_mutex);
+MonoCoopMutex gc_mutex;
gboolean sgen_try_free_some_memory;
#define SCAN_START_SIZE SGEN_SCAN_START_SIZE
static volatile mword lowest_heap_address = ~(mword)0;
static volatile mword highest_heap_address = 0;
-LOCK_DECLARE (sgen_interruption_mutex);
+MonoCoopMutex sgen_interruption_mutex;
int current_collection_generation = -1;
static volatile gboolean concurrent_collection_in_progress = FALSE;
GrayQueueSection *section = sgen_gray_object_dequeue_section (queue);
if (!section)
break;
- sgen_section_gray_queue_enqueue (queue->alloc_prepare_data, section);
+ sgen_section_gray_queue_enqueue ((SgenSectionGrayQueue *)queue->alloc_prepare_data, section);
wake = TRUE;
}
}
if (allow_flags) {
- if (!(obj = SGEN_OBJECT_IS_FORWARDED (start)))
+ if (!(obj = (char *)SGEN_OBJECT_IS_FORWARDED (start)))
obj = start;
} else {
obj = start;
* lock must be held. For serial collectors that is not necessary.
*/
void
-sgen_add_to_global_remset (gpointer ptr, gpointer obj)
+sgen_add_to_global_remset (gpointer ptr, GCObject *obj)
{
SGEN_ASSERT (5, sgen_ptr_in_nursery (obj), "Target pointer of global remset must be in the nursery");
* frequently after each object is copied, to achieve better locality and cache
* usage.
*
- * max_objs is the maximum number of objects to scan, or -1 to scan until the stack is
- * empty.
*/
gboolean
-sgen_drain_gray_stack (int max_objs, ScanCopyContext ctx)
+sgen_drain_gray_stack (ScanCopyContext ctx)
{
ScanObjectFunc scan_func = ctx.ops->scan_object;
GrayQueue *queue = ctx.queue;
- if (current_collection_generation == GENERATION_OLD && major_collector.drain_gray_stack)
- return major_collector.drain_gray_stack (ctx);
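+ /* Use the collector-provided specialized drain, if there is one. */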
+ if (ctx.ops->drain_gray_stack)
+ return ctx.ops->drain_gray_stack (queue);
- do {
- int i;
- for (i = 0; i != max_objs; ++i) {
- GCObject *obj;
- SgenDescriptor desc;
- GRAY_OBJECT_DEQUEUE (queue, &obj, &desc);
- if (!obj)
- return TRUE;
- SGEN_LOG (9, "Precise gray object scan %p (%s)", obj, sgen_client_vtable_get_name (SGEN_LOAD_VTABLE (obj)));
- scan_func (obj, desc, queue);
- }
- } while (max_objs < 0);
+ for (;;) {
+ GCObject *obj;
+ SgenDescriptor desc;
+ GRAY_OBJECT_DEQUEUE (queue, &obj, &desc);
+ if (!obj)
+ return TRUE;
+ SGEN_LOG (9, "Precise gray object scan %p (%s)", obj, sgen_client_vtable_get_name (SGEN_LOAD_VTABLE (obj)));
+ scan_func (obj, desc, queue);
+ }
return FALSE;
}
{
int count = 0;
+ SGEN_ASSERT (0, ((mword)start & (SIZEOF_VOID_P - 1)) == 0, "Why are we scanning for references in unaligned memory?");
+
#if defined(VALGRIND_MAKE_MEM_DEFINED_IF_ADDRESSABLE) && !defined(_WIN64)
VALGRIND_MAKE_MEM_DEFINED_IF_ADDRESSABLE (start, (char*)end - (char*)start);
#endif
RootRecord *root;
SGEN_LOG (2, "Scanning pinned roots (%d bytes, %d/%d entries)", (int)roots_size, roots_hash [ROOT_TYPE_NORMAL].num_entries, roots_hash [ROOT_TYPE_PINNED].num_entries);
/* objects pinned from the API are inside these roots */
- SGEN_HASH_TABLE_FOREACH (&roots_hash [ROOT_TYPE_PINNED], start_root, root) {
+ SGEN_HASH_TABLE_FOREACH (&roots_hash [ROOT_TYPE_PINNED], void **, start_root, RootRecord *, root) {
SGEN_LOG (6, "Pinned roots %p-%p", start_root, root->end_root);
sgen_conservatively_pin_objects_from (start_root, (void**)root->end_root, start_nursery, end_nursery, PIN_TYPE_OTHER);
} SGEN_HASH_TABLE_FOREACH_END;
static void
single_arg_user_copy_or_mark (GCObject **obj, void *gc_data)
{
- ScanCopyContext *ctx = gc_data;
+ ScanCopyContext *ctx = (ScanCopyContext *)gc_data;
ctx->ops->copy_or_mark_object (obj, ctx->queue);
}
}
return;
case ROOT_DESC_COMPLEX: {
- gsize *bitmap_data = sgen_get_complex_descriptor_bitmap (desc);
+ gsize *bitmap_data = (gsize *)sgen_get_complex_descriptor_bitmap (desc);
gsize bwords = (*bitmap_data) - 1;
void **start_run = start_root;
bitmap_data++;
* objects in the existing nursery.
*/
/* FIXME: handle OOM */
- section = sgen_alloc_internal (INTERNAL_MEM_SECTION);
+ section = (GCMemSection *)sgen_alloc_internal (INTERNAL_MEM_SECTION);
alloc_size = sgen_nursery_size;
/* If there isn't enough space even for the nursery we should simply abort. */
g_assert (sgen_memgov_try_alloc_space (alloc_size, SPACE_NURSERY));
- data = major_collector.alloc_heap (alloc_size, alloc_size, DEFAULT_NURSERY_BITS);
+ data = (char *)major_collector.alloc_heap (alloc_size, alloc_size, DEFAULT_NURSERY_BITS);
sgen_update_heap_boundaries ((mword)data, (mword)(data + sgen_nursery_size));
SGEN_LOG (4, "Expanding nursery size (%p-%p): %lu, total: %lu", data, data + alloc_size, (unsigned long)sgen_nursery_size, (unsigned long)sgen_gc_get_total_heap_allocation ());
section->data = section->next_data = data;
section->size = alloc_size;
section->end_data = data + sgen_nursery_size;
scan_starts = (alloc_size + SCAN_START_SIZE - 1) / SCAN_START_SIZE;
- section->scan_starts = sgen_alloc_internal_dynamic (sizeof (char*) * scan_starts, INTERNAL_MEM_SCAN_STARTS, TRUE);
+ section->scan_starts = (char **)sgen_alloc_internal_dynamic (sizeof (char*) * scan_starts, INTERNAL_MEM_SCAN_STARTS, TRUE);
section->num_scan_start = scan_starts;
nursery_section = section;
size_t i;
for (i = 0; i < fin_queue->next_slot; ++i) {
- GCObject *obj = fin_queue->data [i];
+ GCObject *obj = (GCObject *)fin_queue->data [i];
if (!obj)
continue;
SGEN_LOG (5, "Scan of fin ready object: %p (%s)\n", obj, sgen_client_vtable_get_name (SGEN_LOAD_VTABLE (obj)));
char *end_addr = generation == GENERATION_NURSERY ? sgen_get_nursery_end () : (char*)-1;
SgenGrayQueue *queue = ctx.queue;
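+ /* Bracket the gray stack finishing in the binary protocol for offline analysis. */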
+ binary_protocol_finish_gray_stack_start (sgen_timestamp (), generation);
/*
* We copied all the reachable objects. Now it's the time to copy
* the objects that were not referenced by the roots, but by the copied objects.
* To achieve better cache locality and cache usage, we drain the gray stack
* frequently, after each object is copied, and just finish the work here.
*/
- sgen_drain_gray_stack (-1, ctx);
+ sgen_drain_gray_stack (ctx);
TV_GETTIME (atv);
SGEN_LOG (2, "%s generation done", generation_name (generation));
done_with_ephemerons = 0;
do {
done_with_ephemerons = sgen_client_mark_ephemerons (ctx);
- sgen_drain_gray_stack (-1, ctx);
+ sgen_drain_gray_stack (ctx);
++ephemeron_rounds;
} while (!done_with_ephemerons);
if (sgen_client_bridge_need_processing ()) {
/*Make sure the gray stack is empty before we process bridge objects so we get liveness right*/
- sgen_drain_gray_stack (-1, ctx);
+ sgen_drain_gray_stack (ctx);
sgen_collect_bridge_objects (generation, ctx);
if (generation == GENERATION_OLD)
sgen_collect_bridge_objects (GENERATION_NURSERY, ctx);
Make sure we drain the gray stack before processing disappearing links and finalizers.
If we don't make sure it is empty we might wrongly see a live object as dead.
*/
- sgen_drain_gray_stack (-1, ctx);
+ sgen_drain_gray_stack (ctx);
/*
We must clear weak links that don't track resurrection before processing object ready for
sgen_finalize_in_range (GENERATION_NURSERY, ctx);
/* drain the new stack that might have been created */
SGEN_LOG (6, "Precise scan of gray area post fin");
- sgen_drain_gray_stack (-1, ctx);
+ sgen_drain_gray_stack (ctx);
/*
* This must be done again after processing finalizable objects since CWL slots are cleared only after the key is finalized.
done_with_ephemerons = 0;
do {
done_with_ephemerons = sgen_client_mark_ephemerons (ctx);
- sgen_drain_gray_stack (-1, ctx);
+ sgen_drain_gray_stack (ctx);
++ephemeron_rounds;
} while (!done_with_ephemerons);
sgen_client_clear_togglerefs (start_addr, end_addr, ctx);
TV_GETTIME (btv);
- SGEN_LOG (2, "Finalize queue handling scan for %s generation: %ld usecs %d ephemeron rounds", generation_name (generation), TV_ELAPSED (atv, btv), ephemeron_rounds);
+ SGEN_LOG (2, "Finalize queue handling scan for %s generation: %lld usecs %d ephemeron rounds", generation_name (generation), TV_ELAPSED (atv, btv), ephemeron_rounds);
/*
* handle disappearing links
sgen_null_link_in_range (GENERATION_NURSERY, ctx, TRUE);
if (sgen_gray_object_queue_is_empty (queue))
break;
- sgen_drain_gray_stack (-1, ctx);
+ sgen_drain_gray_stack (ctx);
}
g_assert (sgen_gray_object_queue_is_empty (queue));
sgen_gray_object_queue_trim_free_list (queue);
+ binary_protocol_finish_gray_stack_end (sgen_timestamp (), generation);
}
void
{
void **start_root;
RootRecord *root;
- SGEN_HASH_TABLE_FOREACH (&roots_hash [root_type], start_root, root) {
+ SGEN_HASH_TABLE_FOREACH (&roots_hash [root_type], void **, start_root, RootRecord *, root) {
SGEN_LOG (6, "Precise root scan %p-%p (desc: %p)", start_root, root->end_root, (void*)root->root_desc);
precisely_scan_objects_from (start_root, (void**)root->end_root, addr_start, addr_end, root->root_desc, ctx);
} SGEN_HASH_TABLE_FOREACH_END;
static void
job_remembered_set_scan (void *worker_data_untyped, SgenThreadPoolJob *job)
{
- WorkerData *worker_data = worker_data_untyped;
+ WorkerData *worker_data = (WorkerData *)worker_data_untyped;
ScanJob *job_data = (ScanJob*)job;
ScanCopyContext ctx = CONTEXT_FROM_OBJECT_OPERATIONS (job_data->ops, sgen_workers_get_job_gray_queue (worker_data));
remset.scan_remsets (ctx);
static void
job_scan_from_registered_roots (void *worker_data_untyped, SgenThreadPoolJob *job)
{
- WorkerData *worker_data = worker_data_untyped;
+ WorkerData *worker_data = (WorkerData *)worker_data_untyped;
ScanFromRegisteredRootsJob *job_data = (ScanFromRegisteredRootsJob*)job;
ScanCopyContext ctx = CONTEXT_FROM_OBJECT_OPERATIONS (job_data->ops, sgen_workers_get_job_gray_queue (worker_data));
static void
job_scan_thread_data (void *worker_data_untyped, SgenThreadPoolJob *job)
{
- WorkerData *worker_data = worker_data_untyped;
+ WorkerData *worker_data = (WorkerData *)worker_data_untyped;
ScanThreadDataJob *job_data = (ScanThreadDataJob*)job;
ScanCopyContext ctx = CONTEXT_FROM_OBJECT_OPERATIONS (job_data->ops, sgen_workers_get_job_gray_queue (worker_data));
static void
job_scan_finalizer_entries (void *worker_data_untyped, SgenThreadPoolJob *job)
{
- WorkerData *worker_data = worker_data_untyped;
+ WorkerData *worker_data = (WorkerData *)worker_data_untyped;
ScanFinalizerEntriesJob *job_data = (ScanFinalizerEntriesJob*)job;
ScanCopyContext ctx = CONTEXT_FROM_OBJECT_OPERATIONS (job_data->ops, sgen_workers_get_job_gray_queue (worker_data));
static void
job_scan_major_mod_union_card_table (void *worker_data_untyped, SgenThreadPoolJob *job)
{
- WorkerData *worker_data = worker_data_untyped;
+ WorkerData *worker_data = (WorkerData *)worker_data_untyped;
ScanJob *job_data = (ScanJob*)job;
ScanCopyContext ctx = CONTEXT_FROM_OBJECT_OPERATIONS (job_data->ops, sgen_workers_get_job_gray_queue (worker_data));
g_assert (concurrent_collection_in_progress);
- major_collector.scan_card_table (TRUE, ctx);
+ major_collector.scan_card_table (CARDTABLE_SCAN_MOD_UNION, ctx);
}
static void
job_scan_los_mod_union_card_table (void *worker_data_untyped, SgenThreadPoolJob *job)
{
- WorkerData *worker_data = worker_data_untyped;
+ WorkerData *worker_data = (WorkerData *)worker_data_untyped;
ScanJob *job_data = (ScanJob*)job;
ScanCopyContext ctx = CONTEXT_FROM_OBJECT_OPERATIONS (job_data->ops, sgen_workers_get_job_gray_queue (worker_data));
g_assert (concurrent_collection_in_progress);
- sgen_los_scan_card_table (TRUE, ctx);
+ sgen_los_scan_card_table (CARDTABLE_SCAN_MOD_UNION, ctx);
+}
+
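+/*
+ * Preclean the mod union cards on the worker threads while the mutator is
+ * still running, so that fewer dirty cards remain to be scanned in the
+ * finishing pause.
+ */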
+static void
+job_mod_union_preclean (void *worker_data_untyped, SgenThreadPoolJob *job)
+{
+ WorkerData *worker_data = (WorkerData *)worker_data_untyped;
+ ScanJob *job_data = (ScanJob*)job;
+ ScanCopyContext ctx = CONTEXT_FROM_OBJECT_OPERATIONS (job_data->ops, sgen_workers_get_job_gray_queue (worker_data));
+
+ g_assert (concurrent_collection_in_progress);
+
+ major_collector.scan_card_table (CARDTABLE_SCAN_MOD_UNION_PRECLEAN, ctx);
+ sgen_los_scan_card_table (CARDTABLE_SCAN_MOD_UNION_PRECLEAN, ctx);
}
static void
-init_gray_queue (void)
+init_gray_queue (gboolean use_workers)
{
- if (sgen_collection_is_concurrent ())
+ if (use_workers)
sgen_workers_init_distribute_gray_queue ();
sgen_gray_object_queue_init (&gray_queue, NULL);
}
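+/*
+ * If @enqueue is FALSE the jobs are executed immediately on the current
+ * thread instead of being handed off to the worker threads.
+ */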
static void
-enqueue_scan_from_roots_jobs (char *heap_start, char *heap_end, SgenObjectOperations *ops)
+enqueue_scan_from_roots_jobs (char *heap_start, char *heap_end, SgenObjectOperations *ops, gboolean enqueue)
{
ScanFromRegisteredRootsJob *scrrj;
ScanThreadDataJob *stdj;
scrrj->heap_start = heap_start;
scrrj->heap_end = heap_end;
scrrj->root_type = ROOT_TYPE_NORMAL;
- sgen_workers_enqueue_job (&scrrj->job);
+ sgen_workers_enqueue_job (&scrrj->job, enqueue);
scrrj = (ScanFromRegisteredRootsJob*)sgen_thread_pool_job_alloc ("scan from registered roots wbarrier", job_scan_from_registered_roots, sizeof (ScanFromRegisteredRootsJob));
scrrj->ops = ops;
scrrj->heap_start = heap_start;
scrrj->heap_end = heap_end;
scrrj->root_type = ROOT_TYPE_WBARRIER;
- sgen_workers_enqueue_job (&scrrj->job);
+ sgen_workers_enqueue_job (&scrrj->job, enqueue);
/* Threads */
stdj = (ScanThreadDataJob*)sgen_thread_pool_job_alloc ("scan thread data", job_scan_thread_data, sizeof (ScanThreadDataJob));
stdj->heap_start = heap_start;
stdj->heap_end = heap_end;
- sgen_workers_enqueue_job (&stdj->job);
+ sgen_workers_enqueue_job (&stdj->job, enqueue);
/* Scan the list of objects ready for finalization. */
sfej = (ScanFinalizerEntriesJob*)sgen_thread_pool_job_alloc ("scan finalizer entries", job_scan_finalizer_entries, sizeof (ScanFinalizerEntriesJob));
sfej->queue = &fin_ready_queue;
sfej->ops = ops;
- sgen_workers_enqueue_job (&sfej->job);
+ sgen_workers_enqueue_job (&sfej->job, enqueue);
sfej = (ScanFinalizerEntriesJob*)sgen_thread_pool_job_alloc ("scan critical finalizer entries", job_scan_finalizer_entries, sizeof (ScanFinalizerEntriesJob));
sfej->queue = &critical_fin_queue;
sfej->ops = ops;
- sgen_workers_enqueue_job (&sfej->job);
+ sgen_workers_enqueue_job (&sfej->job, enqueue);
}
/*
sgen_memgov_minor_collection_start ();
- init_gray_queue ();
+ init_gray_queue (FALSE);
gc_stats.minor_gc_count ++;
TV_GETTIME (atv);
time_minor_pinning += TV_ELAPSED (btv, atv);
- SGEN_LOG (2, "Finding pinned pointers: %zd in %ld usecs", sgen_get_pinned_count (), TV_ELAPSED (btv, atv));
+ SGEN_LOG (2, "Finding pinned pointers: %zd in %lld usecs", sgen_get_pinned_count (), TV_ELAPSED (btv, atv));
SGEN_LOG (4, "Start scan with %zd pinned objects", sgen_get_pinned_count ());
- /*
- * FIXME: When we finish a concurrent collection we do a nursery collection first,
- * as part of which we scan the card table. Then, later, we scan the mod union
- * cardtable. We should only have to do one.
- */
sj = (ScanJob*)sgen_thread_pool_job_alloc ("scan remset", job_remembered_set_scan, sizeof (ScanJob));
sj->ops = object_ops;
- sgen_workers_enqueue_job (&sj->job);
+ sgen_workers_enqueue_job (&sj->job, FALSE);
/* we don't have complete write barrier yet, so we scan all the old generation sections */
TV_GETTIME (btv);
time_minor_scan_remsets += TV_ELAPSED (atv, btv);
- SGEN_LOG (2, "Old generation scan: %ld usecs", TV_ELAPSED (atv, btv));
+ SGEN_LOG (2, "Old generation scan: %lld usecs", TV_ELAPSED (atv, btv));
sgen_pin_stats_print_class_stats ();
- sgen_drain_gray_stack (-1, ctx);
-
/* FIXME: Why do we do this at this specific, seemingly random, point? */
sgen_client_collecting_minor (&fin_ready_queue, &critical_fin_queue);
TV_GETTIME (atv);
time_minor_scan_pinned += TV_ELAPSED (btv, atv);
- enqueue_scan_from_roots_jobs (sgen_get_nursery_start (), nursery_next, object_ops);
+ enqueue_scan_from_roots_jobs (sgen_get_nursery_start (), nursery_next, object_ops, FALSE);
TV_GETTIME (btv);
time_minor_scan_roots += TV_ELAPSED (atv, btv);
sgen_client_binary_protocol_reclaim_end (GENERATION_NURSERY);
TV_GETTIME (btv);
time_minor_fragment_creation += TV_ELAPSED (atv, btv);
- SGEN_LOG (2, "Fragment creation: %ld usecs, %lu bytes available", TV_ELAPSED (atv, btv), (unsigned long)fragment_total);
+ SGEN_LOG (2, "Fragment creation: %lld usecs, %lu bytes available", TV_ELAPSED (atv, btv), (unsigned long)fragment_total);
if (consistency_check_at_minor_collection)
sgen_check_major_refs ();
sgen_nursery_alloc_prepare_for_major ();
}
- init_gray_queue ();
+ init_gray_queue (mode == COPY_OR_MARK_FROM_ROOTS_START_CONCURRENT);
TV_GETTIME (atv);
sgen_client_pre_collection_checks ();
- if (!concurrent) {
+ if (mode != COPY_OR_MARK_FROM_ROOTS_START_CONCURRENT) {
/* Remsets are not useful for a major collection */
remset.clear_cards ();
}
sgen_init_pinning ();
SGEN_LOG (6, "Collecting pinned addresses");
pin_from_roots ((void*)lowest_heap_address, (void*)highest_heap_address, ctx);
-
+ if (mode == COPY_OR_MARK_FROM_ROOTS_FINISH_CONCURRENT) {
+ /* Pin cemented objects that were forced */
+ sgen_pin_cemented_objects ();
+ }
sgen_optimize_pin_queue ();
+ if (mode == COPY_OR_MARK_FROM_ROOTS_START_CONCURRENT) {
+ /*
+ * Cemented objects in the pinned list will be marked. When marking
+ * concurrently we don't mark mod-union cards for these objects;
+ * instead they remain cemented until the next major collection, when
+ * we recheck whether they are still pinned in the roots.
+ */
+ sgen_cement_force_pinned ();
+ }
sgen_client_collecting_major_1 ();
TV_GETTIME (btv);
time_major_pinning += TV_ELAPSED (atv, btv);
- SGEN_LOG (2, "Finding pinned pointers: %zd in %ld usecs", sgen_get_pinned_count (), TV_ELAPSED (atv, btv));
+ SGEN_LOG (2, "Finding pinned pointers: %zd in %lld usecs", sgen_get_pinned_count (), TV_ELAPSED (atv, btv));
SGEN_LOG (4, "Start scan with %zd pinned objects", sgen_get_pinned_count ());
major_collector.init_to_space ();
- /*
- * The concurrent collector doesn't move objects, neither on
- * the major heap nor in the nursery, so we can mark even
- * before pinning has finished. For the non-concurrent
- * collector we start the workers after pinning.
- */
- if (mode != COPY_OR_MARK_FROM_ROOTS_SERIAL) {
- SGEN_ASSERT (0, sgen_workers_all_done (), "Why are the workers not done when we start or finish a major collection?");
- sgen_workers_start_all_workers (object_ops);
- gray_queue_enable_redirect (WORKERS_DISTRIBUTE_GRAY_QUEUE);
+ SGEN_ASSERT (0, sgen_workers_all_done (), "Why are the workers not done when we start or finish a major collection?");
+ if (mode == COPY_OR_MARK_FROM_ROOTS_FINISH_CONCURRENT) {
+ if (sgen_workers_have_idle_work ()) {
+ /*
+ * We force the workers to finish with the new object ops context,
+ * which can also do copying. Pinning must already have finished by
+ * this point.
+ */
+ sgen_workers_start_all_workers (object_ops, NULL);
+ sgen_workers_join ();
+ }
}
#ifdef SGEN_DEBUG_INTERNAL_ALLOC
sgen_client_collecting_major_3 (&fin_ready_queue, &critical_fin_queue);
- /*
- * FIXME: is this the right context? It doesn't seem to contain a copy function
- * unless we're concurrent.
- */
- enqueue_scan_from_roots_jobs (heap_start, heap_end, object_ops);
+ enqueue_scan_from_roots_jobs (heap_start, heap_end, object_ops, FALSE);
TV_GETTIME (btv);
time_major_scan_roots += TV_ELAPSED (atv, btv);
+ /*
+ * We start the concurrent workers only after pinning and after we have
+ * scanned the roots, to make sure a worker cannot finish before all
+ * the roots have been handled.
+ */
+ if (mode == COPY_OR_MARK_FROM_ROOTS_START_CONCURRENT) {
+ if (precleaning_enabled) {
+ ScanJob *sj;
+ /* Mod union preclean job */
+ sj = (ScanJob*)sgen_thread_pool_job_alloc ("preclean mod union cardtable", job_mod_union_preclean, sizeof (ScanJob));
+ sj->ops = object_ops;
+ sgen_workers_start_all_workers (object_ops, &sj->job);
+ } else {
+ sgen_workers_start_all_workers (object_ops, NULL);
+ }
+ gray_queue_enable_redirect (WORKERS_DISTRIBUTE_GRAY_QUEUE);
+ }
+
if (mode == COPY_OR_MARK_FROM_ROOTS_FINISH_CONCURRENT) {
ScanJob *sj;
/* Mod union card table */
sj = (ScanJob*)sgen_thread_pool_job_alloc ("scan mod union cardtable", job_scan_major_mod_union_card_table, sizeof (ScanJob));
sj->ops = object_ops;
- sgen_workers_enqueue_job (&sj->job);
+ sgen_workers_enqueue_job (&sj->job, FALSE);
sj = (ScanJob*)sgen_thread_pool_job_alloc ("scan LOS mod union cardtable", job_scan_los_mod_union_card_table, sizeof (ScanJob));
sj->ops = object_ops;
- sgen_workers_enqueue_job (&sj->job);
+ sgen_workers_enqueue_job (&sj->job, FALSE);
TV_GETTIME (atv);
time_major_scan_mod_union += TV_ELAPSED (btv, atv);
static void
major_finish_copy_or_mark (CopyOrMarkFromRootsMode mode)
{
- switch (mode) {
- case COPY_OR_MARK_FROM_ROOTS_START_CONCURRENT:
- /*
- * Prepare the pin queue for the next collection. Since pinning runs on the worker
- * threads we must wait for the jobs to finish before we can reset it.
- */
- sgen_workers_wait_for_jobs_finished ();
+ if (mode == COPY_OR_MARK_FROM_ROOTS_START_CONCURRENT) {
sgen_finish_pinning ();
sgen_pin_stats_reset ();
if (do_concurrent_checks)
sgen_debug_check_nursery_is_clean ();
- break;
- case COPY_OR_MARK_FROM_ROOTS_FINISH_CONCURRENT:
- sgen_workers_wait_for_jobs_finished ();
- break;
- case COPY_OR_MARK_FROM_ROOTS_SERIAL:
- break;
- default:
- g_assert_not_reached ();
}
}
major_finish_copy_or_mark (COPY_OR_MARK_FROM_ROOTS_FINISH_CONCURRENT);
- sgen_workers_join ();
-
- SGEN_ASSERT (0, sgen_gray_object_queue_is_empty (&gray_queue), "Why is the gray queue not empty after workers have finished working?");
-
#ifdef SGEN_DEBUG_INTERNAL_ALLOC
main_gc_thread = NULL;
#endif
object_ops = &major_collector.major_ops_serial;
}
- /*
- * The workers have stopped so we need to finish gray queue
- * work that might result from finalization in the main GC
- * thread. Redirection must therefore be turned off.
- */
- sgen_gray_object_queue_disable_alloc_prepare (&gray_queue);
g_assert (sgen_section_gray_queue_is_empty (sgen_workers_get_distribute_section_gray_queue ()));
/* all the objects in the heap */
time_major_fragment_creation += TV_ELAPSED (atv, btv);
binary_protocol_sweep_begin (GENERATION_OLD, !major_collector.sweeps_lazily);
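+ /* Give the memory governor a chance to record heap state before the sweep frees memory. */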
+ sgen_memgov_major_pre_sweep ();
TV_GETTIME (atv);
time_major_free_bigobjs += TV_ELAPSED (btv, atv);
binary_protocol_concurrent_finish ();
/*
- * The major collector can add global remsets which are processed in the finishing
- * nursery collection, below. That implies that the workers must have finished
- * marking before the nursery collection is allowed to run, otherwise we might miss
- * some remsets.
+ * We need to stop all workers since we're updating the cardtable below.
+ * The workers will be resumed with a finishing pause context to avoid
+ * additional cardtable and object scanning.
*/
- sgen_workers_wait ();
+ sgen_workers_stop_all_workers ();
SGEN_TV_GETTIME (time_major_conc_collection_end);
gc_stats.major_gc_time_concurrent += SGEN_TV_ELAPSED (time_major_conc_collection_start, time_major_conc_collection_end);
* LOCKING: The GC lock MUST be held.
*/
void
-sgen_ensure_free_space (size_t size)
+sgen_ensure_free_space (size_t size, int generation)
{
int generation_to_collect = -1;
const char *reason = NULL;
- if (size > SGEN_MAX_SMALL_OBJ_SIZE) {
+ if (generation == GENERATION_OLD) {
if (sgen_need_major_collection (size)) {
reason = "LOS overflow";
generation_to_collect = GENERATION_OLD;
generation_to_collect = GENERATION_OLD;
}
} else if (sgen_need_major_collection (size)) {
- reason = "Minor allowance";
+ reason = concurrent_collection_in_progress ? "Forced finish concurrent collection" : "Minor allowance";
generation_to_collect = GENERATION_OLD;
} else {
generation_to_collect = GENERATION_NURSERY;
if (concurrent_collection_in_progress) {
/*
- * We update the concurrent collection. If it finished, we're done. If
- * not, and we've been asked to do a nursery collection, we do that.
+ * If the concurrent worker is finished, or we are asked to do a major
+ * collection, we finish the concurrent collection.
*/
- gboolean finish = major_should_finish_concurrent_collection () || (wait_to_finish && generation_to_collect == GENERATION_OLD);
+ gboolean finish = major_should_finish_concurrent_collection () || generation_to_collect == GENERATION_OLD;
if (finish) {
major_finish_concurrent_collection (wait_to_finish);
oldest_generation_collected = GENERATION_OLD;
} else {
+ SGEN_ASSERT (0, generation_to_collect == GENERATION_NURSERY, "Why aren't we finishing the concurrent collection?");
major_update_concurrent_collection ();
- if (generation_to_collect == GENERATION_NURSERY)
- collect_nursery (NULL, FALSE);
+ collect_nursery (NULL, FALSE);
}
goto done;
}
- /*
- * If we've been asked to do a major collection, and the major collector wants to
- * run synchronously (to evacuate), we set the flag to do that.
- */
- if (generation_to_collect == GENERATION_OLD &&
- allow_synchronous_major &&
- major_collector.want_synchronous_collection &&
- *major_collector.want_synchronous_collection) {
- wait_to_finish = TRUE;
- }
-
SGEN_ASSERT (0, !concurrent_collection_in_progress, "Why did this not get handled above?");
/*
if (!sgen_pointer_queue_is_empty (&fin_ready_queue)) {
pending_unqueued_finalizer = TRUE;
mono_memory_write_barrier ();
- obj = sgen_pointer_queue_pop (&fin_ready_queue);
+ obj = (GCObject *)sgen_pointer_queue_pop (&fin_ready_queue);
} else if (!sgen_pointer_queue_is_empty (&critical_fin_queue)) {
pending_unqueued_finalizer = TRUE;
mono_memory_write_barrier ();
- obj = sgen_pointer_queue_pop (&critical_fin_queue);
+ obj = (GCObject *)sgen_pointer_queue_pop (&critical_fin_queue);
} else {
obj = NULL;
}
int i;
LOCK_GC;
for (i = 0; i < ROOT_TYPE_NUM; ++i) {
- RootRecord *root = sgen_hash_table_lookup (&roots_hash [i], start);
+ RootRecord *root = (RootRecord *)sgen_hash_table_lookup (&roots_hash [i], start);
/* we allow changing the size and the descriptor (for thread statics etc) */
if (root) {
size_t old_size = root->end_root - start;
SGEN_LOG (8, "Wbarrier atomic store at %p to %p (%s)", ptr, value, value ? sgen_client_vtable_get_name (SGEN_LOAD_VTABLE (value)) : "null");
- InterlockedWritePointer (ptr, value);
+ InterlockedWritePointer ((volatile gpointer *)ptr, value);
if (ptr_in_nursery (value) || concurrent_collection_in_progress)
mono_gc_wbarrier_generic_nostore (ptr);
void
sgen_wbarrier_value_copy_bitmap (gpointer _dest, gpointer _src, int size, unsigned bitmap)
{
- GCObject **dest = _dest;
- GCObject **src = _src;
+ GCObject **dest = (GCObject **)_dest;
+ GCObject **src = (GCObject **)_src;
while (size) {
if (bitmap & 0x1)
return tot;
}
-gboolean
-sgen_set_allow_synchronous_major (gboolean flag)
-{
- if (!major_collector.is_concurrent)
- return flag;
-
- allow_synchronous_major = flag;
- return TRUE;
-}
-
void
sgen_env_var_error (const char *env_var, const char *fallback, const char *description_format, ...)
{
return;
case -1:
/* being inited by another thread */
- g_usleep (1000);
+ mono_thread_info_usleep (1000);
break;
case 0:
/* we will init it */
mono_thread_smr_init ();
#endif
- LOCK_INIT (gc_mutex);
+ mono_coop_mutex_init (&gc_mutex);
gc_debug_file = stderr;
- LOCK_INIT (sgen_interruption_mutex);
+ mono_coop_mutex_init (&sgen_interruption_mutex);
if ((env = g_getenv (MONO_GC_PARAMS_NAME))) {
opts = g_strsplit (env, ",", -1);
}
continue;
}
- if (g_str_has_prefix (opt, "allow-synchronous-major=")) {
- if (!major_collector.is_concurrent) {
- sgen_env_var_error (MONO_GC_PARAMS_NAME, "Ignoring.", "`allow-synchronous-major` is only valid for the concurrent major collector.");
- continue;
- }
-
- opt = strchr (opt, '=') + 1;
-
- if (!strcmp (opt, "yes")) {
- allow_synchronous_major = TRUE;
- } else if (!strcmp (opt, "no")) {
- allow_synchronous_major = FALSE;
- } else {
- sgen_env_var_error (MONO_GC_PARAMS_NAME, "Using default value.", "`allow-synchronous-major` must be either `yes' or `no'.");
- continue;
- }
- }
if (!strcmp (opt, "cementing")) {
cement_enabled = TRUE;
continue;
}
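+ /* Mod union precleaning is on by default; "no-precleaning" disables it. */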
+ if (!strcmp (opt, "precleaning")) {
+ precleaning_enabled = TRUE;
+ continue;
+ }
+ if (!strcmp (opt, "no-precleaning")) {
+ precleaning_enabled = FALSE;
+ continue;
+ }
+
if (major_collector.handle_gc_param && major_collector.handle_gc_param (opt))
continue;
fprintf (stderr, " minor=COLLECTOR (where COLLECTOR is `simple' or `split')\n");
fprintf (stderr, " wbarrier=WBARRIER (where WBARRIER is `remset' or `cardtable')\n");
fprintf (stderr, " [no-]cementing\n");
- if (major_collector.is_concurrent)
- fprintf (stderr, " allow-synchronous-major=FLAG (where FLAG is `yes' or `no')\n");
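+ fprintf (stderr, " [no-]precleaning\n");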
if (major_collector.print_gc_param_usage)
major_collector.print_gc_param_usage ();
if (sgen_minor_collector.print_gc_param_usage)
} else if (g_str_has_prefix (opt, "binary-protocol=")) {
char *filename = strchr (opt, '=') + 1;
char *colon = strrchr (filename, ':');
- size_t limit = -1;
+ size_t limit = 0;
if (colon) {
if (!mono_gc_parse_environment_string_extract_number (colon + 1, &limit)) {
sgen_env_var_error (MONO_GC_DEBUG_NAME, "Ignoring limit.", "Binary protocol file size limit must be an integer.");
void
sgen_gc_lock (void)
{
- LOCK_GC;
+ mono_coop_mutex_lock (&gc_mutex);
}
void
{
gboolean try_free = sgen_try_free_some_memory;
sgen_try_free_some_memory = FALSE;
- mono_mutex_unlock (&gc_mutex);
+ mono_coop_mutex_unlock (&gc_mutex);
if (try_free)
mono_thread_hazardous_try_free_some ();
}
SGEN_ASSERT (0, !world_is_stopped, "Why are we stopping a stopped world?");
- binary_protocol_world_stopping (generation, sgen_timestamp (), (gpointer)mono_native_thread_id_get ());
+ binary_protocol_world_stopping (generation, sgen_timestamp (), (gpointer) (gsize) mono_native_thread_id_get ());
sgen_client_stop_world (generation);