/*
 * Cumulative timing counters (in time-value units) for the phases of a
 * major collection, registered with mono_counters elsewhere in this file.
 * The mod union scan time is tracked separately for major heap blocks and
 * for the large object space (LOS) so the two can be profiled independently.
 */
static guint64 time_major_pinning = 0;
static guint64 time_major_scan_pinned = 0;
static guint64 time_major_scan_roots = 0;
static guint64 time_major_scan_mod_union_blocks = 0;
static guint64 time_major_scan_mod_union_los = 0;
static guint64 time_major_finish_gray_stack = 0;
static guint64 time_major_free_bigobjs = 0;
static guint64 time_major_los_sweep = 0;
/*
 * Hand the contents of @queue over to the worker pool of the generation
 * currently being collected, so the workers can drain the gray objects in
 * parallel.  The generation argument selects which worker context (nursery
 * vs. old) receives the queue.
 */
static void
gray_queue_redirect (SgenGrayQueue *queue)
{
	sgen_workers_take_from_queue (current_collection_generation, queue);
}
void
mono_counters_register ("Major pinning", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &time_major_pinning);
mono_counters_register ("Major scan pinned", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &time_major_scan_pinned);
mono_counters_register ("Major scan roots", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &time_major_scan_roots);
- mono_counters_register ("Major scan mod union", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &time_major_scan_mod_union);
+ mono_counters_register ("Major scan mod union blocks", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &time_major_scan_mod_union_blocks);
+ mono_counters_register ("Major scan mod union los", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &time_major_scan_mod_union_los);
mono_counters_register ("Major finish gray stack", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &time_major_finish_gray_stack);
mono_counters_register ("Major free big objects", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &time_major_free_bigobjs);
mono_counters_register ("Major LOS sweep", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &time_major_los_sweep);
* object ops changes, like forced concurrent finish.
*/
SGEN_ASSERT (0, sgen_workers_is_worker_thread (mono_native_thread_id_get ()), "We need a context for the scan job");
- job->ops = sgen_workers_get_idle_func_object_ops ();
+ job->ops = sgen_workers_get_idle_func_object_ops (worker_data);
}
return CONTEXT_FROM_OBJECT_OPERATIONS (job->ops, sgen_workers_get_job_gray_queue (worker_data, job->gc_thread_gray_queue));
major_collector.scan_card_table (CARDTABLE_SCAN_GLOBAL, ctx, job_data->job_index, job_data->job_split_count);
SGEN_TV_GETTIME (btv);
time_minor_scan_major_blocks += SGEN_TV_ELAPSED (atv, btv);
+
+ if (worker_data_untyped)
+ ((WorkerData*)worker_data_untyped)->major_scan_time += SGEN_TV_ELAPSED (atv, btv);
}
static void
sgen_los_scan_card_table (CARDTABLE_SCAN_GLOBAL, ctx, job_data->job_index, job_data->job_split_count);
SGEN_TV_GETTIME (btv);
time_minor_scan_los += SGEN_TV_ELAPSED (atv, btv);
+
+ if (worker_data_untyped)
+ ((WorkerData*)worker_data_untyped)->los_scan_time += SGEN_TV_ELAPSED (atv, btv);
}
/*
 * Thread pool job: scan the major heap's mod union card table during the
 * finishing pause of a concurrent collection.  The elapsed time is added
 * to the global time_major_scan_mod_union_blocks counter and, when the job
 * runs on a worker thread, to that worker's per-thread major scan time
 * (used for per-worker load statistics).
 */
static void
job_scan_major_mod_union_card_table (void *worker_data_untyped, SgenThreadPoolJob *job)
{
	SGEN_TV_DECLARE (atv);
	SGEN_TV_DECLARE (btv);
	ParallelScanJob *job_data = (ParallelScanJob*)job;
	ScanCopyContext ctx = scan_copy_context_for_scan_job (worker_data_untyped, (ScanJob*)job_data);

	g_assert (concurrent_collection_in_progress);
	SGEN_TV_GETTIME (atv);
	major_collector.scan_card_table (CARDTABLE_SCAN_MOD_UNION, ctx, job_data->job_index, job_data->job_split_count);
	SGEN_TV_GETTIME (btv);
	time_major_scan_mod_union_blocks += SGEN_TV_ELAPSED (atv, btv);

	/* This job can also run on the main GC thread, which passes no worker data. */
	if (worker_data_untyped)
		((WorkerData*)worker_data_untyped)->major_scan_time += SGEN_TV_ELAPSED (atv, btv);
}
/*
 * Thread pool job: scan the large object space (LOS) mod union card table
 * during the finishing pause of a concurrent collection.  The elapsed time
 * is added to the global time_major_scan_mod_union_los counter and, when
 * the job runs on a worker thread, to that worker's per-thread LOS scan
 * time.
 */
static void
job_scan_los_mod_union_card_table (void *worker_data_untyped, SgenThreadPoolJob *job)
{
	SGEN_TV_DECLARE (atv);
	SGEN_TV_DECLARE (btv);
	ParallelScanJob *job_data = (ParallelScanJob*)job;
	ScanCopyContext ctx = scan_copy_context_for_scan_job (worker_data_untyped, (ScanJob*)job_data);

	g_assert (concurrent_collection_in_progress);
	SGEN_TV_GETTIME (atv);
	sgen_los_scan_card_table (CARDTABLE_SCAN_MOD_UNION, ctx, job_data->job_index, job_data->job_split_count);
	SGEN_TV_GETTIME (btv);
	time_major_scan_mod_union_los += SGEN_TV_ELAPSED (atv, btv);

	/* This job can also run on the main GC thread, which passes no worker data. */
	if (worker_data_untyped)
		((WorkerData*)worker_data_untyped)->los_scan_time += SGEN_TV_ELAPSED (atv, btv);
}
/*
 * Thread pool job: preclean pass over the major heap's mod union card
 * table while the concurrent collection is still running.  Precleaning is
 * only ever scheduled on worker threads, hence the hard assert on
 * worker_data_untyped; the elapsed time goes into the worker's per-thread
 * major scan time only (no global counter for preclean).
 */
static void
job_major_mod_union_preclean (void *worker_data_untyped, SgenThreadPoolJob *job)
{
	SGEN_TV_DECLARE (atv);
	SGEN_TV_DECLARE (btv);
	ParallelScanJob *job_data = (ParallelScanJob*)job;
	ScanCopyContext ctx = scan_copy_context_for_scan_job (worker_data_untyped, (ScanJob*)job_data);

	g_assert (concurrent_collection_in_progress);
	SGEN_TV_GETTIME (atv);
	major_collector.scan_card_table (CARDTABLE_SCAN_MOD_UNION_PRECLEAN, ctx, job_data->job_index, job_data->job_split_count);
	SGEN_TV_GETTIME (btv);

	g_assert (worker_data_untyped);
	((WorkerData*)worker_data_untyped)->major_scan_time += SGEN_TV_ELAPSED (atv, btv);
}
/*
 * Thread pool job: preclean pass over the large object space (LOS) mod
 * union card table while the concurrent collection is still running.
 * Precleaning is only ever scheduled on worker threads, hence the hard
 * assert on worker_data_untyped; the elapsed time goes into the worker's
 * per-thread LOS scan time only (no global counter for preclean).
 */
static void
job_los_mod_union_preclean (void *worker_data_untyped, SgenThreadPoolJob *job)
{
	SGEN_TV_DECLARE (atv);
	SGEN_TV_DECLARE (btv);
	ParallelScanJob *job_data = (ParallelScanJob*)job;
	ScanCopyContext ctx = scan_copy_context_for_scan_job (worker_data_untyped, (ScanJob*)job_data);

	g_assert (concurrent_collection_in_progress);
	SGEN_TV_GETTIME (atv);
	sgen_los_scan_card_table (CARDTABLE_SCAN_MOD_UNION_PRECLEAN, ctx, job_data->job_index, job_data->job_split_count);
	SGEN_TV_GETTIME (btv);

	g_assert (worker_data_untyped);
	((WorkerData*)worker_data_untyped)->los_scan_time += SGEN_TV_ELAPSED (atv, btv);
}
static void
{
ParallelScanJob *psj;
ScanJob *sj;
- int split_count = sgen_workers_get_job_split_count ();
+ int split_count = sgen_workers_get_job_split_count (GENERATION_OLD);
int i;
/* Mod union preclean jobs */
for (i = 0; i < split_count; i++) {
psj->scan_job.gc_thread_gray_queue = NULL;
psj->job_index = i;
psj->job_split_count = split_count;
- sgen_workers_enqueue_job (&psj->scan_job.job, TRUE);
+ sgen_workers_enqueue_job (GENERATION_OLD, &psj->scan_job.job, TRUE);
}
for (i = 0; i < split_count; i++) {
psj->scan_job.gc_thread_gray_queue = NULL;
psj->job_index = i;
psj->job_split_count = split_count;
- sgen_workers_enqueue_job (&psj->scan_job.job, TRUE);
+ sgen_workers_enqueue_job (GENERATION_OLD, &psj->scan_job.job, TRUE);
}
sj = (ScanJob*)sgen_thread_pool_job_alloc ("scan last pinned", job_scan_last_pinned, sizeof (ScanJob));
sj->gc_thread_gray_queue = NULL;
- sgen_workers_enqueue_job (&sj->job, TRUE);
+ sgen_workers_enqueue_job (GENERATION_OLD, &sj->job, TRUE);
}
static void
static void
enqueue_scan_remembered_set_jobs (SgenGrayQueue *gc_thread_gray_queue, SgenObjectOperations *ops, gboolean enqueue)
{
- int i, split_count = sgen_workers_get_job_split_count ();
+ int i, split_count = sgen_workers_get_job_split_count (GENERATION_NURSERY);
ScanJob *sj;
sj = (ScanJob*)sgen_thread_pool_job_alloc ("scan wbroots", job_scan_wbroots, sizeof (ScanJob));
sj->ops = ops;
sj->gc_thread_gray_queue = gc_thread_gray_queue;
- sgen_workers_enqueue_job (&sj->job, enqueue);
+ sgen_workers_enqueue_job (GENERATION_NURSERY, &sj->job, enqueue);
for (i = 0; i < split_count; i++) {
ParallelScanJob *psj;
psj->scan_job.gc_thread_gray_queue = gc_thread_gray_queue;
psj->job_index = i;
psj->job_split_count = split_count;
- sgen_workers_enqueue_job (&psj->scan_job.job, enqueue);
+ sgen_workers_enqueue_job (GENERATION_NURSERY, &psj->scan_job.job, enqueue);
psj = (ParallelScanJob*)sgen_thread_pool_job_alloc ("scan LOS remsets", job_scan_los_card_table, sizeof (ParallelScanJob));
psj->scan_job.ops = ops;
psj->scan_job.gc_thread_gray_queue = gc_thread_gray_queue;
psj->job_index = i;
psj->job_split_count = split_count;
- sgen_workers_enqueue_job (&psj->scan_job.job, enqueue);
+ sgen_workers_enqueue_job (GENERATION_NURSERY, &psj->scan_job.job, enqueue);
}
}
scrrj->heap_start = heap_start;
scrrj->heap_end = heap_end;
scrrj->root_type = ROOT_TYPE_NORMAL;
- sgen_workers_enqueue_job (&scrrj->scan_job.job, enqueue);
+ sgen_workers_enqueue_job (current_collection_generation, &scrrj->scan_job.job, enqueue);
if (current_collection_generation == GENERATION_OLD) {
/* During minors we scan the cardtable for these roots instead */
scrrj->heap_start = heap_start;
scrrj->heap_end = heap_end;
scrrj->root_type = ROOT_TYPE_WBARRIER;
- sgen_workers_enqueue_job (&scrrj->scan_job.job, enqueue);
+ sgen_workers_enqueue_job (current_collection_generation, &scrrj->scan_job.job, enqueue);
}
/* Threads */
stdj->scan_job.gc_thread_gray_queue = gc_thread_gray_queue;
stdj->heap_start = heap_start;
stdj->heap_end = heap_end;
- sgen_workers_enqueue_job (&stdj->scan_job.job, enqueue);
+ sgen_workers_enqueue_job (current_collection_generation, &stdj->scan_job.job, enqueue);
/* Scan the list of objects ready for finalization. */
sfej->scan_job.ops = ops;
sfej->scan_job.gc_thread_gray_queue = gc_thread_gray_queue;
sfej->queue = &fin_ready_queue;
- sgen_workers_enqueue_job (&sfej->scan_job.job, enqueue);
+ sgen_workers_enqueue_job (current_collection_generation, &sfej->scan_job.job, enqueue);
sfej = (ScanFinalizerEntriesJob*)sgen_thread_pool_job_alloc ("scan critical finalizer entries", job_scan_finalizer_entries, sizeof (ScanFinalizerEntriesJob));
sfej->scan_job.ops = ops;
sfej->scan_job.gc_thread_gray_queue = gc_thread_gray_queue;
sfej->queue = &critical_fin_queue;
- sgen_workers_enqueue_job (&sfej->scan_job.job, enqueue);
+ sgen_workers_enqueue_job (current_collection_generation, &sfej->scan_job.job, enqueue);
}
/*
TV_DECLARE (btv);
SGEN_TV_DECLARE (last_minor_collection_start_tv);
SGEN_TV_DECLARE (last_minor_collection_end_tv);
+ guint64 major_scan_start = time_minor_scan_major_blocks;
+ guint64 los_scan_start = time_minor_scan_los;
+ guint64 finish_gray_start = time_minor_finish_gray_stack;
if (disable_minor_collections)
return TRUE;
binary_protocol_collection_begin (gc_stats.minor_gc_count, GENERATION_NURSERY);
- if (sgen_concurrent_collection_in_progress ()) {
- /* FIXME Support parallel nursery collections with concurrent major */
- object_ops_nopar = &sgen_minor_collector.serial_ops_with_concurrent_major;
- } else {
- object_ops_nopar = &sgen_minor_collector.serial_ops;
- if (sgen_minor_collector.is_parallel && sgen_nursery_size >= SGEN_PARALLEL_MINOR_MIN_NURSERY_SIZE) {
- object_ops_par = &sgen_minor_collector.parallel_ops;
- is_parallel = TRUE;
- }
+ object_ops_nopar = sgen_concurrent_collection_in_progress ()
+ ? &sgen_minor_collector.serial_ops_with_concurrent_major
+ : &sgen_minor_collector.serial_ops;
+ if (sgen_minor_collector.is_parallel && sgen_nursery_size >= SGEN_PARALLEL_MINOR_MIN_NURSERY_SIZE) {
+ object_ops_par = sgen_concurrent_collection_in_progress ()
+ ? &sgen_minor_collector.parallel_ops_with_concurrent_major
+ : &sgen_minor_collector.parallel_ops;
+ is_parallel = TRUE;
}
if (do_verify_nursery || do_dump_nursery_content)
if (is_parallel) {
gray_queue_redirect (&gc_thread_gray_queue);
- sgen_workers_start_all_workers (object_ops_nopar, object_ops_par, NULL);
- sgen_workers_join ();
+ sgen_workers_start_all_workers (GENERATION_NURSERY, object_ops_nopar, object_ops_par, NULL);
+ sgen_workers_join (GENERATION_NURSERY);
}
TV_GETTIME (btv);
current_collection_generation = -1;
objects_pinned = 0;
+ if (is_parallel)
+ binary_protocol_collection_end_stats (0, 0, time_minor_finish_gray_stack - finish_gray_start);
+ else
+ binary_protocol_collection_end_stats (
+ time_minor_scan_major_blocks - major_scan_start,
+ time_minor_scan_los - los_scan_start,
+ time_minor_finish_gray_stack - finish_gray_start);
+
binary_protocol_collection_end (gc_stats.minor_gc_count - 1, GENERATION_NURSERY, 0, 0);
if (check_nursery_objects_pinned && !sgen_minor_collector.is_split)
SGEN_ASSERT (0, sgen_workers_all_done (), "Why are the workers not done when we start or finish a major collection?");
if (mode == COPY_OR_MARK_FROM_ROOTS_FINISH_CONCURRENT) {
if (object_ops_par != NULL)
- sgen_workers_set_num_active_workers (0);
- if (sgen_workers_have_idle_work ()) {
+ sgen_workers_set_num_active_workers (GENERATION_OLD, 0);
+ if (object_ops_par == NULL && sgen_workers_have_idle_work (GENERATION_OLD)) {
/*
* We force the finish of the worker with the new object ops context
- * which can also do copying. We need to have finished pinning.
+ * which can also do copying. We need to have finished pinning. On the
+ * parallel collector, there is no need to drain the private queues
+ * here, since we can do it as part of the finishing work, achieving
+ * better work distribution.
*/
- sgen_workers_start_all_workers (object_ops_nopar, object_ops_par, NULL);
+ sgen_workers_start_all_workers (GENERATION_OLD, object_ops_nopar, object_ops_par, NULL);
- sgen_workers_join ();
+ sgen_workers_join (GENERATION_OLD);
}
}
* the roots.
*/
if (mode == COPY_OR_MARK_FROM_ROOTS_START_CONCURRENT) {
- sgen_workers_set_num_active_workers (1);
+ sgen_workers_set_num_active_workers (GENERATION_OLD, 1);
gray_queue_redirect (gc_thread_gray_queue);
if (precleaning_enabled) {
- sgen_workers_start_all_workers (object_ops_nopar, object_ops_par, workers_finish_callback);
+ sgen_workers_start_all_workers (GENERATION_OLD, object_ops_nopar, object_ops_par, workers_finish_callback);
} else {
- sgen_workers_start_all_workers (object_ops_nopar, object_ops_par, NULL);
+ sgen_workers_start_all_workers (GENERATION_OLD, object_ops_nopar, object_ops_par, NULL);
}
}
if (mode == COPY_OR_MARK_FROM_ROOTS_FINISH_CONCURRENT) {
- int i, split_count = sgen_workers_get_job_split_count ();
+ int i, split_count = sgen_workers_get_job_split_count (GENERATION_OLD);
gboolean parallel = object_ops_par != NULL;
/* If we're not parallel we finish the collection on the gc thread */
psj->scan_job.gc_thread_gray_queue = gc_thread_gray_queue;
psj->job_index = i;
psj->job_split_count = split_count;
- sgen_workers_enqueue_job (&psj->scan_job.job, parallel);
+ sgen_workers_enqueue_job (GENERATION_OLD, &psj->scan_job.job, parallel);
psj = (ParallelScanJob*)sgen_thread_pool_job_alloc ("scan LOS mod union cardtable", job_scan_los_mod_union_card_table, sizeof (ParallelScanJob));
psj->scan_job.ops = parallel ? NULL : object_ops_nopar;
psj->scan_job.gc_thread_gray_queue = gc_thread_gray_queue;
psj->job_index = i;
psj->job_split_count = split_count;
- sgen_workers_enqueue_job (&psj->scan_job.job, parallel);
+ sgen_workers_enqueue_job (GENERATION_OLD, &psj->scan_job.job, parallel);
}
if (parallel) {
* stack that contained roots and pinned objects and also scan the mod union card
* table.
*/
- sgen_workers_start_all_workers (object_ops_nopar, object_ops_par, NULL);
- sgen_workers_join ();
+ sgen_workers_start_all_workers (GENERATION_OLD, object_ops_nopar, object_ops_par, NULL);
+ sgen_workers_join (GENERATION_OLD);
}
}
current_collection_generation = GENERATION_OLD;
- sgen_workers_assert_gray_queue_is_empty ();
+ sgen_workers_assert_gray_queue_is_empty (GENERATION_OLD);
if (!concurrent)
sgen_cement_reset ();
mword fragment_total;
TV_DECLARE (atv);
TV_DECLARE (btv);
-
- TV_GETTIME (btv);
+ guint64 major_scan_start = time_major_scan_mod_union_blocks;
+ guint64 los_scan_start = time_major_scan_mod_union_los;
+ guint64 finish_gray_start = time_major_finish_gray_stack;
if (concurrent_collection_in_progress) {
SgenObjectOperations *object_ops_par = NULL;
object_ops_nopar = &major_collector.major_ops_serial;
}
- sgen_workers_assert_gray_queue_is_empty ();
+ sgen_workers_assert_gray_queue_is_empty (GENERATION_OLD);
+ TV_GETTIME (btv);
finish_gray_stack (GENERATION_OLD, CONTEXT_FROM_OBJECT_OPERATIONS (object_ops_nopar, gc_thread_gray_queue));
TV_GETTIME (atv);
time_major_finish_gray_stack += TV_ELAPSED (btv, atv);
memset (&counts, 0, sizeof (ScannedObjectCounts));
major_collector.finish_major_collection (&counts);
- sgen_workers_assert_gray_queue_is_empty ();
+ sgen_workers_assert_gray_queue_is_empty (GENERATION_OLD);
SGEN_ASSERT (0, sgen_workers_all_done (), "Can't have workers working after major collection has finished");
if (concurrent_collection_in_progress)
binary_protocol_flush_buffers (FALSE);
//consistency_check ();
+ if (major_collector.is_parallel)
+ binary_protocol_collection_end_stats (0, 0, time_major_finish_gray_stack - finish_gray_start);
+ else
+ binary_protocol_collection_end_stats (
+ time_major_scan_mod_union_blocks - major_scan_start,
+ time_major_scan_mod_union_los - los_scan_start,
+ time_major_finish_gray_stack - finish_gray_start);
binary_protocol_collection_end (gc_stats.major_gc_count - 1, GENERATION_OLD, counts.num_scanned_objects, counts.num_unique_scanned_objects);
}
* The workers will be resumed with a finishing pause context to avoid
* additional cardtable and object scanning.
*/
- sgen_workers_stop_all_workers ();
+ sgen_workers_stop_all_workers (GENERATION_OLD);
SGEN_TV_GETTIME (time_major_conc_collection_end);
gc_stats.major_gc_time_concurrent += SGEN_TV_ELAPSED (time_major_conc_collection_start, time_major_conc_collection_end);
/*
* Use concurrent major and dynamic nursery with a more
* aggressive shrinking relative to pause times.
- * FIXME use parallel minors
*/
- minor = SGEN_MINOR_SIMPLE;
+ minor = SGEN_MINOR_SIMPLE_PARALLEL;
major = SGEN_MAJOR_CONCURRENT;
dynamic_nursery = TRUE;
sgen_max_pause_margin = SGEN_PAUSE_MODE_MAX_PAUSE_MARGIN;
if (major_collector.post_param_init)
major_collector.post_param_init (&major_collector);
- if (major_collector.is_concurrent || sgen_minor_collector.is_parallel) {
- int num_workers = 1;
- if (major_collector.is_parallel || sgen_minor_collector.is_parallel) {
- num_workers = mono_cpu_count ();
- if (num_workers <= 1) {
- num_workers = 1;
- major_collector.is_parallel = FALSE;
- sgen_minor_collector.is_parallel = FALSE;
- }
- }
- if (major_collector.is_concurrent || sgen_minor_collector.is_parallel)
- sgen_workers_init (num_workers, (SgenWorkerCallback) major_collector.worker_init_cb);
- }
+ sgen_thread_pool_start ();
sgen_memgov_init (max_heap, soft_limit, debug_print_allowance, allowance_ratio, save_target);