[sgen] Disable concurrent queue redirection
diff --git a/mono/sgen/sgen-gc.c b/mono/sgen/sgen-gc.c
index 2fdd86c2bf0613f7e75129c417d5a3341f82c1a4..a45a9a106b03f0ce8035bd87202ac7faacf7c972 100644
--- a/mono/sgen/sgen-gc.c
+++ b/mono/sgen/sgen-gc.c
@@ -424,12 +424,11 @@ sgen_workers_get_job_gray_queue (WorkerData *worker_data, SgenGrayQueue *default
 }
 
 static void
-gray_queue_enable_redirect (SgenGrayQueue *queue)
+gray_queue_redirect (SgenGrayQueue *queue)
 {
        SGEN_ASSERT (0, concurrent_collection_in_progress, "Where are we redirecting the gray queue to, without a concurrent collection?");
 
-       sgen_gray_queue_set_alloc_prepare (queue, sgen_workers_take_from_queue_and_awake);
-       sgen_workers_take_from_queue_and_awake (queue);
+       sgen_workers_take_from_queue (queue);
 }
 
 void
@@ -1324,6 +1323,11 @@ typedef struct {
        SgenGrayQueue *gc_thread_gray_queue;
 } ScanJob;
 
+typedef struct {
+       ScanJob scan_job;
+       int job_index;
+} ParallelScanJob;
+
 static ScanCopyContext
 scan_copy_context_for_scan_job (void *worker_data_untyped, ScanJob *job)
 {
@@ -1390,7 +1394,7 @@ job_scan_major_mod_union_card_table (void *worker_data_untyped, SgenThreadPoolJo
        ScanCopyContext ctx = scan_copy_context_for_scan_job (worker_data_untyped, job_data);
 
        g_assert (concurrent_collection_in_progress);
-       major_collector.scan_card_table (CARDTABLE_SCAN_MOD_UNION, ctx);
+       major_collector.scan_card_table (CARDTABLE_SCAN_MOD_UNION, ctx, 0, 1);
 }
 
 static void
@@ -1400,23 +1404,72 @@ job_scan_los_mod_union_card_table (void *worker_data_untyped, SgenThreadPoolJob
        ScanCopyContext ctx = scan_copy_context_for_scan_job (worker_data_untyped, job_data);
 
        g_assert (concurrent_collection_in_progress);
-       sgen_los_scan_card_table (CARDTABLE_SCAN_MOD_UNION, ctx);
+       sgen_los_scan_card_table (CARDTABLE_SCAN_MOD_UNION, ctx, 0, 1);
+}
+
+static void
+job_major_mod_union_preclean (void *worker_data_untyped, SgenThreadPoolJob *job)
+{
+       ParallelScanJob *job_data = (ParallelScanJob*)job;
+       ScanCopyContext ctx = scan_copy_context_for_scan_job (worker_data_untyped, (ScanJob*)job_data);
+
+       g_assert (concurrent_collection_in_progress);
+
+       major_collector.scan_card_table (CARDTABLE_SCAN_MOD_UNION_PRECLEAN, ctx, job_data->job_index, sgen_workers_get_job_split_count ());
 }
 
 static void
-job_mod_union_preclean (void *worker_data_untyped, SgenThreadPoolJob *job)
+job_los_mod_union_preclean (void *worker_data_untyped, SgenThreadPoolJob *job)
+{
+       ParallelScanJob *job_data = (ParallelScanJob*)job;
+       ScanCopyContext ctx = scan_copy_context_for_scan_job (worker_data_untyped, (ScanJob*)job_data);
+
+       g_assert (concurrent_collection_in_progress);
+
+       sgen_los_scan_card_table (CARDTABLE_SCAN_MOD_UNION_PRECLEAN, ctx, job_data->job_index, sgen_workers_get_job_split_count ());
+}
+
+static void
+job_scan_last_pinned (void *worker_data_untyped, SgenThreadPoolJob *job)
 {
        ScanJob *job_data = (ScanJob*)job;
        ScanCopyContext ctx = scan_copy_context_for_scan_job (worker_data_untyped, job_data);
 
        g_assert (concurrent_collection_in_progress);
 
-       major_collector.scan_card_table (CARDTABLE_SCAN_MOD_UNION_PRECLEAN, ctx);
-       sgen_los_scan_card_table (CARDTABLE_SCAN_MOD_UNION_PRECLEAN, ctx);
-
        sgen_scan_pin_queue_objects (ctx);
 }
 
+static void
+workers_finish_callback (void)
+{
+       ParallelScanJob *psj;
+       ScanJob *sj;
+       int split_count = sgen_workers_get_job_split_count ();
+       int i;
+       /* Mod union preclean jobs */
+       for (i = 0; i < split_count; i++) {
+               psj = (ParallelScanJob*)sgen_thread_pool_job_alloc ("preclean major mod union cardtable", job_major_mod_union_preclean, sizeof (ParallelScanJob));
+               psj->scan_job.ops = sgen_workers_get_idle_func_object_ops ();
+               psj->scan_job.gc_thread_gray_queue = NULL;
+               psj->job_index = i;
+               sgen_workers_enqueue_job (&psj->scan_job.job, TRUE);
+       }
+
+       for (i = 0; i < split_count; i++) {
+               psj = (ParallelScanJob*)sgen_thread_pool_job_alloc ("preclean los mod union cardtable", job_los_mod_union_preclean, sizeof (ParallelScanJob));
+               psj->scan_job.ops = sgen_workers_get_idle_func_object_ops ();
+               psj->scan_job.gc_thread_gray_queue = NULL;
+               psj->job_index = i;
+               sgen_workers_enqueue_job (&psj->scan_job.job, TRUE);
+       }
+
+       sj = (ScanJob*)sgen_thread_pool_job_alloc ("scan last pinned", job_scan_last_pinned, sizeof (ScanJob));
+       sj->ops = sgen_workers_get_idle_func_object_ops ();
+       sj->gc_thread_gray_queue = NULL;
+       sgen_workers_enqueue_job (&sj->job, TRUE);
+}
+
 static void
 init_gray_queue (SgenGrayQueue *gc_thread_gray_queue, gboolean use_workers)
 {
@@ -1819,7 +1872,12 @@ major_copy_or_mark_from_roots (SgenGrayQueue *gc_thread_gray_queue, size_t *old_
                         * We force the finish of the worker with the new object ops context
                         * which can also do copying. We need to have finished pinning.
                         */
-                       sgen_workers_start_all_workers (object_ops, NULL);
+                       /* FIXME Implement parallel copying and get rid of this ineffective hack */
+                       if (major_collector.is_parallel)
+                               sgen_workers_start_all_workers (&major_collector.major_ops_conc_par_start, NULL);
+                       else
+                               sgen_workers_start_all_workers (object_ops, NULL);
+
                        sgen_workers_join ();
                }
        }
@@ -1846,17 +1904,12 @@ major_copy_or_mark_from_roots (SgenGrayQueue *gc_thread_gray_queue, size_t *old_
         * the roots.
         */
        if (mode == COPY_OR_MARK_FROM_ROOTS_START_CONCURRENT) {
+               gray_queue_redirect (gc_thread_gray_queue);
                if (precleaning_enabled) {
-                       ScanJob *sj;
-                       /* Mod union preclean job */
-                       sj = (ScanJob*)sgen_thread_pool_job_alloc ("preclean mod union cardtable", job_mod_union_preclean, sizeof (ScanJob));
-                       sj->ops = object_ops;
-                       sj->gc_thread_gray_queue = NULL;
-                       sgen_workers_start_all_workers (object_ops, &sj->job);
+                       sgen_workers_start_all_workers (object_ops, workers_finish_callback);
                } else {
                        sgen_workers_start_all_workers (object_ops, NULL);
                }
-               gray_queue_enable_redirect (gc_thread_gray_queue);
        }
 
        if (mode == COPY_OR_MARK_FROM_ROOTS_FINISH_CONCURRENT) {
@@ -1907,7 +1960,11 @@ major_start_collection (SgenGrayQueue *gc_thread_gray_queue, const char *reason,
                g_assert (major_collector.is_concurrent);
                concurrent_collection_in_progress = TRUE;
 
-               object_ops = &major_collector.major_ops_concurrent_start;
+               if (major_collector.is_parallel)
+                       object_ops = &major_collector.major_ops_conc_par_start;
+               else
+                       object_ops = &major_collector.major_ops_concurrent_start;
+
        } else {
                object_ops = &major_collector.major_ops_serial;
        }
@@ -1943,7 +2000,10 @@ major_finish_collection (SgenGrayQueue *gc_thread_gray_queue, const char *reason
        TV_GETTIME (btv);
 
        if (concurrent_collection_in_progress) {
-               object_ops = &major_collector.major_ops_concurrent_finish;
+               if (major_collector.is_parallel)
+                       object_ops = &major_collector.major_ops_conc_par_finish;
+               else
+                       object_ops = &major_collector.major_ops_concurrent_finish;
 
                major_copy_or_mark_from_roots (gc_thread_gray_queue, NULL, COPY_OR_MARK_FROM_ROOTS_FINISH_CONCURRENT, object_ops);
 
@@ -2850,6 +2910,8 @@ sgen_gc_init (void)
                sgen_marksweep_init (&major_collector);
        } else if (!strcmp (major_collector_opt, "marksweep-conc")) {
                sgen_marksweep_conc_init (&major_collector);
+       } else if (!strcmp (major_collector_opt, "marksweep-conc-par")) {
+               sgen_marksweep_conc_par_init (&major_collector);
        } else {
                sgen_env_var_error (MONO_GC_PARAMS_NAME, "Using `" DEFAULT_MAJOR_NAME "` instead.", "Unknown major collector `%s'.", major_collector_opt);
                goto use_default_major;
@@ -3156,8 +3218,16 @@ sgen_gc_init (void)
        if (major_collector.post_param_init)
                major_collector.post_param_init (&major_collector);
 
-       if (major_collector.needs_thread_pool)
-               sgen_workers_init (1);
+       if (major_collector.needs_thread_pool) {
+               int num_workers = 1;
+               if (major_collector.is_parallel) {
+                       /* FIXME Detect the number of physical cores, instead of logical */
+                       num_workers = mono_cpu_count () / 2;
+                       if (num_workers < 1)
+                               num_workers = 1;
+               }
+               sgen_workers_init (num_workers, (SgenWorkerCallback) major_collector.worker_init_cb);
+       }
 
        sgen_memgov_init (max_heap, soft_limit, debug_print_allowance, allowance_ratio, save_target);