/*
 * sgen-workers.c: Worker threads for parallel and concurrent GC.
 *
 * Copyright 2001-2003 Ximian, Inc
 * Copyright 2003-2010 Novell, Inc.
 * Copyright (C) 2012 Xamarin Inc
 *
 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
 */

#include <string.h>

#include "mono/sgen/sgen-gc.h"
#include "mono/sgen/sgen-workers.h"
#include "mono/sgen/sgen-thread-pool.h"
#include "mono/utils/mono-membar.h"
#include "mono/sgen/sgen-client.h"

static int workers_num;
static volatile gboolean forced_stop;
static WorkerData *workers_data;
static SgenWorkerCallback worker_init_cb;

/*
 * When using multiple workers, we need to have the last worker
 * enqueue the preclean jobs (if there are any). This lock ensures
 * that when the last worker takes it, all the other workers have
 * gracefully finished, so it can restart them.
 */
static mono_mutex_t finished_lock;

static SgenSectionGrayQueue workers_distribute_gray_queue;
static gboolean workers_distribute_gray_queue_inited;

/*
 * Allowed transitions:
 *
 * | from \ to     | NOT WORKING | WORKING | WORK ENQUEUED |
 * |---------------+-------------+---------+---------------+
 * | NOT WORKING   | -           | -       | main / worker |
 * | WORKING       | worker      | -       | main / worker |
 * | WORK ENQUEUED | -           | worker  | -             |
 *
 * The WORK ENQUEUED state guarantees that the worker thread will inspect the queue again at
 * least once. Only after looking at the queue will it go back to WORKING, and then,
 * eventually, to NOT WORKING. After enqueuing work the main thread transitions the state
 * to WORK ENQUEUED. Signalling the worker thread to wake up is only necessary if the old
 * state was NOT WORKING.
 */
enum {
	STATE_NOT_WORKING,
	STATE_WORKING,
	STATE_WORK_ENQUEUED
};

static SgenObjectOperations * volatile idle_func_object_ops;
static SgenObjectOperations *idle_func_object_ops_par, *idle_func_object_ops_nopar;

/*
 * finish_callback is called only when the workers finish work normally (when they
 * are not forced to finish). The callback is used to enqueue preclean jobs.
 */
static volatile SgenWorkersFinishCallback finish_callback;

static guint64 stat_workers_num_finished;

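/*
 * Transition a worker from old_state to new_state with a single compare-and-swap.
 * Returns TRUE if the transition succeeded, FALSE if the state changed under us
 * and the caller has to retry.
 */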
static gboolean
set_state (WorkerData *data, State old_state, State new_state)
{
	SGEN_ASSERT (0, old_state != new_state, "Why are we transitioning to the same state?");
	if (new_state == STATE_NOT_WORKING)
		SGEN_ASSERT (0, old_state == STATE_WORKING, "We can only transition to NOT WORKING from WORKING");
	else if (new_state == STATE_WORKING)
		SGEN_ASSERT (0, old_state == STATE_WORK_ENQUEUED, "We can only transition to WORKING from WORK ENQUEUED");
	if (new_state == STATE_NOT_WORKING || new_state == STATE_WORKING)
		SGEN_ASSERT (6, sgen_thread_pool_is_thread_pool_thread (mono_native_thread_id_get ()), "Only the worker thread is allowed to transition to NOT_WORKING or WORKING");

	return InterlockedCompareExchange (&data->state, new_state, old_state) == old_state;
}

static gboolean
state_is_working_or_enqueued (State state)
{
	return state == STATE_WORKING || state == STATE_WORK_ENQUEUED;
}

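/*
 * Mark every worker as having work enqueued and signal the thread pool if any
 * of them was idle, so that all workers re-inspect their queues.
 */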
static void
sgen_workers_ensure_awake (void)
{
	int i;
	gboolean need_signal = FALSE;

	/*
	 * All workers are awakened; make sure we reset the parallel context.
	 * We call this function only when starting the workers so nobody is running,
	 * or when the last worker is enqueuing preclean work. In both cases we can't
	 * have a worker working using a nopar context, which means it is safe.
	 */
	idle_func_object_ops = (workers_num > 1) ? idle_func_object_ops_par : idle_func_object_ops_nopar;

	for (i = 0; i < workers_num; i++) {
		State old_state;
		gboolean did_set_state;

		do {
			old_state = workers_data [i].state;
			if (old_state == STATE_WORK_ENQUEUED)
				break;

			did_set_state = set_state (&workers_data [i], old_state, STATE_WORK_ENQUEUED);
		} while (!did_set_state);

		if (!state_is_working_or_enqueued (old_state))
			need_signal = TRUE;
	}

	if (need_signal)
		sgen_thread_pool_idle_signal ();
}

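/*
 * Called by a worker that has run out of work. The last worker to finish may
 * enqueue preclean jobs through finish_callback and wake everybody up again;
 * otherwise the worker transitions itself back to NOT WORKING.
 */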
static void
worker_try_finish (WorkerData *data)
{
	State old_state;
	int i, working = 0;

	++stat_workers_num_finished;

	mono_os_mutex_lock (&finished_lock);

	for (i = 0; i < workers_num; i++) {
		if (state_is_working_or_enqueued (workers_data [i].state))
			working++;
	}

	if (working == 1) {
		SgenWorkersFinishCallback callback = finish_callback;
		SGEN_ASSERT (0, idle_func_object_ops == idle_func_object_ops_nopar, "Why are we finishing with parallel context");
		/* We are the last one left. Enqueue preclean job if we have one and wake everybody. */
		SGEN_ASSERT (0, data->state != STATE_NOT_WORKING, "How did we get from doing idle work to NOT WORKING without setting it ourselves?");
		if (callback) {
			finish_callback = NULL;
			callback ();
			/* Make sure each worker has a chance of seeing the enqueued jobs */
			sgen_workers_ensure_awake ();
			SGEN_ASSERT (0, data->state == STATE_WORK_ENQUEUED, "Why did we fail to set our own state to ENQUEUED");
			goto work_available;
		}
	}

	do {
		old_state = data->state;

		SGEN_ASSERT (0, old_state != STATE_NOT_WORKING, "How did we get from doing idle work to NOT WORKING without setting it ourselves?");
		if (old_state == STATE_WORK_ENQUEUED)
			goto work_available;
		SGEN_ASSERT (0, old_state == STATE_WORKING, "What other possibility is there?");
	} while (!set_state (data, old_state, STATE_NOT_WORKING));

	/*
	 * If we are second to last to finish, we set the scan context to the non-parallel
	 * version so we can speed up the last worker. This helps us maintain the same level
	 * of performance as non-parallel mode even if we fail to distribute work properly.
	 */
	if (working == 2)
		idle_func_object_ops = idle_func_object_ops_nopar;

	mono_os_mutex_unlock (&finished_lock);

	binary_protocol_worker_finish (sgen_timestamp (), forced_stop);

	sgen_gray_object_queue_trim_free_list (&data->private_gray_queue);
	return;

work_available:
	mono_os_mutex_unlock (&finished_lock);
}

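/*
 * Hand a job to the worker thread pool, or run it synchronously on the calling
 * thread if enqueuing is disabled.
 */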
void
sgen_workers_enqueue_job (SgenThreadPoolJob *job, gboolean enqueue)
{
	if (!enqueue) {
		job->func (NULL, job);
		sgen_thread_pool_job_free (job);
		return;
	}
	sgen_thread_pool_job_enqueue (job);
}

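/*
 * Refill the worker's private gray queue with a section from the shared
 * distribute gray queue. Returns TRUE if work was obtained.
 */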
static gboolean
workers_get_work (WorkerData *data)
{
	SgenMajorCollector *major;

	g_assert (sgen_gray_object_queue_is_empty (&data->private_gray_queue));

	/* If we're concurrent, steal from the workers distribute gray queue. */
	major = sgen_get_major_collector ();
	if (major->is_concurrent) {
		GrayQueueSection *section = sgen_section_gray_queue_dequeue (&workers_distribute_gray_queue);
		if (section) {
			sgen_gray_object_enqueue_section (&data->private_gray_queue, section);
			return TRUE;
		}
	}

	/* Nobody to steal from */
	g_assert (sgen_gray_object_queue_is_empty (&data->private_gray_queue));
	return FALSE;
}

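/*
 * Try to steal a gray queue section from another worker's private queue.
 * Returns TRUE on success, FALSE if there was nobody to steal from.
 */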
static gboolean
workers_steal_work (WorkerData *data)
{
	SgenMajorCollector *major = sgen_get_major_collector ();
	GrayQueueSection *section = NULL;
	int i, current_worker;

	if (!major->is_parallel)
		return FALSE;

	/* If we're parallel, steal from other workers' private gray queues */
	g_assert (sgen_gray_object_queue_is_empty (&data->private_gray_queue));

	current_worker = (int) (data - workers_data);

	for (i = 1; i < workers_num && !section; i++) {
		int steal_worker = (current_worker + i) % workers_num;
		if (state_is_working_or_enqueued (workers_data [steal_worker].state))
			section = sgen_gray_object_steal_section (&workers_data [steal_worker].private_gray_queue);
	}

	if (section) {
		sgen_gray_object_enqueue_section (&data->private_gray_queue, section);
		return TRUE;
	}

	/* Nobody to steal from */
	g_assert (sgen_gray_object_queue_is_empty (&data->private_gray_queue));
	return FALSE;
}

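/* Sanity checks run on every object enqueued while a concurrent collection is in progress. */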
static void
concurrent_enqueue_check (GCObject *obj)
{
	g_assert (sgen_concurrent_collection_in_progress ());
	g_assert (!sgen_ptr_in_nursery (obj));
	g_assert (SGEN_LOAD_VTABLE (obj));
}

static void
init_private_gray_queue (WorkerData *data)
{
	sgen_gray_object_queue_init (&data->private_gray_queue,
			sgen_get_major_collector ()->is_concurrent ? concurrent_enqueue_check : NULL,
			FALSE);
}

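/*
 * Per-thread initialization run by the thread pool: register the worker with
 * the client and, for concurrent collectors, set up its private gray queue and
 * run the worker init callback.
 */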
static void
thread_pool_init_func (void *data_untyped)
{
	WorkerData *data = (WorkerData *)data_untyped;
	SgenMajorCollector *major = sgen_get_major_collector ();

	sgen_client_thread_register_worker ();

	if (!major->is_concurrent)
		return;

	init_private_gray_queue (data);

	if (worker_init_cb)
		worker_init_cb (data);
}

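/*
 * Tells the thread pool whether idle work should continue: for a specific
 * worker, whether that worker is still working or has work enqueued; without a
 * worker, whether any worker is still working.
 */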
static gboolean
continue_idle_func (void *data_untyped)
{
	if (data_untyped) {
		WorkerData *data = (WorkerData *)data_untyped;
		return state_is_working_or_enqueued (data->state);
	} else {
		/* Return whether any of the threads is working */
		return !sgen_workers_all_done ();
	}
}

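/*
 * Idle function run by each worker: acknowledge enqueued work, then drain the
 * private gray queue, refilling it from the distribute queue or by stealing
 * from other workers; when no work is left, try to finish.
 */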
static void
marker_idle_func (void *data_untyped)
{
	WorkerData *data = (WorkerData *)data_untyped;

	SGEN_ASSERT (0, continue_idle_func (data_untyped), "Why are we called when we're not supposed to work?");
	SGEN_ASSERT (0, sgen_concurrent_collection_in_progress (), "The worker should only mark in concurrent collections.");

	if (data->state == STATE_WORK_ENQUEUED) {
		set_state (data, STATE_WORK_ENQUEUED, STATE_WORKING);
		SGEN_ASSERT (0, data->state != STATE_NOT_WORKING, "How did we get from WORK ENQUEUED to NOT WORKING?");
	}

	if (!forced_stop && (!sgen_gray_object_queue_is_empty (&data->private_gray_queue) || workers_get_work (data) || workers_steal_work (data))) {
		ScanCopyContext ctx = CONTEXT_FROM_OBJECT_OPERATIONS (idle_func_object_ops, &data->private_gray_queue);

		SGEN_ASSERT (0, !sgen_gray_object_queue_is_empty (&data->private_gray_queue), "How is our gray queue empty if we just got work?");

		sgen_drain_gray_stack (ctx);
	} else {
		worker_try_finish (data);
	}
}

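/*
 * Initialize the shared distribute gray queue, or assert that it is still
 * empty and locked if it was already initialized.
 */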
static void
init_distribute_gray_queue (void)
{
	if (workers_distribute_gray_queue_inited) {
		g_assert (sgen_section_gray_queue_is_empty (&workers_distribute_gray_queue));
		g_assert (workers_distribute_gray_queue.locked);
		return;
	}

	sgen_section_gray_queue_init (&workers_distribute_gray_queue, TRUE,
			sgen_get_major_collector ()->is_concurrent ? concurrent_enqueue_check : NULL);
	workers_distribute_gray_queue_inited = TRUE;
}

void
sgen_workers_init_distribute_gray_queue (void)
{
	SGEN_ASSERT (0, sgen_get_major_collector ()->is_concurrent,
			"Why should we init the distribute gray queue if we don't need it?");
	init_distribute_gray_queue ();
}

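/*
 * Set up the worker infrastructure: allocate per-worker data, initialize the
 * distribute gray queue and start the thread pool. Non-concurrent collectors
 * only get the bare thread pool.
 */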
void
sgen_workers_init (int num_workers, SgenWorkerCallback callback)
{
	int i;
	void **workers_data_ptrs = (void **)alloca (num_workers * sizeof (void *));

	if (!sgen_get_major_collector ()->is_concurrent) {
		sgen_thread_pool_init (num_workers, thread_pool_init_func, NULL, NULL, NULL);
		return;
	}

	mono_os_mutex_init (&finished_lock);
	//g_print ("initing %d workers\n", num_workers);

	workers_num = num_workers;

	workers_data = (WorkerData *)sgen_alloc_internal_dynamic (sizeof (WorkerData) * num_workers, INTERNAL_MEM_WORKER_DATA, TRUE);
	memset (workers_data, 0, sizeof (WorkerData) * num_workers);

	init_distribute_gray_queue ();

	for (i = 0; i < num_workers; ++i)
		workers_data_ptrs [i] = (void *) &workers_data [i];

	worker_init_cb = callback;

	sgen_thread_pool_init (num_workers, thread_pool_init_func, marker_idle_func, continue_idle_func, workers_data_ptrs);

	mono_counters_register ("# workers finished", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_workers_num_finished);
}

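/*
 * Forcibly stop the workers: drop any pending finish callback, raise the
 * forced-stop flag and wait until every worker has gone idle.
 */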
void
sgen_workers_stop_all_workers (void)
{
	finish_callback = NULL;
	mono_memory_write_barrier ();
	forced_stop = TRUE;

	sgen_thread_pool_wait_for_all_jobs ();
	sgen_thread_pool_idle_wait ();
	SGEN_ASSERT (0, sgen_workers_all_done (), "Can only signal enqueue work when in no work state");
}

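/*
 * Install the scan/copy contexts and the finish callback, then wake all
 * workers so they start draining work.
 */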
void
sgen_workers_start_all_workers (SgenObjectOperations *object_ops_nopar, SgenObjectOperations *object_ops_par, SgenWorkersFinishCallback callback)
{
	idle_func_object_ops_par = object_ops_par;
	idle_func_object_ops_nopar = object_ops_nopar;
	forced_stop = FALSE;
	finish_callback = callback;
	mono_memory_write_barrier ();

	sgen_workers_ensure_awake ();
}

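/*
 * Wait for all jobs and idle work to finish, then assert that no gray queue
 * still holds work.
 */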
void
sgen_workers_join (void)
{
	int i;

	sgen_thread_pool_wait_for_all_jobs ();
	sgen_thread_pool_idle_wait ();
	SGEN_ASSERT (0, sgen_workers_all_done (), "Can only signal enqueue work when in no work state");

	/* At this point all the workers have stopped. */

	SGEN_ASSERT (0, sgen_section_gray_queue_is_empty (&workers_distribute_gray_queue), "Why is there still work left to do?");
	for (i = 0; i < workers_num; ++i)
		SGEN_ASSERT (0, sgen_gray_object_queue_is_empty (&workers_data [i].private_gray_queue), "Why is there still work left to do?");
}

/*
 * Can only be called if the workers are stopped.
 * If we're stopped, there are also no pending jobs.
 */
gboolean
sgen_workers_have_idle_work (void)
{
	int i;

	SGEN_ASSERT (0, forced_stop && sgen_workers_all_done (), "Checking for idle work should only happen if the workers are stopped.");

	if (!sgen_section_gray_queue_is_empty (&workers_distribute_gray_queue))
		return TRUE;

	for (i = 0; i < workers_num; ++i) {
		if (!sgen_gray_object_queue_is_empty (&workers_data [i].private_gray_queue))
			return TRUE;
	}

	return FALSE;
}

gboolean
sgen_workers_all_done (void)
{
	int i;

	for (i = 0; i < workers_num; i++) {
		if (state_is_working_or_enqueued (workers_data [i].state))
			return FALSE;
	}
	return TRUE;
}

/* Must only be used for debugging */
gboolean
sgen_workers_are_working (void)
{
	return !sgen_workers_all_done ();
}

void
sgen_workers_assert_gray_queue_is_empty (void)
{
	SGEN_ASSERT (0, sgen_section_gray_queue_is_empty (&workers_distribute_gray_queue), "Why is the workers gray queue not empty?");
}

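/*
 * Split the given gray queue into sections and move them all into the shared
 * distribute gray queue so the workers can pick them up.
 */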
void
sgen_workers_take_from_queue (SgenGrayQueue *queue)
{
	sgen_gray_object_spread (queue, sgen_workers_get_job_split_count ());

	for (;;) {
		GrayQueueSection *section = sgen_gray_object_dequeue_section (queue);
		if (!section)
			break;
		sgen_section_gray_queue_enqueue (&workers_distribute_gray_queue, section);
	}

	SGEN_ASSERT (0, !sgen_workers_are_working (), "We should fully populate the distribute gray queue before we start the workers");
}

SgenObjectOperations*
sgen_workers_get_idle_func_object_ops (void)
{
	return (idle_func_object_ops_par) ? idle_func_object_ops_par : idle_func_object_ops_nopar;
}

/*
 * If we have a single worker, splitting into multiple jobs makes no sense. With
 * more than one worker, we split into a larger number of jobs so that, in case
 * the work load is uneven, a worker that finished quickly can take up more jobs
 * than another one.
 */
int
sgen_workers_get_job_split_count (void)
{
	return (workers_num > 1) ? workers_num * 4 : 1;
}

void
sgen_workers_foreach (SgenWorkerCallback callback)
{
	int i;

	for (i = 0; i < workers_num; i++)
		callback (&workers_data [i]);
}