/*
 * sgen-workers.c: Worker threads for parallel and concurrent GC.
 *
 * Copyright 2001-2003 Ximian, Inc
 * Copyright 2003-2010 Novell, Inc.
 * Copyright (C) 2012 Xamarin Inc
 *
 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
 */
16 #include "mono/sgen/sgen-gc.h"
17 #include "mono/sgen/sgen-workers.h"
18 #include "mono/sgen/sgen-thread-pool.h"
19 #include "mono/utils/mono-membar.h"
20 #include "mono/sgen/sgen-client.h"
22 static int workers_num;
23 static volatile gboolean forced_stop;
24 static WorkerData *workers_data;
25 static SgenWorkerCallback worker_init_cb;
28 * When using multiple workers, we need to have the last worker
29 * enqueue the preclean jobs (if there are any). This lock ensures
30 * that when the last worker takes it, all the other workers have
31 * gracefully finished, so it can restart them.
33 static mono_mutex_t finished_lock;
35 static SgenSectionGrayQueue workers_distribute_gray_queue;
36 static gboolean workers_distribute_gray_queue_inited;
/*
 * Allowed transitions:
 *
 * | from \ to          | NOT WORKING | WORKING | WORK ENQUEUED |
 * |--------------------+-------------+---------+---------------+
 * | NOT WORKING        | -           | -       | main / worker |
 * | WORKING            | worker      | -       | main / worker |
 * | WORK ENQUEUED      | -           | worker  | -             |
 *
 * The WORK ENQUEUED state guarantees that the worker thread will inspect the queue again at
 * least once.  Only after looking at the queue will it go back to WORKING, and then,
 * eventually, to NOT WORKING.  After enqueuing work the main thread transitions the state
 * to WORK ENQUEUED.  Signalling the worker thread to wake up is only necessary if the old
 * state was NOT WORKING.
 */
62 static SgenObjectOperations * volatile idle_func_object_ops;
64 * finished_callback is called only when the workers finish work normally (when they
65 * are not forced to finish). The callback is used to enqueue preclean jobs.
67 static volatile SgenWorkersFinishCallback finish_callback;
69 static guint64 stat_workers_num_finished;
72 set_state (WorkerData *data, State old_state, State new_state)
74 SGEN_ASSERT (0, old_state != new_state, "Why are we transitioning to the same state?");
75 if (new_state == STATE_NOT_WORKING)
76 SGEN_ASSERT (0, old_state == STATE_WORKING, "We can only transition to NOT WORKING from WORKING");
77 else if (new_state == STATE_WORKING)
78 SGEN_ASSERT (0, old_state == STATE_WORK_ENQUEUED, "We can only transition to WORKING from WORK ENQUEUED");
79 if (new_state == STATE_NOT_WORKING || new_state == STATE_WORKING)
80 SGEN_ASSERT (6, sgen_thread_pool_is_thread_pool_thread (mono_native_thread_id_get ()), "Only the worker thread is allowed to transition to NOT_WORKING or WORKING");
82 return InterlockedCompareExchange (&data->state, new_state, old_state) == old_state;
86 state_is_working_or_enqueued (State state)
88 return state == STATE_WORKING || state == STATE_WORK_ENQUEUED;
92 sgen_workers_ensure_awake (void)
95 gboolean need_signal = FALSE;
97 for (i = 0; i < workers_num; i++) {
99 gboolean did_set_state;
102 old_state = workers_data [i].state;
104 if (old_state == STATE_WORK_ENQUEUED)
107 did_set_state = set_state (&workers_data [i], old_state, STATE_WORK_ENQUEUED);
108 } while (!did_set_state);
110 if (!state_is_working_or_enqueued (old_state))
115 sgen_thread_pool_idle_signal ();
119 worker_try_finish (WorkerData *data)
124 ++stat_workers_num_finished;
126 mono_os_mutex_lock (&finished_lock);
128 for (i = 0; i < workers_num; i++) {
129 if (state_is_working_or_enqueued (workers_data [i].state))
134 SgenWorkersFinishCallback callback = finish_callback;
135 /* We are the last one left. Enqueue preclean job if we have one and awake everybody */
136 SGEN_ASSERT (0, data->state != STATE_NOT_WORKING, "How did we get from doing idle work to NOT WORKING without setting it ourselves?");
138 finish_callback = NULL;
140 /* Make sure each worker has a chance of seeing the enqueued jobs */
141 sgen_workers_ensure_awake ();
142 SGEN_ASSERT (0, data->state == STATE_WORK_ENQUEUED, "Why did we fail to set our own state to ENQUEUED");
148 old_state = data->state;
150 SGEN_ASSERT (0, old_state != STATE_NOT_WORKING, "How did we get from doing idle work to NOT WORKING without setting it ourselves?");
151 if (old_state == STATE_WORK_ENQUEUED)
153 SGEN_ASSERT (0, old_state == STATE_WORKING, "What other possibility is there?");
154 } while (!set_state (data, old_state, STATE_NOT_WORKING));
156 mono_os_mutex_unlock (&finished_lock);
158 binary_protocol_worker_finish (sgen_timestamp (), forced_stop);
160 sgen_gray_object_queue_trim_free_list (&data->private_gray_queue);
164 mono_os_mutex_unlock (&finished_lock);
168 sgen_workers_enqueue_job (SgenThreadPoolJob *job, gboolean enqueue)
171 job->func (NULL, job);
172 sgen_thread_pool_job_free (job);
176 sgen_thread_pool_job_enqueue (job);
180 workers_get_work (WorkerData *data)
182 SgenMajorCollector *major;
184 g_assert (sgen_gray_object_queue_is_empty (&data->private_gray_queue));
186 /* If we're concurrent, steal from the workers distribute gray queue. */
187 major = sgen_get_major_collector ();
188 if (major->is_concurrent) {
189 GrayQueueSection *section = sgen_section_gray_queue_dequeue (&workers_distribute_gray_queue);
191 sgen_gray_object_enqueue_section (&data->private_gray_queue, section);
196 /* Nobody to steal from */
197 g_assert (sgen_gray_object_queue_is_empty (&data->private_gray_queue));
202 concurrent_enqueue_check (GCObject *obj)
204 g_assert (sgen_concurrent_collection_in_progress ());
205 g_assert (!sgen_ptr_in_nursery (obj));
206 g_assert (SGEN_LOAD_VTABLE (obj));
210 init_private_gray_queue (WorkerData *data)
212 sgen_gray_object_queue_init (&data->private_gray_queue,
213 sgen_get_major_collector ()->is_concurrent ? concurrent_enqueue_check : NULL,
218 thread_pool_init_func (void *data_untyped)
220 WorkerData *data = (WorkerData *)data_untyped;
221 SgenMajorCollector *major = sgen_get_major_collector ();
223 sgen_client_thread_register_worker ();
225 if (!major->is_concurrent)
228 init_private_gray_queue (data);
231 worker_init_cb (data);
235 continue_idle_func (void *data_untyped)
238 WorkerData *data = (WorkerData *)data_untyped;
239 return state_is_working_or_enqueued (data->state);
241 /* Return if any of the threads is working */
242 return !sgen_workers_all_done ();
247 marker_idle_func (void *data_untyped)
249 WorkerData *data = (WorkerData *)data_untyped;
251 SGEN_ASSERT (0, continue_idle_func (data_untyped), "Why are we called when we're not supposed to work?");
252 SGEN_ASSERT (0, sgen_concurrent_collection_in_progress (), "The worker should only mark in concurrent collections.");
254 if (data->state == STATE_WORK_ENQUEUED) {
255 set_state (data, STATE_WORK_ENQUEUED, STATE_WORKING);
256 SGEN_ASSERT (0, data->state != STATE_NOT_WORKING, "How did we get from WORK ENQUEUED to NOT WORKING?");
259 if (!forced_stop && (!sgen_gray_object_queue_is_empty (&data->private_gray_queue) || workers_get_work (data))) {
260 ScanCopyContext ctx = CONTEXT_FROM_OBJECT_OPERATIONS (idle_func_object_ops, &data->private_gray_queue);
262 SGEN_ASSERT (0, !sgen_gray_object_queue_is_empty (&data->private_gray_queue), "How is our gray queue empty if we just got work?");
264 sgen_drain_gray_stack (ctx);
266 worker_try_finish (data);
271 init_distribute_gray_queue (void)
273 if (workers_distribute_gray_queue_inited) {
274 g_assert (sgen_section_gray_queue_is_empty (&workers_distribute_gray_queue));
275 g_assert (workers_distribute_gray_queue.locked);
279 sgen_section_gray_queue_init (&workers_distribute_gray_queue, TRUE,
280 sgen_get_major_collector ()->is_concurrent ? concurrent_enqueue_check : NULL);
281 workers_distribute_gray_queue_inited = TRUE;
285 sgen_workers_init_distribute_gray_queue (void)
287 SGEN_ASSERT (0, sgen_get_major_collector ()->is_concurrent,
288 "Why should we init the distribute gray queue if we don't need it?");
289 init_distribute_gray_queue ();
293 sgen_workers_init (int num_workers, SgenWorkerCallback callback)
296 void **workers_data_ptrs = (void **)alloca(num_workers * sizeof(void *));
298 if (!sgen_get_major_collector ()->is_concurrent) {
299 sgen_thread_pool_init (num_workers, thread_pool_init_func, NULL, NULL, NULL);
303 mono_os_mutex_init (&finished_lock);
304 //g_print ("initing %d workers\n", num_workers);
306 workers_num = num_workers;
308 workers_data = (WorkerData *)sgen_alloc_internal_dynamic (sizeof (WorkerData) * num_workers, INTERNAL_MEM_WORKER_DATA, TRUE);
309 memset (workers_data, 0, sizeof (WorkerData) * num_workers);
311 init_distribute_gray_queue ();
313 for (i = 0; i < num_workers; ++i)
314 workers_data_ptrs [i] = (void *) &workers_data [i];
316 worker_init_cb = callback;
318 sgen_thread_pool_init (num_workers, thread_pool_init_func, marker_idle_func, continue_idle_func, workers_data_ptrs);
320 mono_counters_register ("# workers finished", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_workers_num_finished);
324 sgen_workers_stop_all_workers (void)
326 finish_callback = NULL;
327 mono_memory_write_barrier ();
330 sgen_thread_pool_wait_for_all_jobs ();
331 sgen_thread_pool_idle_wait ();
332 SGEN_ASSERT (0, sgen_workers_all_done (), "Can only signal enqueue work when in no work state");
336 sgen_workers_start_all_workers (SgenObjectOperations *object_ops, SgenWorkersFinishCallback callback)
339 idle_func_object_ops = object_ops;
340 finish_callback = callback;
341 mono_memory_write_barrier ();
343 sgen_workers_ensure_awake ();
347 sgen_workers_join (void)
351 sgen_thread_pool_wait_for_all_jobs ();
352 sgen_thread_pool_idle_wait ();
353 SGEN_ASSERT (0, sgen_workers_all_done (), "Can only signal enqueue work when in no work state");
355 /* At this point all the workers have stopped. */
357 SGEN_ASSERT (0, sgen_section_gray_queue_is_empty (&workers_distribute_gray_queue), "Why is there still work left to do?");
358 for (i = 0; i < workers_num; ++i)
359 SGEN_ASSERT (0, sgen_gray_object_queue_is_empty (&workers_data [i].private_gray_queue), "Why is there still work left to do?");
363 * Can only be called if the workers are stopped.
364 * If we're stopped, there are also no pending jobs.
367 sgen_workers_have_idle_work (void)
371 SGEN_ASSERT (0, forced_stop && sgen_workers_all_done (), "Checking for idle work should only happen if the workers are stopped.");
373 if (!sgen_section_gray_queue_is_empty (&workers_distribute_gray_queue))
376 for (i = 0; i < workers_num; ++i) {
377 if (!sgen_gray_object_queue_is_empty (&workers_data [i].private_gray_queue))
385 sgen_workers_all_done (void)
389 for (i = 0; i < workers_num; i++) {
390 if (state_is_working_or_enqueued (workers_data [i].state))
396 /* Must only be used for debugging */
398 sgen_workers_are_working (void)
400 return !sgen_workers_all_done ();
404 sgen_workers_assert_gray_queue_is_empty (void)
406 SGEN_ASSERT (0, sgen_section_gray_queue_is_empty (&workers_distribute_gray_queue), "Why is the workers gray queue not empty?");
410 sgen_workers_take_from_queue (SgenGrayQueue *queue)
413 GrayQueueSection *section = sgen_gray_object_dequeue_section (queue);
416 sgen_section_gray_queue_enqueue (&workers_distribute_gray_queue, section);
419 SGEN_ASSERT (0, !sgen_workers_are_working (), "We should fully populate the distribute gray queue before we start the workers");
422 SgenObjectOperations*
423 sgen_workers_get_idle_func_object_ops (void)
425 return idle_func_object_ops;
429 * If we have a single worker, splitting into multiple jobs makes no sense. With
430 * more than one worker, we split into a larger number of jobs so that, in case
431 * the work load is uneven, a worker that finished quickly can take up more jobs
435 sgen_workers_get_job_split_count (void)
437 return (workers_num > 1) ? workers_num * 4 : 1;
441 sgen_workers_foreach (SgenWorkerCallback callback)
445 for (i = 0; i < workers_num; i++)
446 callback (&workers_data [i]);