2 * sgen-workers.c: Worker threads for parallel and concurrent GC.
4 * Copyright 2001-2003 Ximian, Inc
5 * Copyright 2003-2010 Novell, Inc.
6 * Copyright (C) 2012 Xamarin Inc
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Library General Public
10 * License 2.0 as published by the Free Software Foundation;
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Library General Public License for more details.
17 * You should have received a copy of the GNU Library General Public
18 * License 2.0 along with this library; if not, write to the Free
19 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
25 #include "metadata/sgen-gc.h"
26 #include "metadata/sgen-workers.h"
27 #include "utils/mono-counters.h"
/* Number of worker threads and their per-thread data array. */
29 static int workers_num;
30 static WorkerData *workers_data;
/* WorkerData used by the GC (main) thread itself so it can share work with the workers. */
31 static WorkerData workers_gc_thread_data;
/* Gray queue whose sections are redirected/distributed to the workers for marking. */
33 static SgenGrayQueue workers_distribute_gray_queue;
/* Phase flags: written by the GC thread, read by workers (hence volatile). */
35 static volatile gboolean workers_gc_in_progress = FALSE;
36 static volatile gboolean workers_marking = FALSE;
37 static gboolean workers_started = FALSE;
/* Number of workers currently parked on workers_waiting_sem; updated with CAS loops. */
38 static volatile int workers_num_waiting = 0;
39 static MonoSemType workers_waiting_sem;
/* Posted once per collection when the last worker goes idle; workers_done_posted guards the single post. */
40 static MonoSemType workers_done_sem;
41 static volatile int workers_done_posted = 0;
/* Singly-linked LIFO job queue; pushes/pops are protected by workers_job_queue_mutex. */
43 static volatile int workers_job_queue_num_entries = 0;
44 static volatile JobQueueEntry *workers_job_queue = NULL;
45 static LOCK_DECLARE (workers_job_queue_mutex);
46 static int workers_num_jobs_enqueued = 0;
47 static volatile int workers_num_jobs_finished = 0;
/* Statistics, registered with mono_counters in sgen_workers_init (). */
49 static long long stat_workers_stolen_from_self_lock;
50 static long long stat_workers_stolen_from_self_no_lock;
51 static long long stat_workers_stolen_from_others;
52 static long long stat_workers_num_waited;
/*
 * Wake at most MAX parked worker threads.  For each worker to wake we
 * CAS-decrement workers_num_waiting and post the waiting semaphore.
 * NOTE(review): the loop's early exit when no workers are waiting is
 * elided in this listing -- confirm against the full source.
 */
55 workers_wake_up (int max)
59 for (i = 0; i < max; ++i) {
62 num = workers_num_waiting;
/* Retry until we successfully claim one parked worker. */
65 } while (InterlockedCompareExchange (&workers_num_waiting, num - 1, num) != num);
66 MONO_SEM_POST (&workers_waiting_sem);
/* Wake every worker thread. */
71 workers_wake_up_all (void)
73 workers_wake_up (workers_num);
/*
 * Body of the worker park routine (its signature precedes this listing;
 * presumably workers_wait -- TODO confirm).  CAS-increments
 * workers_num_waiting; if that makes every worker idle while no GC is in
 * progress, posts workers_done_sem exactly once (guarded by the
 * workers_done_posted flag), then blocks on workers_waiting_sem until a
 * workers_wake_up () call releases us.
 */
80 ++stat_workers_num_waited;
82 num = workers_num_waiting;
83 } while (InterlockedCompareExchange (&workers_num_waiting, num + 1, num) != num);
/* num + 1 == workers_num means we are the last worker to go idle. */
84 if (num + 1 == workers_num && !workers_gc_in_progress) {
85 /* Make sure the done semaphore is only posted once. */
88 posted = workers_done_posted;
/* NOTE(review): the early break when `posted` is already set appears to
 * be elided here; without it this CAS loop would not terminate once the
 * flag is 1 -- confirm against the full source. */
91 } while (InterlockedCompareExchange (&workers_done_posted, 1, 0) != 0);
93 MONO_SEM_POST (&workers_done_sem);
/* Park until woken. */
95 MONO_SEM_WAIT (&workers_waiting_sem);
/* Worker threads are only used for parallel or concurrent collections. */
99 collection_needs_workers (void)
101 return sgen_collection_is_parallel () || sgen_collection_is_concurrent ();
/*
 * Push a job (FUNC, DATA) onto the LIFO job queue and wake up to as many
 * workers as there are queued entries.  No-op when the current collection
 * type doesn't use worker threads.
 */
105 sgen_workers_enqueue_job (JobFunc func, void *data)
108 JobQueueEntry *entry;
110 if (!collection_needs_workers ()) {
115 entry = sgen_alloc_internal (INTERNAL_MEM_JOB_QUEUE_ENTRY);
/* Push onto the head of the list under the queue mutex. */
119 mono_mutex_lock (&workers_job_queue_mutex);
120 entry->next = workers_job_queue;
121 workers_job_queue = entry;
122 num_entries = ++workers_job_queue_num_entries;
123 ++workers_num_jobs_enqueued;
124 mono_mutex_unlock (&workers_job_queue_mutex);
/* One wake-up per pending entry so enough workers pick up work. */
126 workers_wake_up (num_entries);
/*
 * Busy-wait until every job enqueued so far has been executed.
 * The finished counter is CAS-incremented by the workers in
 * workers_dequeue_and_do_job ().
 */
130 sgen_workers_wait_for_jobs (void)
132 // FIXME: implement this properly
133 while (workers_num_jobs_finished < workers_num_jobs_enqueued)
/*
 * Pop one job off the queue and run it; the return value (elided from
 * this listing) presumably indicates whether a job was executed.
 * The unlocked emptiness pre-check is only a fast path -- the actual
 * dequeue happens under workers_job_queue_mutex.
 */
138 workers_dequeue_and_do_job (WorkerData *data)
140 JobQueueEntry *entry;
144 * At this point the GC might not be running anymore. We
145 * could have been woken up by a job that was then taken by
146 * another thread, after which the collection finished, so we
147 * first have to successfully dequeue a job before doing
148 * anything assuming that the collection is still ongoing.
/* Fast path: nothing queued, give up without taking the lock. */
151 if (!workers_job_queue_num_entries)
154 mono_mutex_lock (&workers_job_queue_mutex);
155 entry = (JobQueueEntry*)workers_job_queue;
157 workers_job_queue = entry->next;
158 --workers_job_queue_num_entries;
160 mono_mutex_unlock (&workers_job_queue_mutex);
/* Having dequeued a job proves the collection still needs workers. */
165 g_assert (collection_needs_workers ());
167 entry->func (data, entry->data);
168 sgen_free_internal (entry, INTERNAL_MEM_JOB_QUEUE_ENTRY);
/* CAS-increment the finished counter for sgen_workers_wait_for_jobs (). */
171 num_finished = workers_num_jobs_finished;
172 } while (InterlockedCompareExchange (&workers_num_jobs_finished, num_finished + 1, num_finished) != num_finished);
/*
 * Steal roughly half of VICTIM_DATA's stealable stack into DATA's
 * (empty) private gray queue, copying at most one gray-queue section's
 * worth per iteration.  When LOCK is set the victim's
 * stealable_stack_mutex is try-locked (failure aborts the steal);
 * lock == FALSE is only used when the caller already holds the mutex.
 * Updates the steal statistics according to who the victim was.
 */
178 workers_steal (WorkerData *data, WorkerData *victim_data, gboolean lock)
180 SgenGrayQueue *queue = &data->private_gray_queue;
/* Stealing is only done into an empty private queue. */
183 g_assert (!queue->first);
185 if (!victim_data->stealable_stack_fill)
/* Non-zero trylock return means the mutex is contended -- give up. */
188 if (lock && mono_mutex_trylock (&victim_data->stealable_stack_mutex))
/* Take (roughly) the upper half of the victim's stack. */
191 n = num = (victim_data->stealable_stack_fill + 1) / 2;
192 /* We're stealing num entries. */
195 int m = MIN (SGEN_GRAY_QUEUE_SECTION_SIZE, n);
198 sgen_gray_object_alloc_queue_section (queue);
199 memcpy (queue->first->objects,
200 victim_data->stealable_stack + victim_data->stealable_stack_fill - num + n,
202 queue->first->end = m;
205 victim_data->stealable_stack_fill -= num;
208 mono_mutex_unlock (&victim_data->stealable_stack_mutex);
/* Attribute the steal to the right counter. */
210 if (data == victim_data) {
212 stat_workers_stolen_from_self_lock += num;
214 stat_workers_stolen_from_self_no_lock += num;
216 stat_workers_stolen_from_others += num;
/*
 * Refill DATA's empty private gray queue by stealing, in order of
 * preference: from our own stealable stack, then from the GC thread's,
 * finally from any other worker.  The success returns are elided from
 * this listing; falling through to the end means nobody had work.
 */
223 workers_get_work (WorkerData *data)
227 g_assert (sgen_gray_object_queue_is_empty (&data->private_gray_queue));
229 /* Try to steal from our own stack. */
230 if (workers_steal (data, data, TRUE))
233 /* Then from the GC thread's stack. */
234 if (workers_steal (data, &workers_gc_thread_data, TRUE))
237 /* Finally, from another worker. */
238 for (i = 0; i < workers_num; ++i) {
239 WorkerData *victim_data = &workers_data [i];
240 if (data == victim_data)
242 if (workers_steal (data, victim_data, TRUE))
246 /* Nobody to steal from */
247 g_assert (sgen_gray_object_queue_is_empty (&data->private_gray_queue));
/*
 * Alloc-prepare callback installed on the private/distribute gray
 * queues: when QUEUE grows, move its sections into the owning
 * WorkerData's fixed-size stealable stack so other workers can steal
 * them, then wake sleeping workers.  Sections that don't fit are
 * re-enqueued; emptied sections are freed.
 */
252 workers_gray_queue_share_redirect (SgenGrayQueue *queue)
254 GrayQueueSection *section;
255 WorkerData *data = queue->alloc_prepare_data;
257 if (data->stealable_stack_fill) {
259 * There are still objects in the stealable stack, so
260 * wake up any workers that might be sleeping
262 if (workers_gc_in_progress)
263 workers_wake_up_all ();
267 /* The stealable stack is empty, so fill it. */
268 mono_mutex_lock (&data->stealable_stack_mutex);
270 while (data->stealable_stack_fill < STEALABLE_STACK_SIZE &&
271 (section = sgen_gray_object_dequeue_section (queue))) {
272 int num = MIN (section->end, STEALABLE_STACK_SIZE - data->stealable_stack_fill);
/* Copy the tail of the section into the stealable stack. */
274 memcpy (data->stealable_stack + data->stealable_stack_fill,
275 section->objects + section->end - num,
276 sizeof (char*) * num);
279 data->stealable_stack_fill += num;
/* Partially-consumed sections go back on the queue; empty ones are freed
 * (the branch structure is partially elided in this listing). */
282 sgen_gray_object_enqueue_section (queue, section);
284 sgen_gray_object_free_queue_section (section);
/* A worker whose queue is now empty steals back from its own stack;
 * lock == FALSE because we already hold stealable_stack_mutex. */
287 if (data != &workers_gc_thread_data && sgen_gray_object_queue_is_empty (queue))
288 workers_steal (data, data, FALSE);
290 mono_mutex_unlock (&data->stealable_stack_mutex);
292 if (workers_gc_in_progress)
293 workers_wake_up_all ();
/*
 * Initialize DATA's private gray queue with the share/redirect callback
 * so overflow is pushed to the stealable stack.
 */
297 init_private_gray_queue (WorkerData *data)
299 sgen_gray_object_queue_init_with_alloc_prepare (&data->private_gray_queue,
300 workers_gray_queue_share_redirect, data);
/*
 * Worker thread entry point: register the thread, let the major
 * collector initialize its per-worker state, set up the private gray
 * queue, then loop forever -- run queued jobs, and while marking is
 * active drain the private gray stack (sharing/stealing via the
 * redirect callback) until there is no work left, at which point the
 * worker parks (the park call is elided from this listing).
 */
303 static mono_native_thread_return_t
304 workers_thread_func (void *data_untyped)
306 WorkerData *data = data_untyped;
308 mono_thread_info_register_small_id ();
310 if (sgen_get_major_collector ()->init_worker_thread)
311 sgen_get_major_collector ()->init_worker_thread (data->major_collector_data);
313 init_private_gray_queue (data);
316 gboolean did_work = FALSE;
/* Run all currently available jobs. */
318 while (workers_dequeue_and_do_job (data)) {
320 /* FIXME: maybe distribute the gray queue here? */
/* Marking phase: drain our queue, refilling by stealing when empty. */
323 if (workers_marking && (!sgen_gray_object_queue_is_empty (&data->private_gray_queue) || workers_get_work (data))) {
324 g_assert (!sgen_gray_object_queue_is_empty (&data->private_gray_queue));
/* Drain in chunks of 32, sharing leftovers with other workers. */
326 while (!sgen_drain_gray_stack (&data->private_gray_queue, 32))
327 workers_gray_queue_share_redirect (&data->private_gray_queue);
328 g_assert (sgen_gray_object_queue_is_empty (&data->private_gray_queue));
/* Re-init the queue for the next round. */
330 init_private_gray_queue (data);
339 /* dummy return to make compilers happy */
/*
 * Hand the distribute gray queue's sections to the workers (no-op when
 * the collection doesn't use worker threads).
 */
344 sgen_workers_distribute_gray_queue_sections (void)
346 if (!collection_needs_workers ())
349 workers_gray_queue_share_redirect (&workers_distribute_gray_queue);
/*
 * Initialize the distribute gray queue; its redirect callback feeds the
 * GC thread's stealable stack.
 */
353 init_distribute_gray_queue (void)
355 sgen_gray_object_queue_init_with_alloc_prepare (&workers_distribute_gray_queue,
356 workers_gray_queue_share_redirect, &workers_gc_thread_data);
/* Public wrapper: (re)initialize the distribute gray queue when workers are used. */
360 sgen_workers_init_distribute_gray_queue (void)
362 if (!collection_needs_workers ())
365 init_distribute_gray_queue ();
/*
 * One-time setup for NUM_WORKERS worker threads: allocate and zero the
 * WorkerData array, init semaphores, the distribute gray queue, the
 * per-worker stealable-stack mutexes and the job-queue lock, and
 * register the steal statistics counters.  Threads themselves are
 * started later by sgen_workers_start_all_workers ().
 */
369 sgen_workers_init (int num_workers)
/* Only parallel/concurrent major collectors use worker threads. */
373 if (!sgen_get_major_collector ()->is_parallel && !sgen_get_major_collector ()->is_concurrent)
376 //g_print ("initing %d workers\n", num_workers);
378 workers_num = num_workers;
380 workers_data = sgen_alloc_internal_dynamic (sizeof (WorkerData) * num_workers, INTERNAL_MEM_WORKER_DATA, TRUE);
381 memset (workers_data, 0, sizeof (WorkerData) * num_workers);
383 MONO_SEM_INIT (&workers_waiting_sem, 0);
384 MONO_SEM_INIT (&workers_done_sem, 0);
386 init_distribute_gray_queue ();
/* The GC thread gets its own stealable stack and collector data too. */
387 mono_mutex_init (&workers_gc_thread_data.stealable_stack_mutex, NULL);
388 workers_gc_thread_data.stealable_stack_fill = 0;
390 if (sgen_get_major_collector ()->alloc_worker_data)
391 workers_gc_thread_data.major_collector_data = sgen_get_major_collector ()->alloc_worker_data ();
393 for (i = 0; i < workers_num; ++i) {
394 /* private gray queue is inited by the thread itself */
395 mono_mutex_init (&workers_data [i].stealable_stack_mutex, NULL);
396 workers_data [i].stealable_stack_fill = 0;
398 if (sgen_get_major_collector ()->alloc_worker_data)
399 workers_data [i].major_collector_data = sgen_get_major_collector ()->alloc_worker_data ();
402 LOCK_INIT (workers_job_queue_mutex);
404 sgen_register_fixed_internal_mem_type (INTERNAL_MEM_JOB_QUEUE_ENTRY, sizeof (JobQueueEntry));
406 mono_counters_register ("Stolen from self lock", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_workers_stolen_from_self_lock);
407 mono_counters_register ("Stolen from self no lock", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_workers_stolen_from_self_no_lock);
408 mono_counters_register ("Stolen from others", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_workers_stolen_from_others);
409 mono_counters_register ("# workers waited", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_workers_num_waited);
412 /* only the GC thread is allowed to start and join workers */
/*
 * Spawn the worker thread at INDEX; asserts it hasn't been started yet.
 */
415 workers_start_worker (int index)
417 g_assert (index >= 0 && index < workers_num);
419 g_assert (!workers_data [index].thread);
420 mono_native_thread_create (&workers_data [index].thread, workers_thread_func, &workers_data [index]);
/*
 * Kick off a worker round for a new collection: reset the per-collection
 * state (done flag, job counters), then either wake the already-created
 * workers (asserting they were all parked) or, on the first call, spawn
 * the threads.  GC-thread only.
 */
424 sgen_workers_start_all_workers (void)
428 if (!collection_needs_workers ())
431 if (sgen_get_major_collector ()->init_worker_thread)
432 sgen_get_major_collector ()->init_worker_thread (workers_gc_thread_data.major_collector_data);
434 g_assert (!workers_gc_in_progress);
435 workers_gc_in_progress = TRUE;
/* Marking is enabled separately by sgen_workers_start_marking (). */
436 workers_marking = FALSE;
437 workers_done_posted = 0;
439 g_assert (workers_job_queue_num_entries == 0);
440 workers_num_jobs_enqueued = 0;
441 workers_num_jobs_finished = 0;
/* Subsequent collections reuse the parked threads. */
443 if (workers_started) {
444 if (workers_num_waiting != workers_num)
445 g_error ("Expecting all %d sgen workers to be parked, but only %d are", workers_num, workers_num_waiting);
446 workers_wake_up_all ();
/* First collection: create the threads. */
450 for (i = 0; i < workers_num; ++i)
451 workers_start_worker (i);
453 workers_started = TRUE;
/*
 * Enable the marking phase for an already-started worker round and wake
 * all workers so they begin draining gray queues.
 */
457 sgen_workers_start_marking (void)
459 if (!collection_needs_workers ())
462 g_assert (workers_started && workers_gc_in_progress);
463 g_assert (!workers_marking);
465 workers_marking = TRUE;
467 workers_wake_up_all ();
/*
 * End the worker round: clear workers_gc_in_progress, make sure the
 * workers notice (waking them if they all appear parked already), wait
 * for the done semaphore, reset per-worker collector data, and assert
 * that every gray queue and stealable stack is empty.  GC-thread only;
 * despite the name the threads are parked, not joined.
 */
471 sgen_workers_join (void)
475 if (!collection_needs_workers ())
478 g_assert (sgen_gray_object_queue_is_empty (&workers_gc_thread_data.private_gray_queue));
479 g_assert (sgen_gray_object_queue_is_empty (&workers_distribute_gray_queue));
481 g_assert (workers_gc_in_progress);
482 workers_gc_in_progress = FALSE;
483 if (workers_num_waiting == workers_num) {
485 * All the workers might have shut down at this point
486 * and posted the done semaphore but we don't know it
487 * yet. It's not a big deal to wake them up again -
488 * they'll just do one iteration of their loop trying to
489 * find something to do and then go back to waiting
492 workers_wake_up_all ();
/* Block until the last worker posts the done semaphore. */
494 MONO_SEM_WAIT (&workers_done_sem);
495 workers_marking = FALSE;
497 if (sgen_get_major_collector ()->reset_worker_data) {
498 for (i = 0; i < workers_num; ++i)
499 sgen_get_major_collector ()->reset_worker_data (workers_data [i].major_collector_data);
502 g_assert (workers_done_posted);
/* Sanity: no leftover work anywhere after the round. */
504 g_assert (!workers_gc_thread_data.stealable_stack_fill);
505 g_assert (sgen_gray_object_queue_is_empty (&workers_gc_thread_data.private_gray_queue));
506 for (i = 0; i < workers_num; ++i) {
507 g_assert (!workers_data [i].stealable_stack_fill);
508 g_assert (sgen_gray_object_queue_is_empty (&workers_data [i].private_gray_queue));
/*
 * Return whether THREAD is one of our worker threads (or a worker
 * thread the major collector manages itself).
 */
513 sgen_is_worker_thread (MonoNativeThreadId thread)
517 if (sgen_get_major_collector ()->is_worker_thread && sgen_get_major_collector ()->is_worker_thread (thread))
520 for (i = 0; i < workers_num; ++i) {
521 if (workers_data [i].thread == thread)
/* Return whether QUEUE is the shared distribute gray queue. */
528 sgen_workers_is_distributed_queue (SgenGrayQueue *queue)
530 return queue == &workers_distribute_gray_queue;
/* Accessor for the shared distribute gray queue. */
534 sgen_workers_get_distribute_gray_queue (void)
536 return &workers_distribute_gray_queue;
/*
 * Reset the GC thread's major-collector worker data, if the collector
 * provides a reset hook.
 */
540 sgen_workers_reset_data (void)
542 if (sgen_get_major_collector ()->reset_worker_data)
543 sgen_get_major_collector ()->reset_worker_data (workers_gc_thread_data.major_collector_data);