2 * Copyright 2001-2003 Ximian, Inc
3 * Copyright 2003-2010 Novell, Inc.
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
13 * The above copyright notice and this permission notice shall be
14 * included in all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
17 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
18 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
19 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
20 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
21 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
22 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
28 #include "metadata/sgen-gc.h"
29 #include "metadata/sgen-workers.h"
30 #include "utils/mono-counters.h"
/* Number of worker threads and the per-worker state array. */
32 static int workers_num;
33 static WorkerData *workers_data;
/* WorkerData slot used by the GC (collector) thread itself. */
34 static WorkerData workers_gc_thread_data;
/* Gray queue whose contents get redirected/distributed to workers
 * (see workers_gray_queue_share_redirect below). */
36 static SgenGrayQueue workers_distribute_gray_queue;
/* Collection-lifecycle flags, read and written across threads. */
38 static volatile gboolean workers_gc_in_progress = FALSE;
39 static volatile gboolean workers_marking = FALSE;
40 static gboolean workers_started = FALSE;
/* Number of workers currently parked on workers_waiting_sem;
 * updated with InterlockedCompareExchange CAS loops. */
41 static volatile int workers_num_waiting = 0;
42 static MonoSemType workers_waiting_sem;
/* Posted once per collection when all workers are idle;
 * workers_done_posted guards against double-posting. */
43 static MonoSemType workers_done_sem;
44 static volatile int workers_done_posted = 0;
/* LIFO job queue (singly linked via entry->next), protected by
 * workers_job_queue_mutex. */
46 static volatile int workers_job_queue_num_entries = 0;
47 static volatile JobQueueEntry *workers_job_queue = NULL;
48 static LOCK_DECLARE (workers_job_queue_mutex);
/* Statistics counters, registered with mono_counters_register ()
 * in sgen_workers_init (). */
50 static long long stat_workers_stolen_from_self_lock;
51 static long long stat_workers_stolen_from_self_no_lock;
52 static long long stat_workers_stolen_from_others;
53 static long long stat_workers_num_waited;
/*
 * Wake up at most @max parked workers.  For each wake-up the count in
 * workers_num_waiting is decremented with a CAS loop and the waiting
 * semaphore is posted once.
 * NOTE(review): the opening `do {` of the CAS loop (and the early-exit
 * check when no workers are waiting) are not visible in this excerpt.
 */
56 workers_wake_up (int max)
60 for (i = 0; i < max; ++i) {
63 num = workers_num_waiting;
66 } while (InterlockedCompareExchange (&workers_num_waiting, num - 1, num) != num);
67 MONO_SEM_POST (&workers_waiting_sem);
/* Wake every worker thread (up to workers_num of them). */
72 workers_wake_up_all (void)
74 workers_wake_up (workers_num);
/*
 * Park the calling worker until woken.  The worker atomically bumps
 * workers_num_waiting (CAS loop); if it was the last one to go idle
 * while no GC is in progress, it posts the done semaphore exactly once
 * (workers_done_posted CAS guard), then blocks on workers_waiting_sem.
 * NOTE(review): the enclosing function's signature is elided from this
 * excerpt — presumably a static helper such as "workers_wait"; confirm
 * against the full file.
 */
81 ++stat_workers_num_waited;
83 num = workers_num_waiting;
84 } while (InterlockedCompareExchange (&workers_num_waiting, num + 1, num) != num);
85 if (num + 1 == workers_num && !workers_gc_in_progress) {
86 /* Make sure the done semaphore is only posted once. */
89 posted = workers_done_posted;
92 } while (InterlockedCompareExchange (&workers_done_posted, 1, 0) != 0);
94 MONO_SEM_POST (&workers_done_sem);
96 MONO_SEM_WAIT (&workers_waiting_sem);
/*
 * Enqueue a job (func/data pair) at the head of the LIFO job queue,
 * under workers_job_queue_mutex, then wake enough workers to drain the
 * queue (one per queued entry).
 * NOTE(review): the body of the non-parallel early branch is elided
 * here — presumably it runs the job inline and returns; confirm.
 */
100 sgen_workers_enqueue_job (JobFunc func, void *data)
103 JobQueueEntry *entry;
105 if (!sgen_collection_is_parallel ()) {
110 entry = sgen_alloc_internal (INTERNAL_MEM_JOB_QUEUE_ENTRY);
114 mono_mutex_lock (&workers_job_queue_mutex);
115 entry->next = workers_job_queue;
116 workers_job_queue = entry;
117 num_entries = ++workers_job_queue_num_entries;
118 mono_mutex_unlock (&workers_job_queue_mutex);
/* Wake one worker per pending entry so the queue gets drained. */
120 workers_wake_up (num_entries);
/*
 * Pop one job off the queue (under the mutex) and run it on this
 * worker.  Returns whether a job was executed — the early-out and
 * return statements are elided from this excerpt.
 */
124 workers_dequeue_and_do_job (WorkerData *data)
126 JobQueueEntry *entry;
129 * At this point the GC might not be running anymore. We
130 * could have been woken up by a job that was then taken by
131 * another thread, after which the collection finished, so we
132 * first have to successfully dequeue a job before doing
133 * anything assuming that the collection is still ongoing.
/* Cheap unlocked fast path: nothing queued, nothing to do. */
136 if (!workers_job_queue_num_entries)
139 mono_mutex_lock (&workers_job_queue_mutex);
140 entry = (JobQueueEntry*)workers_job_queue;
142 workers_job_queue = entry->next;
143 --workers_job_queue_num_entries;
145 mono_mutex_unlock (&workers_job_queue_mutex);
/* Jobs only exist during a parallel collection. */
150 g_assert (sgen_collection_is_parallel ());
152 entry->func (data, entry->data);
153 sgen_free_internal (entry, INTERNAL_MEM_JOB_QUEUE_ENTRY);
/*
 * Steal roughly half of @victim_data's stealable stack into @data's
 * private gray queue, copying it in section-sized chunks.  When @lock
 * is TRUE the victim's stealable_stack_mutex is taken via trylock
 * (giving up if contended); when FALSE the caller already holds it.
 * Updates the stolen-object statistics counters on success.
 * NOTE(review): the copy loop's opening line and the return statements
 * are elided from this excerpt; the return value presumably indicates
 * whether anything was stolen — confirm against the full file.
 */
158 workers_steal (WorkerData *data, WorkerData *victim_data, gboolean lock)
160 SgenGrayQueue *queue = &data->private_gray_queue;
/* Callers only steal into an empty private queue. */
163 g_assert (!queue->first);
165 if (!victim_data->stealable_stack_fill)
/* Non-blocking: back off rather than wait on a contended victim. */
168 if (lock && mono_mutex_trylock (&victim_data->stealable_stack_mutex))
/* Take (fill + 1) / 2 entries, i.e. about half, rounded up. */
171 n = num = (victim_data->stealable_stack_fill + 1) / 2;
172 /* We're stealing num entries. */
175 int m = MIN (SGEN_GRAY_QUEUE_SECTION_SIZE, n);
178 sgen_gray_object_alloc_queue_section (queue);
179 memcpy (queue->first->objects,
180 victim_data->stealable_stack + victim_data->stealable_stack_fill - num + n,
182 queue->first->end = m;
185 victim_data->stealable_stack_fill -= num;
188 mono_mutex_unlock (&victim_data->stealable_stack_mutex);
/* Self-steals are counted separately, split by whether we locked. */
190 if (data == victim_data) {
192 stat_workers_stolen_from_self_lock += num;
194 stat_workers_stolen_from_self_no_lock += num;
196 stat_workers_stolen_from_others += num;
/*
 * Try to obtain work for @data by stealing, in priority order: from
 * our own stealable stack, then from the GC thread's, then from each
 * other worker in turn.  The success returns after each steal attempt
 * are elided from this excerpt; on total failure the private queue is
 * asserted empty again.
 */
203 workers_get_work (WorkerData *data)
207 g_assert (sgen_gray_object_queue_is_empty (&data->private_gray_queue));
209 /* Try to steal from our own stack. */
210 if (workers_steal (data, data, TRUE))
213 /* Then from the GC thread's stack. */
214 if (workers_steal (data, &workers_gc_thread_data, TRUE))
217 /* Finally, from another worker. */
218 for (i = 0; i < workers_num; ++i) {
219 WorkerData *victim_data = &workers_data [i];
220 if (data == victim_data)
222 if (workers_steal (data, victim_data, TRUE))
226 /* Nobody to steal from */
227 g_assert (sgen_gray_object_queue_is_empty (&data->private_gray_queue));
/*
 * Alloc-prepare callback for a worker's (or the GC thread's) gray
 * queue: move gray-queue sections into the owner's fixed-size
 * stealable stack so other workers can steal them, waking sleepers
 * whenever stealable work exists.  @queue->alloc_prepare_data is the
 * owning WorkerData.
 */
232 workers_gray_queue_share_redirect (SgenGrayQueue *queue)
234 GrayQueueSection *section;
235 WorkerData *data = queue->alloc_prepare_data;
237 if (data->stealable_stack_fill) {
239 * There are still objects in the stealable stack, so
240 * wake up any workers that might be sleeping
242 if (workers_gc_in_progress)
243 workers_wake_up_all ();
247 /* The stealable stack is empty, so fill it. */
248 mono_mutex_lock (&data->stealable_stack_mutex);
250 while (data->stealable_stack_fill < STEALABLE_STACK_SIZE &&
251 (section = sgen_gray_object_dequeue_section (queue))) {
/* Copy as much of the section's tail as fits into the stack. */
252 int num = MIN (section->end, STEALABLE_STACK_SIZE - data->stealable_stack_fill);
254 memcpy (data->stealable_stack + data->stealable_stack_fill,
255 section->objects + section->end - num,
256 sizeof (char*) * num);
259 data->stealable_stack_fill += num;
/* NOTE(review): the partial-section branch condition is elided —
 * presumably a leftover section is re-enqueued, a drained one freed. */
262 sgen_gray_object_enqueue_section (queue, section);
264 sgen_gray_object_free_queue_section (section);
/* Workers (not the GC thread) with an empty queue immediately steal
 * back from their own freshly-filled stack; lock is already held. */
267 if (data != &workers_gc_thread_data && sgen_gray_object_queue_is_empty (queue))
268 workers_steal (data, data, FALSE);
270 mono_mutex_unlock (&data->stealable_stack_mutex);
272 if (workers_gc_in_progress)
273 workers_wake_up_all ();
/*
 * Worker thread entry point: register with the thread infrastructure,
 * run the major collector's per-worker init hook, set up the private
 * gray queue with the share/redirect callback, then loop — draining
 * jobs, and (once workers_marking is set) draining gray objects in
 * 32-object batches, redirecting overflow to the stealable stack.
 * NOTE(review): the outer loop construct and the parking call between
 * iterations are elided from this excerpt.
 */
276 static mono_native_thread_return_t
277 workers_thread_func (void *data_untyped)
279 WorkerData *data = data_untyped;
281 mono_thread_info_register_small_id ();
283 if (sgen_get_major_collector ()->init_worker_thread)
284 sgen_get_major_collector ()->init_worker_thread (data->major_collector_data);
286 sgen_gray_object_queue_init_with_alloc_prepare (&data->private_gray_queue,
287 workers_gray_queue_share_redirect, data);
290 gboolean did_work = FALSE;
/* Run every queued job before touching the gray stack. */
292 while (workers_dequeue_and_do_job (data)) {
294 /* FIXME: maybe distribute the gray queue here? */
297 if (workers_marking && (!sgen_gray_object_queue_is_empty (&data->private_gray_queue) || workers_get_work (data))) {
298 g_assert (!sgen_gray_object_queue_is_empty (&data->private_gray_queue));
/* Drain in small batches so excess work gets shared via redirect. */
300 while (!sgen_drain_gray_stack (&data->private_gray_queue, 32))
301 workers_gray_queue_share_redirect (&data->private_gray_queue);
302 g_assert (sgen_gray_object_queue_is_empty (&data->private_gray_queue));
304 sgen_gray_object_queue_init (&data->private_gray_queue);
313 /* dummy return to make compilers happy */
/*
 * Push the distribute gray queue's sections out to the GC thread's
 * stealable stack (no-op for non-parallel collections).
 */
318 sgen_workers_distribute_gray_queue_sections (void)
320 if (!sgen_collection_is_parallel ())
323 workers_gray_queue_share_redirect (&workers_distribute_gray_queue);
/*
 * (Re)initialize the distribute gray queue for a parallel collection;
 * no-op otherwise.
 */
327 sgen_workers_init_distribute_gray_queue (void)
329 if (!sgen_collection_is_parallel ())
332 sgen_gray_object_queue_init (&workers_distribute_gray_queue);
/*
 * One-time initialization of the worker subsystem for a parallel major
 * collector: allocate and zero per-worker state, create the
 * semaphores, set up the distribute gray queue and the GC thread's own
 * WorkerData, init the job queue lock, and register the statistics
 * counters.  Worker threads themselves are started lazily in
 * sgen_workers_start_all_workers ().  No-op for non-parallel
 * collectors.
 */
336 sgen_workers_init (int num_workers)
340 if (!sgen_get_major_collector ()->is_parallel)
343 //g_print ("initing %d workers\n", num_workers);
345 workers_num = num_workers;
347 workers_data = sgen_alloc_internal_dynamic (sizeof (WorkerData) * num_workers, INTERNAL_MEM_WORKER_DATA);
348 memset (workers_data, 0, sizeof (WorkerData) * num_workers);
350 MONO_SEM_INIT (&workers_waiting_sem, 0);
351 MONO_SEM_INIT (&workers_done_sem, 0);
/* The distribute queue redirects into the GC thread's WorkerData. */
353 sgen_gray_object_queue_init_with_alloc_prepare (&workers_distribute_gray_queue,
354 workers_gray_queue_share_redirect, &workers_gc_thread_data);
355 mono_mutex_init (&workers_gc_thread_data.stealable_stack_mutex, NULL);
356 workers_gc_thread_data.stealable_stack_fill = 0;
358 if (sgen_get_major_collector ()->alloc_worker_data)
359 workers_gc_thread_data.major_collector_data = sgen_get_major_collector ()->alloc_worker_data ();
361 for (i = 0; i < workers_num; ++i) {
362 /* private gray queue is inited by the thread itself */
363 mono_mutex_init (&workers_data [i].stealable_stack_mutex, NULL);
364 workers_data [i].stealable_stack_fill = 0;
366 if (sgen_get_major_collector ()->alloc_worker_data)
367 workers_data [i].major_collector_data = sgen_get_major_collector ()->alloc_worker_data ();
370 LOCK_INIT (workers_job_queue_mutex);
372 sgen_register_fixed_internal_mem_type (INTERNAL_MEM_JOB_QUEUE_ENTRY, sizeof (JobQueueEntry));
374 mono_counters_register ("Stolen from self lock", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_workers_stolen_from_self_lock);
375 mono_counters_register ("Stolen from self no lock", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_workers_stolen_from_self_no_lock);
376 mono_counters_register ("Stolen from others", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_workers_stolen_from_others);
377 mono_counters_register ("# workers waited", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_workers_num_waited);
380 /* only the GC thread is allowed to start and join workers */
/*
 * Spawn the native thread for worker slot @index; asserts the slot is
 * in range and not already running.
 */
383 workers_start_worker (int index)
385 g_assert (index >= 0 && index < workers_num);
387 g_assert (!workers_data [index].thread);
388 mono_native_thread_create (&workers_data [index].thread, workers_thread_func, &workers_data [index]);
/*
 * Kick off a parallel collection on the workers: run the GC thread's
 * own init hook, set workers_gc_in_progress, and either wake the
 * already-parked workers (subsequent collections — all must be parked)
 * or create the threads for the first time.  Marking is enabled
 * separately by sgen_workers_start_marking ().
 */
392 sgen_workers_start_all_workers (void)
396 if (!sgen_collection_is_parallel ())
399 if (sgen_get_major_collector ()->init_worker_thread)
400 sgen_get_major_collector ()->init_worker_thread (workers_gc_thread_data.major_collector_data);
402 g_assert (!workers_gc_in_progress);
403 workers_gc_in_progress = TRUE;
404 workers_marking = FALSE;
/* Re-arm the once-per-collection done-semaphore guard. */
405 workers_done_posted = 0;
407 if (workers_started) {
408 if (workers_num_waiting != workers_num)
409 g_error ("Expecting all %d sgen workers to be parked, but only %d are", workers_num, workers_num_waiting);
410 workers_wake_up_all ();
/* First collection: create the worker threads. */
414 for (i = 0; i < workers_num; ++i)
415 workers_start_worker (i);
417 workers_started = TRUE;
/*
 * Enable the marking phase for an in-progress parallel collection and
 * wake all workers so they start draining gray stacks.
 */
421 sgen_workers_start_marking (void)
423 if (!sgen_collection_is_parallel ())
426 g_assert (workers_started && workers_gc_in_progress);
427 g_assert (!workers_marking);
429 workers_marking = TRUE;
431 workers_wake_up_all ();
/*
 * Called by the GC thread at the end of a parallel collection: clear
 * workers_gc_in_progress, wake any already-parked workers (so the done
 * semaphore definitely gets posted), wait on the done semaphore, reset
 * per-worker collector data, and assert that every queue and stealable
 * stack is empty.
 */
435 sgen_workers_join (void)
439 if (!sgen_collection_is_parallel ())
442 g_assert (sgen_gray_object_queue_is_empty (&workers_gc_thread_data.private_gray_queue));
443 g_assert (sgen_gray_object_queue_is_empty (&workers_distribute_gray_queue));
445 g_assert (workers_gc_in_progress);
446 workers_gc_in_progress = FALSE;
447 if (workers_num_waiting == workers_num) {
449 * All the workers might have shut down at this point
450 * and posted the done semaphore but we don't know it
451 * yet. It's not a big deal to wake them up again -
452 * they'll just do one iteration of their loop trying to
453 * find something to do and then go back to waiting
456 workers_wake_up_all ();
458 MONO_SEM_WAIT (&workers_done_sem);
459 workers_marking = FALSE;
461 if (sgen_get_major_collector ()->reset_worker_data) {
462 for (i = 0; i < workers_num; ++i)
463 sgen_get_major_collector ()->reset_worker_data (workers_data [i].major_collector_data);
466 g_assert (workers_done_posted);
/* Post-conditions: no leftover work anywhere. */
468 g_assert (!workers_gc_thread_data.stealable_stack_fill);
469 g_assert (sgen_gray_object_queue_is_empty (&workers_gc_thread_data.private_gray_queue));
470 for (i = 0; i < workers_num; ++i) {
471 g_assert (!workers_data [i].stealable_stack_fill);
472 g_assert (sgen_gray_object_queue_is_empty (&workers_data [i].private_gray_queue));
/*
 * Return whether @thread is one of our workers — either per the major
 * collector's own check or by matching against workers_data [].thread.
 * (Return statements are elided from this excerpt.)
 */
477 sgen_is_worker_thread (MonoNativeThreadId thread)
481 if (sgen_get_major_collector ()->is_worker_thread && sgen_get_major_collector ()->is_worker_thread (thread))
484 for (i = 0; i < workers_num; ++i) {
485 if (workers_data [i].thread == thread)
/* Return whether @queue is the shared distribute gray queue. */
492 sgen_workers_is_distributed_queue (SgenGrayQueue *queue)
494 return queue == &workers_distribute_gray_queue;
/* Accessor for the shared distribute gray queue. */
498 sgen_workers_get_distribute_gray_queue (void)
500 return &workers_distribute_gray_queue;
/*
 * Reset the GC thread's major-collector worker data via the
 * collector's hook, if it provides one.
 */
504 sgen_workers_reset_data (void)
506 if (sgen_get_major_collector ()->reset_worker_data)
507 sgen_get_major_collector ()->reset_worker_data (workers_gc_thread_data.major_collector_data);