/*
 * Copyright 2001-2003 Ximian, Inc
 * Copyright 2003-2010 Novell, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
25 #define STEALABLE_STACK_SIZE 512
27 typedef struct _WorkerData WorkerData;
30 void *major_collector_data;
32 GrayQueue private_gray_queue; /* only read/written by worker thread */
34 pthread_mutex_t stealable_stack_mutex;
35 volatile int stealable_stack_fill;
36 char *stealable_stack [STEALABLE_STACK_SIZE];
39 typedef void (*JobFunc) (WorkerData *worker_data, void *job_data);
41 typedef struct _JobQueueEntry JobQueueEntry;
42 struct _JobQueueEntry {
46 volatile JobQueueEntry *next;
49 static int workers_num;
50 static WorkerData *workers_data;
51 static WorkerData workers_gc_thread_data;
53 static GrayQueue workers_distribute_gray_queue;
54 static SgenInternalAllocator workers_distribute_gray_queue_allocator;
56 #define WORKERS_DISTRIBUTE_GRAY_QUEUE (major_collector.is_parallel ? &workers_distribute_gray_queue : &gray_queue)
58 static volatile gboolean workers_gc_in_progress = FALSE;
59 static volatile gboolean workers_marking = FALSE;
60 static gboolean workers_started = FALSE;
61 static volatile int workers_num_waiting = 0;
62 static MonoSemType workers_waiting_sem;
63 static MonoSemType workers_done_sem;
64 static volatile int workers_done_posted = 0;
66 static volatile int workers_job_queue_num_entries = 0;
67 static volatile JobQueueEntry *workers_job_queue = NULL;
68 static LOCK_DECLARE (workers_job_queue_mutex);
70 static long long stat_workers_stolen_from_self_lock;
71 static long long stat_workers_stolen_from_self_no_lock;
72 static long long stat_workers_stolen_from_others;
73 static long long stat_workers_num_waited;
76 workers_wake_up (int max)
80 for (i = 0; i < max; ++i) {
83 num = workers_num_waiting;
86 } while (InterlockedCompareExchange (&workers_num_waiting, num - 1, num) != num);
87 MONO_SEM_POST (&workers_waiting_sem);
92 workers_wake_up_all (void)
94 workers_wake_up (workers_num);
101 ++stat_workers_num_waited;
103 num = workers_num_waiting;
104 } while (InterlockedCompareExchange (&workers_num_waiting, num + 1, num) != num);
105 if (num + 1 == workers_num && !workers_gc_in_progress) {
106 /* Make sure the done semaphore is only posted once. */
109 posted = workers_done_posted;
112 } while (InterlockedCompareExchange (&workers_done_posted, 1, 0) != 0);
114 MONO_SEM_POST (&workers_done_sem);
116 MONO_SEM_WAIT (&workers_waiting_sem);
120 workers_enqueue_job (JobFunc func, void *data)
123 JobQueueEntry *entry = mono_sgen_alloc_internal (INTERNAL_MEM_JOB_QUEUE_ENTRY);
127 pthread_mutex_lock (&workers_job_queue_mutex);
128 entry->next = workers_job_queue;
129 workers_job_queue = entry;
130 num_entries = ++workers_job_queue_num_entries;
131 pthread_mutex_unlock (&workers_job_queue_mutex);
133 workers_wake_up (num_entries);
137 workers_dequeue_and_do_job (WorkerData *data)
139 JobQueueEntry *entry;
141 if (!workers_job_queue_num_entries)
144 pthread_mutex_lock (&workers_job_queue_mutex);
145 entry = (JobQueueEntry*)workers_job_queue;
147 workers_job_queue = entry->next;
148 --workers_job_queue_num_entries;
150 pthread_mutex_unlock (&workers_job_queue_mutex);
155 entry->func (data, entry->data);
156 mono_sgen_free_internal (entry, INTERNAL_MEM_JOB_QUEUE_ENTRY);
161 workers_steal (WorkerData *data, WorkerData *victim_data, gboolean lock)
163 GrayQueue *queue = &data->private_gray_queue;
166 g_assert (!queue->first);
168 if (!victim_data->stealable_stack_fill)
171 if (lock && pthread_mutex_trylock (&victim_data->stealable_stack_mutex))
174 n = num = (victim_data->stealable_stack_fill + 1) / 2;
175 /* We're stealing num entries. */
178 int m = MIN (SGEN_GRAY_QUEUE_SECTION_SIZE, n);
181 gray_object_alloc_queue_section (queue);
182 memcpy (queue->first->objects,
183 victim_data->stealable_stack + victim_data->stealable_stack_fill - num + n,
185 queue->first->end = m;
188 victim_data->stealable_stack_fill -= num;
191 pthread_mutex_unlock (&victim_data->stealable_stack_mutex);
193 if (data == victim_data) {
195 stat_workers_stolen_from_self_lock += num;
197 stat_workers_stolen_from_self_no_lock += num;
199 stat_workers_stolen_from_others += num;
206 workers_get_work (WorkerData *data)
210 g_assert (gray_object_queue_is_empty (&data->private_gray_queue));
212 /* Try to steal from our own stack. */
213 if (workers_steal (data, data, TRUE))
216 /* Then from the GC thread's stack. */
217 if (workers_steal (data, &workers_gc_thread_data, TRUE))
220 /* Finally, from another worker. */
221 for (i = 0; i < workers_num; ++i) {
222 WorkerData *victim_data = &workers_data [i];
223 if (data == victim_data)
225 if (workers_steal (data, victim_data, TRUE))
229 /* Nobody to steal from */
230 g_assert (gray_object_queue_is_empty (&data->private_gray_queue));
235 workers_gray_queue_share_redirect (GrayQueue *queue)
237 GrayQueueSection *section;
238 WorkerData *data = queue->alloc_prepare_data;
240 if (data->stealable_stack_fill) {
242 * There are still objects in the stealable stack, so
243 * wake up any workers that might be sleeping
245 if (workers_gc_in_progress)
246 workers_wake_up_all ();
250 /* The stealable stack is empty, so fill it. */
251 pthread_mutex_lock (&data->stealable_stack_mutex);
253 while (data->stealable_stack_fill < STEALABLE_STACK_SIZE &&
254 (section = gray_object_dequeue_section (queue))) {
255 int num = MIN (section->end, STEALABLE_STACK_SIZE - data->stealable_stack_fill);
257 memcpy (data->stealable_stack + data->stealable_stack_fill,
258 section->objects + section->end - num,
259 sizeof (char*) * num);
262 data->stealable_stack_fill += num;
265 gray_object_enqueue_section (queue, section);
267 gray_object_free_queue_section (section, queue->allocator);
270 if (data != &workers_gc_thread_data && gray_object_queue_is_empty (queue))
271 workers_steal (data, data, FALSE);
273 pthread_mutex_unlock (&data->stealable_stack_mutex);
275 if (workers_gc_in_progress)
276 workers_wake_up_all ();
280 workers_thread_func (void *data_untyped)
282 WorkerData *data = data_untyped;
283 SgenInternalAllocator allocator;
285 if (major_collector.init_worker_thread)
286 major_collector.init_worker_thread (data->major_collector_data);
288 memset (&allocator, 0, sizeof (allocator));
289 #ifdef SGEN_DEBUG_INTERNAL_ALLOC
290 allocator.thread = pthread_self ();
293 gray_object_queue_init_with_alloc_prepare (&data->private_gray_queue, &allocator,
294 workers_gray_queue_share_redirect, data);
297 gboolean did_work = FALSE;
299 while (workers_dequeue_and_do_job (data))
302 if (workers_marking && workers_get_work (data)) {
303 g_assert (!gray_object_queue_is_empty (&data->private_gray_queue));
305 while (!drain_gray_stack (&data->private_gray_queue, 32))
306 workers_gray_queue_share_redirect (&data->private_gray_queue);
307 g_assert (gray_object_queue_is_empty (&data->private_gray_queue));
309 gray_object_queue_init (&data->private_gray_queue, &allocator);
318 /* dummy return to make compilers happy */
323 workers_distribute_gray_queue_sections (void)
325 if (!major_collector.is_parallel)
328 workers_gray_queue_share_redirect (&workers_distribute_gray_queue);
332 workers_init_distribute_gray_queue (void)
334 if (!major_collector.is_parallel)
337 gray_object_queue_init (&workers_distribute_gray_queue, &workers_distribute_gray_queue_allocator);
338 #ifdef SGEN_DEBUG_INTERNAL_ALLOC
339 workers_distribute_gray_queue_allocator.thread = pthread_self ();
344 workers_init (int num_workers)
348 if (!major_collector.is_parallel)
351 //g_print ("initing %d workers\n", num_workers);
353 workers_num = num_workers;
355 workers_data = mono_sgen_alloc_internal_dynamic (sizeof (WorkerData) * num_workers, INTERNAL_MEM_WORKER_DATA);
356 memset (workers_data, 0, sizeof (WorkerData) * num_workers);
358 MONO_SEM_INIT (&workers_waiting_sem, 0);
359 MONO_SEM_INIT (&workers_done_sem, 0);
361 gray_object_queue_init_with_alloc_prepare (&workers_distribute_gray_queue, &workers_distribute_gray_queue_allocator,
362 workers_gray_queue_share_redirect, &workers_gc_thread_data);
363 pthread_mutex_init (&workers_gc_thread_data.stealable_stack_mutex, NULL);
364 workers_gc_thread_data.stealable_stack_fill = 0;
366 if (major_collector.alloc_worker_data)
367 workers_gc_thread_data.major_collector_data = major_collector.alloc_worker_data ();
369 for (i = 0; i < workers_num; ++i) {
370 /* private gray queue is inited by the thread itself */
371 pthread_mutex_init (&workers_data [i].stealable_stack_mutex, NULL);
372 workers_data [i].stealable_stack_fill = 0;
374 if (major_collector.alloc_worker_data)
375 workers_data [i].major_collector_data = major_collector.alloc_worker_data ();
378 LOCK_INIT (workers_job_queue_mutex);
380 mono_sgen_register_fixed_internal_mem_type (INTERNAL_MEM_JOB_QUEUE_ENTRY, sizeof (JobQueueEntry));
382 mono_counters_register ("Stolen from self lock", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_workers_stolen_from_self_lock);
383 mono_counters_register ("Stolen from self no lock", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_workers_stolen_from_self_no_lock);
384 mono_counters_register ("Stolen from others", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_workers_stolen_from_others);
385 mono_counters_register ("# workers waited", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_workers_num_waited);
388 /* only the GC thread is allowed to start and join workers */
391 workers_start_worker (int index)
393 g_assert (index >= 0 && index < workers_num);
395 g_assert (!workers_data [index].thread);
396 pthread_create (&workers_data [index].thread, NULL, workers_thread_func, &workers_data [index]);
400 workers_start_all_workers (void)
404 if (!major_collector.is_parallel)
407 if (major_collector.init_worker_thread)
408 major_collector.init_worker_thread (workers_gc_thread_data.major_collector_data);
410 g_assert (!workers_gc_in_progress);
411 workers_gc_in_progress = TRUE;
412 workers_marking = FALSE;
413 workers_done_posted = 0;
415 if (workers_started) {
416 g_assert (workers_num_waiting == workers_num);
417 workers_wake_up_all ();
421 for (i = 0; i < workers_num; ++i)
422 workers_start_worker (i);
424 workers_started = TRUE;
428 workers_start_marking (void)
430 g_assert (workers_started && workers_gc_in_progress);
431 g_assert (!workers_marking);
433 workers_marking = TRUE;
435 workers_wake_up_all ();
443 if (!major_collector.is_parallel)
446 g_assert (gray_object_queue_is_empty (&workers_gc_thread_data.private_gray_queue));
447 g_assert (gray_object_queue_is_empty (&workers_distribute_gray_queue));
449 g_assert (workers_gc_in_progress);
450 workers_gc_in_progress = FALSE;
451 if (workers_num_waiting == workers_num)
452 workers_wake_up_all ();
453 MONO_SEM_WAIT (&workers_done_sem);
454 workers_marking = FALSE;
456 if (major_collector.reset_worker_data) {
457 for (i = 0; i < workers_num; ++i)
458 major_collector.reset_worker_data (workers_data [i].major_collector_data);
461 g_assert (workers_done_posted);
462 g_assert (workers_num_waiting == workers_num);
464 g_assert (!workers_gc_thread_data.stealable_stack_fill);
465 g_assert (gray_object_queue_is_empty (&workers_gc_thread_data.private_gray_queue));
466 for (i = 0; i < workers_num; ++i) {
467 g_assert (!workers_data [i].stealable_stack_fill);
468 g_assert (gray_object_queue_is_empty (&workers_data [i].private_gray_queue));
473 mono_sgen_is_worker_thread (pthread_t thread)
477 if (major_collector.is_worker_thread && major_collector.is_worker_thread (thread))
480 if (!major_collector.is_parallel)
483 for (i = 0; i < workers_num; ++i) {
484 if (workers_data [i].thread == thread)