/*
 * Copyright 2001-2003 Ximian, Inc
 * Copyright 2003-2010 Novell, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
/*
 * NOTE(review): this excerpt is an extraction artifact -- each line is
 * prefixed with its original line number, and many lines (braces, blank
 * lines, some statements) are elided.  Code bytes are kept unchanged;
 * only comments are added.
 */
/* Capacity, in object pointers, of each worker's stealable stack. */
25 #define STEALABLE_STACK_SIZE 512
27 typedef struct _WorkerData WorkerData;
/*
 * Per-worker state.  The struct's opening/closing lines are elided in
 * this view; the visible fields follow.
 */
/* Opaque per-worker state owned by the major collector (allocated via
   major_collector.alloc_worker_data in workers_init). */
30 void *major_collector_data;
32 GrayQueue private_gray_queue; /* only read/written by worker thread */
/* Protects stealable_stack / stealable_stack_fill against concurrent
   stealers (see workers_steal). */
34 pthread_mutex_t stealable_stack_mutex;
/* Number of entries currently on stealable_stack. */
35 volatile int stealable_stack_fill;
36 char *stealable_stack [STEALABLE_STACK_SIZE];
/* Number of worker threads (set in workers_init). */
39 static int workers_num;
/* Array of workers_num per-worker states. */
40 static WorkerData *workers_data;
/* Pseudo-worker state used by the GC thread itself. */
41 static WorkerData workers_gc_thread_data;
/* Gray queue through which the GC thread distributes work to workers. */
43 static GrayQueue workers_distribute_gray_queue;
45 #define WORKERS_DISTRIBUTE_GRAY_QUEUE (major_collector.is_parallel ? &workers_distribute_gray_queue : &gray_queue)
/* Set while a parallel collection is running (see
   workers_start_all_workers); cleared before waiting for workers. */
47 static volatile gboolean workers_gc_in_progress = FALSE;
/* Whether the worker threads have been created yet. */
48 static gboolean workers_started = FALSE;
/* Count of workers blocked on workers_waiting_sem; updated with CAS. */
49 static volatile int workers_num_waiting = 0;
/* Workers block on this when they find no work to steal. */
50 static MonoSemType workers_waiting_sem;
/* Posted once per collection when all workers have gone idle. */
51 static MonoSemType workers_done_sem;
/* Guard so workers_done_sem is posted at most once per collection. */
52 static volatile int workers_done_posted = 0;
/* Statistics counters, registered with mono_counters in workers_init. */
54 static long long stat_workers_stolen_from_self_lock;
55 static long long stat_workers_stolen_from_self_no_lock;
56 static long long stat_workers_stolen_from_others;
57 static long long stat_workers_num_waited;
/*
 * Wake the workers currently blocked on workers_waiting_sem: post the
 * semaphore once per waiter observed at entry, decrementing
 * workers_num_waiting with a CAS retry loop for each post.
 * NOTE(review): the return type, opening brace, local declarations and
 * some loop-interior lines are elided in this excerpt.
 */
60 workers_wake_up_all (void)
65 max = workers_num_waiting;
66 for (i = 0; i < max; ++i) {
69 num = workers_num_waiting;
/* Elided lines presumably bail out of the loop when num drops to 0 --
   TODO confirm against the full source. */
72 } while (InterlockedCompareExchange (&workers_num_waiting, num - 1, num) != num);
73 MONO_SEM_POST (&workers_waiting_sem);
/*
 * Worker idle path (the enclosing function's header is elided in this
 * excerpt): register as waiting via a CAS increment of
 * workers_num_waiting; if that made every worker idle while no GC is in
 * progress, post workers_done_sem -- guarded by the workers_done_posted
 * CAS so it is posted only once -- then block on workers_waiting_sem.
 */
81 ++stat_workers_num_waited;
83 num = workers_num_waiting;
84 } while (InterlockedCompareExchange (&workers_num_waiting, num + 1, num) != num);
/* This thread is the last one to go idle and the collection is over:
   signal the GC thread. */
85 if (num + 1 == workers_num && !workers_gc_in_progress) {
86 /* Make sure the done semaphore is only posted once. */
89 posted = workers_done_posted;
92 } while (InterlockedCompareExchange (&workers_done_posted, 1, 0) != 0);
94 MONO_SEM_POST (&workers_done_sem);
/* Sleep until workers_wake_up_all posts more work. */
96 MONO_SEM_WAIT (&workers_waiting_sem);
/*
 * Steal roughly half of victim_data's stealable stack into data's
 * private gray queue.  If `lock' is set, the victim's
 * stealable_stack_mutex is taken with trylock; the elided lines after
 * the empty-stack and trylock checks presumably return FALSE (nothing
 * stolen) -- TODO confirm against the full source.  On success the
 * matching steal statistics counter is bumped.  NOTE(review): the return
 * type, braces, the copy loop's head and the early returns are elided in
 * this excerpt.
 */
100 workers_steal (WorkerData *data, WorkerData *victim_data, gboolean lock)
102 GrayQueue *queue = &data->private_gray_queue;
/* Stealing is only done into an empty private queue. */
105 g_assert (!queue->first);
107 if (!victim_data->stealable_stack_fill)
110 if (lock && pthread_mutex_trylock (&victim_data->stealable_stack_mutex))
/* Take (fill + 1) / 2 entries, i.e. half, rounded up. */
113 n = num = (victim_data->stealable_stack_fill + 1) / 2;
114 /* We're stealing num entries. */
/* Copy the stolen entries section-by-section into the private queue, at
   most SGEN_GRAY_QUEUE_SECTION_SIZE per section. */
117 int m = MIN (SGEN_GRAY_QUEUE_SECTION_SIZE, n);
120 gray_object_alloc_queue_section (queue);
121 memcpy (queue->first->objects,
122 victim_data->stealable_stack + victim_data->stealable_stack_fill - num + n,
124 queue->first->end = m;
127 victim_data->stealable_stack_fill -= num;
130 pthread_mutex_unlock (&victim_data->stealable_stack_mutex);
/* Account the steal: self-steals are counted separately depending on
   whether the lock was taken. */
132 if (data == victim_data) {
134 stat_workers_stolen_from_self_lock += num;
136 stat_workers_stolen_from_self_no_lock += num;
138 stat_workers_stolen_from_others += num;
/*
 * Fill data's (empty) private gray queue with work, trying sources in
 * order: the worker's own stealable stack, the GC thread's stack, then
 * every other worker's stack.  The elided lines presumably return
 * success after each hit and put the worker to sleep when nothing is
 * found -- TODO confirm against the full source.
 */
145 workers_get_work (WorkerData *data)
147 g_assert (gray_object_queue_is_empty (&data->private_gray_queue));
152 /* Try to steal from our own stack. */
153 if (workers_steal (data, data, TRUE))
156 /* Then from the GC thread's stack. */
157 if (workers_steal (data, &workers_gc_thread_data, TRUE))
160 /* Finally, from another worker. */
161 for (i = 0; i < workers_num; ++i) {
162 WorkerData *victim_data = &workers_data [i];
/* Don't try ourselves again. */
163 if (data == victim_data)
165 if (workers_steal (data, victim_data, TRUE))
169 /* Nobody to steal from, so wait. */
170 g_assert (gray_object_queue_is_empty (&data->private_gray_queue));
/*
 * Alloc-prepare callback installed on the gray queues: makes the objects
 * queued in `queue' available to other workers.  If the owner's
 * stealable stack is already non-empty, just wake sleeping workers;
 * otherwise refill the stack from the queue's sections while holding
 * stealable_stack_mutex, and -- for real workers whose own queue ran dry
 * -- steal back from the freshly filled stack (no lock needed, the mutex
 * is already held) so this worker can keep going.
 */
176 workers_gray_queue_share_redirect (GrayQueue *queue)
178 GrayQueueSection *section;
179 WorkerData *data = queue->alloc_prepare_data;
181 if (data->stealable_stack_fill) {
183 * There are still objects in the stealable stack, so
184 * wake up any workers that might be sleeping
186 if (workers_gc_in_progress)
187 workers_wake_up_all ();
191 /* The stealable stack is empty, so fill it. */
192 pthread_mutex_lock (&data->stealable_stack_mutex);
194 while (data->stealable_stack_fill < STEALABLE_STACK_SIZE &&
195 (section = gray_object_dequeue_section (queue))) {
/* Move at most as many objects as fit into the remaining stack space. */
196 int num = MIN (section->end, STEALABLE_STACK_SIZE - data->stealable_stack_fill);
/* Copy the last `num' objects of the section onto the stack. */
198 memcpy (data->stealable_stack + data->stealable_stack_fill,
199 section->objects + section->end - num,
200 sizeof (char*) * num);
203 data->stealable_stack_fill += num;
/* Elided branch: a partially-consumed section presumably goes back onto
   the queue, an exhausted one is freed -- TODO confirm. */
206 gray_object_enqueue_section (queue, section);
208 gray_object_free_queue_section (section, queue->allocator);
211 if (data != &workers_gc_thread_data && gray_object_queue_is_empty (queue))
212 workers_steal (data, data, FALSE);
214 pthread_mutex_unlock (&data->stealable_stack_mutex);
216 if (workers_gc_in_progress)
217 workers_wake_up_all ();
/*
 * Worker thread entry point: optionally initialize the major collector's
 * per-thread state, set up a private gray queue with
 * workers_gray_queue_share_redirect as its alloc-prepare hook, then
 * repeatedly fetch work and drain it, sharing excess objects between
 * drain slices.  NOTE(review): the surrounding loop structure and the
 * use of got_work are partially elided in this excerpt.
 */
221 workers_thread_func (void *data_untyped)
223 WorkerData *data = data_untyped;
224 SgenInternalAllocator allocator;
226 if (major_collector.init_worker_thread)
227 major_collector.init_worker_thread (data->major_collector_data);
229 memset (&allocator, 0, sizeof (allocator));
231 gray_object_queue_init_with_alloc_prepare (&data->private_gray_queue, &allocator,
232 workers_gray_queue_share_redirect, data);
235 gboolean got_work = workers_get_work (data);
237 g_assert (!gray_object_queue_is_empty (&data->private_gray_queue));
/* Drain in 32-object slices, redistributing leftovers after each. */
239 while (!drain_gray_stack (&data->private_gray_queue, 32))
240 workers_gray_queue_share_redirect (&data->private_gray_queue);
241 g_assert (gray_object_queue_is_empty (&data->private_gray_queue));
/* Re-init the queue for the next round of work. */
243 gray_object_queue_init (&data->private_gray_queue, &allocator);
246 /* dummy return to make compilers happy */
/*
 * Push the GC thread's distribute gray queue out to the workers via the
 * share/redirect path.  No-op for non-parallel collectors.
 */
251 workers_distribute_gray_queue_sections (void)
253 if (!major_collector.is_parallel)
256 workers_gray_queue_share_redirect (&workers_distribute_gray_queue);
/*
 * One-time setup for `num_workers' workers (no-op for non-parallel
 * collectors): allocate and zero the WorkerData array, initialize both
 * semaphores, the distribute gray queue and each stealable-stack mutex,
 * let the major collector allocate its per-worker data, and register the
 * steal statistics counters.
 */
260 workers_init (int num_workers)
264 if (!major_collector.is_parallel)
267 //g_print ("initing %d workers\n", num_workers);
269 workers_num = num_workers;
271 workers_data = mono_sgen_alloc_internal_dynamic (sizeof (WorkerData) * num_workers, INTERNAL_MEM_WORKER_DATA);
272 memset (workers_data, 0, sizeof (WorkerData) * num_workers);
274 MONO_SEM_INIT (&workers_waiting_sem, 0);
275 MONO_SEM_INIT (&workers_done_sem, 0);
/* The GC thread shares objects through the distribute queue using its
   own pseudo-worker data as the alloc-prepare cookie. */
277 gray_object_queue_init_with_alloc_prepare (&workers_distribute_gray_queue, mono_sgen_get_unmanaged_allocator (),
278 workers_gray_queue_share_redirect, &workers_gc_thread_data);
279 pthread_mutex_init (&workers_gc_thread_data.stealable_stack_mutex, NULL);
280 workers_gc_thread_data.stealable_stack_fill = 0;
282 if (major_collector.alloc_worker_data)
283 workers_gc_thread_data.major_collector_data = major_collector.alloc_worker_data ();
285 for (i = 0; i < workers_num; ++i) {
286 /* private gray queue is inited by the thread itself */
287 pthread_mutex_init (&workers_data [i].stealable_stack_mutex, NULL);
288 workers_data [i].stealable_stack_fill = 0;
290 if (major_collector.alloc_worker_data)
291 workers_data [i].major_collector_data = major_collector.alloc_worker_data ();
294 mono_counters_register ("Stolen from self lock", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_workers_stolen_from_self_lock);
295 mono_counters_register ("Stolen from self no lock", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_workers_stolen_from_self_no_lock);
296 mono_counters_register ("Stolen from others", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_workers_stolen_from_others);
297 mono_counters_register ("# workers waited", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_workers_num_waited);
300 /* only the GC thread is allowed to start and join workers */
/*
 * Create worker thread `index'.  Asserts the slot is in range and not
 * already running.  NOTE(review): pthread_create's return value is not
 * checked in the visible lines.
 */
303 workers_start_worker (int index)
305 g_assert (index >= 0 && index < workers_num);
307 g_assert (!workers_data [index].thread);
308 pthread_create (&workers_data [index].thread, NULL, workers_thread_func, &workers_data [index]);
/*
 * Kick off a parallel collection (no-op for non-parallel collectors):
 * set workers_gc_in_progress, reset the done-posted guard, then either
 * wake the already-created workers (all of which must be waiting from
 * the previous collection) or create the threads on first use.
 */
312 workers_start_all_workers (void)
316 if (!major_collector.is_parallel)
319 if (major_collector.init_worker_thread)
320 major_collector.init_worker_thread (workers_gc_thread_data.major_collector_data);
322 g_assert (!workers_gc_in_progress);
323 workers_gc_in_progress = TRUE;
324 workers_done_posted = 0;
326 if (workers_started) {
/* All workers from the previous collection must be asleep by now. */
327 g_assert (workers_num_waiting == workers_num);
328 workers_wake_up_all ();
/* First collection: create the worker threads. */
332 for (i = 0; i < workers_num; ++i)
333 workers_start_worker (i);
335 workers_started = TRUE;
/*
 * End-of-collection synchronization (the function header is elided in
 * this excerpt; presumably workers_join): clear workers_gc_in_progress,
 * wake any already-idle workers so the last one to go idle posts
 * workers_done_sem, wait on it, reset the per-worker major-collector
 * data, and assert that every queue and stealable stack is empty.
 */
343 if (!major_collector.is_parallel)
346 g_assert (gray_object_queue_is_empty (&workers_gc_thread_data.private_gray_queue));
347 g_assert (gray_object_queue_is_empty (&workers_distribute_gray_queue));
349 g_assert (workers_gc_in_progress);
350 workers_gc_in_progress = FALSE;
/* If every worker is already waiting, none of them will post the done
   semaphore on its own -- wake them so the last waiter does. */
351 if (workers_num_waiting == workers_num)
352 workers_wake_up_all ();
353 MONO_SEM_WAIT (&workers_done_sem);
355 if (major_collector.reset_worker_data) {
356 for (i = 0; i < workers_num; ++i)
357 major_collector.reset_worker_data (workers_data [i].major_collector_data);
/* Sanity checks: the collection is fully drained and everybody idle. */
360 g_assert (workers_done_posted);
361 g_assert (workers_num_waiting == workers_num);
363 g_assert (!workers_gc_thread_data.stealable_stack_fill);
364 g_assert (gray_object_queue_is_empty (&workers_gc_thread_data.private_gray_queue));
365 for (i = 0; i < workers_num; ++i) {
366 g_assert (!workers_data [i].stealable_stack_fill);
367 g_assert (gray_object_queue_is_empty (&workers_data [i].private_gray_queue));
/*
 * Whether `thread' is a GC worker thread -- either one the major
 * collector itself knows about, or one of the threads in workers_data.
 * NOTE(review): the return type and the function's tail are outside
 * this excerpt.
 */
372 mono_sgen_is_worker_thread (pthread_t thread)
376 if (major_collector.is_worker_thread && major_collector.is_worker_thread (thread))
379 if (!major_collector.is_parallel)
382 for (i = 0; i < workers_num; ++i) {
383 if (workers_data [i].thread == thread)