/*
 * sgen-gray.c: Gray queue management.
 *
 * Copyright 2001-2003 Ximian, Inc
 * Copyright 2003-2010 Novell, Inc.
 * Copyright (C) 2012 Xamarin Inc
 *
 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
 */

#include "mono/sgen/sgen-gc.h"
#include "mono/sgen/sgen-protocol.h"
#ifdef HEAVY_STATISTICS
guint64 stat_gray_queue_section_alloc;
guint64 stat_gray_queue_section_free;
guint64 stat_gray_queue_enqueue_fast_path;
guint64 stat_gray_queue_dequeue_fast_path;
guint64 stat_gray_queue_enqueue_slow_path;
guint64 stat_gray_queue_dequeue_slow_path;
#endif

#define GRAY_QUEUE_LENGTH_LIMIT	64
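/*
 * Note: GRAY_QUEUE_LENGTH_LIMIT bounds how many sections
 * sgen_gray_object_queue_trim_free_list () keeps on the reusable free list
 * between collections; it does not limit the length of the live queue itself.
 */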
#ifdef SGEN_CHECK_GRAY_OBJECT_SECTIONS
#define STATE_TRANSITION(s,o,n) do { \
		int __old = (o); \
		if (InterlockedCompareExchange ((volatile int*)&(s)->state, (n), __old) != __old) \
			g_assert_not_reached (); \
	} while (0)
#define STATE_SET(s,v)		(s)->state = (v)
#define STATE_ASSERT(s,v)	g_assert ((s)->state == (v))
#else
#define STATE_TRANSITION(s,o,n)
#define STATE_SET(s,v)
#define STATE_ASSERT(s,v)
#endif
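/*
 * The STATE_* macros are debugging aids: with SGEN_CHECK_GRAY_OBJECT_SECTIONS
 * defined they track each section's lifecycle (free list <-> floating <->
 * enqueued, and finally freed) and assert on impossible transitions; in
 * normal builds they compile to nothing.
 */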
/*
 * Whenever we dispose a gray queue, we save its free list. Then, in the next collection,
 * we reuse that free list for the new gray queue.
 */
static GrayQueueSection *last_gray_queue_free_list;
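/*
 * Illustrative flow, assuming the collector drives the queue roughly like this
 * (a sketch, not a definitive call sequence):
 *
 *	sgen_gray_object_queue_init (&queue, NULL, TRUE);	// picks up last_gray_queue_free_list
 *	... mark loop: sgen_gray_object_enqueue () / sgen_gray_object_dequeue () ...
 *	sgen_gray_object_queue_dispose (&queue);		// trims the free list and saves it again
 */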
sgen_gray_object_alloc_queue_section (SgenGrayQueue *queue, gboolean is_parallel)
	GrayQueueSection *section;

	if (queue->free_list) {
		/* Use the previously allocated queue sections if possible */
		section = queue->free_list;
		queue->free_list = section->next;
		STATE_TRANSITION (section, GRAY_QUEUE_SECTION_STATE_FREE_LIST, GRAY_QUEUE_SECTION_STATE_FLOATING);
	} else {
		HEAVY_STAT (stat_gray_queue_section_alloc ++);

		/* Allocate a new section */
		section = (GrayQueueSection *)sgen_alloc_internal (INTERNAL_MEM_GRAY_QUEUE);
		STATE_SET (section, GRAY_QUEUE_SECTION_STATE_FLOATING);
	}
	/* Section is empty */
	section->size = 0;

	STATE_TRANSITION (section, GRAY_QUEUE_SECTION_STATE_FLOATING, GRAY_QUEUE_SECTION_STATE_ENQUEUED);

	/* Link it with the others */
	section->next = queue->first;
	section->prev = NULL;
	if (queue->first)
		queue->first->prev = section;
	else
		queue->last = section;
	queue->first = section;
	queue->cursor = section->entries - 1;

	if (is_parallel) {
		mono_memory_write_barrier ();
		/*
		 * We could probably optimize the code to only rely on the write barrier
		 * for synchronization with the stealer thread. Additionally we could also
		 * do a write barrier once every other gray queue change, and request
		 * to have a minimum of sections before stealing, to keep consistency.
		 */
		InterlockedIncrement (&queue->num_sections);
	} else {
		queue->num_sections++;
	}
sgen_gray_object_free_queue_section (GrayQueueSection *section)
	HEAVY_STAT (stat_gray_queue_section_free ++);

	STATE_TRANSITION (section, GRAY_QUEUE_SECTION_STATE_FLOATING, GRAY_QUEUE_SECTION_STATE_FREED);
	sgen_free_internal (section, INTERNAL_MEM_GRAY_QUEUE);

/*
 * The following two functions are called in the inner loops of the
 * collector, so they need to be as fast as possible. We have macros
 * for them in sgen-gc.h.
 */
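/*
 * A rough sketch of what those fast-path macros do (illustrative only; the
 * real macro names and exact checks live in sgen-gc.h):
 *
 *	if (queue->first && queue->cursor != GRAY_LAST_CURSOR_POSITION (queue->first))
 *		*++queue->cursor = SGEN_GRAY_QUEUE_ENTRY (obj, desc);	// fast path: bump the cursor in place
 *	else
 *		sgen_gray_object_enqueue (queue, obj, desc, is_parallel);	// slow path: need a new section
 */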
sgen_gray_object_enqueue (SgenGrayQueue *queue, GCObject *obj, SgenDescriptor desc, gboolean is_parallel)
	GrayQueueEntry entry = SGEN_GRAY_QUEUE_ENTRY (obj, desc);

	HEAVY_STAT (stat_gray_queue_enqueue_slow_path ++);

	SGEN_ASSERT (9, obj, "enqueueing a null object");
	//sgen_check_objref (obj);

#ifdef SGEN_CHECK_GRAY_OBJECT_ENQUEUE
	if (queue->enqueue_check_func)
		queue->enqueue_check_func (obj);
#endif
	if (G_UNLIKELY (!queue->first || queue->cursor == GRAY_LAST_CURSOR_POSITION (queue->first))) {
		if (queue->first) {
			/*
			 * We don't actively update the section size with each push/pop. For the first
			 * section we determine the size from the cursor position. For the rest of the
			 * sections we need to have the size set.
			 */
			queue->first->size = SGEN_GRAY_QUEUE_SECTION_SIZE;
		}
		sgen_gray_object_alloc_queue_section (queue, is_parallel);
	}
	STATE_ASSERT (queue->first, GRAY_QUEUE_SECTION_STATE_ENQUEUED);
	SGEN_ASSERT (9, queue->cursor <= GRAY_LAST_CURSOR_POSITION (queue->first), "gray queue %p overflow, first %p, cursor %p", queue, queue->first, queue->cursor);
	*++queue->cursor = entry;

#ifdef SGEN_HEAVY_BINARY_PROTOCOL
	binary_protocol_gray_enqueue (queue, queue->cursor, obj);
#endif
/*
 * We attempt to spread the objects in the gray queue across a number
 * of sections. If the queue has more sections, then it's already spread;
 * if it doesn't have enough sections, then we allocate as many as we can.
 */
sgen_gray_object_spread (SgenGrayQueue *queue, int num_sections)
	GrayQueueSection *section_start, *section_end;
	int total_entries = 0, num_entries_per_section;
	int num_sections_final;

	if (queue->num_sections >= num_sections)
		return;
	/* Compute number of elements in the gray queue */
	queue->first->size = queue->cursor - queue->first->entries + 1;
	total_entries = queue->first->size;
	for (section_start = queue->first->next; section_start != NULL; section_start = section_start->next) {
		SGEN_ASSERT (0, section_start->size == SGEN_GRAY_QUEUE_SECTION_SIZE, "We expect all sections aside from the first one to be full");
		total_entries += section_start->size;
	}
	/* Compute how many sections we should have and elements per section */
	num_sections_final = (total_entries > num_sections) ? num_sections : total_entries;
	num_entries_per_section = total_entries / num_sections_final;

	/* Allocate all needed sections */
	while (queue->num_sections < num_sections_final)
		sgen_gray_object_alloc_queue_section (queue, TRUE);
	/* Spread out the elements in the sections. By design, sections at the end are fuller. */
	section_start = queue->first;
	section_end = queue->last;
	while (section_start != section_end) {
		/* We move entries from end to start, until they meet */
		while (section_start->size < num_entries_per_section) {
			GrayQueueEntry entry;
			if (section_end->size <= num_entries_per_section) {
				section_end = section_end->prev;
				if (section_end == section_start)
					goto done;
			}
			if (section_end->size <= num_entries_per_section)
				break;

			section_end->size--;
			entry = section_end->entries [section_end->size];
			section_start->entries [section_start->size] = entry;
			section_start->size++;
		}
		section_start = section_start->next;
	}
done:
	queue->cursor = queue->first->entries + queue->first->size - 1;
	queue->num_sections = num_sections_final;
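/*
 * A worked example (illustrative numbers only): with total_entries == 100 and
 * num_sections == 8, num_sections_final is 8 and num_entries_per_section is
 * 100 / 8 == 12. Entries are then moved from the tail sections towards the
 * head until every section holds at least 12; the remaining 4 stay at the
 * tail, which is why sections at the end come out fuller.
 */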
sgen_gray_object_dequeue (SgenGrayQueue *queue, gboolean is_parallel)
	GrayQueueEntry entry;
	HEAVY_STAT (stat_gray_queue_dequeue_slow_path ++);

	if (sgen_gray_object_queue_is_empty (queue)) {
		entry.obj = NULL;
		return entry;
	}

	STATE_ASSERT (queue->first, GRAY_QUEUE_SECTION_STATE_ENQUEUED);
	SGEN_ASSERT (9, queue->cursor >= GRAY_FIRST_CURSOR_POSITION (queue->first), "gray queue %p underflow", queue);

	entry = *queue->cursor--;

#ifdef SGEN_HEAVY_BINARY_PROTOCOL
	binary_protocol_gray_dequeue (queue, queue->cursor + 1, entry.obj);
#endif
	if (G_UNLIKELY (queue->cursor < GRAY_FIRST_CURSOR_POSITION (queue->first))) {
		GrayQueueSection *section;
		gint32 old_num_sections = 0;

		if (is_parallel)
			old_num_sections = InterlockedDecrement (&queue->num_sections);
		else
			queue->num_sections--;

		if (is_parallel && old_num_sections <= 0) {
			mono_os_mutex_lock (&queue->steal_mutex);
		}

		section = queue->first;
		queue->first = section->next;
		if (queue->first) {
			queue->first->prev = NULL;
		} else {
			queue->last = NULL;
			SGEN_ASSERT (0, !old_num_sections, "Why do we have an inconsistent number of sections?");
		}
		section->next = queue->free_list;

		STATE_TRANSITION (section, GRAY_QUEUE_SECTION_STATE_ENQUEUED, GRAY_QUEUE_SECTION_STATE_FREE_LIST);

		queue->free_list = section;
		queue->cursor = queue->first ? queue->first->entries + queue->first->size - 1 : NULL;

		if (is_parallel && old_num_sections <= 0) {
			mono_os_mutex_unlock (&queue->steal_mutex);
		}
	}

	return entry;
sgen_gray_object_dequeue_section (SgenGrayQueue *queue)
	GrayQueueSection *section;

	if (!queue->first)
		return NULL;

	/* We never steal from this queue */
	queue->num_sections--;

	section = queue->first;
	queue->first = section->next;
	if (queue->first)
		queue->first->prev = NULL;
	else
		queue->last = NULL;

	section->next = NULL;
	section->size = queue->cursor - section->entries + 1;
	queue->cursor = queue->first ? queue->first->entries + queue->first->size - 1 : NULL;

	STATE_TRANSITION (section, GRAY_QUEUE_SECTION_STATE_ENQUEUED, GRAY_QUEUE_SECTION_STATE_FLOATING);

	return section;
sgen_gray_object_steal_section (SgenGrayQueue *queue)
	gint32 sections_remaining;
	GrayQueueSection *section = NULL;
	/*
	 * Each time a section is pushed to or popped from the queue we update the
	 * number of sections. There is only one thread accessing the top (the owner)
	 * and potentially multiple workers trying to steal sections from the bottom,
	 * so we need to lock. A num_sections decrement from the owner means that the
	 * first section is reserved, while a decrement by the stealer means that the
	 * last section is reserved. If, after we decrement num_sections, we still have
	 * at least one more section present, it means we can't race with the other
	 * thread. If this is not the case, the stealing end abandons the pop, restoring
	 * num_sections, while the owner end takes the lock to make sure we are not
	 * racing with the stealer (since the stealer might have popped an entry and be
	 * in the process of updating the entry that the owner is trying to pop).
	 */
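	/*
	 * A concrete scenario (illustrative): say num_sections is 2. If the stealer
	 * takes steal_mutex and decrements first (2 -> 1), it detaches the tail; the
	 * remaining section is the head, which only the owner touches. If the owner
	 * then drains the head and its decrement hits 0, it sees old_num_sections <= 0
	 * and waits on steal_mutex before unlinking, so both ends never detach the
	 * same section. Conversely, if the owner decrements first (2 -> 1), the
	 * stealer's decrement hits 0, so it restores the count and gives up.
	 */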
	if (queue->num_sections <= 1)
		return NULL;

	/* Give up if there is contention on the last section */
	if (mono_os_mutex_trylock (&queue->steal_mutex) != 0)
		return NULL;
	sections_remaining = InterlockedDecrement (&queue->num_sections);
	if (sections_remaining <= 0) {
		/* The section that we tried to steal might be the head of the queue. */
		InterlockedIncrement (&queue->num_sections);
	} else {
		/* We have reserved for us the tail section of the queue */
		section = queue->last;
		SGEN_ASSERT (0, section, "Why don't we have any sections to steal?");
		SGEN_ASSERT (0, !section->next, "Why aren't we stealing the tail?");
		queue->last = section->prev;
		section->prev = NULL;
		SGEN_ASSERT (0, queue->last, "Why are we stealing the last section?");
		queue->last->next = NULL;

		STATE_TRANSITION (section, GRAY_QUEUE_SECTION_STATE_ENQUEUED, GRAY_QUEUE_SECTION_STATE_FLOATING);
	}

	mono_os_mutex_unlock (&queue->steal_mutex);

	return section;
sgen_gray_object_enqueue_section (SgenGrayQueue *queue, GrayQueueSection *section, gboolean is_parallel)
	STATE_TRANSITION (section, GRAY_QUEUE_SECTION_STATE_FLOATING, GRAY_QUEUE_SECTION_STATE_ENQUEUED);
	if (queue->first)
		queue->first->size = queue->cursor - queue->first->entries + 1;

	section->next = queue->first;
	section->prev = NULL;
	if (queue->first)
		queue->first->prev = section;
	else
		queue->last = section;
	queue->first = section;
	queue->cursor = queue->first->entries + queue->first->size - 1;
#ifdef SGEN_CHECK_GRAY_OBJECT_ENQUEUE
	if (queue->enqueue_check_func) {
		int i;
		for (i = 0; i < section->size; ++i)
			queue->enqueue_check_func (section->entries [i].obj);
	}
#endif
	if (is_parallel) {
		mono_memory_write_barrier ();
		InterlockedIncrement (&queue->num_sections);
	} else {
		queue->num_sections++;
	}
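/*
 * Trim the reusable free list: keep at most GRAY_QUEUE_LENGTH_LIMIT sections
 * around for the next collection and hand everything beyond that back to
 * sgen_gray_object_free_queue_section ().
 */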
sgen_gray_object_queue_trim_free_list (SgenGrayQueue *queue)
	GrayQueueSection *section, *next;
	int i = 0;

	for (section = queue->free_list; section && i < GRAY_QUEUE_LENGTH_LIMIT - 1; section = section->next) {
		STATE_ASSERT (section, GRAY_QUEUE_SECTION_STATE_FREE_LIST);
		i++;
	}
	if (!section)
		return;
	while (section->next) {
		next = section->next;
		section->next = next->next;
		STATE_TRANSITION (next, GRAY_QUEUE_SECTION_STATE_FREE_LIST, GRAY_QUEUE_SECTION_STATE_FLOATING);
		sgen_gray_object_free_queue_section (next);
	}
sgen_gray_object_queue_init (SgenGrayQueue *queue, GrayQueueEnqueueCheckFunc enqueue_check_func, gboolean reuse_free_list)
	memset (queue, 0, sizeof (SgenGrayQueue));

#ifdef SGEN_CHECK_GRAY_OBJECT_ENQUEUE
	queue->enqueue_check_func = enqueue_check_func;
#endif

	mono_os_mutex_init (&queue->steal_mutex);

	if (reuse_free_list) {
		queue->free_list = last_gray_queue_free_list;
		last_gray_queue_free_list = NULL;
	}
sgen_gray_object_queue_dispose (SgenGrayQueue *queue)
	SGEN_ASSERT (0, sgen_gray_object_queue_is_empty (queue), "Why are we disposing a gray queue that's not empty?");
	/* Free the extra sections allocated during the last collection */
	sgen_gray_object_queue_trim_free_list (queue);

	SGEN_ASSERT (0, !last_gray_queue_free_list, "Are we disposing two gray queues one after another?");
	last_gray_queue_free_list = queue->free_list;
	/* just to make sure */
	memset (queue, 0, sizeof (SgenGrayQueue));
sgen_gray_object_queue_deinit (SgenGrayQueue *queue)
	g_assert (!queue->first);
	while (queue->free_list) {
		GrayQueueSection *next = queue->free_list->next;
		STATE_TRANSITION (queue->free_list, GRAY_QUEUE_SECTION_STATE_FREE_LIST, GRAY_QUEUE_SECTION_STATE_FLOATING);
		sgen_gray_object_free_queue_section (queue->free_list);
		queue->free_list = next;
	}
lock_section_queue (SgenSectionGrayQueue *queue)
	if (!queue->locked)
		return;
	mono_os_mutex_lock (&queue->lock);
unlock_section_queue (SgenSectionGrayQueue *queue)
	if (!queue->locked)
		return;
	mono_os_mutex_unlock (&queue->lock);
sgen_section_gray_queue_init (SgenSectionGrayQueue *queue, gboolean locked, GrayQueueEnqueueCheckFunc enqueue_check_func)
	g_assert (sgen_section_gray_queue_is_empty (queue));
	queue->locked = locked;
	if (locked) {
		mono_os_mutex_init_recursive (&queue->lock);
	}
#ifdef SGEN_CHECK_GRAY_OBJECT_ENQUEUE
	queue->enqueue_check_func = enqueue_check_func;
#endif

sgen_section_gray_queue_is_empty (SgenSectionGrayQueue *queue)
	return !queue->first;
sgen_section_gray_queue_dequeue (SgenSectionGrayQueue *queue)
	GrayQueueSection *section;

	lock_section_queue (queue);
	if (queue->first) {
		section = queue->first;
		queue->first = section->next;
		STATE_TRANSITION (section, GRAY_QUEUE_SECTION_STATE_ENQUEUED, GRAY_QUEUE_SECTION_STATE_FLOATING);
		section->next = NULL;
	} else {
		section = NULL;
	}

	unlock_section_queue (queue);
	return section;
sgen_section_gray_queue_enqueue (SgenSectionGrayQueue *queue, GrayQueueSection *section)
	STATE_TRANSITION (section, GRAY_QUEUE_SECTION_STATE_FLOATING, GRAY_QUEUE_SECTION_STATE_ENQUEUED);

	lock_section_queue (queue);

	section->next = queue->first;
	queue->first = section;
#ifdef SGEN_CHECK_GRAY_OBJECT_ENQUEUE
	if (queue->enqueue_check_func) {
		int i;
		for (i = 0; i < section->size; ++i)
			queue->enqueue_check_func (section->entries [i].obj);
	}
#endif
	unlock_section_queue (queue);

sgen_init_gray_queues (void)
#ifdef HEAVY_STATISTICS
	mono_counters_register ("Gray Queue alloc section", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_gray_queue_section_alloc);
	mono_counters_register ("Gray Queue free section", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_gray_queue_section_free);
	mono_counters_register ("Gray Queue enqueue fast path", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_gray_queue_enqueue_fast_path);
	mono_counters_register ("Gray Queue dequeue fast path", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_gray_queue_dequeue_fast_path);
	mono_counters_register ("Gray Queue enqueue slow path", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_gray_queue_enqueue_slow_path);
	mono_counters_register ("Gray Queue dequeue slow path", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_gray_queue_dequeue_slow_path);
#endif