mono/sgen/sgen-gray.c
/*
 * sgen-gray.c: Gray queue management.
 *
 * Copyright 2001-2003 Ximian, Inc
 * Copyright 2003-2010 Novell, Inc.
 * Copyright (C) 2012 Xamarin Inc
 *
 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
 */
#include "config.h"
#ifdef HAVE_SGEN_GC

#include "mono/sgen/sgen-gc.h"
#include "mono/sgen/sgen-protocol.h"

#ifdef HEAVY_STATISTICS
guint64 stat_gray_queue_section_alloc;
guint64 stat_gray_queue_section_free;
guint64 stat_gray_queue_enqueue_fast_path;
guint64 stat_gray_queue_dequeue_fast_path;
guint64 stat_gray_queue_enqueue_slow_path;
guint64 stat_gray_queue_dequeue_slow_path;
#endif

#define GRAY_QUEUE_LENGTH_LIMIT 64

#ifdef SGEN_CHECK_GRAY_OBJECT_SECTIONS
#define STATE_TRANSITION(s,o,n) do {                                    \
                int __old = (o);                                        \
                if (InterlockedCompareExchange ((volatile int*)&(s)->state, (n), __old) != __old) \
                        g_assert_not_reached ();                        \
        } while (0)
#define STATE_SET(s,v)          (s)->state = (v)
#define STATE_ASSERT(s,v)       g_assert ((s)->state == (v))
#else
#define STATE_TRANSITION(s,o,n)
#define STATE_SET(s,v)
#define STATE_ASSERT(s,v)
#endif
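
/*
 * Illustrative sketch (hypothetical code, not compiled): with
 * SGEN_CHECK_GRAY_OBJECT_SECTIONS defined, an out-of-order transition fails
 * the compare-and-exchange above and aborts, so e.g. a double free of a
 * section is caught immediately:
 */
#if 0
STATE_SET (section, GRAY_QUEUE_SECTION_STATE_FLOATING);
STATE_TRANSITION (section, GRAY_QUEUE_SECTION_STATE_FLOATING, GRAY_QUEUE_SECTION_STATE_FREED);
/* The second free asserts: the state is already FREED, so the expected
 * FLOATING -> FREED exchange does not match. */
STATE_TRANSITION (section, GRAY_QUEUE_SECTION_STATE_FLOATING, GRAY_QUEUE_SECTION_STATE_FREED);
#endif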

/*
 * Whenever we dispose of a gray queue, we save its free list.  Then, in the next
 * collection, we reuse that free list for the new gray queue.
 */
static GrayQueueSection *last_gray_queue_free_list;

void
sgen_gray_object_alloc_queue_section (SgenGrayQueue *queue, gboolean is_parallel)
{
        GrayQueueSection *section;

        if (queue->free_list) {
                /* Reuse a previously allocated queue section if possible */
                section = queue->free_list;
                queue->free_list = section->next;
                STATE_TRANSITION (section, GRAY_QUEUE_SECTION_STATE_FREE_LIST, GRAY_QUEUE_SECTION_STATE_FLOATING);
        } else {
                HEAVY_STAT (stat_gray_queue_section_alloc ++);

                /* Allocate a new section */
                section = (GrayQueueSection *)sgen_alloc_internal (INTERNAL_MEM_GRAY_QUEUE);
                STATE_SET (section, GRAY_QUEUE_SECTION_STATE_FLOATING);
        }

        /* Section is empty */
        section->size = 0;

        STATE_TRANSITION (section, GRAY_QUEUE_SECTION_STATE_FLOATING, GRAY_QUEUE_SECTION_STATE_ENQUEUED);

        /* Link it with the others */
        section->next = queue->first;
        section->prev = NULL;
        if (queue->first)
                queue->first->prev = section;
        else
                queue->last = section;
        queue->first = section;
        queue->cursor = section->entries - 1;

        if (is_parallel) {
                mono_memory_write_barrier ();
                /*
                 * FIXME
                 * We could probably optimize the code to rely only on the write barrier
                 * for synchronization with the stealer thread.  Additionally, we could
                 * do a write barrier only once every other gray queue change, and require
                 * a minimum number of sections before stealing, to keep consistency.
                 */
                InterlockedIncrement (&queue->num_sections);
        } else {
                queue->num_sections++;
        }
}
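
/*
 * Worked example (illustrative): after allocating a section into an empty
 * queue, queue->cursor points one slot before the first entry, so the
 * pre-increment store in sgen_gray_object_enqueue lands on entries [0]:
 */
#if 0
sgen_gray_object_alloc_queue_section (queue, FALSE);
g_assert (queue->cursor == queue->first->entries - 1);
*++queue->cursor = entry;       /* writes entries [0] */
#endif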

void
sgen_gray_object_free_queue_section (GrayQueueSection *section)
{
        HEAVY_STAT (stat_gray_queue_section_free ++);

        STATE_TRANSITION (section, GRAY_QUEUE_SECTION_STATE_FLOATING, GRAY_QUEUE_SECTION_STATE_FREED);
        sgen_free_internal (section, INTERNAL_MEM_GRAY_QUEUE);
}

/*
 * The following two functions are called in the inner loops of the
 * collector, so they need to be as fast as possible.  We have macros
 * for them in sgen-gc.h.
 */

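/*
 * Minimal sketch of the fast-path shape (an assumption for illustration; the
 * actual macros live in sgen-gc.h and may differ): bump the cursor in place
 * while the current section has room, and fall back to the slow-path
 * function below only when it is full or missing.
 */
#if 0
#define GRAY_OBJECT_ENQUEUE_SKETCH(queue,obj,desc) do {                 \
                if (G_LIKELY ((queue)->first && (queue)->cursor < GRAY_LAST_CURSOR_POSITION ((queue)->first))) \
                        *++(queue)->cursor = SGEN_GRAY_QUEUE_ENTRY ((obj), (desc)); \
                else                                                    \
                        sgen_gray_object_enqueue ((queue), (obj), (desc), FALSE); \
        } while (0)
#endif
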
void
sgen_gray_object_enqueue (SgenGrayQueue *queue, GCObject *obj, SgenDescriptor desc, gboolean is_parallel)
{
        GrayQueueEntry entry = SGEN_GRAY_QUEUE_ENTRY (obj, desc);

        HEAVY_STAT (stat_gray_queue_enqueue_slow_path ++);

        SGEN_ASSERT (9, obj, "enqueueing a null object");
        //sgen_check_objref (obj);

#ifdef SGEN_CHECK_GRAY_OBJECT_ENQUEUE
        if (queue->enqueue_check_func)
                queue->enqueue_check_func (obj);
#endif

        if (G_UNLIKELY (!queue->first || queue->cursor == GRAY_LAST_CURSOR_POSITION (queue->first))) {
                if (queue->first) {
                        /*
                         * We don't actively update the section size with each push/pop.
                         * For the first section we determine the size from the cursor
                         * position.  For the rest of the sections we need to have the
                         * size set explicitly.
                         */
                        queue->first->size = SGEN_GRAY_QUEUE_SECTION_SIZE;
                }

                sgen_gray_object_alloc_queue_section (queue, is_parallel);
        }
        STATE_ASSERT (queue->first, GRAY_QUEUE_SECTION_STATE_ENQUEUED);
        SGEN_ASSERT (9, queue->cursor <= GRAY_LAST_CURSOR_POSITION (queue->first), "gray queue %p overflow, first %p, cursor %p", queue, queue->first, queue->cursor);
        *++queue->cursor = entry;

#ifdef SGEN_HEAVY_BINARY_PROTOCOL
        binary_protocol_gray_enqueue (queue, queue->cursor, obj);
#endif
}

/*
 * We attempt to spread the objects in the gray queue across a number of
 * sections.  If the queue already has at least num_sections sections, it is
 * already spread; if it has fewer, we allocate as many sections as we can
 * and redistribute the entries among them.
 */
void
sgen_gray_object_spread (SgenGrayQueue *queue, int num_sections)
{
        GrayQueueSection *section_start, *section_end;
        int total_entries = 0, num_entries_per_section;
        int num_sections_final;

        if (queue->num_sections >= num_sections)
                return;

        if (!queue->first)
                return;

        /* Compute the number of elements in the gray queue */
        queue->first->size = queue->cursor - queue->first->entries + 1;
        total_entries = queue->first->size;
        for (section_start = queue->first->next; section_start != NULL; section_start = section_start->next) {
                SGEN_ASSERT (0, section_start->size == SGEN_GRAY_QUEUE_SECTION_SIZE, "We expect all sections aside from the first one to be full");
                total_entries += section_start->size;
        }

        /* Compute how many sections we should have and how many elements per section */
        num_sections_final = (total_entries > num_sections) ? num_sections : total_entries;
        num_entries_per_section = total_entries / num_sections_final;

        /* Allocate all the needed sections */
        while (queue->num_sections < num_sections_final)
                sgen_gray_object_alloc_queue_section (queue, TRUE);

        /* Spread out the elements in the sections. By design, sections at the end are fuller. */
        section_start = queue->first;
        section_end = queue->last;
        while (section_start != section_end) {
                /* We move entries from the end to the start, until they meet */
                while (section_start->size < num_entries_per_section) {
                        GrayQueueEntry entry;
                        if (section_end->size <= num_entries_per_section) {
                                section_end = section_end->prev;
                                if (section_end == section_start)
                                        break;
                        }
                        if (section_end->size <= num_entries_per_section)
                                break;

                        section_end->size--;
                        entry = section_end->entries [section_end->size];
                        section_start->entries [section_start->size] = entry;
                        section_start->size++;
                }
                section_start = section_start->next;
        }

        queue->cursor = queue->first->entries + queue->first->size - 1;
        queue->num_sections = num_sections_final;
}
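
/*
 * Worked example (illustrative): with total_entries = 10 and
 * num_sections = 4, num_sections_final = 4 and num_entries_per_section =
 * 10 / 4 = 2.  The loop tops up the leading sections to at least 2 entries
 * each by moving entries from the tail; the 2 leftover entries from the
 * integer division stay in the tail sections, which is why sections at the
 * end come out fuller.
 */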

GrayQueueEntry
sgen_gray_object_dequeue (SgenGrayQueue *queue, gboolean is_parallel)
{
        GrayQueueEntry entry;

        HEAVY_STAT (stat_gray_queue_dequeue_slow_path ++);

        if (sgen_gray_object_queue_is_empty (queue)) {
                entry.obj = NULL;
                return entry;
        }

        STATE_ASSERT (queue->first, GRAY_QUEUE_SECTION_STATE_ENQUEUED);
        SGEN_ASSERT (9, queue->cursor >= GRAY_FIRST_CURSOR_POSITION (queue->first), "gray queue %p underflow", queue);

        entry = *queue->cursor--;

#ifdef SGEN_HEAVY_BINARY_PROTOCOL
        binary_protocol_gray_dequeue (queue, queue->cursor + 1, entry.obj);
#endif

        if (G_UNLIKELY (queue->cursor < GRAY_FIRST_CURSOR_POSITION (queue->first))) {
                GrayQueueSection *section;
                gint32 old_num_sections = 0;

                if (is_parallel)
                        old_num_sections = InterlockedDecrement (&queue->num_sections);
                else
                        queue->num_sections--;

                if (is_parallel && old_num_sections <= 0) {
                        mono_os_mutex_lock (&queue->steal_mutex);
                }

                section = queue->first;
                queue->first = section->next;
                if (queue->first) {
                        queue->first->prev = NULL;
                } else {
                        queue->last = NULL;
                        SGEN_ASSERT (0, !old_num_sections, "Why do we have an inconsistent number of sections?");
                }
                section->next = queue->free_list;

                STATE_TRANSITION (section, GRAY_QUEUE_SECTION_STATE_ENQUEUED, GRAY_QUEUE_SECTION_STATE_FREE_LIST);

                queue->free_list = section;
                queue->cursor = queue->first ? queue->first->entries + queue->first->size - 1 : NULL;

                if (is_parallel && old_num_sections <= 0) {
                        mono_os_mutex_unlock (&queue->steal_mutex);
                }
        }

        return entry;
}
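
/*
 * Typical usage (illustrative sketch): a serial drain loop dequeues until
 * the NULL-object sentinel signals that the queue is empty.
 */
#if 0
for (;;) {
        GrayQueueEntry entry = sgen_gray_object_dequeue (queue, FALSE);
        if (!entry.obj)
                break;
        /* ... scan entry.obj using entry.desc ... */
}
#endif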

GrayQueueSection*
sgen_gray_object_dequeue_section (SgenGrayQueue *queue)
{
        GrayQueueSection *section;

        if (!queue->first)
                return NULL;

        /* We never steal from this queue */
        queue->num_sections--;

        section = queue->first;
        queue->first = section->next;
        if (queue->first)
                queue->first->prev = NULL;
        else
                queue->last = NULL;

        section->next = NULL;
        section->size = queue->cursor - section->entries + 1;

        queue->cursor = queue->first ? queue->first->entries + queue->first->size - 1 : NULL;

        STATE_TRANSITION (section, GRAY_QUEUE_SECTION_STATE_ENQUEUED, GRAY_QUEUE_SECTION_STATE_FLOATING);

        return section;
}

GrayQueueSection*
sgen_gray_object_steal_section (SgenGrayQueue *queue)
{
        gint32 sections_remaining;
        GrayQueueSection *section = NULL;

        /*
         * Whenever a section is pushed to or popped from the queue, num_sections
         * is updated.  Only one thread (the owner) accesses the head of the queue,
         * while potentially multiple workers try to steal sections from the tail,
         * so we need to lock.  A num_sections decrement by the owner means that
         * the first section is reserved, while a decrement by a stealer means
         * that the last section is reserved.  If, after we decrement num_sections,
         * at least one more section is still present, we cannot race with the
         * other thread.  Otherwise, the stealing end abandons the pop and restores
         * num_sections, while the owner end takes the lock to make sure it is not
         * racing with a stealer (since the stealer might have popped an entry and
         * be in the process of updating the entry that the owner is trying to pop).
         */

        if (queue->num_sections <= 1)
                return NULL;

        /* Give up if there is contention on the last section */
        if (mono_os_mutex_trylock (&queue->steal_mutex) != 0)
                return NULL;

        sections_remaining = InterlockedDecrement (&queue->num_sections);
        if (sections_remaining <= 0) {
                /* The section that we tried to steal might be the head of the queue. */
                InterlockedIncrement (&queue->num_sections);
        } else {
                /* We have reserved for ourselves the tail section of the queue */
                section = queue->last;
                SGEN_ASSERT (0, section, "Why don't we have any sections to steal?");
                SGEN_ASSERT (0, !section->next, "Why aren't we stealing the tail?");
                queue->last = section->prev;
                section->prev = NULL;
                SGEN_ASSERT (0, queue->last, "Why are we stealing the last section?");
                queue->last->next = NULL;

                STATE_TRANSITION (section, GRAY_QUEUE_SECTION_STATE_ENQUEUED, GRAY_QUEUE_SECTION_STATE_FLOATING);
        }

        mono_os_mutex_unlock (&queue->steal_mutex);
        return section;
}
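
/*
 * Worked scenario (illustrative): with num_sections == 3, a stealer's
 * InterlockedDecrement leaves 2, so at least one section separates it from
 * the owner and the steal of queue->last proceeds.  If a racing pop already
 * claimed a section and the decrement reaches 0, the two threads could be
 * contending for the same section: the stealer undoes the decrement and
 * gives up, while the owner side (in sgen_gray_object_dequeue) takes
 * steal_mutex before unlinking.
 */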

void
sgen_gray_object_enqueue_section (SgenGrayQueue *queue, GrayQueueSection *section, gboolean is_parallel)
{
        STATE_TRANSITION (section, GRAY_QUEUE_SECTION_STATE_FLOATING, GRAY_QUEUE_SECTION_STATE_ENQUEUED);

        if (queue->first)
                queue->first->size = queue->cursor - queue->first->entries + 1;

        section->next = queue->first;
        section->prev = NULL;
        if (queue->first)
                queue->first->prev = section;
        else
                queue->last = section;
        queue->first = section;
        queue->cursor = queue->first->entries + queue->first->size - 1;
#ifdef SGEN_CHECK_GRAY_OBJECT_ENQUEUE
        if (queue->enqueue_check_func) {
                int i;
                for (i = 0; i < section->size; ++i)
                        queue->enqueue_check_func (section->entries [i].obj);
        }
#endif
        if (is_parallel) {
                mono_memory_write_barrier ();
                InterlockedIncrement (&queue->num_sections);
        } else {
                queue->num_sections++;
        }
}

void
sgen_gray_object_queue_trim_free_list (SgenGrayQueue *queue)
{
        GrayQueueSection *section, *next;
        int i = 0;
        for (section = queue->free_list; section && i < GRAY_QUEUE_LENGTH_LIMIT - 1; section = section->next) {
                STATE_ASSERT (section, GRAY_QUEUE_SECTION_STATE_FREE_LIST);
                i ++;
        }
        if (!section)
                return;
        while (section->next) {
                next = section->next;
                section->next = next->next;
                STATE_TRANSITION (next, GRAY_QUEUE_SECTION_STATE_FREE_LIST, GRAY_QUEUE_SECTION_STATE_FLOATING);
                sgen_gray_object_free_queue_section (next);
        }
}
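
/*
 * Example (illustrative): with GRAY_QUEUE_LENGTH_LIMIT == 64, trimming a
 * free list of 100 sections keeps the first 64 and frees the remaining 36.
 */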

void
sgen_gray_object_queue_init (SgenGrayQueue *queue, GrayQueueEnqueueCheckFunc enqueue_check_func, gboolean reuse_free_list)
{
        memset (queue, 0, sizeof (SgenGrayQueue));

#ifdef SGEN_CHECK_GRAY_OBJECT_ENQUEUE
        queue->enqueue_check_func = enqueue_check_func;
#endif

        mono_os_mutex_init (&queue->steal_mutex);

        if (reuse_free_list) {
                queue->free_list = last_gray_queue_free_list;
                last_gray_queue_free_list = NULL;
        }
}

void
sgen_gray_object_queue_dispose (SgenGrayQueue *queue)
{
        SGEN_ASSERT (0, sgen_gray_object_queue_is_empty (queue), "Why are we disposing a gray queue that's not empty?");

        /* Free the extra sections allocated during the last collection */
        sgen_gray_object_queue_trim_free_list (queue);

        SGEN_ASSERT (0, !last_gray_queue_free_list, "Are we disposing two gray queues one after another?");
        last_gray_queue_free_list = queue->free_list;

        /* Just to make sure */
        memset (queue, 0, sizeof (SgenGrayQueue));
}
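
/*
 * Illustrative sketch of the handoff between collections: dispose stashes
 * the queue's free list in last_gray_queue_free_list, and the next init
 * with reuse_free_list == TRUE picks it up, avoiding fresh section
 * allocations.
 */
#if 0
sgen_gray_object_queue_dispose (&queue);           /* end of collection N */
sgen_gray_object_queue_init (&queue, NULL, TRUE);  /* start of collection N + 1 */
#endif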

void
sgen_gray_object_queue_deinit (SgenGrayQueue *queue)
{
        g_assert (!queue->first);
        while (queue->free_list) {
                GrayQueueSection *next = queue->free_list->next;
                STATE_TRANSITION (queue->free_list, GRAY_QUEUE_SECTION_STATE_FREE_LIST, GRAY_QUEUE_SECTION_STATE_FLOATING);
                sgen_gray_object_free_queue_section (queue->free_list);
                queue->free_list = next;
        }
}

static void
lock_section_queue (SgenSectionGrayQueue *queue)
{
        if (!queue->locked)
                return;

        mono_os_mutex_lock (&queue->lock);
}

static void
unlock_section_queue (SgenSectionGrayQueue *queue)
{
        if (!queue->locked)
                return;

        mono_os_mutex_unlock (&queue->lock);
}

void
sgen_section_gray_queue_init (SgenSectionGrayQueue *queue, gboolean locked, GrayQueueEnqueueCheckFunc enqueue_check_func)
{
        g_assert (sgen_section_gray_queue_is_empty (queue));

        queue->locked = locked;
        if (locked) {
                mono_os_mutex_init_recursive (&queue->lock);
        }

#ifdef SGEN_CHECK_GRAY_OBJECT_ENQUEUE
        queue->enqueue_check_func = enqueue_check_func;
#endif
}

gboolean
sgen_section_gray_queue_is_empty (SgenSectionGrayQueue *queue)
{
        return !queue->first;
}

GrayQueueSection*
sgen_section_gray_queue_dequeue (SgenSectionGrayQueue *queue)
{
        GrayQueueSection *section;

        lock_section_queue (queue);

        if (queue->first) {
                section = queue->first;
                queue->first = section->next;

                STATE_TRANSITION (section, GRAY_QUEUE_SECTION_STATE_ENQUEUED, GRAY_QUEUE_SECTION_STATE_FLOATING);

                section->next = NULL;
        } else {
                section = NULL;
        }

        unlock_section_queue (queue);

        return section;
}

void
sgen_section_gray_queue_enqueue (SgenSectionGrayQueue *queue, GrayQueueSection *section)
{
        STATE_TRANSITION (section, GRAY_QUEUE_SECTION_STATE_FLOATING, GRAY_QUEUE_SECTION_STATE_ENQUEUED);

        lock_section_queue (queue);

        section->next = queue->first;
        queue->first = section;
#ifdef SGEN_CHECK_GRAY_OBJECT_ENQUEUE
        if (queue->enqueue_check_func) {
                int i;
                for (i = 0; i < section->size; ++i)
                        queue->enqueue_check_func (section->entries [i].obj);
        }
#endif

        unlock_section_queue (queue);
}
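
/*
 * Usage sketch (illustrative; `sq` and `worker_queue` are hypothetical
 * names): a locked SgenSectionGrayQueue lets one thread hand whole sections
 * to another.
 */
#if 0
GrayQueueSection *s;
sgen_section_gray_queue_init (&sq, TRUE, NULL);
/* Producer: move a full section out of a private gray queue */
s = sgen_gray_object_dequeue_section (&worker_queue);
if (s)
        sgen_section_gray_queue_enqueue (&sq, s);
/* Consumer: take a section, or NULL if none is available */
s = sgen_section_gray_queue_dequeue (&sq);
#endif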

void
sgen_init_gray_queues (void)
{
#ifdef HEAVY_STATISTICS
        mono_counters_register ("Gray Queue alloc section", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_gray_queue_section_alloc);
        mono_counters_register ("Gray Queue free section", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_gray_queue_section_free);
        mono_counters_register ("Gray Queue enqueue fast path", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_gray_queue_enqueue_fast_path);
        mono_counters_register ("Gray Queue dequeue fast path", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_gray_queue_dequeue_fast_path);
        mono_counters_register ("Gray Queue enqueue slow path", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_gray_queue_enqueue_slow_path);
        mono_counters_register ("Gray Queue dequeue slow path", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_gray_queue_dequeue_slow_path);
#endif
}
#endif