[sgen] Add parallel copying infrastructure
[mono.git] / mono / sgen / sgen-marksweep.c
/*
 * sgen-marksweep.c: The Mark & Sweep major collector.
 *
 * Author:
 *      Mark Probst <mark.probst@gmail.com>
 *
 * Copyright 2009-2010 Novell, Inc.
 * Copyright (C) 2012 Xamarin Inc
 *
 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
 */

#include "config.h"

#ifdef HAVE_SGEN_GC

#include <math.h>
#include <errno.h>
#include <string.h>
#include <stdlib.h>

#include "mono/sgen/sgen-gc.h"
#include "mono/sgen/sgen-protocol.h"
#include "mono/sgen/sgen-cardtable.h"
#include "mono/sgen/sgen-memory-governor.h"
#include "mono/sgen/sgen-layout-stats.h"
#include "mono/sgen/sgen-pointer-queue.h"
#include "mono/sgen/sgen-array-list.h"
#include "mono/sgen/sgen-pinning.h"
#include "mono/sgen/sgen-workers.h"
#include "mono/sgen/sgen-thread-pool.h"
#include "mono/sgen/sgen-client.h"
#include "mono/utils/mono-memory-model.h"

#if defined(ARCH_MIN_MS_BLOCK_SIZE) && defined(ARCH_MIN_MS_BLOCK_SIZE_SHIFT)
#define MS_BLOCK_SIZE   ARCH_MIN_MS_BLOCK_SIZE
#define MS_BLOCK_SIZE_SHIFT     ARCH_MIN_MS_BLOCK_SIZE_SHIFT
#else
#define MS_BLOCK_SIZE_SHIFT     14      /* INT FASTENABLE */
#define MS_BLOCK_SIZE           (1 << MS_BLOCK_SIZE_SHIFT)
#endif
#define MAJOR_SECTION_SIZE      MS_BLOCK_SIZE
#define CARDS_PER_BLOCK (MS_BLOCK_SIZE / CARD_SIZE_IN_BYTES)

/*
 * Don't allocate single blocks, but allocate a batch of this many
 * blocks in one go.  This must be a power of two.
 */
#define MS_BLOCK_ALLOC_NUM      32

/*
 * Number of bytes before the first object in a block.  At the start
 * of a block is the MSBlockHeader, then optional padding, then come
 * the objects, so this must be >= sizeof (MSBlockHeader).
 */
#define MS_BLOCK_SKIP   ((sizeof (MSBlockHeader) + 15) & ~15)

#define MS_BLOCK_FREE   (MS_BLOCK_SIZE - MS_BLOCK_SKIP)

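/*
 * One mark bit per SGEN_ALLOC_ALIGN-sized granule, packed into guint32
 * words; the expression below rounds the bit count up to whole words.
 */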
#define MS_NUM_MARK_WORDS       ((MS_BLOCK_SIZE / SGEN_ALLOC_ALIGN + sizeof (guint32) * 8 - 1) / (sizeof (guint32) * 8))

/*
 * Blocks progress from one state to the next:
 *
 * SWEPT           The block is fully swept.  It might or might not be in
 *                 a free list.
 *
 * MARKING         The block might or might not contain live objects.  If
 *                 we're in between an initial collection pause and the
 *                 finishing pause, the block might or might not be in a
 *                 free list.
 *
 * CHECKING        The sweep thread is investigating the block to determine
 *                 whether or not it contains live objects.  The block is
 *                 not in a free list.
 *
 * NEED_SWEEPING   The block contains live objects but has not yet been
 *                 swept.  It also contains free slots.  It is in a block
 *                 free list.
 *
 * SWEEPING        The block is being swept.  It might be in a free list.
 */

enum {
        BLOCK_STATE_SWEPT,
        BLOCK_STATE_MARKING,
        BLOCK_STATE_CHECKING,
        BLOCK_STATE_NEED_SWEEPING,
        BLOCK_STATE_SWEEPING
};

typedef struct _MSBlockInfo MSBlockInfo;
struct _MSBlockInfo {
        guint16 obj_size;
        /*
         * FIXME: Do we even need this? It's only used during sweep and might be worth
         * recalculating to save the space.
         */
        guint16 obj_size_index;
        /* FIXME: Reduce this - it only needs a byte. */
        volatile gint32 state;
        gint16 nused;
        unsigned int pinned : 1;
        unsigned int has_references : 1;
        unsigned int has_pinned : 1;    /* means cannot evacuate */
        unsigned int is_to_space : 1;
        void ** volatile free_list;
        MSBlockInfo * volatile next_free;
        guint8 * volatile cardtable_mod_union;
        guint32 mark_words [MS_NUM_MARK_WORDS];
};

#define MS_BLOCK_FOR_BLOCK_INFO(b)      ((char*)(b))

#define MS_BLOCK_OBJ(b,i)               ((GCObject *)(MS_BLOCK_FOR_BLOCK_INFO(b) + MS_BLOCK_SKIP + (b)->obj_size * (i)))
#define MS_BLOCK_OBJ_FOR_SIZE(b,i,obj_size)             (MS_BLOCK_FOR_BLOCK_INFO(b) + MS_BLOCK_SKIP + (obj_size) * (i))
#define MS_BLOCK_DATA_FOR_OBJ(o)        ((char*)((mword)(o) & ~(mword)(MS_BLOCK_SIZE - 1)))

typedef struct {
        MSBlockInfo info;
} MSBlockHeader;

#define MS_BLOCK_FOR_OBJ(o)             (&((MSBlockHeader*)MS_BLOCK_DATA_FOR_OBJ ((o)))->info)

/* object index will always be small */
#define MS_BLOCK_OBJ_INDEX(o,b) ((int)(((char*)(o) - (MS_BLOCK_FOR_BLOCK_INFO(b) + MS_BLOCK_SKIP)) / (b)->obj_size))

/* casting to int is fine since blocks are small (16k by default) */
#define MS_CALC_MARK_BIT(w,b,o)         do {                            \
                int i = ((int)((char*)(o) - MS_BLOCK_DATA_FOR_OBJ ((o)))) >> SGEN_ALLOC_ALIGN_BITS; \
                (w) = i >> 5;                                           \
                (b) = i & 31;                                           \
        } while (0)

#define MS_MARK_BIT(bl,w,b)     ((bl)->mark_words [(w)] & (ONE_P << (b)))
#define MS_SET_MARK_BIT(bl,w,b) ((bl)->mark_words [(w)] |= (ONE_P << (b)))
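/*
 * Parallel version of MS_SET_MARK_BIT: the bit is set with a CAS loop so
 * that several workers can mark into the same word concurrently.  `first`
 * ends up TRUE only for the thread that actually flipped the bit; that
 * thread is the one responsible for enqueueing the object.
 */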
#define MS_SET_MARK_BIT_PAR(bl,w,b,first)       do {                    \
                guint32 tmp_mark_word = (bl)->mark_words [(w)];         \
                guint32 old_mark_word;                                  \
                first = FALSE;                                          \
                while (!(tmp_mark_word & (ONE_P << (b)))) {             \
                        old_mark_word = tmp_mark_word;                  \
                        tmp_mark_word = InterlockedCompareExchange ((volatile gint32*)&(bl)->mark_words [w], old_mark_word | (ONE_P << (b)), old_mark_word); \
                        if (tmp_mark_word == old_mark_word) {           \
                                first = TRUE;                           \
                                break;                                  \
                        }                                               \
                }                                                       \
        } while (0)

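/*
 * A slot is allocated iff its first word is non-NULL and points outside its
 * own block: live objects start with a vtable pointer, while free slots
 * start with a free-list link into the same block, or NULL.
 */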
#define MS_OBJ_ALLOCED(o,b)     (*(void**)(o) && (*(char**)(o) < MS_BLOCK_FOR_BLOCK_INFO (b) || *(char**)(o) >= MS_BLOCK_FOR_BLOCK_INFO (b) + MS_BLOCK_SIZE))

#define MS_BLOCK_OBJ_SIZE_FACTOR        (pow (2.0, 1.0 / 3))

/*
 * This way we can look up block object size indexes for sizes up to
 * 256 bytes with a single load.
 */
#define MS_NUM_FAST_BLOCK_OBJ_SIZE_INDEXES      32

static int *block_obj_sizes;
static int num_block_obj_sizes;
static int fast_block_obj_size_indexes [MS_NUM_FAST_BLOCK_OBJ_SIZE_INDEXES];

#define MS_BLOCK_FLAG_PINNED    1
#define MS_BLOCK_FLAG_REFS      2

#define MS_BLOCK_TYPE_MAX       4

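/*
 * Set per object-size class at sweep time: if the measured occupancy of a
 * size class falls below `evacuation_threshold`, its blocks are evacuated
 * during the next major collection rather than marked in place.
 */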
static gboolean *evacuate_block_obj_sizes;
static float evacuation_threshold = 0.666f;

static gboolean lazy_sweep = TRUE;

enum {
        SWEEP_STATE_SWEPT,
        SWEEP_STATE_NEED_SWEEPING,
        SWEEP_STATE_SWEEPING,
        SWEEP_STATE_SWEEPING_AND_ITERATING,
        SWEEP_STATE_COMPACTING
};

static volatile int sweep_state = SWEEP_STATE_SWEPT;

static gboolean concurrent_mark;
static gboolean concurrent_sweep = TRUE;

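/*
 * Entries in `allocated_blocks` carry metadata in the low pointer bits:
 * tag 1 marks blocks with references, tag 2 marks blocks currently being
 * checked by the sweep thread.  BLOCK_UNTAG strips both tags.
 */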
#define BLOCK_IS_TAGGED_HAS_REFERENCES(bl)      SGEN_POINTER_IS_TAGGED_1 ((bl))
#define BLOCK_TAG_HAS_REFERENCES(bl)            SGEN_POINTER_TAG_1 ((bl))

#define BLOCK_IS_TAGGED_CHECKING(bl)            SGEN_POINTER_IS_TAGGED_2 ((bl))
#define BLOCK_TAG_CHECKING(bl)                  SGEN_POINTER_TAG_2 ((bl))

#define BLOCK_UNTAG(bl)                         ((MSBlockInfo *)SGEN_POINTER_UNTAG_12 ((bl)))

#define BLOCK_TAG(bl)                           ((bl)->has_references ? BLOCK_TAG_HAS_REFERENCES ((bl)) : (bl))

/* all allocated blocks in the system */
static SgenArrayList allocated_blocks = SGEN_ARRAY_LIST_INIT (NULL, sgen_array_list_default_is_slot_set, sgen_array_list_default_cas_setter, INTERNAL_MEM_PIN_QUEUE);

/* non-allocated block free-list */
static void *empty_blocks = NULL;
static size_t num_empty_blocks = 0;

/*
 * We can also iterate the block list while sweep is in progress, but we
 * need to account for blocks that will be checked for sweeping, and even
 * freed, in the process.
 */
#define FOREACH_BLOCK_NO_LOCK(bl) {                                     \
        volatile gpointer *slot;                                                \
        SGEN_ARRAY_LIST_FOREACH_SLOT (&allocated_blocks, slot) {        \
                (bl) = BLOCK_UNTAG (*slot);                             \
                if (!(bl))                                              \
                        continue;
#define FOREACH_BLOCK_HAS_REFERENCES_NO_LOCK(bl,hr) {                   \
        volatile gpointer *slot;                                                \
        SGEN_ARRAY_LIST_FOREACH_SLOT (&allocated_blocks, slot) {        \
                (bl) = (MSBlockInfo *) (*slot);                 \
                if (!(bl))                                              \
                        continue;                                       \
                (hr) = BLOCK_IS_TAGGED_HAS_REFERENCES ((bl));           \
                (bl) = BLOCK_UNTAG ((bl));
#define END_FOREACH_BLOCK_NO_LOCK       } SGEN_ARRAY_LIST_END_FOREACH_SLOT; }

static volatile size_t num_major_sections = 0;
/*
 * One free block list for each block object size.  We add and remove blocks from these
 * lists lock-free via CAS.
 *
 * Blocks accessed/removed from `free_block_lists`:
 *   from the mutator (with GC lock held)
 *   in nursery collections
 *   in non-concurrent major collections
 *   in the finishing pause of concurrent major collections (whole list is cleared)
 *
 * Blocks added to `free_block_lists`:
 *   in the sweeping thread
 *   during nursery collections
 *   from domain clearing (with the world stopped and no sweeping happening)
 *
 * The only one of those that doesn't happen with the GC lock held is the sweep
 * thread.  The sweep thread only ever adds blocks to the free lists, so the ABA
 * problem can't occur.
 */
static MSBlockInfo * volatile *free_block_lists [MS_BLOCK_TYPE_MAX];
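/*
 * Each sgen worker also keeps private free-block lists, reached through
 * this TLS key (see FREE_BLOCKS_LOCAL and major_alloc_object_par below):
 * workers steal whole blocks from the shared lists with a CAS and then
 * allocate from them without further contention.
 */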
static MonoNativeTlsKey worker_block_free_list_key;

static guint64 stat_major_blocks_alloced = 0;
static guint64 stat_major_blocks_freed = 0;
static guint64 stat_major_blocks_lazy_swept = 0;

static guint64 stat_major_blocks_freed_ideal = 0;
static guint64 stat_major_blocks_freed_less_ideal = 0;
static guint64 stat_major_blocks_freed_individual = 0;
static guint64 stat_major_blocks_alloced_less_ideal = 0;

#ifdef SGEN_COUNT_NUMBER_OF_MAJOR_OBJECTS_MARKED
static guint64 num_major_objects_marked = 0;
#define INC_NUM_MAJOR_OBJECTS_MARKED()  (++num_major_objects_marked)
#else
#define INC_NUM_MAJOR_OBJECTS_MARKED()
#endif

#ifdef SGEN_HEAVY_BINARY_PROTOCOL
static mono_mutex_t scanned_objects_list_lock;
static SgenPointerQueue scanned_objects_list;

static void
add_scanned_object (void *ptr)
{
        if (!binary_protocol_is_enabled ())
                return;

        mono_os_mutex_lock (&scanned_objects_list_lock);
        sgen_pointer_queue_add (&scanned_objects_list, ptr);
        mono_os_mutex_unlock (&scanned_objects_list_lock);
}
#endif

static gboolean sweep_block (MSBlockInfo *block);

static int
ms_find_block_obj_size_index (size_t size)
{
        int i;
        SGEN_ASSERT (9, size <= SGEN_MAX_SMALL_OBJ_SIZE, "size %zd is bigger than max small object size %d", size, SGEN_MAX_SMALL_OBJ_SIZE);
        for (i = 0; i < num_block_obj_sizes; ++i)
                if (block_obj_sizes [i] >= size)
                        return i;
        g_error ("no object of size %zd\n", size);
        return -1;
}

#define FREE_BLOCKS_FROM(lists,p,r)     (lists [((p) ? MS_BLOCK_FLAG_PINNED : 0) | ((r) ? MS_BLOCK_FLAG_REFS : 0)])
#define FREE_BLOCKS(p,r)                (FREE_BLOCKS_FROM (free_block_lists, (p), (r)))
#define FREE_BLOCKS_LOCAL(p,r)          (FREE_BLOCKS_FROM (((MSBlockInfo***)mono_native_tls_get_value (worker_block_free_list_key)), (p), (r)))

#define MS_BLOCK_OBJ_SIZE_INDEX(s)                              \
        (((s)+7)>>3 < MS_NUM_FAST_BLOCK_OBJ_SIZE_INDEXES ?      \
         fast_block_obj_size_indexes [((s)+7)>>3] :             \
         ms_find_block_obj_size_index ((s)))
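/*
 * Example: for size 40, (40+7)>>3 = 5 < 32, so the index comes from the
 * fast table with a single load; for size 300, (300+7)>>3 = 38, so we fall
 * back to the linear search in ms_find_block_obj_size_index ().
 */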

static void*
major_alloc_heap (mword nursery_size, mword nursery_align, int the_nursery_bits)
{
        char *start;
        if (nursery_align)
                start = (char *)sgen_alloc_os_memory_aligned (nursery_size, nursery_align, (SgenAllocFlags)(SGEN_ALLOC_HEAP | SGEN_ALLOC_ACTIVATE), "nursery", MONO_MEM_ACCOUNT_SGEN_NURSERY);
        else
                start = (char *)sgen_alloc_os_memory (nursery_size, (SgenAllocFlags)(SGEN_ALLOC_HEAP | SGEN_ALLOC_ACTIVATE), "nursery", MONO_MEM_ACCOUNT_SGEN_NURSERY);

        return start;
}

static void
update_heap_boundaries_for_block (MSBlockInfo *block)
{
        sgen_update_heap_boundaries ((mword)MS_BLOCK_FOR_BLOCK_INFO (block), (mword)MS_BLOCK_FOR_BLOCK_INFO (block) + MS_BLOCK_SIZE);
}

/*
 * Thread safe
 */
static void*
ms_get_empty_block (void)
{
        char *p;
        int i;
        void *block, *empty, *next;

 retry:
        if (!empty_blocks) {
                /*
                 * We try allocating MS_BLOCK_ALLOC_NUM blocks first.  If that's
                 * unsuccessful, we halve the number of blocks and try again, until we're at
                 * 1.  If that doesn't work either, we assert.
                 */
                int alloc_num = MS_BLOCK_ALLOC_NUM;
                for (;;) {
                        p = (char *)sgen_alloc_os_memory_aligned (MS_BLOCK_SIZE * alloc_num, MS_BLOCK_SIZE,
                                (SgenAllocFlags)(SGEN_ALLOC_HEAP | SGEN_ALLOC_ACTIVATE),
                                alloc_num == 1 ? "major heap section" : NULL, MONO_MEM_ACCOUNT_SGEN_MARKSWEEP);
                        if (p)
                                break;
                        alloc_num >>= 1;
                }

                for (i = 0; i < alloc_num; ++i) {
                        block = p;
                        /*
                         * We do the free list update one after the
                         * other so that other threads can use the new
                         * blocks as quickly as possible.
                         */
                        do {
                                empty = empty_blocks;
                                *(void**)block = empty;
                        } while (SGEN_CAS_PTR ((gpointer*)&empty_blocks, block, empty) != empty);
                        p += MS_BLOCK_SIZE;
                }

                SGEN_ATOMIC_ADD_P (num_empty_blocks, alloc_num);

                stat_major_blocks_alloced += alloc_num;
#if SIZEOF_VOID_P != 8
                if (alloc_num != MS_BLOCK_ALLOC_NUM)
                        stat_major_blocks_alloced_less_ideal += alloc_num;
#endif
        }

        do {
                empty = empty_blocks;
                if (!empty)
                        goto retry;
                block = empty;
                next = *(void**)block;
        } while (SGEN_CAS_PTR (&empty_blocks, next, empty) != empty);

        SGEN_ATOMIC_ADD_P (num_empty_blocks, -1);

        *(void**)block = NULL;

        g_assert (!((mword)block & (MS_BLOCK_SIZE - 1)));

        return block;
}

/*
 * This doesn't actually free a block immediately, but enqueues it into the `empty_blocks`
 * list, where it will either be freed later on, or reused in nursery collections.
 */
static void
ms_free_block (MSBlockInfo *info)
{
        void *empty;
        char *block = MS_BLOCK_FOR_BLOCK_INFO (info);

        sgen_memgov_release_space (MS_BLOCK_SIZE, SPACE_MAJOR);
        if (info->cardtable_mod_union)
                sgen_card_table_free_mod_union (info->cardtable_mod_union, block, MS_BLOCK_SIZE);
        memset (block, 0, MS_BLOCK_SIZE);

        do {
                empty = empty_blocks;
                *(void**)block = empty;
        } while (SGEN_CAS_PTR (&empty_blocks, block, empty) != empty);

        SGEN_ATOMIC_ADD_P (num_empty_blocks, 1);

        binary_protocol_block_free (block, MS_BLOCK_SIZE);
}

static gboolean
sweep_in_progress (void)
{
        int state = sweep_state;
        return state == SWEEP_STATE_SWEEPING ||
                state == SWEEP_STATE_SWEEPING_AND_ITERATING ||
                state == SWEEP_STATE_COMPACTING;
}

static inline gboolean
block_is_swept_or_marking (MSBlockInfo *block)
{
        gint32 state = block->state;
        return state == BLOCK_STATE_SWEPT || state == BLOCK_STATE_MARKING;
}

//#define MARKSWEEP_CONSISTENCY_CHECK

#ifdef MARKSWEEP_CONSISTENCY_CHECK
static void
check_block_free_list (MSBlockInfo *block, int size, gboolean pinned)
{
        SGEN_ASSERT (0, !sweep_in_progress (), "Can't examine allocated blocks during sweep");
        for (; block; block = block->next_free) {
                SGEN_ASSERT (0, block->state != BLOCK_STATE_CHECKING, "Can't have a block we're checking in a free list.");
                g_assert (block->obj_size == size);
                g_assert ((pinned && block->pinned) || (!pinned && !block->pinned));

                /* blocks in the free lists must have at least
                   one free slot */
                g_assert (block->free_list);

                /* the block must be in the allocated_blocks array */
                g_assert (sgen_array_list_find (&allocated_blocks, BLOCK_TAG (block)) != (guint32)-1);
        }
}

static void
check_empty_blocks (void)
{
        void *p;
        size_t i = 0;
        for (p = empty_blocks; p; p = *(void**)p)
                ++i;
        g_assert (i == num_empty_blocks);
}

static void
consistency_check (void)
{
        MSBlockInfo *block;
        int i;

        /* check all blocks */
        FOREACH_BLOCK_NO_LOCK (block) {
                int count = MS_BLOCK_FREE / block->obj_size;
                int num_free = 0;
                void **free;

                /* count number of free slots */
                for (i = 0; i < count; ++i) {
                        void **obj = (void**) MS_BLOCK_OBJ (block, i);
                        if (!MS_OBJ_ALLOCED (obj, block))
                                ++num_free;
                }

                /* check free list */
                for (free = block->free_list; free; free = (void**)*free) {
                        g_assert (MS_BLOCK_FOR_OBJ (free) == block);
                        --num_free;
                }
                g_assert (num_free == 0);

                /* check all mark words are zero */
                if (!sgen_concurrent_collection_in_progress () && block_is_swept_or_marking (block)) {
                        for (i = 0; i < MS_NUM_MARK_WORDS; ++i)
                                g_assert (block->mark_words [i] == 0);
                }
        } END_FOREACH_BLOCK_NO_LOCK;

        /* check free blocks */
        for (i = 0; i < num_block_obj_sizes; ++i) {
                int j;
                for (j = 0; j < MS_BLOCK_TYPE_MAX; ++j)
                        check_block_free_list (free_block_lists [j][i], block_obj_sizes [i], j & MS_BLOCK_FLAG_PINNED);
        }

        check_empty_blocks ();
}
#endif

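/* Lock-free push of `block` onto the free list for its size class. */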
static void
add_free_block (MSBlockInfo * volatile *free_blocks, int size_index, MSBlockInfo *block)
{
        MSBlockInfo *old;
        do {
                block->next_free = old = free_blocks [size_index];
        } while (SGEN_CAS_PTR ((volatile gpointer *)&free_blocks [size_index], block, old) != old);
}

static void major_finish_sweep_checking (void);

static gboolean
ms_alloc_block (int size_index, gboolean pinned, gboolean has_references)
{
        int size = block_obj_sizes [size_index];
        int count = MS_BLOCK_FREE / size;
        MSBlockInfo *info;
        MSBlockInfo * volatile * free_blocks = FREE_BLOCKS (pinned, has_references);
        char *obj_start;
        int i;

        if (!sgen_memgov_try_alloc_space (MS_BLOCK_SIZE, SPACE_MAJOR))
                return FALSE;

        info = (MSBlockInfo*)ms_get_empty_block ();

        SGEN_ASSERT (9, count >= 2, "block with %d objects, it must hold at least 2", count);

        info->obj_size = size;
        info->obj_size_index = size_index;
        info->pinned = pinned;
        info->has_references = has_references;
        info->has_pinned = pinned;
        /*
         * Blocks that are to-space are not evacuated from.  During a major collection
         * blocks are allocated for two reasons: evacuating objects from the nursery and
         * evacuating them from major blocks marked for evacuation.  In both cases we don't
         * want further evacuation.  We also don't want to evacuate objects allocated during
         * the concurrent mark since it would add pointless stress on the finishing pause.
         */
        info->is_to_space = (sgen_get_current_collection_generation () == GENERATION_OLD) || sgen_concurrent_collection_in_progress ();
        info->state = info->is_to_space ? BLOCK_STATE_MARKING : BLOCK_STATE_SWEPT;
        SGEN_ASSERT (6, !sweep_in_progress () || info->state == BLOCK_STATE_SWEPT, "How do we add a new block to be swept while sweeping?");
        info->cardtable_mod_union = NULL;

        update_heap_boundaries_for_block (info);

        binary_protocol_block_alloc (info, MS_BLOCK_SIZE);

        /* build free list */
        obj_start = MS_BLOCK_FOR_BLOCK_INFO (info) + MS_BLOCK_SKIP;
        info->free_list = (void**)obj_start;
        /* we're skipping the last one - it must be nulled */
        for (i = 0; i < count - 1; ++i) {
                char *next_obj_start = obj_start + size;
                *(void**)obj_start = next_obj_start;
                obj_start = next_obj_start;
        }
        /* the last one */
        *(void**)obj_start = NULL;

        add_free_block (free_blocks, size_index, info);

        sgen_array_list_add (&allocated_blocks, BLOCK_TAG (info), 0, FALSE);

        SGEN_ATOMIC_ADD_P (num_major_sections, 1);
        return TRUE;
}

static gboolean
ptr_is_in_major_block (char *ptr, char **start, gboolean *pinned)
{
        MSBlockInfo *block;

        FOREACH_BLOCK_NO_LOCK (block) {
                if (ptr >= MS_BLOCK_FOR_BLOCK_INFO (block) && ptr <= MS_BLOCK_FOR_BLOCK_INFO (block) + MS_BLOCK_SIZE) {
                        int count = MS_BLOCK_FREE / block->obj_size;
                        int i;

                        if (start)
                                *start = NULL;
                        for (i = 0; i <= count; ++i) {
                                if (ptr >= (char*)MS_BLOCK_OBJ (block, i) && ptr < (char*)MS_BLOCK_OBJ (block, i + 1)) {
                                        if (start)
                                                *start = (char *)MS_BLOCK_OBJ (block, i);
                                        break;
                                }
                        }
                        if (pinned)
                                *pinned = block->pinned;
                        return TRUE;
                }
        } END_FOREACH_BLOCK_NO_LOCK;
        return FALSE;
}

static gboolean
ptr_is_from_pinned_alloc (char *ptr)
{
        gboolean pinned;
        if (ptr_is_in_major_block (ptr, NULL, &pinned))
                return pinned;
        return FALSE;
}

static void
ensure_can_access_block_free_list (MSBlockInfo *block)
{
 retry:
        for (;;) {
                switch (block->state) {
                case BLOCK_STATE_SWEPT:
                case BLOCK_STATE_MARKING:
                        return;
                case BLOCK_STATE_CHECKING:
                        SGEN_ASSERT (0, FALSE, "How did we get a block that's being checked from a free list?");
                        break;
                case BLOCK_STATE_NEED_SWEEPING:
                        if (sweep_block (block))
                                ++stat_major_blocks_lazy_swept;
                        break;
                case BLOCK_STATE_SWEEPING:
                        /* FIXME: do this more elegantly */
                        g_usleep (100);
                        goto retry;
                default:
                        SGEN_ASSERT (0, FALSE, "Illegal block state");
                        break;
                }
        }
}

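/*
 * Pop one free slot off the first block in the list.  If that was the
 * block's last slot, the block itself is unlinked from the block list with
 * a CAS; on contention we simply start over.
 */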
static void*
unlink_slot_from_free_list_uncontested (MSBlockInfo * volatile *free_blocks, int size_index)
{
        MSBlockInfo *block, *next_free_block;
        void *obj, *next_free_slot;

 retry:
        block = free_blocks [size_index];
        SGEN_ASSERT (9, block, "no free block to unlink from free_blocks %p size_index %d", free_blocks, size_index);

        ensure_can_access_block_free_list (block);

        obj = block->free_list;
        SGEN_ASSERT (6, obj, "block %p in free list had no available object to alloc from", block);

        next_free_slot = *(void**)obj;
        if (next_free_slot) {
                block->free_list = (gpointer *)next_free_slot;
                return obj;
        }

        next_free_block = block->next_free;
        if (SGEN_CAS_PTR ((volatile gpointer *)&free_blocks [size_index], next_free_block, block) != block)
                goto retry;

        block->free_list = NULL;
        block->next_free = NULL;

        return obj;
}

static GCObject*
alloc_obj (GCVTable vtable, size_t size, gboolean pinned, gboolean has_references)
{
        int size_index = MS_BLOCK_OBJ_SIZE_INDEX (size);
        MSBlockInfo * volatile * free_blocks = FREE_BLOCKS (pinned, has_references);
        void *obj;

        if (!free_blocks [size_index]) {
                if (G_UNLIKELY (!ms_alloc_block (size_index, pinned, has_references)))
                        return NULL;
        }

        obj = unlink_slot_from_free_list_uncontested (free_blocks, size_index);

        /* FIXME: assumes object layout */
        *(GCVTable*)obj = vtable;

        total_allocated_major += block_obj_sizes [size_index];

        return (GCObject *)obj;
}

static GCObject*
major_alloc_object (GCVTable vtable, size_t size, gboolean has_references)
{
        return alloc_obj (vtable, size, FALSE, has_references);
}

/*
 * This can only be called by sgen workers. While this is called we assume
 * that no other thread is accessing the block free lists. The world should
 * be stopped and the gc thread should be waiting for workers to finish.
 */
static GCObject*
major_alloc_object_par (GCVTable vtable, size_t size, gboolean has_references)
{
        int size_index = MS_BLOCK_OBJ_SIZE_INDEX (size);
        MSBlockInfo * volatile * free_blocks = FREE_BLOCKS (FALSE, has_references);
        MSBlockInfo **free_blocks_local = FREE_BLOCKS_LOCAL (FALSE, has_references);
        void *obj;

        if (free_blocks_local [size_index]) {
get_slot:
                obj = unlink_slot_from_free_list_uncontested (free_blocks_local, size_index);
        } else {
                MSBlockInfo *block;
get_block:
                block = free_blocks [size_index];
                if (!block) {
                        if (G_UNLIKELY (!ms_alloc_block (size_index, FALSE, has_references)))
                                return NULL;
                        goto get_block;
                } else {
                        MSBlockInfo *next_free = block->next_free;
                        /*
                         * Once a block is removed from the main list, it cannot return to the list until
                         * all the workers are finished and sweep is starting. This means we don't need
                         * to account for ABA problems.
                         */
                        if (SGEN_CAS_PTR ((volatile gpointer *)&free_blocks [size_index], next_free, block) != block)
                                goto get_block;
                        g_assert (block->free_list);
                        block->next_free = free_blocks_local [size_index];
                        free_blocks_local [size_index] = block;

                        goto get_slot;
                }
        }

        /* FIXME: assumes object layout */
        *(GCVTable*)obj = vtable;

        /* FIXME: is it worth CAS-ing here? */
        total_allocated_major += block_obj_sizes [size_index];

        return (GCObject *)obj;
}

/*
 * We're not freeing the block if it's empty.  We leave that work for
 * the next major collection.
 *
 * This is just called from the domain clearing code, which runs in a
 * single thread and has the GC lock, so we don't need an extra lock.
 */
static void
free_object (GCObject *obj, size_t size, gboolean pinned)
{
        MSBlockInfo *block = MS_BLOCK_FOR_OBJ (obj);
        int word, bit;
        gboolean in_free_list;

        SGEN_ASSERT (9, sweep_state == SWEEP_STATE_SWEPT, "Should have waited for sweep to free objects.");

        ensure_can_access_block_free_list (block);
        SGEN_ASSERT (9, (pinned && block->pinned) || (!pinned && !block->pinned), "free-object pinning mixup object %p pinned %d block %p pinned %d", obj, pinned, block, block->pinned);
        SGEN_ASSERT (9, MS_OBJ_ALLOCED (obj, block), "object %p is already free", obj);
        MS_CALC_MARK_BIT (word, bit, obj);
        SGEN_ASSERT (9, !MS_MARK_BIT (block, word, bit), "object %p has mark bit set", obj);

        memset (obj, 0, size);

        in_free_list = !!block->free_list;
        *(void**)obj = block->free_list;
        block->free_list = (void**)obj;

        if (!in_free_list) {
                MSBlockInfo * volatile *free_blocks = FREE_BLOCKS (pinned, block->has_references);
                int size_index = MS_BLOCK_OBJ_SIZE_INDEX (size);
                SGEN_ASSERT (9, !block->next_free, "block %p doesn't have a free-list of objects but belongs to a free-list of blocks", block);
                add_free_block (free_blocks, size_index, block);
        }
}

static void
major_free_non_pinned_object (GCObject *obj, size_t size)
{
        free_object (obj, size, FALSE);
}

/* size is a multiple of SGEN_ALLOC_ALIGN */
static GCObject*
major_alloc_small_pinned_obj (GCVTable vtable, size_t size, gboolean has_references)
{
        void *res;

        res = alloc_obj (vtable, size, TRUE, has_references);
        /*
         * If we failed to allocate memory, we better try releasing some,
         * since pinned allocations are requested by the runtime.
         */
        if (!res) {
                sgen_perform_collection (0, GENERATION_OLD, "pinned alloc failure", TRUE, TRUE);
                res = alloc_obj (vtable, size, TRUE, has_references);
        }
        return (GCObject *)res;
}

static void
free_pinned_object (GCObject *obj, size_t size)
{
        free_object (obj, size, TRUE);
}

/*
 * size is already rounded up and we hold the GC lock.
 */
static GCObject*
major_alloc_degraded (GCVTable vtable, size_t size)
{
        GCObject *obj;

        obj = alloc_obj (vtable, size, FALSE, SGEN_VTABLE_HAS_REFERENCES (vtable));
        if (G_LIKELY (obj)) {
                HEAVY_STAT (++stat_objects_alloced_degraded);
                HEAVY_STAT (stat_bytes_alloced_degraded += size);
        }
        return obj;
}

/*
 * obj is some object.  If it's not in the major heap (i.e. if it's in
 * the nursery or LOS), return FALSE.  Otherwise return whether it's
 * been marked or copied.
 */
static gboolean
major_is_object_live (GCObject *obj)
{
        MSBlockInfo *block;
        int word, bit;
        mword objsize;

        if (sgen_ptr_in_nursery (obj))
                return FALSE;

        objsize = SGEN_ALIGN_UP (sgen_safe_object_get_size (obj));

        /* LOS */
        if (objsize > SGEN_MAX_SMALL_OBJ_SIZE)
                return FALSE;

        /* now we know it's in a major block */
        block = MS_BLOCK_FOR_OBJ (obj);
        SGEN_ASSERT (9, !block->pinned, "block %p is pinned, BTW why is this bad?", block);
        MS_CALC_MARK_BIT (word, bit, obj);
        return MS_MARK_BIT (block, word, bit) ? TRUE : FALSE;
}

static gboolean
major_ptr_is_in_non_pinned_space (char *ptr, char **start)
{
        gboolean pinned;
        if (ptr_is_in_major_block (ptr, start, &pinned))
                return !pinned;
        return FALSE;
}

static gboolean
try_set_sweep_state (int new_, int expected)
{
        int old = SGEN_CAS (&sweep_state, new_, expected);
        return old == expected;
}

static void
set_sweep_state (int new_, int expected)
{
        gboolean success = try_set_sweep_state (new_, expected);
        SGEN_ASSERT (0, success, "Could not set sweep state.");
}

static gboolean ensure_block_is_checked_for_sweeping (guint32 block_index, gboolean wait, gboolean *have_checked);

static SgenThreadPoolJob * volatile sweep_job;
static SgenThreadPoolJob * volatile sweep_blocks_job;

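/*
 * If a sweep is in progress, force the checking of all blocks to finish
 * and then wait for the sweep job, so that the block list is stable for
 * the caller.
 */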
static void
major_finish_sweep_checking (void)
{
        guint32 block_index;
        SgenThreadPoolJob *job;

 retry:
        switch (sweep_state) {
        case SWEEP_STATE_SWEPT:
        case SWEEP_STATE_NEED_SWEEPING:
                return;
        case SWEEP_STATE_SWEEPING:
                if (try_set_sweep_state (SWEEP_STATE_SWEEPING_AND_ITERATING, SWEEP_STATE_SWEEPING))
                        break;
                goto retry;
        case SWEEP_STATE_SWEEPING_AND_ITERATING:
                SGEN_ASSERT (0, FALSE, "Is there another minor collection running?");
                goto retry;
        case SWEEP_STATE_COMPACTING:
                goto wait;
        default:
                SGEN_ASSERT (0, FALSE, "Invalid sweep state.");
                break;
        }

        /*
         * We're running with the world stopped and the only other thread doing work is the
         * sweep thread, which doesn't add blocks to the array, so we can safely access
         * `next_slot`.
         */
        for (block_index = 0; block_index < allocated_blocks.next_slot; ++block_index)
                ensure_block_is_checked_for_sweeping (block_index, FALSE, NULL);

        set_sweep_state (SWEEP_STATE_SWEEPING, SWEEP_STATE_SWEEPING_AND_ITERATING);

 wait:
        job = sweep_job;
        if (job)
                sgen_thread_pool_job_wait (job);
        SGEN_ASSERT (0, !sweep_job, "Why did the sweep job not null itself?");
        SGEN_ASSERT (0, sweep_state == SWEEP_STATE_SWEPT, "How is the sweep job done but we're not swept?");
}

static void
major_iterate_objects (IterateObjectsFlags flags, IterateObjectCallbackFunc callback, void *data)
{
        gboolean sweep = flags & ITERATE_OBJECTS_SWEEP;
        gboolean non_pinned = flags & ITERATE_OBJECTS_NON_PINNED;
        gboolean pinned = flags & ITERATE_OBJECTS_PINNED;
        MSBlockInfo *block;

        /* No actual sweeping will take place if we are in the middle of a major collection. */
        major_finish_sweep_checking ();
        FOREACH_BLOCK_NO_LOCK (block) {
                int count = MS_BLOCK_FREE / block->obj_size;
                int i;

                if (block->pinned && !pinned)
                        continue;
                if (!block->pinned && !non_pinned)
                        continue;
                if (sweep && lazy_sweep && !block_is_swept_or_marking (block)) {
                        sweep_block (block);
                        SGEN_ASSERT (6, block->state == BLOCK_STATE_SWEPT, "Block must be swept after sweeping");
                }

                for (i = 0; i < count; ++i) {
                        void **obj = (void**) MS_BLOCK_OBJ (block, i);
                        if (MS_OBJ_ALLOCED (obj, block))
                                callback ((GCObject*)obj, block->obj_size, data);
                }
        } END_FOREACH_BLOCK_NO_LOCK;
}

static gboolean
major_is_valid_object (char *object)
{
        MSBlockInfo *block;

        FOREACH_BLOCK_NO_LOCK (block) {
                int idx;
                char *obj;

                if ((MS_BLOCK_FOR_BLOCK_INFO (block) > object) || ((MS_BLOCK_FOR_BLOCK_INFO (block) + MS_BLOCK_SIZE) <= object))
                        continue;

                idx = MS_BLOCK_OBJ_INDEX (object, block);
                obj = (char*)MS_BLOCK_OBJ (block, idx);
                if (obj != object)
                        return FALSE;
                return MS_OBJ_ALLOCED (obj, block);
        } END_FOREACH_BLOCK_NO_LOCK;

        return FALSE;
}

static GCVTable
major_describe_pointer (char *ptr)
{
        MSBlockInfo *block;

        FOREACH_BLOCK_NO_LOCK (block) {
                int idx;
                char *obj;
                gboolean live;
                GCVTable vtable;
                int w, b;
                gboolean marked;

                if ((MS_BLOCK_FOR_BLOCK_INFO (block) > ptr) || ((MS_BLOCK_FOR_BLOCK_INFO (block) + MS_BLOCK_SIZE) <= ptr))
                        continue;

                SGEN_LOG (0, "major-ptr (block %p sz %d pin %d ref %d)\n",
                        MS_BLOCK_FOR_BLOCK_INFO (block), block->obj_size, block->pinned, block->has_references);

                idx = MS_BLOCK_OBJ_INDEX (ptr, block);
                obj = (char*)MS_BLOCK_OBJ (block, idx);
                live = MS_OBJ_ALLOCED (obj, block);
                vtable = live ? SGEN_LOAD_VTABLE ((GCObject*)obj) : NULL;

                MS_CALC_MARK_BIT (w, b, obj);
                marked = MS_MARK_BIT (block, w, b);

                if (obj == ptr) {
                        SGEN_LOG (0, "\t(");
                        if (live)
                                SGEN_LOG (0, "object");
                        else
                                SGEN_LOG (0, "dead-object");
                } else {
                        if (live)
                                SGEN_LOG (0, "interior-ptr offset %zd", ptr - obj);
                        else
                                SGEN_LOG (0, "dead-interior-ptr offset %zd", ptr - obj);
                }

                SGEN_LOG (0, " marked %d)\n", marked ? 1 : 0);

                return vtable;
        } END_FOREACH_BLOCK_NO_LOCK;

        return NULL;
}

static void
major_check_scan_starts (void)
{
}

static void
major_dump_heap (FILE *heap_dump_file)
{
        MSBlockInfo *block;
        int *slots_available = (int *)alloca (sizeof (int) * num_block_obj_sizes);
        int *slots_used = (int *)alloca (sizeof (int) * num_block_obj_sizes);
        int i;

        for (i = 0; i < num_block_obj_sizes; ++i)
                slots_available [i] = slots_used [i] = 0;

        FOREACH_BLOCK_NO_LOCK (block) {
                int index = ms_find_block_obj_size_index (block->obj_size);
                int count = MS_BLOCK_FREE / block->obj_size;

                slots_available [index] += count;
                for (i = 0; i < count; ++i) {
                        if (MS_OBJ_ALLOCED (MS_BLOCK_OBJ (block, i), block))
                                ++slots_used [index];
                }
        } END_FOREACH_BLOCK_NO_LOCK;

        fprintf (heap_dump_file, "<occupancies>\n");
        for (i = 0; i < num_block_obj_sizes; ++i) {
                fprintf (heap_dump_file, "<occupancy size=\"%d\" available=\"%d\" used=\"%d\" />\n",
                                block_obj_sizes [i], slots_available [i], slots_used [i]);
        }
        fprintf (heap_dump_file, "</occupancies>\n");

        FOREACH_BLOCK_NO_LOCK (block) {
                int count = MS_BLOCK_FREE / block->obj_size;
                int i;
                int start = -1;

                fprintf (heap_dump_file, "<section type=\"%s\" size=\"%zu\">\n", "old", (size_t)MS_BLOCK_FREE);

                for (i = 0; i <= count; ++i) {
                        if ((i < count) && MS_OBJ_ALLOCED (MS_BLOCK_OBJ (block, i), block)) {
                                if (start < 0)
                                        start = i;
                        } else {
                                if (start >= 0) {
                                        sgen_dump_occupied ((char *)MS_BLOCK_OBJ (block, start), (char *)MS_BLOCK_OBJ (block, i), MS_BLOCK_FOR_BLOCK_INFO (block));
                                        start = -1;
                                }
                        }
                }

                fprintf (heap_dump_file, "</section>\n");
        } END_FOREACH_BLOCK_NO_LOCK;
}

static guint8*
get_cardtable_mod_union_for_block (MSBlockInfo *block, gboolean allocate)
{
        guint8 *mod_union = block->cardtable_mod_union;
        guint8 *other;
        if (mod_union)
                return mod_union;
        else if (!allocate)
                return NULL;
        mod_union = sgen_card_table_alloc_mod_union (MS_BLOCK_FOR_BLOCK_INFO (block), MS_BLOCK_SIZE);
        other = (guint8 *)SGEN_CAS_PTR ((gpointer*)&block->cardtable_mod_union, mod_union, NULL);
        if (!other) {
                SGEN_ASSERT (0, block->cardtable_mod_union == mod_union, "Why did CAS not replace?");
                return mod_union;
        }
        sgen_card_table_free_mod_union (mod_union, MS_BLOCK_FOR_BLOCK_INFO (block), MS_BLOCK_SIZE);
        return other;
}

static inline guint8*
major_get_cardtable_mod_union_for_reference (char *ptr)
{
        MSBlockInfo *block = MS_BLOCK_FOR_OBJ (ptr);
        size_t offset = sgen_card_table_get_card_offset (ptr, (char*)sgen_card_table_align_pointer (MS_BLOCK_FOR_BLOCK_INFO (block)));
        guint8 *mod_union = get_cardtable_mod_union_for_block (block, TRUE);
        SGEN_ASSERT (0, mod_union, "FIXME: optionally allocate the mod union if it's not here and CAS it in.");
        return &mod_union [offset];
}

/*
 * Mark the mod-union card for `ptr`, which must be a reference within the object `obj`.
 */
static void
mark_mod_union_card (GCObject *obj, void **ptr, GCObject *value_obj)
{
        int type = sgen_obj_get_descriptor (obj) & DESC_TYPE_MASK;
        if (sgen_safe_object_is_small (obj, type)) {
                guint8 *card_byte = major_get_cardtable_mod_union_for_reference ((char*)ptr);
                SGEN_ASSERT (0, MS_BLOCK_FOR_OBJ (obj) == MS_BLOCK_FOR_OBJ (ptr), "How can an object and a reference inside it not be in the same block?");
                *card_byte = 1;
        } else {
                sgen_los_mark_mod_union_card (obj, ptr);
        }
        binary_protocol_mod_union_remset (obj, ptr, value_obj, SGEN_LOAD_VTABLE (value_obj));
}

static inline gboolean
major_block_is_evacuating (MSBlockInfo *block)
{
        if (evacuate_block_obj_sizes [block->obj_size_index] &&
                        !block->has_pinned &&
                        !block->is_to_space)
                return TRUE;
        return FALSE;
}

#define MS_MARK_OBJECT_AND_ENQUEUE(obj,desc,block,queue) do {           \
                int __word, __bit;                                      \
                MS_CALC_MARK_BIT (__word, __bit, (obj));                \
                SGEN_ASSERT (9, MS_OBJ_ALLOCED ((obj), (block)), "object %p not allocated", obj); \
                if (!MS_MARK_BIT ((block), __word, __bit)) {            \
                        MS_SET_MARK_BIT ((block), __word, __bit);       \
                        if (sgen_gc_descr_has_references (desc))                        \
                                GRAY_OBJECT_ENQUEUE ((queue), (obj), (desc)); \
                        binary_protocol_mark ((obj), (gpointer)SGEN_LOAD_VTABLE ((obj)), sgen_safe_object_get_size ((obj))); \
                        INC_NUM_MAJOR_OBJECTS_MARKED ();                \
                }                                                       \
        } while (0)
#define MS_MARK_OBJECT_AND_ENQUEUE_PAR(obj,desc,block,queue) do {       \
                int __word, __bit;                                      \
                gboolean first;                                         \
                MS_CALC_MARK_BIT (__word, __bit, (obj));                \
                SGEN_ASSERT (9, MS_OBJ_ALLOCED ((obj), (block)), "object %p not allocated", obj); \
                MS_SET_MARK_BIT_PAR ((block), __word, __bit, first);    \
                if (first) {                                            \
                        if (sgen_gc_descr_has_references (desc))        \
                                GRAY_OBJECT_ENQUEUE ((queue), (obj), (desc)); \
                        binary_protocol_mark ((obj), (gpointer)SGEN_LOAD_VTABLE ((obj)), sgen_safe_object_get_size ((obj))); \
                        INC_NUM_MAJOR_OBJECTS_MARKED ();                \
                }                                                       \
        } while (0)
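/*
 * The _PAR variant differs only in using MS_SET_MARK_BIT_PAR: the mark bit
 * is set atomically, and exactly one worker, the one that saw the bit
 * flip, enqueues the object.
 */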

static void
pin_major_object (GCObject *obj, SgenGrayQueue *queue)
{
        MSBlockInfo *block;

        if (concurrent_mark)
                g_assert_not_reached ();

        block = MS_BLOCK_FOR_OBJ (obj);
        block->has_pinned = TRUE;
        MS_MARK_OBJECT_AND_ENQUEUE (obj, sgen_obj_get_descriptor (obj), block, queue);
}

#define COPY_OR_MARK_PARALLEL
#include "sgen-major-copy-object.h"

static long long
major_get_and_reset_num_major_objects_marked (void)
{
#ifdef SGEN_COUNT_NUMBER_OF_MAJOR_OBJECTS_MARKED
        long long num = num_major_objects_marked;
        num_major_objects_marked = 0;
        return num;
#else
        return 0;
#endif
}

#define PREFETCH_CARDS          1       /* BOOL FASTENABLE */
#if !PREFETCH_CARDS
#undef PREFETCH_CARDS
#endif

/* gcc 4.2.1 from xcode4 crashes on sgen_card_table_get_card_address () when this is enabled */
#if defined(PLATFORM_MACOSX)
#if MONO_GNUC_VERSION <= 40300
#undef PREFETCH_CARDS
#endif
#endif

#ifdef HEAVY_STATISTICS
static guint64 stat_optimized_copy;
static guint64 stat_optimized_copy_nursery;
static guint64 stat_optimized_copy_nursery_forwarded;
static guint64 stat_optimized_copy_nursery_pinned;
static guint64 stat_optimized_copy_major;
static guint64 stat_optimized_copy_major_small_fast;
static guint64 stat_optimized_copy_major_small_slow;
static guint64 stat_optimized_copy_major_large;
static guint64 stat_optimized_copy_major_forwarded;
static guint64 stat_optimized_copy_major_small_evacuate;
static guint64 stat_optimized_major_scan;
static guint64 stat_optimized_major_scan_no_refs;

static guint64 stat_drain_prefetch_fills;
static guint64 stat_drain_prefetch_fill_failures;
static guint64 stat_drain_loops;
#endif

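/*
 * Each include below instantiates a copy/scan/drain function triple from
 * the same template, specialized by the preceding COPY_OR_MARK_* defines:
 * serial vs. parallel, stop-the-world vs. concurrent, and with or without
 * evacuation.
 */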
1228 #define COPY_OR_MARK_FUNCTION_NAME      major_copy_or_mark_object_no_evacuation
1229 #define SCAN_OBJECT_FUNCTION_NAME       major_scan_object_no_evacuation
1230 #define DRAIN_GRAY_STACK_FUNCTION_NAME  drain_gray_stack_no_evacuation
1231 #include "sgen-marksweep-drain-gray-stack.h"
1232
1233 #define COPY_OR_MARK_WITH_EVACUATION
1234 #define COPY_OR_MARK_FUNCTION_NAME      major_copy_or_mark_object_with_evacuation
1235 #define SCAN_OBJECT_FUNCTION_NAME       major_scan_object_with_evacuation
1236 #define SCAN_VTYPE_FUNCTION_NAME        major_scan_vtype_with_evacuation
1237 #define DRAIN_GRAY_STACK_FUNCTION_NAME  drain_gray_stack_with_evacuation
1238 #define SCAN_PTR_FIELD_FUNCTION_NAME    major_scan_ptr_field_with_evacuation
1239 #include "sgen-marksweep-drain-gray-stack.h"
1240
1241 #define COPY_OR_MARK_CONCURRENT
1242 #define COPY_OR_MARK_FUNCTION_NAME      major_copy_or_mark_object_concurrent_no_evacuation
1243 #define SCAN_OBJECT_FUNCTION_NAME       major_scan_object_concurrent_no_evacuation
1244 #define DRAIN_GRAY_STACK_FUNCTION_NAME  drain_gray_stack_concurrent_no_evacuation
1245 #include "sgen-marksweep-drain-gray-stack.h"
1246
1247 #define COPY_OR_MARK_PARALLEL
1248 #define COPY_OR_MARK_CONCURRENT
1249 #define COPY_OR_MARK_FUNCTION_NAME      major_copy_or_mark_object_concurrent_par_no_evacuation
1250 #define SCAN_OBJECT_FUNCTION_NAME       major_scan_object_concurrent_par_no_evacuation
1251 #define DRAIN_GRAY_STACK_FUNCTION_NAME  drain_gray_stack_concurrent_par_no_evacuation
1252 #include "sgen-marksweep-drain-gray-stack.h"
1253
1254 #define COPY_OR_MARK_CONCURRENT_WITH_EVACUATION
1255 #define COPY_OR_MARK_FUNCTION_NAME      major_copy_or_mark_object_concurrent_with_evacuation
1256 #define SCAN_OBJECT_FUNCTION_NAME       major_scan_object_concurrent_with_evacuation
1257 #define SCAN_VTYPE_FUNCTION_NAME        major_scan_vtype_concurrent_with_evacuation
1258 #define SCAN_PTR_FIELD_FUNCTION_NAME    major_scan_ptr_field_concurrent_with_evacuation
1259 #define DRAIN_GRAY_STACK_FUNCTION_NAME  drain_gray_stack_concurrent_with_evacuation
1260 #include "sgen-marksweep-drain-gray-stack.h"
1261
1262 #define COPY_OR_MARK_PARALLEL
1263 #define COPY_OR_MARK_CONCURRENT_WITH_EVACUATION
1264 #define COPY_OR_MARK_FUNCTION_NAME      major_copy_or_mark_object_concurrent_par_with_evacuation
1265 #define SCAN_OBJECT_FUNCTION_NAME       major_scan_object_concurrent_par_with_evacuation
1266 #define SCAN_VTYPE_FUNCTION_NAME        major_scan_vtype_concurrent_par_with_evacuation
1267 #define SCAN_PTR_FIELD_FUNCTION_NAME    major_scan_ptr_field_concurrent_par_with_evacuation
1268 #define DRAIN_GRAY_STACK_FUNCTION_NAME  drain_gray_stack_concurrent_par_with_evacuation
1269 #include "sgen-marksweep-drain-gray-stack.h"
1270
1271 static inline gboolean
1272 major_is_evacuating (void)
1273 {
1274         int i;
1275         for (i = 0; i < num_block_obj_sizes; ++i) {
1276                 if (evacuate_block_obj_sizes [i]) {
1277                         return TRUE;
1278                 }
1279         }
1280
1281         return FALSE;
1282 }
1283
1284 static gboolean
1285 drain_gray_stack (SgenGrayQueue *queue)
1286 {
1287         if (major_is_evacuating ())
1288                 return drain_gray_stack_with_evacuation (queue);
1289         else
1290                 return drain_gray_stack_no_evacuation (queue);
1291 }
1292
1293 static gboolean
1294 drain_gray_stack_concurrent (SgenGrayQueue *queue)
1295 {
1296         if (major_is_evacuating ())
1297                 return drain_gray_stack_concurrent_with_evacuation (queue);
1298         else
1299                 return drain_gray_stack_concurrent_no_evacuation (queue);
1300 }
1301
1302 static gboolean
1303 drain_gray_stack_concurrent_par (SgenGrayQueue *queue)
1304 {
1305         if (major_is_evacuating ())
1306                 return drain_gray_stack_concurrent_par_with_evacuation (queue);
1307         else
1308                 return drain_gray_stack_concurrent_par_no_evacuation (queue);
1309 }
1310
1311 static void
1312 major_copy_or_mark_object_canonical (GCObject **ptr, SgenGrayQueue *queue)
1313 {
1314         major_copy_or_mark_object_with_evacuation (ptr, *ptr, queue);
1315 }
1316
1317 static void
1318 major_copy_or_mark_object_concurrent_canonical (GCObject **ptr, SgenGrayQueue *queue)
1319 {
1320         major_copy_or_mark_object_concurrent_with_evacuation (ptr, *ptr, queue);
1321 }
1322
1323 static void
1324 major_copy_or_mark_object_concurrent_par_canonical (GCObject **ptr, SgenGrayQueue *queue)
1325 {
1326         major_copy_or_mark_object_concurrent_par_with_evacuation (ptr, *ptr, queue);
1327 }
1328
1329 static void
1330 major_copy_or_mark_object_concurrent_finish_canonical (GCObject **ptr, SgenGrayQueue *queue)
1331 {
1332         major_copy_or_mark_object_with_evacuation (ptr, *ptr, queue);
1333 }
1334
1335 static void
1336 mark_pinned_objects_in_block (MSBlockInfo *block, size_t first_entry, size_t last_entry, SgenGrayQueue *queue)
1337 {
1338         void **entry, **end;
1339         int last_index = -1;
1340
1341         if (first_entry == last_entry)
1342                 return;
1343
1344         entry = sgen_pinning_get_entry (first_entry);
1345         end = sgen_pinning_get_entry (last_entry);
1346
1347         for (; entry < end; ++entry) {
1348                 int index = MS_BLOCK_OBJ_INDEX (*entry, block);
1349                 GCObject *obj;
1350                 SGEN_ASSERT (9, index >= 0 && index < MS_BLOCK_FREE / block->obj_size, "invalid object %p index %d max-index %d", *entry, index, (int)(MS_BLOCK_FREE / block->obj_size));
1351                 if (index == last_index)
1352                         continue;
1353                 obj = MS_BLOCK_OBJ (block, index);
1354                 if (!MS_OBJ_ALLOCED (obj, block))
1355                         continue;
1356                 MS_MARK_OBJECT_AND_ENQUEUE (obj, sgen_obj_get_descriptor (obj), block, queue);
1357                 sgen_pin_stats_register_object (obj, GENERATION_OLD);
1358                 last_index = index;
1359         }
1360
1361         /*
1362          * There might have been potential pinning "pointers" into this block, but none of
1363          * them pointed to occupied slots, in which case we don't have to pin the block.
1364          */
1365         if (last_index >= 0)
1366                 block->has_pinned = TRUE;
1367 }
1368
1369 static inline void
1370 sweep_block_for_size (MSBlockInfo *block, int count, int obj_size)
1371 {
1372         int obj_index;
1373
1374         for (obj_index = 0; obj_index < count; ++obj_index) {
1375                 int word, bit;
1376                 void *obj = MS_BLOCK_OBJ_FOR_SIZE (block, obj_index, obj_size);
1377
1378                 MS_CALC_MARK_BIT (word, bit, obj);
1379                 if (MS_MARK_BIT (block, word, bit)) {
1380                         SGEN_ASSERT (9, MS_OBJ_ALLOCED (obj, block), "object %p not allocated", obj);
1381                 } else {
1382                         /* an unmarked object */
1383                         if (MS_OBJ_ALLOCED (obj, block)) {
1384                                 /*
1385                                  * FIXME: Merge consecutive
1386                                  * slots for lower reporting
1387                                  * overhead.  Maybe memset
1388                                  * will also benefit?
1389                                  */
1390                                 binary_protocol_empty (obj, obj_size);
1391                                 memset (obj, 0, obj_size);
1392                         }
1393                         *(void**)obj = block->free_list;
1394                         block->free_list = (void **)obj;
1395                 }
1396         }
1397 }
1398
1399 static inline gboolean
1400 try_set_block_state (MSBlockInfo *block, gint32 new_state, gint32 expected_state)
1401 {
1402         gint32 old_state = SGEN_CAS (&block->state, new_state, expected_state);
1403         gboolean success = old_state == expected_state;
1404         if (success)
1405                 binary_protocol_block_set_state (block, MS_BLOCK_SIZE, old_state, new_state);
1406         return success;
1407 }
1408
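/*
 * The non-racy variant of try_set_block_state (): the caller must be the
 * only thread transitioning the block, so an assertion and a plain store
 * suffice.
 */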
1409 static inline void
1410 set_block_state (MSBlockInfo *block, gint32 new_state, gint32 expected_state)
1411 {
1412         SGEN_ASSERT (6, block->state == expected_state, "Block state incorrect before set");
1413         block->state = new_state;
1414         binary_protocol_block_set_state (block, MS_BLOCK_SIZE, expected_state, new_state);
1415 }
1416
1417 /*
1418  * If `block` needs sweeping, sweep it and return TRUE.  Otherwise return FALSE.
1419  *
1420  * Sweeping means iterating through the block's slots and building the free-list from the
1421  * unmarked ones.  They will also be zeroed.  The mark bits will be reset.
1422  */
1423 static gboolean
1424 sweep_block (MSBlockInfo *block)
1425 {
1426         int count;
1427         void *reversed = NULL;
1428
1429  retry:
1430         switch (block->state) {
1431         case BLOCK_STATE_SWEPT:
1432                 return FALSE;
1433         case BLOCK_STATE_MARKING:
1434         case BLOCK_STATE_CHECKING:
1435                 SGEN_ASSERT (0, FALSE, "How did we get to sweep a block that's being marked or being checked?");
1436                 goto retry;
1437         case BLOCK_STATE_SWEEPING:
1438                 /* FIXME: Do this more elegantly */
1439                 g_usleep (100);
1440                 goto retry;
1441         case BLOCK_STATE_NEED_SWEEPING:
1442                 if (!try_set_block_state (block, BLOCK_STATE_SWEEPING, BLOCK_STATE_NEED_SWEEPING))
1443                         goto retry;
1444                 break;
1445         default:
1446                 SGEN_ASSERT (0, FALSE, "Illegal block state");
1447         }
1448
1449         SGEN_ASSERT (6, block->state == BLOCK_STATE_SWEEPING, "How did we get here without setting state to sweeping?");
1450
1451         count = MS_BLOCK_FREE / block->obj_size;
1452
1453         block->free_list = NULL;
1454
1455         /* Use inline instances specialized to constant sizes; this allows the compiler to replace the memset calls with inline code. */
1456         // FIXME: Add more sizes
1457         switch (block->obj_size) {
1458         case 16:
1459                 sweep_block_for_size (block, count, 16);
1460                 break;
1461         default:
1462                 sweep_block_for_size (block, count, block->obj_size);
1463                 break;
1464         }
1465
1466         /* reset mark bits */
1467         memset (block->mark_words, 0, sizeof (guint32) * MS_NUM_MARK_WORDS);
1468
1469         /* Reverse free list so that it's in address order */
1470         reversed = NULL;
1471         while (block->free_list) {
1472                 void *next = *(void**)block->free_list;
1473                 *(void**)block->free_list = reversed;
1474                 reversed = block->free_list;
1475                 block->free_list = (void **)next;
1476         }
1477         block->free_list = (void **)reversed;
1478
1479         mono_memory_write_barrier ();
1480
1481         set_block_state (block, BLOCK_STATE_SWEPT, BLOCK_STATE_SWEEPING);
1482
1483         return TRUE;
1484 }
1485
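/*
 * Illustrative only: a hypothetical helper (not part of the collector)
 * sketching how the address-ordered free list built by sweep_block ()
 * can be walked.  Each free slot's first word links to the next free
 * slot; major_get_used_size () below walks the list the same way.
 */
static G_GNUC_UNUSED int
count_free_slots_in_block (MSBlockInfo *block)
{
        int count = 0;
        void **slot;
        for (slot = block->free_list; slot; slot = (void**)*slot)
                ++count;
        return count;
}
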
1486 static inline int
1487 bitcount (mword d)
1488 {
1489         int count = 0;
1490
1491 #ifdef __GNUC__
1492         if (sizeof (mword) == 8)
1493                 count += __builtin_popcountll (d);
1494         else
1495                 count += __builtin_popcount (d);
1496 #else
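        /* Kernighan's method: each iteration clears the lowest set bit. */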
1497         while (d) {
1498                 count ++;
1499                 d &= (d - 1);
1500         }
1501 #endif
1502         return count;
1503 }
1504
1505 /* statistics for evacuation */
1506 static size_t *sweep_slots_available;
1507 static size_t *sweep_slots_used;
1508 static size_t *sweep_num_blocks;
1509
1510 static volatile size_t num_major_sections_before_sweep;
1511 static volatile size_t num_major_sections_freed_in_sweep;
1512
1513 static void
1514 sgen_worker_clear_free_block_lists (WorkerData *worker)
1515 {
1516         int i, j;
1517
1518         if (!worker->free_block_lists)
1519                 return;
1520
1521         for (i = 0; i < MS_BLOCK_TYPE_MAX; i++) {
1522                 for (j = 0; j < num_block_obj_sizes; j++) {
1523                         ((MSBlockInfo***) worker->free_block_lists) [i][j] = NULL;
1524                 }
1525         }
1526 }
1527
1528 static void
1529 sweep_start (void)
1530 {
1531         int i;
1532
1533         for (i = 0; i < num_block_obj_sizes; ++i)
1534                 sweep_slots_available [i] = sweep_slots_used [i] = sweep_num_blocks [i] = 0;
1535
1536         /* clear all the free lists */
1537         for (i = 0; i < MS_BLOCK_TYPE_MAX; ++i) {
1538                 MSBlockInfo * volatile *free_blocks = free_block_lists [i];
1539                 int j;
1540                 for (j = 0; j < num_block_obj_sizes; ++j)
1541                         free_blocks [j] = NULL;
1542         }
1543
1544         sgen_workers_foreach (sgen_worker_clear_free_block_lists);
1545 }
1546
1547 static void sweep_finish (void);
1548
1549 /*
1550  * If `wait` is TRUE and the block is currently being checked, this function will wait until
1551  * the checking has finished.
1552  *
1553  * Returns whether the block is still there.  If `wait` is FALSE, the return value will not
1554  * be correct, i.e. must not be used.
1555  */
1556 static gboolean
1557 ensure_block_is_checked_for_sweeping (guint32 block_index, gboolean wait, gboolean *have_checked)
1558 {
1559         int count;
1560         gboolean have_live = FALSE;
1561         gboolean have_free = FALSE;
1562         int nused = 0;
1563         int block_state;
1564         int i;
1565         void *tagged_block;
1566         MSBlockInfo *block;
1567         volatile gpointer *block_slot = sgen_array_list_get_slot (&allocated_blocks, block_index);
1568
1569         SGEN_ASSERT (6, sweep_in_progress (), "Why do we call this function if there's no sweep in progress?");
1570
1571         if (have_checked)
1572                 *have_checked = FALSE;
1573
1574  retry:
1575         tagged_block = *(void * volatile *)block_slot;
1576         if (!tagged_block)
1577                 return FALSE;
1578
1579         if (BLOCK_IS_TAGGED_CHECKING (tagged_block)) {
1580                 if (!wait)
1581                         return FALSE;
1582                 /* FIXME: do this more elegantly */
1583                 g_usleep (100);
1584                 goto retry;
1585         }
1586
1587         if (SGEN_CAS_PTR (block_slot, BLOCK_TAG_CHECKING (tagged_block), tagged_block) != tagged_block)
1588                 goto retry;
1589
1590         block = BLOCK_UNTAG (tagged_block);
1591         block_state = block->state;
1592
1593         if (!sweep_in_progress ()) {
1594                 SGEN_ASSERT (6, block_state != BLOCK_STATE_SWEEPING && block_state != BLOCK_STATE_CHECKING, "Invalid block state.");
1595                 if (!lazy_sweep)
1596                         SGEN_ASSERT (6, block_state != BLOCK_STATE_NEED_SWEEPING, "Invalid block state.");
1597         }
1598
1599         switch (block_state) {
1600         case BLOCK_STATE_SWEPT:
1601         case BLOCK_STATE_NEED_SWEEPING:
1602         case BLOCK_STATE_SWEEPING:
1603                 goto done;
1604         case BLOCK_STATE_MARKING:
1605                 break;
1606         case BLOCK_STATE_CHECKING:
1607                 SGEN_ASSERT (0, FALSE, "We set the CHECKING bit - how can the state be CHECKING?");
1608                 goto done;
1609         default:
1610                 SGEN_ASSERT (0, FALSE, "Illegal block state");
1611                 break;
1612         }
1613
1614         SGEN_ASSERT (6, block->state == BLOCK_STATE_MARKING, "When we sweep, all blocks must start out marking.");
1615         set_block_state (block, BLOCK_STATE_CHECKING, BLOCK_STATE_MARKING);
1616
1617         if (have_checked)
1618                 *have_checked = TRUE;
1619
1620         block->has_pinned = block->pinned;
1621
1622         block->is_to_space = FALSE;
1623
1624         count = MS_BLOCK_FREE / block->obj_size;
1625
1626         if (block->cardtable_mod_union)
1627                 memset (block->cardtable_mod_union, 0, CARDS_PER_BLOCK);
1628
1629         /* Count marked objects in the block */
1630         for (i = 0; i < MS_NUM_MARK_WORDS; ++i)
1631                 nused += bitcount (block->mark_words [i]);
1632
1633         block->nused = nused;
1634         if (nused)
1635                 have_live = TRUE;
1636         if (nused < count)
1637                 have_free = TRUE;
1638
1639         if (have_live) {
1640                 int obj_size_index = block->obj_size_index;
1641                 gboolean has_pinned = block->has_pinned;
1642
1643                 set_block_state (block, BLOCK_STATE_NEED_SWEEPING, BLOCK_STATE_CHECKING);
1644
1645                 /*
1646                  * FIXME: Go straight to SWEPT if there are no free slots.  We need
1647                  * to set the free slot list to NULL, though, and maybe update some
1648                  * statistics.
1649                  */
1650                 if (!lazy_sweep)
1651                         sweep_block (block);
1652
1653                 if (!has_pinned) {
1654                         ++sweep_num_blocks [obj_size_index];
1655                         sweep_slots_used [obj_size_index] += nused;
1656                         sweep_slots_available [obj_size_index] += count;
1657                 }
1658
1659                 /*
1660                  * If there are free slots in the block, add
1661                  * the block to the corresponding free list.
1662                  */
1663                 if (have_free) {
1664                         MSBlockInfo * volatile *free_blocks = FREE_BLOCKS (block->pinned, block->has_references);
1665
1666                         if (!lazy_sweep)
1667                                 SGEN_ASSERT (6, block->free_list, "How do we not have a free list when there are free slots?");
1668
1669                         add_free_block (free_blocks, obj_size_index, block);
1670                 }
1671
1672                 /* FIXME: Do we need the heap boundaries while we do nursery collections? */
1673                 update_heap_boundaries_for_block (block);
1674         } else {
1675                 /*
1676                  * Blocks without live objects are removed from the
1677                  * block list and freed.
1678                  */
1679                 SGEN_ASSERT (6, block_index < allocated_blocks.next_slot, "How did the number of blocks shrink?");
1680                 SGEN_ASSERT (6, *block_slot == BLOCK_TAG_CHECKING (tagged_block), "How did the block move?");
1681
1682                 binary_protocol_empty (MS_BLOCK_OBJ (block, 0), (char*)MS_BLOCK_OBJ (block, count) - (char*)MS_BLOCK_OBJ (block, 0));
1683                 ms_free_block (block);
1684
1685                 SGEN_ATOMIC_ADD_P (num_major_sections, -1);
1686                 SGEN_ATOMIC_ADD_P (num_major_sections_freed_in_sweep, 1);
1687
1688                 tagged_block = NULL;
1689         }
1690
1691  done:
1692         /*
1693          * Once the block is written back without the checking bit other threads are
1694          * free to access it. Make sure the block state is visible before we write it
1695          * back.
1696          */
1697         mono_memory_write_barrier ();
1698         *block_slot = tagged_block;
1699         return !!tagged_block;
1700 }
1701
1702 static void
1703 sweep_blocks_job_func (void *thread_data_untyped, SgenThreadPoolJob *job)
1704 {
1705         volatile gpointer *slot;
1706         MSBlockInfo *bl;
1707
1708         SGEN_ARRAY_LIST_FOREACH_SLOT (&allocated_blocks, slot) {
1709                 bl = BLOCK_UNTAG (*slot);
1710                 if (bl)
1711                         sweep_block (bl);
1712         } SGEN_ARRAY_LIST_END_FOREACH_SLOT;
1713
1714         mono_memory_write_barrier ();
1715
1716         sweep_blocks_job = NULL;
1717 }
1718
1719 static void
1720 sweep_job_func (void *thread_data_untyped, SgenThreadPoolJob *job)
1721 {
1722         guint32 block_index;
1723         guint32 num_blocks = num_major_sections_before_sweep;
1724
1725         SGEN_ASSERT (0, sweep_in_progress (), "Sweep thread called with wrong state");
1726         SGEN_ASSERT (0, num_blocks <= allocated_blocks.next_slot, "How did we lose blocks?");
1727
1728         /*
1729          * We traverse the block array from high to low.  Nursery collections will have to
1730          * cooperate with the sweep thread to finish sweeping, and they will traverse from
1731          * low to high, to avoid constantly colliding on the same blocks.
1732          */
1733         for (block_index = allocated_blocks.next_slot; block_index-- > 0;) {
1734                 ensure_block_is_checked_for_sweeping (block_index, TRUE, NULL);
1735         }
1736
1737         while (!try_set_sweep_state (SWEEP_STATE_COMPACTING, SWEEP_STATE_SWEEPING)) {
1738                 /*
1739                  * The main GC thread is currently iterating over the block array to help us
1740                  * finish the sweep.  We have already finished, but we don't want to mess up
1741                  * that iteration, so we just wait for it.
1742                  */
1743                 g_usleep (100);
1744         }
1745
1746         if (SGEN_MAX_ASSERT_LEVEL >= 6) {
1747                 for (block_index = num_blocks; block_index < allocated_blocks.next_slot; ++block_index) {
1748                         MSBlockInfo *block = BLOCK_UNTAG (*sgen_array_list_get_slot (&allocated_blocks, block_index));
1749                         SGEN_ASSERT (6, block && block->state == BLOCK_STATE_SWEPT, "How did an unswept block get added while we were sweeping?");
1750                 }
1751         }
1752
1753         /*
1754          * Concurrently sweep all the blocks to reduce workload during minor
1755          * pauses where we need certain blocks to be swept. At the start of
1756          * the next major we need all blocks to be swept anyway.
1757          */
1758         if (concurrent_sweep && lazy_sweep) {
1759                 sweep_blocks_job = sgen_thread_pool_job_alloc ("sweep_blocks", sweep_blocks_job_func, sizeof (SgenThreadPoolJob));
1760                 sgen_thread_pool_job_enqueue (sweep_blocks_job);
1761         }
1762
1763         sweep_finish ();
1764
1765         sweep_job = NULL;
1766 }
1767
1768 static void
1769 sweep_finish (void)
1770 {
1771         mword used_slots_size = 0;
1772         int i;
1773
1774         for (i = 0; i < num_block_obj_sizes; ++i) {
1775                 float usage = (float)sweep_slots_used [i] / (float)sweep_slots_available [i];
1776                 if (sweep_num_blocks [i] > 5 && usage < evacuation_threshold) {
1777                         evacuate_block_obj_sizes [i] = TRUE;
1778                         /*
1779                         g_print ("slot size %d - %d of %d used\n",
1780                                         block_obj_sizes [i], slots_used [i], slots_available [i]);
1781                         */
1782                 } else {
1783                         evacuate_block_obj_sizes [i] = FALSE;
1784                 }
1785
1786                 used_slots_size += sweep_slots_used [i] * block_obj_sizes [i];
1787         }
1788
1789         sgen_memgov_major_post_sweep (used_slots_size);
1790
1791         set_sweep_state (SWEEP_STATE_SWEPT, SWEEP_STATE_COMPACTING);
1792         if (concurrent_sweep)
1793                 binary_protocol_concurrent_sweep_end (sgen_timestamp ());
1794 }
1795
1796 static void
1797 major_sweep (void)
1798 {
1799         set_sweep_state (SWEEP_STATE_SWEEPING, SWEEP_STATE_NEED_SWEEPING);
1800
1801         sweep_start ();
1802
1803         num_major_sections_before_sweep = num_major_sections;
1804         num_major_sections_freed_in_sweep = 0;
1805
1806         SGEN_ASSERT (0, !sweep_job, "We haven't finished the last sweep?");
1807         if (concurrent_sweep) {
1808                 sweep_job = sgen_thread_pool_job_alloc ("sweep", sweep_job_func, sizeof (SgenThreadPoolJob));
1809                 sgen_thread_pool_job_enqueue (sweep_job);
1810         } else {
1811                 sweep_job_func (NULL, NULL);
1812         }
1813 }
1814
1815 static gboolean
1816 major_have_swept (void)
1817 {
1818         return sweep_state == SWEEP_STATE_SWEPT;
1819 }
1820
1821 static int count_pinned_ref;
1822 static int count_pinned_nonref;
1823 static int count_nonpinned_ref;
1824 static int count_nonpinned_nonref;
1825
1826 static void
1827 count_nonpinned_callback (GCObject *obj, size_t size, void *data)
1828 {
1829         GCVTable vtable = SGEN_LOAD_VTABLE (obj);
1830
1831         if (SGEN_VTABLE_HAS_REFERENCES (vtable))
1832                 ++count_nonpinned_ref;
1833         else
1834                 ++count_nonpinned_nonref;
1835 }
1836
1837 static void
1838 count_pinned_callback (GCObject *obj, size_t size, void *data)
1839 {
1840         GCVTable vtable = SGEN_LOAD_VTABLE (obj);
1841
1842         if (SGEN_VTABLE_HAS_REFERENCES (vtable))
1843                 ++count_pinned_ref;
1844         else
1845                 ++count_pinned_nonref;
1846 }
1847
1848 static G_GNUC_UNUSED void
1849 count_ref_nonref_objs (void)
1850 {
1851         int total;
1852
1853         count_pinned_ref = 0;
1854         count_pinned_nonref = 0;
1855         count_nonpinned_ref = 0;
1856         count_nonpinned_nonref = 0;
1857
1858         major_iterate_objects (ITERATE_OBJECTS_SWEEP_NON_PINNED, count_nonpinned_callback, NULL);
1859         major_iterate_objects (ITERATE_OBJECTS_SWEEP_PINNED, count_pinned_callback, NULL);
1860
1861         total = count_pinned_nonref + count_nonpinned_nonref + count_pinned_ref + count_nonpinned_ref;
1862
1863         g_print ("ref: %d pinned %d non-pinned   non-ref: %d pinned %d non-pinned  --  %.1f\n",
1864                         count_pinned_ref, count_nonpinned_ref,
1865                         count_pinned_nonref, count_nonpinned_nonref,
1866                         (count_pinned_nonref + count_nonpinned_nonref) * 100.0 / total);
1867 }
1868
1869 static int
1870 ms_calculate_block_obj_sizes (double factor, int *arr)
1871 {
1872         double target_size;
1873         int num_sizes = 0;
1874         int last_size = 0;
1875
1876         /*
1877          * Have every possible slot size starting with the minimal
1878          * object size up to and including four times that size.  Then
1879          * proceed by increasing geometrically with the given factor.
1880          */
1881
1882         for (int size = SGEN_CLIENT_MINIMUM_OBJECT_SIZE; size <= 4 * SGEN_CLIENT_MINIMUM_OBJECT_SIZE; size += SGEN_ALLOC_ALIGN) {
1883                 if (arr)
1884                         arr [num_sizes] = size;
1885                 ++num_sizes;
1886                 last_size = size;
1887         }
1888         target_size = (double)last_size;
1889
1890         do {
1891                 int target_count = (int)floor (MS_BLOCK_FREE / target_size);
1892                 int size = MIN ((MS_BLOCK_FREE / target_count) & ~(SGEN_ALLOC_ALIGN - 1), SGEN_MAX_SMALL_OBJ_SIZE);
1893
1894                 if (size != last_size) {
1895                         if (arr)
1896                                 arr [num_sizes] = size;
1897                         ++num_sizes;
1898                         last_size = size;
1899                 }
1900
1901                 target_size *= factor;
1902         } while (last_size < SGEN_MAX_SMALL_OBJ_SIZE);
1903
1904         return num_sizes;
1905 }
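
/*
 * Worked example (illustrative; assumes SGEN_CLIENT_MINIMUM_OBJECT_SIZE
 * is 16 and SGEN_ALLOC_ALIGN is 8, typical 64-bit values): the dense
 * part of the series is 16, 24, 32, 40, 48, 56, 64; after that, slot
 * sizes grow geometrically by `factor`, each candidate rounded down so
 * that a whole number of slots fits into MS_BLOCK_FREE, until
 * SGEN_MAX_SMALL_OBJ_SIZE is reached.
 */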
1906
1907 /* only valid during minor collections */
1908 static mword old_num_major_sections;
1909
1910 static void
1911 major_start_nursery_collection (void)
1912 {
1913 #ifdef MARKSWEEP_CONSISTENCY_CHECK
1914         consistency_check ();
1915 #endif
1916
1917         old_num_major_sections = num_major_sections;
1918 }
1919
1920 static void
1921 major_finish_nursery_collection (void)
1922 {
1923 #ifdef MARKSWEEP_CONSISTENCY_CHECK
1924         consistency_check ();
1925 #endif
1926 }
1927
1928 static int
1929 block_usage_comparer (const void *bl1, const void *bl2)
1930 {
1931         const gint16 nused1 = (*(MSBlockInfo**)bl1)->nused;
1932         const gint16 nused2 = (*(MSBlockInfo**)bl2)->nused;
1933
1934         return nused2 - nused1;
1935 }
1936
1937 static void
1938 sgen_evacuation_freelist_blocks (MSBlockInfo * volatile *block_list, int size_index)
1939 {
1940         MSBlockInfo **evacuated_blocks;
1941         size_t index = 0, count, num_blocks = 0, num_used = 0;
1942         MSBlockInfo *info;
1943         MSBlockInfo * volatile *prev;
1944
1945         for (info = *block_list; info != NULL; info = info->next_free) {
1946                 num_blocks++;
1947                 num_used += info->nused;
1948         }
1949
1950         /*
1951          * We have a set of blocks in the freelist which will be evacuated. Instead
1952          * of evacuating all of the blocks into new ones, we traverse the freelist
1953          * sorting it by the number of occupied slots, evacuating the objects from
1954          * blocks with fewer used slots into fuller blocks.
1955          *
1956          * The number of used slots is set at the end of the previous sweep. Since
1957          * allocation sequentially unlinks slots, only the block at the head of the
1958          * freelist can have changed; for every other block on the freelist the
1959          * number of used slots is still the same as at the end of the previous sweep.
1960          */
1961         evacuated_blocks = (MSBlockInfo**)sgen_alloc_internal_dynamic (sizeof (MSBlockInfo*) * num_blocks, INTERNAL_MEM_TEMPORARY, TRUE);
1962
1963         for (info = *block_list; info != NULL; info = info->next_free) {
1964                 evacuated_blocks [index++] = info;
1965         }
1966
1967         SGEN_ASSERT (0, num_blocks == index, "Why did the freelist change?");
1968
1969         sgen_qsort (evacuated_blocks, num_blocks, sizeof (gpointer), block_usage_comparer);
1970
1971         /*
1972          * Form a new freelist with the fullest blocks. These blocks will also be
1973          * marked as to_space so we don't evacuate from them.
1974          */
1975         count = MS_BLOCK_FREE / block_obj_sizes [size_index];
1976         prev = block_list;
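        /* ceil (num_used / count) of the fullest blocks suffice to hold all used slots */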
1977         for (index = 0; index < (num_used + count - 1) / count; index++) {
1978                 SGEN_ASSERT (0, index < num_blocks, "Why do we need more blocks for compaction than we already had?");
1979                 info = evacuated_blocks [index];
1980                 info->is_to_space = TRUE;
1981                 *prev = info;
1982                 prev = &info->next_free;
1983         }
1984         *prev = NULL;
1985
1986         sgen_free_internal_dynamic (evacuated_blocks, sizeof (MSBlockInfo*) * num_blocks, INTERNAL_MEM_TEMPORARY);
1987 }
1988
1989 static void
1990 major_start_major_collection (void)
1991 {
1992         MSBlockInfo *block;
1993         int i;
1994
1995         major_finish_sweep_checking ();
1996
1997         /*
1998          * Clear the free lists for block sizes where we do evacuation.  For those block
1999          * sizes we will have to allocate new blocks.
2000          */
2001         for (i = 0; i < num_block_obj_sizes; ++i) {
2002                 if (!evacuate_block_obj_sizes [i])
2003                         continue;
2004
2005                 binary_protocol_evacuating_blocks (block_obj_sizes [i]);
2006
2007                 sgen_evacuation_freelist_blocks (&free_block_lists [0][i], i);
2008                 sgen_evacuation_freelist_blocks (&free_block_lists [MS_BLOCK_FLAG_REFS][i], i);
2009         }
2010
2011         if (lazy_sweep && concurrent_sweep) {
2012                 /*
2013                  * sweep_blocks_job is created before sweep_finish, which we wait for above
2014                  * (major_finish_sweep_checking). After the end of sweep, if we don't have
2015                  * sweep_blocks_job set, it means that it has already been run.
2016                  */
2017                 SgenThreadPoolJob *job = sweep_blocks_job;
2018                 if (job)
2019                         sgen_thread_pool_job_wait (job);
2020         }
2021
2022         if (lazy_sweep && !concurrent_sweep)
2023                 binary_protocol_sweep_begin (GENERATION_OLD, TRUE);
2024         /* Sweep all unswept blocks and set them to MARKING */
2025         FOREACH_BLOCK_NO_LOCK (block) {
2026                 if (lazy_sweep && !concurrent_sweep)
2027                         sweep_block (block);
2028                 SGEN_ASSERT (0, block->state == BLOCK_STATE_SWEPT, "All blocks must be swept when we're pinning.");
2029                 set_block_state (block, BLOCK_STATE_MARKING, BLOCK_STATE_SWEPT);
2030                 /*
2031                  * Swept blocks that have a null free_list are full. Evacuation is not
2032                  * effective on these blocks since we expect them to have high usage anyway,
2033                  * given that the survival rate for majors is relatively high.
2034                  */
2035                 if (evacuate_block_obj_sizes [block->obj_size_index] && !block->free_list)
2036                         block->is_to_space = TRUE;
2037         } END_FOREACH_BLOCK_NO_LOCK;
2038         if (lazy_sweep && !concurrent_sweep)
2039                 binary_protocol_sweep_end (GENERATION_OLD, TRUE);
2040
2041         set_sweep_state (SWEEP_STATE_NEED_SWEEPING, SWEEP_STATE_SWEPT);
2042 }
2043
2044 static void
2045 major_finish_major_collection (ScannedObjectCounts *counts)
2046 {
2047 #ifdef SGEN_HEAVY_BINARY_PROTOCOL
2048         if (binary_protocol_is_enabled ()) {
2049                 counts->num_scanned_objects = scanned_objects_list.next_slot;
2050
2051                 sgen_pointer_queue_sort_uniq (&scanned_objects_list);
2052                 counts->num_unique_scanned_objects = scanned_objects_list.next_slot;
2053
2054                 sgen_pointer_queue_clear (&scanned_objects_list);
2055         }
2056 #endif
2057 }
2058
2059 static int
2060 compare_pointers (const void *va, const void *vb)
{
2061         char *a = *(char**)va, *b = *(char**)vb;
2062         if (a < b)
2063                 return -1;
2064         if (a > b)
2065                 return 1;
2066         return 0;
2067 }
2068
2069 /*
2070  * This is called with sweep completed and the world stopped.
2071  */
2072 static void
2073 major_free_swept_blocks (size_t section_reserve)
2074 {
2075         SGEN_ASSERT (0, sweep_state == SWEEP_STATE_SWEPT, "Sweeping must have finished before freeing blocks");
2076
2077 #ifdef TARGET_WIN32
2078         /*
2079          * sgen_free_os_memory () asserts in mono_vfree () because Windows doesn't like
2080          * freeing the middle of a VirtualAlloc ()-ed block.
2081          */
2082         return;
2083 #endif
2084
2085         {
2086                 int i, num_empty_blocks_orig, num_blocks, arr_length;
2087                 void *block;
2088                 void **empty_block_arr;
2089                 void **rebuild_next;
2090
2091                 if (num_empty_blocks <= section_reserve)
2092                         return;
2093                 SGEN_ASSERT (0, num_empty_blocks > 0, "section reserve can't be negative");
2094
2095                 num_empty_blocks_orig = num_empty_blocks;
2096                 empty_block_arr = (void**)sgen_alloc_internal_dynamic (sizeof (void*) * num_empty_blocks_orig,
2097                                 INTERNAL_MEM_MS_BLOCK_INFO_SORT, FALSE);
2098                 if (!empty_block_arr)
2099                         goto fallback;
2100
2101                 i = 0;
2102                 for (block = empty_blocks; block; block = *(void**)block)
2103                         empty_block_arr [i++] = block;
2104                 SGEN_ASSERT (0, i == num_empty_blocks, "empty block count wrong");
2105
2106                 sgen_qsort (empty_block_arr, num_empty_blocks, sizeof (void*), compare_pointers);
2107
2108                 /*
2109                  * We iterate over the free blocks, trying to find MS_BLOCK_ALLOC_NUM
2110                  * contiguous ones.  If we do, we free them.  If that's not enough to get to
2111                  * section_reserve, we halve the number of contiguous blocks we're looking
2112                  * for and have another go, until we're done with looking for pairs of
2113                  * blocks, at which point we give up and go to the fallback.
2114                  */
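                /* E.g. with MS_BLOCK_ALLOC_NUM == 32 we look for runs of 32, then 16, 8, 4 and finally 2 contiguous blocks. */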
2115                 arr_length = num_empty_blocks_orig;
2116                 num_blocks = MS_BLOCK_ALLOC_NUM;
2117                 while (num_empty_blocks > section_reserve && num_blocks > 1) {
2118                         int first = -1;
2119                         int dest = 0;
2120
2122                         for (i = 0; i < arr_length; ++i) {
2123                                 int d = dest;
2124                                 void *block = empty_block_arr [i];
2125                                 SGEN_ASSERT (6, block, "we're not shifting correctly");
2126                                 if (i != dest) {
2127                                         empty_block_arr [dest] = block;
2128                                         /*
2129                                          * This is not strictly necessary, but we're
2130                                          * cautious.
2131                                          */
2132                                         empty_block_arr [i] = NULL;
2133                                 }
2134                                 ++dest;
2135
2136                                 if (first < 0) {
2137                                         first = d;
2138                                         continue;
2139                                 }
2140
2141                                 SGEN_ASSERT (6, first >= 0 && d > first, "algorithm is wrong");
2142
2143                                 if ((char*)block != ((char*)empty_block_arr [d-1]) + MS_BLOCK_SIZE) {
2144                                         first = d;
2145                                         continue;
2146                                 }
2147
2148                                 if (d + 1 - first == num_blocks) {
2149                                         /*
2150                                          * We found num_blocks contiguous blocks.  Free them
2151                                          * and null their array entries.  As an optimization
2152                                          * we could, instead of nulling the entries, shift
2153                                          * the following entries over to the left, while
2154                                          * we're iterating.
2155                                          */
2156                                         int j;
2157                                         sgen_free_os_memory (empty_block_arr [first], MS_BLOCK_SIZE * num_blocks, SGEN_ALLOC_HEAP, MONO_MEM_ACCOUNT_SGEN_MARKSWEEP);
2158                                         for (j = first; j <= d; ++j)
2159                                                 empty_block_arr [j] = NULL;
2160                                         dest = first;
2161                                         first = -1;
2162
2163                                         num_empty_blocks -= num_blocks;
2164
2165                                         stat_major_blocks_freed += num_blocks;
2166                                         if (num_blocks == MS_BLOCK_ALLOC_NUM)
2167                                                 stat_major_blocks_freed_ideal += num_blocks;
2168                                         else
2169                                                 stat_major_blocks_freed_less_ideal += num_blocks;
2170
2171                                 }
2172                         }
2173
2174                         SGEN_ASSERT (6, dest <= i && dest <= arr_length, "array length is off");
2175                         arr_length = dest;
2176                         SGEN_ASSERT (6, arr_length == num_empty_blocks, "array length is off");
2177
2178                         num_blocks >>= 1;
2179                 }
2180
2181                 /* rebuild empty_blocks free list */
2182                 rebuild_next = (void**)&empty_blocks;
2183                 for (i = 0; i < arr_length; ++i) {
2184                         void *block = empty_block_arr [i];
2185                         SGEN_ASSERT (6, block, "we're missing blocks");
2186                         *rebuild_next = block;
2187                         rebuild_next = (void**)block;
2188                 }
2189                 *rebuild_next = NULL;
2190
2191                 /* free array */
2192                 sgen_free_internal_dynamic (empty_block_arr, sizeof (void*) * num_empty_blocks_orig, INTERNAL_MEM_MS_BLOCK_INFO_SORT);
2193         }
2194
2195         SGEN_ASSERT (0, num_empty_blocks >= 0, "we freed more blocks than we had in the first place?");
2196
2197  fallback:
2198         /*
2199          * This is our threshold.  If there are not more empty than used blocks, we won't
2200          * release non-contiguous blocks, for fear of fragmenting the address space.
2201          */
2202         if (num_empty_blocks <= num_major_sections)
2203                 return;
2204
2205         while (num_empty_blocks > section_reserve) {
2206                 void *next = *(void**)empty_blocks;
2207                 sgen_free_os_memory (empty_blocks, MS_BLOCK_SIZE, SGEN_ALLOC_HEAP, MONO_MEM_ACCOUNT_SGEN_MARKSWEEP);
2208                 empty_blocks = next;
2209                 /*
2210                  * Need not be atomic because this is running
2211                  * single-threaded.
2212                  */
2213                 --num_empty_blocks;
2214
2215                 ++stat_major_blocks_freed;
2216                 ++stat_major_blocks_freed_individual;
2217         }
2218 }
2219
2220 static void
2221 major_pin_objects (SgenGrayQueue *queue)
2222 {
2223         MSBlockInfo *block;
2224
2225         FOREACH_BLOCK_NO_LOCK (block) {
2226                 size_t first_entry, last_entry;
2227                 SGEN_ASSERT (6, block_is_swept_or_marking (block), "All blocks must be swept when we're pinning.");
2228                 sgen_find_optimized_pin_queue_area (MS_BLOCK_FOR_BLOCK_INFO (block) + MS_BLOCK_SKIP, MS_BLOCK_FOR_BLOCK_INFO (block) + MS_BLOCK_SIZE,
2229                                 &first_entry, &last_entry);
2230                 mark_pinned_objects_in_block (block, first_entry, last_entry, queue);
2231         } END_FOREACH_BLOCK_NO_LOCK;
2232 }
2233
2234 static void
2235 major_init_to_space (void)
2236 {
2237 }
2238
2239 static void
2240 major_report_pinned_memory_usage (void)
2241 {
2242         g_assert_not_reached ();
2243 }
2244
2245 static gint64
2246 major_get_used_size (void)
2247 {
2248         gint64 size = 0;
2249         MSBlockInfo *block;
2250
2251         /*
2252          * We're holding the GC lock, but the sweep thread might be running.  Make sure it's
2253          * finished, then we can iterate over the block array.
2254          */
2255         major_finish_sweep_checking ();
2256
2257         FOREACH_BLOCK_NO_LOCK (block) {
2258                 int count = MS_BLOCK_FREE / block->obj_size;
2259                 void **iter;
2260                 size += count * block->obj_size;
2261                 for (iter = block->free_list; iter; iter = (void**)*iter)
2262                         size -= block->obj_size;
2263         } END_FOREACH_BLOCK_NO_LOCK;
2264
2265         return size;
2266 }
2267
2268 /* FIXME: return number of bytes, not of sections */
2269 static size_t
2270 get_num_major_sections (void)
2271 {
2272         return num_major_sections;
2273 }
2274
2275 /*
2276  * Returns the number of bytes in blocks that were present when the last sweep was
2277  * initiated, and were not freed during the sweep.  They are the basis for calculating the
2278  * allowance.
2279  */
2280 static size_t
2281 get_bytes_survived_last_sweep (void)
2282 {
2283         SGEN_ASSERT (0, sweep_state == SWEEP_STATE_SWEPT, "Can only query unswept sections after sweep");
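        /* Illustrative: with the default 16KB blocks, 1000 blocks at sweep start minus 200 freed gives 800 * 16KB = 12.5MB survived. */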
2284         return (num_major_sections_before_sweep - num_major_sections_freed_in_sweep) * MS_BLOCK_SIZE;
2285 }
2286
2287 static gboolean
2288 major_handle_gc_param (const char *opt)
2289 {
2290         if (g_str_has_prefix (opt, "evacuation-threshold=")) {
2291                 const char *arg = strchr (opt, '=') + 1;
2292                 int percentage = atoi (arg);
2293                 if (percentage < 0 || percentage > 100) {
2294                         fprintf (stderr, "evacuation-threshold must be an integer in the range 0-100.\n");
2295                         exit (1);
2296                 }
2297                 evacuation_threshold = (float)percentage / 100.0f;
2298                 return TRUE;
2299         } else if (!strcmp (opt, "lazy-sweep")) {
2300                 lazy_sweep = TRUE;
2301                 return TRUE;
2302         } else if (!strcmp (opt, "no-lazy-sweep")) {
2303                 lazy_sweep = FALSE;
2304                 return TRUE;
2305         } else if (!strcmp (opt, "concurrent-sweep")) {
2306                 concurrent_sweep = TRUE;
2307                 return TRUE;
2308         } else if (!strcmp (opt, "no-concurrent-sweep")) {
2309                 concurrent_sweep = FALSE;
2310                 return TRUE;
2311         }
2312
2313         return FALSE;
2314 }
2315
2316 static void
2317 major_print_gc_param_usage (void)
2318 {
2319         fprintf (stderr,
2320                         ""
2321                         "  evacuation-threshold=P (where P is a percentage, an integer in 0-100)\n"
2322                         "  (no-)lazy-sweep\n"
2323                         "  (no-)concurrent-sweep\n"
2324                         );
2325 }
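
/*
 * These options are passed via the MONO_GC_PARAMS environment variable,
 * e.g. (illustrative):
 *
 *   MONO_GC_PARAMS=major=marksweep-conc,evacuation-threshold=40,no-lazy-sweep
 */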
2326
2327 /*
2328  * This callback is used to clear cards, move cards to the shadow table and do counting.
2329  */
2330 static void
2331 major_iterate_block_ranges (sgen_cardtable_block_callback callback)
2332 {
2333         MSBlockInfo *block;
2334         gboolean has_references;
2335
2336         FOREACH_BLOCK_HAS_REFERENCES_NO_LOCK (block, has_references) {
2337                 if (has_references)
2338                         callback ((mword)MS_BLOCK_FOR_BLOCK_INFO (block), MS_BLOCK_SIZE);
2339         } END_FOREACH_BLOCK_NO_LOCK;
2340 }
2341
2342 static void
2343 major_iterate_live_block_ranges (sgen_cardtable_block_callback callback)
2344 {
2345         MSBlockInfo *block;
2346         gboolean has_references;
2347
2348         major_finish_sweep_checking ();
2349         FOREACH_BLOCK_HAS_REFERENCES_NO_LOCK (block, has_references) {
2350                 if (has_references)
2351                         callback ((mword)MS_BLOCK_FOR_BLOCK_INFO (block), MS_BLOCK_SIZE);
2352         } END_FOREACH_BLOCK_NO_LOCK;
2353 }
2354
2355 #ifdef HEAVY_STATISTICS
2356 extern guint64 marked_cards;
2357 extern guint64 scanned_cards;
2358 extern guint64 scanned_objects;
2359 extern guint64 remarked_cards;
2360 #endif
2361
2362 #define CARD_WORDS_PER_BLOCK (CARDS_PER_BLOCK / SIZEOF_VOID_P)
2363 /*
2364  * MS blocks are 16K aligned.
2365  * Cardtables are 4K aligned, at least.
2366  * This means that the cardtable of a given block is 32 bytes aligned.
2367  */
2368 static guint8*
2369 initial_skip_card (guint8 *card_data)
2370 {
2371         mword *cards = (mword*)card_data;
2372         mword card;
2373         int i;
2374         for (i = 0; i < CARD_WORDS_PER_BLOCK; ++i) {
2375                 card = cards [i];
2376                 if (card)
2377                         break;
2378         }
2379
2380         if (i == CARD_WORDS_PER_BLOCK)
2381                 return card_data + CARDS_PER_BLOCK;
2382
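/*
 * Each mword covers SIZEOF_VOID_P card bytes.  __builtin_ffs* returns the
 * 1-based index of the lowest set bit; dividing the 0-based bit index by 8
 * yields the offset of the first non-zero card byte within the word.
 */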
2383 #if defined(__i386__) && defined(__GNUC__)
2384         return card_data + i * 4 +  (__builtin_ffs (card) - 1) / 8;
2385 #elif defined(__x86_64__) && defined(__GNUC__)
2386         return card_data + i * 8 +  (__builtin_ffsll (card) - 1) / 8;
2387 #elif defined(__s390x__) && defined(__GNUC__)
2388         return card_data + i * 8 +  (__builtin_ffsll (GUINT64_TO_LE(card)) - 1) / 8;
2389 #else
2390         for (i = i * SIZEOF_VOID_P; i < CARDS_PER_BLOCK; ++i) {
2391                 if (card_data [i])
2392                         return &card_data [i];
2393         }
2394         return card_data;
2395 #endif
2396 }
2397
2398 #define MS_BLOCK_OBJ_INDEX_FAST(o,b,os) (((char*)(o) - ((b) + MS_BLOCK_SKIP)) / (os))
2399 #define MS_BLOCK_OBJ_FAST(b,os,i)                       ((b) + MS_BLOCK_SKIP + (os) * (i))
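/*
 * A slot is allocated iff its first word holds a vtable pointer, which
 * necessarily points outside the block; free slots hold either NULL or a
 * free-list link into the same block.
 */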
2400 #define MS_OBJ_ALLOCED_FAST(o,b)                (*(void**)(o) && (*(char**)(o) < (b) || *(char**)(o) >= (b) + MS_BLOCK_SIZE))
2401
2402 static void
2403 scan_card_table_for_block (MSBlockInfo *block, CardTableScanType scan_type, ScanCopyContext ctx)
2404 {
2405         SgenGrayQueue *queue = ctx.queue;
2406         ScanObjectFunc scan_func = ctx.ops->scan_object;
2407 #ifndef SGEN_HAVE_OVERLAPPING_CARDS
2408         guint8 cards_copy [CARDS_PER_BLOCK];
2409 #endif
2410         guint8 cards_preclean [CARDS_PER_BLOCK];
2411         gboolean small_objects;
2412         int block_obj_size;
2413         char *block_start;
2414         guint8 *card_data, *card_base;
2415         guint8 *card_data_end;
2416         char *scan_front = NULL;
2417
2418         /* The concurrent mark doesn't enter evacuating blocks */
2419         if (scan_type == CARDTABLE_SCAN_MOD_UNION_PRECLEAN && major_block_is_evacuating (block))
2420                 return;
2421
2422         block_obj_size = block->obj_size;
2423         small_objects = block_obj_size < CARD_SIZE_IN_BYTES;
2424
2425         block_start = MS_BLOCK_FOR_BLOCK_INFO (block);
2426
2427         /*
2428          * This is safe in face of card aliasing for the following reason:
2429          *
2430          * Major blocks are 16k aligned, or 32 cards aligned.
2431          * Card aliasing happens in powers of two, so as long as major blocks are aligned to their
2432          * sizes, they won't overflow the cardtable overlap modulus.
2433          */
2434         if (scan_type & CARDTABLE_SCAN_MOD_UNION) {
2435                 card_data = card_base = block->cardtable_mod_union;
2436                 /*
2437                  * This happens when the nursery collection that precedes finishing
2438                  * the concurrent collection allocates new major blocks.
2439                  */
2440                 if (!card_data)
2441                         return;
2442
2443                 if (scan_type == CARDTABLE_SCAN_MOD_UNION_PRECLEAN) {
2444                         sgen_card_table_preclean_mod_union (card_data, cards_preclean, CARDS_PER_BLOCK);
2445                         card_data = card_base = cards_preclean;
2446                 }
2447         } else {
2448 #ifdef SGEN_HAVE_OVERLAPPING_CARDS
2449                 card_data = card_base = sgen_card_table_get_card_scan_address ((mword)block_start);
2450 #else
2451                 if (!sgen_card_table_get_card_data (cards_copy, (mword)block_start, CARDS_PER_BLOCK))
2452                         return;
2453                 card_data = card_base = cards_copy;
2454 #endif
2455         }
2456         card_data_end = card_data + CARDS_PER_BLOCK;
2457
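        /* Skip the cards spanned entirely by the block header. */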
2458         card_data += MS_BLOCK_SKIP >> CARD_BITS;
2459
2460         card_data = initial_skip_card (card_data);
2461         while (card_data < card_data_end) {
2462                 size_t card_index, first_object_index;
2463                 char *start;
2464                 char *end;
2465                 char *first_obj, *obj;
2466
2467                 HEAVY_STAT (++scanned_cards);
2468
2469                 if (!*card_data) {
2470                         ++card_data;
2471                         continue;
2472                 }
2473
2474                 card_index = card_data - card_base;
2475                 start = (char*)(block_start + card_index * CARD_SIZE_IN_BYTES);
2476                 end = start + CARD_SIZE_IN_BYTES;
2477
2478                 if (!block_is_swept_or_marking (block))
2479                         sweep_block (block);
2480
2481                 HEAVY_STAT (++marked_cards);
2482
2483                 if (small_objects)
2484                         sgen_card_table_prepare_card_for_scanning (card_data);
2485
2486                 /*
2487                  * If the card we're looking at starts at or in the block header, we
2488                  * must start at the first object in the block, without calculating
2489                  * the index of the object we're hypothetically starting at, because
2490                  * it would be negative.
2491                  */
2492                 if (card_index <= (MS_BLOCK_SKIP >> CARD_BITS))
2493                         first_object_index = 0;
2494                 else
2495                         first_object_index = MS_BLOCK_OBJ_INDEX_FAST (start, block_start, block_obj_size);
2496
2497                 obj = first_obj = (char*)MS_BLOCK_OBJ_FAST (block_start, block_obj_size, first_object_index);
2498
2499                 binary_protocol_card_scan (first_obj, end - first_obj);
2500
2501                 while (obj < end) {
2502                         if (obj < scan_front || !MS_OBJ_ALLOCED_FAST (obj, block_start))
2503                                 goto next_object;
2504
2505                         if (scan_type & CARDTABLE_SCAN_MOD_UNION) {
2506                                 /* FIXME: do this more efficiently */
2507                                 int w, b;
2508                                 MS_CALC_MARK_BIT (w, b, obj);
2509                                 if (!MS_MARK_BIT (block, w, b))
2510                                         goto next_object;
2511                         }
2512
2513                         GCObject *object = (GCObject*)obj;
2514
2515                         if (small_objects) {
2516                                 HEAVY_STAT (++scanned_objects);
2517                                 scan_func (object, sgen_obj_get_descriptor (object), queue);
2518                         } else {
2519                                 size_t offset = sgen_card_table_get_card_offset (obj, block_start);
2520                                 sgen_cardtable_scan_object (object, block_obj_size, card_base + offset, ctx);
2521                         }
2522                 next_object:
2523                         obj += block_obj_size;
2524                         g_assert (scan_front <= obj);
2525                         scan_front = obj;
2526                 }
2527
2528                 HEAVY_STAT (if (*card_data) ++remarked_cards);
2529
2530                 if (small_objects)
2531                         ++card_data;
2532                 else
2533                         card_data = card_base + sgen_card_table_get_card_offset (obj, block_start);
2534         }
2535 }
2536
2537 static void
2538 major_scan_card_table (CardTableScanType scan_type, ScanCopyContext ctx, int job_index, int job_split_count)
2539 {
2540         MSBlockInfo *block;
2541         gboolean has_references, was_sweeping, skip_scan;
2542
2543         if (!concurrent_mark)
2544                 g_assert (scan_type == CARDTABLE_SCAN_GLOBAL);
2545
2546         if (scan_type != CARDTABLE_SCAN_GLOBAL)
2547                 SGEN_ASSERT (0, !sweep_in_progress (), "Sweep should be finished when we scan mod union card table");
2548         was_sweeping = sweep_in_progress ();
2549
2550         binary_protocol_major_card_table_scan_start (sgen_timestamp (), scan_type & CARDTABLE_SCAN_MOD_UNION);
2551         FOREACH_BLOCK_HAS_REFERENCES_NO_LOCK (block, has_references) {
2552                 if (__index % job_split_count != job_index)
2553                         continue;
2554 #ifdef PREFETCH_CARDS
2555                 int prefetch_index = __index + 6 * job_split_count;
2556                 if (prefetch_index < allocated_blocks.next_slot) {
2557                         MSBlockInfo *prefetch_block = BLOCK_UNTAG (*sgen_array_list_get_slot (&allocated_blocks, prefetch_index));
2558                         PREFETCH_READ (prefetch_block);
2559                         if (scan_type == CARDTABLE_SCAN_GLOBAL) {
2560                                 guint8 *prefetch_cards = sgen_card_table_get_card_scan_address ((mword)MS_BLOCK_FOR_BLOCK_INFO (prefetch_block));
2561                                 PREFETCH_WRITE (prefetch_cards);
2562                                 PREFETCH_WRITE (prefetch_cards + 32);
2563                         }
2564                 }
2565 #endif
2566
2567                 if (!has_references)
2568                         continue;
2569                 skip_scan = FALSE;
2570
2571                 if (scan_type == CARDTABLE_SCAN_GLOBAL) {
2572                         gpointer *card_start = (gpointer*) sgen_card_table_get_card_scan_address ((mword)MS_BLOCK_FOR_BLOCK_INFO (block));
2573                         gboolean has_dirty_cards = FALSE;
2574                         int i;
2575                         for (i = 0; i < CARDS_PER_BLOCK / sizeof (gpointer); i++) {
2576                                 if (card_start [i]) {
2577                                         has_dirty_cards = TRUE;
2578                                         break;
2579                                 }
2580                         }
2581                         if (!has_dirty_cards) {
2582                                 skip_scan = TRUE;
2583                         } else {
2584                                 /*
2585                                  * After the start of a concurrent collection, blocks change state to
2586                                  * marking; we should not sweep them in that case, and we can't race with
2587                                  * sweep start since we are in a nursery collection. Also avoid CAS-ing the block slot once the sweep has finished.
2588                                  */
2589                                 if (sweep_in_progress ()) {
2590                                         skip_scan = !ensure_block_is_checked_for_sweeping (__index, TRUE, NULL);
2591                                 } else if (was_sweeping) {
2592                                         /* Recheck in case sweep finished after dereferencing the slot */
2593                                         skip_scan = *sgen_array_list_get_slot (&allocated_blocks, __index) == 0;
2594                                 }
2595                         }
2596                 }
2597                 if (!skip_scan)
2598                         scan_card_table_for_block (block, scan_type, ctx);
2599         } END_FOREACH_BLOCK_NO_LOCK;
2600         binary_protocol_major_card_table_scan_end (sgen_timestamp (), scan_type & CARDTABLE_SCAN_MOD_UNION);
2601 }
2602
2603 static void
2604 major_count_cards (long long *num_total_cards, long long *num_marked_cards)
2605 {
2606         MSBlockInfo *block;
2607         gboolean has_references;
2608         long long total_cards = 0;
2609         long long marked_cards = 0;
2610
2611         if (sweep_in_progress ()) {
2612                 *num_total_cards = -1;
2613                 *num_marked_cards = -1;
2614                 return;
2615         }
2616
2617         FOREACH_BLOCK_HAS_REFERENCES_NO_LOCK (block, has_references) {
2618                 guint8 *cards = sgen_card_table_get_card_scan_address ((mword) MS_BLOCK_FOR_BLOCK_INFO (block));
2619                 int i;
2620
2621                 if (!has_references)
2622                         continue;
2623
2624                 total_cards += CARDS_PER_BLOCK;
2625                 for (i = 0; i < CARDS_PER_BLOCK; ++i) {
2626                         if (cards [i])
2627                                 ++marked_cards;
2628                 }
2629         } END_FOREACH_BLOCK_NO_LOCK;
2630
2631         *num_total_cards = total_cards;
2632         *num_marked_cards = marked_cards;
2633 }
2634
2635 static void
2636 update_cardtable_mod_union (void)
2637 {
2638         MSBlockInfo *block;
2639
2640         FOREACH_BLOCK_NO_LOCK (block) {
2641                 gpointer *card_start = (gpointer*) sgen_card_table_get_card_address ((mword)MS_BLOCK_FOR_BLOCK_INFO (block));
2642                 gboolean has_dirty_cards = FALSE;
2643                 int i;
2644                 for (i = 0; i < CARDS_PER_BLOCK / sizeof (gpointer); i++) {
2645                         if (card_start [i]) {
2646                                 has_dirty_cards = TRUE;
2647                                 break;
2648                         }
2649                 }
2650                 if (has_dirty_cards) {
2651                         size_t num_cards;
2652                         guint8 *mod_union = get_cardtable_mod_union_for_block (block, TRUE);
2653                         sgen_card_table_update_mod_union (mod_union, MS_BLOCK_FOR_BLOCK_INFO (block), MS_BLOCK_SIZE, &num_cards);
2654                         SGEN_ASSERT (6, num_cards == CARDS_PER_BLOCK, "Number of cards calculation is wrong");
2655                 }
2656         } END_FOREACH_BLOCK_NO_LOCK;
2657 }
2658
2659 #undef pthread_create
2660
2661 static void
2662 post_param_init (SgenMajorCollector *collector)
2663 {
2664         collector->sweeps_lazily = lazy_sweep;
2665         collector->needs_thread_pool = concurrent_mark || concurrent_sweep;
2666 }
2667
2668 /* We are guaranteed to be called by the worker in question */
2669 static void
2670 sgen_worker_init_callback (gpointer worker_untyped)
2671 {
2672         int i;
2673         WorkerData *worker = (WorkerData*) worker_untyped;
2674         MSBlockInfo ***worker_free_blocks = (MSBlockInfo ***) sgen_alloc_internal_dynamic (sizeof (MSBlockInfo**) * MS_BLOCK_TYPE_MAX, INTERNAL_MEM_MS_TABLES, TRUE);
2675
2676         for (i = 0; i < MS_BLOCK_TYPE_MAX; i++)
2677                 worker_free_blocks [i] = (MSBlockInfo **) sgen_alloc_internal_dynamic (sizeof (MSBlockInfo*) * num_block_obj_sizes, INTERNAL_MEM_MS_TABLES, TRUE);
2678
2679         worker->free_block_lists = worker_free_blocks;
2680
2681         mono_native_tls_set_value (worker_block_free_list_key, worker_free_blocks);
2682 }
2683
static void
sgen_marksweep_init_internal (SgenMajorCollector *collector, gboolean is_concurrent, gboolean is_parallel)
{
        int i;

        sgen_register_fixed_internal_mem_type (INTERNAL_MEM_MS_BLOCK_INFO, sizeof (MSBlockInfo));

        num_block_obj_sizes = ms_calculate_block_obj_sizes (MS_BLOCK_OBJ_SIZE_FACTOR, NULL);
        block_obj_sizes = (int *)sgen_alloc_internal_dynamic (sizeof (int) * num_block_obj_sizes, INTERNAL_MEM_MS_TABLES, TRUE);
        ms_calculate_block_obj_sizes (MS_BLOCK_OBJ_SIZE_FACTOR, block_obj_sizes);

        evacuate_block_obj_sizes = (gboolean *)sgen_alloc_internal_dynamic (sizeof (gboolean) * num_block_obj_sizes, INTERNAL_MEM_MS_TABLES, TRUE);
        for (i = 0; i < num_block_obj_sizes; ++i)
                evacuate_block_obj_sizes [i] = FALSE;

        sweep_slots_available = (size_t *)sgen_alloc_internal_dynamic (sizeof (size_t) * num_block_obj_sizes, INTERNAL_MEM_MS_TABLES, TRUE);
        sweep_slots_used = (size_t *)sgen_alloc_internal_dynamic (sizeof (size_t) * num_block_obj_sizes, INTERNAL_MEM_MS_TABLES, TRUE);
        sweep_num_blocks = (size_t *)sgen_alloc_internal_dynamic (sizeof (size_t) * num_block_obj_sizes, INTERNAL_MEM_MS_TABLES, TRUE);

        /*
        {
                int i;
                g_print ("block object sizes:\n");
                for (i = 0; i < num_block_obj_sizes; ++i)
                        g_print ("%d\n", block_obj_sizes [i]);
        }
        */

        for (i = 0; i < MS_BLOCK_TYPE_MAX; ++i)
                free_block_lists [i] = (MSBlockInfo *volatile *)sgen_alloc_internal_dynamic (sizeof (MSBlockInfo*) * num_block_obj_sizes, INTERNAL_MEM_MS_TABLES, TRUE);

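        /*
         * Precompute the fast size-class lookup table (8-byte granularity),
         * then assert that the fast path agrees with the slow lookup for
         * every size it covers.
         */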
        for (i = 0; i < MS_NUM_FAST_BLOCK_OBJ_SIZE_INDEXES; ++i)
                fast_block_obj_size_indexes [i] = ms_find_block_obj_size_index (i * 8);
        for (i = 0; i < MS_NUM_FAST_BLOCK_OBJ_SIZE_INDEXES * 8; ++i)
                g_assert (MS_BLOCK_OBJ_SIZE_INDEX (i) == ms_find_block_obj_size_index (i));

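        /* Block allocation/free statistics, reported through the mono counters machinery. */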
        mono_counters_register ("# major blocks allocated", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_major_blocks_alloced);
        mono_counters_register ("# major blocks freed", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_major_blocks_freed);
        mono_counters_register ("# major blocks lazy swept", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_major_blocks_lazy_swept);
        mono_counters_register ("# major blocks freed ideally", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_major_blocks_freed_ideal);
        mono_counters_register ("# major blocks freed less ideally", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_major_blocks_freed_less_ideal);
        mono_counters_register ("# major blocks freed individually", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_major_blocks_freed_individual);
        mono_counters_register ("# major blocks allocated less ideally", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_major_blocks_alloced_less_ideal);

        collector->section_size = MAJOR_SECTION_SIZE;

        concurrent_mark = is_concurrent;
        collector->is_concurrent = is_concurrent;
        collector->is_parallel = is_parallel;
        collector->needs_thread_pool = is_concurrent || concurrent_sweep;
        collector->get_and_reset_num_major_objects_marked = major_get_and_reset_num_major_objects_marked;
        collector->supports_cardtable = TRUE;

        collector->alloc_heap = major_alloc_heap;
        collector->is_object_live = major_is_object_live;
        collector->alloc_small_pinned_obj = major_alloc_small_pinned_obj;
        collector->alloc_degraded = major_alloc_degraded;

        collector->alloc_object = major_alloc_object;
        collector->alloc_object_par = major_alloc_object_par;
        collector->free_pinned_object = free_pinned_object;
        collector->iterate_objects = major_iterate_objects;
        collector->free_non_pinned_object = major_free_non_pinned_object;
        collector->pin_objects = major_pin_objects;
        collector->pin_major_object = pin_major_object;
        collector->scan_card_table = major_scan_card_table;
        collector->iterate_live_block_ranges = major_iterate_live_block_ranges;
        collector->iterate_block_ranges = major_iterate_block_ranges;
        if (is_concurrent) {
                collector->update_cardtable_mod_union = update_cardtable_mod_union;
                collector->get_cardtable_mod_union_for_reference = major_get_cardtable_mod_union_for_reference;
        }
        collector->init_to_space = major_init_to_space;
        collector->sweep = major_sweep;
        collector->have_swept = major_have_swept;
        collector->finish_sweeping = major_finish_sweep_checking;
        collector->free_swept_blocks = major_free_swept_blocks;
        collector->check_scan_starts = major_check_scan_starts;
        collector->dump_heap = major_dump_heap;
        collector->get_used_size = major_get_used_size;
        collector->start_nursery_collection = major_start_nursery_collection;
        collector->finish_nursery_collection = major_finish_nursery_collection;
        collector->start_major_collection = major_start_major_collection;
        collector->finish_major_collection = major_finish_major_collection;
        collector->ptr_is_in_non_pinned_space = major_ptr_is_in_non_pinned_space;
        collector->ptr_is_from_pinned_alloc = ptr_is_from_pinned_alloc;
        collector->report_pinned_memory_usage = major_report_pinned_memory_usage;
        collector->get_num_major_sections = get_num_major_sections;
        collector->get_bytes_survived_last_sweep = get_bytes_survived_last_sweep;
        collector->handle_gc_param = major_handle_gc_param;
        collector->print_gc_param_usage = major_print_gc_param_usage;
        collector->post_param_init = post_param_init;
        collector->is_valid_object = major_is_valid_object;
        collector->describe_pointer = major_describe_pointer;
        collector->count_cards = major_count_cards;

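        /*
         * Object ops: the serial set is always installed.  The concurrent
         * collector gets separate sets for the start and finish phases; the
         * parallel collector additionally gets its own start set, whose
         * copy/scan functions work on per-worker free block lists.
         */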
        collector->major_ops_serial.copy_or_mark_object = major_copy_or_mark_object_canonical;
        collector->major_ops_serial.scan_object = major_scan_object_with_evacuation;
        collector->major_ops_serial.drain_gray_stack = drain_gray_stack;
        if (is_concurrent) {
                collector->major_ops_concurrent_start.copy_or_mark_object = major_copy_or_mark_object_concurrent_canonical;
                collector->major_ops_concurrent_start.scan_object = major_scan_object_concurrent_with_evacuation;
                collector->major_ops_concurrent_start.scan_vtype = major_scan_vtype_concurrent_with_evacuation;
                collector->major_ops_concurrent_start.scan_ptr_field = major_scan_ptr_field_concurrent_with_evacuation;
                collector->major_ops_concurrent_start.drain_gray_stack = drain_gray_stack_concurrent;

                collector->major_ops_concurrent_finish.copy_or_mark_object = major_copy_or_mark_object_concurrent_finish_canonical;
                collector->major_ops_concurrent_finish.scan_object = major_scan_object_with_evacuation;
                collector->major_ops_concurrent_finish.scan_vtype = major_scan_vtype_with_evacuation;
                collector->major_ops_concurrent_finish.scan_ptr_field = major_scan_ptr_field_with_evacuation;
                collector->major_ops_concurrent_finish.drain_gray_stack = drain_gray_stack;

                if (is_parallel) {
                        collector->major_ops_conc_par_start.copy_or_mark_object = major_copy_or_mark_object_concurrent_par_canonical;
                        collector->major_ops_conc_par_start.scan_object = major_scan_object_concurrent_par_with_evacuation;
                        collector->major_ops_conc_par_start.scan_vtype = major_scan_vtype_concurrent_par_with_evacuation;
                        collector->major_ops_conc_par_start.scan_ptr_field = major_scan_ptr_field_concurrent_par_with_evacuation;
                        collector->major_ops_conc_par_start.drain_gray_stack = drain_gray_stack_concurrent_par;

                        /* FIXME use parallel obj ops */
                        collector->major_ops_conc_par_finish = collector->major_ops_concurrent_finish;

                        collector->worker_init_cb = sgen_worker_init_callback;

                        /* TLS key under which each worker publishes its free block lists (see sgen_worker_init_callback). */
                        mono_native_tls_alloc (&worker_block_free_list_key, NULL);
                }
        }

#ifdef HEAVY_STATISTICS
        mono_counters_register ("Optimized copy", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_optimized_copy);
        mono_counters_register ("Optimized copy nursery", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_optimized_copy_nursery);
        mono_counters_register ("Optimized copy nursery forwarded", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_optimized_copy_nursery_forwarded);
        mono_counters_register ("Optimized copy nursery pinned", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_optimized_copy_nursery_pinned);
        mono_counters_register ("Optimized copy major", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_optimized_copy_major);
        mono_counters_register ("Optimized copy major small fast", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_optimized_copy_major_small_fast);
        mono_counters_register ("Optimized copy major small slow", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_optimized_copy_major_small_slow);
        mono_counters_register ("Optimized copy major small evacuate", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_optimized_copy_major_small_evacuate);
        mono_counters_register ("Optimized copy major large", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_optimized_copy_major_large);
        mono_counters_register ("Optimized major scan", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_optimized_major_scan);
        mono_counters_register ("Optimized major scan no refs", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_optimized_major_scan_no_refs);

        mono_counters_register ("Gray stack drain loops", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_drain_loops);
        mono_counters_register ("Gray stack prefetch fills", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_drain_prefetch_fills);
        mono_counters_register ("Gray stack prefetch failures", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_drain_prefetch_fill_failures);
#endif

#ifdef SGEN_HEAVY_BINARY_PROTOCOL
        mono_os_mutex_init (&scanned_objects_list_lock);
#endif

        SGEN_ASSERT (0, SGEN_MAX_SMALL_OBJ_SIZE <= MS_BLOCK_FREE / 2, "MAX_SMALL_OBJ_SIZE must be at most MS_BLOCK_FREE / 2");

        /* The card table requires major blocks to be aligned to 8 cards. */
        g_assert ((MS_BLOCK_SIZE % (8 * CARD_SIZE_IN_BYTES)) == 0);
}

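/*
 * Entry points for the three collector variants.  Which one sgen invokes is
 * decided by the major collector chosen at startup; assuming the standard
 * sgen option parsing, something like
 *
 *   MONO_GC_PARAMS=major=marksweep-conc-par mono app.exe
 *
 * would end up calling sgen_marksweep_conc_par_init ().
 */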
void
sgen_marksweep_init (SgenMajorCollector *collector)
{
        sgen_marksweep_init_internal (collector, FALSE, FALSE);
}

void
sgen_marksweep_conc_init (SgenMajorCollector *collector)
{
        sgen_marksweep_init_internal (collector, TRUE, FALSE);
}

void
sgen_marksweep_conc_par_init (SgenMajorCollector *collector)
{
        sgen_marksweep_init_internal (collector, TRUE, TRUE);
}


#endif /* HAVE_SGEN_GC */