Add new whole heap verifier that can catch a whole lot more problems than previous...
[mono.git] / mono / metadata / sgen-marksweep.c
1 /*
2  * sgen-marksweep.c: The Mark&Sweep major collector.
3  *
4  * Author:
5  *      Mark Probst <mark.probst@gmail.com>
6  *
7  * Copyright 2009-2010 Novell, Inc.
8  * 
9  * Permission is hereby granted, free of charge, to any person obtaining
10  * a copy of this software and associated documentation files (the
11  * "Software"), to deal in the Software without restriction, including
12  * without limitation the rights to use, copy, modify, merge, publish,
13  * distribute, sublicense, and/or sell copies of the Software, and to
14  * permit persons to whom the Software is furnished to do so, subject to
15  * the following conditions:
16  * 
17  * The above copyright notice and this permission notice shall be
18  * included in all copies or substantial portions of the Software.
19  * 
20  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
21  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
22  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
23  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
24  * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
25  * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
26  * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
27  */
28
29 #include "config.h"
30
31 #ifdef HAVE_SGEN_GC
32
33 #include <math.h>
34 #include <errno.h>
35
36 #include "utils/mono-counters.h"
37 #include "utils/mono-semaphore.h"
38 #include "utils/mono-time.h"
39 #include "metadata/object-internals.h"
40 #include "metadata/profiler-private.h"
41
42 #include "metadata/sgen-gc.h"
43 #include "metadata/sgen-protocol.h"
44 #include "metadata/sgen-cardtable.h"
45 #include "metadata/gc-internal.h"
46
47 #define MS_BLOCK_SIZE   (16*1024)
48 #define MS_BLOCK_SIZE_SHIFT     14
49 #define MAJOR_SECTION_SIZE      MS_BLOCK_SIZE
50 #define CARDS_PER_BLOCK (MS_BLOCK_SIZE / CARD_SIZE_IN_BYTES)
51
52 #ifdef FIXED_HEAP
53 #define MS_DEFAULT_HEAP_NUM_BLOCKS      (32 * 1024) /* 512 MB */
54 #endif
55
56 /*
57  * Don't allocate single blocks; instead, allocate this many blocks
58  * in one OS allocation.
59  */
60 #define MS_BLOCK_ALLOC_NUM      32
61
62 /*
63  * Number of bytes before the first object in a block.  At the start
64  * of a block is the MSBlockHeader, then optional padding, then come
65  * the objects, so this must be >= sizeof (MSBlockHeader).
66  */
67 #ifdef FIXED_HEAP
68 #define MS_BLOCK_SKIP   0
69 #else
70 #define MS_BLOCK_SKIP   16
71 #endif
72
73 #define MS_BLOCK_FREE   (MS_BLOCK_SIZE - MS_BLOCK_SKIP)
74
75 #define MS_NUM_MARK_WORDS       ((MS_BLOCK_SIZE / SGEN_ALLOC_ALIGN + sizeof (mword) * 8 - 1) / (sizeof (mword) * 8))
76
77 #if SGEN_MAX_SMALL_OBJ_SIZE > MS_BLOCK_FREE / 2
78 #error MAX_SMALL_OBJ_SIZE must be at most MS_BLOCK_FREE / 2
79 #endif
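/*
 * A worked example of the mark-bitmap sizing, assuming SGEN_ALLOC_ALIGN == 8
 * and a 64-bit mword (both assumptions about the sgen headers, not spelled
 * out in this file): a 16 KB block has 16384 / 8 = 2048 possible object
 * start addresses, so MS_NUM_MARK_WORDS = (2048 + 63) / 64 = 32 mark words,
 * i.e. 256 bytes of mark bits per block.
 */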
80
81 typedef struct _MSBlockInfo MSBlockInfo;
82 struct _MSBlockInfo {
83         int obj_size;
84         int obj_size_index;
85         int pin_queue_num_entries;
86         unsigned int pinned : 1;
87         unsigned int has_references : 1;
88         unsigned int has_pinned : 1;    /* means cannot evacuate */
89         unsigned int is_to_space : 1;
90 #ifdef FIXED_HEAP
91         unsigned int used : 1;
92         unsigned int zeroed : 1;
93 #endif
94         MSBlockInfo *next;
95         char *block;
96         void **free_list;
97         MSBlockInfo *next_free;
98         void **pin_queue_start;
99         mword mark_words [MS_NUM_MARK_WORDS];
100 };
101
102 #ifdef FIXED_HEAP
103 static int ms_heap_num_blocks = MS_DEFAULT_HEAP_NUM_BLOCKS;
104
105 static char *ms_heap_start;
106 static char *ms_heap_end;
107
108 #define MS_PTR_IN_SMALL_MAJOR_HEAP(p)   ((char*)(p) >= ms_heap_start && (char*)(p) < ms_heap_end)
109
110 /* array of all block infos in the system */
111 static MSBlockInfo *block_infos;
112 #endif
113
114 #define MS_BLOCK_OBJ(b,i)               ((b)->block + MS_BLOCK_SKIP + (b)->obj_size * (i))
115 #define MS_BLOCK_DATA_FOR_OBJ(o)        ((char*)((mword)(o) & ~(mword)(MS_BLOCK_SIZE - 1)))
116
117 #ifdef FIXED_HEAP
118 #define MS_BLOCK_FOR_OBJ(o)             (&block_infos [(mword)((char*)(o) - ms_heap_start) >> MS_BLOCK_SIZE_SHIFT])
119 #else
120 typedef struct {
121         MSBlockInfo *info;
122 } MSBlockHeader;
123
124 #define MS_BLOCK_FOR_OBJ(o)             (((MSBlockHeader*)MS_BLOCK_DATA_FOR_OBJ ((o)))->info)
125 #endif
126
127 #define MS_BLOCK_OBJ_INDEX(o,b) (((char*)(o) - ((b)->block + MS_BLOCK_SKIP)) / (b)->obj_size)
128
129 #define MS_CALC_MARK_BIT(w,b,o)         do {                            \
130                 int i = ((char*)(o) - MS_BLOCK_DATA_FOR_OBJ ((o))) >> SGEN_ALLOC_ALIGN_BITS; \
131                 if (sizeof (mword) == 4) {                              \
132                         (w) = i >> 5;                                   \
133                         (b) = i & 31;                                   \
134                 } else {                                                \
135                         (w) = i >> 6;                                   \
136                         (b) = i & 63;                                   \
137                 }                                                       \
138         } while (0)
139
140 #define MS_MARK_BIT(bl,w,b)     ((bl)->mark_words [(w)] & (1L << (b)))
141 #define MS_SET_MARK_BIT(bl,w,b) ((bl)->mark_words [(w)] |= (1L << (b)))
142 #define MS_PAR_SET_MARK_BIT(was_marked,bl,w,b)  do {                    \
143                 mword __old = (bl)->mark_words [(w)];                   \
144                 mword __bitmask = 1L << (b);                            \
145                 if (__old & __bitmask) {                                \
146                         was_marked = TRUE;                              \
147                         break;                                          \
148                 }                                                       \
149                 if (SGEN_CAS_PTR ((gpointer*)&(bl)->mark_words [(w)],   \
150                                                 (gpointer)(__old | __bitmask), \
151                                                 (gpointer)__old) ==     \
152                                 (gpointer)__old) {                      \
153                         was_marked = FALSE;                             \
154                         break;                                          \
155                 }                                                       \
156         } while (1)
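/*
 * Purely illustrative sketch (not used by the collector) of what the
 * mark-bit macros above compute for an object at byte offset `off` inside
 * its block:
 *
 *     int i    = off >> SGEN_ALLOC_ALIGN_BITS;    // one bit per aligned slot
 *     int word = i / (sizeof (mword) * 8);
 *     int bit  = i % (sizeof (mword) * 8);
 *
 * MS_PAR_SET_MARK_BIT retries a compare-and-swap on the whole mark word
 * until either the bit is observed to be already set (was_marked = TRUE) or
 * this thread's CAS wins (was_marked = FALSE), so concurrent markers never
 * lose an update to the shared bitmap.
 */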
157
158 #define MS_OBJ_ALLOCED(o,b)     (*(void**)(o) && (*(char**)(o) < (b)->block || *(char**)(o) >= (b)->block + MS_BLOCK_SIZE))
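/*
 * MS_OBJ_ALLOCED distinguishes live objects from free slots without any
 * extra metadata: the first word of a free slot is either NULL (end of the
 * block's free list) or a pointer to another slot inside the same block,
 * while the first word of an allocated object is its vtable pointer, which
 * always lies outside the block.
 */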
159
160 #define MS_BLOCK_OBJ_SIZE_FACTOR        (sqrt (2.0))
161
162 /*
163  * This way we can look up block object size indexes for sizes up to
164  * 256 bytes with a single load.
165  */
166 #define MS_NUM_FAST_BLOCK_OBJ_SIZE_INDEXES      32
167
168 static int *block_obj_sizes;
169 static int num_block_obj_sizes;
170 static int fast_block_obj_size_indexes [MS_NUM_FAST_BLOCK_OBJ_SIZE_INDEXES];
171
172 #define MS_BLOCK_FLAG_PINNED    1
173 #define MS_BLOCK_FLAG_REFS      2
174
175 #define MS_BLOCK_TYPE_MAX       4
176
177 #ifdef SGEN_PARALLEL_MARK
178 static LOCK_DECLARE (ms_block_list_mutex);
179 #define LOCK_MS_BLOCK_LIST mono_mutex_lock (&ms_block_list_mutex)
180 #define UNLOCK_MS_BLOCK_LIST mono_mutex_unlock (&ms_block_list_mutex)
181 #endif
182
183 static gboolean *evacuate_block_obj_sizes;
184 static float evacuation_threshold = 0.666;
185
186 static gboolean concurrent_sweep = FALSE;
187 static gboolean have_swept;
188
189 /* all allocated blocks in the system */
190 static MSBlockInfo *all_blocks;
191
192 #ifdef FIXED_HEAP
193 /* non-allocated block free-list */
194 static MSBlockInfo *empty_blocks = NULL;
195 #else
196 /* non-allocated block free-list */
197 static void *empty_blocks = NULL;
198 static int num_empty_blocks = 0;
199 #endif
200
201 #define FOREACH_BLOCK(bl)       for ((bl) = all_blocks; (bl); (bl) = (bl)->next) {
202 #define END_FOREACH_BLOCK       }
203
204 static int num_major_sections = 0;
205 /* one free block list for each block object size */
206 static MSBlockInfo **free_block_lists [MS_BLOCK_TYPE_MAX];
207
208 #ifdef SGEN_PARALLEL_MARK
209 #ifdef HAVE_KW_THREAD
210 static __thread MSBlockInfo ***workers_free_block_lists;
211 #else
212 static MonoNativeTlsKey workers_free_block_lists_key;
213 #endif
214 #endif
215
216 static long long stat_major_blocks_alloced = 0;
217 static long long stat_major_blocks_freed = 0;
218 static long long stat_major_objects_evacuated = 0;
219 static long long stat_time_wait_for_sweep = 0;
220
221 static gboolean ms_sweep_in_progress = FALSE;
222 static MonoNativeThreadId ms_sweep_thread;
223 static MonoSemType ms_sweep_cmd_semaphore;
224 static MonoSemType ms_sweep_done_semaphore;
225
226 static void
227 ms_signal_sweep_command (void)
228 {
229         if (!concurrent_sweep)
230                 return;
231
232         g_assert (!ms_sweep_in_progress);
233         ms_sweep_in_progress = TRUE;
234         MONO_SEM_POST (&ms_sweep_cmd_semaphore);
235 }
236
237 static void
238 ms_signal_sweep_done (void)
239 {
240         if (!concurrent_sweep)
241                 return;
242
243         MONO_SEM_POST (&ms_sweep_done_semaphore);
244 }
245
246 static void
247 ms_wait_for_sweep_done (void)
248 {
249         SGEN_TV_DECLARE (atv);
250         SGEN_TV_DECLARE (btv);
251         int result;
252
253         if (!concurrent_sweep)
254                 return;
255
256         if (!ms_sweep_in_progress)
257                 return;
258
259         SGEN_TV_GETTIME (atv);
260         while ((result = MONO_SEM_WAIT (&ms_sweep_done_semaphore)) != 0) {
261                 if (errno != EINTR)
262                         g_error ("MONO_SEM_WAIT");
263         }
264         SGEN_TV_GETTIME (btv);
265         stat_time_wait_for_sweep += SGEN_TV_ELAPSED (atv, btv);
266
267         g_assert (ms_sweep_in_progress);
268         ms_sweep_in_progress = FALSE;
269 }
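/*
 * Concurrent-sweep handshake: the collector posts ms_sweep_cmd_semaphore to
 * wake the sweep thread and sets ms_sweep_in_progress; the sweep thread
 * posts ms_sweep_done_semaphore when it is finished.  Any path that needs a
 * consistent view of the block lists (pinned allocation, heap iteration,
 * ...) calls ms_wait_for_sweep_done () first, which blocks on the done
 * semaphore and accounts the wait in stat_time_wait_for_sweep.
 */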
270
271 static int
272 ms_find_block_obj_size_index (int size)
273 {
274         int i;
275         DEBUG (9, g_assert (size <= SGEN_MAX_SMALL_OBJ_SIZE));
276         for (i = 0; i < num_block_obj_sizes; ++i)
277                 if (block_obj_sizes [i] >= size)
278                         return i;
279         g_error ("no object of size %d\n", size);
280 }
281
282 #define FREE_BLOCKS_FROM(lists,p,r)     (lists [((p) ? MS_BLOCK_FLAG_PINNED : 0) | ((r) ? MS_BLOCK_FLAG_REFS : 0)])
283 #define FREE_BLOCKS(p,r)                (FREE_BLOCKS_FROM (free_block_lists, (p), (r)))
284 #ifdef SGEN_PARALLEL_MARK
285 #ifdef HAVE_KW_THREAD
286 #define FREE_BLOCKS_LOCAL(p,r)          (FREE_BLOCKS_FROM (workers_free_block_lists, (p), (r)))
287 #else
288 #define FREE_BLOCKS_LOCAL(p,r)          (FREE_BLOCKS_FROM (((MSBlockInfo***)(mono_native_tls_get_value (workers_free_block_lists_key))), (p), (r)))
289 #endif
290 #else
291 //#define FREE_BLOCKS_LOCAL(p,r)                (FREE_BLOCKS_FROM (free_block_lists, (p), (r)))
292 #endif
293
294 #define MS_BLOCK_OBJ_SIZE_INDEX(s)                              \
295         (((s)+7)>>3 < MS_NUM_FAST_BLOCK_OBJ_SIZE_INDEXES ?      \
296          fast_block_obj_size_indexes [((s)+7)>>3] :             \
297          ms_find_block_obj_size_index ((s)))
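/*
 * Example of the size-index lookup (the fast table itself is filled in at
 * initialization, outside this excerpt): a 40 byte request gives
 * (40+7)>>3 = 5, which is below MS_NUM_FAST_BLOCK_OBJ_SIZE_INDEXES, so the
 * index comes straight out of fast_block_obj_size_indexes [5]; a 1 KB
 * request gives 128 and falls back to the linear search in
 * ms_find_block_obj_size_index ().
 */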
298
299 #ifdef FIXED_HEAP
300 static void*
301 major_alloc_heap (mword nursery_size, mword nursery_align, int the_nursery_bits)
302 {
303         char *nursery_start;
304         mword major_heap_size = ms_heap_num_blocks * MS_BLOCK_SIZE;
305         mword alloc_size = nursery_size + major_heap_size;
306         int i;
307
308         g_assert (ms_heap_num_blocks > 0);
309         g_assert (nursery_size % MS_BLOCK_SIZE == 0);
310         if (nursery_align)
311                 g_assert (nursery_align % MS_BLOCK_SIZE == 0);
312
313         nursery_start = sgen_alloc_os_memory_aligned (alloc_size, nursery_align ? nursery_align : MS_BLOCK_SIZE, TRUE);
314         ms_heap_start = nursery_start + nursery_size;
315         ms_heap_end = ms_heap_start + major_heap_size;
316
317         block_infos = sgen_alloc_internal_dynamic (sizeof (MSBlockInfo) * ms_heap_num_blocks, INTERNAL_MEM_MS_BLOCK_INFO);
318
319         for (i = 0; i < ms_heap_num_blocks; ++i) {
320                 block_infos [i].block = ms_heap_start + i * MS_BLOCK_SIZE;
321                 if (i < ms_heap_num_blocks - 1)
322                         block_infos [i].next_free = &block_infos [i + 1];
323                 else
324                         block_infos [i].next_free = NULL;
325                 block_infos [i].zeroed = TRUE;
326         }
327
328         empty_blocks = &block_infos [0];
329
330         return nursery_start;
331 }
332 #else
333 static void*
334 major_alloc_heap (mword nursery_size, mword nursery_align, int the_nursery_bits)
335 {
336         char *start;
337         if (nursery_align)
338                 start = sgen_alloc_os_memory_aligned (nursery_size, nursery_align, TRUE);
339         else
340                 start = sgen_alloc_os_memory (nursery_size, TRUE);
341
342         return start;
343 }
344 #endif
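/*
 * In the FIXED_HEAP configuration the nursery and the whole small-object
 * major heap are carved out of a single contiguous, block-aligned OS
 * allocation.  That is what lets MS_PTR_IN_SMALL_MAJOR_HEAP () be a plain
 * range check and MS_BLOCK_FOR_OBJ () a simple index into block_infos
 * instead of a per-block header lookup.
 */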
345
346 static void
347 update_heap_boundaries_for_block (MSBlockInfo *block)
348 {
349         sgen_update_heap_boundaries ((mword)block->block, (mword)block->block + MS_BLOCK_SIZE);
350 }
351
352 #ifdef FIXED_HEAP
353 static MSBlockInfo*
354 ms_get_empty_block (void)
355 {
356         MSBlockInfo *block;
357
358         g_assert (empty_blocks);
359
360         do {
361                 block = empty_blocks;
362         } while (SGEN_CAS_PTR ((gpointer*)&empty_blocks, block->next_free, block) != block);
363
364         block->used = TRUE;
365
366         if (!block->zeroed)
367                 memset (block->block, 0, MS_BLOCK_SIZE);
368
369         return block;
370 }
371
372 static void
373 ms_free_block (MSBlockInfo *block)
374 {
375         block->next_free = empty_blocks;
376         empty_blocks = block;
377         block->used = FALSE;
378         block->zeroed = FALSE;
379         sgen_release_space (MS_BLOCK_SIZE, SPACE_MAJOR);
380 }
381 #else
382 static void*
383 ms_get_empty_block (void)
384 {
385         char *p;
386         int i;
387         void *block, *empty, *next;
388
389  retry:
390         if (!empty_blocks) {
391                 p = sgen_alloc_os_memory_aligned (MS_BLOCK_SIZE * MS_BLOCK_ALLOC_NUM, MS_BLOCK_SIZE, TRUE);
392
393                 for (i = 0; i < MS_BLOCK_ALLOC_NUM; ++i) {
394                         block = p;
395                         /*
396                          * We do the free list update one after the
397                          * other so that other threads can use the new
398                          * blocks as quickly as possible.
399                          */
400                         do {
401                                 empty = empty_blocks;
402                                 *(void**)block = empty;
403                         } while (SGEN_CAS_PTR ((gpointer*)&empty_blocks, block, empty) != empty);
404                         p += MS_BLOCK_SIZE;
405                 }
406
407                 SGEN_ATOMIC_ADD (num_empty_blocks, MS_BLOCK_ALLOC_NUM);
408
409                 stat_major_blocks_alloced += MS_BLOCK_ALLOC_NUM;
410         }
411
412         do {
413                 empty = empty_blocks;
414                 if (!empty)
415                         goto retry;
416                 block = empty;
417                 next = *(void**)block;
418         } while (SGEN_CAS_PTR (&empty_blocks, next, empty) != empty);
419
420         SGEN_ATOMIC_ADD (num_empty_blocks, -1);
421
422         *(void**)block = NULL;
423
424         g_assert (!((mword)block & (MS_BLOCK_SIZE - 1)));
425
426         return block;
427 }
428
429 static void
430 ms_free_block (void *block)
431 {
432         void *empty;
433
434         sgen_release_space (MS_BLOCK_SIZE, SPACE_MAJOR);
435         memset (block, 0, MS_BLOCK_SIZE);
436
437         do {
438                 empty = empty_blocks;
439                 *(void**)block = empty;
440         } while (SGEN_CAS_PTR (&empty_blocks, block, empty) != empty);
441
442         SGEN_ATOMIC_ADD (num_empty_blocks, 1);
443 }
444 #endif
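/*
 * In the non-FIXED_HEAP variant above, empty_blocks is a lock-free LIFO:
 * each empty block's first word links to the next one, pushes and pops are
 * done with SGEN_CAS_PTR, and a pop that races against an emptied list
 * simply retries (the "goto retry" path), possibly mapping another batch of
 * MS_BLOCK_ALLOC_NUM blocks first.
 */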
445
446 //#define MARKSWEEP_CONSISTENCY_CHECK
447
448 #ifdef MARKSWEEP_CONSISTENCY_CHECK
449 static void
450 check_block_free_list (MSBlockInfo *block, int size, gboolean pinned)
451 {
452         MSBlockInfo *b;
453
454         for (; block; block = block->next_free) {
455                 g_assert (block->obj_size == size);
456                 g_assert ((pinned && block->pinned) || (!pinned && !block->pinned));
457
458                 /* blocks in the free lists must have at least
459                    one free slot */
460                 g_assert (block->free_list);
461
462 #ifdef FIXED_HEAP
463                 /* the block must not be in the empty_blocks list */
464                 for (b = empty_blocks; b; b = b->next_free)
465                         g_assert (b != block);
466 #endif
467                 /* the block must be in the all_blocks list */
468                 for (b = all_blocks; b; b = b->next) {
469                         if (b == block)
470                                 break;
471                 }
472                 g_assert (b == block);
473         }
474 }
475
476 static void
477 check_empty_blocks (void)
478 {
479 #ifndef FIXED_HEAP
480         void *p;
481         int i = 0;
482         for (p = empty_blocks; p; p = *(void**)p)
483                 ++i;
484         g_assert (i == num_empty_blocks);
485 #endif
486 }
487
488 static void
489 consistency_check (void)
490 {
491         MSBlockInfo *block;
492         int i;
493
494         /* check all blocks */
495         FOREACH_BLOCK (block) {
496                 int count = MS_BLOCK_FREE / block->obj_size;
497                 int num_free = 0;
498                 void **free;
499
500 #ifndef FIXED_HEAP
501                 /* check block header */
502                 g_assert (((MSBlockHeader*)block->block)->info == block);
503 #endif
504
505                 /* count number of free slots */
506                 for (i = 0; i < count; ++i) {
507                         void **obj = (void**) MS_BLOCK_OBJ (block, i);
508                         if (!MS_OBJ_ALLOCED (obj, block))
509                                 ++num_free;
510                 }
511
512                 /* check free list */
513                 for (free = block->free_list; free; free = (void**)*free) {
514                         g_assert (MS_BLOCK_FOR_OBJ (free) == block);
515                         --num_free;
516                 }
517                 g_assert (num_free == 0);
518
519                 /* check all mark words are zero */
520                 for (i = 0; i < MS_NUM_MARK_WORDS; ++i)
521                         g_assert (block->mark_words [i] == 0);
522         } END_FOREACH_BLOCK;
523
524         /* check free blocks */
525         for (i = 0; i < num_block_obj_sizes; ++i) {
526                 int j;
527                 for (j = 0; j < MS_BLOCK_TYPE_MAX; ++j)
528                         check_block_free_list (free_block_lists [j][i], block_obj_sizes [i], j & MS_BLOCK_FLAG_PINNED);
529         }
530
531         check_empty_blocks ();
532 }
533 #endif
534
535 static gboolean
536 ms_alloc_block (int size_index, gboolean pinned, gboolean has_references)
537 {
538         int size = block_obj_sizes [size_index];
539         int count = MS_BLOCK_FREE / size;
540         MSBlockInfo *info;
541 #ifdef SGEN_PARALLEL_MARK
542         MSBlockInfo *next;
543 #endif
544 #ifndef FIXED_HEAP
545         MSBlockHeader *header;
546 #endif
547         MSBlockInfo **free_blocks = FREE_BLOCKS (pinned, has_references);
548         char *obj_start;
549         int i;
550
551         if (!sgen_try_alloc_space (MS_BLOCK_SIZE, SPACE_MAJOR))
552                 return FALSE;
553
554 #ifdef FIXED_HEAP
555         info = ms_get_empty_block ();
556 #else
557         info = sgen_alloc_internal (INTERNAL_MEM_MS_BLOCK_INFO);
558 #endif
559
560         DEBUG (9, g_assert (count >= 2));
561
562         info->obj_size = size;
563         info->obj_size_index = size_index;
564         info->pinned = pinned;
565         info->has_references = has_references;
566         info->has_pinned = pinned;
567         info->is_to_space = (sgen_get_current_collection_generation () == GENERATION_OLD); /*FIXME WHY??? */
568 #ifndef FIXED_HEAP
569         info->block = ms_get_empty_block ();
570
571         header = (MSBlockHeader*) info->block;
572         header->info = info;
573 #endif
574
575         update_heap_boundaries_for_block (info);
576
577         /* build free list */
578         obj_start = info->block + MS_BLOCK_SKIP;
579         info->free_list = (void**)obj_start;
580         /* we're skipping the last one - it must be nulled */
581         for (i = 0; i < count - 1; ++i) {
582                 char *next_obj_start = obj_start + size;
583                 *(void**)obj_start = next_obj_start;
584                 obj_start = next_obj_start;
585         }
586         /* the last one */
587         *(void**)obj_start = NULL;
588
589 #ifdef SGEN_PARALLEL_MARK
590         do {
591                 next = info->next_free = free_blocks [size_index];
592         } while (SGEN_CAS_PTR ((void**)&free_blocks [size_index], info, next) != next);
593
594         do {
595                 next = info->next = all_blocks;
596         } while (SGEN_CAS_PTR ((void**)&all_blocks, info, next) != next);
597 #else
598         info->next_free = free_blocks [size_index];
599         free_blocks [size_index] = info;
600
601         info->next = all_blocks;
602         all_blocks = info;
603 #endif
604
605         ++num_major_sections;
606         return TRUE;
607 }
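/*
 * A quick sanity check of the free-list construction above, assuming a
 * 16-byte size class and the non-FIXED_HEAP MS_BLOCK_SKIP of 16 (the actual
 * size classes are computed at initialization, outside this excerpt):
 * count = MS_BLOCK_FREE / 16 = (16384 - 16) / 16 = 1023 slots, the first
 * 1022 of which are chained through their first word, with the last slot's
 * first word left NULL so MS_OBJ_ALLOCED () keeps treating it as free.
 */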
608
609 static gboolean
610 obj_is_from_pinned_alloc (char *ptr)
611 {
612         MSBlockInfo *block;
613
614         FOREACH_BLOCK (block) {
615                 if (ptr >= block->block && ptr <= block->block + MS_BLOCK_SIZE)
616                         return block->pinned;
617         } END_FOREACH_BLOCK;
618         return FALSE;
619 }
620
621 static void*
622 unlink_slot_from_free_list_uncontested (MSBlockInfo **free_blocks, int size_index)
623 {
624         MSBlockInfo *block;
625         void *obj;
626
627         block = free_blocks [size_index];
628         DEBUG (9, g_assert (block));
629
630         obj = block->free_list;
631         DEBUG (9, g_assert (obj));
632
633         block->free_list = *(void**)obj;
634         if (!block->free_list) {
635                 free_blocks [size_index] = block->next_free;
636                 block->next_free = NULL;
637         }
638
639         return obj;
640 }
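/*
 * The "uncontested" in the name reflects how this is used: callers either
 * hold the GC lock (the serial allocation paths) or operate on their own
 * thread-local free lists (the parallel mark path), so popping the head
 * slot - and unlinking the block once it runs dry - needs no atomics.
 */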
641
642 #ifdef SGEN_PARALLEL_MARK
643 static gboolean
644 try_remove_block_from_free_list (MSBlockInfo *block, MSBlockInfo **free_blocks, int size_index)
645 {
646          * Try to unlink the block from the global free list.  If the
647          * CAS fails, don't bother retrying with this block - another
648          * thread has already taken it.
649          * already have done it.
650          */
651         MSBlockInfo *next_block = block->next_free;
652         if (SGEN_CAS_PTR ((void**)&free_blocks [size_index], next_block, block) == block) {
653                 /*
654                 void *old = SGEN_CAS_PTR ((void**)&block->next_free, NULL, next_block);
655                 g_assert (old == next_block);
656                 */
657                 block->next_free = NULL;
658                 return TRUE;
659         }
660         return FALSE;
661 }
662
663 static void*
664 alloc_obj_par (int size, gboolean pinned, gboolean has_references)
665 {
666         int size_index = MS_BLOCK_OBJ_SIZE_INDEX (size);
667         MSBlockInfo **free_blocks_local = FREE_BLOCKS_LOCAL (pinned, has_references);
668         MSBlockInfo *block;
669         void *obj;
670
671         DEBUG (9, g_assert (!ms_sweep_in_progress));
672         DEBUG (9, g_assert (current_collection_generation == GENERATION_OLD));
673
674         if (free_blocks_local [size_index]) {
675         get_slot:
676                 obj = unlink_slot_from_free_list_uncontested (free_blocks_local, size_index);
677         } else {
678                 MSBlockInfo **free_blocks = FREE_BLOCKS (pinned, has_references);
679
680         get_block:
681                 block = free_blocks [size_index];
682                 if (block) {
683                         if (!try_remove_block_from_free_list (block, free_blocks, size_index))
684                                 goto get_block;
685
686                         g_assert (block->next_free == NULL);
687                         g_assert (block->free_list);
688                         block->next_free = free_blocks_local [size_index];
689                         free_blocks_local [size_index] = block;
690
691                         goto get_slot;
692                 } else {
693                         gboolean success;
694
695                         LOCK_MS_BLOCK_LIST;
696                         success = ms_alloc_block (size_index, pinned, has_references);
697                         UNLOCK_MS_BLOCK_LIST;
698
699                         if (G_UNLIKELY (!success))
700                                 return NULL;
701
702                         goto get_block;
703                 }
704         }
705
706         /*
707          * FIXME: This should not be necessary because it'll be
708          * overwritten by the vtable immediately.
709          */
710         *(void**)obj = NULL;
711
712         return obj;
713 }
714
715 static void*
716 major_par_alloc_object (int size, gboolean has_references)
717 {
718         return alloc_obj_par (size, FALSE, has_references);
719 }
720 #endif
721
722 static void*
723 alloc_obj (int size, gboolean pinned, gboolean has_references)
724 {
725         int size_index = MS_BLOCK_OBJ_SIZE_INDEX (size);
726         MSBlockInfo **free_blocks = FREE_BLOCKS (pinned, has_references);
727         void *obj;
728
729 #ifdef SGEN_PARALLEL_MARK
730         DEBUG (9, g_assert (current_collection_generation != GENERATION_OLD));
731 #endif
732
733         DEBUG (9, g_assert (!ms_sweep_in_progress));
734
735         if (!free_blocks [size_index]) {
736                 if (G_UNLIKELY (!ms_alloc_block (size_index, pinned, has_references)))
737                         return NULL;
738         }
739
740         obj = unlink_slot_from_free_list_uncontested (free_blocks, size_index);
741
742         /*
743          * FIXME: This should not be necessary because it'll be
744          * overwritten by the vtable immediately.
745          */
746         *(void**)obj = NULL;
747
748         return obj;
749 }
750
751 static void*
752 major_alloc_object (int size, gboolean has_references)
753 {
754         return alloc_obj (size, FALSE, has_references);
755 }
756
757 /*
758  * We're not freeing the block if it's empty.  We leave that work for
759  * the next major collection.
760  *
761  * This is just called from the domain clearing code, which runs in a
762  * single thread and has the GC lock, so we don't need an extra lock.
763  */
764 static void
765 free_object (char *obj, size_t size, gboolean pinned)
766 {
767         MSBlockInfo *block = MS_BLOCK_FOR_OBJ (obj);
768         int word, bit;
769         DEBUG (9, g_assert ((pinned && block->pinned) || (!pinned && !block->pinned)));
770         DEBUG (9, g_assert (MS_OBJ_ALLOCED (obj, block)));
771         MS_CALC_MARK_BIT (word, bit, obj);
772         DEBUG (9, g_assert (!MS_MARK_BIT (block, word, bit)));
773         if (!block->free_list) {
774                 MSBlockInfo **free_blocks = FREE_BLOCKS (pinned, block->has_references);
775                 int size_index = MS_BLOCK_OBJ_SIZE_INDEX (size);
776                 DEBUG (9, g_assert (!block->next_free));
777                 block->next_free = free_blocks [size_index];
778                 free_blocks [size_index] = block;
779         }
780         memset (obj, 0, size);
781         *(void**)obj = block->free_list;
782         block->free_list = (void**)obj;
783 }
784
785 static void
786 major_free_non_pinned_object (char *obj, size_t size)
787 {
788         free_object (obj, size, FALSE);
789 }
790
791 /* size is a multiple of SGEN_ALLOC_ALIGN */
792 static void*
793 major_alloc_small_pinned_obj (size_t size, gboolean has_references)
794 {
795         void *res;
796
797         ms_wait_for_sweep_done ();
798
799         res = alloc_obj (size, TRUE, has_references);
800          /* If we failed to allocate memory, we'd better try releasing some
801           * with a major collection, as this pinned alloc was requested by
802           * the runtime. */
803          if (!res) {
804                  sgen_collect_major_no_lock ("pinned alloc failure");
805                  res = alloc_obj (size, TRUE, has_references);
806          }
807          return res;
808 }
809
810 static void
811 free_pinned_object (char *obj, size_t size)
812 {
813         free_object (obj, size, TRUE);
814 }
815
816 /*
817  * size is already rounded up and we hold the GC lock.
818  */
819 static void*
820 major_alloc_degraded (MonoVTable *vtable, size_t size)
821 {
822         void *obj;
823         int old_num_sections;
824
825         ms_wait_for_sweep_done ();
826
827         old_num_sections = num_major_sections;
828
829         obj = alloc_obj (size, FALSE, SGEN_VTABLE_HAS_REFERENCES (vtable));
830         if (G_LIKELY (obj)) {
831                 *(MonoVTable**)obj = vtable;
832                 HEAVY_STAT (++stat_objects_alloced_degraded);
833                 HEAVY_STAT (stat_bytes_alloced_degraded += size);
834                 g_assert (num_major_sections >= old_num_sections);
835                 sgen_register_major_sections_alloced (num_major_sections - old_num_sections);
836         }
837         return obj;
838 }
839
840 #define MAJOR_OBJ_IS_IN_TO_SPACE(obj)   FALSE
841
842 /*
843  * obj is some object.  If it's not in the major heap (i.e. if it's in
844  * the nursery or LOS), return FALSE.  Otherwise return whether it's
845  * been marked or copied.
846  */
847 static gboolean
848 major_is_object_live (char *obj)
849 {
850         MSBlockInfo *block;
851         int word, bit;
852 #ifndef FIXED_HEAP
853         mword objsize;
854 #endif
855
856         if (sgen_ptr_in_nursery (obj))
857                 return FALSE;
858
859 #ifdef FIXED_HEAP
860         /* LOS */
861         if (!MS_PTR_IN_SMALL_MAJOR_HEAP (obj))
862                 return FALSE;
863 #else
864         objsize = SGEN_ALIGN_UP (sgen_safe_object_get_size ((MonoObject*)obj));
865
866         /* LOS */
867         if (objsize > SGEN_MAX_SMALL_OBJ_SIZE)
868                 return FALSE;
869 #endif
870
871         /* now we know it's in a major block */
872         block = MS_BLOCK_FOR_OBJ (obj);
873         DEBUG (9, g_assert (!block->pinned));
874         MS_CALC_MARK_BIT (word, bit, obj);
875         return MS_MARK_BIT (block, word, bit) ? TRUE : FALSE;
876 }
877
878 static gboolean
879 major_ptr_is_in_non_pinned_space (char *ptr)
880 {
881         MSBlockInfo *block;
882
883         FOREACH_BLOCK (block) {
884                 if (ptr >= block->block && ptr <= block->block + MS_BLOCK_SIZE)
885                         return !block->pinned;
886         } END_FOREACH_BLOCK;
887         return FALSE;
888 }
889
890 static void
891 major_iterate_objects (gboolean non_pinned, gboolean pinned, IterateObjectCallbackFunc callback, void *data)
892 {
893         MSBlockInfo *block;
894
895         ms_wait_for_sweep_done ();
896
897         FOREACH_BLOCK (block) {
898                 int count = MS_BLOCK_FREE / block->obj_size;
899                 int i;
900
901                 if (block->pinned && !pinned)
902                         continue;
903                 if (!block->pinned && !non_pinned)
904                         continue;
905
906                 for (i = 0; i < count; ++i) {
907                         void **obj = (void**) MS_BLOCK_OBJ (block, i);
908                         if (MS_OBJ_ALLOCED (obj, block))
909                                 callback ((char*)obj, block->obj_size, data);
910                 }
911         } END_FOREACH_BLOCK;
912 }
913
914 static gboolean
915 major_is_valid_object (char *object)
916 {
917         MSBlockInfo *block;
918
919         ms_wait_for_sweep_done ();
920         FOREACH_BLOCK (block) {
921                 int idx;
922                 char *obj;
923
924                 if ((block->block > object) || ((block->block + MS_BLOCK_SIZE) <= object))
925                         continue;
926
927                 idx = MS_BLOCK_OBJ_INDEX (object, block);
928                 obj = (char*)MS_BLOCK_OBJ (block, idx);
929                 if (obj != object)
930                         return FALSE;
931                 return MS_OBJ_ALLOCED (obj, block);
932         } END_FOREACH_BLOCK;
933
934         return FALSE;
935 }
936
937 static void
938 major_check_scan_starts (void)
939 {
940 }
941
942 static void
943 major_dump_heap (FILE *heap_dump_file)
944 {
945         MSBlockInfo *block;
946         int *slots_available = alloca (sizeof (int) * num_block_obj_sizes);
947         int *slots_used = alloca (sizeof (int) * num_block_obj_sizes);
948         int i;
949
950         for (i = 0; i < num_block_obj_sizes; ++i)
951                 slots_available [i] = slots_used [i] = 0;
952
953         FOREACH_BLOCK (block) {
954                 int index = ms_find_block_obj_size_index (block->obj_size);
955                 int count = MS_BLOCK_FREE / block->obj_size;
956
957                 slots_available [index] += count;
958                 for (i = 0; i < count; ++i) {
959                         if (MS_OBJ_ALLOCED (MS_BLOCK_OBJ (block, i), block))
960                                 ++slots_used [index];
961                 }
962         } END_FOREACH_BLOCK;
963
964         fprintf (heap_dump_file, "<occupancies>\n");
965         for (i = 0; i < num_block_obj_sizes; ++i) {
966                 fprintf (heap_dump_file, "<occupancy size=\"%d\" available=\"%d\" used=\"%d\" />\n",
967                                 block_obj_sizes [i], slots_available [i], slots_used [i]);
968         }
969         fprintf (heap_dump_file, "</occupancies>\n");
970
971         FOREACH_BLOCK (block) {
972                 int count = MS_BLOCK_FREE / block->obj_size;
973                 int i;
974                 int start = -1;
975
976                 fprintf (heap_dump_file, "<section type=\"%s\" size=\"%zu\">\n", "old", (size_t)MS_BLOCK_FREE);
977
978                 for (i = 0; i <= count; ++i) {
979                         if ((i < count) && MS_OBJ_ALLOCED (MS_BLOCK_OBJ (block, i), block)) {
980                                 if (start < 0)
981                                         start = i;
982                         } else {
983                                 if (start >= 0) {
984                                         sgen_dump_occupied (MS_BLOCK_OBJ (block, start), MS_BLOCK_OBJ (block, i), block->block);
985                                         start = -1;
986                                 }
987                         }
988                 }
989
990                 fprintf (heap_dump_file, "</section>\n");
991         } END_FOREACH_BLOCK;
992 }
993
994 #define LOAD_VTABLE     SGEN_LOAD_VTABLE
995
996 #define MS_MARK_OBJECT_AND_ENQUEUE_CHECKED(obj,block,queue) do {        \
997                 int __word, __bit;                                      \
998                 MS_CALC_MARK_BIT (__word, __bit, (obj));                \
999                 if (!MS_MARK_BIT ((block), __word, __bit) && MS_OBJ_ALLOCED ((obj), (block))) { \
1000                         MS_SET_MARK_BIT ((block), __word, __bit);       \
1001                         if ((block)->has_references)                    \
1002                                 GRAY_OBJECT_ENQUEUE ((queue), (obj));   \
1003                         binary_protocol_mark ((obj), (gpointer)LOAD_VTABLE ((obj)), sgen_safe_object_get_size ((MonoObject*)(obj))); \
1004                 }                                                       \
1005         } while (0)
1006 #define MS_MARK_OBJECT_AND_ENQUEUE(obj,block,queue) do {                \
1007                 int __word, __bit;                                      \
1008                 MS_CALC_MARK_BIT (__word, __bit, (obj));                \
1009                 DEBUG (9, g_assert (MS_OBJ_ALLOCED ((obj), (block))));  \
1010                 if (!MS_MARK_BIT ((block), __word, __bit)) {            \
1011                         MS_SET_MARK_BIT ((block), __word, __bit);       \
1012                         if ((block)->has_references)                    \
1013                                 GRAY_OBJECT_ENQUEUE ((queue), (obj));   \
1014                         binary_protocol_mark ((obj), (gpointer)LOAD_VTABLE ((obj)), sgen_safe_object_get_size ((MonoObject*)(obj))); \
1015                 }                                                       \
1016         } while (0)
1017 #define MS_PAR_MARK_OBJECT_AND_ENQUEUE(obj,block,queue) do {            \
1018                 int __word, __bit;                                      \
1019                 gboolean __was_marked;                                  \
1020                 DEBUG (9, g_assert (MS_OBJ_ALLOCED ((obj), (block))));  \
1021                 MS_CALC_MARK_BIT (__word, __bit, (obj));                \
1022                 MS_PAR_SET_MARK_BIT (__was_marked, (block), __word, __bit); \
1023                 if (!__was_marked) {                                    \
1024                         if ((block)->has_references)                    \
1025                                 GRAY_OBJECT_ENQUEUE ((queue), (obj));   \
1026                         binary_protocol_mark ((obj), (gpointer)LOAD_VTABLE ((obj)), sgen_safe_object_get_size ((MonoObject*)(obj))); \
1027                 }                                                       \
1028         } while (0)
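/*
 * The three marking macros above differ only in how much they trust the
 * object: the _CHECKED form (used for pin-queue entries, which may point at
 * free slots) verifies MS_OBJ_ALLOCED first; the plain form only asserts it
 * in debug builds; and the _PAR form uses the CAS-based MS_PAR_SET_MARK_BIT
 * so that exactly one of several racing workers enqueues the object.
 */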
1029
1030 static void
1031 pin_major_object (char *obj, SgenGrayQueue *queue)
1032 {
1033         MSBlockInfo *block = MS_BLOCK_FOR_OBJ (obj);
1034         block->has_pinned = TRUE;
1035         MS_MARK_OBJECT_AND_ENQUEUE (obj, block, queue);
1036 }
1037
1038 #include "sgen-major-copy-object.h"
1039
1040 #ifdef SGEN_PARALLEL_MARK
1041 static void
1042 major_copy_or_mark_object (void **ptr, SgenGrayQueue *queue)
1043 {
1044         void *obj = *ptr;
1045         mword objsize;
1046         MSBlockInfo *block;
1047         MonoVTable *vt;
1048
1049         HEAVY_STAT (++stat_copy_object_called_major);
1050
1051         DEBUG (9, g_assert (obj));
1052         DEBUG (9, g_assert (current_collection_generation == GENERATION_OLD));
1053
1054         if (sgen_ptr_in_nursery (obj)) {
1055                 int word, bit;
1056                 gboolean has_references;
1057                 void *destination;
1058                 mword vtable_word = *(mword*)obj;
1059                 vt = (MonoVTable*)(vtable_word & ~SGEN_VTABLE_BITS_MASK);
1060
1061                 if (vtable_word & SGEN_FORWARDED_BIT) {
1062                         *ptr = (void*)vt;
1063                         return;
1064                 }
1065
1066                 if (vtable_word & SGEN_PINNED_BIT)
1067                         return;
1068
1069                 /* An object in the nursery To Space has already been copied and grayed. Nothing to do. */
1070                 if (sgen_nursery_is_to_space (obj))
1071                         return;
1072
1073                 HEAVY_STAT (++stat_objects_copied_major);
1074
1075         do_copy_object:
1076                 objsize = SGEN_ALIGN_UP (sgen_par_object_get_size (vt, (MonoObject*)obj));
1077                 has_references = SGEN_VTABLE_HAS_REFERENCES (vt);
1078
1079                 destination = sgen_minor_collector.par_alloc_for_promotion (obj, objsize, has_references);
1080                 if (G_UNLIKELY (!destination)) {
1081                         if (!sgen_ptr_in_nursery (obj)) {
1082                                 int size_index;
1083                                 block = MS_BLOCK_FOR_OBJ (obj);
1084                                 size_index = block->obj_size_index;
1085                                 evacuate_block_obj_sizes [size_index] = FALSE;
1086                         }
1087
1088                         sgen_parallel_pin_or_update (ptr, obj, vt, queue);
1089                         sgen_set_pinned_from_failed_allocation (objsize);
1090                         return;
1091                 }
1092
1093                 /*
1094                  * We do this before the CAS because we want to make
1095                  * sure that if another thread sees the destination
1096                  * pointer the VTable is already in place.  Not doing
1097                  * this can crash binary protocols.
1098                  */
1099                 *(MonoVTable**)destination = vt;
1100
1101                 if (SGEN_CAS_PTR (obj, (void*)((mword)destination | SGEN_FORWARDED_BIT), vt) == vt) {
1102                         gboolean was_marked;
1103
1104                         par_copy_object_no_checks (destination, vt, obj, objsize, has_references ? queue : NULL);
1105                         obj = destination;
1106                         *ptr = obj;
1107
1108                         /*
1109                          * FIXME: If we make major_alloc_object() give
1110                          * us the block info, too, we won't have to
1111                          * re-fetch it here.
1112                          *
1113                          * FIXME (2): We should rework this to avoid all those nursery checks.
1114                          */
1115                         if (!sgen_ptr_in_nursery (obj)) { /*marking a nursery object is pretty stupid.*/
1116                                 block = MS_BLOCK_FOR_OBJ (obj);
1117                                 MS_CALC_MARK_BIT (word, bit, obj);
1118                                 DEBUG (9, g_assert (!MS_MARK_BIT (block, word, bit)));
1119                                 MS_PAR_SET_MARK_BIT (was_marked, block, word, bit);
1120                         }
1121                 } else {
1122                         /*
1123                          * FIXME: We have allocated destination, but
1124                          * we cannot use it.  Give it back to the
1125                          * allocator.
1126                          */
1127                         *(void**)destination = NULL;
1128
1129                         vtable_word = *(mword*)obj;
1130                         g_assert (vtable_word & SGEN_FORWARDED_BIT);
1131
1132                         obj = (void*)(vtable_word & ~SGEN_VTABLE_BITS_MASK);
1133
1134                         *ptr = obj;
1135
1136                         HEAVY_STAT (++stat_slots_allocated_in_vain);
1137                 }
1138         } else {
1139 #ifdef FIXED_HEAP
1140                 if (MS_PTR_IN_SMALL_MAJOR_HEAP (obj))
1141 #else
1142                 mword vtable_word = *(mword*)obj;
1143                 vt = (MonoVTable*)(vtable_word & ~SGEN_VTABLE_BITS_MASK);
1144
1145                 /* see comment in the non-parallel version below */
1146                 if (vtable_word & SGEN_FORWARDED_BIT) {
1147                         *ptr = (void*)vt;
1148                         return;
1149                 }
1150                 objsize = SGEN_ALIGN_UP (sgen_par_object_get_size (vt, (MonoObject*)obj));
1151
1152                 if (objsize <= SGEN_MAX_SMALL_OBJ_SIZE)
1153 #endif
1154                 {
1155                         int size_index;
1156
1157                         block = MS_BLOCK_FOR_OBJ (obj);
1158                         size_index = block->obj_size_index;
1159
1160                         if (!block->has_pinned && evacuate_block_obj_sizes [size_index]) {
1161                                 if (block->is_to_space)
1162                                         return;
1163
1164 #ifdef FIXED_HEAP
1165                                 {
1166                                         mword vtable_word = *(mword*)obj;
1167                                         vt = (MonoVTable*)(vtable_word & ~SGEN_VTABLE_BITS_MASK);
1168
1169                                         if (vtable_word & SGEN_FORWARDED_BIT) {
1170                                                 *ptr = (void*)vt;
1171                                                 return;
1172                                         }
1173                                 }
1174 #endif
1175
1176                                 HEAVY_STAT (++stat_major_objects_evacuated);
1177                                 goto do_copy_object;
1178                         }
1179
1180                         MS_PAR_MARK_OBJECT_AND_ENQUEUE (obj, block, queue);
1181                 } else {
1182 #ifdef FIXED_HEAP
1183                         mword vtable_word = *(mword*)obj;
1184                         vt = (MonoVTable*)(vtable_word & ~SGEN_VTABLE_BITS_MASK);
1185 #endif
1186
1187                         if (vtable_word & SGEN_PINNED_BIT)
1188                                 return;
1189                         binary_protocol_pin (obj, vt, sgen_safe_object_get_size ((MonoObject*)obj));
1190                         if (SGEN_CAS_PTR (obj, (void*)(vtable_word | SGEN_PINNED_BIT), (void*)vtable_word) == (void*)vtable_word) {
1191                                 if (SGEN_VTABLE_HAS_REFERENCES (vt))
1192                                         GRAY_OBJECT_ENQUEUE (queue, obj);
1193                         } else {
1194                                 g_assert (SGEN_OBJECT_IS_PINNED (obj));
1195                         }
1196                 }
1197         }
1198 }
1199 #else
1200 static void
1201 major_copy_or_mark_object (void **ptr, SgenGrayQueue *queue)
1202 {
1203         void *obj = *ptr;
1204         MSBlockInfo *block;
1205
1206         HEAVY_STAT (++stat_copy_object_called_major);
1207
1208         DEBUG (9, g_assert (obj));
1209         DEBUG (9, g_assert (current_collection_generation == GENERATION_OLD));
1210
1211         if (sgen_ptr_in_nursery (obj)) {
1212                 int word, bit;
1213                 char *forwarded, *old_obj;
1214
1215                 if ((forwarded = SGEN_OBJECT_IS_FORWARDED (obj))) {
1216                         *ptr = forwarded;
1217                         return;
1218                 }
1219                 if (SGEN_OBJECT_IS_PINNED (obj))
1220                         return;
1221
1222                 /* An object in the nursery To Space has already been copied and grayed. Nothing to do. */
1223                 if (sgen_nursery_is_to_space (obj))
1224                         return;
1225
1226                 HEAVY_STAT (++stat_objects_copied_major);
1227
1228         do_copy_object:
1229                 old_obj = obj;
1230                 obj = copy_object_no_checks (obj, queue);
1231                 if (G_UNLIKELY (old_obj == obj)) {
1232                         /* If we fail to evacuate an object, stop evacuating that block size - all other objects of that size will surely fail too. */
1233                         if (!sgen_ptr_in_nursery (obj)) {
1234                                 int size_index;
1235                                 block = MS_BLOCK_FOR_OBJ (obj);
1236                                 size_index = block->obj_size_index;
1237                                 evacuate_block_obj_sizes [size_index] = FALSE;
1238                                 MS_MARK_OBJECT_AND_ENQUEUE (obj, block, queue);
1239                         }
1240                         return;
1241                 }
1242                 *ptr = obj;
1243
1244                 /*
1245                  * FIXME: See comment for copy_object_no_checks().  If
1246                  * we have that, we can let the allocation function
1247                  * give us the block info, too, and we won't have to
1248                  * re-fetch it.
1249                  *
1250                  * FIXME (2): We should rework this to avoid all those nursery checks.
1251                  */
1252                 if (!sgen_ptr_in_nursery (obj)) { /*marking a nursery object is pretty stupid.*/
1253                         block = MS_BLOCK_FOR_OBJ (obj);
1254                         MS_CALC_MARK_BIT (word, bit, obj);
1255                         DEBUG (9, g_assert (!MS_MARK_BIT (block, word, bit)));
1256                         MS_SET_MARK_BIT (block, word, bit);
1257                 }
1258         } else {
1259                 char *forwarded;
1260 #ifdef FIXED_HEAP
1261                 if (MS_PTR_IN_SMALL_MAJOR_HEAP (obj))
1262 #else
1263                 mword objsize;
1264
1265                 /*
1266                  * If we don't have a fixed heap we cannot know
1267                  * whether an object is in the LOS or in the small
1268                  * object major heap without checking its size.  To do
1269                  * that, however, we need to know that we actually
1270                  * have a valid object, not a forwarding pointer, so
1271                  * we have to do this check first.
1272                  */
1273                 if ((forwarded = SGEN_OBJECT_IS_FORWARDED (obj))) {
1274                         *ptr = forwarded;
1275                         return;
1276                 }
1277
1278                 objsize = SGEN_ALIGN_UP (sgen_safe_object_get_size ((MonoObject*)obj));
1279
1280                 if (objsize <= SGEN_MAX_SMALL_OBJ_SIZE)
1281 #endif
1282                 {
1283                         int size_index;
1284                         gboolean evacuate;
1285
1286                         block = MS_BLOCK_FOR_OBJ (obj);
1287                         size_index = block->obj_size_index;
1288                         evacuate = evacuate_block_obj_sizes [size_index];
1289
1290 #ifdef FIXED_HEAP
1291                         /*
1292                          * We could also check for !block->has_pinned
1293                          * here, but it would only make an uncommon case
1294                          * faster, namely objects that are in blocks
1295                          * whose slot sizes are evacuated but which have
1296                          * pinned objects.
1297                          */
1298                         if (evacuate && (forwarded = SGEN_OBJECT_IS_FORWARDED (obj))) {
1299                                 *ptr = forwarded;
1300                                 return;
1301                         }
1302 #endif
1303
1304                         if (evacuate && !block->has_pinned) {
1305                                 g_assert (!SGEN_OBJECT_IS_PINNED (obj));
1306                                 if (block->is_to_space)
1307                                         return;
1308                                 HEAVY_STAT (++stat_major_objects_evacuated);
1309                                 goto do_copy_object;
1310                         } else {
1311                                 MS_MARK_OBJECT_AND_ENQUEUE (obj, block, queue);
1312                         }
1313                 } else {
1314                         if (SGEN_OBJECT_IS_PINNED (obj))
1315                                 return;
1316                         binary_protocol_pin (obj, (gpointer)SGEN_LOAD_VTABLE (obj), sgen_safe_object_get_size ((MonoObject*)obj));
1317                         SGEN_PIN_OBJECT (obj);
1318                         /* FIXME: only enqueue if object has references */
1319                         GRAY_OBJECT_ENQUEUE (queue, obj);
1320                 }
1321         }
1322 }
1323 #endif
1324
1325 #include "sgen-major-scan-object.h"
1326
1327 static void
1328 mark_pinned_objects_in_block (MSBlockInfo *block, SgenGrayQueue *queue)
1329 {
1330         int i;
1331         int last_index = -1;
1332
1333         if (!block->pin_queue_num_entries)
1334                 return;
1335
1336         block->has_pinned = TRUE;
1337
1338         for (i = 0; i < block->pin_queue_num_entries; ++i) {
1339                 int index = MS_BLOCK_OBJ_INDEX (block->pin_queue_start [i], block);
1340                 DEBUG (9, g_assert (index >= 0 && index < MS_BLOCK_FREE / block->obj_size));
1341                 if (index == last_index)
1342                         continue;
1343                 MS_MARK_OBJECT_AND_ENQUEUE_CHECKED (MS_BLOCK_OBJ (block, index), block, queue);
1344                 last_index = index;
1345         }
1346 }
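/*
 * Note on the loop above: pin_queue_start is assumed to hold the sorted pin
 * addresses that fall into this block (it is filled in elsewhere in sgen),
 * and those addresses may point anywhere inside an object, so consecutive
 * entries can resolve to the same slot index; the last_index check collapses
 * such duplicates so each pinned object is marked and enqueued only once.
 */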
1347
1348 static void
1349 ms_sweep (void)
1350 {
1351         int i;
1352         MSBlockInfo **iter;
1353
1354         /* statistics for evacuation */
1355         int *slots_available = alloca (sizeof (int) * num_block_obj_sizes);
1356         int *slots_used = alloca (sizeof (int) * num_block_obj_sizes);
1357         int *num_blocks = alloca (sizeof (int) * num_block_obj_sizes);
1358
1359         for (i = 0; i < num_block_obj_sizes; ++i)
1360                 slots_available [i] = slots_used [i] = num_blocks [i] = 0;
1361
1362         /* clear all the free lists */
1363         for (i = 0; i < MS_BLOCK_TYPE_MAX; ++i) {
1364                 MSBlockInfo **free_blocks = free_block_lists [i];
1365                 int j;
1366                 for (j = 0; j < num_block_obj_sizes; ++j)
1367                         free_blocks [j] = NULL;
1368         }
1369
1370         /* traverse all blocks, free and zero unmarked objects */
1371         iter = &all_blocks;
1372         while (*iter) {
1373                 MSBlockInfo *block = *iter;
1374                 int count;
1375                 gboolean have_live = FALSE;
1376                 gboolean has_pinned;
1377                 int obj_index;
1378                 int obj_size_index;
1379
1380                 obj_size_index = block->obj_size_index;
1381
1382                 has_pinned = block->has_pinned;
1383                 block->has_pinned = block->pinned;
1384
1385                 block->is_to_space = FALSE;
1386
1387                 count = MS_BLOCK_FREE / block->obj_size;
1388                 block->free_list = NULL;
1389
1390                 for (obj_index = 0; obj_index < count; ++obj_index) {
1391                         int word, bit;
1392                         void *obj = MS_BLOCK_OBJ (block, obj_index);
1393
1394                         MS_CALC_MARK_BIT (word, bit, obj);
1395                         if (MS_MARK_BIT (block, word, bit)) {
1396                                 DEBUG (9, g_assert (MS_OBJ_ALLOCED (obj, block)));
1397                                 have_live = TRUE;
1398                                 if (!has_pinned)
1399                                         ++slots_used [obj_size_index];
1400                         } else {
1401                                 /* an unmarked object */
1402                                 if (MS_OBJ_ALLOCED (obj, block)) {
1403                                         binary_protocol_empty (obj, block->obj_size);
1404                                         memset (obj, 0, block->obj_size);
1405                                 }
1406                                 *(void**)obj = block->free_list;
1407                                 block->free_list = obj;
1408                         }
1409                 }
1410
1411                 /* reset mark bits */
1412                 memset (block->mark_words, 0, sizeof (mword) * MS_NUM_MARK_WORDS);
1413
1414                 /*
1415                  * FIXME: reverse free list so that it's in address
1416                  * order
1417                  */
1418
1419                 if (have_live) {
1420                         if (!has_pinned) {
1421                                 ++num_blocks [obj_size_index];
1422                                 slots_available [obj_size_index] += count;
1423                         }
1424
1425                         iter = &block->next;
1426
1427                         /*
1428                          * If there are free slots in the block, add
1429                          * the block to the corresponding free list.
1430                          */
1431                         if (block->free_list) {
1432                                 MSBlockInfo **free_blocks = FREE_BLOCKS (block->pinned, block->has_references);
1433                                 int index = MS_BLOCK_OBJ_SIZE_INDEX (block->obj_size);
1434                                 block->next_free = free_blocks [index];
1435                                 free_blocks [index] = block;
1436                         }
1437
1438                         update_heap_boundaries_for_block (block);
1439                 } else {
1440                         /*
1441                          * Blocks without live objects are removed from the
1442                          * block list and freed.
1443                          */
1444                         *iter = block->next;
1445
1446 #ifdef FIXED_HEAP
1447                         ms_free_block (block);
1448 #else
1449                         ms_free_block (block->block);
1450
1451                         sgen_free_internal (block, INTERNAL_MEM_MS_BLOCK_INFO);
1452 #endif
1453
1454                         --num_major_sections;
1455                 }
1456         }
1457
1458         for (i = 0; i < num_block_obj_sizes; ++i) {
1459                 float usage = slots_available [i] ? (float)slots_used [i] / (float)slots_available [i] : 1.0f;
1460                 if (num_blocks [i] > 5 && usage < evacuation_threshold) {
1461                         evacuate_block_obj_sizes [i] = TRUE;
1462                         /*
1463                         g_print ("slot size %d - %d of %d used\n",
1464                                         block_obj_sizes [i], slots_used [i], slots_available [i]);
1465                         */
1466                 } else {
1467                         evacuate_block_obj_sizes [i] = FALSE;
1468                 }
1469         }
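        /*
         * Worked example (the numbers are illustrative, not taken from this
         * file): if a size class ends the sweep with num_blocks == 10,
         * slots_available == 10000 and slots_used == 2000, then usage == 0.2;
         * with an evacuation_threshold of, say, 0.66 that class is marked for
         * evacuation, so the next major collection copies its survivors into
         * fresh blocks instead of marking them in place.
         */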
1470
1471         have_swept = TRUE;
1472 }
1473
1474 static mono_native_thread_return_t
1475 ms_sweep_thread_func (void *dummy)
1476 {
1477         g_assert (concurrent_sweep);
1478
1479         for (;;) {
1480                 int result;
1481
1482                 while ((result = MONO_SEM_WAIT (&ms_sweep_cmd_semaphore)) != 0) {
1483                         if (errno != EINTR)
1484                                 g_error ("MONO_SEM_WAIT FAILED with %d errno %d (%s)", result, errno, strerror (errno));
1485                 }
1486
1487                 ms_sweep ();
1488
1489                 ms_signal_sweep_done ();
1490         }
1491
1492         return NULL;
1493 }
1494
1495 static void
1496 major_sweep (void)
1497 {
1498         if (concurrent_sweep) {
1499                 g_assert (ms_sweep_thread);
1500                 ms_signal_sweep_command ();
1501         } else {
1502                 ms_sweep ();
1503         }
1504 }
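/*
 * Sketch of the concurrent sweep handshake as it appears from this file:
 * major_sweep () calls ms_signal_sweep_command (), which wakes the sweep
 * thread blocked on ms_sweep_cmd_semaphore in ms_sweep_thread_func (); the
 * thread runs ms_sweep () and then calls ms_signal_sweep_done ().  Code that
 * needs a swept heap (e.g. major_start_nursery_collection ()) blocks in
 * ms_wait_for_sweep_done () until that happens.
 */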
1505
1506 static int count_pinned_ref;
1507 static int count_pinned_nonref;
1508 static int count_nonpinned_ref;
1509 static int count_nonpinned_nonref;
1510
1511 static void
1512 count_nonpinned_callback (char *obj, size_t size, void *data)
1513 {
1514         MonoVTable *vtable = (MonoVTable*)LOAD_VTABLE (obj);
1515
1516         if (vtable->klass->has_references)
1517                 ++count_nonpinned_ref;
1518         else
1519                 ++count_nonpinned_nonref;
1520 }
1521
1522 static void
1523 count_pinned_callback (char *obj, size_t size, void *data)
1524 {
1525         MonoVTable *vtable = (MonoVTable*)LOAD_VTABLE (obj);
1526
1527         if (vtable->klass->has_references)
1528                 ++count_pinned_ref;
1529         else
1530                 ++count_pinned_nonref;
1531 }
1532
1533 static void __attribute__ ((unused))
1534 count_ref_nonref_objs (void)
1535 {
1536         int total;
1537
1538         count_pinned_ref = 0;
1539         count_pinned_nonref = 0;
1540         count_nonpinned_ref = 0;
1541         count_nonpinned_nonref = 0;
1542
1543         major_iterate_objects (TRUE, FALSE, count_nonpinned_callback, NULL);
1544         major_iterate_objects (FALSE, TRUE, count_pinned_callback, NULL);
1545
1546         total = count_pinned_nonref + count_nonpinned_nonref + count_pinned_ref + count_nonpinned_ref;
1547
1548         g_print ("ref: %d pinned %d non-pinned   non-ref: %d pinned %d non-pinned  --  %.1f\n",
1549                         count_pinned_ref, count_nonpinned_ref,
1550                         count_pinned_nonref, count_nonpinned_nonref,
1551                         (count_pinned_nonref + count_nonpinned_nonref) * 100.0 / total);
1552 }
1553
1554 static int
1555 ms_calculate_block_obj_sizes (double factor, int *arr)
1556 {
1557         double target_size = sizeof (MonoObject);
1558         int num_sizes = 0;
1559         int last_size = 0;
1560
1561         do {
1562                 int target_count = ceil (MS_BLOCK_FREE / target_size);
1563                 int size = MIN ((MS_BLOCK_FREE / target_count) & ~(SGEN_ALLOC_ALIGN - 1), SGEN_MAX_SMALL_OBJ_SIZE);
1564
1565                 if (size != last_size) {
1566                         if (arr)
1567                                 arr [num_sizes] = size;
1568                         ++num_sizes;
1569                         last_size = size;
1570                 }
1571
1572                 target_size *= factor;
1573         } while (last_size < SGEN_MAX_SMALL_OBJ_SIZE);
1574
1575         return num_sizes;
1576 }
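/*
 * Illustration only (the concrete numbers are assumptions, not values taken
 * from this file): on a 64-bit build with SGEN_ALLOC_ALIGN == 8,
 * sizeof (MonoObject) == 16 and MS_BLOCK_FREE == 16384 - 16 == 16368, the
 * first iteration computes target_count == ceil (16368 / 16) == 1023 and
 * size == (16368 / 1023) & ~7 == 16.  Each further iteration scales
 * target_size by the factor and records a new slot size only when the
 * aligned result changes, so the table is a roughly geometric series capped
 * at SGEN_MAX_SMALL_OBJ_SIZE.
 */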
1577
1578 /* only valid during minor collections */
1579 static int old_num_major_sections;
1580
1581 static void
1582 major_start_nursery_collection (void)
1583 {
1584         ms_wait_for_sweep_done ();
1585
1586 #ifdef MARKSWEEP_CONSISTENCY_CHECK
1587         consistency_check ();
1588 #endif
1589
1590         old_num_major_sections = num_major_sections;
1591 }
1592
1593 static void
1594 major_finish_nursery_collection (void)
1595 {
1596 #ifdef MARKSWEEP_CONSISTENCY_CHECK
1597         consistency_check ();
1598 #endif
1599         sgen_register_major_sections_alloced (num_major_sections - old_num_major_sections);
1600 }
1601
1602 static void
1603 major_start_major_collection (void)
1604 {
1605         int i;
1606
1607         ms_wait_for_sweep_done ();
1608
1609         /* clear the free lists for the block sizes we are going to evacuate */
1610         for (i = 0; i < num_block_obj_sizes; ++i) {
1611                 if (!evacuate_block_obj_sizes [i])
1612                         continue;
1613
1614                 free_block_lists [0][i] = NULL;
1615                 free_block_lists [MS_BLOCK_FLAG_REFS][i] = NULL;
1616         }
1617 }
1618
1619 static void
1620 major_finish_major_collection (void)
1621 {
1622 }
1623
1624 static void
1625 major_have_computed_minor_collection_allowance (void)
1626 {
1627 #ifndef FIXED_HEAP
1628         int section_reserve = sgen_get_minor_collection_allowance () / MS_BLOCK_SIZE;
1629
1630         g_assert (have_swept);
1631         ms_wait_for_sweep_done ();
1632         g_assert (!ms_sweep_in_progress);
1633
1634         /*
1635          * FIXME: We don't free blocks on 32 bit platforms because it
1636          * can lead to address space fragmentation, since we're
1637          * allocating blocks in larger contingents.
1638          */
1639         if (sizeof (mword) < 8)
1640                 return;
1641
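        /*
         * empty_blocks is an intrusive singly linked list: the first word of
         * each empty block holds the pointer to the next one, so the loop
         * below simply pops blocks off the head and returns them to the OS
         * until only section_reserve of them remain.
         */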
1642         while (num_empty_blocks > section_reserve) {
1643                 void *next = *(void**)empty_blocks;
1644                 sgen_free_os_memory (empty_blocks, MS_BLOCK_SIZE);
1645                 empty_blocks = next;
1646                 /*
1647                  * Need not be atomic because this is running
1648                  * single-threaded.
1649                  */
1650                 --num_empty_blocks;
1651
1652                 ++stat_major_blocks_freed;
1653         }
1654 #endif
1655 }
1656
1657 static void
1658 major_find_pin_queue_start_ends (SgenGrayQueue *queue)
1659 {
1660         MSBlockInfo *block;
1661
1662         FOREACH_BLOCK (block) {
1663                 block->pin_queue_start = sgen_find_optimized_pin_queue_area (block->block + MS_BLOCK_SKIP, block->block + MS_BLOCK_SIZE,
1664                                 &block->pin_queue_num_entries);
1665         } END_FOREACH_BLOCK;
1666 }
1667
1668 static void
1669 major_pin_objects (SgenGrayQueue *queue)
1670 {
1671         MSBlockInfo *block;
1672
1673         FOREACH_BLOCK (block) {
1674                 mark_pinned_objects_in_block (block, queue);
1675         } END_FOREACH_BLOCK;
1676 }
1677
1678 static void
1679 major_init_to_space (void)
1680 {
1681 }
1682
1683 static void
1684 major_report_pinned_memory_usage (void)
1685 {
1686         g_assert_not_reached ();
1687 }
1688
1689 static gint64
1690 major_get_used_size (void)
1691 {
1692         gint64 size = 0;
1693         MSBlockInfo *block;
1694
1695         FOREACH_BLOCK (block) {
1696                 int count = MS_BLOCK_FREE / block->obj_size;
1697                 void **iter;
1698                 size += count * block->obj_size;
1699                 for (iter = block->free_list; iter; iter = (void**)*iter)
1700                         size -= block->obj_size;
1701         } END_FOREACH_BLOCK;
1702
1703         return size;
1704 }
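/*
 * major_get_used_size () charges every block for its full capacity
 * (count * obj_size) and then subtracts one obj_size per entry on the
 * block's free list, so the result is the number of bytes occupied by
 * allocated objects.
 */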
1705
1706 static int
1707 get_num_major_sections (void)
1708 {
1709         return num_major_sections;
1710 }
1711
1712 static gboolean
1713 major_handle_gc_param (const char *opt)
1714 {
1715 #ifdef FIXED_HEAP
1716         if (g_str_has_prefix (opt, "major-heap-size=")) {
1717                 const char *arg = strchr (opt, '=') + 1;
1718                 glong size;
1719                 if (!mono_gc_parse_environment_string_extract_number (arg, &size))
1720                         return FALSE;
1721                 ms_heap_num_blocks = (size + MS_BLOCK_SIZE - 1) / MS_BLOCK_SIZE;
1722                 g_assert (ms_heap_num_blocks > 0);
1723                 return TRUE;
1724         } else
1725 #endif
1726         if (g_str_has_prefix (opt, "evacuation-threshold=")) {
1727                 const char *arg = strchr (opt, '=') + 1;
1728                 int percentage = atoi (arg);
1729                 if (percentage < 0 || percentage > 100) {
1730                         fprintf (stderr, "evacuation-threshold must be an integer in the range 0-100.\n");
1731                         exit (1);
1732                 }
1733                 evacuation_threshold = (float)percentage / 100.0;
1734                 return TRUE;
1735         } else if (!strcmp (opt, "concurrent-sweep")) {
1736                 concurrent_sweep = TRUE;
1737                 return TRUE;
1738         } else if (!strcmp (opt, "no-concurrent-sweep")) {
1739                 concurrent_sweep = FALSE;
1740                 return TRUE;
1741         }
1742
1743         return FALSE;
1744 }
1745
1746 static void
1747 major_print_gc_param_usage (void)
1748 {
1749         fprintf (stderr,
1750                         ""
1751 #ifdef FIXED_HEAP
1752                         "  major-heap-size=N (where N is an integer, possibly with a k, m or g suffix)\n"
1753 #endif
1754                         "  evacuation-threshold=P (where P is a percentage, an integer in 0-100)\n"
1755                         "  (no-)concurrent-sweep\n"
1756                         );
1757 }
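/*
 * Usage sketch (assuming these options reach major_handle_gc_param () via
 * the usual MONO_GC_PARAMS environment variable):
 *
 *     MONO_GC_PARAMS=major=marksweep,evacuation-threshold=66,concurrent-sweep mono app.exe
 *
 * would select this collector, set evacuation_threshold to 0.66 and have
 * post_param_init () start the background sweep thread.
 */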
1758
1759 #ifdef SGEN_HAVE_CARDTABLE
1760 static void
1761 major_iterate_live_block_ranges (sgen_cardtable_block_callback callback)
1762 {
1763         MSBlockInfo *block;
1764
1765         FOREACH_BLOCK (block) {
1766                 if (block->has_references)
1767                         callback ((mword)block->block, MS_BLOCK_SIZE);
1768         } END_FOREACH_BLOCK;
1769 }
1770
1771 #ifdef HEAVY_STATISTICS
1772 extern long long marked_cards;
1773 extern long long scanned_cards;
1774 extern long long scanned_objects;
1775 extern long long remarked_cards;
1776 #endif
1777
1778 #define CARD_WORDS_PER_BLOCK (CARDS_PER_BLOCK / SIZEOF_VOID_P)
1779 /*
1780  * MS blocks are 16K aligned.
1781  * Cardtables are 4K aligned, at least.
1782  * This means that the cardtable of a given block is 32 bytes aligned.
1783  */
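/*
 * For illustration (assuming the common CARD_SIZE_IN_BYTES of 512): a 16K
 * block covers CARDS_PER_BLOCK == 32 cards, i.e. 32 consecutive card bytes,
 * which on 64 bit is CARD_WORDS_PER_BLOCK == 4 mwords.  That 32-byte
 * alignment is what allows initial_skip_card () below to scan the cards one
 * word at a time.
 */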
1784 static guint8*
1785 initial_skip_card (guint8 *card_data)
1786 {
1787         mword *cards = (mword*)card_data;
1788         mword card;
1789         int i;
1790         for (i = 0; i < CARD_WORDS_PER_BLOCK; ++i) {
1791                 card = cards [i];
1792                 if (card)
1793                         break;
1794         }
1795
1796         if (i == CARD_WORDS_PER_BLOCK)
1797                 return card_data + CARDS_PER_BLOCK;
1798
1799 #if defined(__i386__) && defined(__GNUC__)
1800         return card_data + i * 4 +  (__builtin_ffs (card) - 1) / 8;
1801 #elif defined(__x86_64__) && defined(__GNUC__)
1802         return card_data + i * 8 +  (__builtin_ffsll (card) - 1) / 8;
1803 #elif defined(__s390x__) && defined(__GNUC__)
1804         return card_data + i * 8 +  (__builtin_ffsll (GUINT64_TO_LE(card)) - 1) / 8;
1805 #else
1806         for (i = i * SIZEOF_VOID_P; i < CARDS_PER_BLOCK; ++i) {
1807                 if (card_data [i])
1808                         return &card_data [i];
1809         }
1810         return card_data;
1811 #endif
1812 }
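/*
 * The ffs builtins above find the first dirty card byte within the first
 * non-zero word.  Illustrative example (little-endian): if only the card at
 * byte offset 5 of word i is dirty, the set bits of that word lie in bits
 * 40-47, __builtin_ffsll () returns a value in 41..48, and
 * (__builtin_ffsll (card) - 1) / 8 == 5, i.e. the byte index of that card.
 */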
1813
1814
1815 static G_GNUC_UNUSED guint8*
1816 skip_card (guint8 *card_data, guint8 *card_data_end)
1817 {
1818         while (card_data < card_data_end && !*card_data)
1819                 ++card_data;
1820         return card_data;
1821 }
1822
1823 #define MS_BLOCK_OBJ_INDEX_FAST(o,b,os) (((char*)(o) - ((b) + MS_BLOCK_SKIP)) / (os))
1824 #define MS_BLOCK_OBJ_FAST(b,os,i)                       ((b) + MS_BLOCK_SKIP + (os) * (i))
1825 #define MS_OBJ_ALLOCED_FAST(o,b)                (*(void**)(o) && (*(char**)(o) < (b) || *(char**)(o) >= (b) + MS_BLOCK_SIZE))
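/*
 * MS_OBJ_ALLOCED_FAST relies on the free-list representation built by
 * ms_sweep (): a free slot's first word is either NULL or a pointer to the
 * next free slot, which necessarily lies inside the same block, while an
 * allocated object's first word is its vtable pointer, which lies outside
 * the block.  "Non-NULL and pointing outside [b, b + MS_BLOCK_SIZE)"
 * therefore identifies an allocated object without touching block metadata.
 */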
1826
1827 static void
1828 major_scan_card_table (SgenGrayQueue *queue)
1829 {
1830         MSBlockInfo *block;
1831         ScanObjectFunc scan_func = sgen_get_current_object_ops ()->scan_object;
1832
1833         FOREACH_BLOCK (block) {
1834                 int block_obj_size;
1835                 char *block_start;
1836
1837                 if (!block->has_references)
1838                         continue;
1839
1840                 block_obj_size = block->obj_size;
1841                 block_start = block->block;
1842
1843                 if (block_obj_size >= CARD_SIZE_IN_BYTES) {
1844                         guint8 *cards;
1845 #ifndef SGEN_HAVE_OVERLAPPING_CARDS
1846                         guint8 cards_data [CARDS_PER_BLOCK];
1847 #endif
1848                         char *obj, *end, *base;
1849
1850                         /* We can avoid the extra copy since the remark card table was cleared beforehand. */
1851 #ifdef SGEN_HAVE_OVERLAPPING_CARDS
1852                         cards = sgen_card_table_get_card_scan_address ((mword)block_start);
1853 #else
1854                         cards = cards_data;
1855                         if (!sgen_card_table_get_card_data (cards_data, (mword)block_start, CARDS_PER_BLOCK))
1856                                 continue;
1857 #endif
1858
1859                         obj = (char*)MS_BLOCK_OBJ_FAST (block_start, block_obj_size, 0);
1860                         end = block_start + MS_BLOCK_SIZE;
1861                         base = sgen_card_table_align_pointer (obj);
1862
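                        /*
                         * Objects in this branch are at least one card long;
                         * (obj - base) >> CARD_BITS turns the object's address
                         * into an index into this block's cards, and
                         * sgen_cardtable_scan_object () then presumably scans
                         * only the parts of the object whose cards are marked.
                         */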
1863                         while (obj < end) {
1864                                 if (MS_OBJ_ALLOCED_FAST (obj, block_start)) {
1865                                         int card_offset = (obj - base) >> CARD_BITS;
1866                                         sgen_cardtable_scan_object (obj, block_obj_size, cards + card_offset, queue);
1867                                 }
1868                                 obj += block_obj_size;
1869                         }
1870                 } else {
1871                         guint8 *card_data, *card_base;
1872                         guint8 *card_data_end;
1873
1874                         /*
1875                          * This is safe in the face of card aliasing for the following reason:
1876                          *
1877                          * Major blocks are 16k aligned, or 32 cards aligned.
1878                          * Card aliasing happens in powers of two, so as long as major blocks are aligned to their
1879                          * size, they won't overflow the card table overlap modulus.
1880                          */
1881                         card_data = card_base = sgen_card_table_get_card_scan_address ((mword)block_start);
1882                         card_data_end = card_data + CARDS_PER_BLOCK;
1883
1884                         for (card_data = initial_skip_card (card_data); card_data < card_data_end; ++card_data) { //card_data = skip_card (card_data + 1, card_data_end)) {
1885                                 int index;
1886                                 int idx = card_data - card_base;
1887                                 char *start = (char*)(block_start + idx * CARD_SIZE_IN_BYTES);
1888                                 char *end = start + CARD_SIZE_IN_BYTES;
1889                                 char *obj;
1890
1891                                 HEAVY_STAT (++scanned_cards);
1892
1893                                 if (!*card_data)
1894                                         continue;
1895
1896                                 HEAVY_STAT (++marked_cards);
1897
1898                                 sgen_card_table_prepare_card_for_scanning (card_data);
1899
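                                /*
                                 * Small objects can straddle card boundaries, so
                                 * scanning starts at the object whose slot contains
                                 * the card's first byte: MS_BLOCK_OBJ_INDEX_FAST
                                 * truncates, yielding the last object that begins at
                                 * or before `start` (idx == 0 is special-cased
                                 * because the block header sits before the first
                                 * object).
                                 */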
1900                                 if (idx == 0)
1901                                         index = 0;
1902                                 else
1903                                         index = MS_BLOCK_OBJ_INDEX_FAST (start, block_start, block_obj_size);
1904
1905                                 obj = (char*)MS_BLOCK_OBJ_FAST (block_start, block_obj_size, index);
1906                                 while (obj < end) {
1907                                         if (MS_OBJ_ALLOCED_FAST (obj, block_start)) {
1908                                                 HEAVY_STAT (++scanned_objects);
1909                                                 scan_func (obj, queue);
1910                                         }
1911                                         obj += block_obj_size;
1912                                 }
1913                                 HEAVY_STAT (if (*card_data) ++remarked_cards);
1914                         }
1915                 }
1916         } END_FOREACH_BLOCK;
1917 }
1918 #endif
1919
1920 static gboolean
1921 major_is_worker_thread (MonoNativeThreadId thread)
1922 {
1923         if (concurrent_sweep)
1924                 return thread == ms_sweep_thread;
1925         else
1926                 return FALSE;
1927 }
1928
1929 static void
1930 alloc_free_block_lists (MSBlockInfo ***lists)
1931 {
1932         int i;
1933         for (i = 0; i < MS_BLOCK_TYPE_MAX; ++i)
1934                 lists [i] = sgen_alloc_internal_dynamic (sizeof (MSBlockInfo*) * num_block_obj_sizes, INTERNAL_MEM_MS_TABLES);
1935 }
1936
1937 #ifdef SGEN_PARALLEL_MARK
1938 static void*
1939 major_alloc_worker_data (void)
1940 {
1941         /* FIXME: free this when the workers come down */
1942         MSBlockInfo ***lists = malloc (sizeof (MSBlockInfo**) * MS_BLOCK_TYPE_MAX);
1943         alloc_free_block_lists (lists);
1944         return lists;
1945 }
1946
1947 static void
1948 major_init_worker_thread (void *data)
1949 {
1950         MSBlockInfo ***lists = data;
1951         int i;
1952
1953         g_assert (lists && lists != free_block_lists);
1954         for (i = 0; i < MS_BLOCK_TYPE_MAX; ++i) {
1955                 int j;
1956                 for (j = 0; j < num_block_obj_sizes; ++j)
1957                         g_assert (!lists [i][j]);
1958         }
1959
1960 #ifdef HAVE_KW_THREAD
1961         workers_free_block_lists = data;
1962 #else
1963         mono_native_tls_set_value (workers_free_block_lists_key, data);
1964 #endif
1965 }
1966
1967 static void
1968 major_reset_worker_data (void *data)
1969 {
1970         MSBlockInfo ***lists = data;
1971         int i;
1972         for (i = 0; i < MS_BLOCK_TYPE_MAX; ++i) {
1973                 int j;
1974                 for (j = 0; j < num_block_obj_sizes; ++j)
1975                         lists [i][j] = NULL;
1976         }
1977 }
1978 #endif
1979
1980 #undef pthread_create
1981
1982 static void
1983 post_param_init (void)
1984 {
1985         if (concurrent_sweep) {
1986                 if (!mono_native_thread_create (&ms_sweep_thread, ms_sweep_thread_func, NULL)) {
1987                         fprintf (stderr, "Error: Could not create sweep thread.\n");
1988                         exit (1);
1989                 }
1990         }
1991 }
1992
1993 void
1994 #ifdef SGEN_PARALLEL_MARK
1995 #ifdef FIXED_HEAP
1996 sgen_marksweep_fixed_par_init
1997 #else
1998 sgen_marksweep_par_init
1999 #endif
2000 #else
2001 #ifdef FIXED_HEAP
2002 sgen_marksweep_fixed_init
2003 #else
2004 sgen_marksweep_init
2005 #endif
2006 #endif
2007         (SgenMajorCollector *collector)
2008 {
2009         int i;
2010
2011 #ifndef FIXED_HEAP
2012         sgen_register_fixed_internal_mem_type (INTERNAL_MEM_MS_BLOCK_INFO, sizeof (MSBlockInfo));
2013 #endif
2014
2015         num_block_obj_sizes = ms_calculate_block_obj_sizes (MS_BLOCK_OBJ_SIZE_FACTOR, NULL);
2016         block_obj_sizes = sgen_alloc_internal_dynamic (sizeof (int) * num_block_obj_sizes, INTERNAL_MEM_MS_TABLES);
2017         ms_calculate_block_obj_sizes (MS_BLOCK_OBJ_SIZE_FACTOR, block_obj_sizes);
2018
2019         evacuate_block_obj_sizes = sgen_alloc_internal_dynamic (sizeof (gboolean) * num_block_obj_sizes, INTERNAL_MEM_MS_TABLES);
2020         for (i = 0; i < num_block_obj_sizes; ++i)
2021                 evacuate_block_obj_sizes [i] = FALSE;
2022
2023         /*
2024         {
2025                 int i;
2026                 g_print ("block object sizes:\n");
2027                 for (i = 0; i < num_block_obj_sizes; ++i)
2028                         g_print ("%d\n", block_obj_sizes [i]);
2029         }
2030         */
2031
2032         alloc_free_block_lists (free_block_lists);
2033
2034         for (i = 0; i < MS_NUM_FAST_BLOCK_OBJ_SIZE_INDEXES; ++i)
2035                 fast_block_obj_size_indexes [i] = ms_find_block_obj_size_index (i * 8);
2036         for (i = 0; i < MS_NUM_FAST_BLOCK_OBJ_SIZE_INDEXES * 8; ++i)
2037                 g_assert (MS_BLOCK_OBJ_SIZE_INDEX (i) == ms_find_block_obj_size_index (i));
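        /*
         * fast_block_obj_size_indexes maps a size divided by 8 directly to its
         * size-class index, presumably so MS_BLOCK_OBJ_SIZE_INDEX can use a
         * plain table lookup for small sizes; the assertion loop above checks
         * that it agrees with the slower ms_find_block_obj_size_index () for
         * every size the table covers.
         */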
2038
2039 #ifdef SGEN_PARALLEL_MARK
2040         LOCK_INIT (ms_block_list_mutex);
2041 #endif
2042
2043         mono_counters_register ("# major blocks allocated", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_major_blocks_alloced);
2044         mono_counters_register ("# major blocks freed", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_major_blocks_freed);
2045         mono_counters_register ("# major objects evacuated", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_major_objects_evacuated);
2046         mono_counters_register ("Wait for sweep time", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &stat_time_wait_for_sweep);
2047 #ifdef SGEN_PARALLEL_MARK
2048 #ifndef HAVE_KW_THREAD
2049         mono_native_tls_alloc (&workers_free_block_lists_key, NULL);
2050 #endif
2051 #endif
2052
2053         /*
2054          * FIXME: These are superfluous if concurrent sweep is
2055          * disabled.  We might want to create them lazily.
2056          */
2057         MONO_SEM_INIT (&ms_sweep_cmd_semaphore, 0);
2058         MONO_SEM_INIT (&ms_sweep_done_semaphore, 0);
2059
2060         collector->section_size = MAJOR_SECTION_SIZE;
2061 #ifdef SGEN_PARALLEL_MARK
2062         collector->is_parallel = TRUE;
2063         collector->alloc_worker_data = major_alloc_worker_data;
2064         collector->init_worker_thread = major_init_worker_thread;
2065         collector->reset_worker_data = major_reset_worker_data;
2066 #else
2067         collector->is_parallel = FALSE;
2068 #endif
2069         collector->supports_cardtable = TRUE;
2070
2071         collector->have_swept = &have_swept;
2072
2073         collector->alloc_heap = major_alloc_heap;
2074         collector->is_object_live = major_is_object_live;
2075         collector->alloc_small_pinned_obj = major_alloc_small_pinned_obj;
2076         collector->alloc_degraded = major_alloc_degraded;
2077
2078         collector->alloc_object = major_alloc_object;
2079 #ifdef SGEN_PARALLEL_MARK
2080         collector->par_alloc_object = major_par_alloc_object;
2081 #endif
2082         collector->free_pinned_object = free_pinned_object;
2083         collector->iterate_objects = major_iterate_objects;
2084         collector->free_non_pinned_object = major_free_non_pinned_object;
2085         collector->find_pin_queue_start_ends = major_find_pin_queue_start_ends;
2086         collector->pin_objects = major_pin_objects;
2087         collector->pin_major_object = pin_major_object;
2088 #ifdef SGEN_HAVE_CARDTABLE
2089         collector->scan_card_table = major_scan_card_table;
2090         collector->iterate_live_block_ranges = (void*)(void*) major_iterate_live_block_ranges;
2091 #endif
2092         collector->init_to_space = major_init_to_space;
2093         collector->sweep = major_sweep;
2094         collector->check_scan_starts = major_check_scan_starts;
2095         collector->dump_heap = major_dump_heap;
2096         collector->get_used_size = major_get_used_size;
2097         collector->start_nursery_collection = major_start_nursery_collection;
2098         collector->finish_nursery_collection = major_finish_nursery_collection;
2099         collector->start_major_collection = major_start_major_collection;
2100         collector->finish_major_collection = major_finish_major_collection;
2101         collector->have_computed_minor_collection_allowance = major_have_computed_minor_collection_allowance;
2102         collector->ptr_is_in_non_pinned_space = major_ptr_is_in_non_pinned_space;
2103         collector->obj_is_from_pinned_alloc = obj_is_from_pinned_alloc;
2104         collector->report_pinned_memory_usage = major_report_pinned_memory_usage;
2105         collector->get_num_major_sections = get_num_major_sections;
2106         collector->handle_gc_param = major_handle_gc_param;
2107         collector->print_gc_param_usage = major_print_gc_param_usage;
2108         collector->is_worker_thread = major_is_worker_thread;
2109         collector->post_param_init = post_param_init;
2110         collector->is_valid_object = major_is_valid_object;
2111
2112         collector->major_ops.copy_or_mark_object = major_copy_or_mark_object;
2113         collector->major_ops.scan_object = major_scan_object;
2114
2115 #ifdef SGEN_HAVE_CARDTABLE
2116         /* The card table requires major blocks to be aligned to 8 cards. */
2117         g_assert ((MS_BLOCK_SIZE % (8 * CARD_SIZE_IN_BYTES)) == 0);
2118 #endif
2119 }
2120
2121 #endif