1 /*
2  * sgen-marksweep.c: The Mark&Sweep major collector.
3  *
4  * Author:
5  *      Mark Probst <mark.probst@gmail.com>
6  *
7  * Copyright 2009-2010 Novell, Inc.
8  * 
9  * Permission is hereby granted, free of charge, to any person obtaining
10  * a copy of this software and associated documentation files (the
11  * "Software"), to deal in the Software without restriction, including
12  * without limitation the rights to use, copy, modify, merge, publish,
13  * distribute, sublicense, and/or sell copies of the Software, and to
14  * permit persons to whom the Software is furnished to do so, subject to
15  * the following conditions:
16  * 
17  * The above copyright notice and this permission notice shall be
18  * included in all copies or substantial portions of the Software.
19  * 
20  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
21  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
22  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
23  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
24  * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
25  * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
26  * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
27  */
28
29 #include "config.h"
30
31 #ifdef HAVE_SGEN_GC
32
33 #include <math.h>
34 #include <errno.h>
35
36 #include "utils/mono-counters.h"
37 #include "utils/mono-semaphore.h"
38 #include "utils/mono-time.h"
39 #include "metadata/object-internals.h"
40 #include "metadata/profiler-private.h"
41
42 #include "metadata/sgen-gc.h"
43 #include "metadata/sgen-protocol.h"
44 #include "metadata/sgen-cardtable.h"
45 #include "metadata/gc-internal.h"
46
47 #define MS_BLOCK_SIZE   (16*1024)
48 #define MS_BLOCK_SIZE_SHIFT     14
49 #define MAJOR_SECTION_SIZE      MS_BLOCK_SIZE
50 #define CARDS_PER_BLOCK (MS_BLOCK_SIZE / CARD_SIZE_IN_BYTES)
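/*
 * Major heap blocks are 16 KiB: MS_BLOCK_SIZE == 1 << MS_BLOCK_SIZE_SHIFT,
 * so the start of an object's block can be found by masking the low
 * MS_BLOCK_SIZE_SHIFT bits of its address (see MS_BLOCK_DATA_FOR_OBJ below).
 * CARDS_PER_BLOCK is the number of card-table cards covering one block.
 */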
51
52 #ifdef FIXED_HEAP
53 #define MS_DEFAULT_HEAP_NUM_BLOCKS      (32 * 1024) /* 512 MB */
54 #endif
55
56 /*
57  * Don't allocate single blocks; allocate a batch of this many
58  * blocks in one go.
59  */
60 #define MS_BLOCK_ALLOC_NUM      32
61
62 /*
63  * Number of bytes before the first object in a block.  At the start
64  * of a block is the MSBlockHeader, then optional padding, then come
65  * the objects, so this must be >= sizeof (MSBlockHeader).
66  */
67 #ifdef FIXED_HEAP
68 #define MS_BLOCK_SKIP   0
69 #else
70 #define MS_BLOCK_SKIP   16
71 #endif
72
73 #define MS_BLOCK_FREE   (MS_BLOCK_SIZE - MS_BLOCK_SKIP)
74
75 #define MS_NUM_MARK_WORDS       ((MS_BLOCK_SIZE / SGEN_ALLOC_ALIGN + sizeof (mword) * 8 - 1) / (sizeof (mword) * 8))
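/*
 * The mark bitmap has one bit per SGEN_ALLOC_ALIGN bytes of block space,
 * packed into mwords; the formula above rounds up to a whole number of
 * mark words.
 */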
76
77 #if SGEN_MAX_SMALL_OBJ_SIZE > MS_BLOCK_FREE / 2
78 #error MAX_SMALL_OBJ_SIZE must be at most MS_BLOCK_FREE / 2
79 #endif
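/*
 * Keeping the maximum small object size at or below MS_BLOCK_FREE / 2
 * guarantees that every block holds at least two object slots (see the
 * count >= 2 assertion in ms_alloc_block).
 */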
80
81 typedef struct _MSBlockInfo MSBlockInfo;
82 struct _MSBlockInfo {
83         int obj_size;
84         int obj_size_index;
85         int pin_queue_num_entries;
86         unsigned int pinned : 1;
87         unsigned int has_references : 1;
88         unsigned int has_pinned : 1;    /* means cannot evacuate */
89         unsigned int is_to_space : 1;
90 #ifdef FIXED_HEAP
91         unsigned int used : 1;
92         unsigned int zeroed : 1;
93 #endif
94         MSBlockInfo *next;
95         char *block;
96         void **free_list;
97         MSBlockInfo *next_free;
98         void **pin_queue_start;
99         mword mark_words [MS_NUM_MARK_WORDS];
100 };
101
102 #ifdef FIXED_HEAP
103 static int ms_heap_num_blocks = MS_DEFAULT_HEAP_NUM_BLOCKS;
104
105 static char *ms_heap_start;
106 static char *ms_heap_end;
107
108 #define MS_PTR_IN_SMALL_MAJOR_HEAP(p)   ((char*)(p) >= ms_heap_start && (char*)(p) < ms_heap_end)
109
110 /* array of all block infos in the system */
111 static MSBlockInfo *block_infos;
112 #endif
113
114 #define MS_BLOCK_OBJ(b,i)               ((b)->block + MS_BLOCK_SKIP + (b)->obj_size * (i))
115 #define MS_BLOCK_DATA_FOR_OBJ(o)        ((char*)((mword)(o) & ~(mword)(MS_BLOCK_SIZE - 1)))
116
117 #ifdef FIXED_HEAP
118 #define MS_BLOCK_FOR_OBJ(o)             (&block_infos [(mword)((char*)(o) - ms_heap_start) >> MS_BLOCK_SIZE_SHIFT])
119 #else
120 typedef struct {
121         MSBlockInfo *info;
122 } MSBlockHeader;
123
124 #define MS_BLOCK_FOR_OBJ(o)             (((MSBlockHeader*)MS_BLOCK_DATA_FOR_OBJ ((o)))->info)
125 #endif
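/*
 * MS_BLOCK_DATA_FOR_OBJ relies on block data being MS_BLOCK_SIZE aligned:
 * masking the low bits of an object pointer yields the start of its block.
 * With FIXED_HEAP the MSBlockInfo is found by indexing block_infos;
 * otherwise it is read from the MSBlockHeader stored at the start of the
 * block data.
 */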
126
127 #define MS_BLOCK_OBJ_INDEX(o,b) (((char*)(o) - ((b)->block + MS_BLOCK_SKIP)) / (b)->obj_size)
128
129 #define MS_CALC_MARK_BIT(w,b,o)         do {                            \
130                 int i = ((char*)(o) - MS_BLOCK_DATA_FOR_OBJ ((o))) >> SGEN_ALLOC_ALIGN_BITS; \
131                 if (sizeof (mword) == 4) {                              \
132                         (w) = i >> 5;                                   \
133                         (b) = i & 31;                                   \
134                 } else {                                                \
135                         (w) = i >> 6;                                   \
136                         (b) = i & 63;                                   \
137                 }                                                       \
138         } while (0)
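/*
 * MS_CALC_MARK_BIT: i is the object's offset within its block in
 * SGEN_ALLOC_ALIGN units; the word/bit pair is i divided by, and modulo,
 * the number of bits in an mword.  For example (assuming 8-byte
 * allocation alignment and a 64-bit mword), an object at offset 0x1230
 * gives i = 0x246 = 582, so word = 9 and bit = 6.
 */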
139
140 #define MS_MARK_BIT(bl,w,b)     ((bl)->mark_words [(w)] & (1L << (b)))
141 #define MS_SET_MARK_BIT(bl,w,b) ((bl)->mark_words [(w)] |= (1L << (b)))
142 #define MS_PAR_SET_MARK_BIT(was_marked,bl,w,b)  do {                    \
143                 mword __old = (bl)->mark_words [(w)];                   \
144                 mword __bitmask = 1L << (b);                            \
145                 if (__old & __bitmask) {                                \
146                         was_marked = TRUE;                              \
147                         break;                                          \
148                 }                                                       \
149                 if (SGEN_CAS_PTR ((gpointer*)&(bl)->mark_words [(w)],   \
150                                                 (gpointer)(__old | __bitmask), \
151                                                 (gpointer)__old) ==     \
152                                 (gpointer)__old) {                      \
153                         was_marked = FALSE;                             \
154                         break;                                          \
155                 }                                                       \
156         } while (1)
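/*
 * MS_PAR_SET_MARK_BIT atomically sets the mark bit and tells the caller
 * via was_marked whether it was already set.  The CAS loop retries if
 * another thread modified the same mark word concurrently.
 */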
157
158 #define MS_OBJ_ALLOCED(o,b)     (*(void**)(o) && (*(char**)(o) < (b)->block || *(char**)(o) >= (b)->block + MS_BLOCK_SIZE))
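/*
 * A slot is considered allocated if its first word is non-NULL and does
 * not point back into the same block: a free slot's first word is either
 * NULL or a free-list link within the block, whereas a live object's
 * first word is a vtable pointer, which lives outside the block.
 */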
159
160 #define MS_BLOCK_OBJ_SIZE_FACTOR        (sqrt (2.0))
161
162 /*
163  * This way we can look up block object size indexes for sizes up to
164  * 256 bytes with a single load.
165  */
166 #define MS_NUM_FAST_BLOCK_OBJ_SIZE_INDEXES      32
167
168 static int *block_obj_sizes;
169 static int num_block_obj_sizes;
170 static int fast_block_obj_size_indexes [MS_NUM_FAST_BLOCK_OBJ_SIZE_INDEXES];
171
172 #define MS_BLOCK_FLAG_PINNED    1
173 #define MS_BLOCK_FLAG_REFS      2
174
175 #define MS_BLOCK_TYPE_MAX       4
176
177 #ifdef SGEN_PARALLEL_MARK
178 static LOCK_DECLARE (ms_block_list_mutex);
179 #define LOCK_MS_BLOCK_LIST mono_mutex_lock (&ms_block_list_mutex)
180 #define UNLOCK_MS_BLOCK_LIST mono_mutex_unlock (&ms_block_list_mutex)
181 #endif
182
183 static gboolean *evacuate_block_obj_sizes;
184 static float evacuation_threshold = 0.666;
185
186 static gboolean concurrent_sweep = FALSE;
187 static gboolean have_swept;
188
189 /* all allocated blocks in the system */
190 static MSBlockInfo *all_blocks;
191
192 #ifdef FIXED_HEAP
193 /* non-allocated block free-list */
194 static MSBlockInfo *empty_blocks = NULL;
195 #else
196 /* non-allocated block free-list */
197 static void *empty_blocks = NULL;
198 static int num_empty_blocks = 0;
199 #endif
200
201 #define FOREACH_BLOCK(bl)       for ((bl) = all_blocks; (bl); (bl) = (bl)->next) {
202 #define END_FOREACH_BLOCK       }
203
204 static int num_major_sections = 0;
205 /* one free block list for each block object size */
206 static MSBlockInfo **free_block_lists [MS_BLOCK_TYPE_MAX];
207
208 #ifdef SGEN_PARALLEL_MARK
209 #ifdef HAVE_KW_THREAD
210 static __thread MSBlockInfo ***workers_free_block_lists;
211 #else
212 static MonoNativeTlsKey workers_free_block_lists_key;
213 #endif
214 #endif
215
216 static long long stat_major_blocks_alloced = 0;
217 static long long stat_major_blocks_freed = 0;
218 static long long stat_major_objects_evacuated = 0;
219 static long long stat_time_wait_for_sweep = 0;
220
221 static gboolean ms_sweep_in_progress = FALSE;
222 static MonoNativeThreadId ms_sweep_thread;
223 static MonoSemType ms_sweep_cmd_semaphore;
224 static MonoSemType ms_sweep_done_semaphore;
225
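/*
 * Concurrent sweep handshake: the collector posts ms_sweep_cmd_semaphore
 * to start a sweep on the dedicated sweep thread, which posts
 * ms_sweep_done_semaphore when it is finished.  Paths that touch block
 * metadata (e.g. major_alloc_small_pinned_obj, major_alloc_degraded,
 * major_iterate_objects) call ms_wait_for_sweep_done first.
 */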
226 static void
227 ms_signal_sweep_command (void)
228 {
229         if (!concurrent_sweep)
230                 return;
231
232         g_assert (!ms_sweep_in_progress);
233         ms_sweep_in_progress = TRUE;
234         MONO_SEM_POST (&ms_sweep_cmd_semaphore);
235 }
236
237 static void
238 ms_signal_sweep_done (void)
239 {
240         if (!concurrent_sweep)
241                 return;
242
243         MONO_SEM_POST (&ms_sweep_done_semaphore);
244 }
245
246 static void
247 ms_wait_for_sweep_done (void)
248 {
249         SGEN_TV_DECLARE (atv);
250         SGEN_TV_DECLARE (btv);
251         int result;
252
253         if (!concurrent_sweep)
254                 return;
255
256         if (!ms_sweep_in_progress)
257                 return;
258
259         SGEN_TV_GETTIME (atv);
260         while ((result = MONO_SEM_WAIT (&ms_sweep_done_semaphore)) != 0) {
261                 if (errno != EINTR)
262                         g_error ("MONO_SEM_WAIT");
263         }
264         SGEN_TV_GETTIME (btv);
265         stat_time_wait_for_sweep += SGEN_TV_ELAPSED (atv, btv);
266
267         g_assert (ms_sweep_in_progress);
268         ms_sweep_in_progress = FALSE;
269 }
270
271 static int
272 ms_find_block_obj_size_index (int size)
273 {
274         int i;
275         DEBUG (9, g_assert (size <= SGEN_MAX_SMALL_OBJ_SIZE));
276         for (i = 0; i < num_block_obj_sizes; ++i)
277                 if (block_obj_sizes [i] >= size)
278                         return i;
279         g_error ("no object of size %d\n", size);
280 }
281
282 #define FREE_BLOCKS_FROM(lists,p,r)     (lists [((p) ? MS_BLOCK_FLAG_PINNED : 0) | ((r) ? MS_BLOCK_FLAG_REFS : 0)])
283 #define FREE_BLOCKS(p,r)                (FREE_BLOCKS_FROM (free_block_lists, (p), (r)))
284 #ifdef SGEN_PARALLEL_MARK
285 #ifdef HAVE_KW_THREAD
286 #define FREE_BLOCKS_LOCAL(p,r)          (FREE_BLOCKS_FROM (workers_free_block_lists, (p), (r)))
287 #else
288 #define FREE_BLOCKS_LOCAL(p,r)          (FREE_BLOCKS_FROM (((MSBlockInfo***)(mono_native_tls_get_value (workers_free_block_lists_key))), (p), (r)))
289 #endif
290 #else
291 //#define FREE_BLOCKS_LOCAL(p,r)                (FREE_BLOCKS_FROM (free_block_lists, (p), (r)))
292 #endif
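/*
 * Free block lists are indexed first by the combination of the pinned
 * and has-references flags (hence MS_BLOCK_TYPE_MAX == 4) and then by
 * object size index.  In the parallel collector each worker additionally
 * keeps thread-local free block lists.
 */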
293
294 #define MS_BLOCK_OBJ_SIZE_INDEX(s)                              \
295         (((s)+7)>>3 < MS_NUM_FAST_BLOCK_OBJ_SIZE_INDEXES ?      \
296          fast_block_obj_size_indexes [((s)+7)>>3] :             \
297          ms_find_block_obj_size_index ((s)))
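/*
 * (s + 7) >> 3 rounds the size up to 8-byte granularity, so sizes below
 * MS_NUM_FAST_BLOCK_OBJ_SIZE_INDEXES * 8 = 256 bytes are resolved with a
 * single table load; e.g. MS_BLOCK_OBJ_SIZE_INDEX (24) reads
 * fast_block_obj_size_indexes [3].  Larger sizes fall back to the linear
 * search in ms_find_block_obj_size_index.
 */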
298
299 #ifdef FIXED_HEAP
300 static void*
301 major_alloc_heap (mword nursery_size, mword nursery_align, int the_nursery_bits)
302 {
303         char *nursery_start;
304         mword major_heap_size = ms_heap_num_blocks * MS_BLOCK_SIZE;
305         mword alloc_size = nursery_size + major_heap_size;
306         int i;
307
308         g_assert (ms_heap_num_blocks > 0);
309         g_assert (nursery_size % MS_BLOCK_SIZE == 0);
310         if (nursery_align)
311                 g_assert (nursery_align % MS_BLOCK_SIZE == 0);
312
313         nursery_start = sgen_alloc_os_memory_aligned (alloc_size, nursery_align ? nursery_align : MS_BLOCK_SIZE, TRUE);
314         ms_heap_start = nursery_start + nursery_size;
315         ms_heap_end = ms_heap_start + major_heap_size;
316
317         block_infos = sgen_alloc_internal_dynamic (sizeof (MSBlockInfo) * ms_heap_num_blocks, INTERNAL_MEM_MS_BLOCK_INFO);
318
319         for (i = 0; i < ms_heap_num_blocks; ++i) {
320                 block_infos [i].block = ms_heap_start + i * MS_BLOCK_SIZE;
321                 if (i < ms_heap_num_blocks - 1)
322                         block_infos [i].next_free = &block_infos [i + 1];
323                 else
324                         block_infos [i].next_free = NULL;
325                 block_infos [i].zeroed = TRUE;
326         }
327
328         empty_blocks = &block_infos [0];
329
330         return nursery_start;
331 }
332 #else
333 static void*
334 major_alloc_heap (mword nursery_size, mword nursery_align, int the_nursery_bits)
335 {
336         char *start;
337         if (nursery_align)
338                 start = sgen_alloc_os_memory_aligned (nursery_size, nursery_align, TRUE);
339         else
340                 start = sgen_alloc_os_memory (nursery_size, TRUE);
341
342         return start;
343 }
344 #endif
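/*
 * With FIXED_HEAP the nursery and the whole major heap are reserved in a
 * single aligned allocation and all block infos are created up front;
 * otherwise only the nursery is allocated here and major blocks are
 * acquired on demand through ms_get_empty_block.
 */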
345
346 static void
347 update_heap_boundaries_for_block (MSBlockInfo *block)
348 {
349         sgen_update_heap_boundaries ((mword)block->block, (mword)block->block + MS_BLOCK_SIZE);
350 }
351
352 #ifdef FIXED_HEAP
353 static MSBlockInfo*
354 ms_get_empty_block (void)
355 {
356         MSBlockInfo *block;
357
358         g_assert (empty_blocks);
359
360         do {
361                 block = empty_blocks;
362         } while (SGEN_CAS_PTR ((gpointer*)&empty_blocks, block->next_free, block) != block);
363
364         block->used = TRUE;
365
366         if (!block->zeroed)
367                 memset (block->block, 0, MS_BLOCK_SIZE);
368
369         return block;
370 }
371
372 static void
373 ms_free_block (MSBlockInfo *block)
374 {
375         block->next_free = empty_blocks;
376         empty_blocks = block;
377         block->used = FALSE;
378         block->zeroed = FALSE;
379         sgen_release_space (MS_BLOCK_SIZE, SPACE_MAJOR);
380 }
381 #else
382 static void*
383 ms_get_empty_block (void)
384 {
385         char *p;
386         int i;
387         void *block, *empty, *next;
388
389  retry:
390         if (!empty_blocks) {
391                 p = sgen_alloc_os_memory_aligned (MS_BLOCK_SIZE * MS_BLOCK_ALLOC_NUM, MS_BLOCK_SIZE, TRUE);
392
393                 for (i = 0; i < MS_BLOCK_ALLOC_NUM; ++i) {
394                         block = p;
395                         /*
396                          * We push the blocks onto the free list one
397                          * at a time so that other threads can start
398                          * using them as quickly as possible.
399                          */
400                         do {
401                                 empty = empty_blocks;
402                                 *(void**)block = empty;
403                         } while (SGEN_CAS_PTR ((gpointer*)&empty_blocks, block, empty) != empty);
404                         p += MS_BLOCK_SIZE;
405                 }
406
407                 SGEN_ATOMIC_ADD (num_empty_blocks, MS_BLOCK_ALLOC_NUM);
408
409                 stat_major_blocks_alloced += MS_BLOCK_ALLOC_NUM;
410         }
411
412         do {
413                 empty = empty_blocks;
414                 if (!empty)
415                         goto retry;
416                 block = empty;
417                 next = *(void**)block;
418         } while (SGEN_CAS_PTR (&empty_blocks, next, empty) != empty);
419
420         SGEN_ATOMIC_ADD (num_empty_blocks, -1);
421
422         *(void**)block = NULL;
423
424         g_assert (!((mword)block & (MS_BLOCK_SIZE - 1)));
425
426         return block;
427 }
428
429 static void
430 ms_free_block (void *block)
431 {
432         void *empty;
433
434         sgen_release_space (MS_BLOCK_SIZE, SPACE_MAJOR);
435         memset (block, 0, MS_BLOCK_SIZE);
436
437         do {
438                 empty = empty_blocks;
439                 *(void**)block = empty;
440         } while (SGEN_CAS_PTR (&empty_blocks, block, empty) != empty);
441
442         SGEN_ATOMIC_ADD (num_empty_blocks, 1);
443 }
444 #endif
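/*
 * Without FIXED_HEAP, empty_blocks is a lock-free stack of unused 16 KiB
 * blocks: the first word of each empty block links to the next one, and
 * blocks are pushed and popped with CAS.  num_empty_blocks tracks the
 * stack size for the consistency check below.
 */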
445
446 //#define MARKSWEEP_CONSISTENCY_CHECK
447
448 #ifdef MARKSWEEP_CONSISTENCY_CHECK
449 static void
450 check_block_free_list (MSBlockInfo *block, int size, gboolean pinned)
451 {
452         MSBlockInfo *b;
453
454         for (; block; block = block->next_free) {
455                 g_assert (block->obj_size == size);
456                 g_assert ((pinned && block->pinned) || (!pinned && !block->pinned));
457
458                 /* blocks in the free lists must have at least
459                    one free slot */
460                 g_assert (block->free_list);
461
462 #ifdef FIXED_HEAP
463                 /* the block must not be in the empty_blocks list */
464                 for (b = empty_blocks; b; b = b->next_free)
465                         g_assert (b != block);
466 #endif
467                 /* the block must be in the all_blocks list */
468                 for (b = all_blocks; b; b = b->next) {
469                         if (b == block)
470                                 break;
471                 }
472                 g_assert (b == block);
473         }
474 }
475
476 static void
477 check_empty_blocks (void)
478 {
479 #ifndef FIXED_HEAP
480         void *p;
481         int i = 0;
482         for (p = empty_blocks; p; p = *(void**)p)
483                 ++i;
484         g_assert (i == num_empty_blocks);
485 #endif
486 }
487
488 static void
489 consistency_check (void)
490 {
491         MSBlockInfo *block;
492         int i;
493
494         /* check all blocks */
495         FOREACH_BLOCK (block) {
496                 int count = MS_BLOCK_FREE / block->obj_size;
497                 int num_free = 0;
498                 void **free;
499
500 #ifndef FIXED_HEAP
501                 /* check block header */
502                 g_assert (((MSBlockHeader*)block->block)->info == block);
503 #endif
504
505                 /* count number of free slots */
506                 for (i = 0; i < count; ++i) {
507                         void **obj = (void**) MS_BLOCK_OBJ (block, i);
508                         if (!MS_OBJ_ALLOCED (obj, block))
509                                 ++num_free;
510                 }
511
512                 /* check free list */
513                 for (free = block->free_list; free; free = (void**)*free) {
514                         g_assert (MS_BLOCK_FOR_OBJ (free) == block);
515                         --num_free;
516                 }
517                 g_assert (num_free == 0);
518
519                 /* check all mark words are zero */
520                 for (i = 0; i < MS_NUM_MARK_WORDS; ++i)
521                         g_assert (block->mark_words [i] == 0);
522         } END_FOREACH_BLOCK;
523
524         /* check free blocks */
525         for (i = 0; i < num_block_obj_sizes; ++i) {
526                 int j;
527                 for (j = 0; j < MS_BLOCK_TYPE_MAX; ++j)
528                         check_block_free_list (free_block_lists [j][i], block_obj_sizes [i], j & MS_BLOCK_FLAG_PINNED);
529         }
530
531         check_empty_blocks ();
532 }
533 #endif
534
535 static gboolean
536 ms_alloc_block (int size_index, gboolean pinned, gboolean has_references)
537 {
538         int size = block_obj_sizes [size_index];
539         int count = MS_BLOCK_FREE / size;
540         MSBlockInfo *info;
541 #ifdef SGEN_PARALLEL_MARK
542         MSBlockInfo *next;
543 #endif
544 #ifndef FIXED_HEAP
545         MSBlockHeader *header;
546 #endif
547         MSBlockInfo **free_blocks = FREE_BLOCKS (pinned, has_references);
548         char *obj_start;
549         int i;
550
551         if (!sgen_try_alloc_space (MS_BLOCK_SIZE, SPACE_MAJOR))
552                 return FALSE;
553
554 #ifdef FIXED_HEAP
555         info = ms_get_empty_block ();
556 #else
557         info = sgen_alloc_internal (INTERNAL_MEM_MS_BLOCK_INFO);
558 #endif
559
560         DEBUG (9, g_assert (count >= 2));
561
562         info->obj_size = size;
563         info->obj_size_index = size_index;
564         info->pinned = pinned;
565         info->has_references = has_references;
566         info->has_pinned = pinned;
567         info->is_to_space = (sgen_get_current_collection_generation () == GENERATION_OLD); /*FIXME WHY??? */
568 #ifndef FIXED_HEAP
569         info->block = ms_get_empty_block ();
570
571         header = (MSBlockHeader*) info->block;
572         header->info = info;
573 #endif
574
575         update_heap_boundaries_for_block (info);
576
577         /* build free list */
578         obj_start = info->block + MS_BLOCK_SKIP;
579         info->free_list = (void**)obj_start;
580         /* we're skipping the last one - it must be nulled */
581         for (i = 0; i < count - 1; ++i) {
582                 char *next_obj_start = obj_start + size;
583                 *(void**)obj_start = next_obj_start;
584                 obj_start = next_obj_start;
585         }
586         /* the last one */
587         *(void**)obj_start = NULL;
588
589 #ifdef SGEN_PARALLEL_MARK
590         do {
591                 next = info->next_free = free_blocks [size_index];
592         } while (SGEN_CAS_PTR ((void**)&free_blocks [size_index], info, next) != next);
593
594         do {
595                 next = info->next = all_blocks;
596         } while (SGEN_CAS_PTR ((void**)&all_blocks, info, next) != next);
597 #else
598         info->next_free = free_blocks [size_index];
599         free_blocks [size_index] = info;
600
601         info->next = all_blocks;
602         all_blocks = info;
603 #endif
604
605         ++num_major_sections;
606         return TRUE;
607 }
608
609 static gboolean
610 obj_is_from_pinned_alloc (char *ptr)
611 {
612         MSBlockInfo *block;
613
614         FOREACH_BLOCK (block) {
615                 if (ptr >= block->block && ptr <= block->block + MS_BLOCK_SIZE)
616                         return block->pinned;
617         } END_FOREACH_BLOCK;
618         return FALSE;
619 }
620
621 static void*
622 unlink_slot_from_free_list_uncontested (MSBlockInfo **free_blocks, int size_index)
623 {
624         MSBlockInfo *block;
625         void *obj;
626
627         block = free_blocks [size_index];
628         DEBUG (9, g_assert (block));
629
630         obj = block->free_list;
631         DEBUG (9, g_assert (obj));
632
633         block->free_list = *(void**)obj;
634         if (!block->free_list) {
635                 free_blocks [size_index] = block->next_free;
636                 block->next_free = NULL;
637         }
638
639         return obj;
640 }
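/*
 * Free slots within a block are linked through their first word.  The
 * function above pops the first slot of the first block on the per-size
 * free block list and unlinks the block once its free list is exhausted.
 */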
641
642 #ifdef SGEN_PARALLEL_MARK
643 static gboolean
644 try_remove_block_from_free_list (MSBlockInfo *block, MSBlockInfo **free_blocks, int size_index)
645 {
646         /*
647          * Try to unlink the block from the global free list.  Don't
648          * try again if we don't succeed - another thread will
649          * already have done it.
650          */
651         MSBlockInfo *next_block = block->next_free;
652         if (SGEN_CAS_PTR ((void**)&free_blocks [size_index], next_block, block) == block) {
653                 /*
654                 void *old = SGEN_CAS_PTR ((void**)&block->next_free, NULL, next_block);
655                 g_assert (old == next_block);
656                 */
657                 block->next_free = NULL;
658                 return TRUE;
659         }
660         return FALSE;
661 }
662
663 static void*
664 alloc_obj_par (int size, gboolean pinned, gboolean has_references)
665 {
666         int size_index = MS_BLOCK_OBJ_SIZE_INDEX (size);
667         MSBlockInfo **free_blocks_local = FREE_BLOCKS_LOCAL (pinned, has_references);
668         MSBlockInfo *block;
669         void *obj;
670
671         DEBUG (9, g_assert (!ms_sweep_in_progress));
672         DEBUG (9, g_assert (current_collection_generation == GENERATION_OLD));
673
674         if (free_blocks_local [size_index]) {
675         get_slot:
676                 obj = unlink_slot_from_free_list_uncontested (free_blocks_local, size_index);
677         } else {
678                 MSBlockInfo **free_blocks = FREE_BLOCKS (pinned, has_references);
679
680         get_block:
681                 block = free_blocks [size_index];
682                 if (block) {
683                         if (!try_remove_block_from_free_list (block, free_blocks, size_index))
684                                 goto get_block;
685
686                         g_assert (block->next_free == NULL);
687                         g_assert (block->free_list);
688                         block->next_free = free_blocks_local [size_index];
689                         free_blocks_local [size_index] = block;
690
691                         goto get_slot;
692                 } else {
693                         gboolean success;
694
695                         LOCK_MS_BLOCK_LIST;
696                         success = ms_alloc_block (size_index, pinned, has_references);
697                         UNLOCK_MS_BLOCK_LIST;
698
699                         if (G_UNLIKELY (!success))
700                                 return NULL;
701
702                         goto get_block;
703                 }
704         }
705
706         /*
707          * FIXME: This should not be necessary because it'll be
708          * overwritten by the vtable immediately.
709          */
710         *(void**)obj = NULL;
711
712         return obj;
713 }
714
715 static void*
716 major_par_alloc_object (int size, gboolean has_references)
717 {
718         return alloc_obj_par (size, FALSE, has_references);
719 }
720 #endif
721
722 static void*
723 alloc_obj (int size, gboolean pinned, gboolean has_references)
724 {
725         int size_index = MS_BLOCK_OBJ_SIZE_INDEX (size);
726         MSBlockInfo **free_blocks = FREE_BLOCKS (pinned, has_references);
727         void *obj;
728
729 #ifdef SGEN_PARALLEL_MARK
730         DEBUG (9, g_assert (current_collection_generation != GENERATION_OLD));
731 #endif
732
733         DEBUG (9, g_assert (!ms_sweep_in_progress));
734
735         if (!free_blocks [size_index]) {
736                 if (G_UNLIKELY (!ms_alloc_block (size_index, pinned, has_references)))
737                         return NULL;
738         }
739
740         obj = unlink_slot_from_free_list_uncontested (free_blocks, size_index);
741
742         /*
743          * FIXME: This should not be necessary because it'll be
744          * overwritten by the vtable immediately.
745          */
746         *(void**)obj = NULL;
747
748         return obj;
749 }
750
751 static void*
752 major_alloc_object (int size, gboolean has_references)
753 {
754         return alloc_obj (size, FALSE, has_references);
755 }
756
757 /*
758  * We don't free the block even if it becomes empty.  We leave that
759  * work for the next major collection.
760  *
761  * This is just called from the domain clearing code, which runs in a
762  * single thread and has the GC lock, so we don't need an extra lock.
763  */
764 static void
765 free_object (char *obj, size_t size, gboolean pinned)
766 {
767         MSBlockInfo *block = MS_BLOCK_FOR_OBJ (obj);
768         int word, bit;
769         DEBUG (9, g_assert ((pinned && block->pinned) || (!pinned && !block->pinned)));
770         DEBUG (9, g_assert (MS_OBJ_ALLOCED (obj, block)));
771         MS_CALC_MARK_BIT (word, bit, obj);
772         DEBUG (9, g_assert (!MS_MARK_BIT (block, word, bit)));
773         if (!block->free_list) {
774                 MSBlockInfo **free_blocks = FREE_BLOCKS (pinned, block->has_references);
775                 int size_index = MS_BLOCK_OBJ_SIZE_INDEX (size);
776                 DEBUG (9, g_assert (!block->next_free));
777                 block->next_free = free_blocks [size_index];
778                 free_blocks [size_index] = block;
779         }
780         memset (obj, 0, size);
781         *(void**)obj = block->free_list;
782         block->free_list = (void**)obj;
783 }
784
785 static void
786 major_free_non_pinned_object (char *obj, size_t size)
787 {
788         free_object (obj, size, FALSE);
789 }
790
791 /* size is a multiple of SGEN_ALLOC_ALIGN */
792 static void*
793 major_alloc_small_pinned_obj (size_t size, gboolean has_references)
794 {
795         void *res;
796
797         ms_wait_for_sweep_done ();
798
799         res = alloc_obj (size, TRUE, has_references);
800         /* If we failed to alloc memory, we had better try releasing
801          * some by collecting, as the pinned alloc is requested by the runtime.
802          */
803         if (!res) {
804                 sgen_collect_major_no_lock ("pinned alloc failure");
805                 res = alloc_obj (size, TRUE, has_references);
806         }
807         return res;
808 }
809
810 static void
811 free_pinned_object (char *obj, size_t size)
812 {
813         free_object (obj, size, TRUE);
814 }
815
816 /*
817  * size is already rounded up and we hold the GC lock.
818  */
819 static void*
820 major_alloc_degraded (MonoVTable *vtable, size_t size)
821 {
822         void *obj;
823         int old_num_sections;
824
825         ms_wait_for_sweep_done ();
826
827         old_num_sections = num_major_sections;
828
829         obj = alloc_obj (size, FALSE, SGEN_VTABLE_HAS_REFERENCES (vtable));
830         if (G_LIKELY (obj)) {
831                 *(MonoVTable**)obj = vtable;
832                 HEAVY_STAT (++stat_objects_alloced_degraded);
833                 HEAVY_STAT (stat_bytes_alloced_degraded += size);
834                 g_assert (num_major_sections >= old_num_sections);
835                 sgen_register_major_sections_alloced (num_major_sections - old_num_sections);
836         }
837         return obj;
838 }
839
840 #define MAJOR_OBJ_IS_IN_TO_SPACE(obj)   FALSE
841
842 /*
843  * obj is some object.  If it's not in the major heap (i.e. if it's in
844  * the nursery or LOS), return FALSE.  Otherwise return whether it's
845  * been marked or copied.
846  */
847 static gboolean
848 major_is_object_live (char *obj)
849 {
850         MSBlockInfo *block;
851         int word, bit;
852 #ifndef FIXED_HEAP
853         mword objsize;
854 #endif
855
856         if (sgen_ptr_in_nursery (obj))
857                 return FALSE;
858
859 #ifdef FIXED_HEAP
860         /* LOS */
861         if (!MS_PTR_IN_SMALL_MAJOR_HEAP (obj))
862                 return FALSE;
863 #else
864         objsize = SGEN_ALIGN_UP (sgen_safe_object_get_size ((MonoObject*)obj));
865
866         /* LOS */
867         if (objsize > SGEN_MAX_SMALL_OBJ_SIZE)
868                 return FALSE;
869 #endif
870
871         /* now we know it's in a major block */
872         block = MS_BLOCK_FOR_OBJ (obj);
873         DEBUG (9, g_assert (!block->pinned));
874         MS_CALC_MARK_BIT (word, bit, obj);
875         return MS_MARK_BIT (block, word, bit) ? TRUE : FALSE;
876 }
877
878 static gboolean
879 major_ptr_is_in_non_pinned_space (char *ptr)
880 {
881         MSBlockInfo *block;
882
883         FOREACH_BLOCK (block) {
884                 if (ptr >= block->block && ptr <= block->block + MS_BLOCK_SIZE)
885                         return !block->pinned;
886         } END_FOREACH_BLOCK;
887         return FALSE;
888 }
889
890 static void
891 major_iterate_objects (gboolean non_pinned, gboolean pinned, IterateObjectCallbackFunc callback, void *data)
892 {
893         MSBlockInfo *block;
894
895         ms_wait_for_sweep_done ();
896
897         FOREACH_BLOCK (block) {
898                 int count = MS_BLOCK_FREE / block->obj_size;
899                 int i;
900
901                 if (block->pinned && !pinned)
902                         continue;
903                 if (!block->pinned && !non_pinned)
904                         continue;
905
906                 for (i = 0; i < count; ++i) {
907                         void **obj = (void**) MS_BLOCK_OBJ (block, i);
908                         if (MS_OBJ_ALLOCED (obj, block))
909                                 callback ((char*)obj, block->obj_size, data);
910                 }
911         } END_FOREACH_BLOCK;
912 }
913
914 static void
915 major_check_scan_starts (void)
916 {
917 }
918
919 static void
920 major_dump_heap (FILE *heap_dump_file)
921 {
922         MSBlockInfo *block;
923         int *slots_available = alloca (sizeof (int) * num_block_obj_sizes);
924         int *slots_used = alloca (sizeof (int) * num_block_obj_sizes);
925         int i;
926
927         for (i = 0; i < num_block_obj_sizes; ++i)
928                 slots_available [i] = slots_used [i] = 0;
929
930         FOREACH_BLOCK (block) {
931                 int index = ms_find_block_obj_size_index (block->obj_size);
932                 int count = MS_BLOCK_FREE / block->obj_size;
933
934                 slots_available [index] += count;
935                 for (i = 0; i < count; ++i) {
936                         if (MS_OBJ_ALLOCED (MS_BLOCK_OBJ (block, i), block))
937                                 ++slots_used [index];
938                 }
939         } END_FOREACH_BLOCK;
940
941         fprintf (heap_dump_file, "<occupancies>\n");
942         for (i = 0; i < num_block_obj_sizes; ++i) {
943                 fprintf (heap_dump_file, "<occupancy size=\"%d\" available=\"%d\" used=\"%d\" />\n",
944                                 block_obj_sizes [i], slots_available [i], slots_used [i]);
945         }
946         fprintf (heap_dump_file, "</occupancies>\n");
947
948         FOREACH_BLOCK (block) {
949                 int count = MS_BLOCK_FREE / block->obj_size;
950                 int i;
951                 int start = -1;
952
953                 fprintf (heap_dump_file, "<section type=\"%s\" size=\"%zu\">\n", "old", (size_t)MS_BLOCK_FREE);
954
955                 for (i = 0; i <= count; ++i) {
956                         if ((i < count) && MS_OBJ_ALLOCED (MS_BLOCK_OBJ (block, i), block)) {
957                                 if (start < 0)
958                                         start = i;
959                         } else {
960                                 if (start >= 0) {
961                                         sgen_dump_occupied (MS_BLOCK_OBJ (block, start), MS_BLOCK_OBJ (block, i), block->block);
962                                         start = -1;
963                                 }
964                         }
965                 }
966
967                 fprintf (heap_dump_file, "</section>\n");
968         } END_FOREACH_BLOCK;
969 }
970
971 #define LOAD_VTABLE     SGEN_LOAD_VTABLE
972
973 #define MS_MARK_OBJECT_AND_ENQUEUE_CHECKED(obj,block,queue) do {        \
974                 int __word, __bit;                                      \
975                 MS_CALC_MARK_BIT (__word, __bit, (obj));                \
976                 if (!MS_MARK_BIT ((block), __word, __bit) && MS_OBJ_ALLOCED ((obj), (block))) { \
977                         MS_SET_MARK_BIT ((block), __word, __bit);       \
978                         if ((block)->has_references)                    \
979                                 GRAY_OBJECT_ENQUEUE ((queue), (obj));   \
980                         binary_protocol_mark ((obj), (gpointer)LOAD_VTABLE ((obj)), sgen_safe_object_get_size ((MonoObject*)(obj))); \
981                 }                                                       \
982         } while (0)
983 #define MS_MARK_OBJECT_AND_ENQUEUE(obj,block,queue) do {                \
984                 int __word, __bit;                                      \
985                 MS_CALC_MARK_BIT (__word, __bit, (obj));                \
986                 DEBUG (9, g_assert (MS_OBJ_ALLOCED ((obj), (block))));  \
987                 if (!MS_MARK_BIT ((block), __word, __bit)) {            \
988                         MS_SET_MARK_BIT ((block), __word, __bit);       \
989                         if ((block)->has_references)                    \
990                                 GRAY_OBJECT_ENQUEUE ((queue), (obj));   \
991                         binary_protocol_mark ((obj), (gpointer)LOAD_VTABLE ((obj)), sgen_safe_object_get_size ((MonoObject*)(obj))); \
992                 }                                                       \
993         } while (0)
994 #define MS_PAR_MARK_OBJECT_AND_ENQUEUE(obj,block,queue) do {            \
995                 int __word, __bit;                                      \
996                 gboolean __was_marked;                                  \
997                 DEBUG (9, g_assert (MS_OBJ_ALLOCED ((obj), (block))));  \
998                 MS_CALC_MARK_BIT (__word, __bit, (obj));                \
999                 MS_PAR_SET_MARK_BIT (__was_marked, (block), __word, __bit); \
1000                 if (!__was_marked) {                                    \
1001                         if ((block)->has_references)                    \
1002                                 GRAY_OBJECT_ENQUEUE ((queue), (obj));   \
1003                         binary_protocol_mark ((obj), (gpointer)LOAD_VTABLE ((obj)), sgen_safe_object_get_size ((MonoObject*)(obj))); \
1004                 }                                                       \
1005         } while (0)
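/*
 * The three marking macros differ as follows: the CHECKED variant also
 * verifies that the address refers to an allocated slot, which is needed
 * for pin queue entries that may point at free slots; the PAR variant
 * uses the atomic mark-bit setter so several workers can mark the same
 * block concurrently.
 */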
1006
1007 static void
1008 pin_major_object (char *obj, SgenGrayQueue *queue)
1009 {
1010         MSBlockInfo *block = MS_BLOCK_FOR_OBJ (obj);
1011         block->has_pinned = TRUE;
1012         MS_MARK_OBJECT_AND_ENQUEUE (obj, block, queue);
1013 }
1014
1015 #include "sgen-major-copy-object.h"
1016
1017 #ifdef SGEN_PARALLEL_MARK
1018 static void
1019 major_copy_or_mark_object (void **ptr, SgenGrayQueue *queue)
1020 {
1021         void *obj = *ptr;
1022         mword objsize;
1023         MSBlockInfo *block;
1024         MonoVTable *vt;
1025
1026         HEAVY_STAT (++stat_copy_object_called_major);
1027
1028         DEBUG (9, g_assert (obj));
1029         DEBUG (9, g_assert (current_collection_generation == GENERATION_OLD));
1030
1031         if (sgen_ptr_in_nursery (obj)) {
1032                 int word, bit;
1033                 gboolean has_references;
1034                 void *destination;
1035                 mword vtable_word = *(mword*)obj;
1036                 vt = (MonoVTable*)(vtable_word & ~SGEN_VTABLE_BITS_MASK);
1037
1038                 if (vtable_word & SGEN_FORWARDED_BIT) {
1039                         *ptr = (void*)vt;
1040                         return;
1041                 }
1042
1043                 if (vtable_word & SGEN_PINNED_BIT)
1044                         return;
1045
1046                 /* An object in the nursery To Space has already been copied and grayed. Nothing to do. */
1047                 if (sgen_nursery_is_to_space (obj))
1048                         return;
1049
1050                 HEAVY_STAT (++stat_objects_copied_major);
1051
1052         do_copy_object:
1053                 objsize = SGEN_ALIGN_UP (sgen_par_object_get_size (vt, (MonoObject*)obj));
1054                 has_references = SGEN_VTABLE_HAS_REFERENCES (vt);
1055
1056                 destination = sgen_minor_collector.par_alloc_for_promotion (obj, objsize, has_references);
1057                 if (G_UNLIKELY (!destination)) {
1058                         if (!sgen_ptr_in_nursery (obj)) {
1059                                 int size_index;
1060                                 block = MS_BLOCK_FOR_OBJ (obj);
1061                                 size_index = block->obj_size_index;
1062                                 evacuate_block_obj_sizes [size_index] = FALSE;
1063                         }
1064
1065                         sgen_parallel_pin_or_update (ptr, obj, vt, queue);
1066                         sgen_set_pinned_from_failed_allocation (objsize);
1067                         return;
1068                 }
1069
1070                 /*
1071                  * We do this before the CAS because we want to make
1072                  * sure that if another thread sees the destination
1073                  * pointer the VTable is already in place.  Not doing
1074                  * this can crash binary protocols.
1075                  */
1076                 *(MonoVTable**)destination = vt;
1077
1078                 if (SGEN_CAS_PTR (obj, (void*)((mword)destination | SGEN_FORWARDED_BIT), vt) == vt) {
1079                         gboolean was_marked;
1080
1081                         par_copy_object_no_checks (destination, vt, obj, objsize, has_references ? queue : NULL);
1082                         obj = destination;
1083                         *ptr = obj;
1084
1085                         /*
1086                          * FIXME: If we make major_alloc_object() give
1087                          * us the block info, too, we won't have to
1088                          * re-fetch it here.
1089                          *
1090                          * FIXME (2): We should rework this to avoid all those nursery checks.
1091                          */
1092                         if (!sgen_ptr_in_nursery (obj)) { /*marking a nursery object is pretty stupid.*/
1093                                 block = MS_BLOCK_FOR_OBJ (obj);
1094                                 MS_CALC_MARK_BIT (word, bit, obj);
1095                                 DEBUG (9, g_assert (!MS_MARK_BIT (block, word, bit)));
1096                                 MS_PAR_SET_MARK_BIT (was_marked, block, word, bit);
1097                         }
1098                 } else {
1099                         /*
1100                          * FIXME: We have allocated destination, but
1101                          * we cannot use it.  Give it back to the
1102                          * allocator.
1103                          */
1104                         *(void**)destination = NULL;
1105
1106                         vtable_word = *(mword*)obj;
1107                         g_assert (vtable_word & SGEN_FORWARDED_BIT);
1108
1109                         obj = (void*)(vtable_word & ~SGEN_VTABLE_BITS_MASK);
1110
1111                         *ptr = obj;
1112
1113                         HEAVY_STAT (++stat_slots_allocated_in_vain);
1114                 }
1115         } else {
1116 #ifdef FIXED_HEAP
1117                 if (MS_PTR_IN_SMALL_MAJOR_HEAP (obj))
1118 #else
1119                 mword vtable_word = *(mword*)obj;
1120                 vt = (MonoVTable*)(vtable_word & ~SGEN_VTABLE_BITS_MASK);
1121
1122                 /* see comment in the non-parallel version below */
1123                 if (vtable_word & SGEN_FORWARDED_BIT) {
1124                         *ptr = (void*)vt;
1125                         return;
1126                 }
1127                 objsize = SGEN_ALIGN_UP (sgen_par_object_get_size (vt, (MonoObject*)obj));
1128
1129                 if (objsize <= SGEN_MAX_SMALL_OBJ_SIZE)
1130 #endif
1131                 {
1132                         int size_index;
1133
1134                         block = MS_BLOCK_FOR_OBJ (obj);
1135                         size_index = block->obj_size_index;
1136
1137                         if (!block->has_pinned && evacuate_block_obj_sizes [size_index]) {
1138                                 if (block->is_to_space)
1139                                         return;
1140
1141 #ifdef FIXED_HEAP
1142                                 {
1143                                         mword vtable_word = *(mword*)obj;
1144                                         vt = (MonoVTable*)(vtable_word & ~SGEN_VTABLE_BITS_MASK);
1145
1146                                         if (vtable_word & SGEN_FORWARDED_BIT) {
1147                                                 *ptr = (void*)vt;
1148                                                 return;
1149                                         }
1150                                 }
1151 #endif
1152
1153                                 HEAVY_STAT (++stat_major_objects_evacuated);
1154                                 goto do_copy_object;
1155                         }
1156
1157                         MS_PAR_MARK_OBJECT_AND_ENQUEUE (obj, block, queue);
1158                 } else {
1159 #ifdef FIXED_HEAP
1160                         mword vtable_word = *(mword*)obj;
1161                         vt = (MonoVTable*)(vtable_word & ~SGEN_VTABLE_BITS_MASK);
1162 #endif
1163
1164                         if (vtable_word & SGEN_PINNED_BIT)
1165                                 return;
1166                         binary_protocol_pin (obj, vt, sgen_safe_object_get_size ((MonoObject*)obj));
1167                         if (SGEN_CAS_PTR (obj, (void*)(vtable_word | SGEN_PINNED_BIT), (void*)vtable_word) == (void*)vtable_word) {
1168                                 if (SGEN_VTABLE_HAS_REFERENCES (vt))
1169                                         GRAY_OBJECT_ENQUEUE (queue, obj);
1170                         } else {
1171                                 g_assert (SGEN_OBJECT_IS_PINNED (obj));
1172                         }
1173                 }
1174         }
1175 }
1176 #else
1177 static void
1178 major_copy_or_mark_object (void **ptr, SgenGrayQueue *queue)
1179 {
1180         void *obj = *ptr;
1181         MSBlockInfo *block;
1182
1183         HEAVY_STAT (++stat_copy_object_called_major);
1184
1185         DEBUG (9, g_assert (obj));
1186         DEBUG (9, g_assert (current_collection_generation == GENERATION_OLD));
1187
1188         if (sgen_ptr_in_nursery (obj)) {
1189                 int word, bit;
1190                 char *forwarded, *old_obj;
1191
1192                 if ((forwarded = SGEN_OBJECT_IS_FORWARDED (obj))) {
1193                         *ptr = forwarded;
1194                         return;
1195                 }
1196                 if (SGEN_OBJECT_IS_PINNED (obj))
1197                         return;
1198
1199                 /* An object in the nursery To Space has already been copied and grayed. Nothing to do. */
1200                 if (sgen_nursery_is_to_space (obj))
1201                         return;
1202
1203                 HEAVY_STAT (++stat_objects_copied_major);
1204
1205         do_copy_object:
1206                 old_obj = obj;
1207                 obj = copy_object_no_checks (obj, queue);
1208                 if (G_UNLIKELY (old_obj == obj)) {
1209                         /* If we fail to evacuate an object we just stop doing it for that block size, as all others will surely fail too. */
1210                         if (!sgen_ptr_in_nursery (obj)) {
1211                                 int size_index;
1212                                 block = MS_BLOCK_FOR_OBJ (obj);
1213                                 size_index = block->obj_size_index;
1214                                 evacuate_block_obj_sizes [size_index] = FALSE;
1215                                 MS_MARK_OBJECT_AND_ENQUEUE (obj, block, queue);
1216                         }
1217                         return;
1218                 }
1219                 *ptr = obj;
1220
1221                 /*
1222                  * FIXME: See comment for copy_object_no_checks().  If
1223                  * we have that, we can let the allocation function
1224                  * give us the block info, too, and we won't have to
1225                  * re-fetch it.
1226                  *
1227                  * FIXME (2): We should rework this to avoid all those nursery checks.
1228                  */
1229                 if (!sgen_ptr_in_nursery (obj)) { /*marking a nursery object is pretty stupid.*/
1230                         block = MS_BLOCK_FOR_OBJ (obj);
1231                         MS_CALC_MARK_BIT (word, bit, obj);
1232                         DEBUG (9, g_assert (!MS_MARK_BIT (block, word, bit)));
1233                         MS_SET_MARK_BIT (block, word, bit);
1234                 }
1235         } else {
1236                 char *forwarded;
1237 #ifdef FIXED_HEAP
1238                 if (MS_PTR_IN_SMALL_MAJOR_HEAP (obj))
1239 #else
1240                 mword objsize;
1241
1242                 /*
1243                  * If we don't have a fixed heap we cannot know
1244                  * whether an object is in the LOS or in the small
1245                  * object major heap without checking its size.  To do
1246                  * that, however, we need to know that we actually
1247                  * have a valid object, not a forwarding pointer, so
1248                  * we have to do this check first.
1249                  */
1250                 if ((forwarded = SGEN_OBJECT_IS_FORWARDED (obj))) {
1251                         *ptr = forwarded;
1252                         return;
1253                 }
1254
1255                 objsize = SGEN_ALIGN_UP (sgen_safe_object_get_size ((MonoObject*)obj));
1256
1257                 if (objsize <= SGEN_MAX_SMALL_OBJ_SIZE)
1258 #endif
1259                 {
1260                         int size_index;
1261                         gboolean evacuate;
1262
1263                         block = MS_BLOCK_FOR_OBJ (obj);
1264                         size_index = block->obj_size_index;
1265                         evacuate = evacuate_block_obj_sizes [size_index];
1266
1267 #ifdef FIXED_HEAP
1268                         /*
1269                          * We could also check for !block->has_pinned
1270                          * here, but it would only make an uncommon case
1271                          * faster, namely objects that are in blocks
1272                          * whose slot sizes are evacuated but which have
1273                          * pinned objects.
1274                          */
1275                         if (evacuate && (forwarded = SGEN_OBJECT_IS_FORWARDED (obj))) {
1276                                 *ptr = forwarded;
1277                                 return;
1278                         }
1279 #endif
1280
1281                         if (evacuate && !block->has_pinned) {
1282                                 g_assert (!SGEN_OBJECT_IS_PINNED (obj));
1283                                 if (block->is_to_space)
1284                                         return;
1285                                 HEAVY_STAT (++stat_major_objects_evacuated);
1286                                 goto do_copy_object;
1287                         } else {
1288                                 MS_MARK_OBJECT_AND_ENQUEUE (obj, block, queue);
1289                         }
1290                 } else {
1291                         if (SGEN_OBJECT_IS_PINNED (obj))
1292                                 return;
1293                         binary_protocol_pin (obj, (gpointer)SGEN_LOAD_VTABLE (obj), sgen_safe_object_get_size ((MonoObject*)obj));
1294                         SGEN_PIN_OBJECT (obj);
1295                         /* FIXME: only enqueue if object has references */
1296                         GRAY_OBJECT_ENQUEUE (queue, obj);
1297                 }
1298         }
1299 }
1300 #endif
1301
1302 #include "sgen-major-scan-object.h"
1303
1304 static void
1305 mark_pinned_objects_in_block (MSBlockInfo *block, SgenGrayQueue *queue)
1306 {
1307         int i;
1308         int last_index = -1;
1309
1310         if (!block->pin_queue_num_entries)
1311                 return;
1312
1313         block->has_pinned = TRUE;
1314
1315         for (i = 0; i < block->pin_queue_num_entries; ++i) {
1316                 int index = MS_BLOCK_OBJ_INDEX (block->pin_queue_start [i], block);
1317                 DEBUG (9, g_assert (index >= 0 && index < MS_BLOCK_FREE / block->obj_size));
1318                 if (index == last_index)
1319                         continue;
1320                 MS_MARK_OBJECT_AND_ENQUEUE_CHECKED (MS_BLOCK_OBJ (block, index), block, queue);
1321                 last_index = index;
1322         }
1323 }
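/*
 * The pin queue entries for a block are addresses inside the block; each
 * is mapped to its slot index, and consecutive entries that fall into the
 * same slot are marked only once (last_index).  The CHECKED marking macro
 * is used because a pinned address may refer to a free slot.
 */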
1324
1325 static void
1326 ms_sweep (void)
1327 {
1328         int i;
1329         MSBlockInfo **iter;
1330
1331         /* statistics for evacuation */
1332         int *slots_available = alloca (sizeof (int) * num_block_obj_sizes);
1333         int *slots_used = alloca (sizeof (int) * num_block_obj_sizes);
1334         int *num_blocks = alloca (sizeof (int) * num_block_obj_sizes);
1335
1336         for (i = 0; i < num_block_obj_sizes; ++i)
1337                 slots_available [i] = slots_used [i] = num_blocks [i] = 0;
1338
1339         /* clear all the free lists */
1340         for (i = 0; i < MS_BLOCK_TYPE_MAX; ++i) {
1341                 MSBlockInfo **free_blocks = free_block_lists [i];
1342                 int j;
1343                 for (j = 0; j < num_block_obj_sizes; ++j)
1344                         free_blocks [j] = NULL;
1345         }
1346
1347         /* traverse all blocks, free and zero unmarked objects */
1348         iter = &all_blocks;
1349         while (*iter) {
1350                 MSBlockInfo *block = *iter;
1351                 int count;
1352                 gboolean have_live = FALSE;
1353                 gboolean has_pinned;
1354                 int obj_index;
1355                 int obj_size_index;
1356
1357                 obj_size_index = block->obj_size_index;
1358
1359                 has_pinned = block->has_pinned;
1360                 block->has_pinned = block->pinned;
1361
1362                 block->is_to_space = FALSE;
1363
1364                 count = MS_BLOCK_FREE / block->obj_size;
1365                 block->free_list = NULL;
1366
1367                 for (obj_index = 0; obj_index < count; ++obj_index) {
1368                         int word, bit;
1369                         void *obj = MS_BLOCK_OBJ (block, obj_index);
1370
1371                         MS_CALC_MARK_BIT (word, bit, obj);
1372                         if (MS_MARK_BIT (block, word, bit)) {
1373                                 DEBUG (9, g_assert (MS_OBJ_ALLOCED (obj, block)));
1374                                 have_live = TRUE;
1375                                 if (!has_pinned)
1376                                         ++slots_used [obj_size_index];
1377                         } else {
1378                                 /* an unmarked object */
1379                                 if (MS_OBJ_ALLOCED (obj, block)) {
1380                                         binary_protocol_empty (obj, block->obj_size);
1381                                         memset (obj, 0, block->obj_size);
1382                                 }
1383                                 *(void**)obj = block->free_list;
1384                                 block->free_list = obj;
1385                         }
1386                 }
1387
1388                 /* reset mark bits */
1389                 memset (block->mark_words, 0, sizeof (mword) * MS_NUM_MARK_WORDS);
1390
1391                 /*
1392                  * FIXME: reverse free list so that it's in address
1393                  * order
1394                  */
1395
1396                 if (have_live) {
1397                         if (!has_pinned) {
1398                                 ++num_blocks [obj_size_index];
1399                                 slots_available [obj_size_index] += count;
1400                         }
1401
1402                         iter = &block->next;
1403
1404                         /*
1405                          * If there are free slots in the block, add
1406                          * the block to the corresponding free list.
1407                          */
1408                         if (block->free_list) {
1409                                 MSBlockInfo **free_blocks = FREE_BLOCKS (block->pinned, block->has_references);
1410                                 int index = MS_BLOCK_OBJ_SIZE_INDEX (block->obj_size);
1411                                 block->next_free = free_blocks [index];
1412                                 free_blocks [index] = block;
1413                         }
1414
1415                         update_heap_boundaries_for_block (block);
1416                 } else {
1417                         /*
1418                          * Blocks without live objects are removed from the
1419                          * block list and freed.
1420                          */
1421                         *iter = block->next;
1422
1423 #ifdef FIXED_HEAP
1424                         ms_free_block (block);
1425 #else
1426                         ms_free_block (block->block);
1427
1428                         sgen_free_internal (block, INTERNAL_MEM_MS_BLOCK_INFO);
1429 #endif
1430
1431                         --num_major_sections;
1432                 }
1433         }
1434
1435         for (i = 0; i < num_block_obj_sizes; ++i) {
1436                 float usage = (float)slots_used [i] / (float)slots_available [i];
1437                 if (num_blocks [i] > 5 && usage < evacuation_threshold) {
1438                         evacuate_block_obj_sizes [i] = TRUE;
1439                         /*
1440                         g_print ("slot size %d - %d of %d used\n",
1441                                         block_obj_sizes [i], slots_used [i], slots_available [i]);
1442                         */
1443                 } else {
1444                         evacuate_block_obj_sizes [i] = FALSE;
1445                 }
1446         }
1447
1448         have_swept = TRUE;
1449 }
1450
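/*
 * Body of the concurrent sweep thread: wait on the sweep command
 * semaphore (retrying if the wait is interrupted), run ms_sweep (),
 * then signal completion so collections blocked in
 * ms_wait_for_sweep_done () can continue.
 */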
1451 static mono_native_thread_return_t
1452 ms_sweep_thread_func (void *dummy)
1453 {
1454         g_assert (concurrent_sweep);
1455
1456         for (;;) {
1457                 int result;
1458
1459                 while ((result = MONO_SEM_WAIT (&ms_sweep_cmd_semaphore)) != 0) {
1460                         if (errno != EINTR)
1461                                 g_error ("MONO_SEM_WAIT failed with %d, errno %d (%s)", result, errno, strerror (errno));
1462                 }
1463
1464                 ms_sweep ();
1465
1466                 ms_signal_sweep_done ();
1467         }
1468
1469         return NULL;
1470 }
1471
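/*
 * Either hand the sweep off to the concurrent sweep thread or run it
 * synchronously on the collector thread.
 */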
1472 static void
1473 major_sweep (void)
1474 {
1475         if (concurrent_sweep) {
1476                 g_assert (ms_sweep_thread);
1477                 ms_signal_sweep_command ();
1478         } else {
1479                 ms_sweep ();
1480         }
1481 }
1482
1483 static int count_pinned_ref;
1484 static int count_pinned_nonref;
1485 static int count_nonpinned_ref;
1486 static int count_nonpinned_nonref;
1487
1488 static void
1489 count_nonpinned_callback (char *obj, size_t size, void *data)
1490 {
1491         MonoVTable *vtable = (MonoVTable*)LOAD_VTABLE (obj);
1492
1493         if (vtable->klass->has_references)
1494                 ++count_nonpinned_ref;
1495         else
1496                 ++count_nonpinned_nonref;
1497 }
1498
1499 static void
1500 count_pinned_callback (char *obj, size_t size, void *data)
1501 {
1502         MonoVTable *vtable = (MonoVTable*)LOAD_VTABLE (obj);
1503
1504         if (vtable->klass->has_references)
1505                 ++count_pinned_ref;
1506         else
1507                 ++count_pinned_nonref;
1508 }
1509
1510 static void __attribute__ ((unused))
1511 count_ref_nonref_objs (void)
1512 {
1513         int total;
1514
1515         count_pinned_ref = 0;
1516         count_pinned_nonref = 0;
1517         count_nonpinned_ref = 0;
1518         count_nonpinned_nonref = 0;
1519
1520         major_iterate_objects (TRUE, FALSE, count_nonpinned_callback, NULL);
1521         major_iterate_objects (FALSE, TRUE, count_pinned_callback, NULL);
1522
1523         total = count_pinned_nonref + count_nonpinned_nonref + count_pinned_ref + count_nonpinned_ref;
1524
1525         g_print ("ref: %d pinned %d non-pinned   non-ref: %d pinned %d non-pinned  --  %.1f\n",
1526                         count_pinned_ref, count_nonpinned_ref,
1527                         count_pinned_nonref, count_nonpinned_nonref,
1528                         (count_pinned_nonref + count_nonpinned_nonref) * 100.0 / total);
1529 }
1530
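/*
 * Compute the block object size classes.  Starting from
 * sizeof (MonoObject), each step picks the largest aligned slot size
 * that still fits ceil (MS_BLOCK_FREE / target_size) objects into a
 * block, then multiplies the target size by `factor'.  The function is
 * called once with arr == NULL to count the classes and a second time
 * to fill in the table.
 *
 * A rough illustration (assuming, for the sake of the example,
 * MS_BLOCK_FREE == 16368 and SGEN_ALLOC_ALIGN == 8): a target size of
 * 100 gives target_count == 164 and a slot size of
 * (16368 / 164) & ~7 == 96.
 */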
1531 static int
1532 ms_calculate_block_obj_sizes (double factor, int *arr)
1533 {
1534         double target_size = sizeof (MonoObject);
1535         int num_sizes = 0;
1536         int last_size = 0;
1537
1538         do {
1539                 int target_count = ceil (MS_BLOCK_FREE / target_size);
1540                 int size = MIN ((MS_BLOCK_FREE / target_count) & ~(SGEN_ALLOC_ALIGN - 1), SGEN_MAX_SMALL_OBJ_SIZE);
1541
1542                 if (size != last_size) {
1543                         if (arr)
1544                                 arr [num_sizes] = size;
1545                         ++num_sizes;
1546                         last_size = size;
1547                 }
1548
1549                 target_size *= factor;
1550         } while (last_size < SGEN_MAX_SMALL_OBJ_SIZE);
1551
1552         return num_sizes;
1553 }
1554
1555 /* only valid during minor collections */
1556 static int old_num_major_sections;
1557
1558 static void
1559 major_start_nursery_collection (void)
1560 {
1561         ms_wait_for_sweep_done ();
1562
1563 #ifdef MARKSWEEP_CONSISTENCY_CHECK
1564         consistency_check ();
1565 #endif
1566
1567         old_num_major_sections = num_major_sections;
1568 }
1569
1570 static void
1571 major_finish_nursery_collection (void)
1572 {
1573 #ifdef MARKSWEEP_CONSISTENCY_CHECK
1574         consistency_check ();
1575 #endif
1576         sgen_register_major_sections_alloced (num_major_sections - old_num_major_sections);
1577 }
1578
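/*
 * At the start of a major collection the free lists of all size
 * classes flagged for evacuation are dropped, so that objects of those
 * classes get copied into freshly allocated (to-space) blocks instead
 * of into the sparsely used ones being vacated.
 */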
1579 static void
1580 major_start_major_collection (void)
1581 {
1582         int i;
1583
1584         ms_wait_for_sweep_done ();
1585
1586         /* clear the free lists */
1587         for (i = 0; i < num_block_obj_sizes; ++i) {
1588                 if (!evacuate_block_obj_sizes [i])
1589                         continue;
1590
1591                 free_block_lists [0][i] = NULL;
1592                 free_block_lists [MS_BLOCK_FLAG_REFS][i] = NULL;
1593         }
1594 }
1595
1596 static void
1597 major_finish_major_collection (void)
1598 {
1599 }
1600
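/*
 * Called once the minor collection allowance has been computed: empty
 * blocks beyond the corresponding reserve are returned to the OS.
 * With a fixed heap there is nothing to release.
 */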
1601 static void
1602 major_have_computer_minor_collection_allowance (void)
1603 {
1604 #ifndef FIXED_HEAP
1605         int section_reserve = sgen_get_minor_collection_allowance () / MS_BLOCK_SIZE;
1606
1607         g_assert (have_swept);
1608         ms_wait_for_sweep_done ();
1609         g_assert (!ms_sweep_in_progress);
1610
1611         /*
1612          * FIXME: We don't free blocks on 32 bit platforms because it
1613          * can lead to address space fragmentation, since we're
1614          * allocating blocks in larger contingents.
1615          */
1616         if (sizeof (mword) < 8)
1617                 return;
1618
1619         while (num_empty_blocks > section_reserve) {
1620                 void *next = *(void**)empty_blocks;
1621                 sgen_free_os_memory (empty_blocks, MS_BLOCK_SIZE);
1622                 empty_blocks = next;
1623                 /*
1624                  * This need not be atomic because we are running
1625                  * single-threaded.
1626                  */
1627                 --num_empty_blocks;
1628
1629                 ++stat_major_blocks_freed;
1630         }
1631 #endif
1632 }
1633
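/*
 * For every block, record which slice of the optimized pin queue falls
 * inside the block's payload, so that pinning only has to consider the
 * entries belonging to that block.
 */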
1634 static void
1635 major_find_pin_queue_start_ends (SgenGrayQueue *queue)
1636 {
1637         MSBlockInfo *block;
1638
1639         FOREACH_BLOCK (block) {
1640                 block->pin_queue_start = sgen_find_optimized_pin_queue_area (block->block + MS_BLOCK_SKIP, block->block + MS_BLOCK_SIZE,
1641                                 &block->pin_queue_num_entries);
1642         } END_FOREACH_BLOCK;
1643 }
1644
1645 static void
1646 major_pin_objects (SgenGrayQueue *queue)
1647 {
1648         MSBlockInfo *block;
1649
1650         FOREACH_BLOCK (block) {
1651                 mark_pinned_objects_in_block (block, queue);
1652         } END_FOREACH_BLOCK;
1653 }
1654
1655 static void
1656 major_init_to_space (void)
1657 {
1658 }
1659
1660 static void
1661 major_report_pinned_memory_usage (void)
1662 {
1663         g_assert_not_reached ();
1664 }
1665
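/*
 * Bytes in use in the major heap: the full capacity of each block
 * minus one slot for every entry still on its free list.
 */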
1666 static gint64
1667 major_get_used_size (void)
1668 {
1669         gint64 size = 0;
1670         MSBlockInfo *block;
1671
1672         FOREACH_BLOCK (block) {
1673                 int count = MS_BLOCK_FREE / block->obj_size;
1674                 void **iter;
1675                 size += count * block->obj_size;
1676                 for (iter = block->free_list; iter; iter = (void**)*iter)
1677                         size -= block->obj_size;
1678         } END_FOREACH_BLOCK;
1679
1680         return size;
1681 }
1682
1683 static int
1684 get_num_major_sections (void)
1685 {
1686         return num_major_sections;
1687 }
1688
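/*
 * Parse the mark&sweep specific GC parameters: major-heap-size (fixed
 * heap only), evacuation-threshold and (no-)concurrent-sweep.
 */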
1689 static gboolean
1690 major_handle_gc_param (const char *opt)
1691 {
1692 #ifdef FIXED_HEAP
1693         if (g_str_has_prefix (opt, "major-heap-size=")) {
1694                 const char *arg = strchr (opt, '=') + 1;
1695                 glong size;
1696                 if (!mono_gc_parse_environment_string_extract_number (arg, &size))
1697                         return FALSE;
1698                 ms_heap_num_blocks = (size + MS_BLOCK_SIZE - 1) / MS_BLOCK_SIZE;
1699                 g_assert (ms_heap_num_blocks > 0);
1700                 return TRUE;
1701         } else
1702 #endif
1703         if (g_str_has_prefix (opt, "evacuation-threshold=")) {
1704                 const char *arg = strchr (opt, '=') + 1;
1705                 int percentage = atoi (arg);
1706                 if (percentage < 0 || percentage > 100) {
1707                         fprintf (stderr, "evacuation-threshold must be an integer in the range 0-100.\n");
1708                         exit (1);
1709                 }
1710                 evacuation_threshold = (float)percentage / 100.0;
1711                 return TRUE;
1712         } else if (!strcmp (opt, "concurrent-sweep")) {
1713                 concurrent_sweep = TRUE;
1714                 return TRUE;
1715         } else if (!strcmp (opt, "no-concurrent-sweep")) {
1716                 concurrent_sweep = FALSE;
1717                 return TRUE;
1718         }
1719
1720         return FALSE;
1721 }
1722
1723 static void
1724 major_print_gc_param_usage (void)
1725 {
1726         fprintf (stderr,
1727                         ""
1728 #ifdef FIXED_HEAP
1729                         "  major-heap-size=N (where N is an integer, possibly with a k, m or g suffix)\n"
1730 #endif
1731                         "  evacuation-threshold=P (where P is a percentage, an integer in 0-100)\n"
1732                         "  (no-)concurrent-sweep\n"
1733                         );
1734 }
1735
1736 #ifdef SGEN_HAVE_CARDTABLE
1737 static void
1738 major_iterate_live_block_ranges (sgen_cardtable_block_callback callback)
1739 {
1740         MSBlockInfo *block;
1741
1742         FOREACH_BLOCK (block) {
1743                 if (block->has_references)
1744                         callback ((mword)block->block, MS_BLOCK_SIZE);
1745         } END_FOREACH_BLOCK;
1746 }
1747
1748 #ifdef HEAVY_STATISTICS
1749 extern long long marked_cards;
1750 extern long long scanned_cards;
1751 extern long long scanned_objects;
1752 extern long long remarked_cards;
1753 #endif
1754
1755 #define CARD_WORDS_PER_BLOCK (CARDS_PER_BLOCK / SIZEOF_VOID_P)
1756 /*
1757  * MS blocks are 16K aligned.
1758  * Card tables are at least 4K aligned.
1759  * This means that the card data for a given block is 32-byte aligned.
1760  */
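/*
 * Thanks to that alignment we can scan a block's card data one word at
 * a time: return a pointer to the first non-zero card, or to the end
 * of the card data if every card is clean.
 */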
1761 static guint8*
1762 initial_skip_card (guint8 *card_data)
1763 {
1764         mword *cards = (mword*)card_data;
1765         mword card;
1766         int i;
1767         for (i = 0; i < CARD_WORDS_PER_BLOCK; ++i) {
1768                 card = cards [i];
1769                 if (card)
1770                         break;
1771         }
1772
1773         if (i == CARD_WORDS_PER_BLOCK)
1774                 return card_data + CARDS_PER_BLOCK;
1775
1776 #if defined(__i386__) && defined(__GNUC__)
1777         return card_data + i * 4 +  (__builtin_ffs (card) - 1) / 8;
1778 #elif defined(__x86_64__) && defined(__GNUC__)
1779         return card_data + i * 8 +  (__builtin_ffsll (card) - 1) / 8;
1780 #elif defined(__s390x__) && defined(__GNUC__)
1781         return card_data + i * 8 +  (__builtin_ffsll (GUINT64_TO_LE(card)) - 1) / 8;
1782 #else
1783         for (i = i * SIZEOF_VOID_P; i < CARDS_PER_BLOCK; ++i) {
1784                 if (card_data [i])
1785                         return &card_data [i];
1786         }
1787         return card_data;
1788 #endif
1789 }
1790
1791
1792 static G_GNUC_UNUSED guint8*
1793 skip_card (guint8 *card_data, guint8 *card_data_end)
1794 {
1795         while (card_data < card_data_end && !*card_data)
1796                 ++card_data;
1797         return card_data;
1798 }
1799
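/*
 * Fast-path variants used by the card scanning code below: they take
 * the block start address and object size directly instead of going
 * through the MSBlockInfo.  MS_OBJ_ALLOCED_FAST relies on the fact
 * that an allocated object's first word is its vtable, which lives
 * outside the block, whereas a free slot's first word is either NULL
 * or a free-list link pointing into the same block.
 */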
1800 #define MS_BLOCK_OBJ_INDEX_FAST(o,b,os) (((char*)(o) - ((b) + MS_BLOCK_SKIP)) / (os))
1801 #define MS_BLOCK_OBJ_FAST(b,os,i)                       ((b) + MS_BLOCK_SKIP + (os) * (i))
1802 #define MS_OBJ_ALLOCED_FAST(o,b)                (*(void**)(o) && (*(char**)(o) < (b) || *(char**)(o) >= (b) + MS_BLOCK_SIZE))
1803
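/*
 * Scan the dirty cards of every block that can contain references.
 * Blocks whose objects are at least a card in size are walked per
 * object, handing each object's card slice to
 * sgen_cardtable_scan_object (); blocks with smaller objects are
 * walked per dirty card, scanning every allocated object that overlaps
 * the card.
 */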
1804 static void
1805 major_scan_card_table (SgenGrayQueue *queue)
1806 {
1807         MSBlockInfo *block;
1808         ScanObjectFunc scan_func = sgen_get_current_object_ops ()->scan_object;
1809
1810         FOREACH_BLOCK (block) {
1811                 int block_obj_size;
1812                 char *block_start;
1813
1814                 if (!block->has_references)
1815                         continue;
1816
1817                 block_obj_size = block->obj_size;
1818                 block_start = block->block;
1819
1820                 if (block_obj_size >= CARD_SIZE_IN_BYTES) {
1821                         guint8 *cards;
1822 #ifndef SGEN_HAVE_OVERLAPPING_CARDS
1823                         guint8 cards_data [CARDS_PER_BLOCK];
1824 #endif
1825                         char *obj, *end, *base;
1826
1827                         /* We can avoid the extra copy since the remark cardtable was cleaned beforehand. */
1828 #ifdef SGEN_HAVE_OVERLAPPING_CARDS
1829                         cards = sgen_card_table_get_card_scan_address ((mword)block_start);
1830 #else
1831                         cards = cards_data;
1832                         if (!sgen_card_table_get_card_data (cards_data, (mword)block_start, CARDS_PER_BLOCK))
1833                                 continue;
1834 #endif
1835
1836                         obj = (char*)MS_BLOCK_OBJ_FAST (block_start, block_obj_size, 0);
1837                         end = block_start + MS_BLOCK_SIZE;
1838                         base = sgen_card_table_align_pointer (obj);
1839
1840                         while (obj < end) {
1841                                 if (MS_OBJ_ALLOCED_FAST (obj, block_start)) {
1842                                         int card_offset = (obj - base) >> CARD_BITS;
1843                                         sgen_cardtable_scan_object (obj, block_obj_size, cards + card_offset, queue);
1844                                 }
1845                                 obj += block_obj_size;
1846                         }
1847                 } else {
1848                         guint8 *card_data, *card_base;
1849                         guint8 *card_data_end;
1850
1851                         /*
1852                          * This is safe in the face of card aliasing for the following reason:
1853                          *
1854                          * Major blocks are 16k aligned, i.e. aligned to 32 cards.
1855                          * Card aliasing happens at power-of-two boundaries, so as long as major blocks are
1856                          * aligned to their own size, they won't straddle the cardtable overlap modulus.
1857                          */
1858                         card_data = card_base = sgen_card_table_get_card_scan_address ((mword)block_start);
1859                         card_data_end = card_data + CARDS_PER_BLOCK;
1860
1861                         for (card_data = initial_skip_card (card_data); card_data < card_data_end; ++card_data) { //card_data = skip_card (card_data + 1, card_data_end)) {
1862                                 int index;
1863                                 int idx = card_data - card_base;
1864                                 char *start = (char*)(block_start + idx * CARD_SIZE_IN_BYTES);
1865                                 char *end = start + CARD_SIZE_IN_BYTES;
1866                                 char *obj;
1867
1868                                 HEAVY_STAT (++scanned_cards);
1869
1870                                 if (!*card_data)
1871                                         continue;
1872
1873                                 HEAVY_STAT (++marked_cards);
1874
1875                                 sgen_card_table_prepare_card_for_scanning (card_data);
1876
1877                                 if (idx == 0)
1878                                         index = 0;
1879                                 else
1880                                         index = MS_BLOCK_OBJ_INDEX_FAST (start, block_start, block_obj_size);
1881
1882                                 obj = (char*)MS_BLOCK_OBJ_FAST (block_start, block_obj_size, index);
1883                                 while (obj < end) {
1884                                         if (MS_OBJ_ALLOCED_FAST (obj, block_start)) {
1885                                                 HEAVY_STAT (++scanned_objects);
1886                                                 scan_func (obj, queue);
1887                                         }
1888                                         obj += block_obj_size;
1889                                 }
1890                                 HEAVY_STAT (if (*card_data) ++remarked_cards);
1891                         }
1892                 }
1893         } END_FOREACH_BLOCK;
1894 }
1895 #endif
1896
1897 static gboolean
1898 major_is_worker_thread (MonoNativeThreadId thread)
1899 {
1900         if (concurrent_sweep)
1901                 return thread == ms_sweep_thread;
1902         else
1903                 return FALSE;
1904 }
1905
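/*
 * Allocate one free-block list per block type (the pinned/references
 * combinations), each indexed by block object size class.
 */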
1906 static void
1907 alloc_free_block_lists (MSBlockInfo ***lists)
1908 {
1909         int i;
1910         for (i = 0; i < MS_BLOCK_TYPE_MAX; ++i)
1911                 lists [i] = sgen_alloc_internal_dynamic (sizeof (MSBlockInfo*) * num_block_obj_sizes, INTERNAL_MEM_MS_TABLES);
1912 }
1913
1914 #ifdef SGEN_PARALLEL_MARK
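/*
 * Each parallel mark worker gets its own set of free block lists: they
 * are allocated here, installed into TLS by major_init_worker_thread ()
 * and cleared again by major_reset_worker_data (), so workers can
 * allocate copy destinations from their own lists.
 */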
1915 static void*
1916 major_alloc_worker_data (void)
1917 {
1918         /* FIXME: free this when the workers come down */
1919         MSBlockInfo ***lists = malloc (sizeof (MSBlockInfo**) * MS_BLOCK_TYPE_MAX);
1920         alloc_free_block_lists (lists);
1921         return lists;
1922 }
1923
1924 static void
1925 major_init_worker_thread (void *data)
1926 {
1927         MSBlockInfo ***lists = data;
1928         int i;
1929
1930         g_assert (lists && lists != free_block_lists);
1931         for (i = 0; i < MS_BLOCK_TYPE_MAX; ++i) {
1932                 int j;
1933                 for (j = 0; j < num_block_obj_sizes; ++j)
1934                         g_assert (!lists [i][j]);
1935         }
1936
1937 #ifdef HAVE_KW_THREAD
1938         workers_free_block_lists = data;
1939 #else
1940         mono_native_tls_set_value (workers_free_block_lists_key, data);
1941 #endif
1942 }
1943
1944 static void
1945 major_reset_worker_data (void *data)
1946 {
1947         MSBlockInfo ***lists = data;
1948         int i;
1949         for (i = 0; i < MS_BLOCK_TYPE_MAX; ++i) {
1950                 int j;
1951                 for (j = 0; j < num_block_obj_sizes; ++j)
1952                         lists [i][j] = NULL;
1953         }
1954 }
1955 #endif
1956
1957 #undef pthread_create
1958
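/*
 * Runs after the GC parameters have been parsed: if concurrent sweep
 * was requested, start the sweep thread.
 */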
1959 static void
1960 post_param_init (void)
1961 {
1962         if (concurrent_sweep) {
1963                 if (!mono_native_thread_create (&ms_sweep_thread, ms_sweep_thread_func, NULL)) {
1964                         fprintf (stderr, "Error: Could not create sweep thread.\n");
1965                         exit (1);
1966                 }
1967         }
1968 }
1969
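/*
 * Initialization entry point.  The same body is compiled under four
 * names, depending on SGEN_PARALLEL_MARK and FIXED_HEAP: it computes
 * the block object size classes, sets up the free block lists and the
 * fast size-index table, registers the statistics counters and the
 * sweep semaphores, and fills in the SgenMajorCollector vtable.
 */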
1970 void
1971 #ifdef SGEN_PARALLEL_MARK
1972 #ifdef FIXED_HEAP
1973 sgen_marksweep_fixed_par_init
1974 #else
1975 sgen_marksweep_par_init
1976 #endif
1977 #else
1978 #ifdef FIXED_HEAP
1979 sgen_marksweep_fixed_init
1980 #else
1981 sgen_marksweep_init
1982 #endif
1983 #endif
1984         (SgenMajorCollector *collector)
1985 {
1986         int i;
1987
1988 #ifndef FIXED_HEAP
1989         sgen_register_fixed_internal_mem_type (INTERNAL_MEM_MS_BLOCK_INFO, sizeof (MSBlockInfo));
1990 #endif
1991
1992         num_block_obj_sizes = ms_calculate_block_obj_sizes (MS_BLOCK_OBJ_SIZE_FACTOR, NULL);
1993         block_obj_sizes = sgen_alloc_internal_dynamic (sizeof (int) * num_block_obj_sizes, INTERNAL_MEM_MS_TABLES);
1994         ms_calculate_block_obj_sizes (MS_BLOCK_OBJ_SIZE_FACTOR, block_obj_sizes);
1995
1996         evacuate_block_obj_sizes = sgen_alloc_internal_dynamic (sizeof (gboolean) * num_block_obj_sizes, INTERNAL_MEM_MS_TABLES);
1997         for (i = 0; i < num_block_obj_sizes; ++i)
1998                 evacuate_block_obj_sizes [i] = FALSE;
1999
2000         /*
2001         {
2002                 int i;
2003                 g_print ("block object sizes:\n");
2004                 for (i = 0; i < num_block_obj_sizes; ++i)
2005                         g_print ("%d\n", block_obj_sizes [i]);
2006         }
2007         */
2008
2009         alloc_free_block_lists (free_block_lists);
2010
2011         for (i = 0; i < MS_NUM_FAST_BLOCK_OBJ_SIZE_INDEXES; ++i)
2012                 fast_block_obj_size_indexes [i] = ms_find_block_obj_size_index (i * 8);
2013         for (i = 0; i < MS_NUM_FAST_BLOCK_OBJ_SIZE_INDEXES * 8; ++i)
2014                 g_assert (MS_BLOCK_OBJ_SIZE_INDEX (i) == ms_find_block_obj_size_index (i));
2015
2016 #ifdef SGEN_PARALLEL_MARK
2017         LOCK_INIT (ms_block_list_mutex);
2018 #endif
2019
2020         mono_counters_register ("# major blocks allocated", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_major_blocks_alloced);
2021         mono_counters_register ("# major blocks freed", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_major_blocks_freed);
2022         mono_counters_register ("# major objects evacuated", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_major_objects_evacuated);
2023         mono_counters_register ("Wait for sweep time", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &stat_time_wait_for_sweep);
2024 #ifdef SGEN_PARALLEL_MARK
2025 #ifndef HAVE_KW_THREAD
2026         mono_native_tls_alloc (&workers_free_block_lists_key, NULL);
2027 #endif
2028 #endif
2029
2030         /*
2031          * FIXME: These are superfluous if concurrent sweep is
2032          * disabled.  We might want to create them lazily.
2033          */
2034         MONO_SEM_INIT (&ms_sweep_cmd_semaphore, 0);
2035         MONO_SEM_INIT (&ms_sweep_done_semaphore, 0);
2036
2037         collector->section_size = MAJOR_SECTION_SIZE;
2038 #ifdef SGEN_PARALLEL_MARK
2039         collector->is_parallel = TRUE;
2040         collector->alloc_worker_data = major_alloc_worker_data;
2041         collector->init_worker_thread = major_init_worker_thread;
2042         collector->reset_worker_data = major_reset_worker_data;
2043 #else
2044         collector->is_parallel = FALSE;
2045 #endif
2046         collector->supports_cardtable = TRUE;
2047
2048         collector->have_swept = &have_swept;
2049
2050         collector->alloc_heap = major_alloc_heap;
2051         collector->is_object_live = major_is_object_live;
2052         collector->alloc_small_pinned_obj = major_alloc_small_pinned_obj;
2053         collector->alloc_degraded = major_alloc_degraded;
2054
2055         collector->alloc_object = major_alloc_object;
2056 #ifdef SGEN_PARALLEL_MARK
2057         collector->par_alloc_object = major_par_alloc_object;
2058 #endif
2059         collector->free_pinned_object = free_pinned_object;
2060         collector->iterate_objects = major_iterate_objects;
2061         collector->free_non_pinned_object = major_free_non_pinned_object;
2062         collector->find_pin_queue_start_ends = major_find_pin_queue_start_ends;
2063         collector->pin_objects = major_pin_objects;
2064         collector->pin_major_object = pin_major_object;
2065 #ifdef SGEN_HAVE_CARDTABLE
2066         collector->scan_card_table = major_scan_card_table;
2067         collector->iterate_live_block_ranges = (void*)(void*) major_iterate_live_block_ranges;
2068 #endif
2069         collector->init_to_space = major_init_to_space;
2070         collector->sweep = major_sweep;
2071         collector->check_scan_starts = major_check_scan_starts;
2072         collector->dump_heap = major_dump_heap;
2073         collector->get_used_size = major_get_used_size;
2074         collector->start_nursery_collection = major_start_nursery_collection;
2075         collector->finish_nursery_collection = major_finish_nursery_collection;
2076         collector->start_major_collection = major_start_major_collection;
2077         collector->finish_major_collection = major_finish_major_collection;
2078         collector->have_computed_minor_collection_allowance = major_have_computer_minor_collection_allowance;
2079         collector->ptr_is_in_non_pinned_space = major_ptr_is_in_non_pinned_space;
2080         collector->obj_is_from_pinned_alloc = obj_is_from_pinned_alloc;
2081         collector->report_pinned_memory_usage = major_report_pinned_memory_usage;
2082         collector->get_num_major_sections = get_num_major_sections;
2083         collector->handle_gc_param = major_handle_gc_param;
2084         collector->print_gc_param_usage = major_print_gc_param_usage;
2085         collector->is_worker_thread = major_is_worker_thread;
2086         collector->post_param_init = post_param_init;
2087
2088         collector->major_ops.copy_or_mark_object = major_copy_or_mark_object;
2089         collector->major_ops.scan_object = major_scan_object;
2090
2091 #ifdef SGEN_HAVE_CARDTABLE
2092         /* The cardtable requires major pages to be aligned to 8 cards. */
2093         g_assert ((MS_BLOCK_SIZE % (8 * CARD_SIZE_IN_BYTES)) == 0);
2094 #endif
2095 }
2096
2097 #endif