 * sgen-marksweep.c: The Mark & Sweep major collector.
 *
 * Mark Probst <mark.probst@gmail.com>
 *
 * Copyright 2009-2010 Novell, Inc.
 * Copyright (C) 2012 Xamarin Inc
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Library General Public
 * License 2.0 as published by the Free Software Foundation;
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Library General Public License for more details.
 *
 * You should have received a copy of the GNU Library General Public
 * License 2.0 along with this library; if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#include "utils/mono-counters.h"
#include "utils/mono-semaphore.h"
#include "utils/mono-time.h"
#include "metadata/object-internals.h"
#include "metadata/profiler-private.h"

#include "metadata/sgen-gc.h"
#include "metadata/sgen-protocol.h"
#include "metadata/sgen-cardtable.h"
#include "metadata/sgen-memory-governor.h"
#include "metadata/sgen-layout-stats.h"
#include "metadata/gc-internal.h"
#if !defined(SGEN_PARALLEL_MARK) && !defined(FIXED_HEAP)
#define SGEN_HAVE_CONCURRENT_MARK
#endif

#define MS_BLOCK_SIZE (16*1024)
#define MS_BLOCK_SIZE_SHIFT 14
#define MAJOR_SECTION_SIZE MS_BLOCK_SIZE
#define CARDS_PER_BLOCK (MS_BLOCK_SIZE / CARD_SIZE_IN_BYTES)

#define MS_DEFAULT_HEAP_NUM_BLOCKS (32 * 1024) /* 512 MB */
/*
 * Don't allocate single blocks; instead, allocate a batch of this many
 * blocks in one swoop. This must be a power of two.
 */
#define MS_BLOCK_ALLOC_NUM 32

/*
 * Number of bytes before the first object in a block. At the start
 * of a block is the MSBlockHeader, then optional padding, then come
 * the objects, so this must be >= sizeof (MSBlockHeader).
 */
#define MS_BLOCK_SKIP 0
#define MS_BLOCK_SKIP 16

#define MS_BLOCK_FREE (MS_BLOCK_SIZE - MS_BLOCK_SKIP)

#define MS_NUM_MARK_WORDS ((MS_BLOCK_SIZE / SGEN_ALLOC_ALIGN + sizeof (mword) * 8 - 1) / (sizeof (mword) * 8))
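/*
 * Worked example (added note; assumes a 64-bit build where SGEN_ALLOC_ALIGN
 * is 8 and mword is 8 bytes): a 16K block contains 16384 / 8 = 2048
 * alignment granules, hence 2048 mark bits, which round up to
 * 2048 / 64 = 32 mark words per block.
 */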
#if SGEN_MAX_SMALL_OBJ_SIZE > MS_BLOCK_FREE / 2
#error MAX_SMALL_OBJ_SIZE must be at most MS_BLOCK_FREE / 2
#endif

typedef struct _MSBlockInfo MSBlockInfo;

	size_t pin_queue_num_entries;
	unsigned int pinned : 1;
	unsigned int has_references : 1;
	unsigned int has_pinned : 1;	/* means cannot evacuate */
	unsigned int is_to_space : 1;
	unsigned int swept : 1;
	unsigned int used : 1;
	unsigned int zeroed : 1;
	MSBlockInfo *next_free;
	void **pin_queue_start;
#ifdef SGEN_HAVE_CONCURRENT_MARK
	guint8 *cardtable_mod_union;
#endif
	mword mark_words [MS_NUM_MARK_WORDS];
static mword ms_heap_num_blocks = MS_DEFAULT_HEAP_NUM_BLOCKS;

static char *ms_heap_start;
static char *ms_heap_end;

#define MS_PTR_IN_SMALL_MAJOR_HEAP(p) ((char*)(p) >= ms_heap_start && (char*)(p) < ms_heap_end)

/* array of all block infos in the system */
static MSBlockInfo *block_infos;
#define MS_BLOCK_OBJ(b,i) ((b)->block + MS_BLOCK_SKIP + (b)->obj_size * (i))
#define MS_BLOCK_OBJ_FOR_SIZE(b,i,obj_size) ((b)->block + MS_BLOCK_SKIP + (obj_size) * (i))
#define MS_BLOCK_DATA_FOR_OBJ(o) ((char*)((mword)(o) & ~(mword)(MS_BLOCK_SIZE - 1)))

#define MS_BLOCK_FOR_OBJ(o) (&block_infos [(mword)((char*)(o) - ms_heap_start) >> MS_BLOCK_SIZE_SHIFT])
#define MS_BLOCK_FOR_OBJ(o) (((MSBlockHeader*)MS_BLOCK_DATA_FOR_OBJ ((o)))->info)

/* object index will always be small */
#define MS_BLOCK_OBJ_INDEX(o,b) ((int)(((char*)(o) - ((b)->block + MS_BLOCK_SKIP)) / (b)->obj_size))
//casting to int is fine since blocks are 16k
#define MS_CALC_MARK_BIT(w,b,o) do { \
		int i = ((int)((char*)(o) - MS_BLOCK_DATA_FOR_OBJ ((o)))) >> SGEN_ALLOC_ALIGN_BITS; \
		if (sizeof (mword) == 4) { \
#define MS_MARK_BIT(bl,w,b) ((bl)->mark_words [(w)] & (ONE_P << (b)))
#define MS_SET_MARK_BIT(bl,w,b) ((bl)->mark_words [(w)] |= (ONE_P << (b)))
#define MS_PAR_SET_MARK_BIT(was_marked,bl,w,b) do { \
		mword __old = (bl)->mark_words [(w)]; \
		mword __bitmask = ONE_P << (b); \
		if (__old & __bitmask) { \
		if (SGEN_CAS_PTR ((gpointer*)&(bl)->mark_words [(w)], \
				(gpointer)(__old | __bitmask), \
				(gpointer)__old) == \
			was_marked = FALSE; \

#define MS_OBJ_ALLOCED(o,b) (*(void**)(o) && (*(char**)(o) < (b)->block || *(char**)(o) >= (b)->block + MS_BLOCK_SIZE))
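/*
 * Explanatory note (added): a free slot's first word is either NULL or a
 * free-list link to another slot inside the same block, while an allocated
 * object's first word is its vtable pointer, which necessarily lies outside
 * the block. "First word points outside the block" is therefore exactly the
 * test for "slot is allocated".
 */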
#define MS_BLOCK_OBJ_SIZE_FACTOR (sqrt (2.0))

/*
 * This way we can look up block object size indexes for sizes up to
 * 256 bytes with a single load.
 */
#define MS_NUM_FAST_BLOCK_OBJ_SIZE_INDEXES 32

static int *block_obj_sizes;
static int num_block_obj_sizes;
static int fast_block_obj_size_indexes [MS_NUM_FAST_BLOCK_OBJ_SIZE_INDEXES];
#define MS_BLOCK_FLAG_PINNED 1
#define MS_BLOCK_FLAG_REFS 2

#define MS_BLOCK_TYPE_MAX 4

#ifdef SGEN_PARALLEL_MARK
static LOCK_DECLARE (ms_block_list_mutex);
#define LOCK_MS_BLOCK_LIST mono_mutex_lock (&ms_block_list_mutex)
#define UNLOCK_MS_BLOCK_LIST mono_mutex_unlock (&ms_block_list_mutex)
#endif

static gboolean *evacuate_block_obj_sizes;
static float evacuation_threshold = 0.666f;
#ifdef SGEN_HAVE_CONCURRENT_MARK
static float concurrent_evacuation_threshold = 0.666f;
static gboolean want_evacuation = FALSE;
#endif

static gboolean lazy_sweep = TRUE;
static gboolean have_swept;

#ifdef SGEN_HAVE_CONCURRENT_MARK
static gboolean concurrent_mark;
#endif

/* all allocated blocks in the system */
static MSBlockInfo *all_blocks;

#ifdef FIXED_HEAP
/* non-allocated block free-list */
static MSBlockInfo *empty_blocks = NULL;
#else
/* non-allocated block free-list */
static void *empty_blocks = NULL;
static size_t num_empty_blocks = 0;
#endif

#define FOREACH_BLOCK(bl) for ((bl) = all_blocks; (bl); (bl) = (bl)->next) {
#define END_FOREACH_BLOCK }
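/*
 * Usage sketch (added; illustrative only): FOREACH_BLOCK opens a brace that
 * END_FOREACH_BLOCK closes, so the two must always be paired:
 *
 *	MSBlockInfo *block;
 *	FOREACH_BLOCK (block) {
 *		... use block ...
 *	} END_FOREACH_BLOCK;
 *
 * It is a plain walk of the all_blocks singly-linked list.
 */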
static size_t num_major_sections = 0;
/* one free block list for each block object size */
static MSBlockInfo **free_block_lists [MS_BLOCK_TYPE_MAX];

#ifdef SGEN_PARALLEL_MARK
#ifdef HAVE_KW_THREAD
static __thread MSBlockInfo ***workers_free_block_lists;
#else
static MonoNativeTlsKey workers_free_block_lists_key;
#endif
#endif

static long long stat_major_blocks_alloced = 0;
static long long stat_major_blocks_freed = 0;
static long long stat_major_blocks_lazy_swept = 0;
static long long stat_major_objects_evacuated = 0;

#if SIZEOF_VOID_P != 8
static long long stat_major_blocks_freed_ideal = 0;
static long long stat_major_blocks_freed_less_ideal = 0;
static long long stat_major_blocks_freed_individual = 0;
static long long stat_major_blocks_alloced_less_ideal = 0;
#endif

#ifdef SGEN_COUNT_NUMBER_OF_MAJOR_OBJECTS_MARKED
static long long num_major_objects_marked = 0;
#define INC_NUM_MAJOR_OBJECTS_MARKED() (++num_major_objects_marked)
#else
#define INC_NUM_MAJOR_OBJECTS_MARKED()
#endif
sweep_block (MSBlockInfo *block, gboolean during_major_collection);

ms_find_block_obj_size_index (size_t size)
	SGEN_ASSERT (9, size <= SGEN_MAX_SMALL_OBJ_SIZE, "size %d is bigger than max small object size %d", size, SGEN_MAX_SMALL_OBJ_SIZE);
	for (i = 0; i < num_block_obj_sizes; ++i)
		if (block_obj_sizes [i] >= size)
			return i;
	g_error ("no object of size %d\n", size);
#define FREE_BLOCKS_FROM(lists,p,r) (lists [((p) ? MS_BLOCK_FLAG_PINNED : 0) | ((r) ? MS_BLOCK_FLAG_REFS : 0)])
#define FREE_BLOCKS(p,r) (FREE_BLOCKS_FROM (free_block_lists, (p), (r)))
#ifdef SGEN_PARALLEL_MARK
#ifdef HAVE_KW_THREAD
#define FREE_BLOCKS_LOCAL(p,r) (FREE_BLOCKS_FROM (workers_free_block_lists, (p), (r)))
#else
#define FREE_BLOCKS_LOCAL(p,r) (FREE_BLOCKS_FROM (((MSBlockInfo***)(mono_native_tls_get_value (workers_free_block_lists_key))), (p), (r)))
#endif
#else
//#define FREE_BLOCKS_LOCAL(p,r) (FREE_BLOCKS_FROM (free_block_lists, (p), (r)))
#endif

#define MS_BLOCK_OBJ_SIZE_INDEX(s) \
	(((s)+7)>>3 < MS_NUM_FAST_BLOCK_OBJ_SIZE_INDEXES ? \
	 fast_block_obj_size_indexes [((s)+7)>>3] : \
	 ms_find_block_obj_size_index ((s)))
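/*
 * Worked example (added): for a 40-byte request, (40+7)>>3 == 5, which is
 * below MS_NUM_FAST_BLOCK_OBJ_SIZE_INDEXES (32), so the size index comes
 * from fast_block_obj_size_indexes [5] with a single load; a 300-byte
 * request has (300+7)>>3 == 38 and falls back to the linear search in
 * ms_find_block_obj_size_index ().
 */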
major_alloc_heap (mword nursery_size, mword nursery_align, int the_nursery_bits)
	mword major_heap_size = ms_heap_num_blocks * MS_BLOCK_SIZE;
	mword alloc_size = nursery_size + major_heap_size;

	g_assert (ms_heap_num_blocks > 0);
	g_assert (nursery_size % MS_BLOCK_SIZE == 0);
	g_assert (nursery_align % MS_BLOCK_SIZE == 0);

	nursery_start = sgen_alloc_os_memory_aligned (alloc_size, nursery_align ? nursery_align : MS_BLOCK_SIZE, SGEN_ALLOC_HEAP | SGEN_ALLOC_ACTIVATE, "heap");
	ms_heap_start = nursery_start + nursery_size;
	ms_heap_end = ms_heap_start + major_heap_size;

	block_infos = sgen_alloc_internal_dynamic (sizeof (MSBlockInfo) * ms_heap_num_blocks, INTERNAL_MEM_MS_BLOCK_INFO, TRUE);

	for (i = 0; i < ms_heap_num_blocks; ++i) {
		block_infos [i].block = ms_heap_start + i * MS_BLOCK_SIZE;
		if (i < ms_heap_num_blocks - 1)
			block_infos [i].next_free = &block_infos [i + 1];
		else
			block_infos [i].next_free = NULL;
		block_infos [i].zeroed = TRUE;
	}

	empty_blocks = &block_infos [0];

	return nursery_start;
major_alloc_heap (mword nursery_size, mword nursery_align, int the_nursery_bits)
	if (nursery_align)
		start = sgen_alloc_os_memory_aligned (nursery_size, nursery_align, SGEN_ALLOC_HEAP | SGEN_ALLOC_ACTIVATE, "nursery");
	else
		start = sgen_alloc_os_memory (nursery_size, SGEN_ALLOC_HEAP | SGEN_ALLOC_ACTIVATE, "nursery");

update_heap_boundaries_for_block (MSBlockInfo *block)
	sgen_update_heap_boundaries ((mword)block->block, (mword)block->block + MS_BLOCK_SIZE);

ms_get_empty_block (void)
	g_assert (empty_blocks);
	do {
		block = empty_blocks;
	} while (SGEN_CAS_PTR ((gpointer*)&empty_blocks, block->next_free, block) != block);

	memset (block->block, 0, MS_BLOCK_SIZE);

ms_free_block (MSBlockInfo *block)
	block->next_free = empty_blocks;
	empty_blocks = block;
	block->zeroed = FALSE;
	sgen_memgov_release_space (MS_BLOCK_SIZE, SPACE_MAJOR);
ms_get_empty_block (void)
	void *block, *empty, *next;

	/*
	 * We try allocating MS_BLOCK_ALLOC_NUM blocks first. If that's
	 * unsuccessful, we halve the number of blocks and try again, until we're at
	 * 1. If that doesn't work either, we assert.
	 */
	int alloc_num = MS_BLOCK_ALLOC_NUM;
		p = sgen_alloc_os_memory_aligned (MS_BLOCK_SIZE * alloc_num, MS_BLOCK_SIZE, SGEN_ALLOC_HEAP | SGEN_ALLOC_ACTIVATE,
				alloc_num == 1 ? "major heap section" : NULL);

		for (i = 0; i < alloc_num; ++i) {
			/*
			 * We do the free list update one after the
			 * other so that other threads can use the new
			 * blocks as quickly as possible.
			 */
			do {
				empty = empty_blocks;
				*(void**)block = empty;
			} while (SGEN_CAS_PTR ((gpointer*)&empty_blocks, block, empty) != empty);
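			/*
			 * Note (added): this is the usual lock-free list
			 * push - read the head, point the new block at it,
			 * then CAS the head from the value we read to the
			 * new block, retrying on contention.
			 */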
	SGEN_ATOMIC_ADD_P (num_empty_blocks, alloc_num);

	stat_major_blocks_alloced += alloc_num;
#if SIZEOF_VOID_P != 8
	if (alloc_num != MS_BLOCK_ALLOC_NUM)
		stat_major_blocks_alloced_less_ideal += alloc_num;
#endif

		empty = empty_blocks;
		next = *(void**)block;
	} while (SGEN_CAS_PTR (&empty_blocks, next, empty) != empty);

	SGEN_ATOMIC_ADD_P (num_empty_blocks, -1);

	*(void**)block = NULL;

	g_assert (!((mword)block & (MS_BLOCK_SIZE - 1)));

ms_free_block (void *block)
	sgen_memgov_release_space (MS_BLOCK_SIZE, SPACE_MAJOR);
	memset (block, 0, MS_BLOCK_SIZE);

	do {
		empty = empty_blocks;
		*(void**)block = empty;
	} while (SGEN_CAS_PTR (&empty_blocks, block, empty) != empty);

	SGEN_ATOMIC_ADD_P (num_empty_blocks, 1);
//#define MARKSWEEP_CONSISTENCY_CHECK

#ifdef MARKSWEEP_CONSISTENCY_CHECK
check_block_free_list (MSBlockInfo *block, int size, gboolean pinned)
	for (; block; block = block->next_free) {
		g_assert (block->obj_size == size);
		g_assert ((pinned && block->pinned) || (!pinned && !block->pinned));

		/* blocks in the free lists must have at least
		g_assert (block->free_list);

		/* the block must not be in the empty_blocks list */
		for (b = empty_blocks; b; b = b->next_free)
			g_assert (b != block);

		/* the block must be in the all_blocks list */
		for (b = all_blocks; b; b = b->next) {
			g_assert (b == block);

check_empty_blocks (void)
	for (p = empty_blocks; p; p = *(void**)p)
		++i;
	g_assert (i == num_empty_blocks);

consistency_check (void)
	/* check all blocks */
	FOREACH_BLOCK (block) {
		int count = MS_BLOCK_FREE / block->obj_size;

		/* check block header */
		g_assert (((MSBlockHeader*)block->block)->info == block);

		/* count number of free slots */
		for (i = 0; i < count; ++i) {
			void **obj = (void**) MS_BLOCK_OBJ (block, i);
			if (!MS_OBJ_ALLOCED (obj, block))
				++num_free;
		}

		/* check free list */
		for (free = block->free_list; free; free = (void**)*free) {
			g_assert (MS_BLOCK_FOR_OBJ (free) == block);
			--num_free;
		}
		g_assert (num_free == 0);

		/* check all mark words are zero */
		for (i = 0; i < MS_NUM_MARK_WORDS; ++i)
			g_assert (block->mark_words [i] == 0);

	/* check free blocks */
	for (i = 0; i < num_block_obj_sizes; ++i) {
		for (j = 0; j < MS_BLOCK_TYPE_MAX; ++j)
			check_block_free_list (free_block_lists [j][i], block_obj_sizes [i], j & MS_BLOCK_FLAG_PINNED);
	}

	check_empty_blocks ();
ms_alloc_block (int size_index, gboolean pinned, gboolean has_references)
	int size = block_obj_sizes [size_index];
	int count = MS_BLOCK_FREE / size;
#ifdef SGEN_PARALLEL_MARK
	MSBlockHeader *header;
#endif
	MSBlockInfo **free_blocks = FREE_BLOCKS (pinned, has_references);

	if (!sgen_memgov_try_alloc_space (MS_BLOCK_SIZE, SPACE_MAJOR))
		return FALSE;

	info = ms_get_empty_block ();
	info = sgen_alloc_internal (INTERNAL_MEM_MS_BLOCK_INFO);

	SGEN_ASSERT (9, count >= 2, "block with %d objects, it must hold at least 2", count);

	info->obj_size = size;
	info->obj_size_index = size_index;
	info->pinned = pinned;
	info->has_references = has_references;
	info->has_pinned = pinned;
	/*
	 * Blocks that are to-space are not evacuated from. During a major collection
	 * blocks are allocated for two reasons: evacuating objects from the nursery and
	 * evacuating them from major blocks marked for evacuation. In both cases we don't
	 * want further evacuation.
	 */
	info->is_to_space = (sgen_get_current_collection_generation () == GENERATION_OLD);

	info->block = ms_get_empty_block ();

	header = (MSBlockHeader*) info->block;

#ifdef SGEN_HAVE_CONCURRENT_MARK
	info->cardtable_mod_union = NULL;
#endif

	update_heap_boundaries_for_block (info);

	/* build free list */
	obj_start = info->block + MS_BLOCK_SKIP;
	info->free_list = (void**)obj_start;
	/* we're skipping the last one - it must be nulled */
	for (i = 0; i < count - 1; ++i) {
		char *next_obj_start = obj_start + size;
		*(void**)obj_start = next_obj_start;
		obj_start = next_obj_start;
	}

	*(void**)obj_start = NULL;

#ifdef SGEN_PARALLEL_MARK
	do {
		next = info->next_free = free_blocks [size_index];
	} while (SGEN_CAS_PTR ((void**)&free_blocks [size_index], info, next) != next);

	do {
		next = info->next = all_blocks;
	} while (SGEN_CAS_PTR ((void**)&all_blocks, info, next) != next);
#else
	info->next_free = free_blocks [size_index];
	free_blocks [size_index] = info;

	info->next = all_blocks;
	all_blocks = info;
#endif

	++num_major_sections;
obj_is_from_pinned_alloc (char *ptr)
	FOREACH_BLOCK (block) {
		if (ptr >= block->block && ptr <= block->block + MS_BLOCK_SIZE)
			return block->pinned;
	} END_FOREACH_BLOCK;
	return FALSE;

unlink_slot_from_free_list_uncontested (MSBlockInfo **free_blocks, int size_index)
	block = free_blocks [size_index];
	SGEN_ASSERT (9, block, "no free block to unlink from free_blocks %p size_index %d", free_blocks, size_index);
	if (G_UNLIKELY (!block->swept)) {
		stat_major_blocks_lazy_swept ++;
		sweep_block (block, FALSE);
	}

	obj = block->free_list;
	SGEN_ASSERT (9, obj, "block %p in free list had no available object to alloc from", block);

	block->free_list = *(void**)obj;
	if (!block->free_list) {
		free_blocks [size_index] = block->next_free;
		block->next_free = NULL;
#ifdef SGEN_PARALLEL_MARK
try_remove_block_from_free_list (MSBlockInfo *block, MSBlockInfo **free_blocks, int size_index)
	/*
	 * No more free slots in the block, so try to free the block.
	 * Don't try again if we don't succeed - another thread will
	 * already have done it.
	 */
	MSBlockInfo *next_block = block->next_free;
	if (SGEN_CAS_PTR ((void**)&free_blocks [size_index], next_block, block) == block) {
		void *old = SGEN_CAS_PTR ((void**)&block->next_free, NULL, next_block);
		g_assert (old == next_block);
		block->next_free = NULL;
alloc_obj_par (MonoVTable *vtable, int size, gboolean pinned, gboolean has_references)
	int size_index = MS_BLOCK_OBJ_SIZE_INDEX (size);
	MSBlockInfo **free_blocks_local = FREE_BLOCKS_LOCAL (pinned, has_references);

#ifdef SGEN_HAVE_CONCURRENT_MARK
	if (concurrent_mark)
		g_assert_not_reached ();
#endif

	SGEN_ASSERT (9, current_collection_generation == GENERATION_OLD, "old gen parallel allocator called from a %d collection", current_collection_generation);

	if (free_blocks_local [size_index]) {
		obj = unlink_slot_from_free_list_uncontested (free_blocks_local, size_index);
	} else {
		MSBlockInfo **free_blocks = FREE_BLOCKS (pinned, has_references);

		block = free_blocks [size_index];
			if (!try_remove_block_from_free_list (block, free_blocks, size_index))

		g_assert (block->next_free == NULL);
		g_assert (block->free_list);
		block->next_free = free_blocks_local [size_index];
		free_blocks_local [size_index] = block;

		success = ms_alloc_block (size_index, pinned, has_references);
		UNLOCK_MS_BLOCK_LIST;

		if (G_UNLIKELY (!success))

	*(MonoVTable**)obj = vtable;

major_par_alloc_object (MonoVTable *vtable, size_t size, gboolean has_references)
	return alloc_obj_par (vtable, size, FALSE, has_references);
alloc_obj (MonoVTable *vtable, size_t size, gboolean pinned, gboolean has_references)
	int size_index = MS_BLOCK_OBJ_SIZE_INDEX (size);
	MSBlockInfo **free_blocks = FREE_BLOCKS (pinned, has_references);

#ifdef SGEN_PARALLEL_MARK
	SGEN_ASSERT (9, current_collection_generation == GENERATION_OLD, "old gen parallel allocator called from a %d collection", current_collection_generation);
#endif

	if (!free_blocks [size_index]) {
		if (G_UNLIKELY (!ms_alloc_block (size_index, pinned, has_references)))
			return NULL;
	}

	obj = unlink_slot_from_free_list_uncontested (free_blocks, size_index);

	*(MonoVTable**)obj = vtable;

major_alloc_object (MonoVTable *vtable, size_t size, gboolean has_references)
	return alloc_obj (vtable, size, FALSE, has_references);
/*
 * We're not freeing the block if it's empty. We leave that work for
 * the next major collection.
 *
 * This is just called from the domain clearing code, which runs in a
 * single thread and has the GC lock, so we don't need an extra lock.
 */
free_object (char *obj, size_t size, gboolean pinned)
	MSBlockInfo *block = MS_BLOCK_FOR_OBJ (obj);

	if (!block->swept)
		sweep_block (block, FALSE);
	SGEN_ASSERT (9, (pinned && block->pinned) || (!pinned && !block->pinned), "free-object pinning mixup object %p pinned %d block %p pinned %d", obj, pinned, block, block->pinned);
	SGEN_ASSERT (9, MS_OBJ_ALLOCED (obj, block), "object %p is already free", obj);
	MS_CALC_MARK_BIT (word, bit, obj);
	SGEN_ASSERT (9, !MS_MARK_BIT (block, word, bit), "object %p has mark bit set", obj);
	if (!block->free_list) {
		MSBlockInfo **free_blocks = FREE_BLOCKS (pinned, block->has_references);
		int size_index = MS_BLOCK_OBJ_SIZE_INDEX (size);
		SGEN_ASSERT (9, !block->next_free, "block %p doesn't have a free-list of objects but belongs to a free-list of blocks", block);
		block->next_free = free_blocks [size_index];
		free_blocks [size_index] = block;
	}
	memset (obj, 0, size);
	*(void**)obj = block->free_list;
	block->free_list = (void**)obj;
major_free_non_pinned_object (char *obj, size_t size)
	free_object (obj, size, FALSE);

/* size is a multiple of SGEN_ALLOC_ALIGN */
major_alloc_small_pinned_obj (MonoVTable *vtable, size_t size, gboolean has_references)
	res = alloc_obj (vtable, size, TRUE, has_references);
	/*
	 * If we failed to alloc memory, we'd better try releasing memory, as
	 * pinned alloc is requested by the runtime.
	 */
	if (!res) {
		sgen_perform_collection (0, GENERATION_OLD, "pinned alloc failure", TRUE);
		res = alloc_obj (vtable, size, TRUE, has_references);
	}
	return res;

free_pinned_object (char *obj, size_t size)
	free_object (obj, size, TRUE);
/*
 * size is already rounded up and we hold the GC lock.
 */
major_alloc_degraded (MonoVTable *vtable, size_t size)
	size_t old_num_sections;

	old_num_sections = num_major_sections;

	obj = alloc_obj (vtable, size, FALSE, SGEN_VTABLE_HAS_REFERENCES (vtable));
	if (G_LIKELY (obj)) {
		HEAVY_STAT (++stat_objects_alloced_degraded);
		HEAVY_STAT (stat_bytes_alloced_degraded += size);
		g_assert (num_major_sections >= old_num_sections);
		sgen_register_major_sections_alloced (num_major_sections - old_num_sections);
	}
	return obj;

#define MAJOR_OBJ_IS_IN_TO_SPACE(obj) FALSE
/*
 * obj is some object. If it's not in the major heap (i.e. if it's in
 * the nursery or LOS), return FALSE. Otherwise return whether it's
 * been marked or copied.
 */
major_is_object_live (char *obj)
	if (sgen_ptr_in_nursery (obj))
		return FALSE;

	if (!MS_PTR_IN_SMALL_MAJOR_HEAP (obj))
		return FALSE;

	objsize = SGEN_ALIGN_UP (sgen_safe_object_get_size ((MonoObject*)obj));

	if (objsize > SGEN_MAX_SMALL_OBJ_SIZE)
		return FALSE;

	/* now we know it's in a major block */
	block = MS_BLOCK_FOR_OBJ (obj);
	SGEN_ASSERT (9, !block->pinned, "block %p is pinned, BTW why is this bad?", block);
	MS_CALC_MARK_BIT (word, bit, obj);
	return MS_MARK_BIT (block, word, bit) ? TRUE : FALSE;
major_ptr_is_in_non_pinned_space (char *ptr, char **start)
	FOREACH_BLOCK (block) {
		if (ptr >= block->block && ptr <= block->block + MS_BLOCK_SIZE) {
			int count = MS_BLOCK_FREE / block->obj_size;

			for (i = 0; i <= count; ++i) {
				if (ptr >= MS_BLOCK_OBJ (block, i) && ptr < MS_BLOCK_OBJ (block, i + 1)) {
					*start = MS_BLOCK_OBJ (block, i);
			return !block->pinned;

major_iterate_objects (IterateObjectsFlags flags, IterateObjectCallbackFunc callback, void *data)
	gboolean sweep = flags & ITERATE_OBJECTS_SWEEP;
	gboolean non_pinned = flags & ITERATE_OBJECTS_NON_PINNED;
	gboolean pinned = flags & ITERATE_OBJECTS_PINNED;

	FOREACH_BLOCK (block) {
		int count = MS_BLOCK_FREE / block->obj_size;

		if (block->pinned && !pinned)
			continue;
		if (!block->pinned && !non_pinned)
			continue;
		if (sweep && lazy_sweep) {
			sweep_block (block, FALSE);
			SGEN_ASSERT (0, block->swept, "Block must be swept after sweeping");
		}

		for (i = 0; i < count; ++i) {
			void **obj = (void**) MS_BLOCK_OBJ (block, i);
			MS_CALC_MARK_BIT (word, bit, obj);
			if (!MS_MARK_BIT (block, word, bit))
				continue;
			if (MS_OBJ_ALLOCED (obj, block))
				callback ((char*)obj, block->obj_size, data);
major_is_valid_object (char *object)
	FOREACH_BLOCK (block) {
		if ((block->block > object) || ((block->block + MS_BLOCK_SIZE) <= object))
			continue;

		idx = MS_BLOCK_OBJ_INDEX (object, block);
		obj = (char*)MS_BLOCK_OBJ (block, idx);

		return MS_OBJ_ALLOCED (obj, block);
	} END_FOREACH_BLOCK;

major_describe_pointer (char *ptr)
	FOREACH_BLOCK (block) {
		if ((block->block > ptr) || ((block->block + MS_BLOCK_SIZE) <= ptr))
			continue;

		SGEN_LOG (0, "major-ptr (block %p sz %d pin %d ref %d)\n",
			block->block, block->obj_size, block->pinned, block->has_references);

		idx = MS_BLOCK_OBJ_INDEX (ptr, block);
		obj = (char*)MS_BLOCK_OBJ (block, idx);
		live = MS_OBJ_ALLOCED (obj, block);
		vtable = live ? (MonoVTable*)SGEN_LOAD_VTABLE (obj) : NULL;

		MS_CALC_MARK_BIT (w, b, obj);
		marked = MS_MARK_BIT (block, w, b);

			SGEN_LOG (0, "object");
			SGEN_LOG (0, "dead-object");
			SGEN_LOG (0, "interior-ptr offset %td", ptr - obj);
			SGEN_LOG (0, "dead-interior-ptr offset %td", ptr - obj);

		SGEN_LOG (0, " marked %d)\n", marked ? 1 : 0);
	} END_FOREACH_BLOCK;
major_check_scan_starts (void)

major_dump_heap (FILE *heap_dump_file)
	int *slots_available = alloca (sizeof (int) * num_block_obj_sizes);
	int *slots_used = alloca (sizeof (int) * num_block_obj_sizes);

	for (i = 0; i < num_block_obj_sizes; ++i)
		slots_available [i] = slots_used [i] = 0;

	FOREACH_BLOCK (block) {
		int index = ms_find_block_obj_size_index (block->obj_size);
		int count = MS_BLOCK_FREE / block->obj_size;

		slots_available [index] += count;
		for (i = 0; i < count; ++i) {
			if (MS_OBJ_ALLOCED (MS_BLOCK_OBJ (block, i), block))
				++slots_used [index];
		}
	} END_FOREACH_BLOCK;

	fprintf (heap_dump_file, "<occupancies>\n");
	for (i = 0; i < num_block_obj_sizes; ++i) {
		fprintf (heap_dump_file, "<occupancy size=\"%d\" available=\"%d\" used=\"%d\" />\n",
				block_obj_sizes [i], slots_available [i], slots_used [i]);
	}
	fprintf (heap_dump_file, "</occupancies>\n");

	FOREACH_BLOCK (block) {
		int count = MS_BLOCK_FREE / block->obj_size;

		fprintf (heap_dump_file, "<section type=\"%s\" size=\"%zu\">\n", "old", (size_t)MS_BLOCK_FREE);

		for (i = 0; i <= count; ++i) {
			if ((i < count) && MS_OBJ_ALLOCED (MS_BLOCK_OBJ (block, i), block)) {
				sgen_dump_occupied (MS_BLOCK_OBJ (block, start), MS_BLOCK_OBJ (block, i), block->block);

		fprintf (heap_dump_file, "</section>\n");
	} END_FOREACH_BLOCK;
#define LOAD_VTABLE SGEN_LOAD_VTABLE

#define MS_MARK_OBJECT_AND_ENQUEUE_CHECKED(obj,block,queue) do { \
		int __word, __bit; \
		MS_CALC_MARK_BIT (__word, __bit, (obj)); \
		if (!MS_MARK_BIT ((block), __word, __bit) && MS_OBJ_ALLOCED ((obj), (block))) { \
			MS_SET_MARK_BIT ((block), __word, __bit); \
			if ((block)->has_references) \
				GRAY_OBJECT_ENQUEUE ((queue), (obj)); \
			binary_protocol_mark ((obj), (gpointer)LOAD_VTABLE ((obj)), sgen_safe_object_get_size ((MonoObject*)(obj))); \
			INC_NUM_MAJOR_OBJECTS_MARKED (); \
		} \
	} while (0)
#define MS_MARK_OBJECT_AND_ENQUEUE(obj,block,queue) do { \
		int __word, __bit; \
		MS_CALC_MARK_BIT (__word, __bit, (obj)); \
		SGEN_ASSERT (9, MS_OBJ_ALLOCED ((obj), (block)), "object %p not allocated", obj); \
		if (!MS_MARK_BIT ((block), __word, __bit)) { \
			MS_SET_MARK_BIT ((block), __word, __bit); \
			if ((block)->has_references) \
				GRAY_OBJECT_ENQUEUE ((queue), (obj)); \
			binary_protocol_mark ((obj), (gpointer)LOAD_VTABLE ((obj)), sgen_safe_object_get_size ((MonoObject*)(obj))); \
			INC_NUM_MAJOR_OBJECTS_MARKED (); \
		} \
	} while (0)
#define MS_PAR_MARK_OBJECT_AND_ENQUEUE(obj,block,queue) do { \
		int __word, __bit; \
		gboolean __was_marked; \
		SGEN_ASSERT (9, MS_OBJ_ALLOCED ((obj), (block)), "object %p not allocated", obj); \
		MS_CALC_MARK_BIT (__word, __bit, (obj)); \
		MS_PAR_SET_MARK_BIT (__was_marked, (block), __word, __bit); \
		if (!__was_marked) { \
			if ((block)->has_references) \
				GRAY_OBJECT_ENQUEUE ((queue), (obj)); \
			binary_protocol_mark ((obj), (gpointer)LOAD_VTABLE ((obj)), sgen_safe_object_get_size ((MonoObject*)(obj))); \
			INC_NUM_MAJOR_OBJECTS_MARKED (); \
		} \
	} while (0)
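/*
 * Note (added): the three marking macros differ as follows. The CHECKED
 * variant also tests MS_OBJ_ALLOCED and so tolerates pointers to free
 * slots (used for conservatively pinned addresses); the plain variant
 * asserts that the object is allocated; the PAR variant sets the mark bit
 * atomically via MS_PAR_SET_MARK_BIT so that racing parallel markers
 * enqueue an object only once.
 */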
pin_major_object (char *obj, SgenGrayQueue *queue)
#ifdef SGEN_HAVE_CONCURRENT_MARK
	if (concurrent_mark)
		g_assert_not_reached ();
#endif

	block = MS_BLOCK_FOR_OBJ (obj);
	block->has_pinned = TRUE;
	MS_MARK_OBJECT_AND_ENQUEUE (obj, block, queue);

#include "sgen-major-copy-object.h"
#ifdef SGEN_PARALLEL_MARK
major_copy_or_mark_object (void **ptr, void *obj, SgenGrayQueue *queue)
	HEAVY_STAT (++stat_copy_object_called_major);

	SGEN_ASSERT (9, obj, "null object from pointer %p", ptr);
	SGEN_ASSERT (9, current_collection_generation == GENERATION_OLD, "old gen parallel allocator called from a %d collection", current_collection_generation);

	if (sgen_ptr_in_nursery (obj)) {
		gboolean has_references;

		mword vtable_word = *(mword*)obj;
		vt = (MonoVTable*)(vtable_word & ~SGEN_VTABLE_BITS_MASK);

		if (vtable_word & SGEN_FORWARDED_BIT) {

		if (vtable_word & SGEN_PINNED_BIT)

		/* An object in the nursery To Space has already been copied and grayed. Nothing to do. */
		if (sgen_nursery_is_to_space (obj))

		HEAVY_STAT (++stat_objects_copied_major);

	do_copy_object:
		objsize = SGEN_ALIGN_UP (sgen_par_object_get_size (vt, (MonoObject*)obj));
		has_references = SGEN_VTABLE_HAS_REFERENCES (vt);

		destination = sgen_minor_collector.par_alloc_for_promotion (vt, obj, objsize, has_references);
		if (G_UNLIKELY (!destination)) {
			if (!sgen_ptr_in_nursery (obj)) {
				block = MS_BLOCK_FOR_OBJ (obj);
				size_index = block->obj_size_index;
				evacuate_block_obj_sizes [size_index] = FALSE;
			}

			sgen_parallel_pin_or_update (ptr, obj, vt, queue);
			sgen_set_pinned_from_failed_allocation (objsize);
			return;
		}

		if (SGEN_CAS_PTR (obj, (void*)((mword)destination | SGEN_FORWARDED_BIT), vt) == vt) {
			gboolean was_marked;

			par_copy_object_no_checks (destination, vt, obj, objsize, has_references ? queue : NULL);

			/*
			 * FIXME: If we make major_alloc_object() give
			 * us the block info, too, we won't have to
			 *
			 * FIXME (2): We should rework this to avoid all those nursery checks.
			 *
			 * For the split nursery allocator the object
			 * might still be in the nursery despite
			 * having been promoted, in which case we
			 */
			if (!sgen_ptr_in_nursery (obj)) {
				block = MS_BLOCK_FOR_OBJ (obj);
				MS_CALC_MARK_BIT (word, bit, obj);
				SGEN_ASSERT (9, !MS_MARK_BIT (block, word, bit), "object %p already marked", obj);
				MS_PAR_SET_MARK_BIT (was_marked, block, word, bit);
				binary_protocol_mark (obj, vt, sgen_safe_object_get_size ((MonoObject*)obj));
			}
		} else {
			/*
			 * FIXME: We have allocated destination, but
			 * we cannot use it. Give it back to the
			 */
			*(void**)destination = NULL;

			vtable_word = *(mword*)obj;
			g_assert (vtable_word & SGEN_FORWARDED_BIT);

			obj = (void*)(vtable_word & ~SGEN_VTABLE_BITS_MASK);

			HEAVY_STAT (++stat_slots_allocated_in_vain);
		}

	if (MS_PTR_IN_SMALL_MAJOR_HEAP (obj))
		mword vtable_word = *(mword*)obj;
		vt = (MonoVTable*)(vtable_word & ~SGEN_VTABLE_BITS_MASK);

		/* see comment in the non-parallel version below */
		if (vtable_word & SGEN_FORWARDED_BIT) {

		objsize = SGEN_ALIGN_UP (sgen_par_object_get_size (vt, (MonoObject*)obj));

		if (objsize <= SGEN_MAX_SMALL_OBJ_SIZE)

		block = MS_BLOCK_FOR_OBJ (obj);
		size_index = block->obj_size_index;

		if (!block->has_pinned && evacuate_block_obj_sizes [size_index]) {
			if (block->is_to_space)
				return;

			mword vtable_word = *(mword*)obj;
			vt = (MonoVTable*)(vtable_word & ~SGEN_VTABLE_BITS_MASK);

			if (vtable_word & SGEN_FORWARDED_BIT) {

			HEAVY_STAT (++stat_major_objects_evacuated);
			goto do_copy_object;
		}

		MS_PAR_MARK_OBJECT_AND_ENQUEUE (obj, block, queue);
	} else {
		LOSObject *bigobj = sgen_los_header_for_object (obj);
		mword size_word = bigobj->size;

		mword vtable_word = *(mword*)obj;
		vt = (MonoVTable*)(vtable_word & ~SGEN_VTABLE_BITS_MASK);

		binary_protocol_pin (obj, vt, sgen_safe_object_get_size ((MonoObject*)obj));
		if (SGEN_CAS_PTR ((void*)&bigobj->size, (void*)(size_word | 1), (void*)size_word) == (void*)size_word) {
			if (SGEN_VTABLE_HAS_REFERENCES (vt))
				GRAY_OBJECT_ENQUEUE (queue, obj);
		} else {
			g_assert (sgen_los_object_is_pinned (obj));
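			/*
			 * Note (added): the low bit of the LOS object's size
			 * word doubles as its pin flag, so the CAS on
			 * bigobj->size above atomically checks and sets
			 * "pinned"; if another thread won the race, the
			 * assert confirms the object ended up pinned anyway.
			 */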
#ifdef SGEN_HAVE_CONCURRENT_MARK
major_copy_or_mark_object_concurrent (void **ptr, void *obj, SgenGrayQueue *queue)
	g_assert (!SGEN_OBJECT_IS_FORWARDED (obj));

	if (!sgen_ptr_in_nursery (obj)) {
		if (MS_PTR_IN_SMALL_MAJOR_HEAP (obj))

		objsize = SGEN_ALIGN_UP (sgen_safe_object_get_size ((MonoObject*)obj));

		if (objsize <= SGEN_MAX_SMALL_OBJ_SIZE)

			MSBlockInfo *block = MS_BLOCK_FOR_OBJ (obj);
			MS_MARK_OBJECT_AND_ENQUEUE (obj, block, queue);

			if (sgen_los_object_is_pinned (obj))
				return;

#ifdef ENABLE_DTRACE
			if (G_UNLIKELY (MONO_GC_OBJ_PINNED_ENABLED ())) {
				MonoVTable *vt = (MonoVTable*)SGEN_LOAD_VTABLE (obj);
				MONO_GC_OBJ_PINNED ((mword)obj, sgen_safe_object_get_size (obj), vt->klass->name_space, vt->klass->name, GENERATION_OLD);
			}
#endif

			sgen_los_pin_object (obj);
			if (SGEN_OBJECT_HAS_REFERENCES (obj))
				GRAY_OBJECT_ENQUEUE (queue, obj);
			INC_NUM_MAJOR_OBJECTS_MARKED ();
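/*
 * Note (added): unlike the stop-the-world copy functions, this concurrent
 * path never copies - mutators are still running, so objects cannot be
 * moved; reachable objects are only marked (or, for LOS, pinned) and
 * enqueued for scanning.
 */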
major_copy_or_mark_object (void **ptr, void *obj, SgenGrayQueue *queue)
	HEAVY_STAT (++stat_copy_object_called_major);

	SGEN_ASSERT (9, obj, "null object from pointer %p", ptr);
	SGEN_ASSERT (9, current_collection_generation == GENERATION_OLD, "old gen parallel allocator called from a %d collection", current_collection_generation);

	if (sgen_ptr_in_nursery (obj)) {
		char *forwarded, *old_obj;

		if ((forwarded = SGEN_OBJECT_IS_FORWARDED (obj))) {

		if (SGEN_OBJECT_IS_PINNED (obj))

		/* An object in the nursery To Space has already been copied and grayed. Nothing to do. */
		if (sgen_nursery_is_to_space (obj))

		HEAVY_STAT (++stat_objects_copied_major);

	do_copy_object:
		old_obj = obj;
		obj = copy_object_no_checks (obj, queue);
		if (G_UNLIKELY (old_obj == obj)) {
			/*
			 * If we fail to evacuate an object we just stop doing it for
			 * a given block size, as all others will surely fail too.
			 */
			if (!sgen_ptr_in_nursery (obj)) {
				block = MS_BLOCK_FOR_OBJ (obj);
				size_index = block->obj_size_index;
				evacuate_block_obj_sizes [size_index] = FALSE;
				MS_MARK_OBJECT_AND_ENQUEUE (obj, block, queue);
			}
			return;
		}

		/*
		 * FIXME: See comment for copy_object_no_checks(). If
		 * we have that, we can let the allocation function
		 * give us the block info, too, and we won't have to
		 *
		 * FIXME (2): We should rework this to avoid all those nursery checks.
		 *
		 * For the split nursery allocator the object might
		 * still be in the nursery despite having been
		 * promoted, in which case we can't mark it.
		 */
		if (!sgen_ptr_in_nursery (obj)) {
			block = MS_BLOCK_FOR_OBJ (obj);
			MS_CALC_MARK_BIT (word, bit, obj);
			SGEN_ASSERT (9, !MS_MARK_BIT (block, word, bit), "object %p already marked", obj);
			MS_SET_MARK_BIT (block, word, bit);
			binary_protocol_mark (obj, (gpointer)LOAD_VTABLE (obj), sgen_safe_object_get_size ((MonoObject*)obj));
		}

	if (MS_PTR_IN_SMALL_MAJOR_HEAP (obj))
		/*
		 * If we don't have a fixed heap we cannot know
		 * whether an object is in the LOS or in the small
		 * object major heap without checking its size. To do
		 * that, however, we need to know that we actually
		 * have a valid object, not a forwarding pointer, so
		 * we have to do this check first.
		 */
		if ((forwarded = SGEN_OBJECT_IS_FORWARDED (obj))) {

		objsize = SGEN_ALIGN_UP (sgen_safe_object_get_size ((MonoObject*)obj));

		if (objsize <= SGEN_MAX_SMALL_OBJ_SIZE)

		block = MS_BLOCK_FOR_OBJ (obj);
		size_index = block->obj_size_index;
		evacuate = evacuate_block_obj_sizes [size_index];

		/*
		 * We could also check for !block->has_pinned
		 * here, but it would only make an uncommon case
		 * faster, namely objects that are in blocks
		 * whose slot sizes are evacuated but which have
		 */
		if (evacuate && (forwarded = SGEN_OBJECT_IS_FORWARDED (obj))) {

		if (evacuate && !block->has_pinned) {
			g_assert (!SGEN_OBJECT_IS_PINNED (obj));
			if (block->is_to_space)
				return;
			HEAVY_STAT (++stat_major_objects_evacuated);
			goto do_copy_object;
		}

		MS_MARK_OBJECT_AND_ENQUEUE (obj, block, queue);

		if (sgen_los_object_is_pinned (obj))
			return;
		binary_protocol_pin (obj, (gpointer)SGEN_LOAD_VTABLE (obj), sgen_safe_object_get_size ((MonoObject*)obj));

#ifdef ENABLE_DTRACE
		if (G_UNLIKELY (MONO_GC_OBJ_PINNED_ENABLED ())) {
			MonoVTable *vt = (MonoVTable*)SGEN_LOAD_VTABLE (obj);
			MONO_GC_OBJ_PINNED ((mword)obj, sgen_safe_object_get_size (obj), vt->klass->name_space, vt->klass->name, GENERATION_OLD);
		}
#endif

		sgen_los_pin_object (obj);
		if (SGEN_OBJECT_HAS_REFERENCES (obj))
			GRAY_OBJECT_ENQUEUE (queue, obj);
major_copy_or_mark_object_canonical (void **ptr, SgenGrayQueue *queue)
	major_copy_or_mark_object (ptr, *ptr, queue);

#ifdef SGEN_HAVE_CONCURRENT_MARK
major_copy_or_mark_object_concurrent_canonical (void **ptr, SgenGrayQueue *queue)
	major_copy_or_mark_object_concurrent (ptr, *ptr, queue);

major_get_and_reset_num_major_objects_marked (void)
#ifdef SGEN_COUNT_NUMBER_OF_MAJOR_OBJECTS_MARKED
	long long num = num_major_objects_marked;
	num_major_objects_marked = 0;
	return num;
#else
	return 0;
#endif

#include "sgen-major-scan-object.h"

#ifdef SGEN_HAVE_CONCURRENT_MARK
#define SCAN_FOR_CONCURRENT_MARK
#include "sgen-major-scan-object.h"
#undef SCAN_FOR_CONCURRENT_MARK
#endif
mark_pinned_objects_in_block (MSBlockInfo *block, SgenGrayQueue *queue)
	int last_index = -1;

	if (!block->pin_queue_num_entries)
		return;

	block->has_pinned = TRUE;

	for (i = 0; i < block->pin_queue_num_entries; ++i) {
		int index = MS_BLOCK_OBJ_INDEX (block->pin_queue_start [i], block);
		SGEN_ASSERT (9, index >= 0 && index < MS_BLOCK_FREE / block->obj_size, "invalid object %p index %d max-index %d", block->pin_queue_start [i], index, MS_BLOCK_FREE / block->obj_size);
		if (index == last_index)
			continue;
		MS_MARK_OBJECT_AND_ENQUEUE_CHECKED (MS_BLOCK_OBJ (block, index), block, queue);
		last_index = index;
	}
sweep_block_for_size (MSBlockInfo *block, int count, int obj_size)
	for (obj_index = 0; obj_index < count; ++obj_index) {
		void *obj = MS_BLOCK_OBJ_FOR_SIZE (block, obj_index, obj_size);

		MS_CALC_MARK_BIT (word, bit, obj);
		if (MS_MARK_BIT (block, word, bit)) {
			SGEN_ASSERT (9, MS_OBJ_ALLOCED (obj, block), "object %p not allocated", obj);
		} else {
			/* an unmarked object */
			if (MS_OBJ_ALLOCED (obj, block)) {
				/*
				 * FIXME: Merge consecutive
				 * slots for lower reporting
				 * overhead. Maybe memset
				 * will also benefit?
				 */
				binary_protocol_empty (obj, obj_size);
				MONO_GC_MAJOR_SWEPT ((mword)obj, obj_size);
				memset (obj, 0, obj_size);
			}
			*(void**)obj = block->free_list;
			block->free_list = obj;
/*
 * Traverse BLOCK, freeing and zeroing unused objects.
 */
sweep_block (MSBlockInfo *block, gboolean during_major_collection)
	void *reversed = NULL;

	if (!during_major_collection)
		g_assert (!sgen_concurrent_collection_in_progress ());

	count = MS_BLOCK_FREE / block->obj_size;

	block->free_list = NULL;

	/* Use inline instances specialized to constant sizes; this allows the compiler to replace the memset calls with inline code. */
	// FIXME: Add more sizes
	switch (block->obj_size) {
	case 16:
		sweep_block_for_size (block, count, 16);
		break;
	default:
		sweep_block_for_size (block, count, block->obj_size);
	}

	/* reset mark bits */
	memset (block->mark_words, 0, sizeof (mword) * MS_NUM_MARK_WORDS);

	/* Reverse free list so that it's in address order */
	while (block->free_list) {
		void *next = *(void**)block->free_list;
		*(void**)block->free_list = reversed;
		reversed = block->free_list;
		block->free_list = next;
	}
	block->free_list = reversed;
	if (sizeof (mword) == sizeof (unsigned long))
		count += __builtin_popcountl (d);
	else
		count += __builtin_popcount (d);
	/* statistics for evacuation */
	int *slots_available = alloca (sizeof (int) * num_block_obj_sizes);
	int *slots_used = alloca (sizeof (int) * num_block_obj_sizes);
	int *num_blocks = alloca (sizeof (int) * num_block_obj_sizes);
#ifdef SGEN_HAVE_CONCURRENT_MARK
	mword total_evacuate_heap = 0;
	mword total_evacuate_saved = 0;
#endif

	for (i = 0; i < num_block_obj_sizes; ++i)
		slots_available [i] = slots_used [i] = num_blocks [i] = 0;

	/* clear all the free lists */
	for (i = 0; i < MS_BLOCK_TYPE_MAX; ++i) {
		MSBlockInfo **free_blocks = free_block_lists [i];

		for (j = 0; j < num_block_obj_sizes; ++j)
			free_blocks [j] = NULL;
	}

	/* traverse all blocks, free and zero unmarked objects */
		MSBlockInfo *block = *iter;

		gboolean have_live = FALSE;
		gboolean has_pinned;
		gboolean have_free = FALSE;

		obj_size_index = block->obj_size_index;

		has_pinned = block->has_pinned;
		block->has_pinned = block->pinned;

		block->is_to_space = FALSE;

		count = MS_BLOCK_FREE / block->obj_size;

#ifdef SGEN_HAVE_CONCURRENT_MARK
		if (block->cardtable_mod_union) {
			sgen_free_internal_dynamic (block->cardtable_mod_union, CARDS_PER_BLOCK, INTERNAL_MEM_CARDTABLE_MOD_UNION);
			block->cardtable_mod_union = NULL;
		}
#endif

		/* Count marked objects in the block */
		for (i = 0; i < MS_NUM_MARK_WORDS; ++i) {
			nused += bitcount (block->mark_words [i]);
		}

			sweep_block (block, TRUE);

		if (have_live) {
			++num_blocks [obj_size_index];
			slots_used [obj_size_index] += nused;
			slots_available [obj_size_index] += count;

			iter = &block->next;

			/*
			 * If there are free slots in the block, add
			 * the block to the corresponding free list.
			 */
			if (have_free) {
				MSBlockInfo **free_blocks = FREE_BLOCKS (block->pinned, block->has_references);
				int index = MS_BLOCK_OBJ_SIZE_INDEX (block->obj_size);
				block->next_free = free_blocks [index];
				free_blocks [index] = block;
			}

			update_heap_boundaries_for_block (block);
		} else {
			/*
			 * Blocks without live objects are removed from the
			 * block list and freed.
			 */
			*iter = block->next;

			binary_protocol_empty (MS_BLOCK_OBJ (block, 0), (char*)MS_BLOCK_OBJ (block, count) - (char*)MS_BLOCK_OBJ (block, 0));
#ifdef FIXED_HEAP
			ms_free_block (block);
#else
			ms_free_block (block->block);

			sgen_free_internal (block, INTERNAL_MEM_MS_BLOCK_INFO);
#endif

			--num_major_sections;
		}
	for (i = 0; i < num_block_obj_sizes; ++i) {
		float usage = (float)slots_used [i] / (float)slots_available [i];
		if (num_blocks [i] > 5 && usage < evacuation_threshold) {
			evacuate_block_obj_sizes [i] = TRUE;
			g_print ("slot size %d - %d of %d used\n",
					block_obj_sizes [i], slots_used [i], slots_available [i]);
		} else {
			evacuate_block_obj_sizes [i] = FALSE;
		}
#ifdef SGEN_HAVE_CONCURRENT_MARK
		{
			mword total_bytes = block_obj_sizes [i] * slots_available [i];
			total_evacuate_heap += total_bytes;
			if (evacuate_block_obj_sizes [i])
				total_evacuate_saved += total_bytes - block_obj_sizes [i] * slots_used [i];
		}
#endif
	}

#ifdef SGEN_HAVE_CONCURRENT_MARK
	want_evacuation = (float)total_evacuate_saved / (float)total_evacuate_heap > (1 - concurrent_evacuation_threshold);
#endif
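/*
 * Worked example (added): with the default evacuation_threshold of 0.666,
 * a size class with more than 5 blocks and, say, 500 of its 1000 slots
 * live (50% usage) gets flagged in evacuate_block_obj_sizes, so the next
 * major collection copies its survivors into fresh blocks and the
 * fragmented blocks can be freed.
 */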
static int count_pinned_ref;
static int count_pinned_nonref;
static int count_nonpinned_ref;
static int count_nonpinned_nonref;

count_nonpinned_callback (char *obj, size_t size, void *data)
	MonoVTable *vtable = (MonoVTable*)LOAD_VTABLE (obj);

	if (vtable->klass->has_references)
		++count_nonpinned_ref;
	else
		++count_nonpinned_nonref;

count_pinned_callback (char *obj, size_t size, void *data)
	MonoVTable *vtable = (MonoVTable*)LOAD_VTABLE (obj);

	if (vtable->klass->has_references)
		++count_pinned_ref;
	else
		++count_pinned_nonref;

static G_GNUC_UNUSED void
count_ref_nonref_objs (void)
	count_pinned_ref = 0;
	count_pinned_nonref = 0;
	count_nonpinned_ref = 0;
	count_nonpinned_nonref = 0;

	major_iterate_objects (ITERATE_OBJECTS_SWEEP_NON_PINNED, count_nonpinned_callback, NULL);
	major_iterate_objects (ITERATE_OBJECTS_SWEEP_PINNED, count_pinned_callback, NULL);

	total = count_pinned_nonref + count_nonpinned_nonref + count_pinned_ref + count_nonpinned_ref;

	g_print ("ref: %d pinned %d non-pinned non-ref: %d pinned %d non-pinned -- %.1f\n",
			count_pinned_ref, count_nonpinned_ref,
			count_pinned_nonref, count_nonpinned_nonref,
			(count_pinned_nonref + count_nonpinned_nonref) * 100.0 / total);
ms_calculate_block_obj_sizes (double factor, int *arr)
	double target_size = sizeof (MonoObject);

	do {
		int target_count = (int)ceil (MS_BLOCK_FREE / target_size);
		int size = MIN ((MS_BLOCK_FREE / target_count) & ~(SGEN_ALLOC_ALIGN - 1), SGEN_MAX_SMALL_OBJ_SIZE);

		if (size != last_size) {
			if (arr)
				arr [num_sizes] = size;
			++num_sizes;
			last_size = size;
		}

		target_size *= factor;
	} while (last_size < SGEN_MAX_SMALL_OBJ_SIZE);

	return num_sizes;
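/*
 * Note (added): this helper is used in two passes - see the init code
 * below: first called with arr == NULL just to count the distinct size
 * classes, then again with the allocated array to fill it in. Each class
 * is roughly `factor` times the previous one, rounded down to the
 * allocation alignment.
 */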
/* only valid during minor collections */
static mword old_num_major_sections;

major_start_nursery_collection (void)
#ifdef MARKSWEEP_CONSISTENCY_CHECK
	consistency_check ();
#endif

	old_num_major_sections = num_major_sections;

major_finish_nursery_collection (void)
#ifdef MARKSWEEP_CONSISTENCY_CHECK
	consistency_check ();
#endif
	sgen_register_major_sections_alloced (num_major_sections - old_num_major_sections);
major_start_major_collection (void)
	/* clear the free lists */
	for (i = 0; i < num_block_obj_sizes; ++i) {
		if (!evacuate_block_obj_sizes [i])
			continue;

		free_block_lists [0][i] = NULL;
		free_block_lists [MS_BLOCK_FLAG_REFS][i] = NULL;
	}

	// Sweep all unswept blocks
		MONO_GC_SWEEP_BEGIN (GENERATION_OLD, TRUE);

		while (*iter) {
			MSBlockInfo *block = *iter;

			if (!block->swept)
				sweep_block (block, TRUE);

			iter = &block->next;
		}

		MONO_GC_SWEEP_END (GENERATION_OLD, TRUE);
major_finish_major_collection (void)

#if !defined(FIXED_HEAP) && SIZEOF_VOID_P != 8
compare_pointers (const void *va, const void *vb) {
	char *a = *(char**)va, *b = *(char**)vb;
	if (a < b)
		return -1;
	if (a > b)
		return 1;
	return 0;
}
major_have_computer_minor_collection_allowance (void)
	size_t section_reserve = sgen_get_minor_collection_allowance () / MS_BLOCK_SIZE;

	g_assert (have_swept);

#if SIZEOF_VOID_P != 8
	{
		int i, num_empty_blocks_orig, num_blocks, arr_length;

		void **empty_block_arr;
		void **rebuild_next;

		/*
		 * sgen_free_os_memory () asserts in mono_vfree () because windows doesn't like freeing the middle of
		 * a VirtualAlloc ()-ed block.
		 */

		if (num_empty_blocks <= section_reserve)
			return;
		SGEN_ASSERT (0, num_empty_blocks > 0, "section reserve can't be negative");

		num_empty_blocks_orig = num_empty_blocks;
		empty_block_arr = (void**)sgen_alloc_internal_dynamic (sizeof (void*) * num_empty_blocks_orig,
				INTERNAL_MEM_MS_BLOCK_INFO_SORT, FALSE);
		if (!empty_block_arr)
			goto fallback;

		i = 0;
		for (block = empty_blocks; block; block = *(void**)block)
			empty_block_arr [i++] = block;
		SGEN_ASSERT (0, i == num_empty_blocks, "empty block count wrong");

		sgen_qsort (empty_block_arr, num_empty_blocks, sizeof (void*), compare_pointers);

		/*
		 * We iterate over the free blocks, trying to find MS_BLOCK_ALLOC_NUM
		 * contiguous ones. If we do, we free them. If that's not enough to get to
		 * section_reserve, we halve the number of contiguous blocks we're looking
		 * for and have another go, until we're done with looking for pairs of
		 * blocks, at which point we give up and go to the fallback.
		 */
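		/*
		 * Worked example (added): with MS_BLOCK_SIZE of 16K and
		 * MS_BLOCK_ALLOC_NUM of 32, the first pass looks for runs of
		 * 32 entries in the sorted array that are exactly 16K apart,
		 * i.e. one contiguous 512K span, and frees each such run with
		 * a single sgen_free_os_memory () call; later passes look for
		 * runs of 16, 8, ... down to 2.
		 */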
		arr_length = num_empty_blocks_orig;
		num_blocks = MS_BLOCK_ALLOC_NUM;
		while (num_empty_blocks > section_reserve && num_blocks > 1) {
			for (i = 0; i < arr_length; ++i) {
				void *block = empty_block_arr [i];
				SGEN_ASSERT (0, block, "we're not shifting correctly");
					empty_block_arr [dest] = block;
					/*
					 * This is not strictly necessary, but we're
					 */
					empty_block_arr [i] = NULL;

				SGEN_ASSERT (0, first >= 0 && d > first, "algorithm is wrong");

				if ((char*)block != ((char*)empty_block_arr [d-1]) + MS_BLOCK_SIZE) {

				if (d + 1 - first == num_blocks) {
					/*
					 * We found num_blocks contiguous blocks. Free them
					 * and null their array entries. As an optimization
					 * we could, instead of nulling the entries, shift
					 * the following entries over to the left, while
					 */
					sgen_free_os_memory (empty_block_arr [first], MS_BLOCK_SIZE * num_blocks, SGEN_ALLOC_HEAP);
					for (j = first; j <= d; ++j)
						empty_block_arr [j] = NULL;

					num_empty_blocks -= num_blocks;

					stat_major_blocks_freed += num_blocks;
					if (num_blocks == MS_BLOCK_ALLOC_NUM)
						stat_major_blocks_freed_ideal += num_blocks;
					else
						stat_major_blocks_freed_less_ideal += num_blocks;

			SGEN_ASSERT (0, dest <= i && dest <= arr_length, "array length is off");
			arr_length = dest;
			SGEN_ASSERT (0, arr_length == num_empty_blocks, "array length is off");

			num_blocks >>= 1;
		}
		/* rebuild empty_blocks free list */
		rebuild_next = (void**)&empty_blocks;
		for (i = 0; i < arr_length; ++i) {
			void *block = empty_block_arr [i];
			SGEN_ASSERT (0, block, "we're missing blocks");
			*rebuild_next = block;
			rebuild_next = (void**)block;
		}
		*rebuild_next = NULL;

		sgen_free_internal_dynamic (empty_block_arr, sizeof (void*) * num_empty_blocks_orig, INTERNAL_MEM_MS_BLOCK_INFO_SORT);

		SGEN_ASSERT (0, num_empty_blocks >= 0, "we freed more blocks than we had in the first place?");
	}

fallback:
	/*
	 * This is our threshold. If there are not more empty than used blocks, we won't
	 * release non-contiguous blocks, for fear of fragmenting the address space.
	 */
	if (num_empty_blocks <= num_major_sections)
		return;

	while (num_empty_blocks > section_reserve) {
		void *next = *(void**)empty_blocks;
		sgen_free_os_memory (empty_blocks, MS_BLOCK_SIZE, SGEN_ALLOC_HEAP);
		empty_blocks = next;
		/*
		 * Need not be atomic because this is running
		 */
		--num_empty_blocks;

		++stat_major_blocks_freed;
#if SIZEOF_VOID_P != 8
		++stat_major_blocks_freed_individual;
#endif
	}
major_find_pin_queue_start_ends (SgenGrayQueue *queue)
	FOREACH_BLOCK (block) {
		block->pin_queue_start = sgen_find_optimized_pin_queue_area (block->block + MS_BLOCK_SKIP, block->block + MS_BLOCK_SIZE,
				&block->pin_queue_num_entries);
	} END_FOREACH_BLOCK;

major_pin_objects (SgenGrayQueue *queue)
	FOREACH_BLOCK (block) {
		mark_pinned_objects_in_block (block, queue);
	} END_FOREACH_BLOCK;

major_init_to_space (void)

major_report_pinned_memory_usage (void)
	g_assert_not_reached ();

major_get_used_size (void)
	FOREACH_BLOCK (block) {
		int count = MS_BLOCK_FREE / block->obj_size;

		size += count * block->obj_size;
		for (iter = block->free_list; iter; iter = (void**)*iter)
			size -= block->obj_size;
	} END_FOREACH_BLOCK;

	return size;

get_num_major_sections (void)
	return num_major_sections;
major_handle_gc_param (const char *opt)
	if (g_str_has_prefix (opt, "major-heap-size=")) {
		const char *arg = strchr (opt, '=') + 1;
		if (!mono_gc_parse_environment_string_extract_number (arg, &size))
			return FALSE;
		ms_heap_num_blocks = (size + MS_BLOCK_SIZE - 1) / MS_BLOCK_SIZE;
		g_assert (ms_heap_num_blocks > 0);
		return TRUE;
	} else
	if (g_str_has_prefix (opt, "evacuation-threshold=")) {
		const char *arg = strchr (opt, '=') + 1;
		int percentage = atoi (arg);
		if (percentage < 0 || percentage > 100) {
			fprintf (stderr, "evacuation-threshold must be an integer in the range 0-100.\n");
			return FALSE;
		}
		evacuation_threshold = (float)percentage / 100.0f;
		return TRUE;
	} else if (!strcmp (opt, "lazy-sweep")) {
		lazy_sweep = TRUE;
		return TRUE;
	} else if (!strcmp (opt, "no-lazy-sweep")) {
		lazy_sweep = FALSE;
		return TRUE;
	}

	return FALSE;
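/*
 * Example (added): these options arrive via the MONO_GC_PARAMS environment
 * variable together with the collector selection, e.g.
 *
 *	MONO_GC_PARAMS=major=marksweep,evacuation-threshold=66,no-lazy-sweep
 *
 * (illustrative; the exact set of accepted options depends on how this
 * collector was configured and built).
 */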
major_print_gc_param_usage (void)
	fprintf (stderr,
			"  major-heap-size=N (where N is an integer, possibly with a k, m or a g suffix)\n"
			"  evacuation-threshold=P (where P is a percentage, an integer in 0-100)\n"
			"  (no-)lazy-sweep\n"
			);

major_iterate_live_block_ranges (sgen_cardtable_block_callback callback)
	FOREACH_BLOCK (block) {
		if (block->has_references)
			callback ((mword)block->block, MS_BLOCK_SIZE);
	} END_FOREACH_BLOCK;
#ifdef HEAVY_STATISTICS
extern long long marked_cards;
extern long long scanned_cards;
extern long long scanned_objects;
extern long long remarked_cards;
#endif

#define CARD_WORDS_PER_BLOCK (CARDS_PER_BLOCK / SIZEOF_VOID_P)
/*
 * MS blocks are 16K aligned.
 * Cardtables are 4K aligned, at least.
 * This means that the cardtable of a given block is 32 bytes aligned.
 */
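/*
 * Arithmetic (added; assumes SGen's 512-byte cards, i.e. CARD_SIZE_IN_BYTES
 * == 512): a 16K block covers 16384 / 512 = 32 cards, so CARDS_PER_BLOCK is
 * 32 and one block's cards occupy 32 consecutive bytes of card table, which
 * is where the 32-byte alignment claim above comes from.
 */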
initial_skip_card (guint8 *card_data)
	mword *cards = (mword*)card_data;

	for (i = 0; i < CARD_WORDS_PER_BLOCK; ++i) {
		card = cards [i];
		if (card)
			break;
	}

	if (i == CARD_WORDS_PER_BLOCK)
		return card_data + CARDS_PER_BLOCK;

#if defined(__i386__) && defined(__GNUC__)
	return card_data + i * 4 + (__builtin_ffs (card) - 1) / 8;
#elif defined(__x86_64__) && defined(__GNUC__)
	return card_data + i * 8 + (__builtin_ffsll (card) - 1) / 8;
#elif defined(__s390x__) && defined(__GNUC__)
	return card_data + i * 8 + (__builtin_ffsll (GUINT64_TO_LE(card)) - 1) / 8;
#else
	for (i = i * SIZEOF_VOID_P; i < CARDS_PER_BLOCK; ++i) {
		if (card_data [i])
			return &card_data [i];
	}
	return card_data + CARDS_PER_BLOCK;
#endif
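/*
 * Note (added): the word-at-a-time scan above finds the first mword that
 * has any nonzero card byte; find-first-set then locates the byte within
 * that word (bit index divided by 8), so the common all-clean case only
 * touches CARD_WORDS_PER_BLOCK words.
 */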
static G_GNUC_UNUSED guint8*
skip_card (guint8 *card_data, guint8 *card_data_end)
	while (card_data < card_data_end && !*card_data)
		++card_data;
	return card_data;

#define MS_BLOCK_OBJ_INDEX_FAST(o,b,os) (((char*)(o) - ((b) + MS_BLOCK_SKIP)) / (os))
#define MS_BLOCK_OBJ_FAST(b,os,i) ((b) + MS_BLOCK_SKIP + (os) * (i))
#define MS_OBJ_ALLOCED_FAST(o,b) (*(void**)(o) && (*(char**)(o) < (b) || *(char**)(o) >= (b) + MS_BLOCK_SIZE))
major_scan_card_table (gboolean mod_union, SgenGrayQueue *queue)
	ScanObjectFunc scan_func = sgen_get_current_object_ops ()->scan_object;

#ifdef SGEN_HAVE_CONCURRENT_MARK
	if (!concurrent_mark)
		g_assert (!mod_union);
#else
	g_assert (!mod_union);
#endif

	FOREACH_BLOCK (block) {
		if (!block->has_references)
			continue;

		block_obj_size = block->obj_size;
		block_start = block->block;

		if (block_obj_size >= CARD_SIZE_IN_BYTES) {
#ifndef SGEN_HAVE_OVERLAPPING_CARDS
			guint8 cards_data [CARDS_PER_BLOCK];
#endif
			char *obj, *end, *base;

#ifdef SGEN_HAVE_CONCURRENT_MARK
			cards = block->cardtable_mod_union;
			/*
			 * This happens when the nursery
			 * collection that precedes finishing
			 * the concurrent collection allocates
			 */
#endif
			/* We can avoid the extra copy since the remark cardtable was cleaned before */
#ifdef SGEN_HAVE_OVERLAPPING_CARDS
			cards = sgen_card_table_get_card_scan_address ((mword)block_start);
#else
			if (!sgen_card_table_get_card_data (cards_data, (mword)block_start, CARDS_PER_BLOCK))
				continue;
#endif

			obj = (char*)MS_BLOCK_OBJ_FAST (block_start, block_obj_size, 0);
			end = block_start + MS_BLOCK_SIZE;
			base = sgen_card_table_align_pointer (obj);

			if (!block->swept)
				sweep_block (block, FALSE);

			if (!MS_OBJ_ALLOCED_FAST (obj, block_start))

			/* FIXME: do this more efficiently */
			MS_CALC_MARK_BIT (w, b, obj);
			if (!MS_MARK_BIT (block, w, b))

			card_offset = (obj - base) >> CARD_BITS;
			sgen_cardtable_scan_object (obj, block_obj_size, cards + card_offset, mod_union, queue);

			obj += block_obj_size;
		} else {
			guint8 *card_data, *card_base;
			guint8 *card_data_end;

			/*
			 * This is safe in the face of card aliasing for the following reason:
			 *
			 * Major blocks are 16k aligned, or 32 cards aligned.
			 * Card aliasing happens in powers of two, so as long as major blocks are aligned to their
			 * sizes, they won't overflow the cardtable overlap modulus.
			 */
#ifdef SGEN_HAVE_CONCURRENT_MARK
			card_data = card_base = block->cardtable_mod_union;
			/*
			 * This happens when the nursery
			 * collection that precedes finishing
			 * the concurrent collection allocates
			 */
				g_assert_not_reached ();
#endif
			card_data = card_base = sgen_card_table_get_card_scan_address ((mword)block_start);

			card_data_end = card_data + CARDS_PER_BLOCK;

			for (card_data = initial_skip_card (card_data); card_data < card_data_end; ++card_data) { //card_data = skip_card (card_data + 1, card_data_end)) {
				size_t idx = card_data - card_base;
				char *start = (char*)(block_start + idx * CARD_SIZE_IN_BYTES);
				char *end = start + CARD_SIZE_IN_BYTES;
				char *first_obj, *obj;

				HEAVY_STAT (++scanned_cards);

				if (!*card_data)
					continue;

				if (!block->swept)
					sweep_block (block, FALSE);

				HEAVY_STAT (++marked_cards);

				sgen_card_table_prepare_card_for_scanning (card_data);

				index = MS_BLOCK_OBJ_INDEX_FAST (start, block_start, block_obj_size);

				obj = first_obj = (char*)MS_BLOCK_OBJ_FAST (block_start, block_obj_size, index);

					if (!MS_OBJ_ALLOCED_FAST (obj, block_start))

					/* FIXME: do this more efficiently */
					MS_CALC_MARK_BIT (w, b, obj);
					if (!MS_MARK_BIT (block, w, b))

					HEAVY_STAT (++scanned_objects);
					scan_func (obj, queue);

					obj += block_obj_size;

				HEAVY_STAT (if (*card_data) ++remarked_cards);
				binary_protocol_card_scan (first_obj, obj - first_obj);
			}
		}
	} END_FOREACH_BLOCK;
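/*
 * Note (added): the two branches above are the two card-scanning
 * strategies: for slot sizes of at least one card (>= CARD_SIZE_IN_BYTES)
 * the code walks objects and scans each object's dirty cards, while for
 * smaller slots it walks the dirty cards and scans the objects overlapping
 * each card.
 */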
major_count_cards (long long *num_total_cards, long long *num_marked_cards)
	long long total_cards = 0;
	long long marked_cards = 0;

	FOREACH_BLOCK (block) {
		guint8 *cards = sgen_card_table_get_card_scan_address ((mword) block->block);

		if (!block->has_references)
			continue;

		total_cards += CARDS_PER_BLOCK;
		for (i = 0; i < CARDS_PER_BLOCK; ++i) {
			if (cards [i])
				++marked_cards;
		}
	} END_FOREACH_BLOCK;

	*num_total_cards = total_cards;
	*num_marked_cards = marked_cards;
2416 #ifdef SGEN_HAVE_CONCURRENT_MARK
2418 update_cardtable_mod_union (void)
2422 FOREACH_BLOCK (block) {
2425 block->cardtable_mod_union = sgen_card_table_update_mod_union (block->cardtable_mod_union,
2426 block->block, MS_BLOCK_SIZE, &num_cards);
2428 SGEN_ASSERT (0, num_cards == CARDS_PER_BLOCK, "Number of cards calculation is wrong");
2429 } END_FOREACH_BLOCK;
static guint8*
major_get_cardtable_mod_union_for_object (char *obj)
{
	MSBlockInfo *block = MS_BLOCK_FOR_OBJ (obj);
	return &block->cardtable_mod_union [(obj - (char*)sgen_card_table_align_pointer (block->block)) >> CARD_BITS];
}
#endif
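/*
 * Worked example, assuming 512-byte cards: an object 0x2a00 bytes past the
 * card-aligned block start gets slot 0x2a00 >> 9 == 21 of its block's
 * mod-union table.
 */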
static void
alloc_free_block_lists (MSBlockInfo ***lists)
{
	int i;
	for (i = 0; i < MS_BLOCK_TYPE_MAX; ++i)
		lists [i] = sgen_alloc_internal_dynamic (sizeof (MSBlockInfo*) * num_block_obj_sizes, INTERNAL_MEM_MS_TABLES, TRUE);
}
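/*
 * The result is a small matrix of free lists: one row per block type (the
 * combinations of the pinned and has-references flags) and one column per
 * object size class.
 */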
#ifdef SGEN_PARALLEL_MARK
static void*
major_alloc_worker_data (void)
{
	/* FIXME: free this when the workers come down */
	MSBlockInfo ***lists = malloc (sizeof (MSBlockInfo**) * MS_BLOCK_TYPE_MAX);
	alloc_free_block_lists (lists);
	return lists;
}
static void
major_init_worker_thread (void *data)
{
	MSBlockInfo ***lists = data;
	int i;

	g_assert (lists && lists != free_block_lists);
	for (i = 0; i < MS_BLOCK_TYPE_MAX; ++i) {
		int j;
		for (j = 0; j < num_block_obj_sizes; ++j)
			g_assert (!lists [i][j]);
	}

#ifdef HAVE_KW_THREAD
	workers_free_block_lists = data;
#else
	mono_native_tls_set_value (workers_free_block_lists_key, data);
#endif
}
static void
major_reset_worker_data (void *data)
{
	MSBlockInfo ***lists = data;
	int i;
	for (i = 0; i < MS_BLOCK_TYPE_MAX; ++i) {
		int j;
		for (j = 0; j < num_block_obj_sizes; ++j)
			lists [i][j] = NULL;
	}
}
#endif
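/*
 * Life-cycle sketch: each parallel-mark worker gets its own block-list matrix
 * from major_alloc_worker_data, installs it in thread-local storage via
 * major_init_worker_thread so block allocation during marking needs no lock,
 * and major_reset_worker_data empties the lists again between collections.
 */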
#undef pthread_create
static void
post_param_init (SgenMajorCollector *collector)
{
	collector->sweeps_lazily = lazy_sweep;
}
#ifdef SGEN_HAVE_CONCURRENT_MARK
static void
sgen_marksweep_init_internal (SgenMajorCollector *collector, gboolean is_concurrent)
#else // SGEN_HAVE_CONCURRENT_MARK
#ifdef SGEN_PARALLEL_MARK
#ifdef FIXED_HEAP
void
sgen_marksweep_fixed_par_init (SgenMajorCollector *collector)
#else // FIXED_HEAP
void
sgen_marksweep_par_init (SgenMajorCollector *collector)
#endif // FIXED_HEAP
#else // SGEN_PARALLEL_MARK
#ifdef FIXED_HEAP
void
sgen_marksweep_fixed_init (SgenMajorCollector *collector)
#else // FIXED_HEAP
#error unknown configuration
#endif // FIXED_HEAP
#endif // SGEN_PARALLEL_MARK
#endif // SGEN_HAVE_CONCURRENT_MARK
{
	int i;
	sgen_register_fixed_internal_mem_type (INTERNAL_MEM_MS_BLOCK_INFO, sizeof (MSBlockInfo));

	num_block_obj_sizes = ms_calculate_block_obj_sizes (MS_BLOCK_OBJ_SIZE_FACTOR, NULL);
	block_obj_sizes = sgen_alloc_internal_dynamic (sizeof (int) * num_block_obj_sizes, INTERNAL_MEM_MS_TABLES, TRUE);
	ms_calculate_block_obj_sizes (MS_BLOCK_OBJ_SIZE_FACTOR, block_obj_sizes);

	evacuate_block_obj_sizes = sgen_alloc_internal_dynamic (sizeof (gboolean) * num_block_obj_sizes, INTERNAL_MEM_MS_TABLES, TRUE);
	for (i = 0; i < num_block_obj_sizes; ++i)
		evacuate_block_obj_sizes [i] = FALSE;
	/*
	{
		int i;
		g_print ("block object sizes:\n");
		for (i = 0; i < num_block_obj_sizes; ++i)
			g_print ("%d\n", block_obj_sizes [i]);
	}
	*/
	alloc_free_block_lists (free_block_lists);
	for (i = 0; i < MS_NUM_FAST_BLOCK_OBJ_SIZE_INDEXES; ++i)
		fast_block_obj_size_indexes [i] = ms_find_block_obj_size_index (i * 8);
	for (i = 0; i < MS_NUM_FAST_BLOCK_OBJ_SIZE_INDEXES * 8; ++i)
		g_assert (MS_BLOCK_OBJ_SIZE_INDEX (i) == ms_find_block_obj_size_index (i));
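	/*
	 * Worked example: a 40-byte request resolves to
	 * fast_block_obj_size_indexes [5] (40 rounded up to a multiple of 8,
	 * then divided by 8), which was precomputed above as
	 * ms_find_block_obj_size_index (40); the assert loop checks that the
	 * fast table agrees with the slow search for every size it covers.
	 */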
#ifdef SGEN_PARALLEL_MARK
	LOCK_INIT (ms_block_list_mutex);
#endif
	mono_counters_register ("# major blocks allocated", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_major_blocks_alloced);
	mono_counters_register ("# major blocks freed", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_major_blocks_freed);
	mono_counters_register ("# major blocks lazy swept", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_major_blocks_lazy_swept);
	mono_counters_register ("# major objects evacuated", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_major_objects_evacuated);
#if SIZEOF_VOID_P != 8
	mono_counters_register ("# major blocks freed ideally", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_major_blocks_freed_ideal);
	mono_counters_register ("# major blocks freed less ideally", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_major_blocks_freed_less_ideal);
	mono_counters_register ("# major blocks freed individually", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_major_blocks_freed_individual);
	mono_counters_register ("# major blocks allocated less ideally", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_major_blocks_alloced_less_ideal);
#endif
#ifdef SGEN_PARALLEL_MARK
#ifndef HAVE_KW_THREAD
	mono_native_tls_alloc (&workers_free_block_lists_key, NULL);
#endif
#endif
	collector->section_size = MAJOR_SECTION_SIZE;
#ifdef SGEN_PARALLEL_MARK
	collector->is_parallel = TRUE;
	collector->alloc_worker_data = major_alloc_worker_data;
	collector->init_worker_thread = major_init_worker_thread;
	collector->reset_worker_data = major_reset_worker_data;
#else
	collector->is_parallel = FALSE;
#endif
#ifdef SGEN_HAVE_CONCURRENT_MARK
	concurrent_mark = is_concurrent;
	if (is_concurrent) {
		collector->is_concurrent = TRUE;
		collector->want_synchronous_collection = &want_evacuation;
		collector->get_and_reset_num_major_objects_marked = major_get_and_reset_num_major_objects_marked;
	} else
#endif
	{
		collector->is_concurrent = FALSE;
		collector->want_synchronous_collection = NULL;
	}
	collector->supports_cardtable = TRUE;

	collector->have_swept = &have_swept;

	collector->alloc_heap = major_alloc_heap;
	collector->is_object_live = major_is_object_live;
	collector->alloc_small_pinned_obj = major_alloc_small_pinned_obj;
	collector->alloc_degraded = major_alloc_degraded;
	collector->alloc_object = major_alloc_object;
#ifdef SGEN_PARALLEL_MARK
	collector->par_alloc_object = major_par_alloc_object;
#endif
	collector->free_pinned_object = free_pinned_object;
	collector->iterate_objects = major_iterate_objects;
	collector->free_non_pinned_object = major_free_non_pinned_object;
	collector->find_pin_queue_start_ends = major_find_pin_queue_start_ends;
	collector->pin_objects = major_pin_objects;
	collector->pin_major_object = pin_major_object;
	collector->scan_card_table = major_scan_card_table;
	collector->iterate_live_block_ranges = (void*)(void*) major_iterate_live_block_ranges;
#ifdef SGEN_HAVE_CONCURRENT_MARK
	if (is_concurrent) {
		collector->update_cardtable_mod_union = update_cardtable_mod_union;
		collector->get_cardtable_mod_union_for_object = major_get_cardtable_mod_union_for_object;
	}
#endif
	collector->init_to_space = major_init_to_space;
	collector->sweep = major_sweep;
	collector->check_scan_starts = major_check_scan_starts;
	collector->dump_heap = major_dump_heap;
	collector->get_used_size = major_get_used_size;
	collector->start_nursery_collection = major_start_nursery_collection;
	collector->finish_nursery_collection = major_finish_nursery_collection;
	collector->start_major_collection = major_start_major_collection;
	collector->finish_major_collection = major_finish_major_collection;
	collector->have_computed_minor_collection_allowance = major_have_computer_minor_collection_allowance;
	collector->ptr_is_in_non_pinned_space = major_ptr_is_in_non_pinned_space;
	collector->obj_is_from_pinned_alloc = obj_is_from_pinned_alloc;
	collector->report_pinned_memory_usage = major_report_pinned_memory_usage;
	collector->get_num_major_sections = get_num_major_sections;
	collector->handle_gc_param = major_handle_gc_param;
	collector->print_gc_param_usage = major_print_gc_param_usage;
	collector->post_param_init = post_param_init;
	collector->is_valid_object = major_is_valid_object;
	collector->describe_pointer = major_describe_pointer;
	collector->count_cards = major_count_cards;
	collector->major_ops.copy_or_mark_object = major_copy_or_mark_object_canonical;
	collector->major_ops.scan_object = major_scan_object;
#ifdef SGEN_HAVE_CONCURRENT_MARK
	if (is_concurrent) {
		collector->major_concurrent_ops.copy_or_mark_object = major_copy_or_mark_object_concurrent_canonical;
		collector->major_concurrent_ops.scan_object = major_scan_object_concurrent;
		collector->major_concurrent_ops.scan_vtype = major_scan_vtype_concurrent;
	}
#endif
	/* The cardtable requires major blocks to be 8-card aligned. */
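	/* With 512-byte cards that is 16384 % (8 * 512) == 16384 % 4096 == 0,
	 * so the 16k block size qualifies. */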
	g_assert ((MS_BLOCK_SIZE % (8 * CARD_SIZE_IN_BYTES)) == 0);
}

#ifdef SGEN_HAVE_CONCURRENT_MARK
void
sgen_marksweep_init (SgenMajorCollector *collector)
{
	sgen_marksweep_init_internal (collector, FALSE);
}

void
sgen_marksweep_conc_init (SgenMajorCollector *collector)
{
	sgen_marksweep_init_internal (collector, TRUE);
}
#endif
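
/*
 * Usage sketch (hypothetical caller; the real selection happens in sgen-gc.c's
 * collector-option handling):
 *
 *	SgenMajorCollector major_collector;
 *	if (concurrent_requested)
 *		sgen_marksweep_conc_init (&major_collector);
 *	else
 *		sgen_marksweep_init (&major_collector);
 */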