2 * sgen-marksweep.c: The Mark & Sweep major collector.
5 * Mark Probst <mark.probst@gmail.com>
7 * Copyright 2009-2010 Novell, Inc.
8 * Copyright (C) 2012 Xamarin Inc
10 * This library is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Library General Public
12 * License 2.0 as published by the Free Software Foundation;
14 * This library is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * Library General Public License for more details.
19 * You should have received a copy of the GNU Library General Public
20 * License 2.0 along with this library; if not, write to the Free
21 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
31 #include "utils/mono-counters.h"
32 #include "utils/mono-semaphore.h"
33 #include "utils/mono-time.h"
34 #include "metadata/object-internals.h"
35 #include "metadata/profiler-private.h"
37 #include "metadata/sgen-gc.h"
38 #include "metadata/sgen-protocol.h"
39 #include "metadata/sgen-cardtable.h"
40 #include "metadata/sgen-memory-governor.h"
41 #include "metadata/sgen-layout-stats.h"
42 #include "metadata/gc-internal.h"
43 #include "metadata/sgen-pointer-queue.h"
44 #include "metadata/sgen-pinning.h"
45 #include "metadata/sgen-workers.h"
47 #define SGEN_HAVE_CONCURRENT_MARK
49 #if defined(ARCH_MIN_MS_BLOCK_SIZE) && defined(ARCH_MIN_MS_BLOCK_SIZE_SHIFT)
50 #define MS_BLOCK_SIZE ARCH_MIN_MS_BLOCK_SIZE
51 #define MS_BLOCK_SIZE_SHIFT ARCH_MIN_MS_BLOCK_SIZE_SHIFT
53 #define MS_BLOCK_SIZE_SHIFT 14 /* INT FASTENABLE */
54 #define MS_BLOCK_SIZE (1 << MS_BLOCK_SIZE_SHIFT)
56 #define MAJOR_SECTION_SIZE MS_BLOCK_SIZE
57 #define CARDS_PER_BLOCK (MS_BLOCK_SIZE / CARD_SIZE_IN_BYTES)
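/*
 * Worked example (editorial note, not from the original source): assuming the
 * usual 16k blocks (MS_BLOCK_SIZE_SHIFT == 14) and 512-byte cards
 * (CARD_SIZE_IN_BYTES == 512), CARDS_PER_BLOCK is 16384 / 512 = 32, i.e. each
 * major block is covered by exactly 32 card table bytes.
 */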
60 * Don't allocate single blocks, but alloc a contingent of this many
61 * blocks in one swoop. This must be a power of two.
63 #define MS_BLOCK_ALLOC_NUM 32
66 * Number of bytes before the first object in a block. At the start
67 * of a block is the MSBlockHeader, then optional padding, then come
68 * the objects, so this must be >= sizeof (MSBlockHeader).
70 #define MS_BLOCK_SKIP ((sizeof (MSBlockHeader) + 15) & ~15)
72 #define MS_BLOCK_FREE (MS_BLOCK_SIZE - MS_BLOCK_SKIP)
74 #define MS_NUM_MARK_WORDS ((MS_BLOCK_SIZE / SGEN_ALLOC_ALIGN + sizeof (mword) * 8 - 1) / (sizeof (mword) * 8))
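/*
 * Worked example (editorial note, assuming the common configuration of 16k
 * blocks, 8-byte allocation alignment and 64-bit mwords, none of which are
 * stated on this line): a block has 16384 / 8 = 2048 possible object slots,
 * so it needs (2048 + 63) / 64 = 32 mark words.
 */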
76 typedef struct _MSBlockInfo MSBlockInfo;
80 unsigned int pinned : 1;
81 unsigned int has_references : 1;
82 unsigned int has_pinned : 1; /* means cannot evacuate */
83 unsigned int is_to_space : 1;
84 unsigned int swept : 1;
86 MSBlockInfo *next_free;
87 size_t pin_queue_first_entry;
88 size_t pin_queue_last_entry;
89 #ifdef SGEN_HAVE_CONCURRENT_MARK
90 guint8 *cardtable_mod_union;
92 mword mark_words [MS_NUM_MARK_WORDS];
95 #define MS_BLOCK_FOR_BLOCK_INFO(b) ((char*)(b))
97 #define MS_BLOCK_OBJ(b,i) (MS_BLOCK_FOR_BLOCK_INFO(b) + MS_BLOCK_SKIP + (b)->obj_size * (i))
98 #define MS_BLOCK_OBJ_FOR_SIZE(b,i,obj_size) (MS_BLOCK_FOR_BLOCK_INFO(b) + MS_BLOCK_SKIP + (obj_size) * (i))
99 #define MS_BLOCK_DATA_FOR_OBJ(o) ((char*)((mword)(o) & ~(mword)(MS_BLOCK_SIZE - 1)))
105 #define MS_BLOCK_FOR_OBJ(o) (&((MSBlockHeader*)MS_BLOCK_DATA_FOR_OBJ ((o)))->info)
107 /* object index will always be small */
108 #define MS_BLOCK_OBJ_INDEX(o,b) ((int)(((char*)(o) - (MS_BLOCK_FOR_BLOCK_INFO(b) + MS_BLOCK_SKIP)) / (b)->obj_size))
110 // casting to int is fine since offsets within a block are small (blocks are 16k by default)
111 #define MS_CALC_MARK_BIT(w,b,o) do { \
112 int i = ((int)((char*)(o) - MS_BLOCK_DATA_FOR_OBJ ((o)))) >> SGEN_ALLOC_ALIGN_BITS; \
113 if (sizeof (mword) == 4) { \
122 #define MS_MARK_BIT(bl,w,b) ((bl)->mark_words [(w)] & (ONE_P << (b)))
123 #define MS_SET_MARK_BIT(bl,w,b) ((bl)->mark_words [(w)] |= (ONE_P << (b)))
125 #define MS_OBJ_ALLOCED(o,b) (*(void**)(o) && (*(char**)(o) < MS_BLOCK_FOR_BLOCK_INFO (b) || *(char**)(o) >= MS_BLOCK_FOR_BLOCK_INFO (b) + MS_BLOCK_SIZE))
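/*
 * Editorial note: a slot's first word is either NULL (last entry of the
 * block's free list), a pointer to the next free slot in the same block, or
 * an allocated object's vtable pointer, which always lives outside the block.
 * MS_OBJ_ALLOCED therefore reads "non-NULL and pointing outside the block"
 * as "allocated".
 */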
127 #define MS_BLOCK_OBJ_SIZE_FACTOR (pow (2.0, 1.0 / 3))
130 * This way we can look up block object size indexes for sizes up to
131 * 256 bytes with a single load.
133 #define MS_NUM_FAST_BLOCK_OBJ_SIZE_INDEXES 32
135 static int *block_obj_sizes;
136 static int num_block_obj_sizes;
137 static int fast_block_obj_size_indexes [MS_NUM_FAST_BLOCK_OBJ_SIZE_INDEXES];
139 #define MS_BLOCK_FLAG_PINNED 1
140 #define MS_BLOCK_FLAG_REFS 2
142 #define MS_BLOCK_TYPE_MAX 4
144 static gboolean *evacuate_block_obj_sizes;
145 static float evacuation_threshold = 0.666f;
146 #ifdef SGEN_HAVE_CONCURRENT_MARK
147 static float concurrent_evacuation_threshold = 0.666f;
148 static gboolean want_evacuation = FALSE;
151 static gboolean lazy_sweep = TRUE;
152 static gboolean have_swept;
154 #ifdef SGEN_HAVE_CONCURRENT_MARK
155 static gboolean concurrent_mark;
158 #define BLOCK_IS_TAGGED_HAS_REFERENCES(bl) SGEN_POINTER_IS_TAGGED_1 ((bl))
159 #define BLOCK_TAG_HAS_REFERENCES(bl) SGEN_POINTER_TAG_1 ((bl))
160 #define BLOCK_UNTAG_HAS_REFERENCES(bl) SGEN_POINTER_UNTAG_1 ((bl))
162 #define BLOCK_TAG(bl) ((bl)->has_references ? BLOCK_TAG_HAS_REFERENCES ((bl)) : (bl))
164 /* all allocated blocks in the system */
165 static SgenPointerQueue allocated_blocks;
167 /* non-allocated block free-list */
168 static void *empty_blocks = NULL;
169 static size_t num_empty_blocks = 0;
171 #define FOREACH_BLOCK(bl) { size_t __index; for (__index = 0; __index < allocated_blocks.next_slot; ++__index) { (bl) = BLOCK_UNTAG_HAS_REFERENCES (allocated_blocks.data [__index]);
172 #define FOREACH_BLOCK_HAS_REFERENCES(bl,hr) { size_t __index; for (__index = 0; __index < allocated_blocks.next_slot; ++__index) { (bl) = allocated_blocks.data [__index]; (hr) = BLOCK_IS_TAGGED_HAS_REFERENCES ((bl)); (bl) = BLOCK_UNTAG_HAS_REFERENCES ((bl));
173 #define END_FOREACH_BLOCK } }
174 #define DELETE_BLOCK_IN_FOREACH() (allocated_blocks.data [__index] = NULL)
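/*
 * Usage sketch (editorial note, not from the original source): blocks are
 * stored in allocated_blocks with the has_references flag encoded in the
 * pointer's low tag bit, so iteration always goes through these macros:
 *
 *	MSBlockInfo *block;
 *	FOREACH_BLOCK (block) {
 *		... use block ...
 *	} END_FOREACH_BLOCK;
 *
 * DELETE_BLOCK_IN_FOREACH () only NULLs the queue slot; the sweep code below
 * compacts the queue afterwards with sgen_pointer_queue_remove_nulls ().
 */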
176 static size_t num_major_sections = 0;
177 /* one free block list for each block object size */
178 static MSBlockInfo **free_block_lists [MS_BLOCK_TYPE_MAX];
180 static guint64 stat_major_blocks_alloced = 0;
181 static guint64 stat_major_blocks_freed = 0;
182 static guint64 stat_major_blocks_lazy_swept = 0;
183 static guint64 stat_major_objects_evacuated = 0;
185 #if SIZEOF_VOID_P != 8
186 static guint64 stat_major_blocks_freed_ideal = 0;
187 static guint64 stat_major_blocks_freed_less_ideal = 0;
188 static guint64 stat_major_blocks_freed_individual = 0;
189 static guint64 stat_major_blocks_alloced_less_ideal = 0;
192 #ifdef SGEN_COUNT_NUMBER_OF_MAJOR_OBJECTS_MARKED
193 static guint64 num_major_objects_marked = 0;
194 #define INC_NUM_MAJOR_OBJECTS_MARKED() (++num_major_objects_marked)
196 #define INC_NUM_MAJOR_OBJECTS_MARKED()
199 #ifdef SGEN_HEAVY_BINARY_PROTOCOL
200 static mono_mutex_t scanned_objects_list_lock;
201 static SgenPointerQueue scanned_objects_list;
204 add_scanned_object (void *ptr)
206 if (!binary_protocol_is_enabled ())
209 mono_mutex_lock (&scanned_objects_list_lock);
210 sgen_pointer_queue_add (&scanned_objects_list, ptr);
211 mono_mutex_unlock (&scanned_objects_list_lock);
216 sweep_block (MSBlockInfo *block, gboolean during_major_collection);
219 ms_find_block_obj_size_index (size_t size)
222 SGEN_ASSERT (9, size <= SGEN_MAX_SMALL_OBJ_SIZE, "size %d is bigger than max small object size %d", size, SGEN_MAX_SMALL_OBJ_SIZE);
223 for (i = 0; i < num_block_obj_sizes; ++i)
224 if (block_obj_sizes [i] >= size)
226 g_error ("no object of size %d\n", size);
229 #define FREE_BLOCKS_FROM(lists,p,r) (lists [((p) ? MS_BLOCK_FLAG_PINNED : 0) | ((r) ? MS_BLOCK_FLAG_REFS : 0)])
230 #define FREE_BLOCKS(p,r) (FREE_BLOCKS_FROM (free_block_lists, (p), (r)))
232 #define MS_BLOCK_OBJ_SIZE_INDEX(s) \
233 (((s)+7)>>3 < MS_NUM_FAST_BLOCK_OBJ_SIZE_INDEXES ? \
234 fast_block_obj_size_indexes [((s)+7)>>3] : \
235 ms_find_block_obj_size_index ((s)))
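/*
 * Worked example (editorial note): for a request of s == 24 bytes,
 * ((s)+7)>>3 is 3, which is below MS_NUM_FAST_BLOCK_OBJ_SIZE_INDEXES, so the
 * size index comes straight out of fast_block_obj_size_indexes [3]; a
 * 500-byte request falls through to the linear search in
 * ms_find_block_obj_size_index ().
 */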
238 major_alloc_heap (mword nursery_size, mword nursery_align, int the_nursery_bits)
242 start = sgen_alloc_os_memory_aligned (nursery_size, nursery_align, SGEN_ALLOC_HEAP | SGEN_ALLOC_ACTIVATE, "nursery");
244 start = sgen_alloc_os_memory (nursery_size, SGEN_ALLOC_HEAP | SGEN_ALLOC_ACTIVATE, "nursery");
250 update_heap_boundaries_for_block (MSBlockInfo *block)
252 sgen_update_heap_boundaries ((mword)MS_BLOCK_FOR_BLOCK_INFO (block), (mword)MS_BLOCK_FOR_BLOCK_INFO (block) + MS_BLOCK_SIZE);
256 ms_get_empty_block (void)
260 void *block, *empty, *next;
265 * We try allocating MS_BLOCK_ALLOC_NUM blocks first. If that's
266 * unsuccessful, we halve the number of blocks and try again, until we're at
267 * 1. If that doesn't work either, we assert.
269 int alloc_num = MS_BLOCK_ALLOC_NUM;
271 p = sgen_alloc_os_memory_aligned (MS_BLOCK_SIZE * alloc_num, MS_BLOCK_SIZE, SGEN_ALLOC_HEAP | SGEN_ALLOC_ACTIVATE,
272 alloc_num == 1 ? "major heap section" : NULL);
278 for (i = 0; i < alloc_num; ++i) {
281 * We do the free list update one after the
282 * other so that other threads can use the new
283 * blocks as quickly as possible.
286 empty = empty_blocks;
287 *(void**)block = empty;
288 } while (SGEN_CAS_PTR ((gpointer*)&empty_blocks, block, empty) != empty);
292 SGEN_ATOMIC_ADD_P (num_empty_blocks, alloc_num);
294 stat_major_blocks_alloced += alloc_num;
295 #if SIZEOF_VOID_P != 8
296 if (alloc_num != MS_BLOCK_ALLOC_NUM)
297 stat_major_blocks_alloced_less_ideal += alloc_num;
302 empty = empty_blocks;
306 next = *(void**)block;
307 } while (SGEN_CAS_PTR (&empty_blocks, next, empty) != empty);
309 SGEN_ATOMIC_ADD_P (num_empty_blocks, -1);
311 *(void**)block = NULL;
313 g_assert (!((mword)block & (MS_BLOCK_SIZE - 1)));
319 ms_free_block (void *block)
323 sgen_memgov_release_space (MS_BLOCK_SIZE, SPACE_MAJOR);
324 memset (block, 0, MS_BLOCK_SIZE);
327 empty = empty_blocks;
328 *(void**)block = empty;
329 } while (SGEN_CAS_PTR (&empty_blocks, block, empty) != empty);
331 SGEN_ATOMIC_ADD_P (num_empty_blocks, 1);
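/*
 * Editorial note on the idiom above: empty_blocks is a lock-free LIFO.  A
 * push reads the current head, stores it into the block's first word (the
 * link), and then publishes the block with a compare-and-swap of the head;
 * if another thread won the race the CAS fails and the loop retries.  The
 * pop in ms_get_empty_block () is the mirror image.
 */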
334 //#define MARKSWEEP_CONSISTENCY_CHECK
336 #ifdef MARKSWEEP_CONSISTENCY_CHECK
338 check_block_free_list (MSBlockInfo *block, int size, gboolean pinned)
342 for (; block; block = block->next_free) {
343 g_assert (block->obj_size == size);
344 g_assert ((pinned && block->pinned) || (!pinned && !block->pinned));
346 /* blocks in the free lists must have at least
349 g_assert (block->free_list);
351 /* the block must be in the allocated_blocks array */
352 g_assert (sgen_pointer_queue_find (&allocated_blocks, BLOCK_TAG (block)) != (size_t)-1);
357 check_empty_blocks (void)
361 for (p = empty_blocks; p; p = *(void**)p)
363 g_assert (i == num_empty_blocks);
367 consistency_check (void)
372 /* check all blocks */
373 FOREACH_BLOCK (block) {
374 int count = MS_BLOCK_FREE / block->obj_size;
378 /* check block header */
379 g_assert (((MSBlockHeader*)block->block)->info == block);
381 /* count number of free slots */
382 for (i = 0; i < count; ++i) {
383 void **obj = (void**) MS_BLOCK_OBJ (block, i);
384 if (!MS_OBJ_ALLOCED (obj, block))
388 /* check free list */
389 for (free = block->free_list; free; free = (void**)*free) {
390 g_assert (MS_BLOCK_FOR_OBJ (free) == block);
393 g_assert (num_free == 0);
395 /* check all mark words are zero */
397 for (i = 0; i < MS_NUM_MARK_WORDS; ++i)
398 g_assert (block->mark_words [i] == 0);
402 /* check free blocks */
403 for (i = 0; i < num_block_obj_sizes; ++i) {
405 for (j = 0; j < MS_BLOCK_TYPE_MAX; ++j)
406 check_block_free_list (free_block_lists [j][i], block_obj_sizes [i], j & MS_BLOCK_FLAG_PINNED);
409 check_empty_blocks ();
414 ms_alloc_block (int size_index, gboolean pinned, gboolean has_references)
416 int size = block_obj_sizes [size_index];
417 int count = MS_BLOCK_FREE / size;
419 MSBlockInfo **free_blocks = FREE_BLOCKS (pinned, has_references);
423 if (!sgen_memgov_try_alloc_space (MS_BLOCK_SIZE, SPACE_MAJOR))
426 info = (MSBlockInfo*)ms_get_empty_block ();
428 SGEN_ASSERT (9, count >= 2, "block with %d objects, it must hold at least 2", count);
430 info->obj_size = size;
431 info->obj_size_index = size_index;
432 info->pinned = pinned;
433 info->has_references = has_references;
434 info->has_pinned = pinned;
436 * Blocks that are to-space are not evacuated from. During a major collection
437 * blocks are allocated for two reasons: evacuating objects from the nursery and
438 * evacuating them from major blocks marked for evacuation. In both cases we don't
439 * want further evacuation.
441 info->is_to_space = (sgen_get_current_collection_generation () == GENERATION_OLD);
443 #ifdef SGEN_HAVE_CONCURRENT_MARK
444 info->cardtable_mod_union = NULL;
447 update_heap_boundaries_for_block (info);
449 /* build free list */
450 obj_start = MS_BLOCK_FOR_BLOCK_INFO (info) + MS_BLOCK_SKIP;
451 info->free_list = (void**)obj_start;
452 /* we're skipping the last one - it must be nulled */
453 for (i = 0; i < count - 1; ++i) {
454 char *next_obj_start = obj_start + size;
455 *(void**)obj_start = next_obj_start;
456 obj_start = next_obj_start;
459 *(void**)obj_start = NULL;
461 info->next_free = free_blocks [size_index];
462 free_blocks [size_index] = info;
464 sgen_pointer_queue_add (&allocated_blocks, BLOCK_TAG (info));
466 ++num_major_sections;
471 obj_is_from_pinned_alloc (char *ptr)
475 FOREACH_BLOCK (block) {
476 if (ptr >= MS_BLOCK_FOR_BLOCK_INFO (block) && ptr <= MS_BLOCK_FOR_BLOCK_INFO (block) + MS_BLOCK_SIZE)
477 return block->pinned;
483 unlink_slot_from_free_list_uncontested (MSBlockInfo **free_blocks, int size_index)
488 block = free_blocks [size_index];
489 SGEN_ASSERT (9, block, "no free block to unlink from free_blocks %p size_index %d", free_blocks, size_index);
491 if (G_UNLIKELY (!block->swept)) {
492 stat_major_blocks_lazy_swept ++;
493 sweep_block (block, FALSE);
496 obj = block->free_list;
497 SGEN_ASSERT (9, obj, "block %p in free list had no available object to alloc from", block);
499 block->free_list = *(void**)obj;
500 if (!block->free_list) {
501 free_blocks [size_index] = block->next_free;
502 block->next_free = NULL;
509 alloc_obj (MonoVTable *vtable, size_t size, gboolean pinned, gboolean has_references)
511 int size_index = MS_BLOCK_OBJ_SIZE_INDEX (size);
512 MSBlockInfo **free_blocks = FREE_BLOCKS (pinned, has_references);
515 if (!free_blocks [size_index]) {
516 if (G_UNLIKELY (!ms_alloc_block (size_index, pinned, has_references)))
520 obj = unlink_slot_from_free_list_uncontested (free_blocks, size_index);
522 *(MonoVTable**)obj = vtable;
528 major_alloc_object (MonoVTable *vtable, size_t size, gboolean has_references)
530 return alloc_obj (vtable, size, FALSE, has_references);
534 * We don't free the block even if it becomes empty; we leave that work for
535 * the next major collection.
537 * This is just called from the domain clearing code, which runs in a
538 * single thread and has the GC lock, so we don't need an extra lock.
541 free_object (char *obj, size_t size, gboolean pinned)
543 MSBlockInfo *block = MS_BLOCK_FOR_OBJ (obj);
547 sweep_block (block, FALSE);
548 SGEN_ASSERT (9, (pinned && block->pinned) || (!pinned && !block->pinned), "free-object pinning mixup object %p pinned %d block %p pinned %d", obj, pinned, block, block->pinned);
549 SGEN_ASSERT (9, MS_OBJ_ALLOCED (obj, block), "object %p is already free", obj);
550 MS_CALC_MARK_BIT (word, bit, obj);
551 SGEN_ASSERT (9, !MS_MARK_BIT (block, word, bit), "object %p has mark bit set", obj);
552 if (!block->free_list) {
553 MSBlockInfo **free_blocks = FREE_BLOCKS (pinned, block->has_references);
554 int size_index = MS_BLOCK_OBJ_SIZE_INDEX (size);
555 SGEN_ASSERT (9, !block->next_free, "block %p doesn't have a free-list of objects but belongs to a free-list of blocks", block);
556 block->next_free = free_blocks [size_index];
557 free_blocks [size_index] = block;
559 memset (obj, 0, size);
560 *(void**)obj = block->free_list;
561 block->free_list = (void**)obj;
565 major_free_non_pinned_object (char *obj, size_t size)
567 free_object (obj, size, FALSE);
570 /* size is a multiple of SGEN_ALLOC_ALIGN */
572 major_alloc_small_pinned_obj (MonoVTable *vtable, size_t size, gboolean has_references)
576 res = alloc_obj (vtable, size, TRUE, has_references);
577 /* If we failed to allocate memory, we'd better try releasing some memory,
578 * since a pinned allocation was requested by the runtime.
581 sgen_perform_collection (0, GENERATION_OLD, "pinned alloc failure", TRUE);
582 res = alloc_obj (vtable, size, TRUE, has_references);
588 free_pinned_object (char *obj, size_t size)
590 free_object (obj, size, TRUE);
594 * size is already rounded up and we hold the GC lock.
597 major_alloc_degraded (MonoVTable *vtable, size_t size)
600 size_t old_num_sections;
602 old_num_sections = num_major_sections;
604 obj = alloc_obj (vtable, size, FALSE, SGEN_VTABLE_HAS_REFERENCES (vtable));
605 if (G_LIKELY (obj)) {
606 HEAVY_STAT (++stat_objects_alloced_degraded);
607 HEAVY_STAT (stat_bytes_alloced_degraded += size);
608 g_assert (num_major_sections >= old_num_sections);
609 sgen_register_major_sections_alloced (num_major_sections - old_num_sections);
615 * obj is some object. If it's not in the major heap (i.e. if it's in
616 * the nursery or LOS), return FALSE. Otherwise return whether it's
617 * been marked or copied.
620 major_is_object_live (char *obj)
626 if (sgen_ptr_in_nursery (obj))
629 objsize = SGEN_ALIGN_UP (sgen_safe_object_get_size ((MonoObject*)obj));
632 if (objsize > SGEN_MAX_SMALL_OBJ_SIZE)
635 /* now we know it's in a major block */
636 block = MS_BLOCK_FOR_OBJ (obj);
637 SGEN_ASSERT (9, !block->pinned, "block %p is pinned, BTW why is this bad?", block);
638 MS_CALC_MARK_BIT (word, bit, obj);
639 return MS_MARK_BIT (block, word, bit) ? TRUE : FALSE;
643 major_ptr_is_in_non_pinned_space (char *ptr, char **start)
647 FOREACH_BLOCK (block) {
648 if (ptr >= MS_BLOCK_FOR_BLOCK_INFO (block) && ptr <= MS_BLOCK_FOR_BLOCK_INFO (block) + MS_BLOCK_SIZE) {
649 int count = MS_BLOCK_FREE / block->obj_size;
653 for (i = 0; i <= count; ++i) {
654 if (ptr >= MS_BLOCK_OBJ (block, i) && ptr < MS_BLOCK_OBJ (block, i + 1)) {
655 *start = MS_BLOCK_OBJ (block, i);
659 return !block->pinned;
666 major_iterate_objects (IterateObjectsFlags flags, IterateObjectCallbackFunc callback, void *data)
668 gboolean sweep = flags & ITERATE_OBJECTS_SWEEP;
669 gboolean non_pinned = flags & ITERATE_OBJECTS_NON_PINNED;
670 gboolean pinned = flags & ITERATE_OBJECTS_PINNED;
673 FOREACH_BLOCK (block) {
674 int count = MS_BLOCK_FREE / block->obj_size;
677 if (block->pinned && !pinned)
679 if (!block->pinned && !non_pinned)
681 if (sweep && lazy_sweep) {
682 sweep_block (block, FALSE);
683 SGEN_ASSERT (0, block->swept, "Block must be swept after sweeping");
686 for (i = 0; i < count; ++i) {
687 void **obj = (void**) MS_BLOCK_OBJ (block, i);
690 MS_CALC_MARK_BIT (word, bit, obj);
691 if (!MS_MARK_BIT (block, word, bit))
694 if (MS_OBJ_ALLOCED (obj, block))
695 callback ((char*)obj, block->obj_size, data);
701 major_is_valid_object (char *object)
705 FOREACH_BLOCK (block) {
709 if ((MS_BLOCK_FOR_BLOCK_INFO (block) > object) || ((MS_BLOCK_FOR_BLOCK_INFO (block) + MS_BLOCK_SIZE) <= object))
712 idx = MS_BLOCK_OBJ_INDEX (object, block);
713 obj = (char*)MS_BLOCK_OBJ (block, idx);
716 return MS_OBJ_ALLOCED (obj, block);
724 major_describe_pointer (char *ptr)
728 FOREACH_BLOCK (block) {
736 if ((MS_BLOCK_FOR_BLOCK_INFO (block) > ptr) || ((MS_BLOCK_FOR_BLOCK_INFO (block) + MS_BLOCK_SIZE) <= ptr))
739 SGEN_LOG (0, "major-ptr (block %p sz %d pin %d ref %d)\n",
740 MS_BLOCK_FOR_BLOCK_INFO (block), block->obj_size, block->pinned, block->has_references);
742 idx = MS_BLOCK_OBJ_INDEX (ptr, block);
743 obj = (char*)MS_BLOCK_OBJ (block, idx);
744 live = MS_OBJ_ALLOCED (obj, block);
745 vtable = live ? (MonoVTable*)SGEN_LOAD_VTABLE (obj) : NULL;
747 MS_CALC_MARK_BIT (w, b, obj);
748 marked = MS_MARK_BIT (block, w, b);
753 SGEN_LOG (0, "object");
755 SGEN_LOG (0, "dead-object");
758 SGEN_LOG (0, "interior-ptr offset %td", ptr - obj);
760 SGEN_LOG (0, "dead-interior-ptr offset %td", ptr - obj);
763 SGEN_LOG (0, " marked %d)\n", marked ? 1 : 0);
772 major_check_scan_starts (void)
777 major_dump_heap (FILE *heap_dump_file)
780 int *slots_available = alloca (sizeof (int) * num_block_obj_sizes);
781 int *slots_used = alloca (sizeof (int) * num_block_obj_sizes);
784 for (i = 0; i < num_block_obj_sizes; ++i)
785 slots_available [i] = slots_used [i] = 0;
787 FOREACH_BLOCK (block) {
788 int index = ms_find_block_obj_size_index (block->obj_size);
789 int count = MS_BLOCK_FREE / block->obj_size;
791 slots_available [index] += count;
792 for (i = 0; i < count; ++i) {
793 if (MS_OBJ_ALLOCED (MS_BLOCK_OBJ (block, i), block))
794 ++slots_used [index];
798 fprintf (heap_dump_file, "<occupancies>\n");
799 for (i = 0; i < num_block_obj_sizes; ++i) {
800 fprintf (heap_dump_file, "<occupancy size=\"%d\" available=\"%d\" used=\"%d\" />\n",
801 block_obj_sizes [i], slots_available [i], slots_used [i]);
803 fprintf (heap_dump_file, "</occupancies>\n");
805 FOREACH_BLOCK (block) {
806 int count = MS_BLOCK_FREE / block->obj_size;
810 fprintf (heap_dump_file, "<section type=\"%s\" size=\"%zu\">\n", "old", (size_t)MS_BLOCK_FREE);
812 for (i = 0; i <= count; ++i) {
813 if ((i < count) && MS_OBJ_ALLOCED (MS_BLOCK_OBJ (block, i), block)) {
818 sgen_dump_occupied (MS_BLOCK_OBJ (block, start), MS_BLOCK_OBJ (block, i), MS_BLOCK_FOR_BLOCK_INFO (block));
824 fprintf (heap_dump_file, "</section>\n");
828 #define LOAD_VTABLE SGEN_LOAD_VTABLE
830 #define MS_MARK_OBJECT_AND_ENQUEUE_CHECKED(obj,desc,block,queue) do { \
832 MS_CALC_MARK_BIT (__word, __bit, (obj)); \
833 if (!MS_MARK_BIT ((block), __word, __bit) && MS_OBJ_ALLOCED ((obj), (block))) { \
834 MS_SET_MARK_BIT ((block), __word, __bit); \
835 if (sgen_gc_descr_has_references (desc)) \
836 GRAY_OBJECT_ENQUEUE ((queue), (obj), (desc)); \
837 binary_protocol_mark ((obj), (gpointer)LOAD_VTABLE ((obj)), sgen_safe_object_get_size ((MonoObject*)(obj))); \
838 INC_NUM_MAJOR_OBJECTS_MARKED (); \
841 #define MS_MARK_OBJECT_AND_ENQUEUE(obj,desc,block,queue) do { \
843 MS_CALC_MARK_BIT (__word, __bit, (obj)); \
844 SGEN_ASSERT (9, MS_OBJ_ALLOCED ((obj), (block)), "object %p not allocated", obj); \
845 if (!MS_MARK_BIT ((block), __word, __bit)) { \
846 MS_SET_MARK_BIT ((block), __word, __bit); \
847 if (sgen_gc_descr_has_references (desc)) \
848 GRAY_OBJECT_ENQUEUE ((queue), (obj), (desc)); \
849 binary_protocol_mark ((obj), (gpointer)LOAD_VTABLE ((obj)), sgen_safe_object_get_size ((MonoObject*)(obj))); \
850 INC_NUM_MAJOR_OBJECTS_MARKED (); \
855 pin_major_object (char *obj, SgenGrayQueue *queue)
859 #ifdef SGEN_HAVE_CONCURRENT_MARK
861 g_assert_not_reached ();
864 block = MS_BLOCK_FOR_OBJ (obj);
865 block->has_pinned = TRUE;
866 MS_MARK_OBJECT_AND_ENQUEUE (obj, sgen_obj_get_descriptor (obj), block, queue);
869 #include "sgen-major-copy-object.h"
871 #ifdef SGEN_HAVE_CONCURRENT_MARK
873 major_copy_or_mark_object_with_evacuation_concurrent (void **ptr, void *obj, SgenGrayQueue *queue)
875 SGEN_ASSERT (9, sgen_concurrent_collection_in_progress (), "Why are we scanning concurrently when there's no concurrent collection on?");
876 SGEN_ASSERT (9, !sgen_workers_are_working () || sgen_is_worker_thread (mono_native_thread_id_get ()), "We must not scan from two threads at the same time!");
878 g_assert (!SGEN_OBJECT_IS_FORWARDED (obj));
880 if (!sgen_ptr_in_nursery (obj)) {
883 objsize = SGEN_ALIGN_UP (sgen_safe_object_get_size ((MonoObject*)obj));
885 if (objsize <= SGEN_MAX_SMALL_OBJ_SIZE) {
886 MSBlockInfo *block = MS_BLOCK_FOR_OBJ (obj);
887 MS_MARK_OBJECT_AND_ENQUEUE (obj, sgen_obj_get_descriptor (obj), block, queue);
889 if (sgen_los_object_is_pinned (obj))
893 if (G_UNLIKELY (MONO_GC_OBJ_PINNED_ENABLED ())) {
894 MonoVTable *vt = (MonoVTable*)SGEN_LOAD_VTABLE (obj);
895 MONO_GC_OBJ_PINNED ((mword)obj, sgen_safe_object_get_size (obj), vt->klass->name_space, vt->klass->name, GENERATION_OLD);
899 sgen_los_pin_object (obj);
900 if (SGEN_OBJECT_HAS_REFERENCES (obj))
901 GRAY_OBJECT_ENQUEUE (queue, obj, sgen_obj_get_descriptor (obj));
902 INC_NUM_MAJOR_OBJECTS_MARKED ();
909 major_get_and_reset_num_major_objects_marked (void)
911 #ifdef SGEN_COUNT_NUMBER_OF_MAJOR_OBJECTS_MARKED
912 long long num = num_major_objects_marked;
913 num_major_objects_marked = 0;
920 #ifdef HEAVY_STATISTICS
921 static guint64 stat_optimized_copy;
922 static guint64 stat_optimized_copy_nursery;
923 static guint64 stat_optimized_copy_nursery_forwarded;
924 static guint64 stat_optimized_copy_nursery_pinned;
925 static guint64 stat_optimized_copy_major;
926 static guint64 stat_optimized_copy_major_small_fast;
927 static guint64 stat_optimized_copy_major_small_slow;
928 static guint64 stat_optimized_copy_major_large;
929 static guint64 stat_optimized_copy_major_forwarded;
930 static guint64 stat_optimized_copy_major_small_evacuate;
931 static guint64 stat_optimized_major_scan;
932 static guint64 stat_optimized_major_scan_no_refs;
934 static guint64 stat_drain_prefetch_fills;
935 static guint64 stat_drain_prefetch_fill_failures;
936 static guint64 stat_drain_loops;
939 static void major_scan_object_with_evacuation (char *start, mword desc, SgenGrayQueue *queue);
941 #define COPY_OR_MARK_FUNCTION_NAME major_copy_or_mark_object_no_evacuation
942 #define SCAN_OBJECT_FUNCTION_NAME major_scan_object_no_evacuation
943 #define DRAIN_GRAY_STACK_FUNCTION_NAME drain_gray_stack_no_evacuation
944 #include "sgen-marksweep-drain-gray-stack.h"
946 #define COPY_OR_MARK_WITH_EVACUATION
947 #define COPY_OR_MARK_FUNCTION_NAME major_copy_or_mark_object_with_evacuation
948 #define SCAN_OBJECT_FUNCTION_NAME major_scan_object_with_evacuation
949 #define DRAIN_GRAY_STACK_FUNCTION_NAME drain_gray_stack_with_evacuation
950 #include "sgen-marksweep-drain-gray-stack.h"
953 drain_gray_stack (ScanCopyContext ctx)
955 gboolean evacuation = FALSE;
957 for (i = 0; i < num_block_obj_sizes; ++i) {
958 if (evacuate_block_obj_sizes [i]) {
965 return drain_gray_stack_with_evacuation (ctx);
967 return drain_gray_stack_no_evacuation (ctx);
970 #ifdef SGEN_HAVE_CONCURRENT_MARK
971 #include "sgen-marksweep-scan-object-concurrent.h"
975 major_copy_or_mark_object_canonical (void **ptr, SgenGrayQueue *queue)
977 major_copy_or_mark_object_with_evacuation (ptr, *ptr, queue);
980 #ifdef SGEN_HAVE_CONCURRENT_MARK
982 major_copy_or_mark_object_concurrent_canonical (void **ptr, SgenGrayQueue *queue)
984 major_copy_or_mark_object_with_evacuation_concurrent (ptr, *ptr, queue);
989 mark_pinned_objects_in_block (MSBlockInfo *block, SgenGrayQueue *queue)
994 if (block->pin_queue_first_entry == block->pin_queue_last_entry)
997 block->has_pinned = TRUE;
999 entry = sgen_pinning_get_entry (block->pin_queue_first_entry);
1000 end = sgen_pinning_get_entry (block->pin_queue_last_entry);
1002 for (; entry < end; ++entry) {
1003 int index = MS_BLOCK_OBJ_INDEX (*entry, block);
1005 SGEN_ASSERT (9, index >= 0 && index < MS_BLOCK_FREE / block->obj_size, "invalid object %p index %d max-index %d", *entry, index, MS_BLOCK_FREE / block->obj_size);
1006 if (index == last_index)
1008 obj = MS_BLOCK_OBJ (block, index);
1009 MS_MARK_OBJECT_AND_ENQUEUE_CHECKED (obj, sgen_obj_get_descriptor (obj), block, queue);
1015 sweep_block_for_size (MSBlockInfo *block, int count, int obj_size)
1019 for (obj_index = 0; obj_index < count; ++obj_index) {
1021 void *obj = MS_BLOCK_OBJ_FOR_SIZE (block, obj_index, obj_size);
1023 MS_CALC_MARK_BIT (word, bit, obj);
1024 if (MS_MARK_BIT (block, word, bit)) {
1025 SGEN_ASSERT (9, MS_OBJ_ALLOCED (obj, block), "object %p not allocated", obj);
1027 /* an unmarked object */
1028 if (MS_OBJ_ALLOCED (obj, block)) {
1030 * FIXME: Merge consecutive
1031 * slots for lower reporting
1032 * overhead. Maybe memset
1033 * will also benefit?
1035 binary_protocol_empty (obj, obj_size);
1036 MONO_GC_MAJOR_SWEPT ((mword)obj, obj_size);
1037 memset (obj, 0, obj_size);
1039 *(void**)obj = block->free_list;
1040 block->free_list = obj;
1048 * Traverse BLOCK, freeing and zeroing unused objects.
1051 sweep_block (MSBlockInfo *block, gboolean during_major_collection)
1054 void *reversed = NULL;
1056 if (!during_major_collection)
1057 g_assert (!sgen_concurrent_collection_in_progress ());
1062 count = MS_BLOCK_FREE / block->obj_size;
1064 block->free_list = NULL;
1066 /* Use inline instances specialized to constant sizes; this allows the compiler to replace the memset calls with inline code */
1067 // FIXME: Add more sizes
1068 switch (block->obj_size) {
1070 sweep_block_for_size (block, count, 16);
1073 sweep_block_for_size (block, count, block->obj_size);
1077 /* reset mark bits */
1078 memset (block->mark_words, 0, sizeof (mword) * MS_NUM_MARK_WORDS);
1080 /* Reverse free list so that it's in address order */
1082 while (block->free_list) {
1083 void *next = *(void**)block->free_list;
1084 *(void**)block->free_list = reversed;
1085 reversed = block->free_list;
1086 block->free_list = next;
1088 block->free_list = reversed;
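/*
 * Editorial note (rationale not stated in the original): keeping the free
 * list in address order means subsequent allocations from this block walk
 * forward through memory, which tends to be friendlier to caching and
 * prefetching than the reverse order the sweep naturally produces.
 */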
1099 if (sizeof (mword) == sizeof (unsigned long))
1100 count += __builtin_popcountl (d);
1102 count += __builtin_popcount (d);
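/*
 * Editorial sketch (not from the original source): on compilers without
 * __builtin_popcount the same count could be computed with Kernighan's loop,
 * which clears one set bit of d per iteration:
 *
 *	for (count = 0; d; d &= d - 1)
 *		++count;
 */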
1118 /* statistics for evacuation */
1119 int *slots_available = alloca (sizeof (int) * num_block_obj_sizes);
1120 int *slots_used = alloca (sizeof (int) * num_block_obj_sizes);
1121 int *num_blocks = alloca (sizeof (int) * num_block_obj_sizes);
1123 #ifdef SGEN_HAVE_CONCURRENT_MARK
1124 mword total_evacuate_heap = 0;
1125 mword total_evacuate_saved = 0;
1128 for (i = 0; i < num_block_obj_sizes; ++i)
1129 slots_available [i] = slots_used [i] = num_blocks [i] = 0;
1131 /* clear all the free lists */
1132 for (i = 0; i < MS_BLOCK_TYPE_MAX; ++i) {
1133 MSBlockInfo **free_blocks = free_block_lists [i];
1135 for (j = 0; j < num_block_obj_sizes; ++j)
1136 free_blocks [j] = NULL;
1139 /* traverse all blocks, free and zero unmarked objects */
1140 FOREACH_BLOCK (block) {
1142 gboolean have_live = FALSE;
1143 gboolean has_pinned;
1144 gboolean have_free = FALSE;
1148 obj_size_index = block->obj_size_index;
1150 has_pinned = block->has_pinned;
1151 block->has_pinned = block->pinned;
1153 block->is_to_space = FALSE;
1156 count = MS_BLOCK_FREE / block->obj_size;
1158 #ifdef SGEN_HAVE_CONCURRENT_MARK
1159 if (block->cardtable_mod_union) {
1160 sgen_free_internal_dynamic (block->cardtable_mod_union, CARDS_PER_BLOCK, INTERNAL_MEM_CARDTABLE_MOD_UNION);
1161 block->cardtable_mod_union = NULL;
1165 /* Count marked objects in the block */
1166 for (i = 0; i < MS_NUM_MARK_WORDS; ++i) {
1167 nused += bitcount (block->mark_words [i]);
1176 sweep_block (block, TRUE);
1180 ++num_blocks [obj_size_index];
1181 slots_used [obj_size_index] += nused;
1182 slots_available [obj_size_index] += count;
1186 * If there are free slots in the block, add
1187 * the block to the corresponding free list.
1190 MSBlockInfo **free_blocks = FREE_BLOCKS (block->pinned, block->has_references);
1191 int index = MS_BLOCK_OBJ_SIZE_INDEX (block->obj_size);
1192 block->next_free = free_blocks [index];
1193 free_blocks [index] = block;
1196 update_heap_boundaries_for_block (block);
1199 * Blocks without live objects are removed from the
1200 * block list and freed.
1202 DELETE_BLOCK_IN_FOREACH ();
1204 binary_protocol_empty (MS_BLOCK_OBJ (block, 0), (char*)MS_BLOCK_OBJ (block, count) - (char*)MS_BLOCK_OBJ (block, 0));
1205 ms_free_block (block);
1207 --num_major_sections;
1209 } END_FOREACH_BLOCK;
1210 sgen_pointer_queue_remove_nulls (&allocated_blocks);
1212 for (i = 0; i < num_block_obj_sizes; ++i) {
1213 float usage = (float)slots_used [i] / (float)slots_available [i];
1214 if (num_blocks [i] > 5 && usage < evacuation_threshold) {
1215 evacuate_block_obj_sizes [i] = TRUE;
1217 g_print ("slot size %d - %d of %d used\n",
1218 block_obj_sizes [i], slots_used [i], slots_available [i]);
1221 evacuate_block_obj_sizes [i] = FALSE;
1223 #ifdef SGEN_HAVE_CONCURRENT_MARK
1225 mword total_bytes = block_obj_sizes [i] * slots_available [i];
1226 total_evacuate_heap += total_bytes;
1227 if (evacuate_block_obj_sizes [i])
1228 total_evacuate_saved += total_bytes - block_obj_sizes [i] * slots_used [i];
1233 #ifdef SGEN_HAVE_CONCURRENT_MARK
1234 want_evacuation = (float)total_evacuate_saved / (float)total_evacuate_heap > (1 - concurrent_evacuation_threshold);
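/*
 * Worked example (editorial note): with the default evacuation_threshold of
 * 0.666, a size class that has more than 5 blocks and whose slots are less
 * than two thirds occupied is marked for evacuation, so its surviving
 * objects get copied into fresh blocks during the next major collection
 * instead of being marked in place.
 */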
1246 static int count_pinned_ref;
1247 static int count_pinned_nonref;
1248 static int count_nonpinned_ref;
1249 static int count_nonpinned_nonref;
1252 count_nonpinned_callback (char *obj, size_t size, void *data)
1254 MonoVTable *vtable = (MonoVTable*)LOAD_VTABLE (obj);
1256 if (vtable->klass->has_references)
1257 ++count_nonpinned_ref;
1259 ++count_nonpinned_nonref;
1263 count_pinned_callback (char *obj, size_t size, void *data)
1265 MonoVTable *vtable = (MonoVTable*)LOAD_VTABLE (obj);
1267 if (vtable->klass->has_references)
1270 ++count_pinned_nonref;
1273 static G_GNUC_UNUSED void
1274 count_ref_nonref_objs (void)
1278 count_pinned_ref = 0;
1279 count_pinned_nonref = 0;
1280 count_nonpinned_ref = 0;
1281 count_nonpinned_nonref = 0;
1283 major_iterate_objects (ITERATE_OBJECTS_SWEEP_NON_PINNED, count_nonpinned_callback, NULL);
1284 major_iterate_objects (ITERATE_OBJECTS_SWEEP_PINNED, count_pinned_callback, NULL);
1286 total = count_pinned_nonref + count_nonpinned_nonref + count_pinned_ref + count_nonpinned_ref;
1288 g_print ("ref: %d pinned %d non-pinned non-ref: %d pinned %d non-pinned -- %.1f\n",
1289 count_pinned_ref, count_nonpinned_ref,
1290 count_pinned_nonref, count_nonpinned_nonref,
1291 (count_pinned_nonref + count_nonpinned_nonref) * 100.0 / total);
1295 ms_calculate_block_obj_sizes (double factor, int *arr)
1297 double target_size = sizeof (MonoObject);
1302 int target_count = (int)ceil (MS_BLOCK_FREE / target_size);
1303 int size = MIN ((MS_BLOCK_FREE / target_count) & ~(SGEN_ALLOC_ALIGN - 1), SGEN_MAX_SMALL_OBJ_SIZE);
1305 if (size != last_size) {
1307 arr [num_sizes] = size;
1312 target_size *= factor;
1313 } while (last_size < SGEN_MAX_SMALL_OBJ_SIZE);
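/*
 * Editorial note: with MS_BLOCK_OBJ_SIZE_FACTOR == 2^(1/3) ~ 1.26, each size
 * class is roughly 26% larger than the previous one, starting at
 * sizeof (MonoObject), rounded down to the allocation alignment, and capped
 * at SGEN_MAX_SMALL_OBJ_SIZE.  Calling the function with arr == NULL, as the
 * init code below does, only counts the classes so the caller can size the
 * block_obj_sizes array before a second call fills it in.
 */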
1318 /* only valid during minor collections */
1319 static mword old_num_major_sections;
1322 major_start_nursery_collection (void)
1324 #ifdef MARKSWEEP_CONSISTENCY_CHECK
1325 consistency_check ();
1328 old_num_major_sections = num_major_sections;
1332 major_finish_nursery_collection (void)
1334 #ifdef MARKSWEEP_CONSISTENCY_CHECK
1335 consistency_check ();
1337 sgen_register_major_sections_alloced (num_major_sections - old_num_major_sections);
1341 major_start_major_collection (void)
1345 /* clear the free lists */
1346 for (i = 0; i < num_block_obj_sizes; ++i) {
1347 if (!evacuate_block_obj_sizes [i])
1350 free_block_lists [0][i] = NULL;
1351 free_block_lists [MS_BLOCK_FLAG_REFS][i] = NULL;
1354 // Sweep all unswept blocks
1358 MONO_GC_SWEEP_BEGIN (GENERATION_OLD, TRUE);
1360 FOREACH_BLOCK (block) {
1361 sweep_block (block, TRUE);
1362 } END_FOREACH_BLOCK;
1364 MONO_GC_SWEEP_END (GENERATION_OLD, TRUE);
1369 major_finish_major_collection (ScannedObjectCounts *counts)
1371 #ifdef SGEN_HEAVY_BINARY_PROTOCOL
1372 if (binary_protocol_is_enabled ()) {
1373 counts->num_scanned_objects = scanned_objects_list.next_slot;
1375 sgen_pointer_queue_sort_uniq (&scanned_objects_list);
1376 counts->num_unique_scanned_objects = scanned_objects_list.next_slot;
1378 sgen_pointer_queue_clear (&scanned_objects_list);
1383 #if SIZEOF_VOID_P != 8
1385 compare_pointers (const void *va, const void *vb) {
1386 char *a = *(char**)va, *b = *(char**)vb;
1396 major_have_computer_minor_collection_allowance (void)
1398 size_t section_reserve = sgen_get_minor_collection_allowance () / MS_BLOCK_SIZE;
1400 g_assert (have_swept);
1402 #if SIZEOF_VOID_P != 8
1404 int i, num_empty_blocks_orig, num_blocks, arr_length;
1406 void **empty_block_arr;
1407 void **rebuild_next;
1411 * sgen_free_os_memory () asserts in mono_vfree () because windows doesn't like freeing the middle of
1412 * a VirtualAlloc ()-ed block.
1417 if (num_empty_blocks <= section_reserve)
1419 SGEN_ASSERT (0, num_empty_blocks > 0, "section reserve can't be negative");
1421 num_empty_blocks_orig = num_empty_blocks;
1422 empty_block_arr = (void**)sgen_alloc_internal_dynamic (sizeof (void*) * num_empty_blocks_orig,
1423 INTERNAL_MEM_MS_BLOCK_INFO_SORT, FALSE);
1424 if (!empty_block_arr)
1428 for (block = empty_blocks; block; block = *(void**)block)
1429 empty_block_arr [i++] = block;
1430 SGEN_ASSERT (0, i == num_empty_blocks, "empty block count wrong");
1432 sgen_qsort (empty_block_arr, num_empty_blocks, sizeof (void*), compare_pointers);
1435 * We iterate over the free blocks, trying to find MS_BLOCK_ALLOC_NUM
1436 * contiguous ones. If we do, we free them. If that's not enough to get to
1437 * section_reserve, we halve the number of contiguous blocks we're looking
1438 * for and have another go, until we're down to looking for pairs of
1439 * blocks, at which point we give up and go to the fallback.
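 *
 * Worked example (editorial note): with MS_BLOCK_ALLOC_NUM == 32 and 16k
 * blocks we first look for runs of 32 physically contiguous blocks (512k)
 * that can be returned to the OS with a single sgen_free_os_memory () call,
 * then runs of 16, 8, 4 and finally 2, before falling back to freeing
 * blocks one by one.
 */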
1441 arr_length = num_empty_blocks_orig;
1442 num_blocks = MS_BLOCK_ALLOC_NUM;
1443 while (num_empty_blocks > section_reserve && num_blocks > 1) {
1448 for (i = 0; i < arr_length; ++i) {
1450 void *block = empty_block_arr [i];
1451 SGEN_ASSERT (0, block, "we're not shifting correctly");
1453 empty_block_arr [dest] = block;
1455 * This is not strictly necessary, but we're
1458 empty_block_arr [i] = NULL;
1467 SGEN_ASSERT (0, first >= 0 && d > first, "algorithm is wrong");
1469 if ((char*)block != ((char*)empty_block_arr [d-1]) + MS_BLOCK_SIZE) {
1474 if (d + 1 - first == num_blocks) {
1476 * We found num_blocks contiguous blocks. Free them
1477 * and null their array entries. As an optimization
1478 * we could, instead of nulling the entries, shift
1479 * the following entries over to the left, while
1483 sgen_free_os_memory (empty_block_arr [first], MS_BLOCK_SIZE * num_blocks, SGEN_ALLOC_HEAP);
1484 for (j = first; j <= d; ++j)
1485 empty_block_arr [j] = NULL;
1489 num_empty_blocks -= num_blocks;
1491 stat_major_blocks_freed += num_blocks;
1492 if (num_blocks == MS_BLOCK_ALLOC_NUM)
1493 stat_major_blocks_freed_ideal += num_blocks;
1495 stat_major_blocks_freed_less_ideal += num_blocks;
1500 SGEN_ASSERT (0, dest <= i && dest <= arr_length, "array length is off");
1502 SGEN_ASSERT (0, arr_length == num_empty_blocks, "array length is off");
1507 /* rebuild empty_blocks free list */
1508 rebuild_next = (void**)&empty_blocks;
1509 for (i = 0; i < arr_length; ++i) {
1510 void *block = empty_block_arr [i];
1511 SGEN_ASSERT (0, block, "we're missing blocks");
1512 *rebuild_next = block;
1513 rebuild_next = (void**)block;
1515 *rebuild_next = NULL;
1518 sgen_free_internal_dynamic (empty_block_arr, sizeof (void*) * num_empty_blocks_orig, INTERNAL_MEM_MS_BLOCK_INFO_SORT);
1521 SGEN_ASSERT (0, num_empty_blocks >= 0, "we freed more blocks than we had in the first place?");
1525 * This is our threshold: if there aren't more empty blocks than used ones, we won't
1526 * release non-contiguous blocks, for fear of fragmenting the address space.
1528 if (num_empty_blocks <= num_major_sections)
1532 while (num_empty_blocks > section_reserve) {
1533 void *next = *(void**)empty_blocks;
1534 sgen_free_os_memory (empty_blocks, MS_BLOCK_SIZE, SGEN_ALLOC_HEAP);
1535 empty_blocks = next;
1537 * Need not be atomic because this is running
1542 ++stat_major_blocks_freed;
1543 #if SIZEOF_VOID_P != 8
1544 ++stat_major_blocks_freed_individual;
1550 major_find_pin_queue_start_ends (SgenGrayQueue *queue)
1554 FOREACH_BLOCK (block) {
1555 sgen_find_optimized_pin_queue_area (MS_BLOCK_FOR_BLOCK_INFO (block) + MS_BLOCK_SKIP, MS_BLOCK_FOR_BLOCK_INFO (block) + MS_BLOCK_SIZE,
1556 &block->pin_queue_first_entry, &block->pin_queue_last_entry);
1557 } END_FOREACH_BLOCK;
1561 major_pin_objects (SgenGrayQueue *queue)
1565 FOREACH_BLOCK (block) {
1566 mark_pinned_objects_in_block (block, queue);
1567 } END_FOREACH_BLOCK;
1571 major_init_to_space (void)
1576 major_report_pinned_memory_usage (void)
1578 g_assert_not_reached ();
1582 major_get_used_size (void)
1587 FOREACH_BLOCK (block) {
1588 int count = MS_BLOCK_FREE / block->obj_size;
1590 size += count * block->obj_size;
1591 for (iter = block->free_list; iter; iter = (void**)*iter)
1592 size -= block->obj_size;
1593 } END_FOREACH_BLOCK;
1599 get_num_major_sections (void)
1601 return num_major_sections;
1605 major_handle_gc_param (const char *opt)
1607 if (g_str_has_prefix (opt, "evacuation-threshold=")) {
1608 const char *arg = strchr (opt, '=') + 1;
1609 int percentage = atoi (arg);
1610 if (percentage < 0 || percentage > 100) {
1611 fprintf (stderr, "evacuation-threshold must be an integer in the range 0-100.\n");
1614 evacuation_threshold = (float)percentage / 100.0f;
1616 } else if (!strcmp (opt, "lazy-sweep")) {
1619 } else if (!strcmp (opt, "no-lazy-sweep")) {
1628 major_print_gc_param_usage (void)
1632 " evacuation-threshold=P (where P is a percentage, an integer in 0-100)\n"
1633 " (no-)lazy-sweep\n"
1638 major_iterate_live_block_ranges (sgen_cardtable_block_callback callback)
1641 gboolean has_references;
1643 FOREACH_BLOCK_HAS_REFERENCES (block, has_references) {
1645 callback ((mword)MS_BLOCK_FOR_BLOCK_INFO (block), MS_BLOCK_SIZE);
1646 } END_FOREACH_BLOCK;
1649 #ifdef HEAVY_STATISTICS
1650 extern guint64 marked_cards;
1651 extern guint64 scanned_cards;
1652 extern guint64 scanned_objects;
1653 extern guint64 remarked_cards;
1656 #define CARD_WORDS_PER_BLOCK (CARDS_PER_BLOCK / SIZEOF_VOID_P)
1658 * MS blocks are 16K aligned.
1659 * Cardtables are 4K aligned, at least.
1660 * This means that the cardtable of a given block is 32 bytes aligned.
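 *
 * Worked example (editorial note, assuming 512-byte cards and 64-bit mwords):
 * a 16k block is covered by 32 card bytes, and because that range is 32-byte
 * aligned it can be scanned as CARD_WORDS_PER_BLOCK == 4 whole mwords at a
 * time, which is what initial_skip_card () below does to find the first
 * non-zero card quickly.
 */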
1663 initial_skip_card (guint8 *card_data)
1665 mword *cards = (mword*)card_data;
1668 for (i = 0; i < CARD_WORDS_PER_BLOCK; ++i) {
1674 if (i == CARD_WORDS_PER_BLOCK)
1675 return card_data + CARDS_PER_BLOCK;
1677 #if defined(__i386__) && defined(__GNUC__)
1678 return card_data + i * 4 + (__builtin_ffs (card) - 1) / 8;
1679 #elif defined(__x86_64__) && defined(__GNUC__)
1680 return card_data + i * 8 + (__builtin_ffsll (card) - 1) / 8;
1681 #elif defined(__s390x__) && defined(__GNUC__)
1682 return card_data + i * 8 + (__builtin_ffsll (GUINT64_TO_LE(card)) - 1) / 8;
1684 for (i = i * SIZEOF_VOID_P; i < CARDS_PER_BLOCK; ++i) {
1686 return &card_data [i];
1693 static G_GNUC_UNUSED guint8*
1694 skip_card (guint8 *card_data, guint8 *card_data_end)
1696 while (card_data < card_data_end && !*card_data)
1701 #define MS_BLOCK_OBJ_INDEX_FAST(o,b,os) (((char*)(o) - ((b) + MS_BLOCK_SKIP)) / (os))
1702 #define MS_BLOCK_OBJ_FAST(b,os,i) ((b) + MS_BLOCK_SKIP + (os) * (i))
1703 #define MS_OBJ_ALLOCED_FAST(o,b) (*(void**)(o) && (*(char**)(o) < (b) || *(char**)(o) >= (b) + MS_BLOCK_SIZE))
1706 major_scan_card_table (gboolean mod_union, SgenGrayQueue *queue)
1709 gboolean has_references;
1710 ScanObjectFunc scan_func = sgen_get_current_object_ops ()->scan_object;
1712 #ifdef SGEN_HAVE_CONCURRENT_MARK
1713 if (!concurrent_mark)
1714 g_assert (!mod_union);
1716 g_assert (!mod_union);
1719 FOREACH_BLOCK_HAS_REFERENCES (block, has_references) {
1723 if (!has_references)
1726 block_obj_size = block->obj_size;
1727 block_start = MS_BLOCK_FOR_BLOCK_INFO (block);
1729 if (block_obj_size >= CARD_SIZE_IN_BYTES) {
1731 #ifndef SGEN_HAVE_OVERLAPPING_CARDS
1732 guint8 cards_data [CARDS_PER_BLOCK];
1734 char *obj, *end, *base;
1737 #ifdef SGEN_HAVE_CONCURRENT_MARK
1738 cards = block->cardtable_mod_union;
1740 * This happens when the nursery
1741 * collection that precedes finishing
1742 * the concurrent collection allocates
1749 /*We can avoid the extra copy since the remark cardtable was cleaned before */
1750 #ifdef SGEN_HAVE_OVERLAPPING_CARDS
1751 cards = sgen_card_table_get_card_scan_address ((mword)block_start);
1754 if (!sgen_card_table_get_card_data (cards_data, (mword)block_start, CARDS_PER_BLOCK))
1759 obj = (char*)MS_BLOCK_OBJ_FAST (block_start, block_obj_size, 0);
1760 end = block_start + MS_BLOCK_SIZE;
1761 base = sgen_card_table_align_pointer (obj);
1763 cards += MS_BLOCK_SKIP >> CARD_BITS;
1769 sweep_block (block, FALSE);
1771 if (!MS_OBJ_ALLOCED_FAST (obj, block_start))
1775 /* FIXME: do this more efficiently */
1777 MS_CALC_MARK_BIT (w, b, obj);
1778 if (!MS_MARK_BIT (block, w, b))
1782 card_offset = (obj - base) >> CARD_BITS;
1783 sgen_cardtable_scan_object (obj, block_obj_size, cards + card_offset, mod_union, queue);
1786 obj += block_obj_size;
1789 guint8 *card_data, *card_base;
1790 guint8 *card_data_end;
1793 * This is safe in face of card aliasing for the following reason:
1795 * Major blocks are 16k aligned, or 32 cards aligned.
1796 * Card aliasing happens in powers of two, so as long as major blocks are aligned to their
1797 * sizes, they won't overflow the cardtable overlap modulus.
1800 #ifdef SGEN_HAVE_CONCURRENT_MARK
1801 card_data = card_base = block->cardtable_mod_union;
1803 * This happens when the nursery
1804 * collection that precedes finishing
1805 * the concurrent collection allocates
1811 g_assert_not_reached ();
1815 card_data = card_base = sgen_card_table_get_card_scan_address ((mword)block_start);
1817 card_data_end = card_data + CARDS_PER_BLOCK;
1819 card_data += MS_BLOCK_SKIP >> CARD_BITS;
1821 for (card_data = initial_skip_card (card_data); card_data < card_data_end; ++card_data) { //card_data = skip_card (card_data + 1, card_data_end)) {
1823 size_t idx = card_data - card_base;
1824 char *start = (char*)(block_start + idx * CARD_SIZE_IN_BYTES);
1825 char *end = start + CARD_SIZE_IN_BYTES;
1826 char *first_obj, *obj;
1828 HEAVY_STAT (++scanned_cards);
1834 sweep_block (block, FALSE);
1836 HEAVY_STAT (++marked_cards);
1838 sgen_card_table_prepare_card_for_scanning (card_data);
1843 index = MS_BLOCK_OBJ_INDEX_FAST (start, block_start, block_obj_size);
1845 obj = first_obj = (char*)MS_BLOCK_OBJ_FAST (block_start, block_obj_size, index);
1847 if (!MS_OBJ_ALLOCED_FAST (obj, block_start))
1851 /* FIXME: do this more efficiently */
1853 MS_CALC_MARK_BIT (w, b, obj);
1854 if (!MS_MARK_BIT (block, w, b))
1858 HEAVY_STAT (++scanned_objects);
1859 scan_func (obj, sgen_obj_get_descriptor (obj), queue);
1861 obj += block_obj_size;
1863 HEAVY_STAT (if (*card_data) ++remarked_cards);
1864 binary_protocol_card_scan (first_obj, obj - first_obj);
1867 } END_FOREACH_BLOCK;
1871 major_count_cards (long long *num_total_cards, long long *num_marked_cards)
1874 gboolean has_references;
1875 long long total_cards = 0;
1876 long long marked_cards = 0;
1878 FOREACH_BLOCK_HAS_REFERENCES (block, has_references) {
1879 guint8 *cards = sgen_card_table_get_card_scan_address ((mword) MS_BLOCK_FOR_BLOCK_INFO (block));
1882 if (!has_references)
1885 total_cards += CARDS_PER_BLOCK;
1886 for (i = 0; i < CARDS_PER_BLOCK; ++i) {
1890 } END_FOREACH_BLOCK;
1892 *num_total_cards = total_cards;
1893 *num_marked_cards = marked_cards;
1896 #ifdef SGEN_HAVE_CONCURRENT_MARK
1898 update_cardtable_mod_union (void)
1902 FOREACH_BLOCK (block) {
1905 block->cardtable_mod_union = sgen_card_table_update_mod_union (block->cardtable_mod_union,
1906 MS_BLOCK_FOR_BLOCK_INFO (block), MS_BLOCK_SIZE, &num_cards);
1908 SGEN_ASSERT (0, num_cards == CARDS_PER_BLOCK, "Number of cards calculation is wrong");
1909 } END_FOREACH_BLOCK;
1913 major_get_cardtable_mod_union_for_object (char *obj)
1915 MSBlockInfo *block = MS_BLOCK_FOR_OBJ (obj);
1916 return &block->cardtable_mod_union [(obj - (char*)sgen_card_table_align_pointer (MS_BLOCK_FOR_BLOCK_INFO (block))) >> CARD_BITS];
1921 alloc_free_block_lists (MSBlockInfo ***lists)
1924 for (i = 0; i < MS_BLOCK_TYPE_MAX; ++i)
1925 lists [i] = sgen_alloc_internal_dynamic (sizeof (MSBlockInfo*) * num_block_obj_sizes, INTERNAL_MEM_MS_TABLES, TRUE);
1928 #undef pthread_create
1931 post_param_init (SgenMajorCollector *collector)
1933 collector->sweeps_lazily = lazy_sweep;
1936 #ifdef SGEN_HAVE_CONCURRENT_MARK
1938 sgen_marksweep_init_internal (SgenMajorCollector *collector, gboolean is_concurrent)
1939 #else // SGEN_HAVE_CONCURRENT_MARK
1940 #error unknown configuration
1941 #endif // SGEN_HAVE_CONCURRENT_MARK
1945 sgen_register_fixed_internal_mem_type (INTERNAL_MEM_MS_BLOCK_INFO, sizeof (MSBlockInfo));
1947 num_block_obj_sizes = ms_calculate_block_obj_sizes (MS_BLOCK_OBJ_SIZE_FACTOR, NULL);
1948 block_obj_sizes = sgen_alloc_internal_dynamic (sizeof (int) * num_block_obj_sizes, INTERNAL_MEM_MS_TABLES, TRUE);
1949 ms_calculate_block_obj_sizes (MS_BLOCK_OBJ_SIZE_FACTOR, block_obj_sizes);
1951 evacuate_block_obj_sizes = sgen_alloc_internal_dynamic (sizeof (gboolean) * num_block_obj_sizes, INTERNAL_MEM_MS_TABLES, TRUE);
1952 for (i = 0; i < num_block_obj_sizes; ++i)
1953 evacuate_block_obj_sizes [i] = FALSE;
1958 g_print ("block object sizes:\n");
1959 for (i = 0; i < num_block_obj_sizes; ++i)
1960 g_print ("%d\n", block_obj_sizes [i]);
1964 alloc_free_block_lists (free_block_lists);
1966 for (i = 0; i < MS_NUM_FAST_BLOCK_OBJ_SIZE_INDEXES; ++i)
1967 fast_block_obj_size_indexes [i] = ms_find_block_obj_size_index (i * 8);
1968 for (i = 0; i < MS_NUM_FAST_BLOCK_OBJ_SIZE_INDEXES * 8; ++i)
1969 g_assert (MS_BLOCK_OBJ_SIZE_INDEX (i) == ms_find_block_obj_size_index (i));
1971 mono_counters_register ("# major blocks allocated", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_major_blocks_alloced);
1972 mono_counters_register ("# major blocks freed", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_major_blocks_freed);
1973 mono_counters_register ("# major blocks lazy swept", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_major_blocks_lazy_swept);
1974 mono_counters_register ("# major objects evacuated", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_major_objects_evacuated);
1975 #if SIZEOF_VOID_P != 8
1976 mono_counters_register ("# major blocks freed ideally", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_major_blocks_freed_ideal);
1977 mono_counters_register ("# major blocks freed less ideally", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_major_blocks_freed_less_ideal);
1978 mono_counters_register ("# major blocks freed individually", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_major_blocks_freed_individual);
1979 mono_counters_register ("# major blocks allocated less ideally", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_major_blocks_alloced_less_ideal);
1982 collector->section_size = MAJOR_SECTION_SIZE;
1984 #ifdef SGEN_HAVE_CONCURRENT_MARK
1985 concurrent_mark = is_concurrent;
1986 if (is_concurrent) {
1987 collector->is_concurrent = TRUE;
1988 collector->want_synchronous_collection = &want_evacuation;
1992 collector->is_concurrent = FALSE;
1993 collector->want_synchronous_collection = NULL;
1995 collector->get_and_reset_num_major_objects_marked = major_get_and_reset_num_major_objects_marked;
1996 collector->supports_cardtable = TRUE;
1998 collector->have_swept = &have_swept;
2000 collector->alloc_heap = major_alloc_heap;
2001 collector->is_object_live = major_is_object_live;
2002 collector->alloc_small_pinned_obj = major_alloc_small_pinned_obj;
2003 collector->alloc_degraded = major_alloc_degraded;
2005 collector->alloc_object = major_alloc_object;
2006 collector->free_pinned_object = free_pinned_object;
2007 collector->iterate_objects = major_iterate_objects;
2008 collector->free_non_pinned_object = major_free_non_pinned_object;
2009 collector->find_pin_queue_start_ends = major_find_pin_queue_start_ends;
2010 collector->pin_objects = major_pin_objects;
2011 collector->pin_major_object = pin_major_object;
2012 collector->scan_card_table = major_scan_card_table;
2013 collector->iterate_live_block_ranges = (void*)(void*) major_iterate_live_block_ranges;
2014 #ifdef SGEN_HAVE_CONCURRENT_MARK
2015 if (is_concurrent) {
2016 collector->update_cardtable_mod_union = update_cardtable_mod_union;
2017 collector->get_cardtable_mod_union_for_object = major_get_cardtable_mod_union_for_object;
2020 collector->init_to_space = major_init_to_space;
2021 collector->sweep = major_sweep;
2022 collector->check_scan_starts = major_check_scan_starts;
2023 collector->dump_heap = major_dump_heap;
2024 collector->get_used_size = major_get_used_size;
2025 collector->start_nursery_collection = major_start_nursery_collection;
2026 collector->finish_nursery_collection = major_finish_nursery_collection;
2027 collector->start_major_collection = major_start_major_collection;
2028 collector->finish_major_collection = major_finish_major_collection;
2029 collector->have_computed_minor_collection_allowance = major_have_computer_minor_collection_allowance;
2030 collector->ptr_is_in_non_pinned_space = major_ptr_is_in_non_pinned_space;
2031 collector->obj_is_from_pinned_alloc = obj_is_from_pinned_alloc;
2032 collector->report_pinned_memory_usage = major_report_pinned_memory_usage;
2033 collector->get_num_major_sections = get_num_major_sections;
2034 collector->handle_gc_param = major_handle_gc_param;
2035 collector->print_gc_param_usage = major_print_gc_param_usage;
2036 collector->post_param_init = post_param_init;
2037 collector->is_valid_object = major_is_valid_object;
2038 collector->describe_pointer = major_describe_pointer;
2039 collector->count_cards = major_count_cards;
2041 collector->major_ops.copy_or_mark_object = major_copy_or_mark_object_canonical;
2042 collector->major_ops.scan_object = major_scan_object_with_evacuation;
2043 #ifdef SGEN_HAVE_CONCURRENT_MARK
2044 if (is_concurrent) {
2045 collector->major_concurrent_ops.copy_or_mark_object = major_copy_or_mark_object_concurrent_canonical;
2046 collector->major_concurrent_ops.scan_object = major_scan_object_no_mark_concurrent;
2047 collector->major_concurrent_ops.scan_vtype = major_scan_vtype_concurrent;
2051 #if !defined (FIXED_HEAP) && !defined (SGEN_PARALLEL_MARK)
2052 /* FIXME: this will not work with evacuation or the split nursery. */
2054 collector->drain_gray_stack = drain_gray_stack;
2056 #ifdef HEAVY_STATISTICS
2057 mono_counters_register ("Optimized copy", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_optimized_copy);
2058 mono_counters_register ("Optimized copy nursery", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_optimized_copy_nursery);
2059 mono_counters_register ("Optimized copy nursery forwarded", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_optimized_copy_nursery_forwarded);
2060 mono_counters_register ("Optimized copy nursery pinned", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_optimized_copy_nursery_pinned);
2061 mono_counters_register ("Optimized copy major", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_optimized_copy_major);
2062 mono_counters_register ("Optimized copy major small fast", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_optimized_copy_major_small_fast);
2063 mono_counters_register ("Optimized copy major small slow", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_optimized_copy_major_small_slow);
2064 mono_counters_register ("Optimized copy major large", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_optimized_copy_major_large);
2065 mono_counters_register ("Optimized major scan", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_optimized_major_scan);
2066 mono_counters_register ("Optimized major scan no refs", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_optimized_major_scan_no_refs);
2068 mono_counters_register ("Gray stack drain loops", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_drain_loops);
2069 mono_counters_register ("Gray stack prefetch fills", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_drain_prefetch_fills);
2070 mono_counters_register ("Gray stack prefetch failures", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_drain_prefetch_fill_failures);
2074 #ifdef SGEN_HEAVY_BINARY_PROTOCOL
2075 mono_mutex_init (&scanned_objects_list_lock);
2078 SGEN_ASSERT (0, SGEN_MAX_SMALL_OBJ_SIZE <= MS_BLOCK_FREE / 2, "MAX_SMALL_OBJ_SIZE must be at most MS_BLOCK_FREE / 2");
2080 /*cardtable requires major pages to be 8 cards aligned*/
2081 g_assert ((MS_BLOCK_SIZE % (8 * CARD_SIZE_IN_BYTES)) == 0);
2084 #ifdef SGEN_HAVE_CONCURRENT_MARK
2086 sgen_marksweep_init (SgenMajorCollector *collector)
2088 sgen_marksweep_init_internal (collector, FALSE);
2092 sgen_marksweep_conc_init (SgenMajorCollector *collector)
2094 sgen_marksweep_init_internal (collector, TRUE);