 * sgen-marksweep.c: The Mark & Sweep major collector.
 * 	Mark Probst <mark.probst@gmail.com>
 * Copyright 2009-2010 Novell, Inc.
 * Copyright (C) 2012 Xamarin Inc
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Library General Public
 * License 2.0 as published by the Free Software Foundation;
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Library General Public License for more details.
 * You should have received a copy of the GNU Library General Public
 * License 2.0 along with this library; if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#include "utils/mono-counters.h"
#include "utils/mono-semaphore.h"
#include "utils/mono-time.h"
#include "metadata/object-internals.h"
#include "metadata/profiler-private.h"

#include "metadata/sgen-gc.h"
#include "metadata/sgen-protocol.h"
#include "metadata/sgen-cardtable.h"
#include "metadata/sgen-memory-governor.h"
#include "metadata/sgen-layout-stats.h"
#include "metadata/gc-internal.h"
#include "metadata/sgen-pointer-queue.h"
#define SGEN_HAVE_CONCURRENT_MARK

#define MS_BLOCK_SIZE	(16*1024)
#define MS_BLOCK_SIZE_SHIFT	14
#define MAJOR_SECTION_SIZE	MS_BLOCK_SIZE
#define CARDS_PER_BLOCK (MS_BLOCK_SIZE / CARD_SIZE_IN_BYTES)
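/*
 * With SGen's usual 512-byte cards (CARD_BITS 9) this works out to
 * 16384 / 512 = 32 cards per block, matching the "32 cards aligned"
 * observation in the card-scanning code below.
 */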
/*
 * Don't allocate single blocks, but allocate a contingent of this many
 * blocks in one swoop.  This must be a power of two.
 */
#define MS_BLOCK_ALLOC_NUM	32

/*
 * Number of bytes before the first object in a block.  At the start
 * of a block is the MSBlockHeader, then optional padding, then come
 * the objects, so this must be >= sizeof (MSBlockHeader).
 */
#define MS_BLOCK_SKIP	16

#define MS_BLOCK_FREE	(MS_BLOCK_SIZE - MS_BLOCK_SKIP)

#define MS_NUM_MARK_WORDS	((MS_BLOCK_SIZE / SGEN_ALLOC_ALIGN + sizeof (mword) * 8 - 1) / (sizeof (mword) * 8))
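/*
 * A sketch of the arithmetic, assuming SGEN_ALLOC_ALIGN is 8: one mark bit
 * covers each 8-byte allocation slot, so a 16 KB block needs
 * 16384 / 8 = 2048 bits, i.e. 32 mark words of 64 bits each (64 words on
 * 32-bit targets).
 */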
#if SGEN_MAX_SMALL_OBJ_SIZE > MS_BLOCK_FREE / 2
#error SGEN_MAX_SMALL_OBJ_SIZE must be at most MS_BLOCK_FREE / 2
#endif
typedef struct _MSBlockInfo MSBlockInfo;

	size_t pin_queue_num_entries;
	unsigned int pinned : 1;
	unsigned int has_references : 1;
	unsigned int has_pinned : 1;	/* means cannot evacuate */
	unsigned int is_to_space : 1;
	unsigned int swept : 1;
	MSBlockInfo *next_free;
	void **pin_queue_start;
#ifdef SGEN_HAVE_CONCURRENT_MARK
	guint8 *cardtable_mod_union;
#endif
	mword mark_words [MS_NUM_MARK_WORDS];
#define MS_BLOCK_FOR_BLOCK_INFO(b)	((b)->block)

#define MS_BLOCK_OBJ(b,i)		(MS_BLOCK_FOR_BLOCK_INFO(b) + MS_BLOCK_SKIP + (b)->obj_size * (i))
#define MS_BLOCK_OBJ_FOR_SIZE(b,i,obj_size)	(MS_BLOCK_FOR_BLOCK_INFO(b) + MS_BLOCK_SKIP + (obj_size) * (i))
#define MS_BLOCK_DATA_FOR_OBJ(o)	((char*)((mword)(o) & ~(mword)(MS_BLOCK_SIZE - 1)))

#define MS_BLOCK_FOR_OBJ(o)		(((MSBlockHeader*)MS_BLOCK_DATA_FOR_OBJ ((o)))->info)

/* object index will always be small */
#define MS_BLOCK_OBJ_INDEX(o,b)	((int)(((char*)(o) - (MS_BLOCK_FOR_BLOCK_INFO(b) + MS_BLOCK_SKIP)) / (b)->obj_size))

/* casting to int is fine since blocks are 16k */
#define MS_CALC_MARK_BIT(w,b,o)	do {				\
		int i = ((int)((char*)(o) - MS_BLOCK_DATA_FOR_OBJ ((o)))) >> SGEN_ALLOC_ALIGN_BITS; \
		if (sizeof (mword) == 4) {			\

#define MS_MARK_BIT(bl,w,b)	((bl)->mark_words [(w)] & (ONE_P << (b)))
#define MS_SET_MARK_BIT(bl,w,b)	((bl)->mark_words [(w)] |= (ONE_P << (b)))
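/*
 * Illustrative example (hypothetical offset): an object 0x1230 bytes into
 * its block occupies slot 0x1230 >> 3 = 582, so with 64-bit mark words
 * MS_CALC_MARK_BIT yields word 582 / 64 = 9 and bit 582 % 64 = 6, and
 * MS_MARK_BIT then tests mark_words [9] & (ONE_P << 6).
 */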
#define MS_PAR_SET_MARK_BIT(was_marked,bl,w,b)	do {		\
		mword __old = (bl)->mark_words [(w)];		\
		mword __bitmask = ONE_P << (b);			\
		if (__old & __bitmask) {			\
		if (SGEN_CAS_PTR ((gpointer*)&(bl)->mark_words [(w)],	\
				(gpointer)(__old | __bitmask),	\
				(gpointer)__old) ==		\
			was_marked = FALSE;			\

#define MS_OBJ_ALLOCED(o,b)	(*(void**)(o) && (*(char**)(o) < MS_BLOCK_FOR_BLOCK_INFO (b) || *(char**)(o) >= MS_BLOCK_FOR_BLOCK_INFO (b) + MS_BLOCK_SIZE))
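/*
 * Why this test works: a free slot's first word is either NULL or a
 * free-list link to another slot inside the same block, whereas an
 * allocated object's first word is its vtable pointer, which always lives
 * outside the block.  "Non-NULL and outside the block" therefore means
 * the slot holds a live object.
 */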
#define MS_BLOCK_OBJ_SIZE_FACTOR	(sqrt (2.0))

/*
 * This way we can look up block object size indexes for sizes up to
 * 256 bytes with a single load.
 */
#define MS_NUM_FAST_BLOCK_OBJ_SIZE_INDEXES	32

static int *block_obj_sizes;
static int num_block_obj_sizes;
static int fast_block_obj_size_indexes [MS_NUM_FAST_BLOCK_OBJ_SIZE_INDEXES];
#define MS_BLOCK_FLAG_PINNED	1
#define MS_BLOCK_FLAG_REFS	2

#define MS_BLOCK_TYPE_MAX	4

static gboolean *evacuate_block_obj_sizes;
static float evacuation_threshold = 0.666f;
#ifdef SGEN_HAVE_CONCURRENT_MARK
static float concurrent_evacuation_threshold = 0.666f;
static gboolean want_evacuation = FALSE;
#endif

static gboolean lazy_sweep = TRUE;
static gboolean have_swept;

#ifdef SGEN_HAVE_CONCURRENT_MARK
static gboolean concurrent_mark;
#endif

#define BLOCK_IS_TAGGED_HAS_REFERENCES(bl)	SGEN_POINTER_IS_TAGGED_1 ((bl))
#define BLOCK_TAG_HAS_REFERENCES(bl)		SGEN_POINTER_TAG_1 ((bl))
#define BLOCK_UNTAG_HAS_REFERENCES(bl)		SGEN_POINTER_UNTAG_1 ((bl))

#define BLOCK_TAG(bl)	((bl)->has_references ? BLOCK_TAG_HAS_REFERENCES ((bl)) : (bl))
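/*
 * Sketch of the tagging scheme: MSBlockInfo pointers are word aligned, so
 * bit 0 is free to cache the has_references flag in the allocated_blocks
 * queue itself:
 *
 *   void *tagged = BLOCK_TAG (info);                          // bit 0 set iff info->has_references
 *   gboolean hr = BLOCK_IS_TAGGED_HAS_REFERENCES (tagged);    // test bit 0
 *   MSBlockInfo *info2 = BLOCK_UNTAG_HAS_REFERENCES (tagged); // clear bit 0
 */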
/* all allocated blocks in the system */
static SgenPointerQueue allocated_blocks;

/* non-allocated block free-list */
static void *empty_blocks = NULL;
static size_t num_empty_blocks = 0;

#define FOREACH_BLOCK(bl)	{ size_t __index; for (__index = 0; __index < allocated_blocks.next_slot; ++__index) { (bl) = BLOCK_UNTAG_HAS_REFERENCES (allocated_blocks.data [__index]);
#define FOREACH_BLOCK_HAS_REFERENCES(bl,hr)	{ size_t __index; for (__index = 0; __index < allocated_blocks.next_slot; ++__index) { (bl) = allocated_blocks.data [__index]; (hr) = BLOCK_IS_TAGGED_HAS_REFERENCES ((bl)); (bl) = BLOCK_UNTAG_HAS_REFERENCES ((bl));
#define END_FOREACH_BLOCK	} }
#define DELETE_BLOCK_IN_FOREACH()	(allocated_blocks.data [__index] = NULL)

static size_t num_major_sections = 0;
/* one free block list for each block object size */
static MSBlockInfo **free_block_lists [MS_BLOCK_TYPE_MAX];

static long long stat_major_blocks_alloced = 0;
static long long stat_major_blocks_freed = 0;
static long long stat_major_blocks_lazy_swept = 0;
static long long stat_major_objects_evacuated = 0;

#if SIZEOF_VOID_P != 8
static long long stat_major_blocks_freed_ideal = 0;
static long long stat_major_blocks_freed_less_ideal = 0;
static long long stat_major_blocks_freed_individual = 0;
static long long stat_major_blocks_alloced_less_ideal = 0;
#endif

#ifdef SGEN_COUNT_NUMBER_OF_MAJOR_OBJECTS_MARKED
static long long num_major_objects_marked = 0;
#define INC_NUM_MAJOR_OBJECTS_MARKED()	(++num_major_objects_marked)
#else
#define INC_NUM_MAJOR_OBJECTS_MARKED()
#endif
sweep_block (MSBlockInfo *block, gboolean during_major_collection);

ms_find_block_obj_size_index (size_t size)
	SGEN_ASSERT (9, size <= SGEN_MAX_SMALL_OBJ_SIZE, "size %zu is bigger than max small object size %d", size, SGEN_MAX_SMALL_OBJ_SIZE);
	for (i = 0; i < num_block_obj_sizes; ++i)
		if (block_obj_sizes [i] >= size)
			return i;
	g_error ("no object of size %zu\n", size);

#define FREE_BLOCKS_FROM(lists,p,r)	(lists [((p) ? MS_BLOCK_FLAG_PINNED : 0) | ((r) ? MS_BLOCK_FLAG_REFS : 0)])
#define FREE_BLOCKS(p,r)		(FREE_BLOCKS_FROM (free_block_lists, (p), (r)))

#define MS_BLOCK_OBJ_SIZE_INDEX(s)				\
	(((s)+7)>>3 < MS_NUM_FAST_BLOCK_OBJ_SIZE_INDEXES ?	\
	 fast_block_obj_size_indexes [((s)+7)>>3] :		\
	 ms_find_block_obj_size_index ((s)))
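/*
 * Worked example: a 40-byte request computes (40 + 7) >> 3 = 5, which is
 * below MS_NUM_FAST_BLOCK_OBJ_SIZE_INDEXES, so the index comes from a
 * single load of fast_block_obj_size_indexes [5]; a 300-byte request
 * ((300 + 7) >> 3 = 38) falls back to the linear search in
 * ms_find_block_obj_size_index ().
 */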
major_alloc_heap (mword nursery_size, mword nursery_align, int the_nursery_bits)
	if (nursery_align)
		start = sgen_alloc_os_memory_aligned (nursery_size, nursery_align, SGEN_ALLOC_HEAP | SGEN_ALLOC_ACTIVATE, "nursery");
	else
		start = sgen_alloc_os_memory (nursery_size, SGEN_ALLOC_HEAP | SGEN_ALLOC_ACTIVATE, "nursery");

update_heap_boundaries_for_block (MSBlockInfo *block)
	sgen_update_heap_boundaries ((mword)MS_BLOCK_FOR_BLOCK_INFO (block), (mword)MS_BLOCK_FOR_BLOCK_INFO (block) + MS_BLOCK_SIZE);

ms_get_empty_block (void)
	void *block, *empty, *next;

	/*
	 * We try allocating MS_BLOCK_ALLOC_NUM blocks first.  If that's
	 * unsuccessful, we halve the number of blocks and try again, until we're at
	 * 1.  If that doesn't work either, we assert.
	 */
	int alloc_num = MS_BLOCK_ALLOC_NUM;
	p = sgen_alloc_os_memory_aligned (MS_BLOCK_SIZE * alloc_num, MS_BLOCK_SIZE, SGEN_ALLOC_HEAP | SGEN_ALLOC_ACTIVATE,
			alloc_num == 1 ? "major heap section" : NULL);

	for (i = 0; i < alloc_num; ++i) {
		block = p;
		/*
		 * We do the free list update one after the
		 * other so that other threads can use the new
		 * blocks as quickly as possible.
		 */
		do {
			empty = empty_blocks;
			*(void**)block = empty;
		} while (SGEN_CAS_PTR ((gpointer*)&empty_blocks, block, empty) != empty);
		p += MS_BLOCK_SIZE;
	}

	SGEN_ATOMIC_ADD_P (num_empty_blocks, alloc_num);

	stat_major_blocks_alloced += alloc_num;
#if SIZEOF_VOID_P != 8
	if (alloc_num != MS_BLOCK_ALLOC_NUM)
		stat_major_blocks_alloced_less_ideal += alloc_num;
#endif

	do {
		empty = empty_blocks;
		block = empty;
		next = *(void**)block;
	} while (SGEN_CAS_PTR (&empty_blocks, next, empty) != empty);

	SGEN_ATOMIC_ADD_P (num_empty_blocks, -1);

	*(void**)block = NULL;

	g_assert (!((mword)block & (MS_BLOCK_SIZE - 1)));

ms_free_block (void *block)
	sgen_memgov_release_space (MS_BLOCK_SIZE, SPACE_MAJOR);
	memset (block, 0, MS_BLOCK_SIZE);

	do {
		empty = empty_blocks;
		*(void**)block = empty;
	} while (SGEN_CAS_PTR (&empty_blocks, block, empty) != empty);

	SGEN_ATOMIC_ADD_P (num_empty_blocks, 1);

//#define MARKSWEEP_CONSISTENCY_CHECK
#ifdef MARKSWEEP_CONSISTENCY_CHECK
check_block_free_list (MSBlockInfo *block, int size, gboolean pinned)
	for (; block; block = block->next_free) {
		g_assert (block->obj_size == size);
		g_assert ((pinned && block->pinned) || (!pinned && !block->pinned));

		/* blocks in the free lists must have at least
		   one free slot */
		g_assert (block->free_list);

		/* the block must be in the allocated_blocks array */
		g_assert (sgen_pointer_queue_find (&allocated_blocks, BLOCK_TAG (block)) != (size_t)-1);

check_empty_blocks (void)
	for (p = empty_blocks; p; p = *(void**)p)
		++i;
	g_assert (i == num_empty_blocks);

consistency_check (void)
	/* check all blocks */
	FOREACH_BLOCK (block) {
		int count = MS_BLOCK_FREE / block->obj_size;

		/* check block header */
		g_assert (((MSBlockHeader*)block->block)->info == block);

		/* count number of free slots */
		for (i = 0; i < count; ++i) {
			void **obj = (void**) MS_BLOCK_OBJ (block, i);
			if (!MS_OBJ_ALLOCED (obj, block))

		/* check free list */
		for (free = block->free_list; free; free = (void**)*free) {
			g_assert (MS_BLOCK_FOR_OBJ (free) == block);
		g_assert (num_free == 0);

		/* check all mark words are zero */
		for (i = 0; i < MS_NUM_MARK_WORDS; ++i)
			g_assert (block->mark_words [i] == 0);

	/* check free blocks */
	for (i = 0; i < num_block_obj_sizes; ++i) {
		for (j = 0; j < MS_BLOCK_TYPE_MAX; ++j)
			check_block_free_list (free_block_lists [j][i], block_obj_sizes [i], j & MS_BLOCK_FLAG_PINNED);

	check_empty_blocks ();
ms_alloc_block (int size_index, gboolean pinned, gboolean has_references)
	int size = block_obj_sizes [size_index];
	int count = MS_BLOCK_FREE / size;
	MSBlockHeader *header;
	MSBlockInfo **free_blocks = FREE_BLOCKS (pinned, has_references);

	if (!sgen_memgov_try_alloc_space (MS_BLOCK_SIZE, SPACE_MAJOR))

	info = sgen_alloc_internal (INTERNAL_MEM_MS_BLOCK_INFO);

	SGEN_ASSERT (9, count >= 2, "block with %d objects, it must hold at least 2", count);

	info->obj_size = size;
	info->obj_size_index = size_index;
	info->pinned = pinned;
	info->has_references = has_references;
	info->has_pinned = pinned;
	/*
	 * Blocks that are to-space are not evacuated from.  During a major collection
	 * blocks are allocated for two reasons: evacuating objects from the nursery and
	 * evacuating them from major blocks marked for evacuation.  In both cases we don't
	 * want further evacuation.
	 */
	info->is_to_space = (sgen_get_current_collection_generation () == GENERATION_OLD);

	info->block = ms_get_empty_block ();

	header = (MSBlockHeader*) info->block;

#ifdef SGEN_HAVE_CONCURRENT_MARK
	info->cardtable_mod_union = NULL;
#endif

	update_heap_boundaries_for_block (info);

	/* build free list */
	obj_start = MS_BLOCK_FOR_BLOCK_INFO (info) + MS_BLOCK_SKIP;
	info->free_list = (void**)obj_start;
	/* we're skipping the last one - it must be nulled */
	for (i = 0; i < count - 1; ++i) {
		char *next_obj_start = obj_start + size;
		*(void**)obj_start = next_obj_start;
		obj_start = next_obj_start;
	}

	*(void**)obj_start = NULL;
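	/*
	 * The result: free_list -> slot 0 -> slot 1 -> ... -> slot (count - 1),
	 * i.e. the slots are chained in address order, with the last slot's
	 * link nulled to terminate the list.
	 */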
	info->next_free = free_blocks [size_index];
	free_blocks [size_index] = info;

	sgen_pointer_queue_add (&allocated_blocks, BLOCK_TAG (info));

	++num_major_sections;
obj_is_from_pinned_alloc (char *ptr)
	FOREACH_BLOCK (block) {
		if (ptr >= MS_BLOCK_FOR_BLOCK_INFO (block) && ptr <= MS_BLOCK_FOR_BLOCK_INFO (block) + MS_BLOCK_SIZE)
			return block->pinned;

unlink_slot_from_free_list_uncontested (MSBlockInfo **free_blocks, int size_index)
	block = free_blocks [size_index];
	SGEN_ASSERT (9, block, "no free block to unlink from free_blocks %p size_index %d", free_blocks, size_index);

	if (G_UNLIKELY (!block->swept)) {
		stat_major_blocks_lazy_swept ++;
		sweep_block (block, FALSE);
	}

	obj = block->free_list;
	SGEN_ASSERT (9, obj, "block %p in free list had no available object to alloc from", block);

	block->free_list = *(void**)obj;
	if (!block->free_list) {
		free_blocks [size_index] = block->next_free;
		block->next_free = NULL;
	}
alloc_obj (MonoVTable *vtable, size_t size, gboolean pinned, gboolean has_references)
	int size_index = MS_BLOCK_OBJ_SIZE_INDEX (size);
	MSBlockInfo **free_blocks = FREE_BLOCKS (pinned, has_references);

	if (!free_blocks [size_index]) {
		if (G_UNLIKELY (!ms_alloc_block (size_index, pinned, has_references)))

	obj = unlink_slot_from_free_list_uncontested (free_blocks, size_index);

	*(MonoVTable**)obj = vtable;

major_alloc_object (MonoVTable *vtable, size_t size, gboolean has_references)
	return alloc_obj (vtable, size, FALSE, has_references);

/*
 * We're not freeing the block if it's empty.  We leave that work for
 * the next major collection.
 *
 * This is just called from the domain clearing code, which runs in a
 * single thread and has the GC lock, so we don't need an extra lock.
 */
free_object (char *obj, size_t size, gboolean pinned)
	MSBlockInfo *block = MS_BLOCK_FOR_OBJ (obj);

	if (!block->swept)
		sweep_block (block, FALSE);
	SGEN_ASSERT (9, (pinned && block->pinned) || (!pinned && !block->pinned), "free-object pinning mixup object %p pinned %d block %p pinned %d", obj, pinned, block, block->pinned);
	SGEN_ASSERT (9, MS_OBJ_ALLOCED (obj, block), "object %p is already free", obj);
	MS_CALC_MARK_BIT (word, bit, obj);
	SGEN_ASSERT (9, !MS_MARK_BIT (block, word, bit), "object %p has mark bit set", obj);
	if (!block->free_list) {
		MSBlockInfo **free_blocks = FREE_BLOCKS (pinned, block->has_references);
		int size_index = MS_BLOCK_OBJ_SIZE_INDEX (size);
		SGEN_ASSERT (9, !block->next_free, "block %p doesn't have a free-list of objects but belongs to a free-list of blocks", block);
		block->next_free = free_blocks [size_index];
		free_blocks [size_index] = block;
	}
	memset (obj, 0, size);
	*(void**)obj = block->free_list;
	block->free_list = (void**)obj;

major_free_non_pinned_object (char *obj, size_t size)
	free_object (obj, size, FALSE);

/* size is a multiple of SGEN_ALLOC_ALIGN */
major_alloc_small_pinned_obj (MonoVTable *vtable, size_t size, gboolean has_references)
	res = alloc_obj (vtable, size, TRUE, has_references);
	/*
	 * If we failed to alloc memory, we'd better try releasing memory,
	 * as pinned alloc is requested by the runtime.
	 */
	if (!res) {
		sgen_perform_collection (0, GENERATION_OLD, "pinned alloc failure", TRUE);
		res = alloc_obj (vtable, size, TRUE, has_references);
	}
free_pinned_object (char *obj, size_t size)
	free_object (obj, size, TRUE);

/*
 * size is already rounded up and we hold the GC lock.
 */
major_alloc_degraded (MonoVTable *vtable, size_t size)
	size_t old_num_sections;

	old_num_sections = num_major_sections;

	obj = alloc_obj (vtable, size, FALSE, SGEN_VTABLE_HAS_REFERENCES (vtable));
	if (G_LIKELY (obj)) {
		HEAVY_STAT (++stat_objects_alloced_degraded);
		HEAVY_STAT (stat_bytes_alloced_degraded += size);
		g_assert (num_major_sections >= old_num_sections);
		sgen_register_major_sections_alloced (num_major_sections - old_num_sections);
	}

#define MAJOR_OBJ_IS_IN_TO_SPACE(obj)	FALSE

/*
 * obj is some object.  If it's not in the major heap (i.e. if it's in
 * the nursery or LOS), return FALSE.  Otherwise return whether it's
 * been marked or copied.
 */
major_is_object_live (char *obj)
	if (sgen_ptr_in_nursery (obj))
		return FALSE;

	objsize = SGEN_ALIGN_UP (sgen_safe_object_get_size ((MonoObject*)obj));

	if (objsize > SGEN_MAX_SMALL_OBJ_SIZE)
		return FALSE;

	/* now we know it's in a major block */
	block = MS_BLOCK_FOR_OBJ (obj);
	SGEN_ASSERT (9, !block->pinned, "block %p is pinned, BTW why is this bad?", block);
	MS_CALC_MARK_BIT (word, bit, obj);
	return MS_MARK_BIT (block, word, bit) ? TRUE : FALSE;

major_ptr_is_in_non_pinned_space (char *ptr, char **start)
	FOREACH_BLOCK (block) {
		if (ptr >= MS_BLOCK_FOR_BLOCK_INFO (block) && ptr <= MS_BLOCK_FOR_BLOCK_INFO (block) + MS_BLOCK_SIZE) {
			int count = MS_BLOCK_FREE / block->obj_size;

			for (i = 0; i <= count; ++i) {
				if (ptr >= MS_BLOCK_OBJ (block, i) && ptr < MS_BLOCK_OBJ (block, i + 1)) {
					*start = MS_BLOCK_OBJ (block, i);
					break;
				}
			}
			return !block->pinned;
		}
major_iterate_objects (IterateObjectsFlags flags, IterateObjectCallbackFunc callback, void *data)
	gboolean sweep = flags & ITERATE_OBJECTS_SWEEP;
	gboolean non_pinned = flags & ITERATE_OBJECTS_NON_PINNED;
	gboolean pinned = flags & ITERATE_OBJECTS_PINNED;

	FOREACH_BLOCK (block) {
		int count = MS_BLOCK_FREE / block->obj_size;

		if (block->pinned && !pinned)
			continue;
		if (!block->pinned && !non_pinned)
			continue;
		if (sweep && lazy_sweep) {
			sweep_block (block, FALSE);
			SGEN_ASSERT (0, block->swept, "Block must be swept after sweeping");
		}

		for (i = 0; i < count; ++i) {
			void **obj = (void**) MS_BLOCK_OBJ (block, i);
			MS_CALC_MARK_BIT (word, bit, obj);
			if (!MS_MARK_BIT (block, word, bit))
				continue;
			if (MS_OBJ_ALLOCED (obj, block))
				callback ((char*)obj, block->obj_size, data);

major_is_valid_object (char *object)
	FOREACH_BLOCK (block) {
		if ((MS_BLOCK_FOR_BLOCK_INFO (block) > object) || ((MS_BLOCK_FOR_BLOCK_INFO (block) + MS_BLOCK_SIZE) <= object))
			continue;

		idx = MS_BLOCK_OBJ_INDEX (object, block);
		obj = (char*)MS_BLOCK_OBJ (block, idx);

		return MS_OBJ_ALLOCED (obj, block);

major_describe_pointer (char *ptr)
	FOREACH_BLOCK (block) {
		if ((MS_BLOCK_FOR_BLOCK_INFO (block) > ptr) || ((MS_BLOCK_FOR_BLOCK_INFO (block) + MS_BLOCK_SIZE) <= ptr))
			continue;

		SGEN_LOG (0, "major-ptr (block %p sz %d pin %d ref %d)\n",
			MS_BLOCK_FOR_BLOCK_INFO (block), block->obj_size, block->pinned, block->has_references);

		idx = MS_BLOCK_OBJ_INDEX (ptr, block);
		obj = (char*)MS_BLOCK_OBJ (block, idx);
		live = MS_OBJ_ALLOCED (obj, block);
		vtable = live ? (MonoVTable*)SGEN_LOAD_VTABLE (obj) : NULL;

		MS_CALC_MARK_BIT (w, b, obj);
		marked = MS_MARK_BIT (block, w, b);

		if (obj == ptr) {
			if (live)
				SGEN_LOG (0, "object");
			else
				SGEN_LOG (0, "dead-object");
		} else {
			if (live)
				SGEN_LOG (0, "interior-ptr offset %td", ptr - obj);
			else
				SGEN_LOG (0, "dead-interior-ptr offset %td", ptr - obj);
		}

		SGEN_LOG (0, " marked %d)\n", marked ? 1 : 0);
major_check_scan_starts (void)

major_dump_heap (FILE *heap_dump_file)
	int *slots_available = alloca (sizeof (int) * num_block_obj_sizes);
	int *slots_used = alloca (sizeof (int) * num_block_obj_sizes);

	for (i = 0; i < num_block_obj_sizes; ++i)
		slots_available [i] = slots_used [i] = 0;

	FOREACH_BLOCK (block) {
		int index = ms_find_block_obj_size_index (block->obj_size);
		int count = MS_BLOCK_FREE / block->obj_size;

		slots_available [index] += count;
		for (i = 0; i < count; ++i) {
			if (MS_OBJ_ALLOCED (MS_BLOCK_OBJ (block, i), block))
				++slots_used [index];
		}

	fprintf (heap_dump_file, "<occupancies>\n");
	for (i = 0; i < num_block_obj_sizes; ++i) {
		fprintf (heap_dump_file, "<occupancy size=\"%d\" available=\"%d\" used=\"%d\" />\n",
				block_obj_sizes [i], slots_available [i], slots_used [i]);
	}
	fprintf (heap_dump_file, "</occupancies>\n");

	FOREACH_BLOCK (block) {
		int count = MS_BLOCK_FREE / block->obj_size;

		fprintf (heap_dump_file, "<section type=\"%s\" size=\"%zu\">\n", "old", (size_t)MS_BLOCK_FREE);

		for (i = 0; i <= count; ++i) {
			if ((i < count) && MS_OBJ_ALLOCED (MS_BLOCK_OBJ (block, i), block)) {

				sgen_dump_occupied (MS_BLOCK_OBJ (block, start), MS_BLOCK_OBJ (block, i), MS_BLOCK_FOR_BLOCK_INFO (block));

		fprintf (heap_dump_file, "</section>\n");
#define LOAD_VTABLE	SGEN_LOAD_VTABLE

#define MS_MARK_OBJECT_AND_ENQUEUE_CHECKED(obj,desc,block,queue) do { \
		int __word, __bit;				\
		MS_CALC_MARK_BIT (__word, __bit, (obj));	\
		if (!MS_MARK_BIT ((block), __word, __bit) && MS_OBJ_ALLOCED ((obj), (block))) { \
			MS_SET_MARK_BIT ((block), __word, __bit); \
			if ((block)->has_references)		\
				GRAY_OBJECT_ENQUEUE ((queue), (obj), (desc)); \
			binary_protocol_mark ((obj), (gpointer)LOAD_VTABLE ((obj)), sgen_safe_object_get_size ((MonoObject*)(obj))); \
			INC_NUM_MAJOR_OBJECTS_MARKED ();	\
		}						\
	} while (0)
#define MS_MARK_OBJECT_AND_ENQUEUE(obj,desc,block,queue) do {	\
		int __word, __bit;				\
		MS_CALC_MARK_BIT (__word, __bit, (obj));	\
		SGEN_ASSERT (9, MS_OBJ_ALLOCED ((obj), (block)), "object %p not allocated", obj); \
		if (!MS_MARK_BIT ((block), __word, __bit)) {	\
			MS_SET_MARK_BIT ((block), __word, __bit); \
			if ((block)->has_references)		\
				GRAY_OBJECT_ENQUEUE ((queue), (obj), (desc)); \
			binary_protocol_mark ((obj), (gpointer)LOAD_VTABLE ((obj)), sgen_safe_object_get_size ((MonoObject*)(obj))); \
			INC_NUM_MAJOR_OBJECTS_MARKED ();	\
		}						\
	} while (0)
#define MS_PAR_MARK_OBJECT_AND_ENQUEUE(obj,desc,block,queue) do { \
		int __word, __bit;				\
		gboolean __was_marked;				\
		SGEN_ASSERT (9, MS_OBJ_ALLOCED ((obj), (block)), "object %p not allocated", obj); \
		MS_CALC_MARK_BIT (__word, __bit, (obj));	\
		MS_PAR_SET_MARK_BIT (__was_marked, (block), __word, __bit); \
		if (!__was_marked) {				\
			if ((block)->has_references)		\
				GRAY_OBJECT_ENQUEUE ((queue), (obj), (desc)); \
			binary_protocol_mark ((obj), (gpointer)LOAD_VTABLE ((obj)), sgen_safe_object_get_size ((MonoObject*)(obj))); \
			INC_NUM_MAJOR_OBJECTS_MARKED ();	\
		}						\
	} while (0)
pin_major_object (char *obj, SgenGrayQueue *queue)
	MSBlockInfo *block;

#ifdef SGEN_HAVE_CONCURRENT_MARK
	if (concurrent_mark)
		g_assert_not_reached ();
#endif

	block = MS_BLOCK_FOR_OBJ (obj);
	block->has_pinned = TRUE;
	MS_MARK_OBJECT_AND_ENQUEUE (obj, sgen_obj_get_descriptor (obj), block, queue);

#include "sgen-major-copy-object.h"

#ifdef SGEN_HAVE_CONCURRENT_MARK
major_copy_or_mark_object_concurrent (void **ptr, void *obj, SgenGrayQueue *queue)
	g_assert (!SGEN_OBJECT_IS_FORWARDED (obj));

	if (!sgen_ptr_in_nursery (obj)) {
		mword objsize;

		objsize = SGEN_ALIGN_UP (sgen_safe_object_get_size ((MonoObject*)obj));

		if (objsize <= SGEN_MAX_SMALL_OBJ_SIZE) {
			MSBlockInfo *block = MS_BLOCK_FOR_OBJ (obj);
			MS_MARK_OBJECT_AND_ENQUEUE (obj, sgen_obj_get_descriptor (obj), block, queue);
		} else {
			if (sgen_los_object_is_pinned (obj))
				return;

#ifdef ENABLE_DTRACE
			if (G_UNLIKELY (MONO_GC_OBJ_PINNED_ENABLED ())) {
				MonoVTable *vt = (MonoVTable*)SGEN_LOAD_VTABLE (obj);
				MONO_GC_OBJ_PINNED ((mword)obj, sgen_safe_object_get_size (obj), vt->klass->name_space, vt->klass->name, GENERATION_OLD);
			}
#endif

			sgen_los_pin_object (obj);
			if (SGEN_OBJECT_HAS_REFERENCES (obj))
				GRAY_OBJECT_ENQUEUE (queue, obj, sgen_obj_get_descriptor (obj));
			INC_NUM_MAJOR_OBJECTS_MARKED ();
		}
	}
major_copy_or_mark_object (void **ptr, void *obj, SgenGrayQueue *queue)
	MSBlockInfo *block;

	HEAVY_STAT (++stat_copy_object_called_major);

	SGEN_ASSERT (9, obj, "null object from pointer %p", ptr);
	SGEN_ASSERT (9, current_collection_generation == GENERATION_OLD, "old gen parallel allocator called from a %d collection", current_collection_generation);

	if (sgen_ptr_in_nursery (obj)) {
		int word, bit;
		char *forwarded, *old_obj;

		if ((forwarded = SGEN_OBJECT_IS_FORWARDED (obj))) {
			*ptr = forwarded;
			return;
		}
		if (SGEN_OBJECT_IS_PINNED (obj))
			return;

		/* An object in the nursery To Space has already been copied and grayed. Nothing to do. */
		if (sgen_nursery_is_to_space (obj))
			return;

		HEAVY_STAT (++stat_objects_copied_major);

	do_copy_object:
		old_obj = obj;
		obj = copy_object_no_checks (obj, queue);
		if (G_UNLIKELY (old_obj == obj)) {
			/*
			 * If we fail to evacuate an object we just stop doing it for a
			 * given block size, as all others will surely fail too.
			 */
			if (!sgen_ptr_in_nursery (obj)) {
				int size_index;
				block = MS_BLOCK_FOR_OBJ (obj);
				size_index = block->obj_size_index;
				evacuate_block_obj_sizes [size_index] = FALSE;
				MS_MARK_OBJECT_AND_ENQUEUE (obj, sgen_obj_get_descriptor (obj), block, queue);
			}
			return;
		}

		/*
		 * FIXME: See comment for copy_object_no_checks().  If
		 * we have that, we can let the allocation function
		 * give us the block info, too, and we won't have to
		 * re-fetch it.
		 *
		 * FIXME (2): We should rework this to avoid all those nursery checks.
		 */
		/*
		 * For the split nursery allocator the object might
		 * still be in the nursery despite having been
		 * promoted, in which case we can't mark it.
		 */
		if (!sgen_ptr_in_nursery (obj)) {
			block = MS_BLOCK_FOR_OBJ (obj);
			MS_CALC_MARK_BIT (word, bit, obj);
			SGEN_ASSERT (9, !MS_MARK_BIT (block, word, bit), "object %p already marked", obj);
			MS_SET_MARK_BIT (block, word, bit);
			binary_protocol_mark (obj, (gpointer)LOAD_VTABLE (obj), sgen_safe_object_get_size ((MonoObject*)obj));
		}
	} else {
		char *forwarded;
		mword objsize;

		/*
		 * If we don't have a fixed heap we cannot know
		 * whether an object is in the LOS or in the small
		 * object major heap without checking its size.  To do
		 * that, however, we need to know that we actually
		 * have a valid object, not a forwarding pointer, so
		 * we have to do this check first.
		 */
		if ((forwarded = SGEN_OBJECT_IS_FORWARDED (obj))) {
			*ptr = forwarded;
			return;
		}

		objsize = SGEN_ALIGN_UP (sgen_safe_object_get_size ((MonoObject*)obj));

		if (objsize <= SGEN_MAX_SMALL_OBJ_SIZE) {
			int size_index;
			gboolean evacuate;

			block = MS_BLOCK_FOR_OBJ (obj);
			size_index = block->obj_size_index;
			evacuate = evacuate_block_obj_sizes [size_index];

			if (evacuate && !block->has_pinned) {
				g_assert (!SGEN_OBJECT_IS_PINNED (obj));
				if (block->is_to_space)
					return;
				HEAVY_STAT (++stat_major_objects_evacuated);
				goto do_copy_object;
			}
			MS_MARK_OBJECT_AND_ENQUEUE (obj, sgen_obj_get_descriptor (obj), block, queue);
		} else {
			if (sgen_los_object_is_pinned (obj))
				return;
			binary_protocol_pin (obj, (gpointer)SGEN_LOAD_VTABLE (obj), sgen_safe_object_get_size ((MonoObject*)obj));

#ifdef ENABLE_DTRACE
			if (G_UNLIKELY (MONO_GC_OBJ_PINNED_ENABLED ())) {
				MonoVTable *vt = (MonoVTable*)SGEN_LOAD_VTABLE (obj);
				MONO_GC_OBJ_PINNED ((mword)obj, sgen_safe_object_get_size (obj), vt->klass->name_space, vt->klass->name, GENERATION_OLD);
			}
#endif

			sgen_los_pin_object (obj);
			if (SGEN_OBJECT_HAS_REFERENCES (obj))
				GRAY_OBJECT_ENQUEUE (queue, obj, sgen_obj_get_descriptor (obj));
		}
	}
major_copy_or_mark_object_canonical (void **ptr, SgenGrayQueue *queue)
	major_copy_or_mark_object (ptr, *ptr, queue);

#ifdef SGEN_HAVE_CONCURRENT_MARK
major_copy_or_mark_object_concurrent_canonical (void **ptr, SgenGrayQueue *queue)
	major_copy_or_mark_object_concurrent (ptr, *ptr, queue);
#endif

major_get_and_reset_num_major_objects_marked (void)
#ifdef SGEN_COUNT_NUMBER_OF_MAJOR_OBJECTS_MARKED
	long long num = num_major_objects_marked;
	num_major_objects_marked = 0;
	return num;
#else
	return 0;
#endif

#include "sgen-major-scan-object.h"

#ifdef SGEN_HAVE_CONCURRENT_MARK
#define SCAN_FOR_CONCURRENT_MARK
#include "sgen-major-scan-object.h"
#undef SCAN_FOR_CONCURRENT_MARK
#endif

mark_pinned_objects_in_block (MSBlockInfo *block, SgenGrayQueue *queue)
	int i;
	int last_index = -1;

	if (!block->pin_queue_num_entries)
		return;

	block->has_pinned = TRUE;

	for (i = 0; i < block->pin_queue_num_entries; ++i) {
		int index = MS_BLOCK_OBJ_INDEX (block->pin_queue_start [i], block);
		SGEN_ASSERT (9, index >= 0 && index < MS_BLOCK_FREE / block->obj_size, "invalid object %p index %d max-index %d", block->pin_queue_start [i], index, MS_BLOCK_FREE / block->obj_size);
		if (index == last_index)
			continue;
		obj = MS_BLOCK_OBJ (block, index);
		MS_MARK_OBJECT_AND_ENQUEUE_CHECKED (obj, sgen_obj_get_descriptor (obj), block, queue);
		last_index = index;
	}

sweep_block_for_size (MSBlockInfo *block, int count, int obj_size)
	int obj_index;

	for (obj_index = 0; obj_index < count; ++obj_index) {
		int word, bit;
		void *obj = MS_BLOCK_OBJ_FOR_SIZE (block, obj_index, obj_size);

		MS_CALC_MARK_BIT (word, bit, obj);
		if (MS_MARK_BIT (block, word, bit)) {
			SGEN_ASSERT (9, MS_OBJ_ALLOCED (obj, block), "object %p not allocated", obj);
		} else {
			/* an unmarked object */
			if (MS_OBJ_ALLOCED (obj, block)) {
				/*
				 * FIXME: Merge consecutive
				 * slots for lower reporting
				 * overhead.  Maybe memset
				 * will also benefit?
				 */
				binary_protocol_empty (obj, obj_size);
				MONO_GC_MAJOR_SWEPT ((mword)obj, obj_size);
				memset (obj, 0, obj_size);
			}
			*(void**)obj = block->free_list;
			block->free_list = obj;
		}
	}
/*
 * sweep_block:
 *
 *   Traverse BLOCK, freeing and zeroing unused objects.
 */
sweep_block (MSBlockInfo *block, gboolean during_major_collection)
	int count;
	void *reversed = NULL;

	if (!during_major_collection)
		g_assert (!sgen_concurrent_collection_in_progress ());

	if (block->swept)
		return;

	count = MS_BLOCK_FREE / block->obj_size;

	block->free_list = NULL;

	/* Use inline instances specialized to constant sizes; this allows the compiler to replace the memset calls with inline code */
	// FIXME: Add more sizes
	switch (block->obj_size) {
	case 16:
		sweep_block_for_size (block, count, 16);
		break;
	default:
		sweep_block_for_size (block, count, block->obj_size);
		break;
	}

	/* reset mark bits */
	memset (block->mark_words, 0, sizeof (mword) * MS_NUM_MARK_WORDS);

	/*
	 * Reverse the free list so that it's in address order.  The sweep
	 * pushes free slots onto the list head front-to-back, so the list
	 * comes out in descending address order; reversing it restores
	 * ascending order.
	 */
	while (block->free_list) {
		void *next = *(void**)block->free_list;
		*(void**)block->free_list = reversed;
		reversed = block->free_list;
		block->free_list = next;
	}
	block->free_list = reversed;

	block->swept = TRUE;

bitcount (mword d)
	int count = 0;

	if (sizeof (mword) == sizeof (unsigned long))
		count += __builtin_popcountl (d);
	else
		count += __builtin_popcount (d);

	return count;
major_sweep (void)
	/* statistics for evacuation */
	int *slots_available = alloca (sizeof (int) * num_block_obj_sizes);
	int *slots_used = alloca (sizeof (int) * num_block_obj_sizes);
	int *num_blocks = alloca (sizeof (int) * num_block_obj_sizes);

#ifdef SGEN_HAVE_CONCURRENT_MARK
	mword total_evacuate_heap = 0;
	mword total_evacuate_saved = 0;
#endif

	for (i = 0; i < num_block_obj_sizes; ++i)
		slots_available [i] = slots_used [i] = num_blocks [i] = 0;

	/* clear all the free lists */
	for (i = 0; i < MS_BLOCK_TYPE_MAX; ++i) {
		MSBlockInfo **free_blocks = free_block_lists [i];
		for (j = 0; j < num_block_obj_sizes; ++j)
			free_blocks [j] = NULL;
	}

	/* traverse all blocks, free and zero unmarked objects */
	FOREACH_BLOCK (block) {
		int i, count, obj_size_index, nused = 0;
		gboolean have_live = FALSE;
		gboolean has_pinned;
		gboolean have_free = FALSE;

		obj_size_index = block->obj_size_index;

		has_pinned = block->has_pinned;
		block->has_pinned = block->pinned;

		block->is_to_space = FALSE;

		count = MS_BLOCK_FREE / block->obj_size;

#ifdef SGEN_HAVE_CONCURRENT_MARK
		if (block->cardtable_mod_union) {
			sgen_free_internal_dynamic (block->cardtable_mod_union, CARDS_PER_BLOCK, INTERNAL_MEM_CARDTABLE_MOD_UNION);
			block->cardtable_mod_union = NULL;
		}
#endif

		/* Count marked objects in the block */
		for (i = 0; i < MS_NUM_MARK_WORDS; ++i) {
			nused += bitcount (block->mark_words [i]);
		}

		if (!lazy_sweep)
			sweep_block (block, TRUE);

		if (have_live) {
			++num_blocks [obj_size_index];
			slots_used [obj_size_index] += nused;
			slots_available [obj_size_index] += count;

			/*
			 * If there are free slots in the block, add
			 * the block to the corresponding free list.
			 */
			if (have_free) {
				MSBlockInfo **free_blocks = FREE_BLOCKS (block->pinned, block->has_references);
				int index = MS_BLOCK_OBJ_SIZE_INDEX (block->obj_size);
				block->next_free = free_blocks [index];
				free_blocks [index] = block;
			}

			update_heap_boundaries_for_block (block);
		} else {
			/*
			 * Blocks without live objects are removed from the
			 * block list and freed.
			 */
			DELETE_BLOCK_IN_FOREACH ();

			binary_protocol_empty (MS_BLOCK_OBJ (block, 0), (char*)MS_BLOCK_OBJ (block, count) - (char*)MS_BLOCK_OBJ (block, 0));
			ms_free_block (block->block);
			sgen_free_internal (block, INTERNAL_MEM_MS_BLOCK_INFO);

			--num_major_sections;
		}
	} END_FOREACH_BLOCK;
	sgen_pointer_queue_remove_nulls (&allocated_blocks);

	for (i = 0; i < num_block_obj_sizes; ++i) {
		float usage = (float)slots_used [i] / (float)slots_available [i];
		if (num_blocks [i] > 5 && usage < evacuation_threshold) {
			evacuate_block_obj_sizes [i] = TRUE;
			/*
			g_print ("slot size %d - %d of %d used\n",
					block_obj_sizes [i], slots_used [i], slots_available [i]);
			*/
		} else {
			evacuate_block_obj_sizes [i] = FALSE;
		}
#ifdef SGEN_HAVE_CONCURRENT_MARK
		{
			mword total_bytes = block_obj_sizes [i] * slots_available [i];
			total_evacuate_heap += total_bytes;
			if (evacuate_block_obj_sizes [i])
				total_evacuate_saved += total_bytes - block_obj_sizes [i] * slots_used [i];
		}
#endif
	}

#ifdef SGEN_HAVE_CONCURRENT_MARK
	want_evacuation = (float)total_evacuate_saved / (float)total_evacuate_heap > (1 - concurrent_evacuation_threshold);
#endif
static int count_pinned_ref;
static int count_pinned_nonref;
static int count_nonpinned_ref;
static int count_nonpinned_nonref;

count_nonpinned_callback (char *obj, size_t size, void *data)
	MonoVTable *vtable = (MonoVTable*)LOAD_VTABLE (obj);

	if (vtable->klass->has_references)
		++count_nonpinned_ref;
	else
		++count_nonpinned_nonref;

count_pinned_callback (char *obj, size_t size, void *data)
	MonoVTable *vtable = (MonoVTable*)LOAD_VTABLE (obj);

	if (vtable->klass->has_references)
		++count_pinned_ref;
	else
		++count_pinned_nonref;

static G_GNUC_UNUSED void
count_ref_nonref_objs (void)
	int total;

	count_pinned_ref = 0;
	count_pinned_nonref = 0;
	count_nonpinned_ref = 0;
	count_nonpinned_nonref = 0;

	major_iterate_objects (ITERATE_OBJECTS_SWEEP_NON_PINNED, count_nonpinned_callback, NULL);
	major_iterate_objects (ITERATE_OBJECTS_SWEEP_PINNED, count_pinned_callback, NULL);

	total = count_pinned_nonref + count_nonpinned_nonref + count_pinned_ref + count_nonpinned_ref;

	g_print ("ref: %d pinned %d non-pinned non-ref: %d pinned %d non-pinned -- %.1f\n",
			count_pinned_ref, count_nonpinned_ref,
			count_pinned_nonref, count_nonpinned_nonref,
			(count_pinned_nonref + count_nonpinned_nonref) * 100.0 / total);
ms_calculate_block_obj_sizes (double factor, int *arr)
	double target_size = sizeof (MonoObject);
	int num_sizes = 0;
	int last_size = 0;

	do {
		int target_count = (int)ceil (MS_BLOCK_FREE / target_size);
		int size = MIN ((MS_BLOCK_FREE / target_count) & ~(SGEN_ALLOC_ALIGN - 1), SGEN_MAX_SMALL_OBJ_SIZE);

		if (size != last_size) {
			if (arr)
				arr [num_sizes] = size;
			++num_sizes;
			last_size = size;
		}

		target_size *= factor;
	} while (last_size < SGEN_MAX_SMALL_OBJ_SIZE);

	return num_sizes;
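/*
 * The resulting table is a roughly geometric progression of slot sizes:
 * each iteration multiplies the target size by the given factor (sqrt(2)
 * for this collector), then rounds to an SGEN_ALLOC_ALIGN-aligned size
 * that packs the block evenly; the last_size check drops duplicates
 * produced by the rounding.  The function is called once with arr == NULL
 * just to count the sizes, then again to fill the allocated array.
 */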
/* only valid during minor collections */
static mword old_num_major_sections;

major_start_nursery_collection (void)
#ifdef MARKSWEEP_CONSISTENCY_CHECK
	consistency_check ();
#endif

	old_num_major_sections = num_major_sections;

major_finish_nursery_collection (void)
#ifdef MARKSWEEP_CONSISTENCY_CHECK
	consistency_check ();
#endif
	sgen_register_major_sections_alloced (num_major_sections - old_num_major_sections);

major_start_major_collection (void)
	int i;

	/* clear the free lists */
	for (i = 0; i < num_block_obj_sizes; ++i) {
		if (!evacuate_block_obj_sizes [i])
			continue;

		free_block_lists [0][i] = NULL;
		free_block_lists [MS_BLOCK_FLAG_REFS][i] = NULL;
	}

	// Sweep all unswept blocks
	if (lazy_sweep) {
		MONO_GC_SWEEP_BEGIN (GENERATION_OLD, TRUE);

		FOREACH_BLOCK (block) {
			sweep_block (block, TRUE);
		} END_FOREACH_BLOCK;

		MONO_GC_SWEEP_END (GENERATION_OLD, TRUE);
	}

major_finish_major_collection (void)

#if SIZEOF_VOID_P != 8
static int
compare_pointers (const void *va, const void *vb) {
	char *a = *(char**)va, *b = *(char**)vb;
	if (a < b)
		return -1;
	if (a > b)
		return 1;
	return 0;
}
#endif

major_have_computed_minor_collection_allowance (void)
	size_t section_reserve = sgen_get_minor_collection_allowance () / MS_BLOCK_SIZE;

	g_assert (have_swept);

#if SIZEOF_VOID_P != 8
	{
		int i, num_empty_blocks_orig, num_blocks, arr_length;
		void **empty_block_arr;
		void **rebuild_next;

		/*
		 * sgen_free_os_memory () asserts in mono_vfree () because windows doesn't like freeing the middle of
		 * a VirtualAlloc ()-ed block.
		 */

		if (num_empty_blocks <= section_reserve)
			return;
		SGEN_ASSERT (0, num_empty_blocks > 0, "section reserve can't be negative");

		num_empty_blocks_orig = num_empty_blocks;
		empty_block_arr = (void**)sgen_alloc_internal_dynamic (sizeof (void*) * num_empty_blocks_orig,
				INTERNAL_MEM_MS_BLOCK_INFO_SORT, FALSE);
		if (!empty_block_arr)
			goto fallback;

		i = 0;
		for (block = empty_blocks; block; block = *(void**)block)
			empty_block_arr [i++] = block;
		SGEN_ASSERT (0, i == num_empty_blocks, "empty block count wrong");

		sgen_qsort (empty_block_arr, num_empty_blocks, sizeof (void*), compare_pointers);

		/*
		 * We iterate over the free blocks, trying to find MS_BLOCK_ALLOC_NUM
		 * contiguous ones.  If we do, we free them.  If that's not enough to get to
		 * section_reserve, we halve the number of contiguous blocks we're looking
		 * for and have another go, until we're done with looking for pairs of
		 * blocks, at which point we give up and go to the fallback.
		 */
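		/*
		 * Worked example, assuming 16 KB blocks: with num_blocks == 4 and
		 * sorted addresses { B, B + 16K, B + 32K, B + 48K, C, ... }, the run
		 * starting at B reaches d + 1 - first == 4, so those four blocks are
		 * returned to the OS with a single 64 KB sgen_free_os_memory () call
		 * and their array entries are nulled.
		 */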
		arr_length = num_empty_blocks_orig;
		num_blocks = MS_BLOCK_ALLOC_NUM;
		while (num_empty_blocks > section_reserve && num_blocks > 1) {
			int first = -1;
			int dest = 0;

			for (i = 0; i < arr_length; ++i) {
				int d = dest;
				void *block = empty_block_arr [i];
				SGEN_ASSERT (0, block, "we're not shifting correctly");
				if (i != dest) {
					empty_block_arr [dest] = block;
					/*
					 * This is not strictly necessary, but we're
					 * cautious.
					 */
					empty_block_arr [i] = NULL;
				}
				++dest;

				if (first < 0) {
					first = d;
					continue;
				}

				SGEN_ASSERT (0, first >= 0 && d > first, "algorithm is wrong");

				if ((char*)block != ((char*)empty_block_arr [d-1]) + MS_BLOCK_SIZE) {
					first = d;
					continue;
				}

				if (d + 1 - first == num_blocks) {
					/*
					 * We found num_blocks contiguous blocks.  Free them
					 * and null their array entries.  As an optimization
					 * we could, instead of nulling the entries, shift
					 * the following entries over to the left, while
					 * we're iterating.
					 */
					int j;
					sgen_free_os_memory (empty_block_arr [first], MS_BLOCK_SIZE * num_blocks, SGEN_ALLOC_HEAP);
					for (j = first; j <= d; ++j)
						empty_block_arr [j] = NULL;

					num_empty_blocks -= num_blocks;

					stat_major_blocks_freed += num_blocks;
					if (num_blocks == MS_BLOCK_ALLOC_NUM)
						stat_major_blocks_freed_ideal += num_blocks;
					else
						stat_major_blocks_freed_less_ideal += num_blocks;
				}
			}

			SGEN_ASSERT (0, dest <= i && dest <= arr_length, "array length is off");
			arr_length = dest;
			SGEN_ASSERT (0, arr_length == num_empty_blocks, "array length is off");

			num_blocks >>= 1;
		}

		/* rebuild empty_blocks free list */
		rebuild_next = (void**)&empty_blocks;
		for (i = 0; i < arr_length; ++i) {
			void *block = empty_block_arr [i];
			SGEN_ASSERT (0, block, "we're missing blocks");
			*rebuild_next = block;
			rebuild_next = (void**)block;
		}
		*rebuild_next = NULL;

		sgen_free_internal_dynamic (empty_block_arr, sizeof (void*) * num_empty_blocks_orig, INTERNAL_MEM_MS_BLOCK_INFO_SORT);
	}

 fallback:
	SGEN_ASSERT (0, num_empty_blocks >= 0, "we freed more blocks than we had in the first place?");

	/*
	 * This is our threshold.  If there aren't more empty than used blocks, we won't
	 * release uncontiguous blocks, in fear of fragmenting the address space.
	 */
	if (num_empty_blocks <= num_major_sections)
		return;

	while (num_empty_blocks > section_reserve) {
		void *next = *(void**)empty_blocks;
		sgen_free_os_memory (empty_blocks, MS_BLOCK_SIZE, SGEN_ALLOC_HEAP);
		empty_blocks = next;
		/*
		 * Need not be atomic, because this is running
		 * during a collection.
		 */
		--num_empty_blocks;

		++stat_major_blocks_freed;
#if SIZEOF_VOID_P != 8
		++stat_major_blocks_freed_individual;
#endif
	}
major_find_pin_queue_start_ends (SgenGrayQueue *queue)
	FOREACH_BLOCK (block) {
		block->pin_queue_start = sgen_find_optimized_pin_queue_area (MS_BLOCK_FOR_BLOCK_INFO (block) + MS_BLOCK_SKIP, MS_BLOCK_FOR_BLOCK_INFO (block) + MS_BLOCK_SIZE,
				&block->pin_queue_num_entries);
	} END_FOREACH_BLOCK;

major_pin_objects (SgenGrayQueue *queue)
	FOREACH_BLOCK (block) {
		mark_pinned_objects_in_block (block, queue);
	} END_FOREACH_BLOCK;

major_init_to_space (void)

major_report_pinned_memory_usage (void)
	g_assert_not_reached ();

major_get_used_size (void)
	FOREACH_BLOCK (block) {
		int count = MS_BLOCK_FREE / block->obj_size;
		size += count * block->obj_size;
		for (iter = block->free_list; iter; iter = (void**)*iter)
			size -= block->obj_size;
	} END_FOREACH_BLOCK;

	return size;

get_num_major_sections (void)
	return num_major_sections;

major_handle_gc_param (const char *opt)
	if (g_str_has_prefix (opt, "evacuation-threshold=")) {
		const char *arg = strchr (opt, '=') + 1;
		int percentage = atoi (arg);
		if (percentage < 0 || percentage > 100) {
			fprintf (stderr, "evacuation-threshold must be an integer in the range 0-100.\n");
			exit (1);
		}
		evacuation_threshold = (float)percentage / 100.0f;
		return TRUE;
	} else if (!strcmp (opt, "lazy-sweep")) {
		lazy_sweep = TRUE;
		return TRUE;
	} else if (!strcmp (opt, "no-lazy-sweep")) {
		lazy_sweep = FALSE;
		return TRUE;
	}

	return FALSE;

major_print_gc_param_usage (void)
	fprintf (stderr,
			""
			"  evacuation-threshold=P (where P is a percentage, an integer in 0-100)\n"
			"  (no-)lazy-sweep\n"
			);

major_iterate_live_block_ranges (sgen_cardtable_block_callback callback)
	MSBlockInfo *block;
	gboolean has_references;

	FOREACH_BLOCK_HAS_REFERENCES (block, has_references) {
		if (has_references)
			callback ((mword)MS_BLOCK_FOR_BLOCK_INFO (block), MS_BLOCK_SIZE);
	} END_FOREACH_BLOCK;

#ifdef HEAVY_STATISTICS
extern long long marked_cards;
extern long long scanned_cards;
extern long long scanned_objects;
extern long long remarked_cards;
#endif

#define CARD_WORDS_PER_BLOCK (CARDS_PER_BLOCK / SIZEOF_VOID_P)
/*
 * MS blocks are 16K aligned.
 * Cardtables are 4K aligned, at least.
 * This means that the cardtable of a given block is 32 bytes aligned.
 */
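/*
 * Concretely, assuming CARD_BITS 9: the card for address A sits at card
 * table index A >> 9, so a 16 KB-aligned block starts at a card index
 * that is a multiple of 32; initial_skip_card () below exploits this by
 * scanning the block's 32 card bytes one mword at a time.
 */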
initial_skip_card (guint8 *card_data)
	mword *cards = (mword*)card_data;
	mword card;
	int i;
	for (i = 0; i < CARD_WORDS_PER_BLOCK; ++i) {
		card = cards [i];
		if (card)
			break;
	}

	if (i == CARD_WORDS_PER_BLOCK)
		return card_data + CARDS_PER_BLOCK;

#if defined(__i386__) && defined(__GNUC__)
	return card_data + i * 4 + (__builtin_ffs (card) - 1) / 8;
#elif defined(__x86_64__) && defined(__GNUC__)
	return card_data + i * 8 + (__builtin_ffsll (card) - 1) / 8;
#elif defined(__s390x__) && defined(__GNUC__)
	return card_data + i * 8 + (__builtin_ffsll (GUINT64_TO_LE(card)) - 1) / 8;
#else
	for (i = i * SIZEOF_VOID_P; i < CARDS_PER_BLOCK; ++i) {
		if (card_data [i])
			return &card_data [i];
	}
#endif

static G_GNUC_UNUSED guint8*
skip_card (guint8 *card_data, guint8 *card_data_end)
	while (card_data < card_data_end && !*card_data)
		++card_data;
	return card_data;

#define MS_BLOCK_OBJ_INDEX_FAST(o,b,os)	(((char*)(o) - ((b) + MS_BLOCK_SKIP)) / (os))
#define MS_BLOCK_OBJ_FAST(b,os,i)		((b) + MS_BLOCK_SKIP + (os) * (i))
#define MS_OBJ_ALLOCED_FAST(o,b)	(*(void**)(o) && (*(char**)(o) < (b) || *(char**)(o) >= (b) + MS_BLOCK_SIZE))
major_scan_card_table (gboolean mod_union, SgenGrayQueue *queue)
	MSBlockInfo *block;
	gboolean has_references;
	ScanObjectFunc scan_func = sgen_get_current_object_ops ()->scan_object;

#ifdef SGEN_HAVE_CONCURRENT_MARK
	if (!concurrent_mark)
		g_assert (!mod_union);
#else
	g_assert (!mod_union);
#endif

	FOREACH_BLOCK_HAS_REFERENCES (block, has_references) {
		int block_obj_size;
		char *block_start;

		if (!has_references)
			continue;

		block_obj_size = block->obj_size;
		block_start = MS_BLOCK_FOR_BLOCK_INFO (block);

		if (block_obj_size >= CARD_SIZE_IN_BYTES) {
			guint8 *cards;
#ifndef SGEN_HAVE_OVERLAPPING_CARDS
			guint8 cards_data [CARDS_PER_BLOCK];
#endif
			char *obj, *end, *base;

			if (mod_union) {
#ifdef SGEN_HAVE_CONCURRENT_MARK
				cards = block->cardtable_mod_union;
				/*
				 * This happens when the nursery
				 * collection that precedes finishing
				 * the concurrent collection allocates
				 * new major blocks.
				 */
				if (!cards)
					continue;
#endif
			} else {
			/* We can avoid the extra copy since the remark cardtable was cleaned before */
#ifdef SGEN_HAVE_OVERLAPPING_CARDS
				cards = sgen_card_table_get_card_scan_address ((mword)block_start);
#else
				if (!sgen_card_table_get_card_data (cards_data, (mword)block_start, CARDS_PER_BLOCK))
					continue;
				cards = cards_data;
#endif
			}

			obj = (char*)MS_BLOCK_OBJ_FAST (block_start, block_obj_size, 0);
			end = block_start + MS_BLOCK_SIZE;
			base = sgen_card_table_align_pointer (obj);

			while (obj < end) {
				size_t card_offset;

				if (!block->swept)
					sweep_block (block, FALSE);

				if (!MS_OBJ_ALLOCED_FAST (obj, block_start))
					goto next_large;

				if (mod_union) {
					/* FIXME: do this more efficiently */
					int w, b;
					MS_CALC_MARK_BIT (w, b, obj);
					if (!MS_MARK_BIT (block, w, b))
						goto next_large;
				}

				card_offset = (obj - base) >> CARD_BITS;
				sgen_cardtable_scan_object (obj, block_obj_size, cards + card_offset, mod_union, queue);

			next_large:
				obj += block_obj_size;
			}
		} else {
			guint8 *card_data, *card_base;
			guint8 *card_data_end;

			/*
			 * This is safe in the face of card aliasing for the following reason:
			 *
			 * Major blocks are 16k aligned, or 32 cards aligned.
			 * Card aliasing happens in powers of two, so as long as major blocks are aligned to their
			 * sizes, they won't overflow the cardtable overlap modulus.
			 */
			if (mod_union) {
#ifdef SGEN_HAVE_CONCURRENT_MARK
				card_data = card_base = block->cardtable_mod_union;
				/*
				 * This happens when the nursery
				 * collection that precedes finishing
				 * the concurrent collection allocates
				 * new major blocks.
				 */
				if (!card_data)
					continue;
#else
				g_assert_not_reached ();
#endif
			} else {
				card_data = card_base = sgen_card_table_get_card_scan_address ((mword)block_start);
			}
			card_data_end = card_data + CARDS_PER_BLOCK;

			for (card_data = initial_skip_card (card_data); card_data < card_data_end; ++card_data) { //card_data = skip_card (card_data + 1, card_data_end)) {
				size_t idx = card_data - card_base;
				char *start = (char*)(block_start + idx * CARD_SIZE_IN_BYTES);
				char *end = start + CARD_SIZE_IN_BYTES;
				char *first_obj, *obj;

				HEAVY_STAT (++scanned_cards);

				if (!*card_data)
					continue;

				if (!block->swept)
					sweep_block (block, FALSE);

				HEAVY_STAT (++marked_cards);

				sgen_card_table_prepare_card_for_scanning (card_data);

				index = MS_BLOCK_OBJ_INDEX_FAST (start, block_start, block_obj_size);

				obj = first_obj = (char*)MS_BLOCK_OBJ_FAST (block_start, block_obj_size, index);
				while (obj < end) {
					if (!MS_OBJ_ALLOCED_FAST (obj, block_start))
						goto next_small;

					if (mod_union) {
						/* FIXME: do this more efficiently */
						int w, b;
						MS_CALC_MARK_BIT (w, b, obj);
						if (!MS_MARK_BIT (block, w, b))
							goto next_small;
					}

					HEAVY_STAT (++scanned_objects);
					scan_func (obj, sgen_obj_get_descriptor (obj), queue);
				next_small:
					obj += block_obj_size;
				}
				HEAVY_STAT (if (*card_data) ++remarked_cards);
				binary_protocol_card_scan (first_obj, obj - first_obj);
			}
		}
	} END_FOREACH_BLOCK;
major_count_cards (long long *num_total_cards, long long *num_marked_cards)
	MSBlockInfo *block;
	gboolean has_references;
	long long total_cards = 0;
	long long marked_cards = 0;

	FOREACH_BLOCK_HAS_REFERENCES (block, has_references) {
		guint8 *cards = sgen_card_table_get_card_scan_address ((mword) MS_BLOCK_FOR_BLOCK_INFO (block));
		int i;

		if (!has_references)
			continue;

		total_cards += CARDS_PER_BLOCK;
		for (i = 0; i < CARDS_PER_BLOCK; ++i) {
			if (cards [i])
				++marked_cards;
		}
	} END_FOREACH_BLOCK;

	*num_total_cards = total_cards;
	*num_marked_cards = marked_cards;

#ifdef SGEN_HAVE_CONCURRENT_MARK
update_cardtable_mod_union (void)
	MSBlockInfo *block;

	FOREACH_BLOCK (block) {
		size_t num_cards;

		block->cardtable_mod_union = sgen_card_table_update_mod_union (block->cardtable_mod_union,
				MS_BLOCK_FOR_BLOCK_INFO (block), MS_BLOCK_SIZE, &num_cards);

		SGEN_ASSERT (0, num_cards == CARDS_PER_BLOCK, "Number of cards calculation is wrong");
	} END_FOREACH_BLOCK;

major_get_cardtable_mod_union_for_object (char *obj)
	MSBlockInfo *block = MS_BLOCK_FOR_OBJ (obj);
	return &block->cardtable_mod_union [(obj - (char*)sgen_card_table_align_pointer (MS_BLOCK_FOR_BLOCK_INFO (block))) >> CARD_BITS];
#endif
alloc_free_block_lists (MSBlockInfo ***lists)
	int i;
	for (i = 0; i < MS_BLOCK_TYPE_MAX; ++i)
		lists [i] = sgen_alloc_internal_dynamic (sizeof (MSBlockInfo*) * num_block_obj_sizes, INTERNAL_MEM_MS_TABLES, TRUE);

#undef pthread_create

post_param_init (SgenMajorCollector *collector)
	collector->sweeps_lazily = lazy_sweep;

#ifdef SGEN_HAVE_CONCURRENT_MARK
sgen_marksweep_init_internal (SgenMajorCollector *collector, gboolean is_concurrent)
#else // SGEN_HAVE_CONCURRENT_MARK
#error unknown configuration
#endif // SGEN_HAVE_CONCURRENT_MARK
	int i;

	sgen_register_fixed_internal_mem_type (INTERNAL_MEM_MS_BLOCK_INFO, sizeof (MSBlockInfo));

	num_block_obj_sizes = ms_calculate_block_obj_sizes (MS_BLOCK_OBJ_SIZE_FACTOR, NULL);
	block_obj_sizes = sgen_alloc_internal_dynamic (sizeof (int) * num_block_obj_sizes, INTERNAL_MEM_MS_TABLES, TRUE);
	ms_calculate_block_obj_sizes (MS_BLOCK_OBJ_SIZE_FACTOR, block_obj_sizes);

	evacuate_block_obj_sizes = sgen_alloc_internal_dynamic (sizeof (gboolean) * num_block_obj_sizes, INTERNAL_MEM_MS_TABLES, TRUE);
	for (i = 0; i < num_block_obj_sizes; ++i)
		evacuate_block_obj_sizes [i] = FALSE;

	/*
	g_print ("block object sizes:\n");
	for (i = 0; i < num_block_obj_sizes; ++i)
		g_print ("%d\n", block_obj_sizes [i]);
	*/

	alloc_free_block_lists (free_block_lists);

	for (i = 0; i < MS_NUM_FAST_BLOCK_OBJ_SIZE_INDEXES; ++i)
		fast_block_obj_size_indexes [i] = ms_find_block_obj_size_index (i * 8);
	for (i = 0; i < MS_NUM_FAST_BLOCK_OBJ_SIZE_INDEXES * 8; ++i)
		g_assert (MS_BLOCK_OBJ_SIZE_INDEX (i) == ms_find_block_obj_size_index (i));

	mono_counters_register ("# major blocks allocated", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_major_blocks_alloced);
	mono_counters_register ("# major blocks freed", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_major_blocks_freed);
	mono_counters_register ("# major blocks lazy swept", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_major_blocks_lazy_swept);
	mono_counters_register ("# major objects evacuated", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_major_objects_evacuated);
#if SIZEOF_VOID_P != 8
	mono_counters_register ("# major blocks freed ideally", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_major_blocks_freed_ideal);
	mono_counters_register ("# major blocks freed less ideally", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_major_blocks_freed_less_ideal);
	mono_counters_register ("# major blocks freed individually", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_major_blocks_freed_individual);
	mono_counters_register ("# major blocks allocated less ideally", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_major_blocks_alloced_less_ideal);
#endif
	collector->section_size = MAJOR_SECTION_SIZE;

#ifdef SGEN_HAVE_CONCURRENT_MARK
	concurrent_mark = is_concurrent;
	if (is_concurrent) {
		collector->is_concurrent = TRUE;
		collector->want_synchronous_collection = &want_evacuation;
	} else
#endif
	{
		collector->is_concurrent = FALSE;
		collector->want_synchronous_collection = NULL;
	}
	collector->get_and_reset_num_major_objects_marked = major_get_and_reset_num_major_objects_marked;
	collector->supports_cardtable = TRUE;

	collector->have_swept = &have_swept;

	collector->alloc_heap = major_alloc_heap;
	collector->is_object_live = major_is_object_live;
	collector->alloc_small_pinned_obj = major_alloc_small_pinned_obj;
	collector->alloc_degraded = major_alloc_degraded;

	collector->alloc_object = major_alloc_object;
	collector->free_pinned_object = free_pinned_object;
	collector->iterate_objects = major_iterate_objects;
	collector->free_non_pinned_object = major_free_non_pinned_object;
	collector->find_pin_queue_start_ends = major_find_pin_queue_start_ends;
	collector->pin_objects = major_pin_objects;
	collector->pin_major_object = pin_major_object;
	collector->scan_card_table = major_scan_card_table;
	collector->iterate_live_block_ranges = (void*)(void*) major_iterate_live_block_ranges;
#ifdef SGEN_HAVE_CONCURRENT_MARK
	if (is_concurrent) {
		collector->update_cardtable_mod_union = update_cardtable_mod_union;
		collector->get_cardtable_mod_union_for_object = major_get_cardtable_mod_union_for_object;
	}
#endif
	collector->init_to_space = major_init_to_space;
	collector->sweep = major_sweep;
	collector->check_scan_starts = major_check_scan_starts;
	collector->dump_heap = major_dump_heap;
	collector->get_used_size = major_get_used_size;
	collector->start_nursery_collection = major_start_nursery_collection;
	collector->finish_nursery_collection = major_finish_nursery_collection;
	collector->start_major_collection = major_start_major_collection;
	collector->finish_major_collection = major_finish_major_collection;
	collector->have_computed_minor_collection_allowance = major_have_computed_minor_collection_allowance;
	collector->ptr_is_in_non_pinned_space = major_ptr_is_in_non_pinned_space;
	collector->obj_is_from_pinned_alloc = obj_is_from_pinned_alloc;
	collector->report_pinned_memory_usage = major_report_pinned_memory_usage;
	collector->get_num_major_sections = get_num_major_sections;
	collector->handle_gc_param = major_handle_gc_param;
	collector->print_gc_param_usage = major_print_gc_param_usage;
	collector->post_param_init = post_param_init;
	collector->is_valid_object = major_is_valid_object;
	collector->describe_pointer = major_describe_pointer;
	collector->count_cards = major_count_cards;

	collector->major_ops.copy_or_mark_object = major_copy_or_mark_object_canonical;
	collector->major_ops.scan_object = major_scan_object;
#ifdef SGEN_HAVE_CONCURRENT_MARK
	if (is_concurrent) {
		collector->major_concurrent_ops.copy_or_mark_object = major_copy_or_mark_object_concurrent_canonical;
		collector->major_concurrent_ops.scan_object = major_scan_object_concurrent;
		collector->major_concurrent_ops.scan_vtype = major_scan_vtype_concurrent;
	}
#endif

	/* cardtable requires major pages to be 8 cards aligned */
	g_assert ((MS_BLOCK_SIZE % (8 * CARD_SIZE_IN_BYTES)) == 0);

#ifdef SGEN_HAVE_CONCURRENT_MARK
sgen_marksweep_init (SgenMajorCollector *collector)
	sgen_marksweep_init_internal (collector, FALSE);

sgen_marksweep_conc_init (SgenMajorCollector *collector)
	sgen_marksweep_init_internal (collector, TRUE);
#endif