2 * sgen-marksweep.c: Simple generational GC.
5 * Mark Probst <mark.probst@gmail.com>
7 * Copyright 2009-2010 Novell, Inc.
9 * Permission is hereby granted, free of charge, to any person obtaining
10 * a copy of this software and associated documentation files (the
11 * "Software"), to deal in the Software without restriction, including
12 * without limitation the rights to use, copy, modify, merge, publish,
13 * distribute, sublicense, and/or sell copies of the Software, and to
14 * permit persons to whom the Software is furnished to do so, subject to
15 * the following conditions:
17 * The above copyright notice and this permission notice shall be
18 * included in all copies or substantial portions of the Software.
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
21 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
22 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
23 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
24 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
25 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
26 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
33 #include "utils/mono-counters.h"
34 #include "metadata/object-internals.h"
35 #include "metadata/profiler-private.h"
37 #include "metadata/sgen-gc.h"
38 #include "metadata/sgen-protocol.h"
/* Fixed block geometry: 16 KB blocks, addressable by masking/shifting. */
42 #define MS_BLOCK_SIZE (16*1024)
43 #define MS_BLOCK_SIZE_SHIFT 14
44 #define MAJOR_SECTION_SIZE MS_BLOCK_SIZE
47 #define MS_DEFAULT_HEAP_NUM_BLOCKS (32 * 1024) /* 512 MB */
51 * Don't allocate single blocks, but alloc a contiguous batch of this many
52 * blocks in one swoop.
54 #define MS_BLOCK_ALLOC_NUM 32
57 * Number of bytes before the first object in a block. At the start
58 * of a block is the MSBlockHeader, then optional padding, then come
59 * the objects, so this must be >= sizeof (MSBlockHeader).
62 #define MS_BLOCK_SKIP 0
64 #define MS_BLOCK_SKIP 16
/* Usable payload bytes per block after the header/skip area. */
67 #define MS_BLOCK_FREE (MS_BLOCK_SIZE - MS_BLOCK_SKIP)
/* One mark bit per SGEN_ALLOC_ALIGN-sized slot, packed into mwords (rounded up). */
69 #define MS_NUM_MARK_WORDS ((MS_BLOCK_SIZE / SGEN_ALLOC_ALIGN + sizeof (mword) * 8 - 1) / (sizeof (mword) * 8))
71 #if SGEN_MAX_SMALL_OBJ_SIZE > MS_BLOCK_FREE / 2
72 #error MAX_SMALL_OBJ_SIZE must be at most MS_BLOCK_FREE / 2
/* Per-block metadata; one MSBlockInfo per major-heap block. */
75 typedef struct _MSBlockInfo MSBlockInfo;
79 gboolean has_references; /* objects in this block may contain GC references */
87 MSBlockInfo *next_free; /* link in the per-size-class free-block list */
88 void **pin_queue_start; /* slice of the global pin queue that falls in this block */
89 int pin_queue_num_entries;
90 mword mark_words [MS_NUM_MARK_WORDS]; /* mark bitmap, one bit per alignment slot */
94 static int ms_heap_num_blocks = MS_DEFAULT_HEAP_NUM_BLOCKS;
/* With a fixed heap the major heap starts right where the nursery ends. */
96 #define ms_heap_start nursery_end
97 static char *ms_heap_end;
/* Fast range test: does p fall inside the small-object major heap? */
99 #define MS_PTR_IN_SMALL_MAJOR_HEAP(p) ((char*)(p) >= ms_heap_start && (char*)(p) < ms_heap_end)
101 /* array of all block infos in the system */
102 static MSBlockInfo *block_infos;
/* Address of the i-th object slot in block b. */
105 #define MS_BLOCK_OBJ(b,i) ((b)->block + MS_BLOCK_SKIP + (b)->obj_size * (i))
/* Round an interior object pointer down to its block's base address. */
106 #define MS_BLOCK_DATA_FOR_OBJ(o) ((char*)((mword)(o) & ~(mword)(MS_BLOCK_SIZE - 1)))
/* Fixed heap: index the block_infos array directly from the object address. */
109 #define MS_BLOCK_FOR_OBJ(o) (&block_infos [(mword)((char*)(o) - ms_heap_start) >> MS_BLOCK_SIZE_SHIFT])
/* Dynamic heap: the MSBlockHeader at the block base points to its info. */
115 #define MS_BLOCK_FOR_OBJ(o) (((MSBlockHeader*)MS_BLOCK_DATA_FOR_OBJ ((o)))->info)
/* Slot index of object o within block b. */
118 #define MS_BLOCK_OBJ_INDEX(o,b) (((char*)(o) - ((b)->block + MS_BLOCK_SKIP)) / (b)->obj_size)
120 #define MS_CALC_MARK_BIT(w,b,o) do { \
121 int i = ((char*)(o) - MS_BLOCK_DATA_FOR_OBJ ((o))) >> SGEN_ALLOC_ALIGN_BITS; \
122 if (sizeof (mword) == 4) { \
131 #define MS_MARK_BIT(bl,w,b) ((bl)->mark_words [(w)] & (1L << (b)))
132 #define MS_SET_MARK_BIT(bl,w,b) ((bl)->mark_words [(w)] |= (1L << (b)))
133 #define MS_PAR_SET_MARK_BIT(was_marked,bl,w,b) do { \
134 mword __old = (bl)->mark_words [(w)]; \
135 mword __bitmask = 1L << (b); \
136 if (__old & __bitmask) { \
140 if (SGEN_CAS_PTR ((gpointer*)&(bl)->mark_words [(w)], \
141 (gpointer)(__old | __bitmask), \
142 (gpointer)__old) == \
144 was_marked = FALSE; \
149 #define MS_OBJ_ALLOCED(o,b) (*(void**)(o) && (*(char**)(o) < (b)->block || *(char**)(o) >= (b)->block + MS_BLOCK_SIZE))
/* Block object size classes grow geometrically by sqrt(2). */
151 #define MS_BLOCK_OBJ_SIZE_FACTOR (sqrt (2.0))
154 * This way we can lookup block object size indexes for sizes up to
155 * 256 bytes with a single load.
157 #define MS_NUM_FAST_BLOCK_OBJ_SIZE_INDEXES 32
159 static int *block_obj_sizes;
160 static int num_block_obj_sizes;
161 static int fast_block_obj_size_indexes [MS_NUM_FAST_BLOCK_OBJ_SIZE_INDEXES];
/* Block-type flags; their combinations index free_block_lists. */
163 #define MS_BLOCK_FLAG_PINNED 1
164 #define MS_BLOCK_FLAG_REFS 2
166 #define MS_BLOCK_TYPE_MAX 4
/* Block-list mutation is serialized only when marking in parallel. */
168 #ifdef SGEN_PARALLEL_MARK
169 static LOCK_DECLARE (ms_block_list_mutex);
170 #define LOCK_MS_BLOCK_LIST pthread_mutex_lock (&ms_block_list_mutex)
171 #define UNLOCK_MS_BLOCK_LIST pthread_mutex_unlock (&ms_block_list_mutex)
173 #define LOCK_MS_BLOCK_LIST
174 #define UNLOCK_MS_BLOCK_LIST
177 /* we get this at init */
178 static int nursery_bits;
179 static char *nursery_start;
180 static char *nursery_end;
182 #define ptr_in_nursery(p) (SGEN_PTR_IN_NURSERY ((p), nursery_bits, nursery_start, nursery_end))
185 /* non-allocated block free-list */
186 static MSBlockInfo *empty_blocks = NULL;
188 /* non-allocated block free-list; the first word of each empty block links to the next */
189 static void *empty_blocks = NULL;
190 /* all allocated blocks in the system */
191 static MSBlockInfo *all_blocks;
192 static int num_empty_blocks = 0;
/* Iterate over all in-use blocks; the fixed heap walks the static array... */
196 #define FOREACH_BLOCK(bl) { \
198 for (__block_i = 0; __block_i < ms_heap_num_blocks; ++__block_i) { \
199 (bl) = &block_infos [__block_i]; \
200 if (!(bl)->used) continue;
201 #define END_FOREACH_BLOCK }}
/* ...while the dynamic heap walks the all_blocks linked list. */
203 #define FOREACH_BLOCK(bl) for ((bl) = all_blocks; (bl); (bl) = (bl)->next) {
204 #define END_FOREACH_BLOCK }
207 static int num_major_sections = 0;
208 /* one free block list for each block object size */
209 static MSBlockInfo **free_block_lists [MS_BLOCK_TYPE_MAX];
/* mono_counters statistics, registered at init. */
211 static long long stat_major_blocks_alloced = 0;
212 static long long stat_major_blocks_freed = 0;
/*
 * ms_find_block_obj_size_index:
 * Linear scan of the ascending block_obj_sizes table for the first
 * size class that can hold `size`.  Asserts if nothing fits.
 * Small sizes normally go through the fast table below instead.
 */
215 ms_find_block_obj_size_index (int size)
218 DEBUG (9, g_assert (size <= SGEN_MAX_SMALL_OBJ_SIZE));
219 for (i = 0; i < num_block_obj_sizes; ++i)
220 if (block_obj_sizes [i] >= size)
222 g_assert_not_reached ();
/* Select the free-block list for a pinned/has-references combination. */
225 #define FREE_BLOCKS(p,r) (free_block_lists [((p) ? MS_BLOCK_FLAG_PINNED : 0) | ((r) ? MS_BLOCK_FLAG_REFS : 0)])
/*
 * Map an object size to its size-class index: sizes resolvable by
 * the precomputed fast table use a single load; larger sizes fall
 * back to the linear scan above.
 */
227 #define MS_BLOCK_OBJ_SIZE_INDEX(s) \
228 (((s)+7)>>3 < MS_NUM_FAST_BLOCK_OBJ_SIZE_INDEXES ? \
229 fast_block_obj_size_indexes [((s)+7)>>3] : \
230 ms_find_block_obj_size_index ((s)))
/*
 * major_alloc_heap (FIXED_HEAP variant):
 * Reserve the nursery and the whole major heap in one aligned OS
 * mapping, allocate the static block_infos array, and thread every
 * block info onto the initial empty_blocks free list.
 * Returns the start of the nursery.
 */
234 major_alloc_heap (mword nursery_size, mword nursery_align, int the_nursery_bits)
237 mword major_heap_size = ms_heap_num_blocks * MS_BLOCK_SIZE;
238 mword alloc_size = nursery_size + major_heap_size;
241 g_assert (ms_heap_num_blocks > 0);
242 g_assert (nursery_size % MS_BLOCK_SIZE == 0);
244 g_assert (nursery_align % MS_BLOCK_SIZE == 0);
/* One contiguous mapping: nursery first, major heap immediately after. */
246 nursery_start = mono_sgen_alloc_os_memory_aligned (alloc_size, nursery_align ? nursery_align : MS_BLOCK_SIZE, TRUE);
247 nursery_end = heap_start = nursery_start + nursery_size;
248 nursery_bits = the_nursery_bits;
250 ms_heap_end = heap_start + major_heap_size;
252 block_infos = mono_sgen_alloc_internal_dynamic (sizeof (MSBlockInfo) * ms_heap_num_blocks, INTERNAL_MEM_MS_BLOCK_INFO);
/* Chain all block infos into the empty-block free list. */
254 for (i = 0; i < ms_heap_num_blocks; ++i) {
255 block_infos [i].block = heap_start + i * MS_BLOCK_SIZE;
256 if (i < ms_heap_num_blocks - 1)
257 block_infos [i].next_free = &block_infos [i + 1];
259 block_infos [i].next_free = NULL;
262 empty_blocks = &block_infos [0];
264 return nursery_start;
/*
 * major_alloc_heap (dynamic-heap variant): only the nursery is
 * allocated up front; major blocks are mapped on demand later.
 * Returns the start of the nursery.
 */
268 major_alloc_heap (mword nursery_size, mword nursery_align, int the_nursery_bits)
271 nursery_start = mono_sgen_alloc_os_memory_aligned (nursery_size, nursery_align, TRUE);
273 nursery_start = mono_sgen_alloc_os_memory (nursery_size, TRUE);
275 nursery_end = nursery_start + nursery_size;
276 nursery_bits = the_nursery_bits;
278 return nursery_start;
/*
 * ms_get_empty_block (FIXED_HEAP): pop a block off the preallocated
 * empty_blocks list.  The heap size is fixed, so exhausting the
 * empty list is fatal (assertion).
 */
284 ms_get_empty_block (void)
288 g_assert (empty_blocks);
290 block = empty_blocks;
291 empty_blocks = empty_blocks->next_free;
295 mono_sgen_update_heap_boundaries ((mword)block, (mword)block + MS_BLOCK_SIZE);
/* Return a block to the front of the empty list. */
301 ms_free_block (MSBlockInfo *block)
303 block->next_free = empty_blocks;
304 empty_blocks = block;
/*
 * ms_get_empty_block (dynamic heap): when the lock-free empty list
 * runs dry, map MS_BLOCK_ALLOC_NUM aligned blocks at once and push
 * them onto the list one by one, then CAS-pop one block for the
 * caller.
 */
309 ms_get_empty_block (void)
313 void *block, *empty, *next;
317 p = mono_sgen_alloc_os_memory_aligned (MS_BLOCK_SIZE * MS_BLOCK_ALLOC_NUM, MS_BLOCK_SIZE, TRUE);
319 for (i = 0; i < MS_BLOCK_ALLOC_NUM; ++i) {
322 * We do the free list update one after the
323 * other so that other threads can use the new
324 * blocks as quickly as possible.
/* CAS-push this freshly mapped block onto the empty list. */
327 empty = empty_blocks;
328 *(void**)block = empty;
329 } while (SGEN_CAS_PTR (&empty_blocks, block, empty) != empty);
333 SGEN_ATOMIC_ADD (num_empty_blocks, MS_BLOCK_ALLOC_NUM);
335 stat_major_blocks_alloced += MS_BLOCK_ALLOC_NUM;
/* CAS-pop one block off the list for the caller. */
339 empty = empty_blocks;
343 next = *(void**)block;
344 } while (SGEN_CAS_PTR (&empty_blocks, next, empty) != empty);
346 SGEN_ATOMIC_ADD (num_empty_blocks, -1);
/* Clear the link word so the returned block is fully zeroed. */
348 *(void**)block = NULL;
350 g_assert (!((mword)block & (MS_BLOCK_SIZE - 1)));
352 mono_sgen_update_heap_boundaries ((mword)block, (mword)block + MS_BLOCK_SIZE);
/*
 * ms_free_block (dynamic heap): zero the block and CAS-push it back
 * onto the global empty list.
 */
358 ms_free_block (void *block)
362 memset (block, 0, MS_BLOCK_SIZE);
365 empty = empty_blocks;
366 *(void**)block = empty;
367 } while (SGEN_CAS_PTR (&empty_blocks, block, empty) != empty);
369 SGEN_ATOMIC_ADD (num_empty_blocks, 1);
373 //#define MARKSWEEP_CONSISTENCY_CHECK
375 #ifdef MARKSWEEP_CONSISTENCY_CHECK
/*
 * Debug-only (MARKSWEEP_CONSISTENCY_CHECK) validation of one
 * per-size free-block list: every block must have the right size
 * class and pinnedness, a non-empty free list, and must be on
 * all_blocks but not on empty_blocks.
 */
377 check_block_free_list (MSBlockInfo *block, int size, gboolean pinned)
381 for (; block; block = block->next_free) {
382 g_assert (block->obj_size == size);
383 g_assert ((pinned && block->pinned) || (!pinned && !block->pinned));
385 /* blocks in the free lists must have at least
387 g_assert (block->free_list);
390 /* the block must not be in the empty_blocks list */
391 for (b = empty_blocks; b; b = b->next_free)
392 g_assert (b != block);
394 /* the block must be in the all_blocks list */
395 for (b = all_blocks; b; b = b->next) {
399 g_assert (b == block);
/* Verify num_empty_blocks matches the actual empty-list length. */
405 check_empty_blocks (void)
410 for (p = empty_blocks; p; p = *(void**)p)
412 g_assert (i == num_empty_blocks);
/* Full heap consistency check: every block, free list and mark word. */
417 consistency_check (void)
422 /* check all blocks */
423 FOREACH_BLOCK (block) {
424 int count = MS_BLOCK_FREE / block->obj_size;
429 /* check block header */
430 g_assert (((MSBlockHeader*)block->block)->info == block);
433 /* count number of free slots */
434 for (i = 0; i < count; ++i) {
435 void **obj = (void**) MS_BLOCK_OBJ (block, i);
436 if (!MS_OBJ_ALLOCED (obj, block))
440 /* check free list */
441 for (free = block->free_list; free; free = (void**)*free) {
442 g_assert (MS_BLOCK_FOR_OBJ (free) == block);
445 g_assert (num_free == 0);
447 /* check all mark words are zero */
448 for (i = 0; i < MS_NUM_MARK_WORDS; ++i)
449 g_assert (block->mark_words [i] == 0);
452 /* check free blocks */
453 for (i = 0; i < num_block_obj_sizes; ++i) {
455 for (j = 0; j < MS_BLOCK_TYPE_MAX; ++j)
456 check_block_free_list (free_block_lists [j][i], block_obj_sizes [i], j & MS_BLOCK_FLAG_PINNED);
459 check_empty_blocks ();
/*
 * ms_alloc_block: take an empty block, initialize it for the given
 * size class / pinned / has-references combination, build its
 * intrusive free list of object slots, and push it onto the
 * matching free-block list (and, for the dynamic heap, onto
 * all_blocks).
 */
464 ms_alloc_block (int size_index, gboolean pinned, gboolean has_references)
466 int size = block_obj_sizes [size_index];
467 int count = MS_BLOCK_FREE / size;
469 MSBlockInfo *info = ms_get_empty_block ();
471 MSBlockInfo *info = mono_sgen_alloc_internal (INTERNAL_MEM_MS_BLOCK_INFO);
472 MSBlockHeader *header;
474 MSBlockInfo **free_blocks = FREE_BLOCKS (pinned, has_references);
478 DEBUG (9, g_assert (count >= 2));
480 info->obj_size = size;
481 info->pinned = pinned;
482 info->has_references = has_references;
484 info->block = ms_get_empty_block ();
486 header = (MSBlockHeader*) info->block;
490 /* build free list */
491 obj_start = info->block + MS_BLOCK_SKIP;
492 info->free_list = (void**)obj_start;
493 /* we're skipping the last one - it must be nulled */
494 for (i = 0; i < count - 1; ++i) {
495 char *next_obj_start = obj_start + size;
496 *(void**)obj_start = next_obj_start;
497 obj_start = next_obj_start;
500 *(void**)obj_start = NULL;
/* Make the new block the head of its free-block list. */
502 info->next_free = free_blocks [size_index];
503 free_blocks [size_index] = info;
506 info->next = all_blocks;
510 ++num_major_sections;
/* Whether obj lives in a block used for pinned allocation. */
514 obj_is_from_pinned_alloc (char *obj)
516 MSBlockInfo *block = MS_BLOCK_FOR_OBJ (obj);
517 return block->pinned;
/*
 * alloc_obj: allocate `size` bytes from the free-block list matching
 * the pinned/has-references combination, creating a fresh block if
 * the list is empty.  A block whose free list runs dry is unlinked
 * from the free-block list until the sweep (or free_object) relinks it.
 */
521 alloc_obj (int size, gboolean pinned, gboolean has_references)
523 int size_index = MS_BLOCK_OBJ_SIZE_INDEX (size);
524 MSBlockInfo **free_blocks = FREE_BLOCKS (pinned, has_references);
528 /* FIXME: try to do this without locking */
532 if (!free_blocks [size_index])
533 ms_alloc_block (size_index, pinned, has_references);
535 block = free_blocks [size_index];
536 DEBUG (9, g_assert (block));
/* Pop the first slot off the block's intrusive free list. */
538 obj = block->free_list;
539 DEBUG (9, g_assert (obj));
541 block->free_list = *(void**)obj;
542 if (!block->free_list) {
543 free_blocks [size_index] = block->next_free;
544 block->next_free = NULL;
547 UNLOCK_MS_BLOCK_LIST;
550 * FIXME: This should not be necessary because it'll be
551 * overwritten by the vtable immediately.
/* Public entry point for non-pinned major-heap allocation. */
559 major_alloc_object (int size, gboolean has_references)
561 return alloc_obj (size, FALSE, has_references);
565 * We're not freeing the block if it's empty. We leave that work for
566 * the next major collection.
568 * This is just called from the domain clearing code, which runs in a
569 * single thread and has the GC lock, so we don't need an extra lock.
/* Return an object's slot to its block's free list and zero it. */
572 free_object (char *obj, size_t size, gboolean pinned)
574 MSBlockInfo *block = MS_BLOCK_FOR_OBJ (obj);
576 DEBUG (9, g_assert ((pinned && block->pinned) || (!pinned && !block->pinned)));
577 DEBUG (9, g_assert (MS_OBJ_ALLOCED (obj, block)));
578 MS_CALC_MARK_BIT (word, bit, obj);
579 DEBUG (9, g_assert (!MS_MARK_BIT (block, word, bit)));
/* A previously-full block is on no free list - relink it now. */
580 if (!block->free_list) {
581 MSBlockInfo **free_blocks = FREE_BLOCKS (pinned, block->has_references);
582 int size_index = MS_BLOCK_OBJ_SIZE_INDEX (size);
583 DEBUG (9, g_assert (!block->next_free));
584 block->next_free = free_blocks [size_index];
585 free_blocks [size_index] = block;
/* Zero the slot and push it onto the block's free list. */
587 memset (obj, 0, size);
588 *(void**)obj = block->free_list;
589 block->free_list = (void**)obj;
593 major_free_non_pinned_object (char *obj, size_t size)
595 free_object (obj, size, FALSE);
598 /* size is a multiple of SGEN_ALLOC_ALIGN */
600 major_alloc_small_pinned_obj (size_t size, gboolean has_references)
602 return alloc_obj (size, TRUE, has_references);
606 free_pinned_object (char *obj, size_t size)
608 free_object (obj, size, TRUE);
612 * size is already rounded up and we hold the GC lock.
/*
 * major_alloc_degraded: allocate directly in the major heap during
 * degraded allocation and account for any sections the allocation
 * brought into existence.
 */
615 major_alloc_degraded (MonoVTable *vtable, size_t size)
618 int old_num_sections = num_major_sections;
619 obj = alloc_obj (size, FALSE, vtable->klass->has_references);
620 *(MonoVTable**)obj = vtable;
621 HEAVY_STAT (++stat_objects_alloced_degraded);
622 HEAVY_STAT (stat_bytes_alloced_degraded += size);
623 g_assert (num_major_sections >= old_num_sections);
624 mono_sgen_register_major_sections_alloced (num_major_sections - old_num_sections);
628 #define MAJOR_OBJ_IS_IN_TO_SPACE(obj) FALSE
631 * obj is some object. If it's not in the major heap (i.e. if it's in
632 * the nursery or LOS), return FALSE. Otherwise return whether it's
633 * been marked or copied.
636 major_is_object_live (char *obj)
644 if (ptr_in_nursery (obj))
649 if (!MS_PTR_IN_SMALL_MAJOR_HEAP (obj))
/* Objects above the small-object limit belong to LOS, not to us. */
652 objsize = SGEN_ALIGN_UP (mono_sgen_safe_object_get_size ((MonoObject*)obj));
655 if (objsize > SGEN_MAX_SMALL_OBJ_SIZE)
659 /* now we know it's in a major block */
660 block = MS_BLOCK_FOR_OBJ (obj);
661 DEBUG (9, g_assert (!block->pinned));
662 MS_CALC_MARK_BIT (word, bit, obj);
663 return MS_MARK_BIT (block, word, bit) ? TRUE : FALSE;
/* Not meaningful for mark-sweep - must never be called. */
667 major_ptr_is_in_non_pinned_space (char *ptr)
669 g_assert_not_reached ();
/*
 * major_iterate_objects: invoke `callback` on every allocated object
 * in blocks matching the non_pinned/pinned filters.
 */
673 major_iterate_objects (gboolean non_pinned, gboolean pinned, IterateObjectCallbackFunc callback, void *data)
677 FOREACH_BLOCK (block) {
678 int count = MS_BLOCK_FREE / block->obj_size;
681 if (block->pinned && !pinned)
683 if (!block->pinned && !non_pinned)
686 for (i = 0; i < count; ++i) {
687 void **obj = (void**) MS_BLOCK_OBJ (block, i);
688 if (MS_OBJ_ALLOCED (obj, block))
689 callback ((char*)obj, block->obj_size, data);
/* Nothing to check: this collector keeps no scan starts. */
695 major_check_scan_starts (void)
/* Emit per-block occupancy sections into the heap-dump file. */
700 major_dump_heap (FILE *heap_dump_file)
704 FOREACH_BLOCK (block) {
705 int count = MS_BLOCK_FREE / block->obj_size;
709 fprintf (heap_dump_file, "<section type=\"%s\" size=\"%zu\">\n", "old", (size_t)MS_BLOCK_FREE);
/* Iterate one past the last slot so a trailing occupied run is flushed. */
711 for (i = 0; i <= count; ++i) {
712 if ((i < count) && MS_OBJ_ALLOCED (MS_BLOCK_OBJ (block, i), block)) {
717 mono_sgen_dump_occupied (MS_BLOCK_OBJ (block, start), MS_BLOCK_OBJ (block, i), block->block);
723 fprintf (heap_dump_file, "</section>\n");
727 #define LOAD_VTABLE SGEN_LOAD_VTABLE
/*
 * Marking macros: set the object's mark bit and, if its block can
 * contain references, push it onto the gray queue.  The CHECKED
 * variant additionally tolerates pointers to unallocated slots
 * (used when marking from the pin queue); the PAR variant sets the
 * mark bit atomically for parallel marking.
 */
729 #define MS_MARK_OBJECT_AND_ENQUEUE_CHECKED(obj,block,queue) do { \
731 MS_CALC_MARK_BIT (__word, __bit, (obj)); \
732 if (!MS_MARK_BIT ((block), __word, __bit) && MS_OBJ_ALLOCED ((obj), (block))) { \
733 MS_SET_MARK_BIT ((block), __word, __bit); \
734 if ((block)->has_references) \
735 GRAY_OBJECT_ENQUEUE ((queue), (obj)); \
736 binary_protocol_mark ((obj), (gpointer)LOAD_VTABLE ((obj)), mono_sgen_safe_object_get_size ((MonoObject*)(obj))); \
739 #define MS_MARK_OBJECT_AND_ENQUEUE(obj,block,queue) do { \
741 MS_CALC_MARK_BIT (__word, __bit, (obj)); \
742 DEBUG (9, g_assert (MS_OBJ_ALLOCED ((obj), (block)))); \
743 if (!MS_MARK_BIT ((block), __word, __bit)) { \
744 MS_SET_MARK_BIT ((block), __word, __bit); \
745 if ((block)->has_references) \
746 GRAY_OBJECT_ENQUEUE ((queue), (obj)); \
747 binary_protocol_mark ((obj), (gpointer)LOAD_VTABLE ((obj)), mono_sgen_safe_object_get_size ((MonoObject*)(obj))); \
750 #define MS_PAR_MARK_OBJECT_AND_ENQUEUE(obj,block,queue) do { \
752 gboolean __was_marked; \
753 DEBUG (9, g_assert (MS_OBJ_ALLOCED ((obj), (block)))); \
754 MS_CALC_MARK_BIT (__word, __bit, (obj)); \
755 MS_PAR_SET_MARK_BIT (__was_marked, (block), __word, __bit); \
756 if (!__was_marked) { \
757 if ((block)->has_references) \
758 GRAY_OBJECT_ENQUEUE ((queue), (obj)); \
759 binary_protocol_mark ((obj), (gpointer)LOAD_VTABLE ((obj)), mono_sgen_safe_object_get_size ((MonoObject*)(obj))); \
763 #include "sgen-major-copy-object.h"
765 #ifdef SGEN_PARALLEL_MARK
/*
 * major_copy_or_mark_object (parallel-mark variant): for nursery
 * objects, race to install a forwarding pointer via CAS and copy on
 * success; for small major-heap objects, set the mark bit
 * atomically; large (LOS) objects are pinned by CAS-ing the pinned
 * bit into the vtable word.
 */
767 major_copy_or_mark_object (void **ptr, SgenGrayQueue *queue)
770 mword vtable_word = *(mword*)obj;
771 MonoVTable *vt = (MonoVTable*)(vtable_word & ~SGEN_VTABLE_BITS_MASK);
775 HEAVY_STAT (++stat_copy_object_called_major);
777 DEBUG (9, g_assert (obj));
778 DEBUG (9, g_assert (current_collection_generation == GENERATION_OLD));
780 if (ptr_in_nursery (obj)) {
782 gboolean has_references;
/* Already forwarded or pinned by another thread - nothing to copy. */
785 if (vtable_word & SGEN_FORWARDED_BIT) {
790 if (vtable_word & SGEN_PINNED_BIT)
793 HEAVY_STAT (++stat_objects_copied_major);
795 objsize = SGEN_ALIGN_UP (mono_sgen_par_object_get_size (vt, (MonoObject*)obj));
796 has_references = SGEN_VTABLE_HAS_REFERENCES (vt);
798 destination = major_alloc_object (objsize, has_references);
/* Race to install the forwarding pointer; the winner does the copy. */
800 if (SGEN_CAS_PTR (obj, (void*)((mword)destination | SGEN_FORWARDED_BIT), vt) == vt) {
803 par_copy_object_no_checks (destination, vt, obj, objsize, has_references ? queue : NULL);
808 * FIXME: If we make major_alloc_object() give
809 * us the block info, too, we won't have to
812 block = MS_BLOCK_FOR_OBJ (obj);
813 MS_CALC_MARK_BIT (word, bit, obj);
814 DEBUG (9, g_assert (!MS_MARK_BIT (block, word, bit)));
815 MS_PAR_SET_MARK_BIT (was_marked, block, word, bit);
818 * FIXME: We have allocated destination, but
819 * we cannot use it. Give it back to the
822 *(void**)destination = NULL;
/* We lost the race: follow the winner's forwarding pointer. */
824 vtable_word = *(mword*)obj;
825 g_assert (vtable_word & SGEN_FORWARDED_BIT);
827 obj = (void*)(vtable_word & ~SGEN_VTABLE_BITS_MASK);
833 if (MS_PTR_IN_SMALL_MAJOR_HEAP (obj))
835 objsize = SGEN_ALIGN_UP (mono_sgen_par_object_get_size (vt, (MonoObject*)obj));
837 if (objsize <= SGEN_MAX_SMALL_OBJ_SIZE)
840 block = MS_BLOCK_FOR_OBJ (obj);
841 MS_PAR_MARK_OBJECT_AND_ENQUEUE (obj, block, queue);
/* Large object: pin it by atomically setting the pinned bit. */
843 if (vtable_word & SGEN_PINNED_BIT)
845 binary_protocol_pin (obj, vt, mono_sgen_safe_object_get_size ((MonoObject*)obj));
846 if (SGEN_CAS_PTR (obj, (void*)(vtable_word | SGEN_PINNED_BIT), (void*)vtable_word) == (void*)vtable_word) {
847 if (SGEN_VTABLE_HAS_REFERENCES (vt))
848 GRAY_OBJECT_ENQUEUE (queue, obj);
850 g_assert (SGEN_OBJECT_IS_PINNED (obj));
/*
 * major_copy_or_mark_object (serial variant): copy nursery objects
 * into the major heap and mark them; mark small major-heap objects
 * in place; pin large (LOS) objects and enqueue them for scanning.
 */
857 major_copy_or_mark_object (void **ptr, SgenGrayQueue *queue)
863 HEAVY_STAT (++stat_copy_object_called_major);
865 DEBUG (9, g_assert (obj));
866 DEBUG (9, g_assert (current_collection_generation == GENERATION_OLD));
868 if (ptr_in_nursery (obj)) {
872 if ((forwarded = SGEN_OBJECT_IS_FORWARDED (obj))) {
876 if (SGEN_OBJECT_IS_PINNED (obj))
879 HEAVY_STAT (++stat_objects_copied_major);
881 obj = copy_object_no_checks (obj, queue);
885 * FIXME: See comment for copy_object_no_checks(). If
886 * we have that, we can let the allocation function
887 * give us the block info, too, and we won't have to
/* Mark the freshly copied object in its new block. */
890 block = MS_BLOCK_FOR_OBJ (obj);
891 MS_CALC_MARK_BIT (word, bit, obj);
892 DEBUG (9, g_assert (!MS_MARK_BIT (block, word, bit)));
893 MS_SET_MARK_BIT (block, word, bit);
896 if (MS_PTR_IN_SMALL_MAJOR_HEAP (obj))
898 objsize = SGEN_ALIGN_UP (mono_sgen_safe_object_get_size ((MonoObject*)obj));
900 if (objsize <= SGEN_MAX_SMALL_OBJ_SIZE)
903 block = MS_BLOCK_FOR_OBJ (obj);
904 MS_MARK_OBJECT_AND_ENQUEUE (obj, block, queue);
/* Large object: pin in place and enqueue for scanning. */
906 if (SGEN_OBJECT_IS_PINNED (obj))
908 binary_protocol_pin (obj, (gpointer)SGEN_LOAD_VTABLE (obj), mono_sgen_safe_object_get_size ((MonoObject*)obj));
909 SGEN_PIN_OBJECT (obj);
910 /* FIXME: only enqueue if object has references */
911 GRAY_OBJECT_ENQUEUE (queue, obj);
917 #include "sgen-major-scan-object.h"
/*
 * Mark (and enqueue) every pinned object in this block, using the
 * block's precomputed slice of the pin queue.  Multiple queue
 * entries resolving to the same slot are deduplicated via
 * last_index.
 */
920 mark_pinned_objects_in_block (MSBlockInfo *block, SgenGrayQueue *queue)
924 int count = MS_BLOCK_FREE / block->obj_size;
926 for (i = 0; i < block->pin_queue_num_entries; ++i) {
927 int index = MS_BLOCK_OBJ_INDEX (block->pin_queue_start [i], block);
928 DEBUG (9, g_assert (index >= 0 && index < count));
929 if (index == last_index)
931 MS_MARK_OBJECT_AND_ENQUEUE_CHECKED (MS_BLOCK_OBJ (block, index), block, queue);
/*
 * Sweep (body of what appears to be major_sweep - TODO confirm, the
 * function header is not visible here): rebuild all free lists from
 * the mark bits, zero dead objects, reset mark words, and reclaim
 * blocks with no live objects.
 */
946 /* clear all the free lists */
947 for (i = 0; i < MS_BLOCK_TYPE_MAX; ++i) {
948 MSBlockInfo **free_blocks = free_block_lists [i];
950 for (j = 0; j < num_block_obj_sizes; ++j)
951 free_blocks [j] = NULL;
954 /* traverse all blocks, free and zero unmarked objects */
956 for (j = 0; j < ms_heap_num_blocks; ++j) {
957 MSBlockInfo *block = &block_infos [j];
961 MSBlockInfo *block = *iter;
964 gboolean have_live = FALSE;
972 count = MS_BLOCK_FREE / block->obj_size;
973 block->free_list = NULL;
975 for (obj_index = 0; obj_index < count; ++obj_index) {
977 void *obj = MS_BLOCK_OBJ (block, obj_index);
979 MS_CALC_MARK_BIT (word, bit, obj);
980 if (MS_MARK_BIT (block, word, bit)) {
981 DEBUG (9, g_assert (MS_OBJ_ALLOCED (obj, block)));
984 /* an unmarked object */
985 if (MS_OBJ_ALLOCED (obj, block)) {
986 binary_protocol_empty (obj, block->obj_size);
987 memset (obj, 0, block->obj_size);
/* Dead or never-allocated slot: thread it onto the block's free list. */
989 *(void**)obj = block->free_list;
990 block->free_list = obj;
994 /* reset mark bits */
995 memset (block->mark_words, 0, sizeof (mword) * MS_NUM_MARK_WORDS);
998 * FIXME: reverse free list so that it's in address
1004 iter = &block->next;
1008 * If there are free slots in the block, add
1009 * the block to the corresponding free list.
1011 if (block->free_list) {
1012 MSBlockInfo **free_blocks = FREE_BLOCKS (block->pinned, block->has_references);
1013 int index = MS_BLOCK_OBJ_SIZE_INDEX (block->obj_size);
1014 block->next_free = free_blocks [index];
1015 free_blocks [index] = block;
1019 * Blocks without live objects are removed from the
1020 * block list and freed.
1023 ms_free_block (block);
1025 *iter = block->next;
1027 ms_free_block (block->block);
1028 mono_sgen_free_internal (block, INTERNAL_MEM_MS_BLOCK_INFO);
1031 --num_major_sections;
/* Debug counters for the object census helper below. */
1036 static int count_pinned_ref;
1037 static int count_pinned_nonref;
1038 static int count_nonpinned_ref;
1039 static int count_nonpinned_nonref;
/* Census callback for non-pinned objects. */
1042 count_nonpinned_callback (char *obj, size_t size, void *data)
1044 MonoVTable *vtable = (MonoVTable*)LOAD_VTABLE (obj);
1046 if (vtable->klass->has_references)
1047 ++count_nonpinned_ref;
1049 ++count_nonpinned_nonref;
/* Census callback for pinned objects. */
1053 count_pinned_callback (char *obj, size_t size, void *data)
1055 MonoVTable *vtable = (MonoVTable*)LOAD_VTABLE (obj);
1057 if (vtable->klass->has_references)
1060 ++count_pinned_nonref;
/* Debug helper: print a census of ref/non-ref, pinned/non-pinned objects. */
1063 static void __attribute__ ((unused))
1064 count_ref_nonref_objs (void)
1068 count_pinned_ref = 0;
1069 count_pinned_nonref = 0;
1070 count_nonpinned_ref = 0;
1071 count_nonpinned_nonref = 0;
1073 major_iterate_objects (TRUE, FALSE, count_nonpinned_callback, NULL);
1074 major_iterate_objects (FALSE, TRUE, count_pinned_callback, NULL);
1076 total = count_pinned_nonref + count_nonpinned_nonref + count_pinned_ref + count_nonpinned_ref;
1078 g_print ("ref: %d pinned %d non-pinned non-ref: %d pinned %d non-pinned -- %.1f\n",
1079 count_pinned_ref, count_nonpinned_ref,
1080 count_pinned_nonref, count_nonpinned_nonref,
1081 (count_pinned_nonref + count_nonpinned_nonref) * 100.0 / total);
/*
 * Compute the geometric series of block object sizes (growing by
 * `factor`), aligned down to SGEN_ALLOC_ALIGN and capped at
 * SGEN_MAX_SMALL_OBJ_SIZE.  With arr == NULL only counts how many
 * distinct sizes there are; otherwise fills `arr`.  Returns the
 * number of size classes.
 */
1085 ms_calculate_block_obj_sizes (double factor, int *arr)
1087 double target_size = sizeof (MonoObject);
1092 int target_count = ceil (MS_BLOCK_FREE / target_size);
1093 int size = MIN ((MS_BLOCK_FREE / target_count) & ~(SGEN_ALLOC_ALIGN - 1), SGEN_MAX_SMALL_OBJ_SIZE);
/* Alignment can collapse neighbouring targets onto the same size - dedupe. */
1095 if (size != last_size) {
1097 arr [num_sizes] = size;
1102 target_size *= factor;
1103 } while (last_size < SGEN_MAX_SMALL_OBJ_SIZE);
1108 /* only valid during minor collections */
1109 static int old_num_major_sections;
/* Snapshot the section count so the minor collection can report growth. */
1112 major_start_nursery_collection (void)
1114 #ifdef MARKSWEEP_CONSISTENCY_CHECK
1115 consistency_check ();
1118 old_num_major_sections = num_major_sections;
/* Report sections allocated (via promotion) during this minor collection. */
1122 major_finish_nursery_collection (void)
1124 #ifdef MARKSWEEP_CONSISTENCY_CHECK
1125 consistency_check ();
1127 mono_sgen_register_major_sections_alloced (num_major_sections - old_num_major_sections);
/*
 * After a major collection, return surplus empty blocks to the OS,
 * keeping enough in reserve for the minor-collection allowance.
 */
1131 major_finish_major_collection (void)
1134 int section_reserve = mono_sgen_get_minor_collection_allowance () / MS_BLOCK_SIZE;
1137 * FIXME: We don't free blocks on 32 bit platforms because it
1138 * can lead to address space fragmentation, since we're
1139 * allocating blocks in larger contingents.
1141 if (sizeof (mword) < 8)
1144 while (num_empty_blocks > section_reserve) {
1145 void *next = *(void**)empty_blocks;
1146 mono_sgen_free_os_memory (empty_blocks, MS_BLOCK_SIZE);
1147 empty_blocks = next;
1149 * Needs not be atomic because this is running
1154 ++stat_major_blocks_freed;
/* Compute each block's slice of the optimized (sorted) pin queue. */
1160 major_find_pin_queue_start_ends (SgenGrayQueue *queue)
1164 FOREACH_BLOCK (block) {
1165 block->pin_queue_start = mono_sgen_find_optimized_pin_queue_area (block->block + MS_BLOCK_SKIP, block->block + MS_BLOCK_SIZE,
1166 &block->pin_queue_num_entries);
1167 } END_FOREACH_BLOCK;
/* Mark the pinned objects of every block. */
1171 major_pin_objects (SgenGrayQueue *queue)
1175 FOREACH_BLOCK (block) {
1176 mark_pinned_objects_in_block (block, queue);
1177 } END_FOREACH_BLOCK;
/* Mark-sweep is not copying during major collections: no to-space to set up. */
1181 major_init_to_space (void)
/* Not supported by this collector. */
1186 major_report_pinned_memory_usage (void)
1188 g_assert_not_reached ();
/*
 * Sum the occupied bytes of all blocks: full capacity minus one
 * obj_size per entry remaining on each block's free list.
 */
1192 major_get_used_size (void)
1197 FOREACH_BLOCK (block) {
1198 int count = MS_BLOCK_FREE / block->obj_size;
1200 size += count * block->obj_size;
1201 for (iter = block->free_list; iter; iter = (void**)*iter)
1202 size -= block->obj_size;
1203 } END_FOREACH_BLOCK;
/* Current number of allocated major sections (= blocks). */
1209 get_num_major_sections (void)
1211 return num_major_sections;
/* Parse the "major-heap-size=N" option (fixed-heap configuration only). */
1216 major_handle_gc_param (const char *opt)
1218 if (g_str_has_prefix (opt, "major-heap-size=")) {
1219 const char *arg = strchr (opt, '=') + 1;
1221 if (!mono_sgen_parse_environment_string_extract_number (arg, &size))
/* Round the requested size up to a whole number of blocks. */
1223 ms_heap_num_blocks = (size + MS_BLOCK_SIZE - 1) / MS_BLOCK_SIZE;
1224 g_assert (ms_heap_num_blocks > 0);
/* Describe the options accepted above. */
1232 major_print_gc_param_usage (void)
1234 fprintf (stderr, " major-heap-size=N (where N is an integer, possibly with a k, m or a g suffix)\n");
/*
 * Collector entry point: compute the size classes, build the fast
 * size-index lookup table, register counters, and fill in the
 * SgenMajorCollector interface.  The exported function name encodes
 * the parallel/fixed-heap compile-time configuration.
 */
1239 #ifdef SGEN_PARALLEL_MARK
1241 mono_sgen_marksweep_fixed_par_init
1243 mono_sgen_marksweep_par_init
1247 mono_sgen_marksweep_fixed_init
1249 mono_sgen_marksweep_init
1252 (SgenMajorCollector *collector)
1257 mono_sgen_register_fixed_internal_mem_type (INTERNAL_MEM_MS_BLOCK_INFO, sizeof (MSBlockInfo));
/* First pass counts the size classes, second pass fills the table. */
1260 num_block_obj_sizes = ms_calculate_block_obj_sizes (MS_BLOCK_OBJ_SIZE_FACTOR, NULL);
1261 block_obj_sizes = mono_sgen_alloc_internal_dynamic (sizeof (int) * num_block_obj_sizes, INTERNAL_MEM_MS_TABLES);
1262 ms_calculate_block_obj_sizes (MS_BLOCK_OBJ_SIZE_FACTOR, block_obj_sizes);
1267 g_print ("block object sizes:\n");
1268 for (i = 0; i < num_block_obj_sizes; ++i)
1269 g_print ("%d\n", block_obj_sizes [i]);
1273 for (i = 0; i < MS_BLOCK_TYPE_MAX; ++i)
1274 free_block_lists [i] = mono_sgen_alloc_internal_dynamic (sizeof (MSBlockInfo*) * num_block_obj_sizes, INTERNAL_MEM_MS_TABLES);
/* Precompute the fast size-index table and verify it matches the slow path. */
1276 for (i = 0; i < MS_NUM_FAST_BLOCK_OBJ_SIZE_INDEXES; ++i)
1277 fast_block_obj_size_indexes [i] = ms_find_block_obj_size_index (i * 8);
1278 for (i = 0; i < MS_NUM_FAST_BLOCK_OBJ_SIZE_INDEXES * 8; ++i)
1279 g_assert (MS_BLOCK_OBJ_SIZE_INDEX (i) == ms_find_block_obj_size_index (i));
1281 LOCK_INIT (ms_block_list_mutex);
1283 mono_counters_register ("# major blocks allocated", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_major_blocks_alloced);
1284 mono_counters_register ("# major blocks freed", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_major_blocks_freed);
/* Fill in the collector's function and attribute table. */
1286 collector->section_size = MAJOR_SECTION_SIZE;
1287 #ifdef SGEN_PARALLEL_MARK
1288 collector->is_parallel = TRUE;
1290 collector->is_parallel = FALSE;
1293 collector->alloc_heap = major_alloc_heap;
1294 collector->is_object_live = major_is_object_live;
1295 collector->alloc_small_pinned_obj = major_alloc_small_pinned_obj;
1296 collector->alloc_degraded = major_alloc_degraded;
1297 collector->copy_or_mark_object = major_copy_or_mark_object;
1298 collector->alloc_object = major_alloc_object;
1299 collector->free_pinned_object = free_pinned_object;
1300 collector->iterate_objects = major_iterate_objects;
1301 collector->free_non_pinned_object = major_free_non_pinned_object;
1302 collector->find_pin_queue_start_ends = major_find_pin_queue_start_ends;
1303 collector->pin_objects = major_pin_objects;
1304 collector->init_to_space = major_init_to_space;
1305 collector->sweep = major_sweep;
1306 collector->check_scan_starts = major_check_scan_starts;
1307 collector->dump_heap = major_dump_heap;
1308 collector->get_used_size = major_get_used_size;
1309 collector->start_nursery_collection = major_start_nursery_collection;
1310 collector->finish_nursery_collection = major_finish_nursery_collection;
1311 collector->finish_major_collection = major_finish_major_collection;
1312 collector->ptr_is_in_non_pinned_space = major_ptr_is_in_non_pinned_space;
1313 collector->obj_is_from_pinned_alloc = obj_is_from_pinned_alloc;
1314 collector->report_pinned_memory_usage = major_report_pinned_memory_usage;
1315 collector->get_num_major_sections = get_num_major_sections;
1317 collector->handle_gc_param = major_handle_gc_param;
1318 collector->print_gc_param_usage = major_print_gc_param_usage;
1320 collector->handle_gc_param = NULL;
1321 collector->print_gc_param_usage = NULL;
1324 FILL_COLLECTOR_COPY_OBJECT (collector);
1325 FILL_COLLECTOR_SCAN_OBJECT (collector);