#define MS_BLOCK_SIZE	(16*1024)
#define MAJOR_SECTION_SIZE	MS_BLOCK_SIZE

/*
 * Don't allocate single blocks; instead, allocate a contingent of
 * this many blocks in one swoop.
 */
#define MS_BLOCK_ALLOC_NUM	32

/*
 * Number of bytes before the first object in a block.  At the start
 * of a block is the MSBlockHeader, then optional padding, then come
 * the objects, so this must be >= sizeof (MSBlockHeader).
 */
#define MS_BLOCK_SKIP	16

#define MS_BLOCK_FREE	(MS_BLOCK_SIZE - MS_BLOCK_SKIP)

#define MS_NUM_MARK_WORDS	((MS_BLOCK_SIZE / ALLOC_ALIGN + sizeof (mword) * 8 - 1) / (sizeof (mword) * 8))
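
/*
 * Worked example, assuming ALLOC_ALIGN is 8: a 16 KB block has
 * 16384 / 8 = 2048 possible object start addresses, hence 2048 mark
 * bits, which is 2048 / 64 = 32 mark words on a 64-bit target and
 * 64 mark words on a 32-bit one.
 */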

#if MAX_SMALL_OBJ_SIZE > MS_BLOCK_FREE / 2
#error MAX_SMALL_OBJ_SIZE must be at most (MS_BLOCK_SIZE - MS_BLOCK_SKIP) / 2
#endif

typedef struct _MSBlockInfo MSBlockInfo;
struct _MSBlockInfo {
	int obj_size;
	gboolean pinned;
	gboolean has_references;
	char *block;
	void **free_list;
	MSBlockInfo *next_free;
	MSBlockInfo *next;
	int pin_queue_start;
	int pin_queue_end;
	mword mark_words [MS_NUM_MARK_WORDS];
};

#define MS_BLOCK_OBJ(b,i)	((b)->block + MS_BLOCK_SKIP + (b)->obj_size * (i))

typedef struct {
	MSBlockInfo *info;
} MSBlockHeader;

#define MS_BLOCK_FOR_OBJ(o)	(((MSBlockHeader*)((mword)(o) & ~(MS_BLOCK_SIZE-1)))->info)

#define MS_BLOCK_OBJ_INDEX(o,b)	(((char*)(o) - ((b)->block + MS_BLOCK_SKIP)) / (b)->obj_size)
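
/*
 * Example of the addressing scheme: blocks are MS_BLOCK_SIZE-aligned
 * (ms_get_empty_block () asserts this), so for an object at, say,
 * block start + 0x130, masking with ~(MS_BLOCK_SIZE-1) recovers the
 * block start, where the MSBlockHeader lives, and
 * (0x130 - MS_BLOCK_SKIP) / obj_size is the object's slot index.
 */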

#define MS_CALC_MARK_BIT(w,b,o,bl)	do {			\
		int i = ((char*)(o) - (bl)->block) >> ALLOC_ALIGN_BITS; \
		if (sizeof (mword) == 4) {			\
			(w) = i >> 5;				\
			(b) = i & 31;				\
		} else {					\
			(w) = i >> 6;				\
			(b) = i & 63;				\
		}						\
	} while (0)

#define MS_MARK_BIT(bl,w,b)	((bl)->mark_words [(w)] & (1L << (b)))
#define MS_SET_MARK_BIT(bl,w,b)	((bl)->mark_words [(w)] |= (1L << (b)))
#define MS_PAR_SET_MARK_BIT(was_marked,bl,w,b)	do {		\
		mword __old = (bl)->mark_words [(w)];		\
		mword __bitmask = 1L << (b);			\
		if (__old & __bitmask) {			\
			(was_marked) = TRUE;			\
			break;					\
		}						\
		if (SGEN_CAS_PTR ((gpointer*)&(bl)->mark_words [(w)], \
				(gpointer)(__old | __bitmask),	\
				(gpointer)__old) == (gpointer)__old) { \
			(was_marked) = FALSE;			\
			break;					\
		}						\
	} while (1)

#define MS_OBJ_ALLOCED(o,b)	(*(void**)(o) && (*(char**)(o) < (b)->block || *(char**)(o) >= (b)->block + MS_BLOCK_SIZE))
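
/*
 * Why this test works: a free slot's first word is either NULL or a
 * free-list link, and links always point to another slot inside the
 * same block.  An allocated object's first word is its vtable
 * pointer, which never points into the block.  So "non-NULL and
 * pointing outside the block" is equivalent to "allocated".
 */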

#define MS_BLOCK_OBJ_SIZE_FACTOR	(sqrt (2.0))

/*
 * This way we can look up block object size indexes for sizes up to
 * 256 bytes with a single load.
 */
#define MS_NUM_FAST_BLOCK_OBJ_SIZE_INDEXES	32

static int *block_obj_sizes;
static int num_block_obj_sizes;
static int fast_block_obj_size_indexes [MS_NUM_FAST_BLOCK_OBJ_SIZE_INDEXES];

#define MS_BLOCK_FLAG_PINNED	1
#define MS_BLOCK_FLAG_REFS	2

#define MS_BLOCK_TYPE_MAX	4
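
/*
 * The four block types are the combinations of the two flags above:
 * {non-pinned, pinned} x {no references, references}.  Each type
 * gets its own array of per-size free lists (see free_block_lists
 * and FREE_BLOCKS () below).
 */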

#ifdef SGEN_PARALLEL_MARK
static LOCK_DECLARE (ms_block_list_mutex);
#define LOCK_MS_BLOCK_LIST pthread_mutex_lock (&ms_block_list_mutex)
#define UNLOCK_MS_BLOCK_LIST pthread_mutex_unlock (&ms_block_list_mutex)
#else
#define LOCK_MS_BLOCK_LIST
#define UNLOCK_MS_BLOCK_LIST
#endif

/* non-allocated block free-list */
static void *empty_blocks = NULL;
static int num_empty_blocks = 0;
/* all allocated blocks in the system */
static MSBlockInfo *all_blocks;
static int num_major_sections = 0;
/* one free block list for each block object size */
static MSBlockInfo **free_block_lists [MS_BLOCK_TYPE_MAX];

static long long stat_major_blocks_alloced = 0;
static long long stat_major_blocks_freed = 0;

static int
ms_find_block_obj_size_index (int size)
{
	int i;
	DEBUG (9, g_assert (size <= MAX_SMALL_OBJ_SIZE));
	for (i = 0; i < num_block_obj_sizes; ++i)
		if (block_obj_sizes [i] >= size)
			return i;
	g_assert_not_reached ();
}

#define FREE_BLOCKS(p,r)	(free_block_lists [((p) ? MS_BLOCK_FLAG_PINNED : 0) | ((r) ? MS_BLOCK_FLAG_REFS : 0)])

#define MS_BLOCK_OBJ_SIZE_INDEX(s)				\
	(((s)+7)>>3 < MS_NUM_FAST_BLOCK_OBJ_SIZE_INDEXES ?	\
	 fast_block_obj_size_indexes [((s)+7)>>3] :		\
	 ms_find_block_obj_size_index ((s)))
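
/*
 * Example: for size 40, ((40)+7)>>3 == 5, which is below the fast
 * table's 32 entries, so the size index is a single load; for size
 * 300, ((300)+7)>>3 == 38, so we fall back to the linear search in
 * ms_find_block_obj_size_index ().
 */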

static char*
ms_get_empty_block (void)
{
	char *p;
	int i;
	void *block, *empty, *next;

 retry:
	if (!empty_blocks) {
		p = mono_sgen_alloc_os_memory_aligned (MS_BLOCK_SIZE * MS_BLOCK_ALLOC_NUM, MS_BLOCK_SIZE, TRUE);
		for (i = 0; i < MS_BLOCK_ALLOC_NUM; ++i) {
			block = p;
			/*
			 * We do the free list update one after the
			 * other so that other threads can use the new
			 * blocks as quickly as possible.
			 */
			do {
				empty = empty_blocks;
				*(void**)block = empty;
			} while (SGEN_CAS_PTR (&empty_blocks, block, empty) != empty);
			p += MS_BLOCK_SIZE;
		}
		SGEN_ATOMIC_ADD (num_empty_blocks, MS_BLOCK_ALLOC_NUM);
		stat_major_blocks_alloced += MS_BLOCK_ALLOC_NUM;
	}

	do {
		empty = empty_blocks;
		if (!empty)
			goto retry;
		block = empty;
		next = *(void**)block;
	} while (SGEN_CAS_PTR (&empty_blocks, next, empty) != empty);

	SGEN_ATOMIC_ADD (num_empty_blocks, -1);

	*(void**)block = NULL;

	g_assert (!((mword)block & (MS_BLOCK_SIZE - 1)));

	mono_sgen_update_heap_boundaries ((mword)block, (mword)block + MS_BLOCK_SIZE);

	return block;
}

static void
ms_free_block (void *block)
{
	void *empty;

	memset (block, 0, MS_BLOCK_SIZE);

	do {
		empty = empty_blocks;
		*(void**)block = empty;
	} while (SGEN_CAS_PTR (&empty_blocks, block, empty) != empty);

	SGEN_ATOMIC_ADD (num_empty_blocks, 1);
}
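
/*
 * Note that empty_blocks is a lock-free LIFO list (a Treiber stack):
 * both functions above either link a block to the current head and
 * publish it with a CAS, or read the head and CAS its successor in,
 * retrying on contention.  This is why the first word of an empty
 * block holds the list link.
 */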

//#define MARKSWEEP_CONSISTENCY_CHECK

#ifdef MARKSWEEP_CONSISTENCY_CHECK
static void
check_block_free_list (MSBlockInfo *block, int size, gboolean pinned)
{
	MSBlockInfo *b;

	for (; block; block = block->next_free) {
		g_assert (block->obj_size == size);
		g_assert ((pinned && block->pinned) || (!pinned && !block->pinned));

		/* blocks in the free lists must have at least
		   one free slot */
		g_assert (block->free_list);

		/* the block must be in the all_blocks list */
		for (b = all_blocks; b; b = b->next) {
			if (b == block)
				break;
		}
		g_assert (b == block);
	}
}

static void
check_empty_blocks (void)
{
	void *p;
	int i = 0;
	for (p = empty_blocks; p; p = *(void**)p)
		++i;
	g_assert (i == num_empty_blocks);
}

static void
consistency_check (void)
{
	MSBlockInfo *block;
	int i;

	/* check all blocks */
	for (block = all_blocks; block; block = block->next) {
		int count = MS_BLOCK_FREE / block->obj_size;
		int num_free = 0;
		void **free;

		/* check block header */
		g_assert (((MSBlockHeader*)block->block)->info == block);

		/* count number of free slots */
		for (i = 0; i < count; ++i) {
			void **obj = (void**) MS_BLOCK_OBJ (block, i);
			if (!MS_OBJ_ALLOCED (obj, block))
				++num_free;
		}

		/* check free list */
		for (free = block->free_list; free; free = (void**)*free) {
			g_assert (MS_BLOCK_FOR_OBJ (free) == block);
			--num_free;
		}
		g_assert (num_free == 0);

		/* check all mark words are zero */
		for (i = 0; i < MS_NUM_MARK_WORDS; ++i)
			g_assert (block->mark_words [i] == 0);
	}

	/* check free blocks */
	for (i = 0; i < num_block_obj_sizes; ++i) {
		int j;
		for (j = 0; j < MS_BLOCK_TYPE_MAX; ++j)
			check_block_free_list (free_block_lists [j][i], block_obj_sizes [i], j & MS_BLOCK_FLAG_PINNED);
	}

	check_empty_blocks ();
}
#endif

static void
ms_alloc_block (int size_index, gboolean pinned, gboolean has_references)
{
	int size = block_obj_sizes [size_index];
	int count = MS_BLOCK_FREE / size;
	MSBlockInfo *info = mono_sgen_alloc_internal (INTERNAL_MEM_MS_BLOCK_INFO);
	MSBlockHeader *header;
	MSBlockInfo **free_blocks = FREE_BLOCKS (pinned, has_references);
	char *obj_start;
	int i;

	DEBUG (9, g_assert (count >= 2));

	info->obj_size = size;
	info->pinned = pinned;
	info->has_references = has_references;
	info->block = ms_get_empty_block ();

	header = (MSBlockHeader*) info->block;
	header->info = info;

	/* build free list */
	obj_start = info->block + MS_BLOCK_SKIP;
	info->free_list = (void**)obj_start;
	/* we're skipping the last one - it's already NULL */
	for (i = 0; i < count - 1; ++i) {
		char *next_obj_start = obj_start + size;
		*(void**)obj_start = next_obj_start;
		obj_start = next_obj_start;
	}

	info->next_free = free_blocks [size_index];
	free_blocks [size_index] = info;

	info->next = all_blocks;
	all_blocks = info;

	++num_major_sections;
}
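
/*
 * Resulting block layout, sketched for a hypothetical obj_size:
 *
 *   [MSBlockHeader | padding]   <- MS_BLOCK_SKIP bytes
 *   [slot 0] -> [slot 1] -> ... -> [slot count-1] = NULL
 *
 * Each free slot's first word points to the next slot; the last
 * slot's NULL is already in place because empty blocks are zeroed.
 */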

static gboolean
obj_is_from_pinned_alloc (char *obj)
{
	MSBlockInfo *block = MS_BLOCK_FOR_OBJ (obj);
	return block->pinned;
}

static void*
alloc_obj (int size, gboolean pinned, gboolean has_references)
{
	int size_index = MS_BLOCK_OBJ_SIZE_INDEX (size);
	MSBlockInfo **free_blocks = FREE_BLOCKS (pinned, has_references);
	MSBlockInfo *block;
	void *obj;

	/* FIXME: try to do this without locking */

	LOCK_MS_BLOCK_LIST;

	if (!free_blocks [size_index])
		ms_alloc_block (size_index, pinned, has_references);

	block = free_blocks [size_index];
	DEBUG (9, g_assert (block));

	obj = block->free_list;
	DEBUG (9, g_assert (obj));

	block->free_list = *(void**)obj;
	if (!block->free_list) {
		free_blocks [size_index] = block->next_free;
		block->next_free = NULL;
	}

	UNLOCK_MS_BLOCK_LIST;

	/*
	 * FIXME: This should not be necessary because it'll be
	 * overwritten by the vtable immediately.
	 */
	*(void**)obj = NULL;

	return obj;
}

static void*
ms_alloc_obj (int size, gboolean has_references)
{
	return alloc_obj (size, FALSE, has_references);
}

/* FIXME: inline fast path */
#define MAJOR_GET_COPY_OBJECT_SPACE(dest, size, refs) do {	\
		(dest) = ms_alloc_obj ((size), (refs));		\
	} while (0)

/*
 * We're not freeing the block if it's empty.  We leave that work for
 * the next major collection.
 *
 * This is just called from the domain clearing code, which runs in a
 * single thread and has the GC lock, so we don't need an extra lock.
 */
static void
free_object (char *obj, size_t size, gboolean pinned)
{
	MSBlockInfo *block = MS_BLOCK_FOR_OBJ (obj);
	int word, bit;
	DEBUG (9, g_assert ((pinned && block->pinned) || (!pinned && !block->pinned)));
	DEBUG (9, g_assert (MS_OBJ_ALLOCED (obj, block)));
	MS_CALC_MARK_BIT (word, bit, obj, block);
	DEBUG (9, g_assert (!MS_MARK_BIT (block, word, bit)));
	if (!block->free_list) {
		MSBlockInfo **free_blocks = FREE_BLOCKS (pinned, block->has_references);
		int size_index = MS_BLOCK_OBJ_SIZE_INDEX (size);
		DEBUG (9, g_assert (!block->next_free));
		block->next_free = free_blocks [size_index];
		free_blocks [size_index] = block;
	}
	memset (obj, 0, size);
	*(void**)obj = block->free_list;
	block->free_list = (void**)obj;
}
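
/*
 * Note that a block re-enters its size class's free list exactly
 * when it goes from "no free slots" to "one free slot" (free_list
 * was NULL above), so it can never be linked into that list twice.
 */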

static void
major_free_non_pinned_object (char *obj, size_t size)
{
	free_object (obj, size, FALSE);
}

/* size is a multiple of ALLOC_ALIGN */
static void*
major_alloc_small_pinned_obj (size_t size, gboolean has_references)
{
	return alloc_obj (size, TRUE, has_references);
}

static void
free_pinned_object (char *obj, size_t size)
{
	free_object (obj, size, TRUE);
}

/*
 * size is already rounded up and we hold the GC lock.
 */
static void*
major_alloc_degraded (MonoVTable *vtable, size_t size)
{
	void *obj;
	int old_num_sections = num_major_sections;
	obj = alloc_obj (size, FALSE, vtable->klass->has_references);
	*(MonoVTable**)obj = vtable;
	HEAVY_STAT (++stat_objects_alloced_degraded);
	HEAVY_STAT (stat_bytes_alloced_degraded += size);
	g_assert (num_major_sections >= old_num_sections);
	minor_collection_sections_alloced += num_major_sections - old_num_sections;
	return obj;
}

#define MAJOR_OBJ_IS_IN_TO_SPACE(obj)	FALSE

/*
 * obj is some object.  If it's not in the major heap (i.e. if it's
 * in the nursery or LOS), return FALSE.  Otherwise return whether
 * it's been marked or copied.
 */
static gboolean
major_is_object_live (char *obj)
{
	MSBlockInfo *block;
	int word, bit;
	mword objsize;

	if (ptr_in_nursery (obj))
		return FALSE;

	objsize = ALIGN_UP (safe_object_get_size ((MonoObject*)obj));

	/* LOS object */
	if (objsize > MAX_SMALL_OBJ_SIZE)
		return FALSE;

	/* now we know it's in a major block */
	block = MS_BLOCK_FOR_OBJ (obj);
	DEBUG (9, g_assert (!block->pinned));
	MS_CALC_MARK_BIT (word, bit, obj, block);
	return MS_MARK_BIT (block, word, bit) ? TRUE : FALSE;
}

static gboolean
major_ptr_is_in_non_pinned_space (char *ptr)
{
	g_assert_not_reached ();
}

static void
major_iterate_objects (gboolean non_pinned, gboolean pinned, IterateObjectCallbackFunc callback, void *data)
{
	MSBlockInfo *block;

	for (block = all_blocks; block; block = block->next) {
		int count = MS_BLOCK_FREE / block->obj_size;
		int i;

		if (block->pinned && !pinned)
			continue;
		if (!block->pinned && !non_pinned)
			continue;

		for (i = 0; i < count; ++i) {
			void **obj = (void**) MS_BLOCK_OBJ (block, i);
			if (MS_OBJ_ALLOCED (obj, block))
				callback ((char*)obj, block->obj_size, data);
		}
	}
}

#define major_check_scan_starts()

static void
major_dump_heap (void)
{
	MSBlockInfo *block;

	for (block = all_blocks; block; block = block->next) {
		int count = MS_BLOCK_FREE / block->obj_size;
		int i;
		int start = -1;

		fprintf (heap_dump_file, "<section type=\"%s\" size=\"%zu\">\n", "old", (size_t)MS_BLOCK_FREE);

		for (i = 0; i <= count; ++i) {
			if ((i < count) && MS_OBJ_ALLOCED (MS_BLOCK_OBJ (block, i), block)) {
				if (start < 0)
					start = i;
			} else if (start >= 0) {
				dump_occupied (MS_BLOCK_OBJ (block, start), MS_BLOCK_OBJ (block, i), block->block);
				start = -1;
			}
		}

		fprintf (heap_dump_file, "</section>\n");
	}
}

#define MS_MARK_OBJECT_AND_ENQUEUE_CHECKED(obj,block,queue) do { \
		int __word, __bit;				\
		MS_CALC_MARK_BIT (__word, __bit, (obj), (block)); \
		if (!MS_MARK_BIT ((block), __word, __bit) && MS_OBJ_ALLOCED ((obj), (block))) { \
			MS_SET_MARK_BIT ((block), __word, __bit); \
			if ((block)->has_references)		\
				GRAY_OBJECT_ENQUEUE ((queue), (obj)); \
			binary_protocol_mark ((obj), (gpointer)LOAD_VTABLE ((obj)), safe_object_get_size ((MonoObject*)(obj))); \
		}						\
	} while (0)
#define MS_MARK_OBJECT_AND_ENQUEUE(obj,block,queue) do {	\
		int __word, __bit;				\
		gboolean __was_marked;				\
		DEBUG (9, g_assert (MS_OBJ_ALLOCED ((obj), (block)))); \
		MS_CALC_MARK_BIT (__word, __bit, (obj), (block)); \
		MS_PAR_SET_MARK_BIT (__was_marked, (block), __word, __bit); \
		if (!__was_marked) {				\
			if ((block)->has_references)		\
				GRAY_OBJECT_ENQUEUE ((queue), (obj)); \
			binary_protocol_mark ((obj), (gpointer)LOAD_VTABLE ((obj)), safe_object_get_size ((MonoObject*)(obj))); \
		}						\
	} while (0)
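
/*
 * The two macros above differ in their assumptions: the CHECKED
 * variant must test MS_OBJ_ALLOCED () because pinning addresses can
 * refer to free slots, and it uses the non-atomic MS_SET_MARK_BIT ();
 * the other variant requires an allocated object and uses the
 * CAS-based MS_PAR_SET_MARK_BIT () so that, with parallel marking,
 * each object is enqueued only once.
 */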

static void
major_copy_or_mark_object (void **ptr, GrayQueue *queue)
{
	void *obj = *ptr;
	mword vtable_word = *(mword*)obj;
	MonoVTable *vt = (MonoVTable*)(vtable_word & ~VTABLE_BITS_MASK);
	mword objsize;
	MSBlockInfo *block;

	HEAVY_STAT (++stat_copy_object_called_major);

	DEBUG (9, g_assert (obj));
	DEBUG (9, g_assert (current_collection_generation == GENERATION_OLD));

	if (ptr_in_nursery (obj)) {
		int word, bit;
		gboolean has_references;
		void *destination;

		if (vtable_word & FORWARDED_BIT) {
			*ptr = (void*)vt;
			return;
		}

		if (vtable_word & PINNED_BIT)
			return;

		HEAVY_STAT (++stat_objects_copied_major);

		objsize = ALIGN_UP (par_object_get_size (vt, (MonoObject*)obj));
		has_references = VTABLE_HAS_REFERENCES (vt);

		destination = ms_alloc_obj (objsize, has_references);

		if (SGEN_CAS_PTR (obj, (void*)((mword)destination | FORWARDED_BIT), vt) == vt) {
			gboolean was_marked;

			par_copy_object_no_checks (destination, vt, obj, objsize, has_references ? queue : NULL);
			obj = destination;
			*ptr = obj;

			/*
			 * FIXME: If we make ms_alloc_obj() give us
			 * the block info, too, we won't have to
			 * re-fetch it here.
			 */
			block = MS_BLOCK_FOR_OBJ (obj);
			MS_CALC_MARK_BIT (word, bit, obj, block);
			DEBUG (9, g_assert (!MS_MARK_BIT (block, word, bit)));
			MS_PAR_SET_MARK_BIT (was_marked, block, word, bit);
		} else {
			/*
			 * FIXME: We have allocated destination, but
			 * we cannot use it.  Give it back to the
			 * allocator.
			 */
			*(void**)destination = NULL;

			vtable_word = *(mword*)obj;
			g_assert (vtable_word & FORWARDED_BIT);

			obj = (void*)(vtable_word & ~VTABLE_BITS_MASK);

			*ptr = obj;
		}
	} else {
		objsize = ALIGN_UP (par_object_get_size (vt, (MonoObject*)obj));

		if (objsize > MAX_SMALL_OBJ_SIZE) {
			if (vtable_word & PINNED_BIT)
				return;
			binary_protocol_pin (obj, vt, safe_object_get_size ((MonoObject*)obj));
			if (SGEN_CAS_PTR (obj, (void*)(vtable_word | PINNED_BIT), (void*)vtable_word) == (void*)vtable_word) {
				if (VTABLE_HAS_REFERENCES (vt))
					GRAY_OBJECT_ENQUEUE (queue, obj);
			} else {
				g_assert (object_is_pinned (obj));
			}
			return;
		}

		block = MS_BLOCK_FOR_OBJ (obj);
		MS_MARK_OBJECT_AND_ENQUEUE (obj, block, queue);
	}
}
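
/*
 * Summary of the protocol above: nursery objects are evacuated, with
 * the forwarding pointer installed by CAS so that only one thread
 * copies each object; the loser NULLs its allocation's first word so
 * that the next sweep reclaims the slot.  LOS objects are pinned by
 * CAS-ing PINNED_BIT into the vtable word.  Small major-heap objects
 * are simply marked in place.
 */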

static void
mark_pinned_objects_in_block (MSBlockInfo *block, GrayQueue *queue)
{
	int i;
	int last_index = -1;
	int count = MS_BLOCK_FREE / block->obj_size;

	for (i = block->pin_queue_start; i < block->pin_queue_end; ++i) {
		int index = MS_BLOCK_OBJ_INDEX (pin_queue [i], block);
		DEBUG (9, g_assert (index >= 0 && index < count));
		if (index == last_index)
			continue;
		MS_MARK_OBJECT_AND_ENQUEUE_CHECKED (MS_BLOCK_OBJ (block, index), block, queue);
		last_index = index;
	}
}

static void
major_sweep (void)
{
	int i;
	MSBlockInfo **iter;

	/* clear all the free lists */
	for (i = 0; i < MS_BLOCK_TYPE_MAX; ++i) {
		MSBlockInfo **free_blocks = free_block_lists [i];
		int j;
		for (j = 0; j < num_block_obj_sizes; ++j)
			free_blocks [j] = NULL;
	}

	/* traverse all blocks, free and zero unmarked objects */
	iter = &all_blocks;
	while (*iter) {
		MSBlockInfo *block = *iter;
		int count = MS_BLOCK_FREE / block->obj_size;
		gboolean have_live = FALSE;
		int obj_index;

		block->free_list = NULL;

		for (obj_index = 0; obj_index < count; ++obj_index) {
			int word, bit;
			void *obj = MS_BLOCK_OBJ (block, obj_index);

			MS_CALC_MARK_BIT (word, bit, obj, block);
			if (MS_MARK_BIT (block, word, bit)) {
				DEBUG (9, g_assert (MS_OBJ_ALLOCED (obj, block)));
				have_live = TRUE;
			} else {
				/* an unmarked object */
				if (MS_OBJ_ALLOCED (obj, block)) {
					binary_protocol_empty (obj, block->obj_size);
					memset (obj, 0, block->obj_size);
				}
				*(void**)obj = block->free_list;
				block->free_list = obj;
			}
		}

		/* reset mark bits */
		memset (block->mark_words, 0, sizeof (mword) * MS_NUM_MARK_WORDS);

		/*
		 * FIXME: reverse free list so that it's in address
		 * order
		 */

		if (have_live) {
			iter = &block->next;

			/*
			 * If there are free slots in the block, add
			 * the block to the corresponding free list.
			 */
			if (block->free_list) {
				MSBlockInfo **free_blocks = FREE_BLOCKS (block->pinned, block->has_references);
				int index = MS_BLOCK_OBJ_SIZE_INDEX (block->obj_size);
				block->next_free = free_blocks [index];
				free_blocks [index] = block;
			}
		} else {
			/*
			 * Blocks without live objects are removed from the
			 * block list and freed.
			 */
			*iter = block->next;

			ms_free_block (block->block);
			mono_sgen_free_internal (block, INTERNAL_MEM_MS_BLOCK_INFO);

			--num_major_sections;
		}
	}
}
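
/*
 * In short: sweep threads every unmarked slot onto its block's free
 * list (zeroing slots that still held objects), clears the mark bits
 * for the next collection, puts blocks with free slots back on their
 * size class's free list and returns completely dead blocks to
 * empty_blocks.
 */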

static int count_pinned_ref;
static int count_pinned_nonref;
static int count_nonpinned_ref;
static int count_nonpinned_nonref;

static void
count_nonpinned_callback (char *obj, size_t size, void *data)
{
	MonoVTable *vtable = (MonoVTable*)LOAD_VTABLE (obj);

	if (vtable->klass->has_references)
		++count_nonpinned_ref;
	else
		++count_nonpinned_nonref;
}

static void
count_pinned_callback (char *obj, size_t size, void *data)
{
	MonoVTable *vtable = (MonoVTable*)LOAD_VTABLE (obj);

	if (vtable->klass->has_references)
		++count_pinned_ref;
	else
		++count_pinned_nonref;
}

static void __attribute__ ((unused))
count_ref_nonref_objs (void)
{
	int total;

	count_pinned_ref = 0;
	count_pinned_nonref = 0;
	count_nonpinned_ref = 0;
	count_nonpinned_nonref = 0;

	major_iterate_objects (TRUE, FALSE, count_nonpinned_callback, NULL);
	major_iterate_objects (FALSE, TRUE, count_pinned_callback, NULL);

	total = count_pinned_nonref + count_nonpinned_nonref + count_pinned_ref + count_nonpinned_ref;

	g_print ("ref: %d pinned %d non-pinned non-ref: %d pinned %d non-pinned -- %.1f\n",
			count_pinned_ref, count_nonpinned_ref,
			count_pinned_nonref, count_nonpinned_nonref,
			(count_pinned_nonref + count_nonpinned_nonref) * 100.0 / total);
}

static int
ms_calculate_block_obj_sizes (double factor, int *arr)
{
	double target_size = sizeof (MonoObject);
	int num_sizes = 0;
	int last_size = 0;

	do {
		int target_count = ceil (MS_BLOCK_FREE / target_size);
		int size = MIN ((MS_BLOCK_FREE / target_count) & ~(ALLOC_ALIGN - 1), MAX_SMALL_OBJ_SIZE);

		if (size != last_size) {
			if (arr)
				arr [num_sizes] = size;
			++num_sizes;
			last_size = size;
		}

		target_size *= factor;
	} while (last_size < MAX_SMALL_OBJ_SIZE);

	return num_sizes;
}
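
/*
 * How the size classes come out: target sizes grow geometrically by
 * `factor', but each is snapped down to an ALLOC_ALIGN-aligned size
 * that divides the usable block space into a whole number of slots,
 * and duplicates are dropped.  The function is called once with
 * arr == NULL just to count the classes, then again to fill them in.
 */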

static void
major_init (void)
{
	int i;

	mono_sgen_register_fixed_internal_mem_type (INTERNAL_MEM_MS_BLOCK_INFO, sizeof (MSBlockInfo));

	num_block_obj_sizes = ms_calculate_block_obj_sizes (MS_BLOCK_OBJ_SIZE_FACTOR, NULL);
	block_obj_sizes = mono_sgen_alloc_internal_dynamic (sizeof (int) * num_block_obj_sizes, INTERNAL_MEM_MS_TABLES);
	ms_calculate_block_obj_sizes (MS_BLOCK_OBJ_SIZE_FACTOR, block_obj_sizes);

	/*
	{
		int i;
		g_print ("block object sizes:\n");
		for (i = 0; i < num_block_obj_sizes; ++i)
			g_print ("%d\n", block_obj_sizes [i]);
	}
	*/

	for (i = 0; i < MS_BLOCK_TYPE_MAX; ++i)
		free_block_lists [i] = mono_sgen_alloc_internal_dynamic (sizeof (MSBlockInfo*) * num_block_obj_sizes, INTERNAL_MEM_MS_TABLES);

	for (i = 0; i < MS_NUM_FAST_BLOCK_OBJ_SIZE_INDEXES; ++i)
		fast_block_obj_size_indexes [i] = ms_find_block_obj_size_index (i * 8);
	for (i = 0; i < MS_NUM_FAST_BLOCK_OBJ_SIZE_INDEXES * 8; ++i)
		g_assert (MS_BLOCK_OBJ_SIZE_INDEX (i) == ms_find_block_obj_size_index (i));

	LOCK_INIT (ms_block_list_mutex);

	mono_counters_register ("# major blocks allocated", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_major_blocks_alloced);
	mono_counters_register ("# major blocks freed", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_major_blocks_freed);
}

/* only valid during minor collections */
static int old_num_major_sections;

static void
major_start_nursery_collection (void)
{
#ifdef MARKSWEEP_CONSISTENCY_CHECK
	consistency_check ();
#endif

	old_num_major_sections = num_major_sections;
}

static void
major_finish_nursery_collection (void)
{
	int sections_alloced;

#ifdef MARKSWEEP_CONSISTENCY_CHECK
	consistency_check ();
#endif

	sections_alloced = num_major_sections - old_num_major_sections;
	minor_collection_sections_alloced += sections_alloced;
}

static void
major_finish_major_collection (void)
{
	int section_reserve = minor_collection_allowance / MS_BLOCK_SIZE;

	/*
	 * FIXME: We don't free blocks on 32 bit platforms because it
	 * can lead to address space fragmentation, since we're
	 * allocating blocks in larger contingents.
	 */
	if (sizeof (mword) < 8)
		return;

	while (num_empty_blocks > section_reserve) {
		void *next = *(void**)empty_blocks;
		mono_sgen_free_os_memory (empty_blocks, MS_BLOCK_SIZE);
		empty_blocks = next;
		/*
		 * This need not be atomic because this is running
		 * single-threaded.
		 */
		--num_empty_blocks;

		++stat_major_blocks_freed;
	}
}

static void
major_find_pin_queue_start_ends (GrayQueue *queue)
{
	MSBlockInfo *block;

	for (block = all_blocks; block; block = block->next)
		find_optimized_pin_queue_area (block->block + MS_BLOCK_SKIP, block->block + MS_BLOCK_SIZE,
				&block->pin_queue_start, &block->pin_queue_end);
}

static void
major_pin_objects (GrayQueue *queue)
{
	MSBlockInfo *block;

	for (block = all_blocks; block; block = block->next)
		mark_pinned_objects_in_block (block, queue);
}

static void
major_init_to_space (void)
{
}

static void
major_report_pinned_memory_usage (void)
{
	g_assert_not_reached ();
}

static gint64
major_get_used_size (void)
{
	gint64 size = 0;
	MSBlockInfo *block;

	for (block = all_blocks; block; block = block->next) {
		int count = MS_BLOCK_FREE / block->obj_size;
		void **iter;
		size += count * block->obj_size;
		for (iter = block->free_list; iter; iter = (void**)*iter)
			size -= block->obj_size;
	}

	return size;
}