#include "metadata/sgen-memory-governor.h"
#include "metadata/sgen-layout-stats.h"
#include "metadata/gc-internal.h"
+#include "metadata/sgen-pointer-queue.h"
+#include "metadata/sgen-pinning.h"
+#include "metadata/sgen-workers.h"
-#if !defined(SGEN_PARALLEL_MARK) && !defined(FIXED_HEAP)
#define SGEN_HAVE_CONCURRENT_MARK
-#endif
-#define MS_BLOCK_SIZE (16*1024)
-#define MS_BLOCK_SIZE_SHIFT 14
+#if defined(ARCH_MIN_MS_BLOCK_SIZE) && defined(ARCH_MIN_MS_BLOCK_SIZE_SHIFT)
+#define MS_BLOCK_SIZE ARCH_MIN_MS_BLOCK_SIZE
+#define MS_BLOCK_SIZE_SHIFT ARCH_MIN_MS_BLOCK_SIZE_SHIFT
+#else
+#define MS_BLOCK_SIZE_SHIFT 14 /* INT FASTENABLE */
+#define MS_BLOCK_SIZE (1 << MS_BLOCK_SIZE_SHIFT)
+#endif
#define MAJOR_SECTION_SIZE MS_BLOCK_SIZE
#define CARDS_PER_BLOCK (MS_BLOCK_SIZE / CARD_SIZE_IN_BYTES)
-#ifdef FIXED_HEAP
-#define MS_DEFAULT_HEAP_NUM_BLOCKS (32 * 1024) /* 512 MB */
-#endif
-
/*
 * Don't allocate single blocks, but alloc a contingent of this many
 * blocks in one swoop. This must be a power of two. The first part
 * of a block is the MSBlockHeader, then optional padding, then come
 * the objects, so this must be >= sizeof (MSBlockHeader).
 */
-#ifdef FIXED_HEAP
-#define MS_BLOCK_SKIP 0
-#else
-#define MS_BLOCK_SKIP 16
-#endif
+#define MS_BLOCK_SKIP ((sizeof (MSBlockHeader) + 15) & ~15)
#define MS_BLOCK_FREE (MS_BLOCK_SIZE - MS_BLOCK_SKIP)
#define MS_NUM_MARK_WORDS ((MS_BLOCK_SIZE / SGEN_ALLOC_ALIGN + sizeof (mword) * 8 - 1) / (sizeof (mword) * 8))
-#if SGEN_MAX_SMALL_OBJ_SIZE > MS_BLOCK_FREE / 2
-#error MAX_SMALL_OBJ_SIZE must be at most MS_BLOCK_FREE / 2
-#endif
-
typedef struct _MSBlockInfo MSBlockInfo;
struct _MSBlockInfo {
int obj_size;
int obj_size_index;
- size_t pin_queue_num_entries;
unsigned int pinned : 1;
unsigned int has_references : 1;
unsigned int has_pinned : 1; /* means cannot evacuate */
unsigned int is_to_space : 1;
unsigned int swept : 1;
-#ifdef FIXED_HEAP
- unsigned int used : 1;
- unsigned int zeroed : 1;
-#endif
- MSBlockInfo *next;
- char *block;
void **free_list;
MSBlockInfo *next_free;
- void **pin_queue_start;
+ size_t pin_queue_first_entry;
+ size_t pin_queue_last_entry;
#ifdef SGEN_HAVE_CONCURRENT_MARK
guint8 *cardtable_mod_union;
#endif
mword mark_words [MS_NUM_MARK_WORDS];
};
-#ifdef FIXED_HEAP
-static mword ms_heap_num_blocks = MS_DEFAULT_HEAP_NUM_BLOCKS;
+#define MS_BLOCK_FOR_BLOCK_INFO(b) ((char*)(b))
-static char *ms_heap_start;
-static char *ms_heap_end;
-
-#define MS_PTR_IN_SMALL_MAJOR_HEAP(p) ((char*)(p) >= ms_heap_start && (char*)(p) < ms_heap_end)
-
-/* array of all all block infos in the system */
-static MSBlockInfo *block_infos;
-#endif
-
-#define MS_BLOCK_OBJ(b,i) ((b)->block + MS_BLOCK_SKIP + (b)->obj_size * (i))
-#define MS_BLOCK_OBJ_FOR_SIZE(b,i,obj_size) ((b)->block + MS_BLOCK_SKIP + (obj_size) * (i))
+#define MS_BLOCK_OBJ(b,i) (MS_BLOCK_FOR_BLOCK_INFO(b) + MS_BLOCK_SKIP + (b)->obj_size * (i))
+#define MS_BLOCK_OBJ_FOR_SIZE(b,i,obj_size) (MS_BLOCK_FOR_BLOCK_INFO(b) + MS_BLOCK_SKIP + (obj_size) * (i))
#define MS_BLOCK_DATA_FOR_OBJ(o) ((char*)((mword)(o) & ~(mword)(MS_BLOCK_SIZE - 1)))
-#ifdef FIXED_HEAP
-#define MS_BLOCK_FOR_OBJ(o) (&block_infos [(mword)((char*)(o) - ms_heap_start) >> MS_BLOCK_SIZE_SHIFT])
-#else
typedef struct {
- MSBlockInfo *info;
+ MSBlockInfo info;
} MSBlockHeader;
-#define MS_BLOCK_FOR_OBJ(o) (((MSBlockHeader*)MS_BLOCK_DATA_FOR_OBJ ((o)))->info)
-#endif
+#define MS_BLOCK_FOR_OBJ(o) (&((MSBlockHeader*)MS_BLOCK_DATA_FOR_OBJ ((o)))->info)
/* object index will always be small */
-#define MS_BLOCK_OBJ_INDEX(o,b) ((int)(((char*)(o) - ((b)->block + MS_BLOCK_SKIP)) / (b)->obj_size))
+#define MS_BLOCK_OBJ_INDEX(o,b) ((int)(((char*)(o) - (MS_BLOCK_FOR_BLOCK_INFO(b) + MS_BLOCK_SKIP)) / (b)->obj_size))
//casting to int is fine since blocks are 16k
#define MS_CALC_MARK_BIT(w,b,o) do { \
#define MS_MARK_BIT(bl,w,b) ((bl)->mark_words [(w)] & (ONE_P << (b)))
#define MS_SET_MARK_BIT(bl,w,b) ((bl)->mark_words [(w)] |= (ONE_P << (b)))
-#define MS_PAR_SET_MARK_BIT(was_marked,bl,w,b) do { \
- mword __old = (bl)->mark_words [(w)]; \
- mword __bitmask = ONE_P << (b); \
- if (__old & __bitmask) { \
- was_marked = TRUE; \
- break; \
- } \
- if (SGEN_CAS_PTR ((gpointer*)&(bl)->mark_words [(w)], \
- (gpointer)(__old | __bitmask), \
- (gpointer)__old) == \
- (gpointer)__old) { \
- was_marked = FALSE; \
- break; \
- } \
- } while (1)
-#define MS_OBJ_ALLOCED(o,b) (*(void**)(o) && (*(char**)(o) < (b)->block || *(char**)(o) >= (b)->block + MS_BLOCK_SIZE))
+#define MS_OBJ_ALLOCED(o,b) (*(void**)(o) && (*(char**)(o) < MS_BLOCK_FOR_BLOCK_INFO (b) || *(char**)(o) >= MS_BLOCK_FOR_BLOCK_INFO (b) + MS_BLOCK_SIZE))
-#define MS_BLOCK_OBJ_SIZE_FACTOR (sqrt (2.0))
+#define MS_BLOCK_OBJ_SIZE_FACTOR (pow (2.0, 1.0 / 3))
/*
* This way we can lookup block object size indexes for sizes up to
#define MS_BLOCK_TYPE_MAX 4
-#ifdef SGEN_PARALLEL_MARK
-static LOCK_DECLARE (ms_block_list_mutex);
-#define LOCK_MS_BLOCK_LIST mono_mutex_lock (&ms_block_list_mutex)
-#define UNLOCK_MS_BLOCK_LIST mono_mutex_unlock (&ms_block_list_mutex)
-#endif
-
static gboolean *evacuate_block_obj_sizes;
static float evacuation_threshold = 0.666f;
#ifdef SGEN_HAVE_CONCURRENT_MARK
static gboolean concurrent_mark;
#endif
+#define BLOCK_IS_TAGGED_HAS_REFERENCES(bl) SGEN_POINTER_IS_TAGGED_1 ((bl))
+#define BLOCK_TAG_HAS_REFERENCES(bl) SGEN_POINTER_TAG_1 ((bl))
+#define BLOCK_UNTAG_HAS_REFERENCES(bl) SGEN_POINTER_UNTAG_1 ((bl))
+
+#define BLOCK_TAG(bl) ((bl)->has_references ? BLOCK_TAG_HAS_REFERENCES ((bl)) : (bl))
+
/* all allocated blocks in the system */
-static MSBlockInfo *all_blocks;
+static SgenPointerQueue allocated_blocks;
-#ifdef FIXED_HEAP
-/* non-allocated block free-list */
-static MSBlockInfo *empty_blocks = NULL;
-#else
/* non-allocated block free-list */
static void *empty_blocks = NULL;
static size_t num_empty_blocks = 0;
-#endif
-#define FOREACH_BLOCK(bl) for ((bl) = all_blocks; (bl); (bl) = (bl)->next) {
-#define END_FOREACH_BLOCK }
+#define FOREACH_BLOCK(bl) { size_t __index; for (__index = 0; __index < allocated_blocks.next_slot; ++__index) { (bl) = BLOCK_UNTAG_HAS_REFERENCES (allocated_blocks.data [__index]);
+#define FOREACH_BLOCK_HAS_REFERENCES(bl,hr) { size_t __index; for (__index = 0; __index < allocated_blocks.next_slot; ++__index) { (bl) = allocated_blocks.data [__index]; (hr) = BLOCK_IS_TAGGED_HAS_REFERENCES ((bl)); (bl) = BLOCK_UNTAG_HAS_REFERENCES ((bl));
+#define END_FOREACH_BLOCK } }
+#define DELETE_BLOCK_IN_FOREACH() (allocated_blocks.data [__index] = NULL)
static size_t num_major_sections = 0;
/* one free block list for each block object size */
static MSBlockInfo **free_block_lists [MS_BLOCK_TYPE_MAX];
-#ifdef SGEN_PARALLEL_MARK
-#ifdef HAVE_KW_THREAD
-static __thread MSBlockInfo ***workers_free_block_lists;
-#else
-static MonoNativeTlsKey workers_free_block_lists_key;
-#endif
-#endif
-
-static long long stat_major_blocks_alloced = 0;
-static long long stat_major_blocks_freed = 0;
-static long long stat_major_blocks_lazy_swept = 0;
-static long long stat_major_objects_evacuated = 0;
+static guint64 stat_major_blocks_alloced = 0;
+static guint64 stat_major_blocks_freed = 0;
+static guint64 stat_major_blocks_lazy_swept = 0;
+static guint64 stat_major_objects_evacuated = 0;
#if SIZEOF_VOID_P != 8
-static long long stat_major_blocks_freed_ideal = 0;
-static long long stat_major_blocks_freed_less_ideal = 0;
-static long long stat_major_blocks_freed_individual = 0;
-static long long stat_major_blocks_alloced_less_ideal = 0;
+static guint64 stat_major_blocks_freed_ideal = 0;
+static guint64 stat_major_blocks_freed_less_ideal = 0;
+static guint64 stat_major_blocks_freed_individual = 0;
+static guint64 stat_major_blocks_alloced_less_ideal = 0;
#endif
#ifdef SGEN_COUNT_NUMBER_OF_MAJOR_OBJECTS_MARKED
-static long long num_major_objects_marked = 0;
+static guint64 num_major_objects_marked = 0;
#define INC_NUM_MAJOR_OBJECTS_MARKED() (++num_major_objects_marked)
#else
#define INC_NUM_MAJOR_OBJECTS_MARKED()
#endif
+#ifdef SGEN_HEAVY_BINARY_PROTOCOL
+static mono_mutex_t scanned_objects_list_lock;
+static SgenPointerQueue scanned_objects_list;
+
+static void
+add_scanned_object (void *ptr)
+{
+ if (!binary_protocol_is_enabled ())
+ return;
+
+ mono_mutex_lock (&scanned_objects_list_lock);
+ sgen_pointer_queue_add (&scanned_objects_list, ptr);
+ mono_mutex_unlock (&scanned_objects_list_lock);
+}
+#endif
+
static void
sweep_block (MSBlockInfo *block, gboolean during_major_collection);
#define FREE_BLOCKS_FROM(lists,p,r) (lists [((p) ? MS_BLOCK_FLAG_PINNED : 0) | ((r) ? MS_BLOCK_FLAG_REFS : 0)])
#define FREE_BLOCKS(p,r) (FREE_BLOCKS_FROM (free_block_lists, (p), (r)))
-#ifdef SGEN_PARALLEL_MARK
-#ifdef HAVE_KW_THREAD
-#define FREE_BLOCKS_LOCAL(p,r) (FREE_BLOCKS_FROM (workers_free_block_lists, (p), (r)))
-#else
-#define FREE_BLOCKS_LOCAL(p,r) (FREE_BLOCKS_FROM (((MSBlockInfo***)(mono_native_tls_get_value (workers_free_block_lists_key))), (p), (r)))
-#endif
-#else
-//#define FREE_BLOCKS_LOCAL(p,r) (FREE_BLOCKS_FROM (free_block_lists, (p), (r)))
-#endif
#define MS_BLOCK_OBJ_SIZE_INDEX(s) \
(((s)+7)>>3 < MS_NUM_FAST_BLOCK_OBJ_SIZE_INDEXES ? \
fast_block_obj_size_indexes [((s)+7)>>3] : \
ms_find_block_obj_size_index ((s)))
-#ifdef FIXED_HEAP
-static void*
-major_alloc_heap (mword nursery_size, mword nursery_align, int the_nursery_bits)
-{
- char *nursery_start;
- mword major_heap_size = ms_heap_num_blocks * MS_BLOCK_SIZE;
- mword alloc_size = nursery_size + major_heap_size;
- mword i;
-
- g_assert (ms_heap_num_blocks > 0);
- g_assert (nursery_size % MS_BLOCK_SIZE == 0);
- if (nursery_align)
- g_assert (nursery_align % MS_BLOCK_SIZE == 0);
-
- nursery_start = sgen_alloc_os_memory_aligned (alloc_size, nursery_align ? nursery_align : MS_BLOCK_SIZE, SGEN_ALLOC_HEAP | SGEN_ALLOC_ACTIVATE, "heap");
- ms_heap_start = nursery_start + nursery_size;
- ms_heap_end = ms_heap_start + major_heap_size;
-
- block_infos = sgen_alloc_internal_dynamic (sizeof (MSBlockInfo) * ms_heap_num_blocks, INTERNAL_MEM_MS_BLOCK_INFO, TRUE);
-
- for (i = 0; i < ms_heap_num_blocks; ++i) {
- block_infos [i].block = ms_heap_start + i * MS_BLOCK_SIZE;
- if (i < ms_heap_num_blocks - 1)
- block_infos [i].next_free = &block_infos [i + 1];
- else
- block_infos [i].next_free = NULL;
- block_infos [i].zeroed = TRUE;
- }
-
- empty_blocks = &block_infos [0];
-
- return nursery_start;
-}
-#else
static void*
major_alloc_heap (mword nursery_size, mword nursery_align, int the_nursery_bits)
{
return start;
}
-#endif
static void
update_heap_boundaries_for_block (MSBlockInfo *block)
{
- sgen_update_heap_boundaries ((mword)block->block, (mword)block->block + MS_BLOCK_SIZE);
-}
-
-#ifdef FIXED_HEAP
-static MSBlockInfo*
-ms_get_empty_block (void)
-{
- MSBlockInfo *block;
-
- g_assert (empty_blocks);
-
- do {
- block = empty_blocks;
- } while (SGEN_CAS_PTR ((gpointer*)&empty_blocks, block->next_free, block) != block);
-
- block->used = TRUE;
-
- if (!block->zeroed)
- memset (block->block, 0, MS_BLOCK_SIZE);
-
- return block;
+ sgen_update_heap_boundaries ((mword)MS_BLOCK_FOR_BLOCK_INFO (block), (mword)MS_BLOCK_FOR_BLOCK_INFO (block) + MS_BLOCK_SIZE);
}
-static void
-ms_free_block (MSBlockInfo *block)
-{
- block->next_free = empty_blocks;
- empty_blocks = block;
- block->used = FALSE;
- block->zeroed = FALSE;
- sgen_memgov_release_space (MS_BLOCK_SIZE, SPACE_MAJOR);
-}
-#else
static void*
ms_get_empty_block (void)
{
SGEN_ATOMIC_ADD_P (num_empty_blocks, 1);
}
-#endif
//#define MARKSWEEP_CONSISTENCY_CHECK
if (block->swept)
g_assert (block->free_list);
-#ifdef FIXED_HEAP
- /* the block must not be in the empty_blocks list */
- for (b = empty_blocks; b; b = b->next_free)
- g_assert (b != block);
-#endif
- /* the block must be in the all_blocks list */
- for (b = all_blocks; b; b = b->next) {
- if (b == block)
- break;
- }
- g_assert (b == block);
+ /* the block must be in the allocated_blocks array */
+ g_assert (sgen_pointer_queue_find (&allocated_blocks, BLOCK_TAG (block)) != (size_t)-1);
}
}
static void
check_empty_blocks (void)
{
-#ifndef FIXED_HEAP
void *p;
size_t i = 0;
for (p = empty_blocks; p; p = *(void**)p)
++i;
g_assert (i == num_empty_blocks);
-#endif
}
static void
int num_free = 0;
void **free;
-#ifndef FIXED_HEAP
/* check block header */
g_assert (((MSBlockHeader*)block->block)->info == block);
-#endif
/* count number of free slots */
for (i = 0; i < count; ++i) {
int size = block_obj_sizes [size_index];
int count = MS_BLOCK_FREE / size;
MSBlockInfo *info;
-#ifdef SGEN_PARALLEL_MARK
- MSBlockInfo *next;
-#endif
-#ifndef FIXED_HEAP
- MSBlockHeader *header;
-#endif
MSBlockInfo **free_blocks = FREE_BLOCKS (pinned, has_references);
char *obj_start;
int i;
if (!sgen_memgov_try_alloc_space (MS_BLOCK_SIZE, SPACE_MAJOR))
return FALSE;
-#ifdef FIXED_HEAP
- info = ms_get_empty_block ();
-#else
- info = sgen_alloc_internal (INTERNAL_MEM_MS_BLOCK_INFO);
-#endif
+ info = (MSBlockInfo*)ms_get_empty_block ();
SGEN_ASSERT (9, count >= 2, "block with %d objects, it must hold at least 2", count);
*/
info->is_to_space = (sgen_get_current_collection_generation () == GENERATION_OLD);
info->swept = 1;
-#ifndef FIXED_HEAP
- info->block = ms_get_empty_block ();
-
- header = (MSBlockHeader*) info->block;
- header->info = info;
-#endif
#ifdef SGEN_HAVE_CONCURRENT_MARK
info->cardtable_mod_union = NULL;
#endif
update_heap_boundaries_for_block (info);
/* build free list */
- obj_start = info->block + MS_BLOCK_SKIP;
+ obj_start = MS_BLOCK_FOR_BLOCK_INFO (info) + MS_BLOCK_SKIP;
info->free_list = (void**)obj_start;
/* we're skipping the last one - it must be nulled */
for (i = 0; i < count - 1; ++i) {
/* the last one */
*(void**)obj_start = NULL;
-#ifdef SGEN_PARALLEL_MARK
- do {
- next = info->next_free = free_blocks [size_index];
- } while (SGEN_CAS_PTR ((void**)&free_blocks [size_index], info, next) != next);
-
- do {
- next = info->next = all_blocks;
- } while (SGEN_CAS_PTR ((void**)&all_blocks, info, next) != next);
-#else
info->next_free = free_blocks [size_index];
free_blocks [size_index] = info;
- info->next = all_blocks;
- all_blocks = info;
-#endif
+ sgen_pointer_queue_add (&allocated_blocks, BLOCK_TAG (info));
++num_major_sections;
return TRUE;
MSBlockInfo *block;
FOREACH_BLOCK (block) {
- if (ptr >= block->block && ptr <= block->block + MS_BLOCK_SIZE)
+ if (ptr >= MS_BLOCK_FOR_BLOCK_INFO (block) && ptr <= MS_BLOCK_FOR_BLOCK_INFO (block) + MS_BLOCK_SIZE)
return block->pinned;
} END_FOREACH_BLOCK;
return FALSE;
return obj;
}
-#ifdef SGEN_PARALLEL_MARK
-static gboolean
-try_remove_block_from_free_list (MSBlockInfo *block, MSBlockInfo **free_blocks, int size_index)
-{
- /*
- * No more free slots in the block, so try to free the block.
- * Don't try again if we don't succeed - another thread will
- * already have done it.
- */
- MSBlockInfo *next_block = block->next_free;
- if (SGEN_CAS_PTR ((void**)&free_blocks [size_index], next_block, block) == block) {
- /*
- void *old = SGEN_CAS_PTR ((void**)&block->next_free, NULL, next_block);
- g_assert (old == next_block);
- */
- block->next_free = NULL;
- return TRUE;
- }
- return FALSE;
-}
-
-static void*
-alloc_obj_par (MonoVTable *vtable, int size, gboolean pinned, gboolean has_references)
-{
- int size_index = MS_BLOCK_OBJ_SIZE_INDEX (size);
- MSBlockInfo **free_blocks_local = FREE_BLOCKS_LOCAL (pinned, has_references);
- MSBlockInfo *block;
- void *obj;
-
-#ifdef SGEN_HAVE_CONCURRENT_MARK
- if (concurrent_mark)
- g_assert_not_reached ();
-#endif
-
- SGEN_ASSERT (9, current_collection_generation == GENERATION_OLD, "old gen parallel allocator called from a %d collection", current_collection_generation);
-
- if (free_blocks_local [size_index]) {
- get_slot:
- obj = unlink_slot_from_free_list_uncontested (free_blocks_local, size_index);
- } else {
- MSBlockInfo **free_blocks = FREE_BLOCKS (pinned, has_references);
-
- get_block:
- block = free_blocks [size_index];
- if (block) {
- if (!try_remove_block_from_free_list (block, free_blocks, size_index))
- goto get_block;
-
- g_assert (block->next_free == NULL);
- g_assert (block->free_list);
- block->next_free = free_blocks_local [size_index];
- free_blocks_local [size_index] = block;
-
- goto get_slot;
- } else {
- gboolean success;
-
- LOCK_MS_BLOCK_LIST;
- success = ms_alloc_block (size_index, pinned, has_references);
- UNLOCK_MS_BLOCK_LIST;
-
- if (G_UNLIKELY (!success))
- return NULL;
-
- goto get_block;
- }
- }
-
- *(MonoVTable**)obj = vtable;
-
- return obj;
-}
-
-static void*
-major_par_alloc_object (MonoVTable *vtable, size_t size, gboolean has_references)
-{
- return alloc_obj_par (vtable, size, FALSE, has_references);
-}
-#endif
-
static void*
alloc_obj (MonoVTable *vtable, size_t size, gboolean pinned, gboolean has_references)
{
MSBlockInfo **free_blocks = FREE_BLOCKS (pinned, has_references);
void *obj;
-#ifdef SGEN_PARALLEL_MARK
- SGEN_ASSERT (9, current_collection_generation == GENERATION_OLD, "old gen parallel allocator called from a %d collection", current_collection_generation);
-
-#endif
-
if (!free_blocks [size_index]) {
if (G_UNLIKELY (!ms_alloc_block (size_index, pinned, has_references)))
return NULL;
return obj;
}
-#define MAJOR_OBJ_IS_IN_TO_SPACE(obj) FALSE
-
/*
* obj is some object. If it's not in the major heap (i.e. if it's in
* the nursery or LOS), return FALSE. Otherwise return whether it's
{
MSBlockInfo *block;
int word, bit;
-#ifndef FIXED_HEAP
mword objsize;
-#endif
if (sgen_ptr_in_nursery (obj))
return FALSE;
-#ifdef FIXED_HEAP
- /* LOS */
- if (!MS_PTR_IN_SMALL_MAJOR_HEAP (obj))
- return FALSE;
-#else
objsize = SGEN_ALIGN_UP (sgen_safe_object_get_size ((MonoObject*)obj));
/* LOS */
if (objsize > SGEN_MAX_SMALL_OBJ_SIZE)
return FALSE;
-#endif
/* now we know it's in a major block */
block = MS_BLOCK_FOR_OBJ (obj);
MSBlockInfo *block;
FOREACH_BLOCK (block) {
- if (ptr >= block->block && ptr <= block->block + MS_BLOCK_SIZE) {
+ if (ptr >= MS_BLOCK_FOR_BLOCK_INFO (block) && ptr <= MS_BLOCK_FOR_BLOCK_INFO (block) + MS_BLOCK_SIZE) {
int count = MS_BLOCK_FREE / block->obj_size;
int i;
int idx;
char *obj;
- if ((block->block > object) || ((block->block + MS_BLOCK_SIZE) <= object))
+ if ((MS_BLOCK_FOR_BLOCK_INFO (block) > object) || ((MS_BLOCK_FOR_BLOCK_INFO (block) + MS_BLOCK_SIZE) <= object))
continue;
idx = MS_BLOCK_OBJ_INDEX (object, block);
int w, b;
gboolean marked;
- if ((block->block > ptr) || ((block->block + MS_BLOCK_SIZE) <= ptr))
+ if ((MS_BLOCK_FOR_BLOCK_INFO (block) > ptr) || ((MS_BLOCK_FOR_BLOCK_INFO (block) + MS_BLOCK_SIZE) <= ptr))
continue;
SGEN_LOG (0, "major-ptr (block %p sz %d pin %d ref %d)\n",
- block->block, block->obj_size, block->pinned, block->has_references);
+ MS_BLOCK_FOR_BLOCK_INFO (block), block->obj_size, block->pinned, block->has_references);
idx = MS_BLOCK_OBJ_INDEX (ptr, block);
obj = (char*)MS_BLOCK_OBJ (block, idx);
start = i;
} else {
if (start >= 0) {
- sgen_dump_occupied (MS_BLOCK_OBJ (block, start), MS_BLOCK_OBJ (block, i), block->block);
+ sgen_dump_occupied (MS_BLOCK_OBJ (block, start), MS_BLOCK_OBJ (block, i), MS_BLOCK_FOR_BLOCK_INFO (block));
start = -1;
}
}
#define LOAD_VTABLE SGEN_LOAD_VTABLE
-#define MS_MARK_OBJECT_AND_ENQUEUE_CHECKED(obj,block,queue) do { \
+#define MS_MARK_OBJECT_AND_ENQUEUE_CHECKED(obj,desc,block,queue) do { \
int __word, __bit; \
MS_CALC_MARK_BIT (__word, __bit, (obj)); \
- if (!MS_MARK_BIT ((block), __word, __bit) && MS_OBJ_ALLOCED ((obj), (block))) { \
+ if (!MS_MARK_BIT ((block), __word, __bit) && MS_OBJ_ALLOCED ((obj), (block))) { \
MS_SET_MARK_BIT ((block), __word, __bit); \
- if ((block)->has_references) \
- GRAY_OBJECT_ENQUEUE ((queue), (obj)); \
+ if (sgen_gc_descr_has_references (desc)) \
+ GRAY_OBJECT_ENQUEUE ((queue), (obj), (desc)); \
binary_protocol_mark ((obj), (gpointer)LOAD_VTABLE ((obj)), sgen_safe_object_get_size ((MonoObject*)(obj))); \
INC_NUM_MAJOR_OBJECTS_MARKED (); \
} \
} while (0)
-#define MS_MARK_OBJECT_AND_ENQUEUE(obj,block,queue) do { \
+#define MS_MARK_OBJECT_AND_ENQUEUE(obj,desc,block,queue) do { \
int __word, __bit; \
MS_CALC_MARK_BIT (__word, __bit, (obj)); \
- SGEN_ASSERT (9, MS_OBJ_ALLOCED ((obj), (block)), "object %p not allocated", obj); \
+ SGEN_ASSERT (9, MS_OBJ_ALLOCED ((obj), (block)), "object %p not allocated", obj); \
if (!MS_MARK_BIT ((block), __word, __bit)) { \
MS_SET_MARK_BIT ((block), __word, __bit); \
- if ((block)->has_references) \
- GRAY_OBJECT_ENQUEUE ((queue), (obj)); \
- binary_protocol_mark ((obj), (gpointer)LOAD_VTABLE ((obj)), sgen_safe_object_get_size ((MonoObject*)(obj))); \
- INC_NUM_MAJOR_OBJECTS_MARKED (); \
- } \
- } while (0)
-#define MS_PAR_MARK_OBJECT_AND_ENQUEUE(obj,block,queue) do { \
- int __word, __bit; \
- gboolean __was_marked; \
- SGEN_ASSERT (9, MS_OBJ_ALLOCED ((obj), (block)), "object %p not allocated", obj); \
- MS_CALC_MARK_BIT (__word, __bit, (obj)); \
- MS_PAR_SET_MARK_BIT (__was_marked, (block), __word, __bit); \
- if (!__was_marked) { \
- if ((block)->has_references) \
- GRAY_OBJECT_ENQUEUE ((queue), (obj)); \
+ if (sgen_gc_descr_has_references (desc)) \
+ GRAY_OBJECT_ENQUEUE ((queue), (obj), (desc)); \
binary_protocol_mark ((obj), (gpointer)LOAD_VTABLE ((obj)), sgen_safe_object_get_size ((MonoObject*)(obj))); \
INC_NUM_MAJOR_OBJECTS_MARKED (); \
} \
block = MS_BLOCK_FOR_OBJ (obj);
block->has_pinned = TRUE;
- MS_MARK_OBJECT_AND_ENQUEUE (obj, block, queue);
+ MS_MARK_OBJECT_AND_ENQUEUE (obj, sgen_obj_get_descriptor (obj), block, queue);
}
#include "sgen-major-copy-object.h"
-#ifdef SGEN_PARALLEL_MARK
-static void
-major_copy_or_mark_object (void **ptr, void *obj, SgenGrayQueue *queue)
-{
- mword objsize;
- MSBlockInfo *block;
- MonoVTable *vt;
-
- HEAVY_STAT (++stat_copy_object_called_major);
-
- SGEN_ASSERT (9, obj, "null object from pointer %p", ptr);
- SGEN_ASSERT (9, current_collection_generation == GENERATION_OLD, "old gen parallel allocator called from a %d collection", current_collection_generation);
-
- if (sgen_ptr_in_nursery (obj)) {
- int word, bit;
- gboolean has_references;
- void *destination;
- mword vtable_word = *(mword*)obj;
- vt = (MonoVTable*)(vtable_word & ~SGEN_VTABLE_BITS_MASK);
-
- if (vtable_word & SGEN_FORWARDED_BIT) {
- *ptr = (void*)vt;
- return;
- }
-
- if (vtable_word & SGEN_PINNED_BIT)
- return;
-
- /* An object in the nursery To Space has already been copied and grayed. Nothing to do. */
- if (sgen_nursery_is_to_space (obj))
- return;
-
- HEAVY_STAT (++stat_objects_copied_major);
-
- do_copy_object:
- objsize = SGEN_ALIGN_UP (sgen_par_object_get_size (vt, (MonoObject*)obj));
- has_references = SGEN_VTABLE_HAS_REFERENCES (vt);
-
- destination = sgen_minor_collector.par_alloc_for_promotion (vt, obj, objsize, has_references);
- if (G_UNLIKELY (!destination)) {
- if (!sgen_ptr_in_nursery (obj)) {
- int size_index;
- block = MS_BLOCK_FOR_OBJ (obj);
- size_index = block->obj_size_index;
- evacuate_block_obj_sizes [size_index] = FALSE;
- }
-
- sgen_parallel_pin_or_update (ptr, obj, vt, queue);
- sgen_set_pinned_from_failed_allocation (objsize);
- return;
- }
-
- if (SGEN_CAS_PTR (obj, (void*)((mword)destination | SGEN_FORWARDED_BIT), vt) == vt) {
- gboolean was_marked;
-
- par_copy_object_no_checks (destination, vt, obj, objsize, has_references ? queue : NULL);
- obj = destination;
- *ptr = obj;
-
- /*
- * FIXME: If we make major_alloc_object() give
- * us the block info, too, we won't have to
- * re-fetch it here.
- *
- * FIXME (2): We should rework this to avoid all those nursery checks.
- */
- /*
- * For the split nursery allocator the object
- * might still be in the nursery despite
- * having being promoted, in which case we
- * can't mark it.
- */
- if (!sgen_ptr_in_nursery (obj)) {
- block = MS_BLOCK_FOR_OBJ (obj);
- MS_CALC_MARK_BIT (word, bit, obj);
- SGEN_ASSERT (9, !MS_MARK_BIT (block, word, bit), "object %p already marked", obj);
- MS_PAR_SET_MARK_BIT (was_marked, block, word, bit);
- binary_protocol_mark (obj, vt, sgen_safe_object_get_size ((MonoObject*)obj));
- }
- } else {
- /*
- * FIXME: We have allocated destination, but
- * we cannot use it. Give it back to the
- * allocator.
- */
- *(void**)destination = NULL;
-
- vtable_word = *(mword*)obj;
- g_assert (vtable_word & SGEN_FORWARDED_BIT);
-
- obj = (void*)(vtable_word & ~SGEN_VTABLE_BITS_MASK);
-
- *ptr = obj;
-
- HEAVY_STAT (++stat_slots_allocated_in_vain);
- }
- } else {
-#ifdef FIXED_HEAP
- if (MS_PTR_IN_SMALL_MAJOR_HEAP (obj))
-#else
- mword vtable_word = *(mword*)obj;
- vt = (MonoVTable*)(vtable_word & ~SGEN_VTABLE_BITS_MASK);
-
- /* see comment in the non-parallel version below */
- if (vtable_word & SGEN_FORWARDED_BIT) {
- *ptr = (void*)vt;
- return;
- }
- objsize = SGEN_ALIGN_UP (sgen_par_object_get_size (vt, (MonoObject*)obj));
-
- if (objsize <= SGEN_MAX_SMALL_OBJ_SIZE)
-#endif
- {
- int size_index;
-
- block = MS_BLOCK_FOR_OBJ (obj);
- size_index = block->obj_size_index;
-
- if (!block->has_pinned && evacuate_block_obj_sizes [size_index]) {
- if (block->is_to_space)
- return;
-
-#ifdef FIXED_HEAP
- {
- mword vtable_word = *(mword*)obj;
- vt = (MonoVTable*)(vtable_word & ~SGEN_VTABLE_BITS_MASK);
-
- if (vtable_word & SGEN_FORWARDED_BIT) {
- *ptr = (void*)vt;
- return;
- }
- }
-#endif
-
- HEAVY_STAT (++stat_major_objects_evacuated);
- goto do_copy_object;
- }
-
- MS_PAR_MARK_OBJECT_AND_ENQUEUE (obj, block, queue);
- } else {
- LOSObject *bigobj = sgen_los_header_for_object (obj);
- mword size_word = bigobj->size;
-#ifdef FIXED_HEAP
- mword vtable_word = *(mword*)obj;
- vt = (MonoVTable*)(vtable_word & ~SGEN_VTABLE_BITS_MASK);
-#endif
- if (size_word & 1)
- return;
- binary_protocol_pin (obj, vt, sgen_safe_object_get_size ((MonoObject*)obj));
- if (SGEN_CAS_PTR ((void*)&bigobj->size, (void*)(size_word | 1), (void*)size_word) == (void*)size_word) {
- if (SGEN_VTABLE_HAS_REFERENCES (vt))
- GRAY_OBJECT_ENQUEUE (queue, obj);
- } else {
- g_assert (sgen_los_object_is_pinned (obj));
- }
- }
- }
-}
-#else
#ifdef SGEN_HAVE_CONCURRENT_MARK
static void
-major_copy_or_mark_object_concurrent (void **ptr, void *obj, SgenGrayQueue *queue)
+major_copy_or_mark_object_with_evacuation_concurrent (void **ptr, void *obj, SgenGrayQueue *queue)
{
+ SGEN_ASSERT (9, sgen_concurrent_collection_in_progress (), "Why are we scanning concurrently when there's no concurrent collection on?");
+ SGEN_ASSERT (9, !sgen_workers_are_working () || sgen_is_worker_thread (mono_native_thread_id_get ()), "We must not scan from two threads at the same time!");
+
g_assert (!SGEN_OBJECT_IS_FORWARDED (obj));
if (!sgen_ptr_in_nursery (obj)) {
-#ifdef FIXED_HEAP
- if (MS_PTR_IN_SMALL_MAJOR_HEAP (obj))
-#else
mword objsize;
objsize = SGEN_ALIGN_UP (sgen_safe_object_get_size ((MonoObject*)obj));
- if (objsize <= SGEN_MAX_SMALL_OBJ_SIZE)
-#endif
- {
+ if (objsize <= SGEN_MAX_SMALL_OBJ_SIZE) {
MSBlockInfo *block = MS_BLOCK_FOR_OBJ (obj);
- MS_MARK_OBJECT_AND_ENQUEUE (obj, block, queue);
+ MS_MARK_OBJECT_AND_ENQUEUE (obj, sgen_obj_get_descriptor (obj), block, queue);
} else {
if (sgen_los_object_is_pinned (obj))
return;
sgen_los_pin_object (obj);
if (SGEN_OBJECT_HAS_REFERENCES (obj))
- GRAY_OBJECT_ENQUEUE (queue, obj);
+ GRAY_OBJECT_ENQUEUE (queue, obj, sgen_obj_get_descriptor (obj));
INC_NUM_MAJOR_OBJECTS_MARKED ();
}
}
}
#endif
-static void
-major_copy_or_mark_object (void **ptr, void *obj, SgenGrayQueue *queue)
+static long long
+major_get_and_reset_num_major_objects_marked (void)
{
- MSBlockInfo *block;
-
- HEAVY_STAT (++stat_copy_object_called_major);
-
- SGEN_ASSERT (9, obj, "null object from pointer %p", ptr);
- SGEN_ASSERT (9, current_collection_generation == GENERATION_OLD, "old gen parallel allocator called from a %d collection", current_collection_generation);
-
- if (sgen_ptr_in_nursery (obj)) {
- int word, bit;
- char *forwarded, *old_obj;
-
- if ((forwarded = SGEN_OBJECT_IS_FORWARDED (obj))) {
- *ptr = forwarded;
- return;
- }
- if (SGEN_OBJECT_IS_PINNED (obj))
- return;
-
- /* An object in the nursery To Space has already been copied and grayed. Nothing to do. */
- if (sgen_nursery_is_to_space (obj))
- return;
-
- HEAVY_STAT (++stat_objects_copied_major);
-
- do_copy_object:
- old_obj = obj;
- obj = copy_object_no_checks (obj, queue);
- if (G_UNLIKELY (old_obj == obj)) {
- /*If we fail to evacuate an object we just stop doing it for a given block size as all other will surely fail too.*/
- if (!sgen_ptr_in_nursery (obj)) {
- int size_index;
- block = MS_BLOCK_FOR_OBJ (obj);
- size_index = block->obj_size_index;
- evacuate_block_obj_sizes [size_index] = FALSE;
- MS_MARK_OBJECT_AND_ENQUEUE (obj, block, queue);
- }
- return;
- }
- *ptr = obj;
-
- /*
- * FIXME: See comment for copy_object_no_checks(). If
- * we have that, we can let the allocation function
- * give us the block info, too, and we won't have to
- * re-fetch it.
- *
- * FIXME (2): We should rework this to avoid all those nursery checks.
- */
- /*
- * For the split nursery allocator the object might
- * still be in the nursery despite having being
- * promoted, in which case we can't mark it.
- */
- if (!sgen_ptr_in_nursery (obj)) {
- block = MS_BLOCK_FOR_OBJ (obj);
- MS_CALC_MARK_BIT (word, bit, obj);
- SGEN_ASSERT (9, !MS_MARK_BIT (block, word, bit), "object %p already marked", obj);
- MS_SET_MARK_BIT (block, word, bit);
- binary_protocol_mark (obj, (gpointer)LOAD_VTABLE (obj), sgen_safe_object_get_size ((MonoObject*)obj));
- }
- } else {
- char *forwarded;
-#ifdef FIXED_HEAP
- if (MS_PTR_IN_SMALL_MAJOR_HEAP (obj))
+#ifdef SGEN_COUNT_NUMBER_OF_MAJOR_OBJECTS_MARKED
+ long long num = num_major_objects_marked;
+ num_major_objects_marked = 0;
+ return num;
#else
- mword objsize;
-
- /*
- * If we have don't have a fixed heap we cannot know
- * whether an object is in the LOS or in the small
- * object major heap without checking its size. To do
- * that, however, we need to know that we actually
- * have a valid object, not a forwarding pointer, so
- * we have to do this check first.
- */
- if ((forwarded = SGEN_OBJECT_IS_FORWARDED (obj))) {
- *ptr = forwarded;
- return;
- }
-
- objsize = SGEN_ALIGN_UP (sgen_safe_object_get_size ((MonoObject*)obj));
-
- if (objsize <= SGEN_MAX_SMALL_OBJ_SIZE)
-#endif
- {
- int size_index;
- gboolean evacuate;
-
- block = MS_BLOCK_FOR_OBJ (obj);
- size_index = block->obj_size_index;
- evacuate = evacuate_block_obj_sizes [size_index];
-
-#ifdef FIXED_HEAP
- /*
- * We could also check for !block->has_pinned
- * here, but it would only make an uncommon case
- * faster, namely objects that are in blocks
- * whose slot sizes are evacuated but which have
- * pinned objects.
- */
- if (evacuate && (forwarded = SGEN_OBJECT_IS_FORWARDED (obj))) {
- *ptr = forwarded;
- return;
- }
+ return 0;
#endif
+}
- if (evacuate && !block->has_pinned) {
- g_assert (!SGEN_OBJECT_IS_PINNED (obj));
- if (block->is_to_space)
- return;
- HEAVY_STAT (++stat_major_objects_evacuated);
- goto do_copy_object;
- } else {
- MS_MARK_OBJECT_AND_ENQUEUE (obj, block, queue);
- }
- } else {
- if (sgen_los_object_is_pinned (obj))
- return;
- binary_protocol_pin (obj, (gpointer)SGEN_LOAD_VTABLE (obj), sgen_safe_object_get_size ((MonoObject*)obj));
-
-#ifdef ENABLE_DTRACE
- if (G_UNLIKELY (MONO_GC_OBJ_PINNED_ENABLED ())) {
- MonoVTable *vt = (MonoVTable*)SGEN_LOAD_VTABLE (obj);
- MONO_GC_OBJ_PINNED ((mword)obj, sgen_safe_object_get_size (obj), vt->klass->name_space, vt->klass->name, GENERATION_OLD);
- }
-#endif
+#ifdef HEAVY_STATISTICS
+static guint64 stat_optimized_copy;
+static guint64 stat_optimized_copy_nursery;
+static guint64 stat_optimized_copy_nursery_forwarded;
+static guint64 stat_optimized_copy_nursery_pinned;
+static guint64 stat_optimized_copy_major;
+static guint64 stat_optimized_copy_major_small_fast;
+static guint64 stat_optimized_copy_major_small_slow;
+static guint64 stat_optimized_copy_major_large;
+static guint64 stat_optimized_copy_major_forwarded;
+static guint64 stat_optimized_copy_major_small_evacuate;
+static guint64 stat_optimized_major_scan;
+static guint64 stat_optimized_major_scan_no_refs;
+
+static guint64 stat_drain_prefetch_fills;
+static guint64 stat_drain_prefetch_fill_failures;
+static guint64 stat_drain_loops;
+#endif
+
+static void major_scan_object_with_evacuation (char *start, mword desc, SgenGrayQueue *queue);
+
+#define COPY_OR_MARK_FUNCTION_NAME major_copy_or_mark_object_no_evacuation
+#define SCAN_OBJECT_FUNCTION_NAME major_scan_object_no_evacuation
+#define DRAIN_GRAY_STACK_FUNCTION_NAME drain_gray_stack_no_evacuation
+#include "sgen-marksweep-drain-gray-stack.h"
+
+#define COPY_OR_MARK_WITH_EVACUATION
+#define COPY_OR_MARK_FUNCTION_NAME major_copy_or_mark_object_with_evacuation
+#define SCAN_OBJECT_FUNCTION_NAME major_scan_object_with_evacuation
+#define DRAIN_GRAY_STACK_FUNCTION_NAME drain_gray_stack_with_evacuation
+#include "sgen-marksweep-drain-gray-stack.h"
- sgen_los_pin_object (obj);
- if (SGEN_OBJECT_HAS_REFERENCES (obj))
- GRAY_OBJECT_ENQUEUE (queue, obj);
+static gboolean
+drain_gray_stack (ScanCopyContext ctx)
+{
+ gboolean evacuation = FALSE;
+ int i;
+ for (i = 0; i < num_block_obj_sizes; ++i) {
+ if (evacuate_block_obj_sizes [i]) {
+ evacuation = TRUE;
+ break;
}
}
+
+ if (evacuation)
+ return drain_gray_stack_with_evacuation (ctx);
+ else
+ return drain_gray_stack_no_evacuation (ctx);
}
+
+#ifdef SGEN_HAVE_CONCURRENT_MARK
+#include "sgen-marksweep-scan-object-concurrent.h"
#endif
static void
major_copy_or_mark_object_canonical (void **ptr, SgenGrayQueue *queue)
{
- major_copy_or_mark_object (ptr, *ptr, queue);
+ major_copy_or_mark_object_with_evacuation (ptr, *ptr, queue);
}
#ifdef SGEN_HAVE_CONCURRENT_MARK
static void
major_copy_or_mark_object_concurrent_canonical (void **ptr, SgenGrayQueue *queue)
{
- major_copy_or_mark_object_concurrent (ptr, *ptr, queue);
-}
-
-static long long
-major_get_and_reset_num_major_objects_marked (void)
-{
-#ifdef SGEN_COUNT_NUMBER_OF_MAJOR_OBJECTS_MARKED
- long long num = num_major_objects_marked;
- num_major_objects_marked = 0;
- return num;
-#else
- return 0;
-#endif
+ major_copy_or_mark_object_with_evacuation_concurrent (ptr, *ptr, queue);
}
#endif
-#include "sgen-major-scan-object.h"
-
-#ifdef SGEN_HAVE_CONCURRENT_MARK
-#define SCAN_FOR_CONCURRENT_MARK
-#include "sgen-major-scan-object.h"
-#undef SCAN_FOR_CONCURRENT_MARK
-#endif
-
static void
mark_pinned_objects_in_block (MSBlockInfo *block, SgenGrayQueue *queue)
{
- int i;
+ void **entry, **end;
int last_index = -1;
- if (!block->pin_queue_num_entries)
+ if (block->pin_queue_first_entry == block->pin_queue_last_entry)
return;
block->has_pinned = TRUE;
- for (i = 0; i < block->pin_queue_num_entries; ++i) {
- int index = MS_BLOCK_OBJ_INDEX (block->pin_queue_start [i], block);
- SGEN_ASSERT (9, index >= 0 && index < MS_BLOCK_FREE / block->obj_size, "invalid object %p index %d max-index %d", block->pin_queue_start [i], index, MS_BLOCK_FREE / block->obj_size);
+ entry = sgen_pinning_get_entry (block->pin_queue_first_entry);
+ end = sgen_pinning_get_entry (block->pin_queue_last_entry);
+
+ for (; entry < end; ++entry) {
+ int index = MS_BLOCK_OBJ_INDEX (*entry, block);
+ char *obj;
+ SGEN_ASSERT (9, index >= 0 && index < MS_BLOCK_FREE / block->obj_size, "invalid object %p index %d max-index %d", *entry, index, MS_BLOCK_FREE / block->obj_size);
if (index == last_index)
continue;
- MS_MARK_OBJECT_AND_ENQUEUE_CHECKED (MS_BLOCK_OBJ (block, index), block, queue);
+ obj = MS_BLOCK_OBJ (block, index);
+ MS_MARK_OBJECT_AND_ENQUEUE_CHECKED (obj, sgen_obj_get_descriptor (obj), block, queue);
last_index = index;
}
}
ms_sweep (void)
{
int i;
- MSBlockInfo **iter;
+ MSBlockInfo *block;
/* statistics for evacuation */
int *slots_available = alloca (sizeof (int) * num_block_obj_sizes);
}
/* traverse all blocks, free and zero unmarked objects */
- iter = &all_blocks;
- while (*iter) {
- MSBlockInfo *block = *iter;
+ FOREACH_BLOCK (block) {
int count;
gboolean have_live = FALSE;
gboolean has_pinned;
slots_available [obj_size_index] += count;
}
- iter = &block->next;
-
/*
* If there are free slots in the block, add
* the block to the corresponding free list.
* Blocks without live objects are removed from the
* block list and freed.
*/
- *iter = block->next;
+ DELETE_BLOCK_IN_FOREACH ();
binary_protocol_empty (MS_BLOCK_OBJ (block, 0), (char*)MS_BLOCK_OBJ (block, count) - (char*)MS_BLOCK_OBJ (block, 0));
-#ifdef FIXED_HEAP
ms_free_block (block);
-#else
- ms_free_block (block->block);
-
- sgen_free_internal (block, INTERNAL_MEM_MS_BLOCK_INFO);
-#endif
--num_major_sections;
}
- }
+ } END_FOREACH_BLOCK;
+ sgen_pointer_queue_remove_nulls (&allocated_blocks);
for (i = 0; i < num_block_obj_sizes; ++i) {
float usage = (float)slots_used [i] / (float)slots_available [i];
// Sweep all unswept blocks
if (lazy_sweep) {
- MSBlockInfo **iter;
+ MSBlockInfo *block;
MONO_GC_SWEEP_BEGIN (GENERATION_OLD, TRUE);
- iter = &all_blocks;
- while (*iter) {
- MSBlockInfo *block = *iter;
-
+ FOREACH_BLOCK (block) {
sweep_block (block, TRUE);
-
- iter = &block->next;
- }
+ } END_FOREACH_BLOCK;
MONO_GC_SWEEP_END (GENERATION_OLD, TRUE);
}
}
static void
-major_finish_major_collection (void)
+major_finish_major_collection (ScannedObjectCounts *counts)
{
+#ifdef SGEN_HEAVY_BINARY_PROTOCOL
+ if (binary_protocol_is_enabled ()) {
+ counts->num_scanned_objects = scanned_objects_list.next_slot;
+
+ sgen_pointer_queue_sort_uniq (&scanned_objects_list);
+ counts->num_unique_scanned_objects = scanned_objects_list.next_slot;
+
+ sgen_pointer_queue_clear (&scanned_objects_list);
+ }
+#endif
}
-#if !defined(FIXED_HEAP) && SIZEOF_VOID_P != 8
+#if SIZEOF_VOID_P != 8
static int
compare_pointers (const void *va, const void *vb) {
char *a = *(char**)va, *b = *(char**)vb;
static void
major_have_computer_minor_collection_allowance (void)
{
-#ifndef FIXED_HEAP
size_t section_reserve = sgen_get_minor_collection_allowance () / MS_BLOCK_SIZE;
g_assert (have_swept);
++stat_major_blocks_freed_individual;
#endif
}
-#endif
}
static void
MSBlockInfo *block;
FOREACH_BLOCK (block) {
- block->pin_queue_start = sgen_find_optimized_pin_queue_area (block->block + MS_BLOCK_SKIP, block->block + MS_BLOCK_SIZE,
- &block->pin_queue_num_entries);
+ sgen_find_optimized_pin_queue_area (MS_BLOCK_FOR_BLOCK_INFO (block) + MS_BLOCK_SKIP, MS_BLOCK_FOR_BLOCK_INFO (block) + MS_BLOCK_SIZE,
+ &block->pin_queue_first_entry, &block->pin_queue_last_entry);
} END_FOREACH_BLOCK;
}
static gboolean
major_handle_gc_param (const char *opt)
{
-#ifdef FIXED_HEAP
- if (g_str_has_prefix (opt, "major-heap-size=")) {
- const char *arg = strchr (opt, '=') + 1;
- size_t size;
- if (!mono_gc_parse_environment_string_extract_number (arg, &size))
- return FALSE;
- ms_heap_num_blocks = (size + MS_BLOCK_SIZE - 1) / MS_BLOCK_SIZE;
- g_assert (ms_heap_num_blocks > 0);
- return TRUE;
- } else
-#endif
if (g_str_has_prefix (opt, "evacuation-threshold=")) {
const char *arg = strchr (opt, '=') + 1;
int percentage = atoi (arg);
{
fprintf (stderr,
""
-#ifdef FIXED_HEAP
- " major-heap-size=N (where N is an integer, possibly with a k, m or a g suffix)\n"
-#endif
" evacuation-threshold=P (where P is a percentage, an integer in 0-100)\n"
" (no-)lazy-sweep\n"
);
major_iterate_live_block_ranges (sgen_cardtable_block_callback callback)
{
MSBlockInfo *block;
+ gboolean has_references;
- FOREACH_BLOCK (block) {
- if (block->has_references)
- callback ((mword)block->block, MS_BLOCK_SIZE);
+ FOREACH_BLOCK_HAS_REFERENCES (block, has_references) {
+ if (has_references)
+ callback ((mword)MS_BLOCK_FOR_BLOCK_INFO (block), MS_BLOCK_SIZE);
} END_FOREACH_BLOCK;
}
#ifdef HEAVY_STATISTICS
-extern long long marked_cards;
-extern long long scanned_cards;
-extern long long scanned_objects;
-extern long long remarked_cards;
+extern guint64 marked_cards;
+extern guint64 scanned_cards;
+extern guint64 scanned_objects;
+extern guint64 remarked_cards;
#endif
#define CARD_WORDS_PER_BLOCK (CARDS_PER_BLOCK / SIZEOF_VOID_P)
major_scan_card_table (gboolean mod_union, SgenGrayQueue *queue)
{
MSBlockInfo *block;
+ gboolean has_references;
ScanObjectFunc scan_func = sgen_get_current_object_ops ()->scan_object;
#ifdef SGEN_HAVE_CONCURRENT_MARK
g_assert (!mod_union);
#endif
- FOREACH_BLOCK (block) {
+ FOREACH_BLOCK_HAS_REFERENCES (block, has_references) {
int block_obj_size;
char *block_start;
- if (!block->has_references)
+ if (!has_references)
continue;
block_obj_size = block->obj_size;
- block_start = block->block;
+ block_start = MS_BLOCK_FOR_BLOCK_INFO (block);
if (block_obj_size >= CARD_SIZE_IN_BYTES) {
guint8 *cards;
end = block_start + MS_BLOCK_SIZE;
base = sgen_card_table_align_pointer (obj);
+ cards += MS_BLOCK_SKIP >> CARD_BITS;
+
while (obj < end) {
size_t card_offset;
}
card_data_end = card_data + CARDS_PER_BLOCK;
+ card_data += MS_BLOCK_SKIP >> CARD_BITS;
+
for (card_data = initial_skip_card (card_data); card_data < card_data_end; ++card_data) { //card_data = skip_card (card_data + 1, card_data_end)) {
size_t index;
size_t idx = card_data - card_base;
}
HEAVY_STAT (++scanned_objects);
- scan_func (obj, queue);
+ scan_func (obj, sgen_obj_get_descriptor (obj), queue);
next_small:
obj += block_obj_size;
}
major_count_cards (long long *num_total_cards, long long *num_marked_cards)
{
MSBlockInfo *block;
+ gboolean has_references;
long long total_cards = 0;
long long marked_cards = 0;
- FOREACH_BLOCK (block) {
- guint8 *cards = sgen_card_table_get_card_scan_address ((mword) block->block);
+ FOREACH_BLOCK_HAS_REFERENCES (block, has_references) {
+ guint8 *cards = sgen_card_table_get_card_scan_address ((mword) MS_BLOCK_FOR_BLOCK_INFO (block));
int i;
- if (!block->has_references)
+ if (!has_references)
continue;
total_cards += CARDS_PER_BLOCK;
size_t num_cards;
block->cardtable_mod_union = sgen_card_table_update_mod_union (block->cardtable_mod_union,
- block->block, MS_BLOCK_SIZE, &num_cards);
+ MS_BLOCK_FOR_BLOCK_INFO (block), MS_BLOCK_SIZE, &num_cards);
SGEN_ASSERT (0, num_cards == CARDS_PER_BLOCK, "Number of cards calculation is wrong");
} END_FOREACH_BLOCK;
major_get_cardtable_mod_union_for_object (char *obj)
{
MSBlockInfo *block = MS_BLOCK_FOR_OBJ (obj);
- return &block->cardtable_mod_union [(obj - (char*)sgen_card_table_align_pointer (block->block)) >> CARD_BITS];
+ return &block->cardtable_mod_union [(obj - (char*)sgen_card_table_align_pointer (MS_BLOCK_FOR_BLOCK_INFO (block))) >> CARD_BITS];
}
#endif
lists [i] = sgen_alloc_internal_dynamic (sizeof (MSBlockInfo*) * num_block_obj_sizes, INTERNAL_MEM_MS_TABLES, TRUE);
}
-#ifdef SGEN_PARALLEL_MARK
-static void*
-major_alloc_worker_data (void)
-{
- /* FIXME: free this when the workers come down */
- MSBlockInfo ***lists = malloc (sizeof (MSBlockInfo**) * MS_BLOCK_TYPE_MAX);
- alloc_free_block_lists (lists);
- return lists;
-}
-
-static void
-major_init_worker_thread (void *data)
-{
- MSBlockInfo ***lists = data;
- int i;
-
- g_assert (lists && lists != free_block_lists);
- for (i = 0; i < MS_BLOCK_TYPE_MAX; ++i) {
- int j;
- for (j = 0; j < num_block_obj_sizes; ++j)
- g_assert (!lists [i][j]);
- }
-
-#ifdef HAVE_KW_THREAD
- workers_free_block_lists = data;
-#else
- mono_native_tls_set_value (workers_free_block_lists_key, data);
-#endif
-}
-
-static void
-major_reset_worker_data (void *data)
-{
- MSBlockInfo ***lists = data;
- int i;
- for (i = 0; i < MS_BLOCK_TYPE_MAX; ++i) {
- int j;
- for (j = 0; j < num_block_obj_sizes; ++j)
- lists [i][j] = NULL;
- }
-}
-#endif
-
#undef pthread_create
static void
static void
sgen_marksweep_init_internal (SgenMajorCollector *collector, gboolean is_concurrent)
#else // SGEN_HAVE_CONCURRENT_MARK
-#ifdef SGEN_PARALLEL_MARK
-#ifdef FIXED_HEAP
-void
-sgen_marksweep_fixed_par_init (SgenMajorCollector *collector)
-#else // FIXED_HEAP
-void
-sgen_marksweep_par_init (SgenMajorCollector *collector)
-#endif // FIXED_HEAP
-#else // SGEN_PARALLEL_MARK
-#ifdef FIXED_HEAP
-void
-sgen_marksweep_fixed_init (SgenMajorCollector *collector)
-#else // FIXED_HEAP
#error unknown configuration
-#endif // FIXED_HEAP
-#endif // SGEN_PARALLEL_MARK
#endif // SGEN_HAVE_CONCURRENT_MARK
{
int i;
-#ifndef FIXED_HEAP
sgen_register_fixed_internal_mem_type (INTERNAL_MEM_MS_BLOCK_INFO, sizeof (MSBlockInfo));
-#endif
num_block_obj_sizes = ms_calculate_block_obj_sizes (MS_BLOCK_OBJ_SIZE_FACTOR, NULL);
block_obj_sizes = sgen_alloc_internal_dynamic (sizeof (int) * num_block_obj_sizes, INTERNAL_MEM_MS_TABLES, TRUE);
for (i = 0; i < MS_NUM_FAST_BLOCK_OBJ_SIZE_INDEXES * 8; ++i)
g_assert (MS_BLOCK_OBJ_SIZE_INDEX (i) == ms_find_block_obj_size_index (i));
-#ifdef SGEN_PARALLEL_MARK
- LOCK_INIT (ms_block_list_mutex);
-#endif
-
- mono_counters_register ("# major blocks allocated", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_major_blocks_alloced);
- mono_counters_register ("# major blocks freed", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_major_blocks_freed);
- mono_counters_register ("# major blocks lazy swept", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_major_blocks_lazy_swept);
- mono_counters_register ("# major objects evacuated", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_major_objects_evacuated);
+ mono_counters_register ("# major blocks allocated", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_major_blocks_alloced);
+ mono_counters_register ("# major blocks freed", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_major_blocks_freed);
+ mono_counters_register ("# major blocks lazy swept", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_major_blocks_lazy_swept);
+ mono_counters_register ("# major objects evacuated", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_major_objects_evacuated);
#if SIZEOF_VOID_P != 8
- mono_counters_register ("# major blocks freed ideally", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_major_blocks_freed_ideal);
- mono_counters_register ("# major blocks freed less ideally", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_major_blocks_freed_less_ideal);
- mono_counters_register ("# major blocks freed individually", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_major_blocks_freed_individual);
- mono_counters_register ("# major blocks allocated less ideally", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_major_blocks_alloced_less_ideal);
-#endif
-
-#ifdef SGEN_PARALLEL_MARK
-#ifndef HAVE_KW_THREAD
- mono_native_tls_alloc (&workers_free_block_lists_key, NULL);
-#endif
+ mono_counters_register ("# major blocks freed ideally", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_major_blocks_freed_ideal);
+ mono_counters_register ("# major blocks freed less ideally", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_major_blocks_freed_less_ideal);
+ mono_counters_register ("# major blocks freed individually", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_major_blocks_freed_individual);
+ mono_counters_register ("# major blocks allocated less ideally", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_major_blocks_alloced_less_ideal);
#endif
collector->section_size = MAJOR_SECTION_SIZE;
-#ifdef SGEN_PARALLEL_MARK
- collector->is_parallel = TRUE;
- collector->alloc_worker_data = major_alloc_worker_data;
- collector->init_worker_thread = major_init_worker_thread;
- collector->reset_worker_data = major_reset_worker_data;
-#else
- collector->is_parallel = FALSE;
-#endif
+
#ifdef SGEN_HAVE_CONCURRENT_MARK
concurrent_mark = is_concurrent;
if (is_concurrent) {
collector->is_concurrent = TRUE;
collector->want_synchronous_collection = &want_evacuation;
- collector->get_and_reset_num_major_objects_marked = major_get_and_reset_num_major_objects_marked;
} else
#endif
{
collector->is_concurrent = FALSE;
collector->want_synchronous_collection = NULL;
}
+ collector->get_and_reset_num_major_objects_marked = major_get_and_reset_num_major_objects_marked;
collector->supports_cardtable = TRUE;
collector->have_swept = &have_swept;
collector->alloc_degraded = major_alloc_degraded;
collector->alloc_object = major_alloc_object;
-#ifdef SGEN_PARALLEL_MARK
- collector->par_alloc_object = major_par_alloc_object;
-#endif
collector->free_pinned_object = free_pinned_object;
collector->iterate_objects = major_iterate_objects;
collector->free_non_pinned_object = major_free_non_pinned_object;
collector->count_cards = major_count_cards;
collector->major_ops.copy_or_mark_object = major_copy_or_mark_object_canonical;
- collector->major_ops.scan_object = major_scan_object;
+ collector->major_ops.scan_object = major_scan_object_with_evacuation;
#ifdef SGEN_HAVE_CONCURRENT_MARK
if (is_concurrent) {
collector->major_concurrent_ops.copy_or_mark_object = major_copy_or_mark_object_concurrent_canonical;
- collector->major_concurrent_ops.scan_object = major_scan_object_concurrent;
+ collector->major_concurrent_ops.scan_object = major_scan_object_no_mark_concurrent;
collector->major_concurrent_ops.scan_vtype = major_scan_vtype_concurrent;
}
#endif
+#if !defined (FIXED_HEAP) && !defined (SGEN_PARALLEL_MARK)
+ /* FIXME: this will not work with evacuation or the split nursery. */
+ if (!is_concurrent)
+ collector->drain_gray_stack = drain_gray_stack;
+
+#ifdef HEAVY_STATISTICS
+ mono_counters_register ("Optimized copy", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_optimized_copy);
+ mono_counters_register ("Optimized copy nursery", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_optimized_copy_nursery);
+ mono_counters_register ("Optimized copy nursery forwarded", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_optimized_copy_nursery_forwarded);
+ mono_counters_register ("Optimized copy nursery pinned", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_optimized_copy_nursery_pinned);
+ mono_counters_register ("Optimized copy major", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_optimized_copy_major);
+ mono_counters_register ("Optimized copy major small fast", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_optimized_copy_major_small_fast);
+ mono_counters_register ("Optimized copy major small slow", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_optimized_copy_major_small_slow);
+ mono_counters_register ("Optimized copy major large", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_optimized_copy_major_large);
+ mono_counters_register ("Optimized major scan", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_optimized_major_scan);
+ mono_counters_register ("Optimized major scan no refs", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_optimized_major_scan_no_refs);
+
+ mono_counters_register ("Gray stack drain loops", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_drain_loops);
+ mono_counters_register ("Gray stack prefetch fills", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_drain_prefetch_fills);
+ mono_counters_register ("Gray stack prefetch failures", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_drain_prefetch_fill_failures);
+#endif
+#endif
+
+#ifdef SGEN_HEAVY_BINARY_PROTOCOL
+ mono_mutex_init (&scanned_objects_list_lock);
+#endif
+
+ SGEN_ASSERT (0, SGEN_MAX_SMALL_OBJ_SIZE <= MS_BLOCK_FREE / 2, "MAX_SMALL_OBJ_SIZE must be at most MS_BLOCK_FREE / 2");
+
/*cardtable requires major pages to be 8 cards aligned*/
g_assert ((MS_BLOCK_SIZE % (8 * CARD_SIZE_IN_BYTES)) == 0);
}