#define MS_BLOCK_FOR_BLOCK_INFO(b) ((char*)(b))
-#define MS_BLOCK_OBJ(b,i) (MS_BLOCK_FOR_BLOCK_INFO(b) + MS_BLOCK_SKIP + (b)->obj_size * (i))
+#define MS_BLOCK_OBJ(b,i) ((GCObject *)(MS_BLOCK_FOR_BLOCK_INFO(b) + MS_BLOCK_SKIP + (b)->obj_size * (i)))
#define MS_BLOCK_OBJ_FOR_SIZE(b,i,obj_size) (MS_BLOCK_FOR_BLOCK_INFO(b) + MS_BLOCK_SKIP + (obj_size) * (i))
#define MS_BLOCK_DATA_FOR_OBJ(o) ((char*)((mword)(o) & ~(mword)(MS_BLOCK_SIZE - 1)))
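/*
 * Illustrative note: MS_BLOCK_OBJ and the MS_BLOCK_OBJ_INDEX lookup used
 * further down are inverses. Because blocks are MS_BLOCK_SIZE-aligned, an
 * interior pointer maps back to its block and slot roughly like this
 * (a sketch; the real index macro handles the exact rounding):
 *
 *	MSBlockInfo *block = (MSBlockInfo *)MS_BLOCK_DATA_FOR_OBJ (ptr);
 *	int idx = ((char *)ptr - (MS_BLOCK_FOR_BLOCK_INFO (block) + MS_BLOCK_SKIP)) / block->obj_size;
 */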
static gboolean *evacuate_block_obj_sizes;
static float evacuation_threshold = 0.666f;
-static float concurrent_evacuation_threshold = 0.666f;
-static gboolean want_evacuation = FALSE;
static gboolean lazy_sweep = FALSE;
#define BLOCK_IS_TAGGED_CHECKING(bl) SGEN_POINTER_IS_TAGGED_2 ((bl))
#define BLOCK_TAG_CHECKING(bl) SGEN_POINTER_TAG_2 ((bl))
-#define BLOCK_UNTAG(bl) SGEN_POINTER_UNTAG_12 ((bl))
+#define BLOCK_UNTAG(bl) ((MSBlockInfo *)SGEN_POINTER_UNTAG_12 ((bl)))
#define BLOCK_TAG(bl) ((bl)->has_references ? BLOCK_TAG_HAS_REFERENCES ((bl)) : (bl))
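/*
 * Sketch of the low-bit pointer tagging these macros wrap (illustrative;
 * it assumes MSBlockInfo pointers are at least 4-byte aligned, leaving
 * the two low bits free for flags):
 *
 *	MSBlockInfo *tagged = (MSBlockInfo *)((mword)block | 1);	// e.g. "has references"
 *	MSBlockInfo *plain = (MSBlockInfo *)((mword)tagged & ~(mword)3);	// what BLOCK_UNTAG does
 */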
size_t __index; \
SGEN_ASSERT (0, sgen_is_world_stopped () && !sweep_in_progress (), "Can't iterate blocks while the world is running or sweep is in progress."); \
for (__index = 0; __index < allocated_blocks.next_slot; ++__index) { \
- (bl) = allocated_blocks.data [__index]; \
+ (bl) = (MSBlockInfo *)allocated_blocks.data [__index]; \
(hr) = BLOCK_IS_TAGGED_HAS_REFERENCES ((bl)); \
(bl) = BLOCK_UNTAG ((bl));
#define END_FOREACH_BLOCK_NO_LOCK } }
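/*
 * Typical use of the iteration macros above (a sketch; the opening macro,
 * elided here, pairs with END_FOREACH_BLOCK_NO_LOCK, and process_block is
 * a hypothetical callback):
 *
 *	MSBlockInfo *block;
 *	gboolean has_refs;
 *	FOREACH_BLOCK_HAS_REFERENCES_NO_LOCK (block, has_refs) {
 *		if (has_refs)
 *			process_block (block);
 *	} END_FOREACH_BLOCK_NO_LOCK;
 */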
static guint64 stat_major_blocks_alloced = 0;
static guint64 stat_major_blocks_freed = 0;
static guint64 stat_major_blocks_lazy_swept = 0;
-static guint64 stat_major_objects_evacuated = 0;
#if SIZEOF_VOID_P != 8
static guint64 stat_major_blocks_freed_ideal = 0;
if (!binary_protocol_is_enabled ())
return;
- mono_mutex_lock (&scanned_objects_list_lock);
+ mono_os_mutex_lock (&scanned_objects_list_lock);
sgen_pointer_queue_add (&scanned_objects_list, ptr);
- mono_mutex_unlock (&scanned_objects_list_lock);
+ mono_os_mutex_unlock (&scanned_objects_list_lock);
}
#endif
{
char *start;
if (nursery_align)
- start = sgen_alloc_os_memory_aligned (nursery_size, nursery_align, SGEN_ALLOC_HEAP | SGEN_ALLOC_ACTIVATE, "nursery");
+ start = (char *)sgen_alloc_os_memory_aligned (nursery_size, nursery_align, (SgenAllocFlags)(SGEN_ALLOC_HEAP | SGEN_ALLOC_ACTIVATE), "nursery");
else
- start = sgen_alloc_os_memory (nursery_size, SGEN_ALLOC_HEAP | SGEN_ALLOC_ACTIVATE, "nursery");
+ start = (char *)sgen_alloc_os_memory (nursery_size, (SgenAllocFlags)(SGEN_ALLOC_HEAP | SGEN_ALLOC_ACTIVATE), "nursery");
return start;
}
*/
int alloc_num = MS_BLOCK_ALLOC_NUM;
for (;;) {
- p = sgen_alloc_os_memory_aligned (MS_BLOCK_SIZE * alloc_num, MS_BLOCK_SIZE, SGEN_ALLOC_HEAP | SGEN_ALLOC_ACTIVATE,
- alloc_num == 1 ? "major heap section" : NULL);
+ p = (char *)sgen_alloc_os_memory_aligned (MS_BLOCK_SIZE * alloc_num, MS_BLOCK_SIZE,
+ (SgenAllocFlags)(SGEN_ALLOC_HEAP | SGEN_ALLOC_ACTIVATE),
+ alloc_num == 1 ? "major heap section" : NULL);
if (p)
break;
alloc_num >>= 1;
MSBlockInfo *old;
do {
block->next_free = old = free_blocks [size_index];
- } while (SGEN_CAS_PTR ((gpointer)&free_blocks [size_index], block, old) != old);
+ } while (SGEN_CAS_PTR ((volatile gpointer *)&free_blocks [size_index], block, old) != old);
}
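/*
 * The loop above is the classic lock-free stack push. A minimal standalone
 * sketch of the same pattern (illustrative; __sync_val_compare_and_swap
 * stands in for whatever SGEN_CAS_PTR expands to on the target):
 *
 *	static Node * volatile head;
 *
 *	static void
 *	push (Node *node)
 *	{
 *		Node *old;
 *		do {
 *			node->next = old = head;
 *		} while (__sync_val_compare_and_swap (&head, old, node) != old);
 *	}
 */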
static void major_finish_sweep_checking (void);
* Blocks that are to-space are not evacuated from. During a major collection
* blocks are allocated for two reasons: evacuating objects from the nursery and
* evacuating them from major blocks marked for evacuation. In both cases we don't
- * want further evacuation.
+ * want further evacuation. We also don't want to evacuate objects allocated during
+ * the concurrent mark, since that would needlessly lengthen the finishing pause.
*/
- info->is_to_space = (sgen_get_current_collection_generation () == GENERATION_OLD);
- info->state = (info->is_to_space || sgen_concurrent_collection_in_progress ()) ? BLOCK_STATE_MARKING : BLOCK_STATE_SWEPT;
+ info->is_to_space = (sgen_get_current_collection_generation () == GENERATION_OLD) || sgen_concurrent_collection_in_progress ();
+ info->state = info->is_to_space ? BLOCK_STATE_MARKING : BLOCK_STATE_SWEPT;
SGEN_ASSERT (6, !sweep_in_progress () || info->state == BLOCK_STATE_SWEPT, "How do we add a new block to be swept while sweeping?");
info->cardtable_mod_union = NULL;
add_free_block (free_blocks, size_index, info);
/*
- * This is the only place where the `allocated_blocks` array can potentially grow.
- * We need to make sure concurrent sweep isn't running when that happens, so in that
- * specific case we just wait for sweep to finish.
+ * Adding to the allocated_blocks array races with the removal of NULL
+ * entries during sweeping, so we wait for sweep to finish to avoid that.
+ *
+ * The memory barriers here and in `sweep_job_func()` are required because we
+ * need `allocated_blocks` synchronized between this thread and the sweep thread.
*/
- if (sgen_pointer_queue_will_grow (&allocated_blocks))
- major_finish_sweep_checking ();
+ major_finish_sweep_checking ();
+ mono_memory_barrier ();
sgen_pointer_queue_add (&allocated_blocks, BLOCK_TAG (info));
}
static gboolean
-obj_is_from_pinned_alloc (char *ptr)
+ptr_is_from_pinned_alloc (char *ptr)
{
MSBlockInfo *block;
next_free_slot = *(void**)obj;
if (next_free_slot) {
- block->free_list = next_free_slot;
+ block->free_list = (gpointer *)next_free_slot;
return obj;
}
next_free_block = block->next_free;
- if (SGEN_CAS_PTR ((gpointer)&free_blocks [size_index], next_free_block, block) != block)
+ if (SGEN_CAS_PTR ((volatile gpointer *)&free_blocks [size_index], next_free_block, block) != block)
goto retry;
block->free_list = NULL;
return obj;
}
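/*
 * Sketch of the pop side above (illustrative): slots come off the
 * block-local free list without contention; only when a block runs dry is
 * the block itself CAS-popped off free_blocks [size_index], retrying from
 * the top if another thread won the race.
 */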
-static void*
-alloc_obj (GCVTable *vtable, size_t size, gboolean pinned, gboolean has_references)
+static GCObject*
+alloc_obj (GCVTable vtable, size_t size, gboolean pinned, gboolean has_references)
{
int size_index = MS_BLOCK_OBJ_SIZE_INDEX (size);
MSBlockInfo * volatile * free_blocks = FREE_BLOCKS (pinned, has_references);
obj = unlink_slot_from_free_list_uncontested (free_blocks, size_index);
- *(GCVTable**)obj = vtable;
+ /* FIXME: assumes object layout */
+ *(GCVTable*)obj = vtable;
- return obj;
+ return (GCObject *)obj;
}
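/*
 * Note on the FIXME above (illustrative): the store works because the
 * first word of every object is assumed to be its vtable pointer, i.e.
 * the same word SGEN_LOAD_VTABLE reads back.
 */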
-static void*
-major_alloc_object (GCVTable *vtable, size_t size, gboolean has_references)
+static GCObject*
+major_alloc_object (GCVTable vtable, size_t size, gboolean has_references)
{
return alloc_obj (vtable, size, FALSE, has_references);
}
* single thread and has the GC lock, so we don't need an extra lock.
*/
static void
-free_object (char *obj, size_t size, gboolean pinned)
+free_object (GCObject *obj, size_t size, gboolean pinned)
{
MSBlockInfo *block = MS_BLOCK_FOR_OBJ (obj);
int word, bit;
}
static void
-major_free_non_pinned_object (char *obj, size_t size)
+major_free_non_pinned_object (GCObject *obj, size_t size)
{
free_object (obj, size, FALSE);
}
/* size is a multiple of SGEN_ALLOC_ALIGN */
-static void*
-major_alloc_small_pinned_obj (GCVTable *vtable, size_t size, gboolean has_references)
+static GCObject*
+major_alloc_small_pinned_obj (GCVTable vtable, size_t size, gboolean has_references)
{
void *res;
sgen_perform_collection (0, GENERATION_OLD, "pinned alloc failure", TRUE);
res = alloc_obj (vtable, size, TRUE, has_references);
}
- return res;
+ return (GCObject *)res;
}
static void
-free_pinned_object (char *obj, size_t size)
+free_pinned_object (GCObject *obj, size_t size)
{
free_object (obj, size, TRUE);
}
/*
* size is already rounded up and we hold the GC lock.
*/
-static void*
-major_alloc_degraded (GCVTable *vtable, size_t size)
+static GCObject*
+major_alloc_degraded (GCVTable vtable, size_t size)
{
- void *obj = alloc_obj (vtable, size, FALSE, SGEN_VTABLE_HAS_REFERENCES (vtable));
+ GCObject *obj;
+
+ obj = alloc_obj (vtable, size, FALSE, SGEN_VTABLE_HAS_REFERENCES (vtable));
if (G_LIKELY (obj)) {
HEAVY_STAT (++stat_objects_alloced_degraded);
HEAVY_STAT (stat_bytes_alloced_degraded += size);
* been marked or copied.
*/
static gboolean
-major_is_object_live (char *obj)
+major_is_object_live (GCObject *obj)
{
MSBlockInfo *block;
int word, bit;
if (sgen_ptr_in_nursery (obj))
return FALSE;
- objsize = SGEN_ALIGN_UP (sgen_safe_object_get_size ((GCObject*)obj));
+ objsize = SGEN_ALIGN_UP (sgen_safe_object_get_size (obj));
/* LOS */
if (objsize > SGEN_MAX_SMALL_OBJ_SIZE)
*start = NULL;
for (i = 0; i <= count; ++i) {
- if (ptr >= MS_BLOCK_OBJ (block, i) && ptr < MS_BLOCK_OBJ (block, i + 1)) {
- *start = MS_BLOCK_OBJ (block, i);
+ if (ptr >= (char*)MS_BLOCK_OBJ (block, i) && ptr < (char*)MS_BLOCK_OBJ (block, i + 1)) {
+ *start = (char *)MS_BLOCK_OBJ (block, i);
break;
}
}
}
static gboolean
-try_set_sweep_state (int new, int expected)
+try_set_sweep_state (int new_, int expected)
{
- int old = SGEN_CAS (&sweep_state, new, expected);
+ int old = SGEN_CAS (&sweep_state, new_, expected);
return old == expected;
}
static void
-set_sweep_state (int new, int expected)
+set_sweep_state (int new_, int expected)
{
- gboolean success = try_set_sweep_state (new, expected);
+ gboolean success = try_set_sweep_state (new_, expected);
SGEN_ASSERT (0, success, "Could not set sweep state.");
}
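/*
 * The helpers above implement a CAS-guarded state machine: a transition
 * only takes effect if no other thread moved sweep_state out of
 * `expected` first. For example, sweep_finish () below ends sweeping with
 *
 *	set_sweep_state (SWEEP_STATE_SWEPT, SWEEP_STATE_COMPACTING);
 *
 * which asserts that nobody raced us out of the compacting state.
 */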
continue;
}
if (MS_OBJ_ALLOCED (obj, block))
- callback ((char*)obj, block->obj_size, data);
+ callback ((GCObject*)obj, block->obj_size, data);
}
} END_FOREACH_BLOCK_NO_LOCK;
}
}
-static GCVTable*
+static GCVTable
major_describe_pointer (char *ptr)
{
MSBlockInfo *block;
int idx;
char *obj;
gboolean live;
- GCVTable *vtable;
+ GCVTable vtable;
int w, b;
gboolean marked;
idx = MS_BLOCK_OBJ_INDEX (ptr, block);
obj = (char*)MS_BLOCK_OBJ (block, idx);
live = MS_OBJ_ALLOCED (obj, block);
- vtable = live ? (GCVTable*)SGEN_LOAD_VTABLE (obj) : NULL;
+ vtable = live ? SGEN_LOAD_VTABLE ((GCObject*)obj) : NULL;
MS_CALC_MARK_BIT (w, b, obj);
marked = MS_MARK_BIT (block, w, b);
SGEN_LOG (0, "dead-object");
} else {
if (live)
- SGEN_LOG (0, "interior-ptr offset %td", ptr - obj);
+ SGEN_LOG (0, "interior-ptr offset %zd", ptr - obj);
else
- SGEN_LOG (0, "dead-interior-ptr offset %td", ptr - obj);
+ SGEN_LOG (0, "dead-interior-ptr offset %zd", ptr - obj);
}
SGEN_LOG (0, " marked %d)\n", marked ? 1 : 0);
major_dump_heap (FILE *heap_dump_file)
{
MSBlockInfo *block;
- int *slots_available = alloca (sizeof (int) * num_block_obj_sizes);
- int *slots_used = alloca (sizeof (int) * num_block_obj_sizes);
+ int *slots_available = (int *)alloca (sizeof (int) * num_block_obj_sizes);
+ int *slots_used = (int *)alloca (sizeof (int) * num_block_obj_sizes);
int i;
for (i = 0; i < num_block_obj_sizes; ++i)
start = i;
} else {
if (start >= 0) {
- sgen_dump_occupied (MS_BLOCK_OBJ (block, start), MS_BLOCK_OBJ (block, i), MS_BLOCK_FOR_BLOCK_INFO (block));
+ sgen_dump_occupied ((char *)MS_BLOCK_OBJ (block, start), (char *)MS_BLOCK_OBJ (block, i), MS_BLOCK_FOR_BLOCK_INFO (block));
start = -1;
}
}
else if (!allocate)
return NULL;
mod_union = sgen_card_table_alloc_mod_union (MS_BLOCK_FOR_BLOCK_INFO (block), MS_BLOCK_SIZE);
- other = SGEN_CAS_PTR ((gpointer*)&block->cardtable_mod_union, mod_union, NULL);
+ other = (guint8 *)SGEN_CAS_PTR ((gpointer*)&block->cardtable_mod_union, mod_union, NULL);
if (!other) {
SGEN_ASSERT (0, block->cardtable_mod_union == mod_union, "Why did CAS not replace?");
return mod_union;
* Mark the mod-union card for `ptr`, which must be a reference within the object `obj`.
*/
static void
-mark_mod_union_card (GCObject *obj, void **ptr)
+mark_mod_union_card (GCObject *obj, void **ptr, GCObject *value_obj)
{
- int type = sgen_obj_get_descriptor ((char*)obj) & DESC_TYPE_MASK;
+ int type = sgen_obj_get_descriptor (obj) & DESC_TYPE_MASK;
if (sgen_safe_object_is_small (obj, type)) {
guint8 *card_byte = major_get_cardtable_mod_union_for_reference ((char*)ptr);
SGEN_ASSERT (0, MS_BLOCK_FOR_OBJ (obj) == MS_BLOCK_FOR_OBJ (ptr), "How can an object and a reference inside it not be in the same block?");
} else {
sgen_los_mark_mod_union_card (obj, ptr);
}
+
+ binary_protocol_mod_union_remset (obj, ptr, value_obj, SGEN_LOAD_VTABLE (value_obj));
+}
+
+static inline gboolean
+major_block_is_evacuating (MSBlockInfo *block)
+{
+ if (evacuate_block_obj_sizes [block->obj_size_index] &&
+ !block->has_pinned &&
+ !block->is_to_space)
+ return TRUE;
+ return FALSE;
}
#define LOAD_VTABLE SGEN_LOAD_VTABLE
MS_SET_MARK_BIT ((block), __word, __bit); \
if (sgen_gc_descr_has_references (desc)) \
GRAY_OBJECT_ENQUEUE ((queue), (obj), (desc)); \
- binary_protocol_mark ((obj), (gpointer)LOAD_VTABLE ((obj)), sgen_safe_object_get_size ((GCObject*)(obj))); \
+ binary_protocol_mark ((obj), (gpointer)LOAD_VTABLE ((obj)), sgen_safe_object_get_size ((obj))); \
INC_NUM_MAJOR_OBJECTS_MARKED (); \
} \
} while (0)
MS_SET_MARK_BIT ((block), __word, __bit); \
if (sgen_gc_descr_has_references (desc)) \
GRAY_OBJECT_ENQUEUE ((queue), (obj), (desc)); \
- binary_protocol_mark ((obj), (gpointer)LOAD_VTABLE ((obj)), sgen_safe_object_get_size ((GCObject*)(obj))); \
+ binary_protocol_mark ((obj), (gpointer)LOAD_VTABLE ((obj)), sgen_safe_object_get_size ((obj))); \
INC_NUM_MAJOR_OBJECTS_MARKED (); \
} \
} while (0)
static void
-pin_major_object (char *obj, SgenGrayQueue *queue)
+pin_major_object (GCObject *obj, SgenGrayQueue *queue)
{
MSBlockInfo *block;
#include "sgen-major-copy-object.h"
-static void
-major_copy_or_mark_object_concurrent (void **ptr, void *obj, SgenGrayQueue *queue)
-{
- SGEN_ASSERT (9, sgen_concurrent_collection_in_progress (), "Why are we scanning concurrently when there's no concurrent collection on?");
- SGEN_ASSERT (9, !sgen_workers_are_working () || sgen_thread_pool_is_thread_pool_thread (mono_native_thread_id_get ()), "We must not scan from two threads at the same time!");
-
- g_assert (!SGEN_OBJECT_IS_FORWARDED (obj));
-
- if (!sgen_ptr_in_nursery (obj)) {
- mword objsize;
-
- objsize = SGEN_ALIGN_UP (sgen_safe_object_get_size ((GCObject*)obj));
-
- if (objsize <= SGEN_MAX_SMALL_OBJ_SIZE) {
- MSBlockInfo *block = MS_BLOCK_FOR_OBJ (obj);
- MS_MARK_OBJECT_AND_ENQUEUE (obj, sgen_obj_get_descriptor (obj), block, queue);
- } else {
- if (sgen_los_object_is_pinned (obj))
- return;
-
- binary_protocol_mark (obj, SGEN_LOAD_VTABLE (obj), sgen_safe_object_get_size (obj));
-
- sgen_los_pin_object (obj);
- if (SGEN_OBJECT_HAS_REFERENCES (obj))
- GRAY_OBJECT_ENQUEUE (queue, obj, sgen_obj_get_descriptor (obj));
- INC_NUM_MAJOR_OBJECTS_MARKED ();
- }
- }
-}
-
static long long
major_get_and_reset_num_major_objects_marked (void)
{
static guint64 stat_drain_loops;
#endif
-static void major_scan_object_with_evacuation (char *start, mword desc, SgenGrayQueue *queue);
-
#define COPY_OR_MARK_FUNCTION_NAME major_copy_or_mark_object_no_evacuation
#define SCAN_OBJECT_FUNCTION_NAME major_scan_object_no_evacuation
#define DRAIN_GRAY_STACK_FUNCTION_NAME drain_gray_stack_no_evacuation
#define DRAIN_GRAY_STACK_FUNCTION_NAME drain_gray_stack_with_evacuation
#include "sgen-marksweep-drain-gray-stack.h"
+#undef COPY_OR_MARK_WITH_EVACUATION
+#define COPY_OR_MARK_CONCURRENT
+#define COPY_OR_MARK_FUNCTION_NAME major_copy_or_mark_object_concurrent
+#define SCAN_OBJECT_FUNCTION_NAME major_scan_object_concurrent
+#define DRAIN_GRAY_STACK_FUNCTION_NAME drain_gray_stack_concurrent
+#include "sgen-marksweep-drain-gray-stack.h"
+
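/*
 * Sketch of the template-by-#include pattern used above (illustrative):
 * each inclusion of sgen-marksweep-drain-gray-stack.h stamps out one
 * specialized copy/scan/drain triple under whatever names the macros hold
 * at that point (the my_* names below are hypothetical):
 *
 *	#define COPY_OR_MARK_FUNCTION_NAME	my_copy_or_mark
 *	#define SCAN_OBJECT_FUNCTION_NAME	my_scan_object
 *	#define DRAIN_GRAY_STACK_FUNCTION_NAME	my_drain
 *	#include "sgen-marksweep-drain-gray-stack.h"
 */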
static gboolean
-drain_gray_stack (ScanCopyContext ctx)
+drain_gray_stack (SgenGrayQueue *queue)
{
gboolean evacuation = FALSE;
int i;
}
if (evacuation)
- return drain_gray_stack_with_evacuation (ctx);
+ return drain_gray_stack_with_evacuation (queue);
else
- return drain_gray_stack_no_evacuation (ctx);
+ return drain_gray_stack_no_evacuation (queue);
}
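/*
 * Illustrative note: the with-evacuation drain carries extra per-object
 * checks, so it is only selected when at least one block size class is
 * flagged in evacuate_block_obj_sizes; otherwise the cheaper
 * no-evacuation variant runs.
 */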
#include "sgen-marksweep-scan-object-concurrent.h"
static void
-major_copy_or_mark_object_canonical (void **ptr, SgenGrayQueue *queue)
+major_copy_or_mark_object_canonical (GCObject **ptr, SgenGrayQueue *queue)
{
major_copy_or_mark_object_with_evacuation (ptr, *ptr, queue);
}
static void
-major_copy_or_mark_object_concurrent_canonical (void **ptr, SgenGrayQueue *queue)
+major_copy_or_mark_object_concurrent_canonical (GCObject **ptr, SgenGrayQueue *queue)
{
major_copy_or_mark_object_concurrent (ptr, *ptr, queue);
}
static void
-major_copy_or_mark_object_concurrent_finish_canonical (void **ptr, SgenGrayQueue *queue)
+major_copy_or_mark_object_concurrent_finish_canonical (GCObject **ptr, SgenGrayQueue *queue)
{
- major_copy_or_mark_object_no_evacuation (ptr, *ptr, queue);
+ major_copy_or_mark_object_with_evacuation (ptr, *ptr, queue);
}
static void
for (; entry < end; ++entry) {
int index = MS_BLOCK_OBJ_INDEX (*entry, block);
- char *obj;
+ GCObject *obj;
SGEN_ASSERT (9, index >= 0 && index < MS_BLOCK_FREE / block->obj_size, "invalid object %p index %d max-index %d", *entry, index, (int)(MS_BLOCK_FREE / block->obj_size));
if (index == last_index)
continue;
memset (obj, 0, obj_size);
}
*(void**)obj = block->free_list;
- block->free_list = obj;
+ block->free_list = (void **)obj;
}
}
}
void *next = *(void**)block->free_list;
*(void**)block->free_list = reversed;
reversed = block->free_list;
- block->free_list = next;
+ block->free_list = (void **)next;
}
- block->free_list = reversed;
+ block->free_list = (void **)reversed;
mono_memory_write_barrier ();
}
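/*
 * Standalone sketch of the in-place free-list reversal above
 * (illustrative; each free slot's first word links to the next slot):
 *
 *	void **list = block->free_list, **reversed = NULL;
 *	while (list) {
 *		void *next = *(void **)list;
 *		*list = reversed;
 *		reversed = list;
 *		list = (void **)next;
 *	}
 *	block->free_list = reversed;	// same slots, opposite order
 */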
sgen_pointer_queue_remove_nulls (&allocated_blocks);
+ mono_memory_barrier ();
sweep_finish ();
static void
sweep_finish (void)
{
- mword total_evacuate_heap = 0;
- mword total_evacuate_saved = 0;
int i;
for (i = 0; i < num_block_obj_sizes; ++i) {
} else {
evacuate_block_obj_sizes [i] = FALSE;
}
- {
- mword total_bytes = block_obj_sizes [i] * sweep_slots_available [i];
- total_evacuate_heap += total_bytes;
- if (evacuate_block_obj_sizes [i])
- total_evacuate_saved += total_bytes - block_obj_sizes [i] * sweep_slots_used [i];
- }
}
- want_evacuation = (float)total_evacuate_saved / (float)total_evacuate_heap > (1 - concurrent_evacuation_threshold);
-
set_sweep_state (SWEEP_STATE_SWEPT, SWEEP_STATE_COMPACTING);
}
static int count_nonpinned_nonref;
static void
-count_nonpinned_callback (char *obj, size_t size, void *data)
+count_nonpinned_callback (GCObject *obj, size_t size, void *data)
{
- GCVTable *vtable = (GCVTable*)LOAD_VTABLE (obj);
+ GCVTable vtable = LOAD_VTABLE (obj);
if (SGEN_VTABLE_HAS_REFERENCES (vtable))
++count_nonpinned_ref;
}
static void
-count_pinned_callback (char *obj, size_t size, void *data)
+count_pinned_callback (GCObject *obj, size_t size, void *data)
{
- GCVTable *vtable = (GCVTable*)LOAD_VTABLE (obj);
+ GCVTable vtable = LOAD_VTABLE (obj);
if (SGEN_VTABLE_HAS_REFERENCES (vtable))
++count_pinned_ref;
goto next_object;
}
+ GCObject *object = (GCObject*)obj;
+
if (small_objects) {
HEAVY_STAT (++scanned_objects);
- scan_func (obj, sgen_obj_get_descriptor (obj), queue);
+ scan_func (object, sgen_obj_get_descriptor (object), queue);
} else {
size_t offset = sgen_card_table_get_card_offset (obj, block_start);
- sgen_cardtable_scan_object (obj, block_obj_size, card_base + offset, mod_union, ctx);
+ sgen_cardtable_scan_object (object, block_obj_size, card_base + offset, mod_union, ctx);
}
next_object:
obj += block_obj_size;
sgen_register_fixed_internal_mem_type (INTERNAL_MEM_MS_BLOCK_INFO, sizeof (MSBlockInfo));
num_block_obj_sizes = ms_calculate_block_obj_sizes (MS_BLOCK_OBJ_SIZE_FACTOR, NULL);
- block_obj_sizes = sgen_alloc_internal_dynamic (sizeof (int) * num_block_obj_sizes, INTERNAL_MEM_MS_TABLES, TRUE);
+ block_obj_sizes = (int *)sgen_alloc_internal_dynamic (sizeof (int) * num_block_obj_sizes, INTERNAL_MEM_MS_TABLES, TRUE);
ms_calculate_block_obj_sizes (MS_BLOCK_OBJ_SIZE_FACTOR, block_obj_sizes);
- evacuate_block_obj_sizes = sgen_alloc_internal_dynamic (sizeof (gboolean) * num_block_obj_sizes, INTERNAL_MEM_MS_TABLES, TRUE);
+ evacuate_block_obj_sizes = (gboolean *)sgen_alloc_internal_dynamic (sizeof (gboolean) * num_block_obj_sizes, INTERNAL_MEM_MS_TABLES, TRUE);
for (i = 0; i < num_block_obj_sizes; ++i)
evacuate_block_obj_sizes [i] = FALSE;
- sweep_slots_available = sgen_alloc_internal_dynamic (sizeof (size_t) * num_block_obj_sizes, INTERNAL_MEM_MS_TABLES, TRUE);
- sweep_slots_used = sgen_alloc_internal_dynamic (sizeof (size_t) * num_block_obj_sizes, INTERNAL_MEM_MS_TABLES, TRUE);
- sweep_num_blocks = sgen_alloc_internal_dynamic (sizeof (size_t) * num_block_obj_sizes, INTERNAL_MEM_MS_TABLES, TRUE);
+ sweep_slots_available = (size_t *)sgen_alloc_internal_dynamic (sizeof (size_t) * num_block_obj_sizes, INTERNAL_MEM_MS_TABLES, TRUE);
+ sweep_slots_used = (size_t *)sgen_alloc_internal_dynamic (sizeof (size_t) * num_block_obj_sizes, INTERNAL_MEM_MS_TABLES, TRUE);
+ sweep_num_blocks = (size_t *)sgen_alloc_internal_dynamic (sizeof (size_t) * num_block_obj_sizes, INTERNAL_MEM_MS_TABLES, TRUE);
/*
{
*/
for (i = 0; i < MS_BLOCK_TYPE_MAX; ++i)
- free_block_lists [i] = sgen_alloc_internal_dynamic (sizeof (MSBlockInfo*) * num_block_obj_sizes, INTERNAL_MEM_MS_TABLES, TRUE);
+ free_block_lists [i] = (MSBlockInfo *volatile *)sgen_alloc_internal_dynamic (sizeof (MSBlockInfo*) * num_block_obj_sizes, INTERNAL_MEM_MS_TABLES, TRUE);
for (i = 0; i < MS_NUM_FAST_BLOCK_OBJ_SIZE_INDEXES; ++i)
fast_block_obj_size_indexes [i] = ms_find_block_obj_size_index (i * 8);
mono_counters_register ("# major blocks allocated", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_major_blocks_alloced);
mono_counters_register ("# major blocks freed", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_major_blocks_freed);
mono_counters_register ("# major blocks lazy swept", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_major_blocks_lazy_swept);
- mono_counters_register ("# major objects evacuated", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_major_objects_evacuated);
#if SIZEOF_VOID_P != 8
mono_counters_register ("# major blocks freed ideally", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_major_blocks_freed_ideal);
mono_counters_register ("# major blocks freed less ideally", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_major_blocks_freed_less_ideal);
concurrent_mark = is_concurrent;
collector->is_concurrent = is_concurrent;
collector->needs_thread_pool = is_concurrent || concurrent_sweep;
- if (is_concurrent)
- collector->want_synchronous_collection = &want_evacuation;
- else
- collector->want_synchronous_collection = NULL;
collector->get_and_reset_num_major_objects_marked = major_get_and_reset_num_major_objects_marked;
collector->supports_cardtable = TRUE;
collector->pin_objects = major_pin_objects;
collector->pin_major_object = pin_major_object;
collector->scan_card_table = major_scan_card_table;
- collector->iterate_live_block_ranges = (void*)(void*) major_iterate_live_block_ranges;
+ collector->iterate_live_block_ranges = major_iterate_live_block_ranges;
if (is_concurrent) {
collector->update_cardtable_mod_union = update_cardtable_mod_union;
- collector->get_cardtable_mod_union_for_object = major_get_cardtable_mod_union_for_reference;
+ collector->get_cardtable_mod_union_for_reference = major_get_cardtable_mod_union_for_reference;
}
collector->init_to_space = major_init_to_space;
collector->sweep = major_sweep;
collector->start_major_collection = major_start_major_collection;
collector->finish_major_collection = major_finish_major_collection;
collector->ptr_is_in_non_pinned_space = major_ptr_is_in_non_pinned_space;
- collector->obj_is_from_pinned_alloc = obj_is_from_pinned_alloc;
+ collector->ptr_is_from_pinned_alloc = ptr_is_from_pinned_alloc;
collector->report_pinned_memory_usage = major_report_pinned_memory_usage;
collector->get_num_major_sections = get_num_major_sections;
collector->get_bytes_survived_last_sweep = get_bytes_survived_last_sweep;
collector->major_ops_serial.copy_or_mark_object = major_copy_or_mark_object_canonical;
collector->major_ops_serial.scan_object = major_scan_object_with_evacuation;
+ collector->major_ops_serial.drain_gray_stack = drain_gray_stack;
if (is_concurrent) {
collector->major_ops_concurrent_start.copy_or_mark_object = major_copy_or_mark_object_concurrent_canonical;
- collector->major_ops_concurrent_start.scan_object = major_scan_object_no_mark_concurrent_start;
-
- collector->major_ops_concurrent.copy_or_mark_object = major_copy_or_mark_object_concurrent_canonical;
- collector->major_ops_concurrent.scan_object = major_scan_object_no_mark_concurrent;
+ collector->major_ops_concurrent_start.scan_object = major_scan_object_concurrent;
+ collector->major_ops_concurrent_start.drain_gray_stack = drain_gray_stack_concurrent;
collector->major_ops_concurrent_finish.copy_or_mark_object = major_copy_or_mark_object_concurrent_finish_canonical;
- collector->major_ops_concurrent_finish.scan_object = major_scan_object_no_evacuation;
+ collector->major_ops_concurrent_finish.scan_object = major_scan_object_with_evacuation;
collector->major_ops_concurrent_finish.scan_vtype = major_scan_vtype_concurrent_finish;
+ collector->major_ops_concurrent_finish.drain_gray_stack = drain_gray_stack;
}
-#if !defined (FIXED_HEAP) && !defined (SGEN_PARALLEL_MARK)
- if (!is_concurrent)
- collector->drain_gray_stack = drain_gray_stack;
-
#ifdef HEAVY_STATISTICS
mono_counters_register ("Optimized copy", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_optimized_copy);
mono_counters_register ("Optimized copy nursery", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_optimized_copy_nursery);
mono_counters_register ("Optimized copy major", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_optimized_copy_major);
mono_counters_register ("Optimized copy major small fast", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_optimized_copy_major_small_fast);
mono_counters_register ("Optimized copy major small slow", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_optimized_copy_major_small_slow);
+ mono_counters_register ("Optimized copy major small evacuate", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_optimized_copy_major_small_evacuate);
mono_counters_register ("Optimized copy major large", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_optimized_copy_major_large);
mono_counters_register ("Optimized major scan", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_optimized_major_scan);
mono_counters_register ("Optimized major scan no refs", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_optimized_major_scan_no_refs);
mono_counters_register ("Gray stack prefetch fills", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_drain_prefetch_fills);
mono_counters_register ("Gray stack prefetch failures", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_drain_prefetch_fill_failures);
#endif
-#endif
#ifdef SGEN_HEAVY_BINARY_PROTOCOL
- mono_mutex_init (&scanned_objects_list_lock);
+ mono_os_mutex_init (&scanned_objects_list_lock);
#endif
SGEN_ASSERT (0, SGEN_MAX_SMALL_OBJ_SIZE <= MS_BLOCK_FREE / 2, "MAX_SMALL_OBJ_SIZE must be at most MS_BLOCK_FREE / 2");