}
void
-sgen_scan_area_with_callback (char *start, char *end, IterateObjectCallbackFunc callback, void *data, gboolean allow_flags)
+sgen_scan_area_with_callback (char *start, char *end, IterateObjectCallbackFunc callback, void *data, gboolean allow_flags, gboolean fail_on_canaries)
{
while (start < end) {
size_t size;
}
if (!sgen_client_object_is_array_fill ((GCObject*)obj)) {
- CHECK_CANARY_FOR_OBJECT (obj);
+ CHECK_CANARY_FOR_OBJECT ((GCObject*)obj, fail_on_canaries);
size = ALIGN_UP (safe_object_get_size ((GCObject*)obj));
- callback (obj, size, data);
+ callback ((GCObject*)obj, size, data);
CANARIFY_SIZE (size);
} else {
size = ALIGN_UP (safe_object_get_size ((GCObject*)obj));
do {
int i;
for (i = 0; i != max_objs; ++i) {
- char *obj;
- mword desc;
+ GCObject *obj;
+ SgenDescriptor desc;
GRAY_OBJECT_DEQUEUE (queue, &obj, &desc);
if (!obj)
return TRUE;
sgen_nursery_allocator_prepare_for_pinning ();
while (start < end) {
- void *obj_to_pin = NULL;
+ GCObject *obj_to_pin = NULL;
size_t obj_to_pin_size = 0;
- mword desc;
+ SgenDescriptor desc;
addr = *start;
* either.
*/
if (!sgen_client_object_is_array_fill ((GCObject*)search_start)) {
- CHECK_CANARY_FOR_OBJECT (search_start);
+ CHECK_CANARY_FOR_OBJECT (search_start, TRUE);
CANARIFY_SIZE (canarified_obj_size);
if (addr >= search_start && (char*)addr < (char*)search_start + obj_size) {
/* This is the object we're looking for. */
- obj_to_pin = search_start;
+ obj_to_pin = (GCObject*)search_start;
obj_to_pin_size = canarified_obj_size;
break;
}
* If this is a dummy array marking the beginning of a nursery
* fragment, we don't pin it.
*/
- if (sgen_client_object_is_array_fill ((GCObject*)obj_to_pin))
+ if (sgen_client_object_is_array_fill (obj_to_pin))
goto next_pin_queue_entry;
/*
* when we can't promote an object because we're out of memory.
*/
void
-sgen_pin_object (void *object, GrayQueue *queue)
+sgen_pin_object (GCObject *object, GrayQueue *queue)
{
/*
* All pinned objects are assumed to have been staged, so we need to stage as well.
{
int count = 0;
-#ifdef VALGRIND_MAKE_MEM_DEFINED_IF_ADDRESSABLE
+ SGEN_ASSERT (0, ((mword)start & (SIZEOF_VOID_P - 1)) == 0, "Why are we scanning for references in unaligned memory ?");
+
+#if defined(VALGRIND_MAKE_MEM_DEFINED_IF_ADDRESSABLE) && !defined(_WIN64)
VALGRIND_MAKE_MEM_DEFINED_IF_ADDRESSABLE (start, (char*)end - (char*)start);
#endif
while (start < end) {
- if (*start >= start_nursery && *start < end_nursery) {
- /*
- * *start can point to the middle of an object
- * note: should we handle pointing at the end of an object?
- * pinning in C# code disallows pointing at the end of an object
- * but there is some small chance that an optimizing C compiler
- * may keep the only reference to an object by pointing
- * at the end of it. We ignore this small chance for now.
- * Pointers to the end of an object are indistinguishable
- * from pointers to the start of the next object in memory
- * so if we allow that we'd need to pin two objects...
- * We queue the pointer in an array, the
- * array will then be sorted and uniqued. This way
- * we can coalesce several pinning pointers and it should
- * be faster since we'd do a memory scan with increasing
- * addresses. Note: we can align the address to the allocation
- * alignment, so the unique process is more effective.
- */
- mword addr = (mword)*start;
- addr &= ~(ALLOC_ALIGN - 1);
- if (addr >= (mword)start_nursery && addr < (mword)end_nursery) {
- SGEN_LOG (6, "Pinning address %p from %p", (void*)addr, start);
- sgen_pin_stage_ptr ((void*)addr);
- binary_protocol_pin_stage (start, (void*)addr);
- count++;
- }
-
- /*
- * FIXME: It seems we're registering objects from all over the heap
- * (at least from the nursery and the LOS), but we're only
- * registering pinned addresses in the nursery. What's up with
- * that?
- *
- * Also, why wouldn't we register addresses once the pinning queue
- * is sorted and uniqued?
- */
- if (ptr_in_nursery ((void*)addr))
- sgen_pin_stats_register_address ((char*)addr, pin_type);
+ /*
+ * *start can point to the middle of an object
+ * note: should we handle pointing at the end of an object?
+ * pinning in C# code disallows pointing at the end of an object
+ * but there is some small chance that an optimizing C compiler
+ * may keep the only reference to an object by pointing
+ * at the end of it. We ignore this small chance for now.
+ * Pointers to the end of an object are indistinguishable
+ * from pointers to the start of the next object in memory
+ * so if we allow that we'd need to pin two objects...
+ * We queue the pointer in an array, the
+ * array will then be sorted and uniqued. This way
+ * we can coalesce several pinning pointers and it should
+ * be faster since we'd do a memory scan with increasing
+ * addresses. Note: we can align the address to the allocation
+ * alignment, so the unique process is more effective.
+ */
+ mword addr = (mword)*start;
+ addr &= ~(ALLOC_ALIGN - 1);
+ if (addr >= (mword)start_nursery && addr < (mword)end_nursery) {
+ SGEN_LOG (6, "Pinning address %p from %p", (void*)addr, start);
+ sgen_pin_stage_ptr ((void*)addr);
+ binary_protocol_pin_stage (start, (void*)addr);
+ sgen_pin_stats_register_address ((char*)addr, pin_type);
+ count++;
}
start++;
}
}
static void
-unpin_objects_from_queue (SgenGrayQueue *queue)
-{
- for (;;) {
- char *addr;
- mword desc;
- GRAY_OBJECT_DEQUEUE (queue, &addr, &desc);
- if (!addr)
- break;
- g_assert (SGEN_OBJECT_IS_PINNED (addr));
- SGEN_UNPIN_OBJECT (addr);
- }
-}
-
-static void
-single_arg_user_copy_or_mark (void **obj, void *gc_data)
+single_arg_user_copy_or_mark (GCObject **obj, void *gc_data)
{
ScanCopyContext *ctx = gc_data;
ctx->ops->copy_or_mark_object (obj, ctx->queue);
* This function is not thread-safe!
*/
static void
-precisely_scan_objects_from (void** start_root, void** end_root, char* n_start, char *n_end, mword desc, ScanCopyContext ctx)
+precisely_scan_objects_from (void** start_root, void** end_root, char* n_start, char *n_end, SgenDescriptor desc, ScanCopyContext ctx)
{
CopyOrMarkObjectFunc copy_func = ctx.ops->copy_or_mark_object;
SgenGrayQueue *queue = ctx.queue;
desc >>= ROOT_DESC_TYPE_SHIFT;
while (desc) {
if ((desc & 1) && *start_root) {
- copy_func (start_root, queue);
+ copy_func ((GCObject**)start_root, queue);
SGEN_LOG (9, "Overwrote root at %p with %p", start_root, *start_root);
}
desc >>= 1;
void **objptr = start_run;
while (bmap) {
if ((bmap & 1) && *objptr) {
- copy_func (objptr, queue);
+ copy_func ((GCObject**)objptr, queue);
SGEN_LOG (9, "Overwrote root at %p with %p", objptr, *objptr);
}
bmap >>= 1;
size_t i;
for (i = 0; i < fin_queue->next_slot; ++i) {
- void *obj = fin_queue->data [i];
+ GCObject *obj = fin_queue->data [i];
if (!obj)
continue;
SGEN_LOG (5, "Scan of fin ready object: %p (%s)\n", obj, sgen_client_vtable_get_name (SGEN_LOAD_VTABLE (obj)));
- copy_func (&fin_queue->data [i], queue);
+ copy_func ((GCObject**)&fin_queue->data [i], queue);
}
}
We must clear weak links that don't track resurrection before processing objects ready for
finalization, so that those links are nulled first.
*/
- sgen_null_link_in_range (generation, TRUE, ctx);
+ sgen_null_link_in_range (generation, ctx, FALSE);
if (generation == GENERATION_OLD)
- sgen_null_link_in_range (GENERATION_NURSERY, TRUE, ctx);
+ sgen_null_link_in_range (GENERATION_NURSERY, ctx, FALSE);
/* walk the finalization queue and move also the objects that need to be
*/
g_assert (sgen_gray_object_queue_is_empty (queue));
for (;;) {
- sgen_null_link_in_range (generation, FALSE, ctx);
+ sgen_null_link_in_range (generation, ctx, TRUE);
if (generation == GENERATION_OLD)
- sgen_null_link_in_range (GENERATION_NURSERY, FALSE, ctx);
+ sgen_null_link_in_range (GENERATION_NURSERY, ctx, TRUE);
if (sgen_gray_object_queue_is_empty (queue))
break;
sgen_drain_gray_stack (-1, ctx);
sgen_check_consistency ();
sgen_process_fin_stage_entries ();
- sgen_process_dislink_stage_entries ();
/* pin from pinned handles */
sgen_init_pinning ();
time_minor_scan_remsets += TV_ELAPSED (atv, btv);
SGEN_LOG (2, "Old generation scan: %ld usecs", TV_ELAPSED (atv, btv));
+ sgen_pin_stats_print_class_stats ();
+
sgen_drain_gray_stack (-1, ctx);
/* FIXME: Why do we do this at this specific, seemingly random, point? */
return needs_major;
}
-static void
-scan_nursery_objects_callback (char *obj, size_t size, ScanCopyContext *ctx)
-{
- /*
- * This is called on all objects in the nursery, including pinned ones, so we need
- * to use sgen_obj_get_descriptor_safe(), which masks out the vtable tag bits.
- */
- ctx->ops->scan_object (obj, sgen_obj_get_descriptor_safe (obj), ctx->queue);
-}
-
-static void
-scan_nursery_objects (ScanCopyContext ctx)
-{
- sgen_scan_area_with_callback (nursery_section->data, nursery_section->end_data,
- (IterateObjectCallbackFunc)scan_nursery_objects_callback, (void*)&ctx, FALSE);
-}
-
typedef enum {
COPY_OR_MARK_FROM_ROOTS_SERIAL,
COPY_OR_MARK_FROM_ROOTS_START_CONCURRENT,
} CopyOrMarkFromRootsMode;
static void
-major_copy_or_mark_from_roots (size_t *old_next_pin_slot, CopyOrMarkFromRootsMode mode, gboolean scan_whole_nursery, SgenObjectOperations *object_ops)
+major_copy_or_mark_from_roots (size_t *old_next_pin_slot, CopyOrMarkFromRootsMode mode, SgenObjectOperations *object_ops)
{
LOSObject *bigobj;
TV_DECLARE (atv);
SGEN_ASSERT (0, !!concurrent == !!concurrent_collection_in_progress, "We've been called with the wrong mode.");
- if (scan_whole_nursery)
- SGEN_ASSERT (0, mode == COPY_OR_MARK_FROM_ROOTS_FINISH_CONCURRENT, "Scanning whole nursery only makes sense when we're finishing a concurrent collection.");
-
- if (concurrent) {
+ if (mode == COPY_OR_MARK_FROM_ROOTS_START_CONCURRENT) {
	/* This cleans up unused fragments */
sgen_nursery_allocator_prepare_for_pinning ();
}
sgen_process_fin_stage_entries ();
- sgen_process_dislink_stage_entries ();
TV_GETTIME (atv);
sgen_init_pinning ();
SGEN_LOG (6, "Collecting pinned addresses");
pin_from_roots ((void*)lowest_heap_address, (void*)highest_heap_address, ctx);
- if (mode != COPY_OR_MARK_FROM_ROOTS_START_CONCURRENT) {
- if (major_collector.is_concurrent) {
- /*
- * The concurrent major collector cannot evict
- * yet, so we need to pin cemented objects to
- * not break some asserts.
- *
- * FIXME: We could evict now!
- */
- sgen_pin_cemented_objects ();
- }
- }
-
sgen_optimize_pin_queue ();
sgen_client_collecting_major_1 ();
SGEN_LOG (6, "Pinning from large objects");
for (bigobj = los_object_list; bigobj; bigobj = bigobj->next) {
size_t dummy;
- if (sgen_find_optimized_pin_queue_area (bigobj->data, (char*)bigobj->data + sgen_los_object_size (bigobj), &dummy, &dummy)) {
- binary_protocol_pin (bigobj->data, (gpointer)LOAD_VTABLE (bigobj->data), safe_object_get_size (((GCObject*)(bigobj->data))));
+ if (sgen_find_optimized_pin_queue_area ((char*)bigobj->data, (char*)bigobj->data + sgen_los_object_size (bigobj), &dummy, &dummy)) {
+ binary_protocol_pin (bigobj->data, (gpointer)LOAD_VTABLE (bigobj->data), safe_object_get_size (bigobj->data));
if (sgen_los_object_is_pinned (bigobj->data)) {
SGEN_ASSERT (0, mode == COPY_OR_MARK_FROM_ROOTS_FINISH_CONCURRENT, "LOS objects can only be pinned here after concurrent marking.");
}
sgen_los_pin_object (bigobj->data);
if (SGEN_OBJECT_HAS_REFERENCES (bigobj->data))
- GRAY_OBJECT_ENQUEUE (WORKERS_DISTRIBUTE_GRAY_QUEUE, bigobj->data, sgen_obj_get_descriptor (bigobj->data));
- sgen_pin_stats_register_object ((char*) bigobj->data, safe_object_get_size ((GCObject*) bigobj->data));
+ GRAY_OBJECT_ENQUEUE (WORKERS_DISTRIBUTE_GRAY_QUEUE, bigobj->data, sgen_obj_get_descriptor ((GCObject*)bigobj->data));
+ sgen_pin_stats_register_object (bigobj->data, safe_object_get_size (bigobj->data));
SGEN_LOG (6, "Marked large object %p (%s) size: %lu from roots", bigobj->data,
sgen_client_vtable_get_name (SGEN_LOAD_VTABLE (bigobj->data)),
(unsigned long)sgen_los_object_size (bigobj));
sgen_client_pinned_los_object (bigobj->data);
}
}
- /* second pass for the sections */
- /*
- * Concurrent mark never follows references into the nursery. In the start and
- * finish pauses we must scan live nursery objects, though.
- *
- * In the finish pause we do this conservatively by scanning all nursery objects.
- * Previously we would only scan pinned objects here. We assumed that all objects
- * that were pinned during the nursery collection immediately preceding this finish
- * mark would be pinned again here. Due to the way we get the stack end for the GC
- * thread, however, that's not necessarily the case: we scan part of the stack used
- * by the GC itself, which changes constantly, so pinning isn't entirely
- * deterministic.
- *
- * The split nursery also complicates things because non-pinned objects can survive
- * in the nursery. That's why we need to do a full scan of the nursery for it, too.
- *
- * In the future we shouldn't do a preceding nursery collection at all and instead
- * do the finish pause with promotion from the nursery.
- *
- * A further complication arises when we have late-pinned objects from the preceding
- * nursery collection. Those are the result of being out of memory when trying to
- * evacuate objects. They won't be found from the roots, so we just scan the whole
- * nursery.
- *
- * Non-concurrent mark evacuates from the nursery, so it's
- * sufficient to just scan pinned nursery objects.
- */
- if (scan_whole_nursery || mode == COPY_OR_MARK_FROM_ROOTS_FINISH_CONCURRENT || (concurrent && sgen_minor_collector.is_split)) {
- scan_nursery_objects (ctx);
- } else {
- pin_objects_in_nursery (concurrent, ctx);
- if (check_nursery_objects_pinned && !sgen_minor_collector.is_split)
- sgen_check_nursery_objects_pinned (mode != COPY_OR_MARK_FROM_ROOTS_START_CONCURRENT);
- }
+ pin_objects_in_nursery (mode == COPY_OR_MARK_FROM_ROOTS_START_CONCURRENT, ctx);
+ if (check_nursery_objects_pinned && !sgen_minor_collector.is_split)
+ sgen_check_nursery_objects_pinned (mode != COPY_OR_MARK_FROM_ROOTS_START_CONCURRENT);
major_collector.pin_objects (WORKERS_DISTRIBUTE_GRAY_QUEUE);
if (old_next_pin_slot)
TV_GETTIME (atv);
time_major_scan_mod_union += TV_ELAPSED (btv, atv);
}
+
+ sgen_pin_stats_print_class_stats ();
}
static void
-major_finish_copy_or_mark (void)
+major_finish_copy_or_mark (CopyOrMarkFromRootsMode mode)
{
- if (!concurrent_collection_in_progress)
- return;
-
- /*
- * Prepare the pin queue for the next collection. Since pinning runs on the worker
- * threads we must wait for the jobs to finish before we can reset it.
- */
- sgen_workers_wait_for_jobs_finished ();
- sgen_finish_pinning ();
+ switch (mode) {
+ case COPY_OR_MARK_FROM_ROOTS_START_CONCURRENT:
+ /*
+ * Prepare the pin queue for the next collection. Since pinning runs on the worker
+ * threads we must wait for the jobs to finish before we can reset it.
+ */
+ sgen_workers_wait_for_jobs_finished ();
+ sgen_finish_pinning ();
- sgen_pin_stats_reset ();
+ sgen_pin_stats_reset ();
- if (do_concurrent_checks)
- sgen_debug_check_nursery_is_clean ();
+ if (do_concurrent_checks)
+ sgen_debug_check_nursery_is_clean ();
+ break;
+ case COPY_OR_MARK_FROM_ROOTS_FINISH_CONCURRENT:
+ sgen_workers_wait_for_jobs_finished ();
+ break;
+ case COPY_OR_MARK_FROM_ROOTS_SERIAL:
+ break;
+ default:
+ g_assert_not_reached ();
+ }
}
static void
g_assert (sgen_section_gray_queue_is_empty (sgen_workers_get_distribute_section_gray_queue ()));
- sgen_cement_reset ();
+ if (!concurrent)
+ sgen_cement_reset ();
if (concurrent) {
g_assert (major_collector.is_concurrent);
if (major_collector.start_major_collection)
major_collector.start_major_collection ();
- major_copy_or_mark_from_roots (old_next_pin_slot, concurrent ? COPY_OR_MARK_FROM_ROOTS_START_CONCURRENT : COPY_OR_MARK_FROM_ROOTS_SERIAL, FALSE, object_ops);
- major_finish_copy_or_mark ();
+ major_copy_or_mark_from_roots (old_next_pin_slot, concurrent ? COPY_OR_MARK_FROM_ROOTS_START_CONCURRENT : COPY_OR_MARK_FROM_ROOTS_SERIAL, object_ops);
+ major_finish_copy_or_mark (concurrent ? COPY_OR_MARK_FROM_ROOTS_START_CONCURRENT : COPY_OR_MARK_FROM_ROOTS_SERIAL);
}
static void
-major_finish_collection (const char *reason, size_t old_next_pin_slot, gboolean forced, gboolean scan_whole_nursery)
+major_finish_collection (const char *reason, size_t old_next_pin_slot, gboolean forced)
{
ScannedObjectCounts counts;
SgenObjectOperations *object_ops;
+ mword fragment_total;
TV_DECLARE (atv);
TV_DECLARE (btv);
if (concurrent_collection_in_progress) {
object_ops = &major_collector.major_ops_concurrent_finish;
- major_copy_or_mark_from_roots (NULL, COPY_OR_MARK_FROM_ROOTS_FINISH_CONCURRENT, scan_whole_nursery, object_ops);
+ major_copy_or_mark_from_roots (NULL, COPY_OR_MARK_FROM_ROOTS_FINISH_CONCURRENT, object_ops);
- major_finish_copy_or_mark ();
+ major_finish_copy_or_mark (COPY_OR_MARK_FROM_ROOTS_FINISH_CONCURRENT);
sgen_workers_join ();
#ifdef SGEN_DEBUG_INTERNAL_ALLOC
main_gc_thread = NULL;
#endif
-
- if (do_concurrent_checks)
- sgen_debug_check_nursery_is_clean ();
} else {
- SGEN_ASSERT (0, !scan_whole_nursery, "scan_whole_nursery only applies to concurrent collections");
object_ops = &major_collector.major_ops_serial;
}
reset_heap_boundaries ();
sgen_update_heap_boundaries ((mword)sgen_get_nursery_start (), (mword)sgen_get_nursery_end ());
- if (!concurrent_collection_in_progress) {
- /* walk the pin_queue, build up the fragment list of free memory, unmark
- * pinned objects as we go, memzero() the empty fragments so they are ready for the
- * next allocations.
- */
- if (!sgen_build_nursery_fragments (nursery_section, NULL))
- degraded_mode = 1;
+ /* walk the pin_queue, build up the fragment list of free memory, unmark
+ * pinned objects as we go, memzero() the empty fragments so they are ready for the
+ * next allocations.
+ */
+ fragment_total = sgen_build_nursery_fragments (nursery_section, NULL);
+ if (!fragment_total)
+ degraded_mode = 1;
+ SGEN_LOG (4, "Free space in nursery after major %ld", (long)fragment_total);
- /* prepare the pin queue for the next collection */
- sgen_finish_pinning ();
+ if (do_concurrent_checks && concurrent_collection_in_progress)
+ sgen_debug_check_nursery_is_clean ();
+
+ /* prepare the pin queue for the next collection */
+ sgen_finish_pinning ();
- /* Clear TLABs for all threads */
- sgen_clear_tlabs ();
+ /* Clear TLABs for all threads */
+ sgen_clear_tlabs ();
- sgen_pin_stats_reset ();
- }
+ sgen_pin_stats_reset ();
sgen_cement_clear_below_threshold ();
TV_GETTIME (time_start);
major_start_collection (FALSE, &old_next_pin_slot);
- major_finish_collection (reason, old_next_pin_slot, forced, FALSE);
+ major_finish_collection (reason, old_next_pin_slot, forced);
TV_GETTIME (time_end);
gc_stats.major_gc_time += TV_ELAPSED (time_start, time_end);
{
TV_DECLARE (total_start);
TV_DECLARE (total_end);
- gboolean late_pinned;
- SgenGrayQueue unpin_queue;
- memset (&unpin_queue, 0, sizeof (unpin_queue));
TV_GETTIME (total_start);
major_collector.update_cardtable_mod_union ();
sgen_los_update_cardtable_mod_union ();
- late_pinned = collect_nursery (&unpin_queue, TRUE);
-
if (mod_union_consistency_check)
sgen_check_mod_union_consistency ();
current_collection_generation = GENERATION_OLD;
- major_finish_collection ("finishing", -1, forced, late_pinned);
+ sgen_cement_reset ();
+ major_finish_collection ("finishing", -1, forced);
if (whole_heap_check_before_collection)
sgen_check_whole_heap (FALSE);
- unpin_objects_from_queue (&unpin_queue);
- sgen_gray_object_queue_deinit (&unpin_queue);
-
TV_GETTIME (total_end);
gc_stats.major_gc_time += TV_ELAPSED (total_start, total_end) - TV_ELAPSED (last_minor_collection_start_tv, last_minor_collection_end_tv);
major_finish_concurrent_collection (wait_to_finish);
oldest_generation_collected = GENERATION_OLD;
} else {
- sgen_workers_signal_start_nursery_collection_and_wait ();
-
major_update_concurrent_collection ();
if (generation_to_collect == GENERATION_NURSERY)
collect_nursery (NULL, FALSE);
-
- sgen_workers_signal_finish_nursery_collection ();
}
goto done;
* Return TRUE if @obj is ready to be finalized.
*/
static inline gboolean
-sgen_is_object_alive (void *object)
+sgen_is_object_alive (GCObject *object)
{
if (ptr_in_nursery (object))
return sgen_nursery_is_object_alive (object);
* are never alive during a minor collection.
*/
static inline int
-sgen_is_object_alive_and_on_current_collection (char *object)
+sgen_is_object_alive_and_on_current_collection (GCObject *object)
{
if (ptr_in_nursery (object))
return sgen_nursery_is_object_alive (object);
gboolean
-sgen_gc_is_object_ready_for_finalization (void *object)
+sgen_gc_is_object_ready_for_finalization (GCObject *object)
{
return !sgen_is_object_alive (object);
}
}
gboolean
-sgen_object_is_live (void *obj)
+sgen_object_is_live (GCObject *obj)
{
return sgen_is_object_alive_and_on_current_collection (obj);
}
/* FIXME: batch to reduce lock contention */
while (sgen_have_pending_finalizers ()) {
- void *obj;
+ GCObject *obj;
LOCK_GC;
* We do not coalesce roots.
*/
int
-sgen_register_root (char *start, size_t size, void *descr, int root_type)
+sgen_register_root (char *start, size_t size, SgenDescriptor descr, int root_type, int source, const char *msg)
{
RootRecord new_root;
int i;
if (root) {
size_t old_size = root->end_root - start;
root->end_root = start + size;
- g_assert (((root->root_desc != 0) && (descr != NULL)) ||
- ((root->root_desc == 0) && (descr == NULL)));
- root->root_desc = (mword)descr;
+ SGEN_ASSERT (0, !!root->root_desc == !!descr, "Can't change whether a root is precise or conservative.");
+ SGEN_ASSERT (0, root->source == source, "Can't change a root's source identifier.");
+ SGEN_ASSERT (0, !!root->msg == !!msg, "Can't change a root's message.");
+ root->root_desc = descr;
roots_size += size;
roots_size -= old_size;
UNLOCK_GC;
}
new_root.end_root = start + size;
- new_root.root_desc = (mword)descr;
+ new_root.root_desc = descr;
+ new_root.source = source;
+ new_root.msg = msg;
sgen_hash_table_replace (&roots_hash [root_type], start, &new_root, NULL);
roots_size += size;
- SGEN_LOG (3, "Added root for range: %p-%p, descr: %p (%d/%d bytes)", start, new_root.end_root, descr, (int)size, (int)roots_size);
+ SGEN_LOG (3, "Added root for range: %p-%p, descr: %llx (%d/%d bytes)", start, new_root.end_root, (long long)descr, (int)size, (int)roots_size);
UNLOCK_GC;
return TRUE;
{
SGEN_LOG (8, "Wbarrier store at %p to %p (%s)", ptr, value, value ? sgen_client_vtable_get_name (SGEN_LOAD_VTABLE (value)) : "null");
SGEN_UPDATE_REFERENCE_ALLOW_NULL (ptr, value);
- if (ptr_in_nursery (value))
+ if (ptr_in_nursery (value) || concurrent_collection_in_progress)
mono_gc_wbarrier_generic_nostore (ptr);
sgen_dummy_use (value);
}
InterlockedWritePointer (ptr, value);
- if (ptr_in_nursery (value))
+ if (ptr_in_nursery (value) || concurrent_collection_in_progress)
mono_gc_wbarrier_generic_nostore (ptr);
sgen_dummy_use (value);
return tot;
}
-GCObject*
-sgen_weak_link_get (void **link_addr)
-{
- void * volatile *link_addr_volatile;
- void *ptr;
- GCObject *obj;
- retry:
- link_addr_volatile = link_addr;
- ptr = (void*)*link_addr_volatile;
- /*
- * At this point we have a hidden pointer. If the GC runs
- * here, it will not recognize the hidden pointer as a
- * reference, and if the object behind it is not referenced
- * elsewhere, it will be freed. Once the world is restarted
- * we reveal the pointer, giving us a pointer to a freed
- * object. To make sure we don't return it, we load the
- * hidden pointer again. If it's still the same, we can be
- * sure the object reference is valid.
- */
- if (ptr)
- obj = (GCObject*) REVEAL_POINTER (ptr);
- else
- return NULL;
-
- mono_memory_barrier ();
-
- /*
- * During the second bridge processing step the world is
- * running again. That step processes all weak links once
- * more to null those that refer to dead objects. Before that
- * is completed, those links must not be followed, so we
- * conservatively wait for bridge processing when any weak
- * link is dereferenced.
- */
- sgen_client_bridge_wait_for_processing ();
-
- if ((void*)*link_addr_volatile != ptr)
- goto retry;
-
- return obj;
-}
-
gboolean
sgen_set_allow_synchronous_major (gboolean flag)
{
sgen_init_descriptors ();
sgen_init_gray_queues ();
sgen_init_allocator ();
+ sgen_init_gchandles ();
sgen_register_fixed_internal_mem_type (INTERNAL_MEM_SECTION, SGEN_SIZEOF_GC_MEM_SECTION);
sgen_register_fixed_internal_mem_type (INTERNAL_MEM_GRAY_QUEUE, sizeof (GrayQueueSection));
sgen_nursery_size = DEFAULT_NURSERY_SIZE;
- if (major_collector.is_concurrent)
- cement_enabled = FALSE;
-
if (opts) {
gboolean usage_printed = FALSE;
alloc_nursery ();
- if (major_collector.is_concurrent && cement_enabled) {
- sgen_env_var_error (MONO_GC_PARAMS_NAME, "Ignoring.", "`cementing` is not supported on concurrent major collectors.");
- cement_enabled = FALSE;
- }
-
sgen_cement_init (cement_enabled);
if ((env = g_getenv (MONO_GC_DEBUG_NAME))) {
sgen_env_var_error (MONO_GC_DEBUG_NAME, "Ignoring.", "`check-concurrent` only works with concurrent major collectors.");
continue;
}
+ nursery_clear_policy = CLEAR_AT_GC;
do_concurrent_checks = TRUE;
} else if (!strcmp (opt, "dump-nursery-at-minor-gc")) {
do_dump_nursery_content = TRUE;
sgen_card_table_init (&remset);
+ sgen_register_root (NULL, 0, sgen_make_user_root_descriptor (sgen_mark_normal_gc_handles), ROOT_TYPE_NORMAL, MONO_ROOT_SOURCE_GC_HANDLE, "normal gc handles");
+
gc_initialized = 1;
}