sgen_init_pinning ();
SGEN_LOG (6, "Collecting pinned addresses");
pin_from_roots ((void*)lowest_heap_address, (void*)highest_heap_address, ctx);
-
+ if (mode == COPY_OR_MARK_FROM_ROOTS_FINISH_CONCURRENT) {
+ /* Pin cemented objects that were forced */
+ sgen_pin_cemented_objects ();
+ }
sgen_optimize_pin_queue ();
+ if (mode == COPY_OR_MARK_FROM_ROOTS_START_CONCURRENT) {
+ /*
+ * Cemented objects that are in the pinned list will be marked. When
+ * marking concurrently we won't mark mod-union cards for these objects.
+ * Instead they will remain cemented until the next major collection,
+ * when we will recheck if they are still pinned in the roots.
+ */
+ sgen_cement_force_pinned ();
+ }
sgen_client_collecting_major_1 ();
major_collector.init_to_space ();
SGEN_ASSERT (0, sgen_workers_all_done (), "Why are the workers not done when we start or finish a major collection?");
- /*
- * The concurrent collector doesn't move objects, neither on
- * the major heap nor in the nursery, so we can mark even
- * before pinning has finished. For the non-concurrent
- * collector we start the workers after pinning.
- */
- if (mode == COPY_OR_MARK_FROM_ROOTS_START_CONCURRENT) {
- if (precleaning_enabled) {
- ScanJob *sj;
- /* Mod union preclean job */
- sj = (ScanJob*)sgen_thread_pool_job_alloc ("preclean mod union cardtable", job_mod_union_preclean, sizeof (ScanJob));
- sj->ops = object_ops;
- sgen_workers_start_all_workers (object_ops, &sj->job);
- } else {
- sgen_workers_start_all_workers (object_ops, NULL);
- }
- gray_queue_enable_redirect (WORKERS_DISTRIBUTE_GRAY_QUEUE);
- } else if (mode == COPY_OR_MARK_FROM_ROOTS_FINISH_CONCURRENT) {
+ if (mode == COPY_OR_MARK_FROM_ROOTS_FINISH_CONCURRENT) {
if (sgen_workers_have_idle_work ()) {
+ /*
+ * We force the finish of the worker with the new object ops context
+ * which can also do copying. We need to have finished pinning.
+ */
sgen_workers_start_all_workers (object_ops, NULL);
sgen_workers_join ();
}
sgen_client_collecting_major_3 (&fin_ready_queue, &critical_fin_queue);
- /*
- * FIXME: is this the right context? It doesn't seem to contain a copy function
- * unless we're concurrent.
- */
- enqueue_scan_from_roots_jobs (heap_start, heap_end, object_ops, mode == COPY_OR_MARK_FROM_ROOTS_START_CONCURRENT);
+ enqueue_scan_from_roots_jobs (heap_start, heap_end, object_ops, FALSE);
TV_GETTIME (btv);
time_major_scan_roots += TV_ELAPSED (atv, btv);
+ /*
+ * We start the concurrent worker after pinning and after we scanned the roots
+ * in order to make sure that the worker does not finish before handling all
+ * the roots.
+ */
+ if (mode == COPY_OR_MARK_FROM_ROOTS_START_CONCURRENT) {
+ if (precleaning_enabled) {
+ ScanJob *sj;
+ /* Mod union preclean job */
+ sj = (ScanJob*)sgen_thread_pool_job_alloc ("preclean mod union cardtable", job_mod_union_preclean, sizeof (ScanJob));
+ sj->ops = object_ops;
+ sgen_workers_start_all_workers (object_ops, &sj->job);
+ } else {
+ sgen_workers_start_all_workers (object_ops, NULL);
+ }
+ gray_queue_enable_redirect (WORKERS_DISTRIBUTE_GRAY_QUEUE);
+ }
+
if (mode == COPY_OR_MARK_FROM_ROOTS_FINISH_CONCURRENT) {
ScanJob *sj;
major_finish_copy_or_mark (CopyOrMarkFromRootsMode mode)
{
if (mode == COPY_OR_MARK_FROM_ROOTS_START_CONCURRENT) {
- /*
- * Prepare the pin queue for the next collection. Since pinning runs on the worker
- * threads we must wait for the jobs to finish before we can reset it.
- */
- sgen_workers_wait_for_jobs_finished ();
sgen_finish_pinning ();
sgen_pin_stats_reset ();
struct _CementHashEntry {
GCObject *obj;
+ /* Pin count for obj; the entry only acts as cemented once this reaches SGEN_CEMENT_THRESHOLD (see sgen_cement_force_pinned) */
unsigned int count;
+ gboolean forced; /* if it should stay cemented after the finishing pause */
};
static CementHashEntry cement_hash [SGEN_CEMENT_HASH_SIZE];
void
sgen_cement_reset (void)
{
- memset (cement_hash, 0, sizeof (cement_hash));
+ int i;
+ /*
+  * Forced entries survive exactly one reset: we keep their obj/count and
+  * only drop the flag, so they remain cemented until the following reset
+  * unless sgen_cement_force_pinned () marks them forced again.
+  */
+ for (i = 0; i < SGEN_CEMENT_HASH_SIZE; i++) {
+ if (cement_hash [i].forced) {
+ cement_hash [i].forced = FALSE;
+ } else {
+ cement_hash [i].obj = NULL;
+ cement_hash [i].count = 0;
+ }
+ }
binary_protocol_cement_reset ();
}
+
+/*
+ * The pin_queue should be full and sorted, without entries from the cemented
+ * objects. We traverse the cement hash and check if each object is pinned in
+ * the pin_queue (the pin_queue contains entries between obj and obj+obj_len)
+ */
+void
+sgen_cement_force_pinned (void)
+{
+ int i;
+
+ if (!cement_enabled)
+ return;
+
+ for (i = 0; i < SGEN_CEMENT_HASH_SIZE; i++) {
+ GCObject *obj = cement_hash [i].obj;
+ size_t index;
+ if (!obj)
+ continue;
+ /* Entries below the threshold are not actually cemented yet */
+ if (cement_hash [i].count < SGEN_CEMENT_THRESHOLD)
+ continue;
+ SGEN_ASSERT (0, !cement_hash [i].forced, "Why do we have a forced cemented object before forcing ?");
+
+ /* Returns the index of the target or of the first element greater than it */
+ index = sgen_pointer_queue_search (&pin_queue, obj);
+ if (index == pin_queue.next_slot)
+ continue;
+ SGEN_ASSERT (0, pin_queue.data [index] >= (gpointer)obj, "Binary search should return a pointer greater than or equal to the search target");
+ /* A pinned address inside [obj, obj + size) means the object is still pinned from the roots */
+ if (pin_queue.data [index] < (gpointer)((char*)obj + sgen_safe_object_get_size (obj)))
+ cement_hash [i].forced = TRUE;
+ }
+}
+
+/*
+ * Whether obj is currently cemented AND flagged to stay cemented across the
+ * finishing pause (see sgen_cement_force_pinned).
+ */
+gboolean
+sgen_cement_is_forced (GCObject *obj)
+{
+ CementHashEntry *entry = &cement_hash [SGEN_CEMENT_HASH (sgen_aligned_addr_hash (obj))];
+
+ SGEN_ASSERT (5, sgen_ptr_in_nursery (obj), "Looking up cementing for non-nursery objects makes no sense");
+
+ if (!cement_enabled)
+ return FALSE;
+ /* Empty slot or hash collision with a different object: not forced */
+ if (!entry->obj || entry->obj != obj)
+ return FALSE;
+
+ return entry->forced;
+}
+
gboolean
sgen_cement_lookup (GCObject *obj)
{