+/*
+ * sgen-fin-weak-hash.c: Finalizers and weak links.
+ *
+ * Author:
+ * Paolo Molaro (lupus@ximian.com)
+ * Rodrigo Kumpera (kumpera@gmail.com)
+ *
+ * Copyright 2005-2011 Novell, Inc (http://www.novell.com)
+ * Copyright 2011 Xamarin Inc (http://www.xamarin.com)
+ * Copyright 2011 Xamarin, Inc.
+ * Copyright (C) 2012 Xamarin Inc
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License 2.0 as published by the Free Software Foundation;
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License 2.0 along with this library; if not, write to the Free
+ * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include "config.h"
+#ifdef HAVE_SGEN_GC
+
+#include "metadata/sgen-gc.h"
+#include "metadata/sgen-gray.h"
+#include "metadata/sgen-protocol.h"
+#include "metadata/sgen-pointer-queue.h"
+#include "utils/dtrace.h"
+#include "utils/mono-counters.h"
+
+#define ptr_in_nursery sgen_ptr_in_nursery
+
+typedef SgenGrayQueue GrayQueue;
+
+int num_ready_finalizers = 0;
+static int no_finalize = 0;
+
#define DISLINK_OBJECT(l) (REVEAL_POINTER (*(void**)(l)))
-#define DISLINK_TRACK(l) ((~(gulong)(*(void**)(l))) & 1)
+#define DISLINK_TRACK(l) ((~(size_t)(*(void**)(l))) & 1)
/*
* The finalizable hash has the object as the key, the
static int
tagged_object_hash (MonoObject *o)
{
- return mono_object_hash (tagged_object_get_object (o));
+ return mono_aligned_addr_hash (tagged_object_get_object (o));
}
static gboolean
}
/* LOCKING: requires that the GC lock is held */
-static void
-collect_bridge_objects (CopyOrMarkObjectFunc copy_func, char *start, char *end, int generation, GrayQueue *queue)
+void
+sgen_collect_bridge_objects (int generation, ScanCopyContext ctx)
{
+ CopyOrMarkObjectFunc copy_func = ctx.copy_func;
+ GrayQueue *queue = ctx.queue;
SgenHashTable *hash_table = get_finalize_entry_hash_table (generation);
MonoObject *object;
gpointer dummy;
char *copy;
+ SgenPointerQueue moved_fin_objects;
+
+ sgen_pointer_queue_init (&moved_fin_objects, INTERNAL_MEM_TEMPORARY);
if (no_finalize)
return;
continue;
/* Object is a bridge object and major heap says it's dead */
- if (!((char*)object >= start && (char*)object < end && !major_collector.is_object_live ((char*)object)))
+ if (major_collector.is_object_live ((char*)object))
continue;
/* Nursery says the object is dead. */
SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);
/* insert it into the major hash */
- sgen_hash_table_replace (&major_finalizable_hash, tagged_object_apply (copy, tag), NULL);
+ sgen_hash_table_replace (&major_finalizable_hash, tagged_object_apply (copy, tag), NULL, NULL);
- DEBUG (5, fprintf (gc_debug_file, "Promoting finalization of object %p (%s) (was at %p) to major table\n", copy, safe_name (copy), object));
+ SGEN_LOG (5, "Promoting finalization of object %p (%s) (was at %p) to major table", copy, sgen_safe_name (copy), object);
continue;
- } else {
+ } else if (copy != (char*)object) {
/* update pointer */
- DEBUG (5, fprintf (gc_debug_file, "Updating object for finalization: %p (%s) (was at %p)\n", copy, safe_name (copy), object));
- SGEN_HASH_TABLE_FOREACH_SET_KEY (tagged_object_apply (copy, tag));
+ SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);
+
+ /* register for reinsertion */
+ sgen_pointer_queue_add (&moved_fin_objects, tagged_object_apply (copy, tag));
+
+ SGEN_LOG (5, "Updating object for finalization: %p (%s) (was at %p)", copy, sgen_safe_name (copy), object);
+
+ continue;
}
} SGEN_HASH_TABLE_FOREACH_END;
+
+ while (!sgen_pointer_queue_is_empty (&moved_fin_objects)) {
+ sgen_hash_table_replace (hash_table, sgen_pointer_queue_pop (&moved_fin_objects), NULL, NULL);
+ }
+
+ sgen_pointer_queue_free (&moved_fin_objects);
}
/* LOCKING: requires that the GC lock is held */
-static void
-finalize_in_range (CopyOrMarkObjectFunc copy_func, char *start, char *end, int generation, GrayQueue *queue)
+void
+sgen_finalize_in_range (int generation, ScanCopyContext ctx)
{
+ CopyOrMarkObjectFunc copy_func = ctx.copy_func;
+ GrayQueue *queue = ctx.queue;
SgenHashTable *hash_table = get_finalize_entry_hash_table (generation);
MonoObject *object;
gpointer dummy;
+ SgenPointerQueue moved_fin_objects;
+
+ sgen_pointer_queue_init (&moved_fin_objects, INTERNAL_MEM_TEMPORARY);
if (no_finalize)
return;
SGEN_HASH_TABLE_FOREACH (hash_table, object, dummy) {
int tag = tagged_object_get_tag (object);
object = tagged_object_get_object (object);
- if ((char*)object >= start && (char*)object < end && !major_collector.is_object_live ((char*)object)) {
+ if (!major_collector.is_object_live ((char*)object)) {
gboolean is_fin_ready = sgen_gc_is_object_ready_for_finalization (object);
MonoObject *copy = object;
copy_func ((void**)©, queue);
/* remove and put in fin_ready_list */
SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);
num_ready_finalizers++;
- queue_finalization_entry (copy);
+ sgen_queue_finalization_entry (copy);
/* Make it survive */
- DEBUG (5, fprintf (gc_debug_file, "Queueing object for finalization: %p (%s) (was at %p) (%d/%d)\n", copy, safe_name (copy), object, num_ready_finalizers, sgen_hash_table_num_entries (hash_table)));
+ SGEN_LOG (5, "Queueing object for finalization: %p (%s) (was at %p) (%d/%d)", copy, sgen_safe_name (copy), object, num_ready_finalizers, sgen_hash_table_num_entries (hash_table));
continue;
} else {
if (hash_table == &minor_finalizable_hash && !ptr_in_nursery (copy)) {
SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);
/* insert it into the major hash */
- sgen_hash_table_replace (&major_finalizable_hash, tagged_object_apply (copy, tag), NULL);
+ sgen_hash_table_replace (&major_finalizable_hash, tagged_object_apply (copy, tag), NULL, NULL);
- DEBUG (5, fprintf (gc_debug_file, "Promoting finalization of object %p (%s) (was at %p) to major table\n", copy, safe_name (copy), object));
+ SGEN_LOG (5, "Promoting finalization of object %p (%s) (was at %p) to major table", copy, sgen_safe_name (copy), object);
continue;
- } else {
+ } else if (copy != object) {
/* update pointer */
- DEBUG (5, fprintf (gc_debug_file, "Updating object for finalization: %p (%s) (was at %p)\n", copy, safe_name (copy), object));
- SGEN_HASH_TABLE_FOREACH_SET_KEY (tagged_object_apply (copy, tag));
+ SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);
+
+ /* register for reinsertion */
+ sgen_pointer_queue_add (&moved_fin_objects, tagged_object_apply (copy, tag));
+
+ SGEN_LOG (5, "Updating object for finalization: %p (%s) (was at %p)", copy, sgen_safe_name (copy), object);
+
+ continue;
}
}
}
} SGEN_HASH_TABLE_FOREACH_END;
+
+ while (!sgen_pointer_queue_is_empty (&moved_fin_objects)) {
+ sgen_hash_table_replace (hash_table, sgen_pointer_queue_pop (&moved_fin_objects), NULL, NULL);
+ }
+
+ sgen_pointer_queue_free (&moved_fin_objects);
}
/* LOCKING: requires that the GC lock is held */
g_assert (user_data == NULL || user_data == mono_gc_run_finalize);
if (user_data) {
- if (sgen_hash_table_replace (hash_table, obj, NULL))
- DEBUG (5, fprintf (gc_debug_file, "Added finalizer for object: %p (%s) (%d) to %s table\n", obj, obj->vtable->klass->name, hash_table->num_entries, generation_name (generation)));
+ if (sgen_hash_table_replace (hash_table, obj, NULL, NULL))
+ SGEN_LOG (5, "Added finalizer for object: %p (%s) (%d) to %s table", obj, obj->vtable->klass->name, hash_table->num_entries, sgen_generation_name (generation));
} else {
if (sgen_hash_table_remove (hash_table, obj, NULL))
- DEBUG (5, fprintf (gc_debug_file, "Removed finalizer for object: %p (%s) (%d)\n", obj, obj->vtable->klass->name, hash_table->num_entries));
+ SGEN_LOG (5, "Removed finalizer for object: %p (%s) (%d)", obj, obj->vtable->klass->name, hash_table->num_entries);
}
}
+/*
+ * We're using (mostly) non-locking staging queues for finalizers and weak links to speed
+ * up registering them. Otherwise we'd have to take the GC lock.
+ *
+ * The queues are arrays of `StageEntry`, plus a `next_entry` index. Threads add entries to
+ * the queue via `add_stage_entry()` in a linear fashion until it fills up, in which case
+ * `process_stage_entries()` is called to drain it. A garbage collection will also drain
+ * the queues via the same function. That implies that `add_stage_entry()`, since it
+ * doesn't take a lock, must be able to run concurrently with `process_stage_entries()`,
+ * though it doesn't have to make progress while the queue is drained. In fact, once it
+ * detects that the queue is being drained, it blocks until the draining is done.
+ *
+ * The protocol must guarantee that entries in the queue are causally ordered, otherwise two
+ * entries for the same location might get switched, resulting in the earlier one being
+ * committed and the later one ignored.
+ *
+ * `next_entry` is the index of the next entry to be filled, or `-1` if the queue is
+ * currently being drained. Each entry has a state:
+ *
+ * `STAGE_ENTRY_FREE`: The entry is free. Its data fields must be `NULL`.
+ *
+ * `STAGE_ENTRY_BUSY`: The entry is currently being filled in.
+ *
+ * `STAGE_ENTRY_USED`: The entry is completely filled in and must be processed in the next
+ * draining round.
+ *
+ * `STAGE_ENTRY_INVALID`: The entry was busy during queue draining and therefore
+ * invalidated. Entries that are `BUSY` can obviously not be processed during a drain, but
+ * we can't leave them in place because new entries might be inserted before them, including
+ * from the same thread, violating causality. An alternative would be not to reset
+ * `next_entry` to `0` after a drain, but to the index of the last `BUSY` entry plus one,
+ * but that can potentially waste the whole queue.
+ *
+ * State transitions:
+ *
+ * | from | to | filler? | drainer? |
+ * +---------+---------+---------+----------+
+ * | FREE | BUSY | X | |
+ * | BUSY | FREE | X | |
+ * | BUSY | USED | X | |
+ * | BUSY | INVALID | | X |
+ * | USED | FREE | | X |
+ * | INVALID | FREE | X | |
+ *
+ * `next_entry` can be incremented either by the filler thread that set the corresponding
+ * entry to `BUSY`, or by another filler thread that's trying to get a `FREE` slot. If that
+ * other thread wasn't allowed to increment, it would block on the first filler thread.
+ *
+ * An entry's state, once it's set from `FREE` to `BUSY` by a filler thread, can only be
+ * changed by that same thread or by the drainer.  The drainer can only set a `BUSY` entry
+ * to `INVALID`, so it needs to be set to `FREE` again by the original filler thread.
+ */
+
#define STAGE_ENTRY_FREE 0
#define STAGE_ENTRY_BUSY 1
#define STAGE_ENTRY_USED 2
+#define STAGE_ENTRY_INVALID 3
typedef struct {
- gint32 state;
+ volatile gint32 state;
MonoObject *obj;
void *user_data;
} StageEntry;
static volatile gint32 next_fin_stage_entry = 0;
static StageEntry fin_stage_entries [NUM_FIN_STAGE_ENTRIES];
+/*
+ * This is used to lock the stage when processing is forced, i.e. when it's triggered by a
+ * garbage collection. In that case, the world is already stopped and there's only one
+ * thread operating on the queue.
+ */
+static void
+lock_stage_for_processing (volatile gint32 *next_entry)
+{
+	/* `-1` makes fillers in `add_stage_entry ()` back off until draining is done. */
+	*next_entry = -1;
+}
+
+/*
+ * When processing is triggered by an overflow, we don't want to take the GC lock
+ * immediately, and then set `next_entry` to `-1`, because another thread might have drained
+ * the queue in the meantime.  Instead, we make sure the overflow is still there, we
+ * atomically set `next_entry`, and only once that happened do we take the GC lock.
+ */
+static gboolean
+try_lock_stage_for_processing (int num_entries, volatile gint32 *next_entry)
+{
+	gint32 old = *next_entry;
+	/* The overflow is gone - another thread must already have drained the queue. */
+	if (old < num_entries)
+		return FALSE;
+	/* Atomically claim the queue for draining; `-1` keeps fillers out. */
+	return InterlockedCompareExchange (next_entry, -1, old) == old;
+}
+
/* LOCKING: requires that the GC lock is held */
static void
-process_stage_entries (int num_entries, volatile gint32 *next_entry, StageEntry *entries, void (*process_func) (MonoObject*, void*))
+process_stage_entries (int num_entries, volatile gint32 *next_entry, StageEntry *entries, void (*process_func) (MonoObject*, void*, int))
{
int i;
- int num_registered = 0;
- int num_busy = 0;
+
+ /*
+	 * This can happen if, after setting `next_entry` to `-1` in
+ * `try_lock_stage_for_processing()`, a GC was triggered, which then drained the
+ * queue and reset `next_entry`.
+ *
+ * We have the GC lock now, so if it's still `-1`, we can't be interrupted by a GC.
+ */
+ if (*next_entry != -1)
+ return;
for (i = 0; i < num_entries; ++i) {
- gint32 state = entries [i].state;
+ gint32 state;
- if (state == STAGE_ENTRY_BUSY)
- ++num_busy;
+ retry:
+ state = entries [i].state;
- if (state != STAGE_ENTRY_USED ||
- InterlockedCompareExchange (&entries [i].state, STAGE_ENTRY_BUSY, STAGE_ENTRY_USED) != STAGE_ENTRY_USED) {
+ switch (state) {
+ case STAGE_ENTRY_FREE:
+ case STAGE_ENTRY_INVALID:
+ continue;
+ case STAGE_ENTRY_BUSY:
+ /* BUSY -> INVALID */
+ /*
+ * This must be done atomically, because the filler thread can set
+ * the entry to `USED`, in which case we must process it, so we must
+ * detect that eventuality.
+ */
+ if (InterlockedCompareExchange (&entries [i].state, STAGE_ENTRY_INVALID, STAGE_ENTRY_BUSY) != STAGE_ENTRY_BUSY)
+ goto retry;
continue;
+ case STAGE_ENTRY_USED:
+ break;
+ default:
+ SGEN_ASSERT (0, FALSE, "Invalid stage entry state");
+ break;
}
- process_func (entries [i].obj, entries [i].user_data);
+ /* state is USED */
+
+ process_func (entries [i].obj, entries [i].user_data, i);
entries [i].obj = NULL;
entries [i].user_data = NULL;
mono_memory_write_barrier ();
+ /* USED -> FREE */
+ /*
+ * This transition only happens here, so we don't have to do it atomically.
+ */
entries [i].state = STAGE_ENTRY_FREE;
-
- ++num_registered;
}
- *next_entry = 0;
+ mono_memory_write_barrier ();
- /* g_print ("stage busy %d reg %d\n", num_busy, num_registered); */
+ *next_entry = 0;
}
-static gboolean
+#ifdef HEAVY_STATISTICS
+static guint64 stat_overflow_abort = 0;
+static guint64 stat_wait_for_processing = 0;
+static guint64 stat_increment_other_thread = 0;
+static guint64 stat_index_decremented = 0;
+static guint64 stat_entry_invalidated = 0;
+static guint64 stat_success = 0;
+#endif
+
+static int
add_stage_entry (int num_entries, volatile gint32 *next_entry, StageEntry *entries, MonoObject *obj, void *user_data)
{
- gint32 index;
-
- do {
- do {
- index = *next_entry;
- if (index >= num_entries)
- return FALSE;
- } while (InterlockedCompareExchange (next_entry, index + 1, index) != index);
-
+ gint32 index, new_next_entry, old_next_entry;
+ gint32 previous_state;
+
+ retry:
+ for (;;) {
+ index = *next_entry;
+ if (index >= num_entries) {
+ HEAVY_STAT (++stat_overflow_abort);
+ return -1;
+ }
+ if (index < 0) {
+ /*
+ * Backed-off waiting is way more efficient than even using a
+ * dedicated lock for this.
+ */
+ while ((index = *next_entry) < 0) {
+ /*
+ * This seems like a good value. Determined by timing
+ * sgen-weakref-stress.exe.
+ */
+ g_usleep (200);
+ HEAVY_STAT (++stat_wait_for_processing);
+ }
+ continue;
+ }
+ /* FREE -> BUSY */
+ if (entries [index].state != STAGE_ENTRY_FREE ||
+ InterlockedCompareExchange (&entries [index].state, STAGE_ENTRY_BUSY, STAGE_ENTRY_FREE) != STAGE_ENTRY_FREE) {
+ /*
+ * If we can't get the entry it must be because another thread got
+ * it first. We don't want to wait for that thread to increment
+ * `next_entry`, so we try to do it ourselves. Whether we succeed
+ * or not, we start over.
+ */
+ if (*next_entry == index) {
+ InterlockedCompareExchange (next_entry, index + 1, index);
+ //g_print ("tried increment for other thread\n");
+ HEAVY_STAT (++stat_increment_other_thread);
+ }
+ continue;
+ }
+ /* state is BUSY now */
+ mono_memory_write_barrier ();
/*
- * We don't need a write barrier here. *next_entry is just a
- * help for finding an index, its value is irrelevant for
- * correctness.
+ * Incrementing `next_entry` must happen after setting the state to `BUSY`.
+ * If it were the other way around, it would be possible that after a filler
+ * incremented the index, other threads fill up the queue, the queue is
+ * drained, the original filler finally fills in the slot, but `next_entry`
+ * ends up at the start of the queue, and new entries are written in the
+ * queue in front of, not behind, the original filler's entry.
+ *
+ * We don't actually require that the CAS succeeds, but we do require that
+ * the value of `next_entry` is not lower than our index. Since the drainer
+ * sets it to `-1`, that also takes care of the case that the drainer is
+ * currently running.
*/
- } while (entries [index].state != STAGE_ENTRY_FREE ||
- InterlockedCompareExchange (&entries [index].state, STAGE_ENTRY_BUSY, STAGE_ENTRY_FREE) != STAGE_ENTRY_FREE);
+ old_next_entry = InterlockedCompareExchange (next_entry, index + 1, index);
+ if (old_next_entry < index) {
+ /* BUSY -> FREE */
+ /* INVALID -> FREE */
+ /*
+ * The state might still be `BUSY`, or the drainer could have set it
+ * to `INVALID`. In either case, there's no point in CASing. Set
+ * it to `FREE` and start over.
+ */
+ entries [index].state = STAGE_ENTRY_FREE;
+ HEAVY_STAT (++stat_index_decremented);
+ continue;
+ }
+ break;
+ }
+
+ SGEN_ASSERT (0, index >= 0 && index < num_entries, "Invalid index");
entries [index].obj = obj;
entries [index].user_data = user_data;
mono_memory_write_barrier ();
- entries [index].state = STAGE_ENTRY_USED;
+ new_next_entry = *next_entry;
+ mono_memory_read_barrier ();
+ /* BUSY -> USED */
+ /*
+ * A `BUSY` entry will either still be `BUSY` or the drainer will have set it to
+ * `INVALID`. In the former case, we set it to `USED` and we're finished. In the
+ * latter case, we reset it to `FREE` and start over.
+ */
+ previous_state = InterlockedCompareExchange (&entries [index].state, STAGE_ENTRY_USED, STAGE_ENTRY_BUSY);
+ if (previous_state == STAGE_ENTRY_BUSY) {
+ SGEN_ASSERT (0, new_next_entry >= index || new_next_entry < 0, "Invalid next entry index - as long as we're busy, other thread can only increment or invalidate it");
+ HEAVY_STAT (++stat_success);
+ return index;
+ }
+
+ SGEN_ASSERT (0, previous_state == STAGE_ENTRY_INVALID, "Invalid state transition - other thread can only make busy state invalid");
+ entries [index].obj = NULL;
+ entries [index].user_data = NULL;
+ mono_memory_write_barrier ();
+ /* INVALID -> FREE */
+ entries [index].state = STAGE_ENTRY_FREE;
+
+ HEAVY_STAT (++stat_entry_invalidated);
- return TRUE;
+ goto retry;
}
/* LOCKING: requires that the GC lock is held */
static void
-process_fin_stage_entry (MonoObject *obj, void *user_data)
+process_fin_stage_entry (MonoObject *obj, void *user_data, int index)
{
if (ptr_in_nursery (obj))
register_for_finalization (obj, user_data, GENERATION_NURSERY);
}
/* LOCKING: requires that the GC lock is held */
-static void
-process_fin_stage_entries (void)
+void
+sgen_process_fin_stage_entries (void)
 {
+	/* Triggered by a GC: the world is stopped, so the stage can be locked without a CAS. */
+	lock_stage_for_processing (&next_fin_stage_entry);
 	process_stage_entries (NUM_FIN_STAGE_ENTRIES, &next_fin_stage_entry, fin_stage_entries, process_fin_stage_entry);
 }
 void
 mono_gc_register_for_finalization (MonoObject *obj, void *user_data)
 {
-	while (!add_stage_entry (NUM_FIN_STAGE_ENTRIES, &next_fin_stage_entry, fin_stage_entries, obj, user_data)) {
-		LOCK_GC;
-		process_fin_stage_entries ();
-		UNLOCK_GC;
+	/*
+	 * `-1` means the stage overflowed.  Try to become the drainer; if another
+	 * thread wins that race it drains the queue for us and we simply retry.
+	 */
+	while (add_stage_entry (NUM_FIN_STAGE_ENTRIES, &next_fin_stage_entry, fin_stage_entries, obj, user_data) == -1) {
+		if (try_lock_stage_for_processing (NUM_FIN_STAGE_ENTRIES, &next_fin_stage_entry)) {
+			LOCK_GC;
+			process_stage_entries (NUM_FIN_STAGE_ENTRIES, &next_fin_stage_entry, fin_stage_entries, process_fin_stage_entry);
+			UNLOCK_GC;
+		}
 	}
 }
/* remove and put in out_array */
SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);
out_array [count ++] = object;
- DEBUG (5, fprintf (gc_debug_file, "Collecting object for finalization: %p (%s) (%d/%d)\n", object, safe_name (object), num_ready_finalizers, sgen_hash_table_num_entries (hash_table)));
+ SGEN_LOG (5, "Collecting object for finalization: %p (%s) (%d/%d)", object, sgen_safe_name (object), num_ready_finalizers, sgen_hash_table_num_entries (hash_table));
if (count == out_size)
return count;
continue;
int result;
LOCK_GC;
- process_fin_stage_entries ();
+ sgen_process_fin_stage_entries ();
result = finalizers_for_domain (domain, out_array, out_size, &minor_finalizable_hash);
if (result < out_size) {
result += finalizers_for_domain (domain, out_array + result, out_size - result,
if (!obj) {
if (sgen_hash_table_remove (hash_table, link, NULL)) {
- DEBUG (5, fprintf (gc_debug_file, "Removed dislink %p (%d) from %s table\n",
- link, hash_table->num_entries, generation_name (generation)));
+ SGEN_LOG (5, "Removed dislink %p (%d) from %s table",
+ link, hash_table->num_entries, sgen_generation_name (generation));
}
return;
}
- sgen_hash_table_replace (hash_table, link, NULL);
- DEBUG (5, fprintf (gc_debug_file, "Added dislink for object: %p (%s) at %p to %s table\n",
- obj, obj->vtable->klass->name, link, generation_name (generation)));
+ sgen_hash_table_replace (hash_table, link, NULL, NULL);
+ SGEN_LOG (5, "Added dislink for object: %p (%s) at %p to %s table",
+ obj, obj->vtable->klass->name, link, sgen_generation_name (generation));
}
/* LOCKING: requires that the GC lock is held */
-static void
-null_link_in_range (CopyOrMarkObjectFunc copy_func, char *start, char *end, int generation, gboolean before_finalization, GrayQueue *queue)
+void
+sgen_null_link_in_range (int generation, gboolean before_finalization, ScanCopyContext ctx)
{
+ CopyOrMarkObjectFunc copy_func = ctx.copy_func;
+ GrayQueue *queue = ctx.queue;
void **link;
gpointer dummy;
SgenHashTable *hash = get_dislink_hash_table (generation);
SGEN_HASH_TABLE_FOREACH (hash, link, dummy) {
char *object;
- gboolean track = DISLINK_TRACK (link);
+ gboolean track;
+
+ /*
+ We null a weak link before unregistering it, so it's possible that a thread is
+ suspended right in between setting the content to null and staging the unregister.
+
+ The rest of this code cannot handle null links as DISLINK_OBJECT (NULL) produces an invalid address.
+
+ We should simply skip the entry as the staged removal will take place during the next GC.
+ */
+ if (!*link) {
+ SGEN_LOG (5, "Dislink %p was externally nullified", link);
+ continue;
+ }
+ track = DISLINK_TRACK (link);
/*
* Tracked references are processed after
* finalization handling whereas standard weak
*/
if (track != before_finalization) {
object = DISLINK_OBJECT (link);
+ /*
+			We should guard against a null object being hidden.  This can sometimes happen.
+ */
+ if (!object) {
+ SGEN_LOG (5, "Dislink %p with a hidden null object", link);
+ continue;
+ }
- if (object >= start && object < end && !major_collector.is_object_live (object)) {
+ if (!major_collector.is_object_live (object)) {
if (sgen_gc_is_object_ready_for_finalization (object)) {
*link = NULL;
- DEBUG (5, fprintf (gc_debug_file, "Dislink nullified at %p to GCed object %p\n", link, object));
+ binary_protocol_dislink_update (link, NULL, 0, 0);
+ SGEN_LOG (5, "Dislink nullified at %p to GCed object %p", link, object);
SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);
continue;
} else {
g_assert (copy);
*link = HIDE_POINTER (copy, track);
add_or_remove_disappearing_link ((MonoObject*)copy, link, GENERATION_OLD);
+ binary_protocol_dislink_update (link, copy, track, 0);
- DEBUG (5, fprintf (gc_debug_file, "Upgraded dislink at %p to major because object %p moved to %p\n", link, object, copy));
+ SGEN_LOG (5, "Upgraded dislink at %p to major because object %p moved to %p", link, object, copy);
continue;
} else {
*link = HIDE_POINTER (copy, track);
- DEBUG (5, fprintf (gc_debug_file, "Updated dislink at %p to %p\n", link, DISLINK_OBJECT (link)));
+ binary_protocol_dislink_update (link, copy, track, 0);
+ SGEN_LOG (5, "Updated dislink at %p to %p", link, DISLINK_OBJECT (link));
}
}
}
}
/* LOCKING: requires that the GC lock is held */
-static void
-null_links_for_domain (MonoDomain *domain, int generation)
+void
+sgen_null_links_for_domain (MonoDomain *domain, int generation)
{
void **link;
gpointer dummy;
SgenHashTable *hash = get_dislink_hash_table (generation);
SGEN_HASH_TABLE_FOREACH (hash, link, dummy) {
char *object = DISLINK_OBJECT (link);
- if (object && !((MonoObject*)object)->vtable) {
- gboolean free = TRUE;
- if (*link) {
- *link = NULL;
- free = FALSE;
- /*
- * This can happen if finalizers are not ran, i.e. Environment.Exit ()
- * is called from finalizer like in finalizer-abort.cs.
- */
- DEBUG (5, fprintf (gc_debug_file, "Disappearing link %p not freed", link));
- }
+ if (object)
+ SGEN_ASSERT (0, ((MonoObject*)object)->vtable, "Can't have objects without vtables.");
+
+ if (*link && object && ((MonoObject*)object)->vtable->domain == domain) {
+ *link = NULL;
+ binary_protocol_dislink_update (link, NULL, 0, 0);
+ /*
+			 * This can happen if finalizers are not run, i.e. Environment.Exit ()
+			 * is called from a finalizer, as in finalizer-abort.cs.
+ */
+ SGEN_LOG (5, "Disappearing link %p not freed", link);
- SGEN_HASH_TABLE_FOREACH_REMOVE (free);
+ /*
+ * FIXME: Why don't we free the entry here?
+ */
+ SGEN_HASH_TABLE_FOREACH_REMOVE (FALSE);
continue;
}
} SGEN_HASH_TABLE_FOREACH_END;
}
-static void
-remove_finalizers_for_domain (MonoDomain *domain, int generation)
+/* LOCKING: requires that the GC lock is held */
+void
+sgen_null_links_with_predicate (int generation, WeakLinkAlivePredicateFunc predicate, void *data)
+{
+	void **link;
+	gpointer dummy;
+	SgenHashTable *hash = get_dislink_hash_table (generation);
+	/* Null out and remove every dislink whose target `predicate` reports as dead. */
+	SGEN_HASH_TABLE_FOREACH (hash, link, dummy) {
+		char *object = DISLINK_OBJECT (link);
+		mono_bool is_alive;
+
+		/* Link was nullified externally; skip it. */
+		if (!*link)
+			continue;
+		is_alive = predicate ((MonoObject*)object, data);
+
+		if (!is_alive) {
+			*link = NULL;
+			binary_protocol_dislink_update (link, NULL, 0, 0);
+			SGEN_LOG (5, "Dislink nullified by predicate at %p to GCed object %p", link, object);
+			SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);
+			continue;
+		}
+	} SGEN_HASH_TABLE_FOREACH_END;
+}
+
+void
+sgen_remove_finalizers_for_domain (MonoDomain *domain, int generation)
{
SgenHashTable *hash_table = get_finalize_entry_hash_table (generation);
MonoObject *object;
gpointer dummy;
SGEN_HASH_TABLE_FOREACH (hash_table, object, dummy) {
- int tag = tagged_object_get_tag (object);
object = tagged_object_get_object (object);
if (mono_object_domain (object) == domain) {
- DEBUG (5, fprintf (gc_debug_file, "Unregistering finalizer for object: %p (%s)\n", object, safe_name (object)));
+ SGEN_LOG (5, "Unregistering finalizer for object: %p (%s)", object, sgen_safe_name (object));
- SGEN_HASH_TABLE_FOREACH_REMOVE (free);
+ SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);
continue;
}
} SGEN_HASH_TABLE_FOREACH_END;
/* LOCKING: requires that the GC lock is held */
static void
-process_dislink_stage_entry (MonoObject *obj, void *_link)
+process_dislink_stage_entry (MonoObject *obj, void *_link, int index)
{
void **link = _link;
+ if (index >= 0)
+ binary_protocol_dislink_process_staged (link, obj, index);
+
add_or_remove_disappearing_link (NULL, link, GENERATION_NURSERY);
add_or_remove_disappearing_link (NULL, link, GENERATION_OLD);
if (obj) {
static StageEntry dislink_stage_entries [NUM_DISLINK_STAGE_ENTRIES];
/* LOCKING: requires that the GC lock is held */
-static void
-process_dislink_stage_entries (void)
+void
+sgen_process_dislink_stage_entries (void)
 {
+	/* Triggered by a GC: the world is stopped, so the stage can be locked without a CAS. */
+	lock_stage_for_processing (&next_dislink_stage_entry);
 	process_stage_entries (NUM_DISLINK_STAGE_ENTRIES, &next_dislink_stage_entry, dislink_stage_entries, process_dislink_stage_entry);
 }
-static void
-mono_gc_register_disappearing_link (MonoObject *obj, void **link, gboolean track, gboolean in_gc)
+void
+sgen_register_disappearing_link (MonoObject *obj, void **link, gboolean track, gboolean in_gc)
{
+
+#ifdef ENABLE_DTRACE
+ if (MONO_GC_WEAK_UPDATE_ENABLED ()) {
+ MonoVTable *vt = obj ? (MonoVTable*)SGEN_LOAD_VTABLE (obj) : NULL;
+ MONO_GC_WEAK_UPDATE ((mword)link,
+ *link ? (mword)DISLINK_OBJECT (link) : (mword)0,
+ (mword)obj,
+ obj ? (mword)sgen_safe_object_get_size (obj) : (mword)0,
+ obj ? vt->klass->name_space : NULL,
+ obj ? vt->klass->name : NULL,
+ track ? 1 : 0);
+ }
+#endif
+
if (obj)
*link = HIDE_POINTER (obj, track);
else
#if 1
if (in_gc) {
- process_dislink_stage_entry (obj, link);
+ binary_protocol_dislink_update (link, obj, track, 0);
+ process_dislink_stage_entry (obj, link, -1);
} else {
- while (!add_stage_entry (NUM_DISLINK_STAGE_ENTRIES, &next_dislink_stage_entry, dislink_stage_entries, obj, link)) {
- LOCK_GC;
- process_dislink_stage_entries ();
- UNLOCK_GC;
+ int index;
+ binary_protocol_dislink_update (link, obj, track, 1);
+ while ((index = add_stage_entry (NUM_DISLINK_STAGE_ENTRIES, &next_dislink_stage_entry, dislink_stage_entries, obj, link)) == -1) {
+ if (try_lock_stage_for_processing (NUM_DISLINK_STAGE_ENTRIES, &next_dislink_stage_entry)) {
+ LOCK_GC;
+ process_stage_entries (NUM_DISLINK_STAGE_ENTRIES, &next_dislink_stage_entry, dislink_stage_entries, process_dislink_stage_entry);
+ UNLOCK_GC;
+ }
}
+ binary_protocol_dislink_update_staged (link, obj, track, index);
}
#else
if (!in_gc)
LOCK_GC;
- process_dislink_stage_entry (obj, link);
+ binary_protocol_dislink_update (link, obj, track, 0);
+ process_dislink_stage_entry (obj, link, -1);
if (!in_gc)
UNLOCK_GC;
#endif
}
+
+/*
+ * Register the HEAVY_STATISTICS counters for the staging queues.
+ * Compiles to a no-op when HEAVY_STATISTICS is not defined.
+ */
+void
+sgen_init_fin_weak_hash (void)
+{
+#ifdef HEAVY_STATISTICS
+	mono_counters_register ("FinWeak Successes", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_success);
+	mono_counters_register ("FinWeak Overflow aborts", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_overflow_abort);
+	mono_counters_register ("FinWeak Wait for processing", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_wait_for_processing);
+	mono_counters_register ("FinWeak Increment other thread", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_increment_other_thread);
+	mono_counters_register ("FinWeak Index decremented", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_index_decremented);
+	mono_counters_register ("FinWeak Entry invalidated", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_entry_invalidated);
+#endif
+}
+
+#endif /* HAVE_SGEN_GC */