/*
 * sgen-fin-weak-hash.c: Finalizers and weak links.
 *
 * Author:
 *	Paolo Molaro (lupus@ximian.com)
 *	Rodrigo Kumpera (kumpera@gmail.com)
 *
 * Copyright 2005-2011 Novell, Inc (http://www.novell.com)
 * Copyright 2011 Xamarin Inc (http://www.xamarin.com)
 * Copyright (C) 2012 Xamarin Inc
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Library General Public
 * License 2.0 as published by the Free Software Foundation;
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Library General Public License for more details.
 *
 * You should have received a copy of the GNU Library General Public
 * License 2.0 along with this library; if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include "config.h"

#ifdef HAVE_SGEN_GC

#include "mono/sgen/sgen-gc.h"
#include "mono/sgen/sgen-gray.h"
#include "mono/sgen/sgen-protocol.h"
#include "mono/sgen/sgen-pointer-queue.h"
#include "mono/sgen/sgen-client.h"
#include "mono/utils/mono-membar.h"
#define ptr_in_nursery sgen_ptr_in_nursery

typedef SgenGrayQueue GrayQueue;

static int no_finalize = 0;

#define DISLINK_OBJECT(l)	(REVEAL_POINTER (*(void**)(l)))
#define DISLINK_TRACK(l)	((~(size_t)(*(void**)(l))) & 1)
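
/*
 * Illustrative sketch (not part of the implementation): HIDE_POINTER and
 * REVEAL_POINTER come from the client headers, and the exact bit layout shown
 * here is an assumption.  A registered link slot holds the bitwise complement
 * of the object pointer, with the low bit of that complement carrying the
 * "track resurrection" flag, so a live slot never looks like a real pointer:
 *
 *	void *slot = HIDE_POINTER (obj, track);
 *	GCObject *o = DISLINK_OBJECT (&slot);	// == obj
 *	gboolean t  = DISLINK_TRACK (&slot);	// == track
 */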
/*
 * The finalizable hash has the object as its key; the disappearing_link
 * hash has the link address as its key.
 */
#define TAG_MASK ((mword)0x1)

static inline GCObject*
tagged_object_get_object (GCObject *object)
{
	return (GCObject*)(((mword)object) & ~TAG_MASK);
}

static inline int
tagged_object_get_tag (GCObject *object)
{
	return ((mword)object) & TAG_MASK;
}

static inline GCObject*
tagged_object_apply (void *object, int tag_bits)
{
	return (GCObject*)((mword)object | (mword)tag_bits);
}

static int
tagged_object_hash (GCObject *o)
{
	return sgen_aligned_addr_hash (tagged_object_get_object (o));
}

static gboolean
tagged_object_equals (GCObject *a, GCObject *b)
{
	return tagged_object_get_object (a) == tagged_object_get_object (b);
}
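
/*
 * Minimal usage sketch for the helpers above, assuming objects are at least
 * 2-byte aligned so the low bit of a GCObject* is free to carry a tag
 * (BRIDGE_OBJECT_MARKED is defined below):
 *
 *	GCObject *key = tagged_object_apply (obj, BRIDGE_OBJECT_MARKED);
 *	g_assert (tagged_object_get_object (key) == obj);
 *	g_assert (tagged_object_get_tag (key) == BRIDGE_OBJECT_MARKED);
 *
 * Since hashing and equality strip the tag, a tagged and an untagged key find
 * the same entry in the finalizable hash tables below.
 */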
static SgenHashTable minor_finalizable_hash = SGEN_HASH_TABLE_INIT (INTERNAL_MEM_FIN_TABLE, INTERNAL_MEM_FINALIZE_ENTRY, 0, (GHashFunc)tagged_object_hash, (GEqualFunc)tagged_object_equals);
static SgenHashTable major_finalizable_hash = SGEN_HASH_TABLE_INIT (INTERNAL_MEM_FIN_TABLE, INTERNAL_MEM_FINALIZE_ENTRY, 0, (GHashFunc)tagged_object_hash, (GEqualFunc)tagged_object_equals);

static SgenHashTable*
get_finalize_entry_hash_table (int generation)
{
	switch (generation) {
	case GENERATION_NURSERY: return &minor_finalizable_hash;
	case GENERATION_OLD: return &major_finalizable_hash;
	default: g_assert_not_reached ();
	}
}
#define BRIDGE_OBJECT_MARKED 0x1

/* LOCKING: requires that the GC lock is held */
void
sgen_mark_bridge_object (GCObject *obj)
{
	SgenHashTable *hash_table = get_finalize_entry_hash_table (ptr_in_nursery (obj) ? GENERATION_NURSERY : GENERATION_OLD);

	sgen_hash_table_set_key (hash_table, obj, tagged_object_apply (obj, BRIDGE_OBJECT_MARKED));
}
/* LOCKING: requires that the GC lock is held */
void
sgen_collect_bridge_objects (int generation, ScanCopyContext ctx)
{
	CopyOrMarkObjectFunc copy_func = ctx.ops->copy_or_mark_object;
	GrayQueue *queue = ctx.queue;
	SgenHashTable *hash_table = get_finalize_entry_hash_table (generation);
	GCObject *object;
	gpointer dummy G_GNUC_UNUSED;
	GCObject *copy;
	SgenPointerQueue moved_fin_objects;

	sgen_pointer_queue_init (&moved_fin_objects, INTERNAL_MEM_TEMPORARY);

	if (no_finalize)
		return;

	SGEN_HASH_TABLE_FOREACH (hash_table, object, dummy) {
		int tag = tagged_object_get_tag (object);
		object = tagged_object_get_object (object);

		/* Bridge code told us to ignore this one */
		if (tag == BRIDGE_OBJECT_MARKED)
			continue;

		/* The major heap still considers the object live: skip it. */
		if (major_collector.is_object_live (object))
			continue;

		/* Not ready for finalization, i.e. still reachable: skip it. */
		if (!sgen_gc_is_object_ready_for_finalization (object))
			continue;

		if (!sgen_client_bridge_is_bridge_object (object))
			continue;

		copy = object;
		copy_func (&copy, queue);

		sgen_client_bridge_register_finalized_object (copy);

		if (hash_table == &minor_finalizable_hash && !ptr_in_nursery (copy)) {
			/* remove from the list */
			SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);
			/* insert it into the major hash */
			sgen_hash_table_replace (&major_finalizable_hash, tagged_object_apply (copy, tag), NULL, NULL);
			SGEN_LOG (5, "Promoting finalization of object %p (%s) (was at %p) to major table", copy, sgen_client_vtable_get_name (SGEN_LOAD_VTABLE (copy)), object);
			continue;
		} else if (copy != object) {
			/* update pointer */
			SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);
			/* register for reinsertion */
			sgen_pointer_queue_add (&moved_fin_objects, tagged_object_apply (copy, tag));
			SGEN_LOG (5, "Updating object for finalization: %p (%s) (was at %p)", copy, sgen_client_vtable_get_name (SGEN_LOAD_VTABLE (copy)), object);
			continue;
		}
	} SGEN_HASH_TABLE_FOREACH_END;

	while (!sgen_pointer_queue_is_empty (&moved_fin_objects)) {
		sgen_hash_table_replace (hash_table, sgen_pointer_queue_pop (&moved_fin_objects), NULL, NULL);
	}

	sgen_pointer_queue_free (&moved_fin_objects);
}
/* LOCKING: requires that the GC lock is held */
void
sgen_finalize_in_range (int generation, ScanCopyContext ctx)
{
	CopyOrMarkObjectFunc copy_func = ctx.ops->copy_or_mark_object;
	GrayQueue *queue = ctx.queue;
	SgenHashTable *hash_table = get_finalize_entry_hash_table (generation);
	GCObject *object;
	gpointer dummy G_GNUC_UNUSED;
	SgenPointerQueue moved_fin_objects;

	sgen_pointer_queue_init (&moved_fin_objects, INTERNAL_MEM_TEMPORARY);

	if (no_finalize)
		return;

	SGEN_HASH_TABLE_FOREACH (hash_table, object, dummy) {
		int tag = tagged_object_get_tag (object);
		object = tagged_object_get_object (object);
		if (!major_collector.is_object_live (object)) {
			gboolean is_fin_ready = sgen_gc_is_object_ready_for_finalization (object);
			GCObject *copy = object;
			copy_func (&copy, queue);
			if (is_fin_ready) {
				/* remove and put in fin_ready_list */
				SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);
				sgen_queue_finalization_entry (copy);
				/* Make it survive */
				SGEN_LOG (5, "Queueing object for finalization: %p (%s) (was at %p) (%d)", copy, sgen_client_vtable_get_name (SGEN_LOAD_VTABLE (copy)), object, sgen_hash_table_num_entries (hash_table));
			} else {
				if (hash_table == &minor_finalizable_hash && !ptr_in_nursery (copy)) {
					/* remove from the list */
					SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);
					/* insert it into the major hash */
					sgen_hash_table_replace (&major_finalizable_hash, tagged_object_apply (copy, tag), NULL, NULL);
					SGEN_LOG (5, "Promoting finalization of object %p (%s) (was at %p) to major table", copy, sgen_client_vtable_get_name (SGEN_LOAD_VTABLE (copy)), object);
				} else if (copy != object) {
					/* update pointer */
					SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);
					/* register for reinsertion */
					sgen_pointer_queue_add (&moved_fin_objects, tagged_object_apply (copy, tag));
					SGEN_LOG (5, "Updating object for finalization: %p (%s) (was at %p)", copy, sgen_client_vtable_get_name (SGEN_LOAD_VTABLE (copy)), object);
				}
			}
		}
	} SGEN_HASH_TABLE_FOREACH_END;

	while (!sgen_pointer_queue_is_empty (&moved_fin_objects)) {
		sgen_hash_table_replace (hash_table, sgen_pointer_queue_pop (&moved_fin_objects), NULL, NULL);
	}

	sgen_pointer_queue_free (&moved_fin_objects);
}
/* LOCKING: requires that the GC lock is held */
static void
register_for_finalization (GCObject *obj, void *user_data, int generation)
{
	SgenHashTable *hash_table = get_finalize_entry_hash_table (generation);

	if (no_finalize)
		return;

	if (user_data) {
		if (sgen_hash_table_replace (hash_table, obj, NULL, NULL)) {
			GCVTable vt = SGEN_LOAD_VTABLE_UNCHECKED (obj);
			SGEN_LOG (5, "Added finalizer for object: %p (%s) (%d) to %s table", obj, sgen_client_vtable_get_name (vt), hash_table->num_entries, sgen_generation_name (generation));
		}
	} else {
		if (sgen_hash_table_remove (hash_table, obj, NULL)) {
			GCVTable vt = SGEN_LOAD_VTABLE_UNCHECKED (obj);
			SGEN_LOG (5, "Removed finalizer for object: %p (%s) (%d)", obj, sgen_client_vtable_get_name (vt), hash_table->num_entries);
		}
	}
}
/*
 * We're using (mostly) non-locking staging queues for finalizers and weak links to speed
 * up registering them.  Otherwise we'd have to take the GC lock.
 *
 * The queues are arrays of `StageEntry`, plus a `next_entry` index.  Threads add entries to
 * the queue via `add_stage_entry()` in a linear fashion until it fills up, in which case
 * `process_stage_entries()` is called to drain it.  A garbage collection will also drain
 * the queues via the same function.  That implies that `add_stage_entry()`, since it
 * doesn't take a lock, must be able to run concurrently with `process_stage_entries()`,
 * though it doesn't have to make progress while the queue is drained.  In fact, once it
 * detects that the queue is being drained, it blocks until the draining is done.
 *
 * The protocol must guarantee that entries in the queue are causally ordered, otherwise two
 * entries for the same location might get switched, resulting in the earlier one being
 * committed and the later one ignored.
 *
 * `next_entry` is the index of the next entry to be filled, or `-1` if the queue is
 * currently being drained.  Each entry has a state:
 *
 * `STAGE_ENTRY_FREE`: The entry is free.  Its data fields must be `NULL`.
 *
 * `STAGE_ENTRY_BUSY`: The entry is currently being filled in.
 *
 * `STAGE_ENTRY_USED`: The entry is completely filled in and must be processed in the next
 * draining round.
 *
 * `STAGE_ENTRY_INVALID`: The entry was busy during queue draining and therefore
 * invalidated.  Entries that are `BUSY` can obviously not be processed during a drain, but
 * we can't leave them in place because new entries might be inserted before them, including
 * from the same thread, violating causality.  An alternative would be not to reset
 * `next_entry` to `0` after a drain, but to the index of the last `BUSY` entry plus one;
 * however, that could potentially waste the whole queue.
 *
 * States can be switched to as follows:
 *
 * | from    | to      | filler? | drainer? |
 * +---------+---------+---------+----------+
 * | FREE    | BUSY    | X       |          |
 * | BUSY    | FREE    | X       |          |
 * | BUSY    | USED    | X       |          |
 * | BUSY    | INVALID |         | X        |
 * | USED    | FREE    |         | X        |
 * | INVALID | FREE    | X       |          |
 *
 * `next_entry` can be incremented either by the filler thread that set the corresponding
 * entry to `BUSY`, or by another filler thread that's trying to get a `FREE` slot.  If that
 * other thread wasn't allowed to increment, it would block on the first filler thread.
 *
 * An entry's state, once it's set from `FREE` to `BUSY` by a filler thread, can only be
 * changed by that same thread or by the drainer.  The drainer can only set a `BUSY` entry
 * to `INVALID`, so the entry needs to be set to `FREE` again by the original filler thread.
 */
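
/*
 * Example interleaving (illustrative only): filler thread F races with
 * drainer D on entry 3.
 *
 *	F: index = next_entry;                       // index == 3
 *	F: CAS (entries [3].state, FREE -> BUSY)     // succeeds
 *	D: next_entry = -1;                          // drain begins
 *	D: CAS (entries [3].state, BUSY -> INVALID)  // F hasn't committed yet
 *	F: CAS (entries [3].state, BUSY -> USED)     // fails, sees INVALID
 *	F: entries [3].state = FREE;                 // INVALID -> FREE
 *	F: start over, waiting out the drain
 *
 * Had D's CAS failed because F had already set `USED`, D would simply process
 * entry 3 as part of this drain.
 */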
#define STAGE_ENTRY_FREE	0
#define STAGE_ENTRY_BUSY	1
#define STAGE_ENTRY_USED	2
#define STAGE_ENTRY_INVALID	3

typedef struct {
	volatile gint32 state;
	GCObject *obj;
	void *user_data;
} StageEntry;

#define NUM_FIN_STAGE_ENTRIES	1024

static volatile gint32 next_fin_stage_entry = 0;
static StageEntry fin_stage_entries [NUM_FIN_STAGE_ENTRIES];
/*
 * This is used to lock the stage when processing is forced, i.e. when it's triggered by a
 * garbage collection.  In that case, the world is already stopped and there's only one
 * thread operating on the queue.
 */
static void
lock_stage_for_processing (volatile gint32 *next_entry)
{
	*next_entry = -1;
}

/*
 * When processing is triggered by an overflow, we don't want to take the GC lock
 * immediately, and then set `next_entry` to `-1`, because another thread might have drained
 * the queue in the meantime.  Instead, we make sure the overflow is still there, we
 * atomically set `next_entry`, and only once that has happened do we take the GC lock.
 */
static gboolean
try_lock_stage_for_processing (int num_entries, volatile gint32 *next_entry)
{
	gint32 old = *next_entry;
	if (old < num_entries)
		return FALSE;
	return InterlockedCompareExchange (next_entry, -1, old) == old;
}
/* LOCKING: requires that the GC lock is held */
static void
process_stage_entries (int num_entries, volatile gint32 *next_entry, StageEntry *entries, void (*process_func) (GCObject*, void*, int))
{
	int i;

	/*
	 * This can happen if, after setting `next_entry` to `-1` in
	 * `try_lock_stage_for_processing()`, a GC was triggered, which then drained the
	 * queue and reset `next_entry`.  We have the GC lock now, so if it's still `-1`,
	 * we can't be interrupted by a GC.
	 */
	if (*next_entry != -1)
		return;

	for (i = 0; i < num_entries; ++i) {
		gint32 state;
	retry:
		state = entries [i].state;
		switch (state) {
		case STAGE_ENTRY_FREE:
		case STAGE_ENTRY_INVALID:
			continue;
		case STAGE_ENTRY_BUSY:
			/*
			 * BUSY -> INVALID.  This must be done atomically, because the
			 * filler thread can set the entry to `USED`, in which case we
			 * must process it, so we must detect that eventuality.
			 */
			if (InterlockedCompareExchange (&entries [i].state, STAGE_ENTRY_INVALID, STAGE_ENTRY_BUSY) != STAGE_ENTRY_BUSY)
				goto retry;
			continue;
		case STAGE_ENTRY_USED:
			break;
		default:
			SGEN_ASSERT (0, FALSE, "Invalid stage entry state");
			break;
		}

		/* The state is `USED`: process and free the entry. */
		process_func (entries [i].obj, entries [i].user_data, i);

		entries [i].obj = NULL;
		entries [i].user_data = NULL;

		mono_memory_write_barrier ();

		/*
		 * USED -> FREE.  This transition only happens here, so we don't have
		 * to do it atomically.
		 */
		entries [i].state = STAGE_ENTRY_FREE;
	}

	mono_memory_write_barrier ();

	*next_entry = 0;
}
#ifdef HEAVY_STATISTICS
static guint64 stat_overflow_abort = 0;
static guint64 stat_wait_for_processing = 0;
static guint64 stat_increment_other_thread = 0;
static guint64 stat_index_decremented = 0;
static guint64 stat_entry_invalidated = 0;
static guint64 stat_success = 0;
#endif
static int
add_stage_entry (int num_entries, volatile gint32 *next_entry, StageEntry *entries, GCObject *obj, void *user_data)
{
	gint32 index, new_next_entry, old_next_entry;
	gint32 previous_state;

 retry:
	for (;;) {
		index = *next_entry;
		if (index >= num_entries) {
			HEAVY_STAT (++stat_overflow_abort);
			return -1;
		}
		if (index < 0) {
			/*
			 * Backed-off waiting is way more efficient than even using a
			 * dedicated lock for this.
			 */
			while ((index = *next_entry) < 0) {
				/*
				 * This seems like a good value.  Determined by timing
				 * sgen-weakref-stress.exe.
				 */
				g_usleep (200);	/* restored back-off sleep; the exact primitive is an assumption */
				HEAVY_STAT (++stat_wait_for_processing);
			}
			continue;
		}
		/* FREE -> BUSY */
		if (entries [index].state != STAGE_ENTRY_FREE ||
				InterlockedCompareExchange (&entries [index].state, STAGE_ENTRY_BUSY, STAGE_ENTRY_FREE) != STAGE_ENTRY_FREE) {
			/*
			 * If we can't get the entry it must be because another thread got
			 * it first.  We don't want to wait for that thread to increment
			 * `next_entry`, so we try to do it ourselves.  Whether we succeed
			 * or not, we start over.
			 */
			if (*next_entry == index) {
				InterlockedCompareExchange (next_entry, index + 1, index);
				//g_print ("tried increment for other thread\n");
				HEAVY_STAT (++stat_increment_other_thread);
			}
			continue;
		}
		break;
	}

	/* state is BUSY now */
	mono_memory_write_barrier ();

	/*
	 * Incrementing `next_entry` must happen after setting the state to `BUSY`.
	 * If it were the other way around, it would be possible that after a filler
	 * incremented the index, other threads fill up the queue, the queue is
	 * drained, the original filler finally fills in the slot, but `next_entry`
	 * ends up at the start of the queue, and new entries are written in the
	 * queue in front of, not behind, the original filler's entry.
	 *
	 * We don't actually require that the CAS succeeds, but we do require that
	 * the value of `next_entry` is not lower than our index.  Since the drainer
	 * sets it to `-1`, that also takes care of the case that the drainer is
	 * currently draining.
	 */
	old_next_entry = InterlockedCompareExchange (next_entry, index + 1, index);
	if (old_next_entry < index) {
		/* BUSY or INVALID -> FREE */
		/*
		 * The state might still be `BUSY`, or the drainer could have set it
		 * to `INVALID`.  In either case, there's no point in CASing.  Set
		 * it to `FREE` and start over.
		 */
		entries [index].state = STAGE_ENTRY_FREE;
		HEAVY_STAT (++stat_index_decremented);
		goto retry;
	}

	SGEN_ASSERT (0, index >= 0 && index < num_entries, "Invalid index");

	entries [index].obj = obj;
	entries [index].user_data = user_data;

	mono_memory_write_barrier ();

	new_next_entry = *next_entry;
	mono_memory_read_barrier ();

	/* BUSY -> USED */
	/*
	 * A `BUSY` entry will either still be `BUSY` or the drainer will have set it to
	 * `INVALID`.  In the former case, we set it to `USED` and we're finished.  In the
	 * latter case, we reset it to `FREE` and start over.
	 */
	previous_state = InterlockedCompareExchange (&entries [index].state, STAGE_ENTRY_USED, STAGE_ENTRY_BUSY);
	if (previous_state == STAGE_ENTRY_BUSY) {
		SGEN_ASSERT (0, new_next_entry >= index || new_next_entry < 0, "Invalid next entry index - as long as we're busy, other threads can only increment or invalidate it");
		HEAVY_STAT (++stat_success);
		return index;
	}

	SGEN_ASSERT (0, previous_state == STAGE_ENTRY_INVALID, "Invalid state transition - other thread can only make busy state invalid");
	entries [index].obj = NULL;
	entries [index].user_data = NULL;
	mono_memory_write_barrier ();
	/* INVALID -> FREE */
	entries [index].state = STAGE_ENTRY_FREE;

	HEAVY_STAT (++stat_entry_invalidated);

	goto retry;
}
/* LOCKING: requires that the GC lock is held */
static void
process_fin_stage_entry (GCObject *obj, void *user_data, int index)
{
	if (ptr_in_nursery (obj))
		register_for_finalization (obj, user_data, GENERATION_NURSERY);
	else
		register_for_finalization (obj, user_data, GENERATION_OLD);
}

/* LOCKING: requires that the GC lock is held */
void
sgen_process_fin_stage_entries (void)
{
	lock_stage_for_processing (&next_fin_stage_entry);
	process_stage_entries (NUM_FIN_STAGE_ENTRIES, &next_fin_stage_entry, fin_stage_entries, process_fin_stage_entry);
}

void
sgen_object_register_for_finalization (GCObject *obj, void *user_data)
{
	while (add_stage_entry (NUM_FIN_STAGE_ENTRIES, &next_fin_stage_entry, fin_stage_entries, obj, user_data) == -1) {
		if (try_lock_stage_for_processing (NUM_FIN_STAGE_ENTRIES, &next_fin_stage_entry)) {
			LOCK_GC;
			process_stage_entries (NUM_FIN_STAGE_ENTRIES, &next_fin_stage_entry, fin_stage_entries, process_fin_stage_entry);
			UNLOCK_GC;
		}
	}
}
/* LOCKING: requires that the GC lock is held */
static int
finalizers_with_predicate (SgenObjectPredicateFunc predicate, void *user_data, GCObject **out_array, int out_size, SgenHashTable *hash_table)
{
	GCObject *object;
	gpointer dummy G_GNUC_UNUSED;
	int count;

	if (no_finalize || !out_size || !out_array)
		return 0;

	count = 0;
	SGEN_HASH_TABLE_FOREACH (hash_table, object, dummy) {
		object = tagged_object_get_object (object);

		if (predicate (object, user_data)) {
			/* remove and put in out_array */
			SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);
			out_array [count ++] = object;
			SGEN_LOG (5, "Collecting object for finalization: %p (%s) (%d)", object, sgen_client_vtable_get_name (SGEN_LOAD_VTABLE (object)), sgen_hash_table_num_entries (hash_table));
			if (count == out_size)
				return count;
			continue;
		}
	} SGEN_HASH_TABLE_FOREACH_END;

	return count;
}
/**
 * sgen_gather_finalizers_if:
 * @predicate: predicate function
 * @user_data: predicate function data argument
 * @out_array: output array
 * @out_size: size of output array
 *
 * Store inside @out_array up to @out_size objects that match @predicate.  Returns the
 * number of stored items.  Can be called repeatedly until it returns 0.
 *
 * The items are removed from the finalizer data structure, so the caller is supposed
 * to finalize them.
 *
 * @out_array must be on the stack, or registered as a root, to allow the GC to know the
 * objects are still alive.
 */
int
sgen_gather_finalizers_if (SgenObjectPredicateFunc predicate, void *user_data, GCObject **out_array, int out_size)
{
	int result;

	sgen_process_fin_stage_entries ();
	result = finalizers_with_predicate (predicate, user_data, (GCObject**)out_array, out_size, &minor_finalizable_hash);
	if (result < out_size) {
		result += finalizers_with_predicate (predicate, user_data, (GCObject**)out_array + result, out_size - result,
			&major_finalizable_hash);
	}
	return result;
}
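
/*
 * Typical call pattern (sketch; `matches_domain` and `run_finalizer` are
 * hypothetical client helpers, not part of this file):
 *
 *	GCObject *objs [64];	// stack array, so the GC sees the objects
 *	int i, n;
 *	while ((n = sgen_gather_finalizers_if (matches_domain, domain, objs, 64)) > 0) {
 *		for (i = 0; i < n; ++i)
 *			run_finalizer (objs [i]);
 *	}
 */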
static SgenHashTable minor_disappearing_link_hash = SGEN_HASH_TABLE_INIT (INTERNAL_MEM_DISLINK_TABLE, INTERNAL_MEM_DISLINK, 0, sgen_aligned_addr_hash, NULL);
static SgenHashTable major_disappearing_link_hash = SGEN_HASH_TABLE_INIT (INTERNAL_MEM_DISLINK_TABLE, INTERNAL_MEM_DISLINK, 0, sgen_aligned_addr_hash, NULL);

static SgenHashTable*
get_dislink_hash_table (int generation)
{
	switch (generation) {
	case GENERATION_NURSERY: return &minor_disappearing_link_hash;
	case GENERATION_OLD: return &major_disappearing_link_hash;
	default: g_assert_not_reached ();
	}
}
/* LOCKING: assumes the GC lock is held */
static void
add_or_remove_disappearing_link (GCObject *obj, void **link, int generation)
{
	SgenHashTable *hash_table = get_dislink_hash_table (generation);

	if (!obj) {
		if (sgen_hash_table_remove (hash_table, link, NULL)) {
			SGEN_LOG (5, "Removed dislink %p (%d) from %s table",
					link, hash_table->num_entries, sgen_generation_name (generation));
		}
		return;
	}

	sgen_hash_table_replace (hash_table, link, NULL, NULL);
	SGEN_LOG (5, "Added dislink for object: %p (%s) at %p to %s table",
			obj, sgen_client_vtable_get_name (SGEN_LOAD_VTABLE_UNCHECKED (obj)), link, sgen_generation_name (generation));
}
/* LOCKING: requires that the GC lock is held */
void
sgen_null_link_in_range (int generation, gboolean before_finalization, ScanCopyContext ctx)
{
	CopyOrMarkObjectFunc copy_func = ctx.ops->copy_or_mark_object;
	GrayQueue *queue = ctx.queue;
	void **link;
	GCObject *object;
	gboolean track;
	gpointer dummy G_GNUC_UNUSED;
	SgenHashTable *hash = get_dislink_hash_table (generation);

	SGEN_HASH_TABLE_FOREACH (hash, link, dummy) {
		/*
		 * We null a weak link before unregistering it, so it's possible that a
		 * thread is suspended right between setting the content to null and
		 * staging the unregistration.
		 *
		 * The rest of this code cannot handle null links, as DISLINK_OBJECT (NULL)
		 * produces an invalid address.
		 *
		 * We simply skip the entry; the staged removal will take place during the
		 * next GC.
		 */
		if (!*link) {
			SGEN_LOG (5, "Dislink %p was externally nullified", link);
			continue;
		}

		track = DISLINK_TRACK (link);
		/*
		 * Tracked references are processed after finalization handling, whereas
		 * standard weak references are processed before.  If an object is still
		 * not marked after finalization handling, it means that it either doesn't
		 * have a finalizer or the finalizer has already run, so we must null a
		 * tracking reference.
		 */
		if (track != before_finalization) {
			object = DISLINK_OBJECT (link);
			/*
			 * We should guard against a null object being hidden.  This can
			 * sometimes happen.
			 */
			if (!object) {
				SGEN_LOG (5, "Dislink %p with a hidden null object", link);
				continue;
			}

			if (!major_collector.is_object_live (object)) {
				if (sgen_gc_is_object_ready_for_finalization (object)) {
					*link = NULL;
					binary_protocol_dislink_update (link, NULL, 0, 0);
					SGEN_LOG (5, "Dislink nullified at %p to GCed object %p", link, object);
					SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);
					continue;
				} else {
					GCObject *copy = object;
					copy_func (&copy, queue);

					/*
					 * Update the pointer if it has moved.  If the object
					 * has been moved out of the nursery, we need to move
					 * the link from the minor hash table to the major one.
					 *
					 * FIXME: what if an object is moved earlier?
					 */
					if (hash == &minor_disappearing_link_hash && !ptr_in_nursery (copy)) {
						SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);

						*link = HIDE_POINTER (copy, track);
						add_or_remove_disappearing_link (copy, link, GENERATION_OLD);
						binary_protocol_dislink_update (link, copy, track, 0);

						SGEN_LOG (5, "Upgraded dislink at %p to major because object %p moved to %p", link, object, copy);

						continue;
					} else {
						*link = HIDE_POINTER (copy, track);
						binary_protocol_dislink_update (link, copy, track, 0);
						SGEN_LOG (5, "Updated dislink at %p to %p", link, DISLINK_OBJECT (link));
					}
				}
			}
		}
	} SGEN_HASH_TABLE_FOREACH_END;
}
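
/*
 * Concretely: for a non-tracking reference (track == FALSE) the pass that runs
 * before finalization handling nulls the link, so it reads NULL as soon as the
 * object is only finalizer-reachable.  For a tracking reference (track == TRUE)
 * the link survives that pass and is only nulled in the pass after finalization
 * handling, i.e. once the object wasn't resurrected by its finalizer.
 */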
/* LOCKING: requires that the GC lock is held */
void
sgen_null_links_if (SgenObjectPredicateFunc predicate, void *data, int generation)
{
	void **link;
	gpointer dummy G_GNUC_UNUSED;
	SgenHashTable *hash = get_dislink_hash_table (generation);

	SGEN_HASH_TABLE_FOREACH (hash, link, dummy) {
		char *object = DISLINK_OBJECT (link);

		if (!*link)
			continue;

		if (predicate ((GCObject*)object, data)) {
			*link = NULL;
			binary_protocol_dislink_update (link, NULL, 0, 0);
			SGEN_LOG (5, "Dislink nullified by predicate at %p to GCed object %p", link, object);
			SGEN_HASH_TABLE_FOREACH_REMOVE (FALSE /* TRUE */);
			continue;
		}
	} SGEN_HASH_TABLE_FOREACH_END;
}
void
sgen_remove_finalizers_if (SgenObjectPredicateFunc predicate, void *user_data, int generation)
{
	SgenHashTable *hash_table = get_finalize_entry_hash_table (generation);
	GCObject *object;
	gpointer dummy G_GNUC_UNUSED;

	SGEN_HASH_TABLE_FOREACH (hash_table, object, dummy) {
		object = tagged_object_get_object (object);

		if (predicate (object, user_data)) {
			SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);
			continue;
		}
	} SGEN_HASH_TABLE_FOREACH_END;
}
/* LOCKING: requires that the GC lock is held */
static void
process_dislink_stage_entry (GCObject *obj, void *_link, int index)
{
	void **link = _link;

	if (index >= 0)
		binary_protocol_dislink_process_staged (link, obj, index);

	add_or_remove_disappearing_link (NULL, link, GENERATION_NURSERY);
	add_or_remove_disappearing_link (NULL, link, GENERATION_OLD);
	if (obj) {
		if (ptr_in_nursery (obj))
			add_or_remove_disappearing_link (obj, link, GENERATION_NURSERY);
		else
			add_or_remove_disappearing_link (obj, link, GENERATION_OLD);
	}
}
#define NUM_DISLINK_STAGE_ENTRIES	1024

static volatile gint32 next_dislink_stage_entry = 0;
static StageEntry dislink_stage_entries [NUM_DISLINK_STAGE_ENTRIES];

/* LOCKING: requires that the GC lock is held */
void
sgen_process_dislink_stage_entries (void)
{
	lock_stage_for_processing (&next_dislink_stage_entry);
	process_stage_entries (NUM_DISLINK_STAGE_ENTRIES, &next_dislink_stage_entry, dislink_stage_entries, process_dislink_stage_entry);
}
void
sgen_register_disappearing_link (GCObject *obj, void **link, gboolean track, gboolean in_gc)
{
	if (obj)
		*link = HIDE_POINTER (obj, track);
	else
		*link = NULL;

#if 1
	if (in_gc) {
		binary_protocol_dislink_update (link, obj, track, 0);
		process_dislink_stage_entry (obj, link, -1);
	} else {
		gint32 index;
		binary_protocol_dislink_update (link, obj, track, 1);
		while ((index = add_stage_entry (NUM_DISLINK_STAGE_ENTRIES, &next_dislink_stage_entry, dislink_stage_entries, obj, link)) == -1) {
			if (try_lock_stage_for_processing (NUM_DISLINK_STAGE_ENTRIES, &next_dislink_stage_entry)) {
				LOCK_GC;
				process_stage_entries (NUM_DISLINK_STAGE_ENTRIES, &next_dislink_stage_entry, dislink_stage_entries, process_dislink_stage_entry);
				UNLOCK_GC;
			}
		}
		binary_protocol_dislink_update_staged (link, obj, track, index);
	}
#else
	/* Non-staged fallback: register directly under the GC lock. */
	if (!in_gc)
		LOCK_GC;
	binary_protocol_dislink_update (link, obj, track, 0);
	process_dislink_stage_entry (obj, link, -1);
	if (!in_gc)
		UNLOCK_GC;
#endif
}
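
/*
 * Usage sketch (hypothetical caller; `weak_slot` is an illustrative
 * root-registered slot, not part of this file):
 *
 *	// register outside a collection: goes through the staging queue
 *	sgen_register_disappearing_link (obj, &weak_slot, TRUE, FALSE);
 *
 *	// read: a nulled slot means the object was collected
 *	GCObject *o = weak_slot ? DISLINK_OBJECT (&weak_slot) : NULL;
 *
 *	// unregister: passing NULL nulls the slot and stages the removal
 *	sgen_register_disappearing_link (NULL, &weak_slot, FALSE, FALSE);
 */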
void
sgen_init_fin_weak_hash (void)
{
#ifdef HEAVY_STATISTICS
	mono_counters_register ("FinWeak Successes", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_success);
	mono_counters_register ("FinWeak Overflow aborts", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_overflow_abort);
	mono_counters_register ("FinWeak Wait for processing", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_wait_for_processing);
	mono_counters_register ("FinWeak Increment other thread", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_increment_other_thread);
	mono_counters_register ("FinWeak Index decremented", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_index_decremented);
	mono_counters_register ("FinWeak Entry invalidated", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_entry_invalidated);
#endif
}

#endif /* HAVE_SGEN_GC */