2 * sgen-fin-weak-hash.c: Finalizers and weak links.
5 * Paolo Molaro (lupus@ximian.com)
6 * Rodrigo Kumpera (kumpera@gmail.com)
8 * Copyright 2005-2011 Novell, Inc (http://www.novell.com)
9 * Copyright 2011 Xamarin Inc (http://www.xamarin.com)
10 * Copyright 2011 Xamarin, Inc.
11 * Copyright (C) 2012 Xamarin Inc
13 * This library is free software; you can redistribute it and/or
14 * modify it under the terms of the GNU Library General Public
15 * License 2.0 as published by the Free Software Foundation;
17 * This library is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * Library General Public License for more details.
22 * You should have received a copy of the GNU Library General Public
23 * License 2.0 along with this library; if not, write to the Free
24 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
30 #include "metadata/sgen-gc.h"
31 #include "metadata/sgen-gray.h"
32 #include "metadata/sgen-protocol.h"
33 #include "metadata/sgen-pointer-queue.h"
34 #include "utils/dtrace.h"
35 #include "utils/mono-counters.h"
37 #define ptr_in_nursery sgen_ptr_in_nursery
39 typedef SgenGrayQueue GrayQueue;
41 int num_ready_finalizers = 0;
42 static int no_finalize = 0;
44 #define DISLINK_OBJECT(l) (REVEAL_POINTER (*(void**)(l)))
45 #define DISLINK_TRACK(l) ((~(size_t)(*(void**)(l))) & 1)
/*
 * The finalizable hash has the object as the key; the disappearing_link
 * hash has the link address as the key.
 *
 * The low bit of a finalizable-hash key is reserved as a tag bit
 * (see BRIDGE_OBJECT_MARKED below).
 */
#define TAG_MASK ((mword)0x1)
56 static inline MonoObject*
57 tagged_object_get_object (MonoObject *object)
59 return (MonoObject*)(((mword)object) & ~TAG_MASK);
63 tagged_object_get_tag (MonoObject *object)
65 return ((mword)object) & TAG_MASK;
68 static inline MonoObject*
69 tagged_object_apply (void *object, int tag_bits)
71 return (MonoObject*)((mword)object | (mword)tag_bits);
75 tagged_object_hash (MonoObject *o)
77 return mono_aligned_addr_hash (tagged_object_get_object (o));
81 tagged_object_equals (MonoObject *a, MonoObject *b)
83 return tagged_object_get_object (a) == tagged_object_get_object (b);
86 static SgenHashTable minor_finalizable_hash = SGEN_HASH_TABLE_INIT (INTERNAL_MEM_FIN_TABLE, INTERNAL_MEM_FINALIZE_ENTRY, 0, (GHashFunc)tagged_object_hash, (GEqualFunc)tagged_object_equals);
87 static SgenHashTable major_finalizable_hash = SGEN_HASH_TABLE_INIT (INTERNAL_MEM_FIN_TABLE, INTERNAL_MEM_FINALIZE_ENTRY, 0, (GHashFunc)tagged_object_hash, (GEqualFunc)tagged_object_equals);
90 get_finalize_entry_hash_table (int generation)
93 case GENERATION_NURSERY: return &minor_finalizable_hash;
94 case GENERATION_OLD: return &major_finalizable_hash;
95 default: g_assert_not_reached ();
99 #define BRIDGE_OBJECT_MARKED 0x1
101 /* LOCKING: requires that the GC lock is held */
103 sgen_mark_bridge_object (MonoObject *obj)
105 SgenHashTable *hash_table = get_finalize_entry_hash_table (ptr_in_nursery (obj) ? GENERATION_NURSERY : GENERATION_OLD);
107 sgen_hash_table_set_key (hash_table, obj, tagged_object_apply (obj, BRIDGE_OBJECT_MARKED));
110 /* LOCKING: requires that the GC lock is held */
112 sgen_collect_bridge_objects (int generation, ScanCopyContext ctx)
114 CopyOrMarkObjectFunc copy_func = ctx.copy_func;
115 GrayQueue *queue = ctx.queue;
116 SgenHashTable *hash_table = get_finalize_entry_hash_table (generation);
118 gpointer dummy G_GNUC_UNUSED;
120 SgenPointerQueue moved_fin_objects;
122 sgen_pointer_queue_init (&moved_fin_objects, INTERNAL_MEM_TEMPORARY);
127 SGEN_HASH_TABLE_FOREACH (hash_table, object, dummy) {
128 int tag = tagged_object_get_tag (object);
129 object = tagged_object_get_object (object);
131 /* Bridge code told us to ignore this one */
132 if (tag == BRIDGE_OBJECT_MARKED)
135 /* Object is a bridge object and major heap says it's dead */
136 if (major_collector.is_object_live ((char*)object))
139 /* Nursery says the object is dead. */
140 if (!sgen_gc_is_object_ready_for_finalization (object))
143 if (!sgen_is_bridge_object (object))
146 copy = (char*)object;
147 copy_func ((void**)©, queue);
149 sgen_bridge_register_finalized_object ((MonoObject*)copy);
151 if (hash_table == &minor_finalizable_hash && !ptr_in_nursery (copy)) {
152 /* remove from the list */
153 SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);
155 /* insert it into the major hash */
156 sgen_hash_table_replace (&major_finalizable_hash, tagged_object_apply (copy, tag), NULL, NULL);
158 SGEN_LOG (5, "Promoting finalization of object %p (%s) (was at %p) to major table", copy, sgen_safe_name (copy), object);
161 } else if (copy != (char*)object) {
163 SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);
165 /* register for reinsertion */
166 sgen_pointer_queue_add (&moved_fin_objects, tagged_object_apply (copy, tag));
168 SGEN_LOG (5, "Updating object for finalization: %p (%s) (was at %p)", copy, sgen_safe_name (copy), object);
172 } SGEN_HASH_TABLE_FOREACH_END;
174 while (!sgen_pointer_queue_is_empty (&moved_fin_objects)) {
175 sgen_hash_table_replace (hash_table, sgen_pointer_queue_pop (&moved_fin_objects), NULL, NULL);
178 sgen_pointer_queue_free (&moved_fin_objects);
182 /* LOCKING: requires that the GC lock is held */
184 sgen_finalize_in_range (int generation, ScanCopyContext ctx)
186 CopyOrMarkObjectFunc copy_func = ctx.copy_func;
187 GrayQueue *queue = ctx.queue;
188 SgenHashTable *hash_table = get_finalize_entry_hash_table (generation);
190 gpointer dummy G_GNUC_UNUSED;
191 SgenPointerQueue moved_fin_objects;
193 sgen_pointer_queue_init (&moved_fin_objects, INTERNAL_MEM_TEMPORARY);
197 SGEN_HASH_TABLE_FOREACH (hash_table, object, dummy) {
198 int tag = tagged_object_get_tag (object);
199 object = tagged_object_get_object (object);
200 if (!major_collector.is_object_live ((char*)object)) {
201 gboolean is_fin_ready = sgen_gc_is_object_ready_for_finalization (object);
202 MonoObject *copy = object;
203 copy_func ((void**)©, queue);
205 /* remove and put in fin_ready_list */
206 SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);
207 num_ready_finalizers++;
208 sgen_queue_finalization_entry (copy);
209 /* Make it survive */
210 SGEN_LOG (5, "Queueing object for finalization: %p (%s) (was at %p) (%d/%d)", copy, sgen_safe_name (copy), object, num_ready_finalizers, sgen_hash_table_num_entries (hash_table));
213 if (hash_table == &minor_finalizable_hash && !ptr_in_nursery (copy)) {
214 /* remove from the list */
215 SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);
217 /* insert it into the major hash */
218 sgen_hash_table_replace (&major_finalizable_hash, tagged_object_apply (copy, tag), NULL, NULL);
220 SGEN_LOG (5, "Promoting finalization of object %p (%s) (was at %p) to major table", copy, sgen_safe_name (copy), object);
223 } else if (copy != object) {
225 SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);
227 /* register for reinsertion */
228 sgen_pointer_queue_add (&moved_fin_objects, tagged_object_apply (copy, tag));
230 SGEN_LOG (5, "Updating object for finalization: %p (%s) (was at %p)", copy, sgen_safe_name (copy), object);
236 } SGEN_HASH_TABLE_FOREACH_END;
238 while (!sgen_pointer_queue_is_empty (&moved_fin_objects)) {
239 sgen_hash_table_replace (hash_table, sgen_pointer_queue_pop (&moved_fin_objects), NULL, NULL);
242 sgen_pointer_queue_free (&moved_fin_objects);
245 /* LOCKING: requires that the GC lock is held */
247 register_for_finalization (MonoObject *obj, void *user_data, int generation)
249 SgenHashTable *hash_table = get_finalize_entry_hash_table (generation);
254 g_assert (user_data == NULL || user_data == mono_gc_run_finalize);
257 if (sgen_hash_table_replace (hash_table, obj, NULL, NULL))
258 SGEN_LOG (5, "Added finalizer for object: %p (%s) (%d) to %s table", obj, obj->vtable->klass->name, hash_table->num_entries, sgen_generation_name (generation));
260 if (sgen_hash_table_remove (hash_table, obj, NULL))
261 SGEN_LOG (5, "Removed finalizer for object: %p (%s) (%d)", obj, obj->vtable->klass->name, hash_table->num_entries);
266 * We're using (mostly) non-locking staging queues for finalizers and weak links to speed
267 * up registering them. Otherwise we'd have to take the GC lock.
269 * The queues are arrays of `StageEntry`, plus a `next_entry` index. Threads add entries to
270 * the queue via `add_stage_entry()` in a linear fashion until it fills up, in which case
271 * `process_stage_entries()` is called to drain it. A garbage collection will also drain
272 * the queues via the same function. That implies that `add_stage_entry()`, since it
273 * doesn't take a lock, must be able to run concurrently with `process_stage_entries()`,
274 * though it doesn't have to make progress while the queue is drained. In fact, once it
275 * detects that the queue is being drained, it blocks until the draining is done.
277 * The protocol must guarantee that entries in the queue are causally ordered, otherwise two
278 * entries for the same location might get switched, resulting in the earlier one being
279 * committed and the later one ignored.
281 * `next_entry` is the index of the next entry to be filled, or `-1` if the queue is
282 * currently being drained. Each entry has a state:
284 * `STAGE_ENTRY_FREE`: The entry is free. Its data fields must be `NULL`.
286 * `STAGE_ENTRY_BUSY`: The entry is currently being filled in.
288 * `STAGE_ENTRY_USED`: The entry is completely filled in and must be processed in the next
291 * `STAGE_ENTRY_INVALID`: The entry was busy during queue draining and therefore
292 * invalidated. Entries that are `BUSY` can obviously not be processed during a drain, but
293 * we can't leave them in place because new entries might be inserted before them, including
294 * from the same thread, violating causality. An alternative would be not to reset
295 * `next_entry` to `0` after a drain, but to the index of the last `BUSY` entry plus one,
296 * but that can potentially waste the whole queue.
300 * | from | to | filler? | drainer? |
301 * +---------+---------+---------+----------+
302 * | FREE | BUSY | X | |
303 * | BUSY | FREE | X | |
304 * | BUSY | USED | X | |
305 * | BUSY | INVALID | | X |
306 * | USED | FREE | | X |
307 * | INVALID | FREE | X | |
309 * `next_entry` can be incremented either by the filler thread that set the corresponding
310 * entry to `BUSY`, or by another filler thread that's trying to get a `FREE` slot. If that
311 * other thread wasn't allowed to increment, it would block on the first filler thread.
 * An entry's state, once it's set from `FREE` to `BUSY` by a filler thread, can only be
 * changed by that same thread or by the drainer.  The drainer can only set a `BUSY` entry
 * to `INVALID`, so it needs to be set to `FREE` again by the original filler thread.
318 #define STAGE_ENTRY_FREE 0
319 #define STAGE_ENTRY_BUSY 1
320 #define STAGE_ENTRY_USED 2
321 #define STAGE_ENTRY_INVALID 3
324 volatile gint32 state;
329 #define NUM_FIN_STAGE_ENTRIES 1024
331 static volatile gint32 next_fin_stage_entry = 0;
332 static StageEntry fin_stage_entries [NUM_FIN_STAGE_ENTRIES];
335 * This is used to lock the stage when processing is forced, i.e. when it's triggered by a
336 * garbage collection. In that case, the world is already stopped and there's only one
337 * thread operating on the queue.
340 lock_stage_for_processing (volatile gint32 *next_entry)
346 * When processing is triggered by an overflow, we don't want to take the GC lock
347 * immediately, and then set `next_index` to `-1`, because another thread might have drained
348 * the queue in the mean time. Instead, we make sure the overflow is still there, we
349 * atomically set `next_index`, and only once that happened do we take the GC lock.
352 try_lock_stage_for_processing (int num_entries, volatile gint32 *next_entry)
354 gint32 old = *next_entry;
355 if (old < num_entries)
357 return InterlockedCompareExchange (next_entry, -1, old) == old;
360 /* LOCKING: requires that the GC lock is held */
362 process_stage_entries (int num_entries, volatile gint32 *next_entry, StageEntry *entries, void (*process_func) (MonoObject*, void*, int))
367 * This can happen if after setting `next_index` to `-1` in
368 * `try_lock_stage_for_processing()`, a GC was triggered, which then drained the
369 * queue and reset `next_entry`.
371 * We have the GC lock now, so if it's still `-1`, we can't be interrupted by a GC.
373 if (*next_entry != -1)
376 for (i = 0; i < num_entries; ++i) {
380 state = entries [i].state;
383 case STAGE_ENTRY_FREE:
384 case STAGE_ENTRY_INVALID:
386 case STAGE_ENTRY_BUSY:
387 /* BUSY -> INVALID */
389 * This must be done atomically, because the filler thread can set
390 * the entry to `USED`, in which case we must process it, so we must
391 * detect that eventuality.
393 if (InterlockedCompareExchange (&entries [i].state, STAGE_ENTRY_INVALID, STAGE_ENTRY_BUSY) != STAGE_ENTRY_BUSY)
396 case STAGE_ENTRY_USED:
399 SGEN_ASSERT (0, FALSE, "Invalid stage entry state");
405 process_func (entries [i].obj, entries [i].user_data, i);
407 entries [i].obj = NULL;
408 entries [i].user_data = NULL;
410 mono_memory_write_barrier ();
414 * This transition only happens here, so we don't have to do it atomically.
416 entries [i].state = STAGE_ENTRY_FREE;
419 mono_memory_write_barrier ();
#ifdef HEAVY_STATISTICS
/* Counters for the staging-queue fast/slow paths (see add_stage_entry). */
static guint64 stat_overflow_abort = 0;
static guint64 stat_wait_for_processing = 0;
static guint64 stat_increment_other_thread = 0;
static guint64 stat_index_decremented = 0;
static guint64 stat_entry_invalidated = 0;
static guint64 stat_success = 0;
#endif
434 add_stage_entry (int num_entries, volatile gint32 *next_entry, StageEntry *entries, MonoObject *obj, void *user_data)
436 gint32 index, new_next_entry, old_next_entry;
437 gint32 previous_state;
442 if (index >= num_entries) {
443 HEAVY_STAT (++stat_overflow_abort);
448 * Backed-off waiting is way more efficient than even using a
449 * dedicated lock for this.
451 while ((index = *next_entry) < 0) {
453 * This seems like a good value. Determined by timing
454 * sgen-weakref-stress.exe.
457 HEAVY_STAT (++stat_wait_for_processing);
462 if (entries [index].state != STAGE_ENTRY_FREE ||
463 InterlockedCompareExchange (&entries [index].state, STAGE_ENTRY_BUSY, STAGE_ENTRY_FREE) != STAGE_ENTRY_FREE) {
465 * If we can't get the entry it must be because another thread got
466 * it first. We don't want to wait for that thread to increment
467 * `next_entry`, so we try to do it ourselves. Whether we succeed
468 * or not, we start over.
470 if (*next_entry == index) {
471 InterlockedCompareExchange (next_entry, index + 1, index);
472 //g_print ("tried increment for other thread\n");
473 HEAVY_STAT (++stat_increment_other_thread);
477 /* state is BUSY now */
478 mono_memory_write_barrier ();
480 * Incrementing `next_entry` must happen after setting the state to `BUSY`.
481 * If it were the other way around, it would be possible that after a filler
482 * incremented the index, other threads fill up the queue, the queue is
483 * drained, the original filler finally fills in the slot, but `next_entry`
484 * ends up at the start of the queue, and new entries are written in the
485 * queue in front of, not behind, the original filler's entry.
487 * We don't actually require that the CAS succeeds, but we do require that
488 * the value of `next_entry` is not lower than our index. Since the drainer
489 * sets it to `-1`, that also takes care of the case that the drainer is
492 old_next_entry = InterlockedCompareExchange (next_entry, index + 1, index);
493 if (old_next_entry < index) {
495 /* INVALID -> FREE */
497 * The state might still be `BUSY`, or the drainer could have set it
498 * to `INVALID`. In either case, there's no point in CASing. Set
499 * it to `FREE` and start over.
501 entries [index].state = STAGE_ENTRY_FREE;
502 HEAVY_STAT (++stat_index_decremented);
508 SGEN_ASSERT (0, index >= 0 && index < num_entries, "Invalid index");
510 entries [index].obj = obj;
511 entries [index].user_data = user_data;
513 mono_memory_write_barrier ();
515 new_next_entry = *next_entry;
516 mono_memory_read_barrier ();
519 * A `BUSY` entry will either still be `BUSY` or the drainer will have set it to
520 * `INVALID`. In the former case, we set it to `USED` and we're finished. In the
521 * latter case, we reset it to `FREE` and start over.
523 previous_state = InterlockedCompareExchange (&entries [index].state, STAGE_ENTRY_USED, STAGE_ENTRY_BUSY);
524 if (previous_state == STAGE_ENTRY_BUSY) {
525 SGEN_ASSERT (0, new_next_entry >= index || new_next_entry < 0, "Invalid next entry index - as long as we're busy, other thread can only increment or invalidate it");
526 HEAVY_STAT (++stat_success);
530 SGEN_ASSERT (0, previous_state == STAGE_ENTRY_INVALID, "Invalid state transition - other thread can only make busy state invalid");
531 entries [index].obj = NULL;
532 entries [index].user_data = NULL;
533 mono_memory_write_barrier ();
534 /* INVALID -> FREE */
535 entries [index].state = STAGE_ENTRY_FREE;
537 HEAVY_STAT (++stat_entry_invalidated);
542 /* LOCKING: requires that the GC lock is held */
544 process_fin_stage_entry (MonoObject *obj, void *user_data, int index)
546 if (ptr_in_nursery (obj))
547 register_for_finalization (obj, user_data, GENERATION_NURSERY);
549 register_for_finalization (obj, user_data, GENERATION_OLD);
552 /* LOCKING: requires that the GC lock is held */
554 sgen_process_fin_stage_entries (void)
556 lock_stage_for_processing (&next_fin_stage_entry);
557 process_stage_entries (NUM_FIN_STAGE_ENTRIES, &next_fin_stage_entry, fin_stage_entries, process_fin_stage_entry);
561 mono_gc_register_for_finalization (MonoObject *obj, void *user_data)
563 while (add_stage_entry (NUM_FIN_STAGE_ENTRIES, &next_fin_stage_entry, fin_stage_entries, obj, user_data) == -1) {
564 if (try_lock_stage_for_processing (NUM_FIN_STAGE_ENTRIES, &next_fin_stage_entry)) {
566 process_stage_entries (NUM_FIN_STAGE_ENTRIES, &next_fin_stage_entry, fin_stage_entries, process_fin_stage_entry);
572 /* LOCKING: requires that the GC lock is held */
574 finalizers_for_domain (MonoDomain *domain, MonoObject **out_array, int out_size,
575 SgenHashTable *hash_table)
578 gpointer dummy G_GNUC_UNUSED;
581 if (no_finalize || !out_size || !out_array)
584 SGEN_HASH_TABLE_FOREACH (hash_table, object, dummy) {
585 object = tagged_object_get_object (object);
587 if (mono_object_domain (object) == domain) {
588 /* remove and put in out_array */
589 SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);
590 out_array [count ++] = object;
591 SGEN_LOG (5, "Collecting object for finalization: %p (%s) (%d/%d)", object, sgen_safe_name (object), num_ready_finalizers, sgen_hash_table_num_entries (hash_table));
592 if (count == out_size)
596 } SGEN_HASH_TABLE_FOREACH_END;
601 * mono_gc_finalizers_for_domain:
602 * @domain: the unloading appdomain
603 * @out_array: output array
604 * @out_size: size of output array
606 * Store inside @out_array up to @out_size objects that belong to the unloading
607 * appdomain @domain. Returns the number of stored items. Can be called repeteadly
608 * until it returns 0.
609 * The items are removed from the finalizer data structure, so the caller is supposed
611 * @out_array should be on the stack to allow the GC to know the objects are still alive.
614 mono_gc_finalizers_for_domain (MonoDomain *domain, MonoObject **out_array, int out_size)
619 sgen_process_fin_stage_entries ();
620 result = finalizers_for_domain (domain, out_array, out_size, &minor_finalizable_hash);
621 if (result < out_size) {
622 result += finalizers_for_domain (domain, out_array + result, out_size - result,
623 &major_finalizable_hash);
630 static SgenHashTable minor_disappearing_link_hash = SGEN_HASH_TABLE_INIT (INTERNAL_MEM_DISLINK_TABLE, INTERNAL_MEM_DISLINK, 0, mono_aligned_addr_hash, NULL);
631 static SgenHashTable major_disappearing_link_hash = SGEN_HASH_TABLE_INIT (INTERNAL_MEM_DISLINK_TABLE, INTERNAL_MEM_DISLINK, 0, mono_aligned_addr_hash, NULL);
633 static SgenHashTable*
634 get_dislink_hash_table (int generation)
636 switch (generation) {
637 case GENERATION_NURSERY: return &minor_disappearing_link_hash;
638 case GENERATION_OLD: return &major_disappearing_link_hash;
639 default: g_assert_not_reached ();
643 /* LOCKING: assumes the GC lock is held */
645 add_or_remove_disappearing_link (MonoObject *obj, void **link, int generation)
647 SgenHashTable *hash_table = get_dislink_hash_table (generation);
650 if (sgen_hash_table_remove (hash_table, link, NULL)) {
651 SGEN_LOG (5, "Removed dislink %p (%d) from %s table",
652 link, hash_table->num_entries, sgen_generation_name (generation));
657 sgen_hash_table_replace (hash_table, link, NULL, NULL);
658 SGEN_LOG (5, "Added dislink for object: %p (%s) at %p to %s table",
659 obj, obj->vtable->klass->name, link, sgen_generation_name (generation));
662 /* LOCKING: requires that the GC lock is held */
664 sgen_null_link_in_range (int generation, gboolean before_finalization, ScanCopyContext ctx)
666 CopyOrMarkObjectFunc copy_func = ctx.copy_func;
667 GrayQueue *queue = ctx.queue;
669 gpointer dummy G_GNUC_UNUSED;
670 SgenHashTable *hash = get_dislink_hash_table (generation);
672 SGEN_HASH_TABLE_FOREACH (hash, link, dummy) {
677 We null a weak link before unregistering it, so it's possible that a thread is
678 suspended right in between setting the content to null and staging the unregister.
680 The rest of this code cannot handle null links as DISLINK_OBJECT (NULL) produces an invalid address.
682 We should simply skip the entry as the staged removal will take place during the next GC.
685 SGEN_LOG (5, "Dislink %p was externally nullified", link);
689 track = DISLINK_TRACK (link);
691 * Tracked references are processed after
692 * finalization handling whereas standard weak
693 * references are processed before. If an
694 * object is still not marked after finalization
695 * handling it means that it either doesn't have
696 * a finalizer or the finalizer has already run,
697 * so we must null a tracking reference.
699 if (track != before_finalization) {
700 object = DISLINK_OBJECT (link);
702 We should guard against a null object been hidden. This can sometimes happen.
705 SGEN_LOG (5, "Dislink %p with a hidden null object", link);
709 if (!major_collector.is_object_live (object)) {
710 if (sgen_gc_is_object_ready_for_finalization (object)) {
712 binary_protocol_dislink_update (link, NULL, 0, 0);
713 SGEN_LOG (5, "Dislink nullified at %p to GCed object %p", link, object);
714 SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);
718 copy_func ((void**)©, queue);
720 /* Update pointer if it's moved. If the object
721 * has been moved out of the nursery, we need to
722 * remove the link from the minor hash table to
725 * FIXME: what if an object is moved earlier?
728 if (hash == &minor_disappearing_link_hash && !ptr_in_nursery (copy)) {
729 SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);
732 *link = HIDE_POINTER (copy, track);
733 add_or_remove_disappearing_link ((MonoObject*)copy, link, GENERATION_OLD);
734 binary_protocol_dislink_update (link, copy, track, 0);
736 SGEN_LOG (5, "Upgraded dislink at %p to major because object %p moved to %p", link, object, copy);
740 *link = HIDE_POINTER (copy, track);
741 binary_protocol_dislink_update (link, copy, track, 0);
742 SGEN_LOG (5, "Updated dislink at %p to %p", link, DISLINK_OBJECT (link));
747 } SGEN_HASH_TABLE_FOREACH_END;
750 /* LOCKING: requires that the GC lock is held */
752 sgen_null_links_for_domain (MonoDomain *domain, int generation)
755 gpointer dummy G_GNUC_UNUSED;
756 SgenHashTable *hash = get_dislink_hash_table (generation);
757 SGEN_HASH_TABLE_FOREACH (hash, link, dummy) {
758 char *object = DISLINK_OBJECT (link);
761 SGEN_ASSERT (0, ((MonoObject*)object)->vtable, "Can't have objects without vtables.");
763 if (*link && object && ((MonoObject*)object)->vtable->domain == domain) {
765 binary_protocol_dislink_update (link, NULL, 0, 0);
767 * This can happen if finalizers are not ran, i.e. Environment.Exit ()
768 * is called from finalizer like in finalizer-abort.cs.
770 SGEN_LOG (5, "Disappearing link %p not freed", link);
773 * FIXME: Why don't we free the entry here?
775 SGEN_HASH_TABLE_FOREACH_REMOVE (FALSE);
779 } SGEN_HASH_TABLE_FOREACH_END;
782 /* LOCKING: requires that the GC lock is held */
784 sgen_null_links_with_predicate (int generation, WeakLinkAlivePredicateFunc predicate, void *data)
787 gpointer dummy G_GNUC_UNUSED;
788 SgenHashTable *hash = get_dislink_hash_table (generation);
789 SGEN_HASH_TABLE_FOREACH (hash, link, dummy) {
790 char *object = DISLINK_OBJECT (link);
795 is_alive = predicate ((MonoObject*)object, data);
799 binary_protocol_dislink_update (link, NULL, 0, 0);
800 SGEN_LOG (5, "Dislink nullified by predicate at %p to GCed object %p", link, object);
801 SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);
804 } SGEN_HASH_TABLE_FOREACH_END;
808 sgen_remove_finalizers_for_domain (MonoDomain *domain, int generation)
810 SgenHashTable *hash_table = get_finalize_entry_hash_table (generation);
812 gpointer dummy G_GNUC_UNUSED;
814 SGEN_HASH_TABLE_FOREACH (hash_table, object, dummy) {
815 object = tagged_object_get_object (object);
817 if (mono_object_domain (object) == domain) {
818 SGEN_LOG (5, "Unregistering finalizer for object: %p (%s)", object, sgen_safe_name (object));
820 SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);
823 } SGEN_HASH_TABLE_FOREACH_END;
826 /* LOCKING: requires that the GC lock is held */
828 process_dislink_stage_entry (MonoObject *obj, void *_link, int index)
833 binary_protocol_dislink_process_staged (link, obj, index);
835 add_or_remove_disappearing_link (NULL, link, GENERATION_NURSERY);
836 add_or_remove_disappearing_link (NULL, link, GENERATION_OLD);
838 if (ptr_in_nursery (obj))
839 add_or_remove_disappearing_link (obj, link, GENERATION_NURSERY);
841 add_or_remove_disappearing_link (obj, link, GENERATION_OLD);
845 #define NUM_DISLINK_STAGE_ENTRIES 1024
847 static volatile gint32 next_dislink_stage_entry = 0;
848 static StageEntry dislink_stage_entries [NUM_DISLINK_STAGE_ENTRIES];
850 /* LOCKING: requires that the GC lock is held */
852 sgen_process_dislink_stage_entries (void)
854 lock_stage_for_processing (&next_dislink_stage_entry);
855 process_stage_entries (NUM_DISLINK_STAGE_ENTRIES, &next_dislink_stage_entry, dislink_stage_entries, process_dislink_stage_entry);
859 sgen_register_disappearing_link (MonoObject *obj, void **link, gboolean track, gboolean in_gc)
/* Fire the DTrace weak-update probe with the link's old and new targets. */
863 if (MONO_GC_WEAK_UPDATE_ENABLED ()) {
864 MonoVTable *vt = obj ? (MonoVTable*)SGEN_LOAD_VTABLE (obj) : NULL;
865 MONO_GC_WEAK_UPDATE ((mword)link,
866 *link ? (mword)DISLINK_OBJECT (link) : (mword)0,
868 obj ? (mword)sgen_safe_object_get_size (obj) : (mword)0,
869 obj ? vt->klass->name_space : NULL,
870 obj ? vt->klass->name : NULL,
/* Store the hidden (bit-inverted) pointer with the track bit in the slot. */
876 *link = HIDE_POINTER (obj, track);
/* In-GC path: the GC lock is effectively ours, commit the update directly. */
882 binary_protocol_dislink_update (link, obj, track, 0);
883 process_dislink_stage_entry (obj, link, -1);
/* Mutator path: stage the update lock-free; on overflow, drain under the GC
 * lock and retry (same pattern as mono_gc_register_for_finalization). */
886 binary_protocol_dislink_update (link, obj, track, 1);
887 while ((index = add_stage_entry (NUM_DISLINK_STAGE_ENTRIES, &next_dislink_stage_entry, dislink_stage_entries, obj, link)) == -1) {
888 if (try_lock_stage_for_processing (NUM_DISLINK_STAGE_ENTRIES, &next_dislink_stage_entry)) {
890 process_stage_entries (NUM_DISLINK_STAGE_ENTRIES, &next_dislink_stage_entry, dislink_stage_entries, process_dislink_stage_entry);
894 binary_protocol_dislink_update_staged (link, obj, track, index);
/* NOTE(review): a second direct update/process pair follows - presumably an
 * alternative (#if'd or else-branch) code path; lines are elided here, so the
 * exact branch structure cannot be confirmed from this view. */
899 binary_protocol_dislink_update (link, obj, track, 0);
900 process_dislink_stage_entry (obj, link, -1);
/* Register the HEAVY_STATISTICS counters for the staging queues. */
void
sgen_init_fin_weak_hash (void)
{
#ifdef HEAVY_STATISTICS
	mono_counters_register ("FinWeak Successes", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_success);
	mono_counters_register ("FinWeak Overflow aborts", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_overflow_abort);
	mono_counters_register ("FinWeak Wait for processing", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_wait_for_processing);
	mono_counters_register ("FinWeak Increment other thread", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_increment_other_thread);
	mono_counters_register ("FinWeak Index decremented", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_index_decremented);
	mono_counters_register ("FinWeak Entry invalidated", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_entry_invalidated);
#endif
}
919 #endif /* HAVE_SGEN_GC */