/*
 * sgen-fin-weak-hash.c: Finalizers and weak links.
 *
 * Authors:
 *	Paolo Molaro (lupus@ximian.com)
 *	Rodrigo Kumpera (kumpera@gmail.com)
 *
 * Copyright 2005-2011 Novell, Inc (http://www.novell.com)
 * Copyright 2011 Xamarin Inc (http://www.xamarin.com)
 * Copyright (C) 2012 Xamarin Inc
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Library General Public
 * License 2.0 as published by the Free Software Foundation;
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Library General Public License for more details.
 *
 * You should have received a copy of the GNU Library General Public
 * License 2.0 along with this library; if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
30 #include "mono/sgen/sgen-gc.h"
31 #include "mono/sgen/sgen-gray.h"
32 #include "mono/sgen/sgen-protocol.h"
33 #include "mono/sgen/sgen-pointer-queue.h"
34 #include "mono/sgen/sgen-client.h"
35 #include "mono/sgen/gc-internal-agnostic.h"
36 #include "mono/utils/mono-membar.h"
38 #ifndef SGEN_WITHOUT_MONO
39 #include "mono/metadata/gc-internal.h"
42 #define ptr_in_nursery sgen_ptr_in_nursery
typedef SgenGrayQueue GrayQueue;

static int no_finalize = 0;
/*
 * The finalizable hash has the object as the key; the
 * disappearing_link hash has the link address as key.
 */
#define TAG_MASK ((mword)0x1)
static inline GCObject*
tagged_object_get_object (GCObject *object)
{
	return (GCObject*)(((mword)object) & ~TAG_MASK);
}

static inline mword
tagged_object_get_tag (GCObject *object)
{
	return ((mword)object) & TAG_MASK;
}

static inline GCObject*
tagged_object_apply (void *object, int tag_bits)
{
	return (GCObject*)((mword)object | (mword)tag_bits);
}

static inline guint
tagged_object_hash (GCObject *o)
{
	return sgen_aligned_addr_hash (tagged_object_get_object (o));
}

static inline gboolean
tagged_object_equals (GCObject *a, GCObject *b)
{
	return tagged_object_get_object (a) == tagged_object_get_object (b);
}
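
/*
 * Illustrative example (editor's sketch, not from the original source): for a
 * hypothetical object at the aligned address 0x1000, tagging round-trips as:
 *
 *	tagged = tagged_object_apply ((void*)0x1000, 0x1);	// (mword)tagged == 0x1001
 *	tagged_object_get_tag (tagged);				// == 0x1
 *	tagged_object_get_object (tagged);			// == (GCObject*)0x1000
 *
 * This works because object pointers are at least two-byte aligned, so the
 * low bit of an untagged object pointer is always zero.
 */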
static SgenHashTable minor_finalizable_hash = SGEN_HASH_TABLE_INIT (INTERNAL_MEM_FIN_TABLE, INTERNAL_MEM_FINALIZE_ENTRY, 0, (GHashFunc)tagged_object_hash, (GEqualFunc)tagged_object_equals);
static SgenHashTable major_finalizable_hash = SGEN_HASH_TABLE_INIT (INTERNAL_MEM_FIN_TABLE, INTERNAL_MEM_FINALIZE_ENTRY, 0, (GHashFunc)tagged_object_hash, (GEqualFunc)tagged_object_equals);
static SgenHashTable*
get_finalize_entry_hash_table (int generation)
{
	switch (generation) {
	case GENERATION_NURSERY: return &minor_finalizable_hash;
	case GENERATION_OLD: return &major_finalizable_hash;
	default: g_assert_not_reached ();
	}
}
#define BRIDGE_OBJECT_MARKED 0x1
/* LOCKING: requires that the GC lock is held */
void
sgen_mark_bridge_object (GCObject *obj)
{
	SgenHashTable *hash_table = get_finalize_entry_hash_table (ptr_in_nursery (obj) ? GENERATION_NURSERY : GENERATION_OLD);

	sgen_hash_table_set_key (hash_table, obj, tagged_object_apply (obj, BRIDGE_OBJECT_MARKED));
}
/* LOCKING: requires that the GC lock is held */
void
sgen_collect_bridge_objects (int generation, ScanCopyContext ctx)
{
	CopyOrMarkObjectFunc copy_func = ctx.ops->copy_or_mark_object;
	GrayQueue *queue = ctx.queue;
	SgenHashTable *hash_table = get_finalize_entry_hash_table (generation);
	GCObject *object;
	gpointer dummy G_GNUC_UNUSED;
	GCObject *copy;
	SgenPointerQueue moved_fin_objects;

	sgen_pointer_queue_init (&moved_fin_objects, INTERNAL_MEM_TEMPORARY);

	if (no_finalize)
		return;

	SGEN_HASH_TABLE_FOREACH (hash_table, object, dummy) {
		int tag = tagged_object_get_tag (object);
		object = tagged_object_get_object (object);

		/* Bridge code told us to ignore this one */
		if (tag == BRIDGE_OBJECT_MARKED)
			continue;

		/* The major heap still considers the object live */
		if (major_collector.is_object_live (object))
			continue;

		/* The nursery still considers the object live */
		if (!sgen_gc_is_object_ready_for_finalization (object))
			continue;

		if (!sgen_client_bridge_is_bridge_object (object))
			continue;

		copy = object;
		copy_func (&copy, queue);

		sgen_client_bridge_register_finalized_object (copy);

		if (hash_table == &minor_finalizable_hash && !ptr_in_nursery (copy)) {
			/* remove from the list */
			SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);

			/* insert it into the major hash */
			sgen_hash_table_replace (&major_finalizable_hash, tagged_object_apply (copy, tag), NULL, NULL);

			SGEN_LOG (5, "Promoting finalization of object %p (%s) (was at %p) to major table", copy, sgen_client_vtable_get_name (SGEN_LOAD_VTABLE (copy)), object);
		} else if (copy != object) {
			SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);

			/* register for reinsertion */
			sgen_pointer_queue_add (&moved_fin_objects, tagged_object_apply (copy, tag));

			SGEN_LOG (5, "Updating object for finalization: %p (%s) (was at %p)", copy, sgen_client_vtable_get_name (SGEN_LOAD_VTABLE (copy)), object);
		}
	} SGEN_HASH_TABLE_FOREACH_END;

	while (!sgen_pointer_queue_is_empty (&moved_fin_objects)) {
		sgen_hash_table_replace (hash_table, sgen_pointer_queue_pop (&moved_fin_objects), NULL, NULL);
	}

	sgen_pointer_queue_free (&moved_fin_objects);
}
/* LOCKING: requires that the GC lock is held */
void
sgen_finalize_in_range (int generation, ScanCopyContext ctx)
{
	CopyOrMarkObjectFunc copy_func = ctx.ops->copy_or_mark_object;
	GrayQueue *queue = ctx.queue;
	SgenHashTable *hash_table = get_finalize_entry_hash_table (generation);
	GCObject *object;
	gpointer dummy G_GNUC_UNUSED;
	SgenPointerQueue moved_fin_objects;

	sgen_pointer_queue_init (&moved_fin_objects, INTERNAL_MEM_TEMPORARY);

	if (no_finalize)
		return;

	SGEN_HASH_TABLE_FOREACH (hash_table, object, dummy) {
		int tag = tagged_object_get_tag (object);
		object = tagged_object_get_object (object);
		if (!major_collector.is_object_live (object)) {
			gboolean is_fin_ready = sgen_gc_is_object_ready_for_finalization (object);
			GCObject *copy = object;
			copy_func (&copy, queue);
			if (is_fin_ready) {
				/* remove and put in fin_ready_list */
				SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);
				sgen_queue_finalization_entry (copy);
				/* Make it survive */
				SGEN_LOG (5, "Queueing object for finalization: %p (%s) (was at %p) (%d)", copy, sgen_client_vtable_get_name (SGEN_LOAD_VTABLE (copy)), object, sgen_hash_table_num_entries (hash_table));
			} else {
				if (hash_table == &minor_finalizable_hash && !ptr_in_nursery (copy)) {
					/* remove from the list */
					SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);

					/* insert it into the major hash */
					sgen_hash_table_replace (&major_finalizable_hash, tagged_object_apply (copy, tag), NULL, NULL);

					SGEN_LOG (5, "Promoting finalization of object %p (%s) (was at %p) to major table", copy, sgen_client_vtable_get_name (SGEN_LOAD_VTABLE (copy)), object);
				} else if (copy != object) {
					SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);

					/* register for reinsertion */
					sgen_pointer_queue_add (&moved_fin_objects, tagged_object_apply (copy, tag));

					SGEN_LOG (5, "Updating object for finalization: %p (%s) (was at %p)", copy, sgen_client_vtable_get_name (SGEN_LOAD_VTABLE (copy)), object);
				}
			}
		}
	} SGEN_HASH_TABLE_FOREACH_END;

	while (!sgen_pointer_queue_is_empty (&moved_fin_objects)) {
		sgen_hash_table_replace (hash_table, sgen_pointer_queue_pop (&moved_fin_objects), NULL, NULL);
	}

	sgen_pointer_queue_free (&moved_fin_objects);
}
/* LOCKING: requires that the GC lock is held */
static void
register_for_finalization (GCObject *obj, void *user_data, int generation)
{
	SgenHashTable *hash_table = get_finalize_entry_hash_table (generation);

	if (no_finalize)
		return;

	if (user_data) {
		if (sgen_hash_table_replace (hash_table, obj, NULL, NULL)) {
			GCVTable vt = SGEN_LOAD_VTABLE_UNCHECKED (obj);
			SGEN_LOG (5, "Added finalizer for object: %p (%s) (%d) to %s table", obj, sgen_client_vtable_get_name (vt), hash_table->num_entries, sgen_generation_name (generation));
		}
	} else {
		if (sgen_hash_table_remove (hash_table, obj, NULL)) {
			GCVTable vt = SGEN_LOAD_VTABLE_UNCHECKED (obj);
			SGEN_LOG (5, "Removed finalizer for object: %p (%s) (%d)", obj, sgen_client_vtable_get_name (vt), hash_table->num_entries);
		}
	}
}
/*
 * We're using (mostly) non-locking staging queues for finalizers and weak links to speed
 * up registering them.  Otherwise we'd have to take the GC lock.
 *
 * The queues are arrays of `StageEntry`, plus a `next_entry` index.  Threads add entries to
 * the queue via `add_stage_entry()` in a linear fashion until it fills up, in which case
 * `process_stage_entries()` is called to drain it.  A garbage collection will also drain
 * the queues via the same function.  That implies that `add_stage_entry()`, since it
 * doesn't take a lock, must be able to run concurrently with `process_stage_entries()`,
 * though it doesn't have to make progress while the queue is drained.  In fact, once it
 * detects that the queue is being drained, it blocks until the draining is done.
 *
 * The protocol must guarantee that entries in the queue are causally ordered, otherwise two
 * entries for the same location might get switched, resulting in the earlier one being
 * committed and the later one ignored.
 *
 * `next_entry` is the index of the next entry to be filled, or `-1` if the queue is
 * currently being drained.  Each entry has a state:
 *
 * `STAGE_ENTRY_FREE`: The entry is free.  Its data fields must be `NULL`.
 *
 * `STAGE_ENTRY_BUSY`: The entry is currently being filled in.
 *
 * `STAGE_ENTRY_USED`: The entry is completely filled in and will be processed in the next
 * draining.
 *
 * `STAGE_ENTRY_INVALID`: The entry was busy during queue draining and therefore
 * invalidated.  Entries that are `BUSY` can obviously not be processed during a drain, but
 * we can't leave them in place because new entries might be inserted before them, including
 * from the same thread, violating causality.  An alternative would be not to reset
 * `next_entry` to `0` after a drain, but to the index of the last `BUSY` entry plus one,
 * but that can potentially waste the whole queue.
 *
 * The allowed state transitions are:
 *
 * | from    | to      | filler? | drainer? |
 * +---------+---------+---------+----------+
 * | FREE    | BUSY    | X       |          |
 * | BUSY    | FREE    | X       |          |
 * | BUSY    | USED    | X       |          |
 * | BUSY    | INVALID |         | X        |
 * | USED    | FREE    |         | X        |
 * | INVALID | FREE    | X       |          |
 *
 * `next_entry` can be incremented either by the filler thread that set the corresponding
 * entry to `BUSY`, or by another filler thread that's trying to get a `FREE` slot.  If that
 * other thread wasn't allowed to increment, it would block on the first filler thread.
 *
 * An entry's state, once it's set from `FREE` to `BUSY` by a filler thread, can only be
 * changed by that same thread or by the drainer.  The drainer can only set a `BUSY` entry
 * to `INVALID`, so the entry needs to be set back to `FREE` again by the original filler
 * thread.
 */
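
/*
 * A sketch of the filler's uncontended fast path under this protocol (see
 * `add_stage_entry ()` below for the full version with all retry cases):
 *
 *	index = *next_entry;                        // pick a candidate slot
 *	CAS (&entries [index].state, FREE, BUSY);   // FREE -> BUSY
 *	CAS (next_entry, index, index + 1);         // publish the increment
 *	entries [index].obj = obj;                  // fill in the data
 *	entries [index].user_data = user_data;
 *	write_barrier ();
 *	CAS (&entries [index].state, BUSY, USED);   // commit
 *
 * If the final CAS finds `INVALID` instead of `BUSY`, the drainer ran in
 * between: the filler resets the entry to `FREE` and starts over, which
 * preserves the causal ordering described above.
 */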
#define STAGE_ENTRY_FREE	0
#define STAGE_ENTRY_BUSY	1
#define STAGE_ENTRY_USED	2
#define STAGE_ENTRY_INVALID	3

typedef struct {
	volatile gint32 state;
	GCObject *obj;
	void *user_data;
} StageEntry;

#define NUM_FIN_STAGE_ENTRIES	1024

static volatile gint32 next_fin_stage_entry = 0;
static StageEntry fin_stage_entries [NUM_FIN_STAGE_ENTRIES];
/*
 * This is used to lock the stage when processing is forced, i.e. when it's triggered by a
 * garbage collection.  In that case, the world is already stopped and there's only one
 * thread operating on the queue.
 */
static void
lock_stage_for_processing (volatile gint32 *next_entry)
{
	*next_entry = -1;
}
/*
 * When processing is triggered by an overflow, we don't want to take the GC lock
 * immediately, and then set `next_entry` to `-1`, because another thread might have drained
 * the queue in the meantime.  Instead, we make sure the overflow is still there, we
 * atomically set `next_entry` to `-1`, and only once that has happened do we take the GC lock.
 */
static gboolean
try_lock_stage_for_processing (int num_entries, volatile gint32 *next_entry)
{
	gint32 old = *next_entry;
	if (old < num_entries)
		return FALSE;
	return InterlockedCompareExchange (next_entry, -1, old) == old;
}
/* LOCKING: requires that the GC lock is held */
static void
process_stage_entries (int num_entries, volatile gint32 *next_entry, StageEntry *entries, void (*process_func) (GCObject*, void*, int))
{
	int i;

	/*
	 * This can happen if, after setting `next_entry` to `-1` in
	 * `try_lock_stage_for_processing()`, a GC was triggered, which then drained the
	 * queue and reset `next_entry`.
	 *
	 * We have the GC lock now, so if it's still `-1`, we can't be interrupted by a GC.
	 */
	if (*next_entry != -1)
		return;

	for (i = 0; i < num_entries; ++i) {
		gint32 state;

	retry:
		state = entries [i].state;

		switch (state) {
		case STAGE_ENTRY_FREE:
		case STAGE_ENTRY_INVALID:
			continue;
		case STAGE_ENTRY_BUSY:
			/* BUSY -> INVALID */
			/*
			 * This must be done atomically, because the filler thread can set
			 * the entry to `USED`, in which case we must process it, so we must
			 * detect that eventuality.
			 */
			if (InterlockedCompareExchange (&entries [i].state, STAGE_ENTRY_INVALID, STAGE_ENTRY_BUSY) != STAGE_ENTRY_BUSY)
				goto retry;
			continue;
		case STAGE_ENTRY_USED:
			break;
		default:
			SGEN_ASSERT (0, FALSE, "Invalid stage entry state");
			break;
		}

		/* state is USED */

		process_func (entries [i].obj, entries [i].user_data, i);

		entries [i].obj = NULL;
		entries [i].user_data = NULL;

		mono_memory_write_barrier ();

		/* USED -> FREE */
		/*
		 * This transition only happens here, so we don't have to do it atomically.
		 */
		entries [i].state = STAGE_ENTRY_FREE;
	}

	mono_memory_write_barrier ();

	*next_entry = 0;
}
#ifdef HEAVY_STATISTICS
static guint64 stat_overflow_abort = 0;
static guint64 stat_wait_for_processing = 0;
static guint64 stat_increment_other_thread = 0;
static guint64 stat_index_decremented = 0;
static guint64 stat_entry_invalidated = 0;
static guint64 stat_success = 0;
#endif
static int
add_stage_entry (int num_entries, volatile gint32 *next_entry, StageEntry *entries, GCObject *obj, void *user_data)
{
	gint32 index, new_next_entry, old_next_entry;
	gint32 previous_state;

 retry:
	for (;;) {
		index = *next_entry;
		if (index >= num_entries) {
			HEAVY_STAT (++stat_overflow_abort);
			return -1;
		}
		if (index < 0) {
			/*
			 * Backed-off waiting is way more efficient than even using a
			 * dedicated lock for this.
			 */
			while ((index = *next_entry) < 0) {
				/*
				 * This seems like a good value.  Determined by timing
				 * sgen-weakref-stress.exe.
				 */
				g_usleep (200);
				HEAVY_STAT (++stat_wait_for_processing);
			}
		}
		/* FREE -> BUSY */
		if (entries [index].state != STAGE_ENTRY_FREE ||
				InterlockedCompareExchange (&entries [index].state, STAGE_ENTRY_BUSY, STAGE_ENTRY_FREE) != STAGE_ENTRY_FREE) {
			/*
			 * If we can't get the entry it must be because another thread got
			 * it first.  We don't want to wait for that thread to increment
			 * `next_entry`, so we try to do it ourselves.  Whether we succeed
			 * or not, we start over.
			 */
			if (*next_entry == index) {
				InterlockedCompareExchange (next_entry, index + 1, index);
				//g_print ("tried increment for other thread\n");
				HEAVY_STAT (++stat_increment_other_thread);
			}
			continue;
		}
		/* state is BUSY now */
		mono_memory_write_barrier ();
		/*
		 * Incrementing `next_entry` must happen after setting the state to `BUSY`.
		 * If it were the other way around, it would be possible that after a filler
		 * incremented the index, other threads fill up the queue, the queue is
		 * drained, the original filler finally fills in the slot, but `next_entry`
		 * ends up at the start of the queue, and new entries are written in the
		 * queue in front of, not behind, the original filler's entry.
		 *
		 * We don't actually require that the CAS succeeds, but we do require that
		 * the value of `next_entry` is not lower than our index.  Since the drainer
		 * sets it to `-1`, that also takes care of the case that the drainer is
		 * currently draining the queue.
		 */
		old_next_entry = InterlockedCompareExchange (next_entry, index + 1, index);
		if (old_next_entry < index) {
			/* BUSY -> FREE */
			/* INVALID -> FREE */
			/*
			 * The state might still be `BUSY`, or the drainer could have set it
			 * to `INVALID`.  In either case, there's no point in CASing.  Set
			 * it to `FREE` and start over.
			 */
			entries [index].state = STAGE_ENTRY_FREE;
			HEAVY_STAT (++stat_index_decremented);
			continue;
		}
		break;
	}

	SGEN_ASSERT (0, index >= 0 && index < num_entries, "Invalid index");

	entries [index].obj = obj;
	entries [index].user_data = user_data;

	mono_memory_write_barrier ();

	new_next_entry = *next_entry;
	mono_memory_read_barrier ();

	/* BUSY -> USED */
	/*
	 * A `BUSY` entry will either still be `BUSY` or the drainer will have set it to
	 * `INVALID`.  In the former case, we set it to `USED` and we're finished.  In the
	 * latter case, we reset it to `FREE` and start over.
	 */
	previous_state = InterlockedCompareExchange (&entries [index].state, STAGE_ENTRY_USED, STAGE_ENTRY_BUSY);
	if (previous_state == STAGE_ENTRY_BUSY) {
		SGEN_ASSERT (0, new_next_entry >= index || new_next_entry < 0, "Invalid next entry index - as long as we're busy, other thread can only increment or invalidate it");
		HEAVY_STAT (++stat_success);
		return index;
	}

	SGEN_ASSERT (0, previous_state == STAGE_ENTRY_INVALID, "Invalid state transition - other thread can only make busy state invalid");
	entries [index].obj = NULL;
	entries [index].user_data = NULL;
	mono_memory_write_barrier ();
	/* INVALID -> FREE */
	entries [index].state = STAGE_ENTRY_FREE;

	HEAVY_STAT (++stat_entry_invalidated);

	goto retry;
}
/* LOCKING: requires that the GC lock is held */
static void
process_fin_stage_entry (GCObject *obj, void *user_data, int index)
{
	if (ptr_in_nursery (obj))
		register_for_finalization (obj, user_data, GENERATION_NURSERY);
	else
		register_for_finalization (obj, user_data, GENERATION_OLD);
}
/* LOCKING: requires that the GC lock is held */
void
sgen_process_fin_stage_entries (void)
{
	lock_stage_for_processing (&next_fin_stage_entry);
	process_stage_entries (NUM_FIN_STAGE_ENTRIES, &next_fin_stage_entry, fin_stage_entries, process_fin_stage_entry);
}
void
sgen_object_register_for_finalization (GCObject *obj, void *user_data)
{
	while (add_stage_entry (NUM_FIN_STAGE_ENTRIES, &next_fin_stage_entry, fin_stage_entries, obj, user_data) == -1) {
		if (try_lock_stage_for_processing (NUM_FIN_STAGE_ENTRIES, &next_fin_stage_entry)) {
			LOCK_GC;
			process_stage_entries (NUM_FIN_STAGE_ENTRIES, &next_fin_stage_entry, fin_stage_entries, process_fin_stage_entry);
			UNLOCK_GC;
		}
	}
}
/* LOCKING: requires that the GC lock is held */
static int
finalizers_with_predicate (SgenObjectPredicateFunc predicate, void *user_data, GCObject **out_array, int out_size, SgenHashTable *hash_table)
{
	GCObject *object;
	gpointer dummy G_GNUC_UNUSED;
	int count;

	if (no_finalize || !out_size || !out_array)
		return 0;

	count = 0;
	SGEN_HASH_TABLE_FOREACH (hash_table, object, dummy) {
		object = tagged_object_get_object (object);

		if (predicate (object, user_data)) {
			/* remove and put in out_array */
			SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);
			out_array [count ++] = object;
			SGEN_LOG (5, "Collecting object for finalization: %p (%s) (%d)", object, sgen_client_vtable_get_name (SGEN_LOAD_VTABLE (object)), sgen_hash_table_num_entries (hash_table));
			if (count == out_size)
				break;
		}
	} SGEN_HASH_TABLE_FOREACH_END;

	return count;
}
/**
 * sgen_gather_finalizers_if:
 * @predicate: predicate function
 * @user_data: predicate function data argument
 * @out_array: output array
 * @out_size: size of output array
 *
 * Store inside @out_array up to @out_size objects that match @predicate.  Returns the number
 * of stored items.  Can be called repeatedly until it returns 0.
 *
 * The items are removed from the finalizer data structure, so the caller is supposed
 * to finalize them.
 *
 * @out_array must be on the stack, or registered as a root, to allow the GC to know the
 * objects are still alive.
 */
int
sgen_gather_finalizers_if (SgenObjectPredicateFunc predicate, void *user_data, GCObject **out_array, int out_size)
{
	int result;

	LOCK_GC;
	sgen_process_fin_stage_entries ();
	result = finalizers_with_predicate (predicate, user_data, (GCObject**)out_array, out_size, &minor_finalizable_hash);
	if (result < out_size) {
		result += finalizers_with_predicate (predicate, user_data, (GCObject**)out_array + result, out_size - result,
			&major_finalizable_hash);
	}
	UNLOCK_GC;

	return result;
}
void
sgen_remove_finalizers_if (SgenObjectPredicateFunc predicate, void *user_data, int generation)
{
	SgenHashTable *hash_table = get_finalize_entry_hash_table (generation);
	GCObject *object;
	gpointer dummy G_GNUC_UNUSED;

	SGEN_HASH_TABLE_FOREACH (hash_table, object, dummy) {
		object = tagged_object_get_object (object);

		if (predicate (object, user_data)) {
			SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);
			continue;
		}
	} SGEN_HASH_TABLE_FOREACH_END;
}
#ifdef HEAVY_STATISTICS
static volatile guint64 stat_gc_handles_allocated = 0;
static volatile guint64 stat_gc_handles_max_allocated = 0;
#endif

#define BUCKETS (32 - MONO_GC_HANDLE_TYPE_SHIFT)
#define MIN_BUCKET_BITS (5)
#define MIN_BUCKET_SIZE (1 << MIN_BUCKET_BITS)
/*
 * A table of GC handle data, implementing a simple lock-free bitmap allocator.
 *
 * 'entries' is an array of pointers to buckets of increasing size. The first
 * bucket has size 'MIN_BUCKET_SIZE', and each bucket is twice the size of the
 * previous, i.e.:
 *
 *           |-------|-- MIN_BUCKET_SIZE
 *    [0] -> xxxxxxxx
 *    [1] -> xxxxxxxxxxxxxxxx
 *    [2] -> xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
 *    ...
 *
 * The size of the spine, 'BUCKETS', is chosen so that the maximum number of
 * entries is no less than the maximum index value of a GC handle.
 *
 * Each entry in a bucket is a pointer with two tag bits: if
 * 'GC_HANDLE_OCCUPIED' returns true for a slot, then the slot is occupied; if
 * so, then 'GC_HANDLE_VALID' gives whether the entry refers to a valid (1) or
 * NULL (0) object reference. If the reference is valid, then the pointer is an
 * object pointer. If the reference is NULL, and 'GC_HANDLE_TYPE_IS_WEAK' is
 * true for 'type', then the pointer is a domain pointer--this allows us to
 * retrieve the domain ID of an expired weak reference.
 *
 * Finally, 'slot_hint' denotes the position of the last allocation, so that the
 * whole array needn't be searched on every allocation.
 */
typedef struct {
	volatile gpointer *volatile entries [BUCKETS];
	volatile guint32 capacity;
	volatile guint32 slot_hint;
	guint8 type;
} HandleData;
static inline guint
bucket_size (guint index)
{
	return 1 << (index + MIN_BUCKET_BITS);
}
/* Computes floor(log2(index + MIN_BUCKET_SIZE)) - MIN_BUCKET_BITS, giving the index
 * of the bucket containing a slot.
 */
static inline guint
index_bucket (guint index)
{
#ifdef __GNUC__
	return CHAR_BIT * sizeof (index) - __builtin_clz (index + MIN_BUCKET_SIZE) - 1 - MIN_BUCKET_BITS;
#else
	guint count = 0;
	index += MIN_BUCKET_SIZE;
	while (index) {
		++count;
		index >>= 1;
	}
	return count - 1 - MIN_BUCKET_BITS;
#endif
}
static inline void
bucketize (guint index, guint *bucket, guint *offset)
{
	*bucket = index_bucket (index);
	*offset = index - bucket_size (*bucket) + MIN_BUCKET_SIZE;
}
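
/*
 * Worked example: with MIN_BUCKET_BITS == 5 the buckets hold 32, 64, 128, ...
 * slots, so bucket b starts at global index bucket_size (b) - MIN_BUCKET_SIZE.
 * For index 100:
 *
 *	index_bucket (100) == floor (log2 (100 + 32)) - 5 == 7 - 5 == 2
 *	offset == 100 - bucket_size (2) + 32 == 100 - 128 + 32 == 4
 *
 * consistent with buckets 0 and 1 together covering indices 0..95.
 */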
static inline gboolean
try_set_slot (volatile gpointer *slot, MonoObject *obj, gpointer old, GCHandleType type)
{
	if (obj)
		return InterlockedCompareExchangePointer (slot, MONO_GC_HANDLE_OBJECT_POINTER (obj, GC_HANDLE_TYPE_IS_WEAK (type)), old) == old;
	return InterlockedCompareExchangePointer (slot, MONO_GC_HANDLE_DOMAIN_POINTER (mono_domain_get (), GC_HANDLE_TYPE_IS_WEAK (type)), old) == old;
}
/* Try to claim a slot by setting its occupied bit. */
static inline gboolean
try_occupy_slot (HandleData *handles, guint bucket, guint offset, MonoObject *obj, gboolean track)
{
	volatile gpointer *link_addr = &(handles->entries [bucket] [offset]);
	if (MONO_GC_HANDLE_OCCUPIED (*link_addr))
		return FALSE;
	return try_set_slot (link_addr, obj, NULL, handles->type);
}
#define EMPTY_HANDLE_DATA(type) { { NULL }, 0, 0, (type) }

/* weak and weak-track arrays will be allocated in malloc memory */
static HandleData gc_handles [] = {
	EMPTY_HANDLE_DATA (HANDLE_WEAK),
	EMPTY_HANDLE_DATA (HANDLE_WEAK_TRACK),
	EMPTY_HANDLE_DATA (HANDLE_NORMAL),
	EMPTY_HANDLE_DATA (HANDLE_PINNED)
};
static HandleData*
gc_handles_for_type (GCHandleType type)
{
	g_assert (type < HANDLE_TYPE_MAX);
	return &gc_handles [type];
}
/* This assumes that the world is stopped. */
void
sgen_mark_normal_gc_handles (void *addr, SgenUserMarkFunc mark_func, void *gc_data)
{
	HandleData *handles = gc_handles_for_type (HANDLE_NORMAL);
	size_t bucket, offset;
	const guint max_bucket = index_bucket (handles->capacity);
	for (bucket = 0; bucket < max_bucket; ++bucket) {
		volatile gpointer *entries = handles->entries [bucket];
		for (offset = 0; offset < bucket_size (bucket); ++offset) {
			volatile gpointer *entry = &entries [offset];
			gpointer hidden = *entry;
			gpointer revealed = MONO_GC_REVEAL_POINTER (hidden, FALSE);
			if (!MONO_GC_HANDLE_IS_OBJECT_POINTER (hidden))
				continue;
			mark_func ((MonoObject **)&revealed, gc_data);
			*entry = MONO_GC_HANDLE_OBJECT_POINTER (revealed, FALSE);
		}
	}
}
static gint
handle_data_find_unset (HandleData *handles, guint32 begin, guint32 end)
{
	guint32 index;
	gint delta = begin < end ? +1 : -1;
	for (index = begin; index < end; index += delta) {
		guint bucket, offset;
		volatile gpointer *entries;
		bucketize (index, &bucket, &offset);
		entries = handles->entries [bucket];
		if (!MONO_GC_HANDLE_OCCUPIED (entries [offset]))
			return index;
	}
	return -1;
}
/* Adds a bucket if necessary and possible. */
static void
handle_data_grow (HandleData *handles, guint32 old_capacity)
{
	const guint new_bucket = index_bucket (old_capacity);
	const guint32 growth = bucket_size (new_bucket);
	const guint32 new_capacity = old_capacity + growth;
	gpointer *entries;
	const size_t new_bucket_size = sizeof (**handles->entries) * growth;
	if (handles->capacity >= new_capacity)
		return;
	entries = g_malloc0 (new_bucket_size);
	if (handles->type == HANDLE_PINNED)
		sgen_register_root ((char *)entries, new_bucket_size, SGEN_DESCRIPTOR_NULL, ROOT_TYPE_PINNED, MONO_ROOT_SOURCE_GC_HANDLE, "pinned gc handles");
	if (InterlockedCompareExchangePointer ((volatile gpointer *)&handles->entries [new_bucket], entries, NULL) == NULL) {
		if (InterlockedCompareExchange ((volatile gint32 *)&handles->capacity, new_capacity, old_capacity) != old_capacity)
			g_assert_not_reached ();
		handles->slot_hint = old_capacity;
		mono_memory_write_barrier ();
		return;
	}
	/* Someone beat us to the allocation. */
	if (handles->type == HANDLE_PINNED)
		sgen_deregister_root ((char *)entries);
	g_free (entries);
}
static guint32
alloc_handle (HandleData *handles, MonoObject *obj, gboolean track)
{
	gint index;
	guint32 res;
	guint bucket, offset;
	guint32 capacity, slot_hint;

	if (!handles->capacity)
		handle_data_grow (handles, 0);
retry:
	capacity = handles->capacity;
	slot_hint = handles->slot_hint;
	index = handle_data_find_unset (handles, slot_hint, capacity);
	if (index == -1)
		index = handle_data_find_unset (handles, 0, slot_hint);
	if (index == -1) {
		handle_data_grow (handles, capacity);
		goto retry;
	}
	handles->slot_hint = index;
	bucketize (index, &bucket, &offset);
	if (!try_occupy_slot (handles, bucket, offset, obj, track))
		goto retry;
#ifdef HEAVY_STATISTICS
	InterlockedIncrement64 ((volatile gint64 *)&stat_gc_handles_allocated);
	if (stat_gc_handles_allocated > stat_gc_handles_max_allocated)
		stat_gc_handles_max_allocated = stat_gc_handles_allocated;
#endif
	if (obj && MONO_GC_HANDLE_TYPE_IS_WEAK (handles->type))
		binary_protocol_dislink_add ((gpointer)&handles->entries [bucket] [offset], obj, track);
	/* Ensure that a GC handle cannot be given to another thread without the slot having been set. */
	mono_memory_write_barrier ();
#ifndef DISABLE_PERFCOUNTERS
	mono_perfcounters->gc_num_handles++;
#endif
	res = MONO_GC_HANDLE (index, handles->type);
	mono_profiler_gc_handle (MONO_PROFILER_GC_HANDLE_CREATED, handles->type, res, obj);
	return res;
}
static gboolean
object_older_than (GCObject *object, int generation)
{
	return generation == GENERATION_NURSERY && !sgen_ptr_in_nursery (object);
}
/*
 * Maps a function over all GC handles.
 * This assumes that the world is stopped!
 */
void
sgen_gchandle_iterate (GCHandleType handle_type, int max_generation, gpointer callback(GCObject*, GCHandleType, gpointer), gpointer user)
{
	HandleData *handle_data = gc_handles_for_type (handle_type);
	size_t bucket, offset;
	guint max_bucket = index_bucket (handle_data->capacity);
	/* If a new bucket has been allocated, but the capacity has not yet been
	 * increased, nothing can yet have been allocated in the bucket because the
	 * world is stopped, so we shouldn't miss any handles during iteration.
	 */
	for (bucket = 0; bucket < max_bucket; ++bucket) {
		volatile gpointer *entries = handle_data->entries [bucket];
		for (offset = 0; offset < bucket_size (bucket); ++offset) {
			gpointer hidden = entries [offset];
			gpointer result;
			gpointer revealed;
			/* Table must contain no garbage pointers. */
			gboolean occupied = MONO_GC_HANDLE_OCCUPIED (hidden);
			g_assert (hidden ? occupied : !occupied);
			if (!occupied || !MONO_GC_HANDLE_VALID (hidden))
				continue;
			revealed = MONO_GC_REVEAL_POINTER (hidden, MONO_GC_HANDLE_TYPE_IS_WEAK (handle_type));
			if (object_older_than (revealed, max_generation))
				continue;
			result = callback (revealed, handle_type, user);
			if (result)
				g_assert (MONO_GC_HANDLE_OCCUPIED (result));
			else
				HEAVY_STAT (InterlockedDecrement64 ((volatile gint64 *)&stat_gc_handles_allocated));
			entries [offset] = result;
		}
	}
}
/**
 * mono_gchandle_new:
 * @obj: managed object to get a handle for
 * @pinned: whether the object should be pinned
 *
 * This returns a handle that wraps the object. It is used to keep a
 * reference to a managed object from the unmanaged world, preventing the
 * object from being disposed.
 *
 * If @pinned is false the address of the object can not be obtained; if it is
 * true the address of the object can be obtained. This will also pin the
 * object so that a moving garbage collector cannot move it.
 *
 * Returns: a handle that can be used to access the object from
 * unmanaged code.
 */
guint32
mono_gchandle_new (MonoObject *obj, gboolean pinned)
{
	return alloc_handle (gc_handles_for_type (pinned ? HANDLE_PINNED : HANDLE_NORMAL), obj, FALSE);
}
/**
 * mono_gchandle_new_weakref:
 * @obj: managed object to get a handle for
 * @track_resurrection: whether to track the object through finalization
 *
 * This returns a weak handle that wraps the object. It is used to
 * keep a reference to a managed object from the unmanaged world.
 * Unlike mono_gchandle_new, the object can be reclaimed by the
 * garbage collector. In this case the value of the GCHandle will be
 * set to NULL.
 *
 * If @track_resurrection is TRUE the handle keeps tracking the object even
 * after it has been finalized (i.e. through resurrection); if it is FALSE
 * the handle stops tracking the object once it becomes eligible for
 * finalization.
 *
 * Returns: a handle that can be used to access the object from
 * unmanaged code.
 */
guint32
mono_gchandle_new_weakref (MonoObject *obj, gboolean track_resurrection)
{
	return alloc_handle (gc_handles_for_type (track_resurrection ? HANDLE_WEAK_TRACK : HANDLE_WEAK), obj, track_resurrection);
}
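
/*
 * Usage sketch from unmanaged code (illustrative only):
 *
 *	guint32 handle = mono_gchandle_new_weakref (obj, FALSE);
 *	...
 *	MonoObject *target = mono_gchandle_get_target (handle);
 *	if (target) {
 *		// object is still alive
 *	}
 *	mono_gchandle_free (handle);
 *
 * mono_gchandle_get_target () (defined below) returns NULL once a weakly
 * referenced object has been collected.
 */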
static void
ensure_weak_links_accessible (void)
{
	/*
	 * During the second bridge processing step the world is
	 * running again.  That step processes all weak links once
	 * more to null those that refer to dead objects.  Before that
	 * is completed, those links must not be followed, so we
	 * conservatively wait for bridge processing when any weak
	 * link is dereferenced.
	 */
	/* FIXME: A GC can occur after this check fails, in which case we
	 * should wait for bridge processing but would fail to do so.
	 */
	if (G_UNLIKELY (bridge_processing_in_progress))
		mono_gc_wait_for_bridge_processing ();
}
static MonoObject*
link_get (volatile gpointer *link_addr, gboolean is_weak)
{
	void *volatile *link_addr_volatile;
	void *ptr;
	MonoObject *obj;

retry:
	link_addr_volatile = link_addr;
	ptr = (void*)*link_addr_volatile;
	/*
	 * At this point we have a hidden pointer.  If the GC runs
	 * here, it will not recognize the hidden pointer as a
	 * reference, and if the object behind it is not referenced
	 * elsewhere, it will be freed.  Once the world is restarted
	 * we reveal the pointer, giving us a pointer to a freed
	 * object.  To make sure we don't return it, we load the
	 * hidden pointer again.  If it's still the same, we can be
	 * sure the object reference is valid.
	 */
	if (ptr && MONO_GC_HANDLE_IS_OBJECT_POINTER (ptr))
		obj = (MonoObject *)MONO_GC_REVEAL_POINTER (ptr, is_weak);
	else
		return NULL;

	/* Note [dummy use]:
	 *
	 * If a GC happens here, obj needs to be on the stack or in a
	 * register, so we need to prevent this from being reordered
	 * wrt the check below.
	 */
	mono_gc_dummy_use (obj);
	mono_memory_barrier ();

	if (is_weak)
		ensure_weak_links_accessible ();

	if ((void*)*link_addr_volatile != ptr)
		goto retry;

	return obj;
}
/**
 * mono_gchandle_get_target:
 * @gchandle: a GCHandle's handle.
 *
 * The handle was previously created by calling mono_gchandle_new or
 * mono_gchandle_new_weakref.
 *
 * Returns: a pointer to the MonoObject represented by the handle or
 * NULL for a collected object if using a weakref handle.
 */
MonoObject*
mono_gchandle_get_target (guint32 gchandle)
{
	guint index = MONO_GC_HANDLE_SLOT (gchandle);
	guint type = MONO_GC_HANDLE_TYPE (gchandle);
	HandleData *handles = gc_handles_for_type (type);
	guint bucket, offset;
	g_assert (index < handles->capacity);
	bucketize (index, &bucket, &offset);
	return link_get (&handles->entries [bucket] [offset], MONO_GC_HANDLE_TYPE_IS_WEAK (type));
}
void
sgen_gchandle_set_target (guint32 gchandle, GCObject *obj)
{
	guint index = MONO_GC_HANDLE_SLOT (gchandle);
	guint type = MONO_GC_HANDLE_TYPE (gchandle);
	HandleData *handles = gc_handles_for_type (type);
	gboolean track = handles->type == HANDLE_WEAK_TRACK;
	guint bucket, offset;
	gpointer slot;

	g_assert (index < handles->capacity);
	bucketize (index, &bucket, &offset);

retry:
	slot = handles->entries [bucket] [offset];
	g_assert (MONO_GC_HANDLE_OCCUPIED (slot));
	if (!try_set_slot (&handles->entries [bucket] [offset], obj, slot, MONO_GC_HANDLE_TYPE_IS_WEAK (handles->type)))
		goto retry;
	if (MONO_GC_HANDLE_IS_OBJECT_POINTER (slot))
		binary_protocol_dislink_remove ((gpointer)&handles->entries [bucket] [offset], track);
	if (obj)
		binary_protocol_dislink_add ((gpointer)&handles->entries [bucket] [offset], obj, track);
}
static MonoDomain*
mono_gchandle_slot_domain (volatile gpointer *slot_addr, gboolean is_weak)
{
	gpointer slot;
	MonoDomain *domain;

retry:
	slot = *slot_addr;
	if (!MONO_GC_HANDLE_OCCUPIED (slot))
		return NULL;
	if (MONO_GC_HANDLE_IS_OBJECT_POINTER (slot)) {
		MonoObject *obj = MONO_GC_REVEAL_POINTER (slot, is_weak);
		/* See note [dummy use]. */
		mono_gc_dummy_use (obj);
		if (*slot_addr != slot)
			goto retry;
		return mono_object_domain (obj);
	}
	domain = MONO_GC_REVEAL_POINTER (slot, is_weak);
	/* See note [dummy use]. */
	mono_gc_dummy_use (domain);
	if (*slot_addr != slot)
		goto retry;
	return domain;
}
static MonoDomain*
gchandle_domain (guint32 gchandle)
{
	guint index = MONO_GC_HANDLE_SLOT (gchandle);
	guint type = MONO_GC_HANDLE_TYPE (gchandle);
	HandleData *handles = gc_handles_for_type (type);
	guint bucket, offset;
	if (index >= handles->capacity)
		return NULL;
	bucketize (index, &bucket, &offset);
	return mono_gchandle_slot_domain (&handles->entries [bucket] [offset], MONO_GC_HANDLE_TYPE_IS_WEAK (type));
}
/**
 * mono_gchandle_is_in_domain:
 * @gchandle: a GCHandle's handle.
 * @domain: An application domain.
 *
 * Returns: TRUE if the object wrapped by the @gchandle belongs to the specific @domain.
 */
gboolean
mono_gchandle_is_in_domain (guint32 gchandle, MonoDomain *domain)
{
	return domain->domain_id == gchandle_domain (gchandle)->domain_id;
}
/**
 * mono_gchandle_free:
 * @gchandle: a GCHandle's handle.
 *
 * Frees the @gchandle handle.  If there are no outstanding
 * references, the garbage collector can reclaim the memory of the
 * object wrapped.
 */
void
mono_gchandle_free (guint32 gchandle)
{
	guint index = MONO_GC_HANDLE_SLOT (gchandle);
	guint type = MONO_GC_HANDLE_TYPE (gchandle);
	HandleData *handles = gc_handles_for_type (type);
	guint bucket, offset;
	bucketize (index, &bucket, &offset);
	if (index < handles->capacity && MONO_GC_HANDLE_OCCUPIED (handles->entries [bucket] [offset])) {
		if (MONO_GC_HANDLE_TYPE_IS_WEAK (handles->type))
			binary_protocol_dislink_remove ((gpointer)&handles->entries [bucket] [offset], handles->type == HANDLE_WEAK_TRACK);
		handles->entries [bucket] [offset] = NULL;
		HEAVY_STAT (InterlockedDecrement64 ((volatile gint64 *)&stat_gc_handles_allocated));
	} else {
		/* print a warning? */
	}
#ifndef DISABLE_PERFCOUNTERS
	mono_perfcounters->gc_num_handles--;
#endif
	mono_profiler_gc_handle (MONO_PROFILER_GC_HANDLE_DESTROYED, handles->type, gchandle, NULL);
}
/**
 * mono_gchandle_free_domain:
 * @unloading: domain that is unloading
 *
 * Function used internally to cleanup any GC handle for objects belonging
 * to the specified domain during appdomain unload.
 */
void
mono_gchandle_free_domain (MonoDomain *unloading)
{
	guint type;
	/* All non-pinned handle types. */
	for (type = HANDLE_TYPE_MIN; type < HANDLE_PINNED; ++type) {
		const gboolean is_weak = MONO_GC_HANDLE_TYPE_IS_WEAK (type);
		guint index;
		HandleData *handles = gc_handles_for_type (type);
		guint32 capacity = handles->capacity;
		for (index = 0; index < capacity; ++index) {
			guint bucket, offset;
			gpointer slot;
			bucketize (index, &bucket, &offset);
			MonoObject *obj = NULL;
			MonoDomain *domain;
			volatile gpointer *slot_addr = &handles->entries [bucket] [offset];
			/* NB: This should have the same behavior as mono_gchandle_slot_domain(). */
		retry:
			slot = *slot_addr;
			if (!MONO_GC_HANDLE_OCCUPIED (slot))
				continue;
			if (MONO_GC_HANDLE_IS_OBJECT_POINTER (slot)) {
				obj = MONO_GC_REVEAL_POINTER (slot, is_weak);
				if (*slot_addr != slot)
					goto retry;
				domain = mono_object_domain (obj);
			} else {
				domain = MONO_GC_REVEAL_POINTER (slot, is_weak);
			}
			if (unloading->domain_id == domain->domain_id) {
				if (MONO_GC_HANDLE_TYPE_IS_WEAK (type) && MONO_GC_REVEAL_POINTER (slot, is_weak))
					binary_protocol_dislink_remove ((gpointer)&handles->entries [bucket] [offset], handles->type == HANDLE_WEAK_TRACK);
				*slot_addr = NULL;
				HEAVY_STAT (InterlockedDecrement64 ((volatile gint64 *)&stat_gc_handles_allocated));
			}
			/* See note [dummy use]. */
			mono_gc_dummy_use (obj);
		}
	}
}
/*
 * Returns the new hidden pointer to store in the link's slot; returning NULL
 * frees the link.
 */
static gpointer
null_link_if_necessary (GCObject *obj, GCHandleType handle_type, gpointer user)
{
	const gboolean is_weak = GC_HANDLE_TYPE_IS_WEAK (handle_type);
	ScanCopyContext *ctx = (ScanCopyContext *)user;
	GCObject *copy = obj;

	if (major_collector.is_object_live (obj))
		return MONO_GC_HANDLE_OBJECT_POINTER (obj, is_weak);

	gboolean obj_in_nursery = ptr_in_nursery (obj);
	if (sgen_get_current_collection_generation () == GENERATION_NURSERY && !obj_in_nursery)
		return MONO_GC_HANDLE_OBJECT_POINTER (obj, is_weak);

	/* Clear link if object is ready for finalization. This check may be redundant wrt is_object_live(). */
	if (sgen_gc_is_object_ready_for_finalization (obj)) {
		/* binary_protocol_dislink_update (hidden_entry, entry, 0); */
		return MONO_GC_HANDLE_DOMAIN_POINTER (mono_object_domain (obj), is_weak);
	}

	ctx->ops->copy_or_mark_object (&copy, ctx->queue);
	/* binary_protocol_dislink_update (hidden_entry, copy, handle_type == HANDLE_WEAK_TRACK); */
	/* Update link if object was moved. */
	return MONO_GC_HANDLE_OBJECT_POINTER (copy, is_weak);
}
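
/*
 * In short, the cases above resolve as follows:
 *
 *	object live in major heap      -> keep the object pointer unchanged
 *	old object during a minor GC   -> keep (outside this collection's scope)
 *	object ready for finalization  -> null the link, storing a domain pointer
 *	otherwise                      -> the object survived and may have moved;
 *	                                  store the (possibly updated) pointer
 */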
/* LOCKING: requires that the GC lock is held */
void
sgen_null_link_in_range (int generation, ScanCopyContext ctx, gboolean track)
{
	sgen_gchandle_iterate (track ? HANDLE_WEAK_TRACK : HANDLE_WEAK, generation, null_link_if_necessary, &ctx);
}
typedef struct {
	SgenObjectPredicateFunc predicate;
	gpointer data;
} WeakLinkAlivePredicateClosure;
static gpointer
null_link_if (GCObject *obj, GCHandleType handle_type, gpointer user)
{
	/* Strictly speaking, function pointers are not guaranteed to have the same size as data pointers. */
	WeakLinkAlivePredicateClosure *closure = (WeakLinkAlivePredicateClosure *)user;
	if (closure->predicate (obj, closure->data)) {
		/* binary_protocol_dislink_update (hidden_entry, NULL, 0); */
		return NULL;
	}
	return MONO_GC_HANDLE_OBJECT_POINTER (obj, GC_HANDLE_TYPE_IS_WEAK (handle_type));
}
/* LOCKING: requires that the GC lock is held */
void
sgen_null_links_if (SgenObjectPredicateFunc predicate, void *data, int generation, gboolean track)
{
	WeakLinkAlivePredicateClosure closure = { predicate, data };
	sgen_gchandle_iterate (track ? HANDLE_WEAK_TRACK : HANDLE_WEAK, generation, null_link_if, &closure);
}
void
sgen_init_fin_weak_hash (void)
{
#ifdef HEAVY_STATISTICS
	mono_counters_register ("FinWeak Successes", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_success);
	mono_counters_register ("FinWeak Overflow aborts", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_overflow_abort);
	mono_counters_register ("FinWeak Wait for processing", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_wait_for_processing);
	mono_counters_register ("FinWeak Increment other thread", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_increment_other_thread);
	mono_counters_register ("FinWeak Index decremented", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_index_decremented);
	mono_counters_register ("FinWeak Entry invalidated", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_entry_invalidated);

	mono_counters_register ("GC handles allocated", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_gc_handles_allocated);
	mono_counters_register ("max GC handles allocated", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_gc_handles_max_allocated);
#endif
}
#endif /* HAVE_SGEN_GC */