/*
 * sgen-fin-weak-hash.c: Finalizers and weak links.
 *
 * Paolo Molaro (lupus@ximian.com)
 * Rodrigo Kumpera (kumpera@gmail.com)
 *
 * Copyright 2005-2011 Novell, Inc (http://www.novell.com)
 * Copyright 2011 Xamarin Inc (http://www.xamarin.com)
 * Copyright 2011 Xamarin, Inc.
 * Copyright (C) 2012 Xamarin Inc
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Library General Public
 * License 2.0 as published by the Free Software Foundation;
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Library General Public License for more details.
 *
 * You should have received a copy of the GNU Library General Public
 * License 2.0 along with this library; if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include "mono/sgen/sgen-gc.h"
#include "mono/sgen/sgen-gray.h"
#include "mono/sgen/sgen-protocol.h"
#include "mono/sgen/sgen-pointer-queue.h"
#include "mono/sgen/sgen-client.h"
#include "mono/sgen/gc-internal-agnostic.h"
#include "mono/utils/mono-membar.h"

#define ptr_in_nursery sgen_ptr_in_nursery

typedef SgenGrayQueue GrayQueue;

static int no_finalize = 0;

/*
 * The finalizable hash has the object as the key; the
 * disappearing_link hash has the link address as the key.
 */

#define TAG_MASK ((mword)0x1)

static inline GCObject*
tagged_object_get_object (GCObject *object)
{
	return (GCObject*)(((mword)object) & ~TAG_MASK);
}

static inline mword
tagged_object_get_tag (GCObject *object)
{
	return ((mword)object) & TAG_MASK;
}

static inline GCObject*
tagged_object_apply (void *object, int tag_bits)
{
	return (GCObject*)((mword)object | (mword)tag_bits);
}

static inline guint
tagged_object_hash (GCObject *o)
{
	return sgen_aligned_addr_hash (tagged_object_get_object (o));
}

static inline gboolean
tagged_object_equals (GCObject *a, GCObject *b)
{
	return tagged_object_get_object (a) == tagged_object_get_object (b);
}

static SgenHashTable minor_finalizable_hash = SGEN_HASH_TABLE_INIT (INTERNAL_MEM_FIN_TABLE, INTERNAL_MEM_FINALIZE_ENTRY, 0, (GHashFunc)tagged_object_hash, (GEqualFunc)tagged_object_equals);
static SgenHashTable major_finalizable_hash = SGEN_HASH_TABLE_INIT (INTERNAL_MEM_FIN_TABLE, INTERNAL_MEM_FINALIZE_ENTRY, 0, (GHashFunc)tagged_object_hash, (GEqualFunc)tagged_object_equals);

static SgenHashTable*
get_finalize_entry_hash_table (int generation)
{
	switch (generation) {
	case GENERATION_NURSERY: return &minor_finalizable_hash;
	case GENERATION_OLD: return &major_finalizable_hash;
	default: g_assert_not_reached ();
	}
}

#define BRIDGE_OBJECT_MARKED 0x1

/* LOCKING: requires that the GC lock is held */
void
sgen_mark_bridge_object (GCObject *obj)
{
	SgenHashTable *hash_table = get_finalize_entry_hash_table (ptr_in_nursery (obj) ? GENERATION_NURSERY : GENERATION_OLD);

	sgen_hash_table_set_key (hash_table, obj, tagged_object_apply (obj, BRIDGE_OBJECT_MARKED));
}

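/*
 * Illustrative sketch (not part of the original source): the bridge mark rides
 * in the low bit of a finalizable-hash key, so both the object pointer and the
 * mark can be recovered from the key alone:
 *
 *	GCObject *key = tagged_object_apply (obj, BRIDGE_OBJECT_MARKED);
 *	g_assert (tagged_object_get_object (key) == obj);
 *	g_assert (tagged_object_get_tag (key) == BRIDGE_OBJECT_MARKED);
 */
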
/* LOCKING: requires that the GC lock is held */
void
sgen_collect_bridge_objects (int generation, ScanCopyContext ctx)
{
	CopyOrMarkObjectFunc copy_func = ctx.ops->copy_or_mark_object;
	GrayQueue *queue = ctx.queue;
	SgenHashTable *hash_table = get_finalize_entry_hash_table (generation);
	GCObject *object;
	GCObject *copy;
	gpointer dummy G_GNUC_UNUSED;
	SgenPointerQueue moved_fin_objects;

	sgen_pointer_queue_init (&moved_fin_objects, INTERNAL_MEM_TEMPORARY);

	SGEN_HASH_TABLE_FOREACH (hash_table, object, dummy) {
		int tag = tagged_object_get_tag (object);
		object = tagged_object_get_object (object);

		/* Bridge code told us to ignore this one */
		if (tag == BRIDGE_OBJECT_MARKED)
			continue;

		/* Object is a bridge object and major heap says it's dead */
		if (major_collector.is_object_live (object))
			continue;

		/* Nursery says the object is dead. */
		if (!sgen_gc_is_object_ready_for_finalization (object))
			continue;

		if (!sgen_client_bridge_is_bridge_object (object))
			continue;

		copy = object;
		copy_func (&copy, queue);

		sgen_client_bridge_register_finalized_object (copy);

		if (hash_table == &minor_finalizable_hash && !ptr_in_nursery (copy)) {
			/* remove from the list */
			SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);

			/* insert it into the major hash */
			sgen_hash_table_replace (&major_finalizable_hash, tagged_object_apply (copy, tag), NULL, NULL);

			SGEN_LOG (5, "Promoting finalization of object %p (%s) (was at %p) to major table", copy, sgen_client_vtable_get_name (SGEN_LOAD_VTABLE (copy)), object);
		} else if (copy != object) {
			/* update pointer */
			SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);

			/* register for reinsertion */
			sgen_pointer_queue_add (&moved_fin_objects, tagged_object_apply (copy, tag));

			SGEN_LOG (5, "Updating object for finalization: %p (%s) (was at %p)", copy, sgen_client_vtable_get_name (SGEN_LOAD_VTABLE (copy)), object);
		}
	} SGEN_HASH_TABLE_FOREACH_END;

	while (!sgen_pointer_queue_is_empty (&moved_fin_objects)) {
		sgen_hash_table_replace (hash_table, sgen_pointer_queue_pop (&moved_fin_objects), NULL, NULL);
	}

	sgen_pointer_queue_free (&moved_fin_objects);
}

/* LOCKING: requires that the GC lock is held */
void
sgen_finalize_in_range (int generation, ScanCopyContext ctx)
{
	CopyOrMarkObjectFunc copy_func = ctx.ops->copy_or_mark_object;
	GrayQueue *queue = ctx.queue;
	SgenHashTable *hash_table = get_finalize_entry_hash_table (generation);
	GCObject *object;
	gpointer dummy G_GNUC_UNUSED;
	SgenPointerQueue moved_fin_objects;

	sgen_pointer_queue_init (&moved_fin_objects, INTERNAL_MEM_TEMPORARY);

	SGEN_HASH_TABLE_FOREACH (hash_table, object, dummy) {
		int tag = tagged_object_get_tag (object);
		object = tagged_object_get_object (object);
		if (!major_collector.is_object_live (object)) {
			gboolean is_fin_ready = sgen_gc_is_object_ready_for_finalization (object);
			GCObject *copy = object;
			copy_func (&copy, queue);
			if (is_fin_ready) {
				/* remove and put in fin_ready_list */
				SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);
				sgen_queue_finalization_entry (copy);
				/* Make it survive */
				SGEN_LOG (5, "Queueing object for finalization: %p (%s) (was at %p) (%d)", copy, sgen_client_vtable_get_name (SGEN_LOAD_VTABLE (copy)), object, sgen_hash_table_num_entries (hash_table));
			} else if (hash_table == &minor_finalizable_hash && !ptr_in_nursery (copy)) {
				/* remove from the list */
				SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);

				/* insert it into the major hash */
				sgen_hash_table_replace (&major_finalizable_hash, tagged_object_apply (copy, tag), NULL, NULL);

				SGEN_LOG (5, "Promoting finalization of object %p (%s) (was at %p) to major table", copy, sgen_client_vtable_get_name (SGEN_LOAD_VTABLE (copy)), object);
			} else if (copy != object) {
				/* update pointer */
				SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);

				/* register for reinsertion */
				sgen_pointer_queue_add (&moved_fin_objects, tagged_object_apply (copy, tag));

				SGEN_LOG (5, "Updating object for finalization: %p (%s) (was at %p)", copy, sgen_client_vtable_get_name (SGEN_LOAD_VTABLE (copy)), object);
			}
		}
	} SGEN_HASH_TABLE_FOREACH_END;

	while (!sgen_pointer_queue_is_empty (&moved_fin_objects)) {
		sgen_hash_table_replace (hash_table, sgen_pointer_queue_pop (&moved_fin_objects), NULL, NULL);
	}

	sgen_pointer_queue_free (&moved_fin_objects);
}

/* LOCKING: requires that the GC lock is held */
static void
register_for_finalization (GCObject *obj, void *user_data, int generation)
{
	SgenHashTable *hash_table = get_finalize_entry_hash_table (generation);

	if (user_data) {
		if (sgen_hash_table_replace (hash_table, obj, NULL, NULL)) {
			GCVTable vt = SGEN_LOAD_VTABLE_UNCHECKED (obj);
			SGEN_LOG (5, "Added finalizer for object: %p (%s) (%d) to %s table", obj, sgen_client_vtable_get_name (vt), hash_table->num_entries, sgen_generation_name (generation));
		}
	} else if (sgen_hash_table_remove (hash_table, obj, NULL)) {
		GCVTable vt = SGEN_LOAD_VTABLE_UNCHECKED (obj);
		SGEN_LOG (5, "Removed finalizer for object: %p (%s) (%d)", obj, sgen_client_vtable_get_name (vt), hash_table->num_entries);
	}
}

/*
 * We're using (mostly) non-locking staging queues for finalizers and weak links to speed
 * up registering them.  Otherwise we'd have to take the GC lock.
 *
 * The queues are arrays of `StageEntry`, plus a `next_entry` index.  Threads add entries to
 * the queue via `add_stage_entry()` in a linear fashion until it fills up, in which case
 * `process_stage_entries()` is called to drain it.  A garbage collection will also drain
 * the queues via the same function.  That implies that `add_stage_entry()`, since it
 * doesn't take a lock, must be able to run concurrently with `process_stage_entries()`,
 * though it doesn't have to make progress while the queue is drained.  In fact, once it
 * detects that the queue is being drained, it blocks until the draining is done.
 *
 * The protocol must guarantee that entries in the queue are causally ordered, otherwise two
 * entries for the same location might get switched, resulting in the earlier one being
 * committed and the later one ignored.
 *
 * `next_entry` is the index of the next entry to be filled, or `-1` if the queue is
 * currently being drained.  Each entry has a state:
 *
 * `STAGE_ENTRY_FREE`: The entry is free.  Its data fields must be `NULL`.
 *
 * `STAGE_ENTRY_BUSY`: The entry is currently being filled in.
 *
 * `STAGE_ENTRY_USED`: The entry is completely filled in and must be processed in the next
 * collection.
 *
 * `STAGE_ENTRY_INVALID`: The entry was busy during queue draining and therefore
 * invalidated.  Entries that are `BUSY` can obviously not be processed during a drain, but
 * we can't leave them in place because new entries might be inserted before them, including
 * from the same thread, violating causality.  An alternative would be not to reset
 * `next_entry` to `0` after a drain, but to the index of the last `BUSY` entry plus one,
 * but that can potentially waste the whole queue.
 *
 * The possible state transitions are:
 *
 * | from    | to      | filler? | drainer? |
 * +---------+---------+---------+----------+
 * | FREE    | BUSY    | X       |          |
 * | BUSY    | FREE    | X       |          |
 * | BUSY    | USED    | X       |          |
 * | BUSY    | INVALID |         | X        |
 * | USED    | FREE    |         | X        |
 * | INVALID | FREE    | X       |          |
 *
 * `next_entry` can be incremented either by the filler thread that set the corresponding
 * entry to `BUSY`, or by another filler thread that's trying to get a `FREE` slot.  If that
 * other thread wasn't allowed to increment, it would block on the first filler thread.
 *
 * An entry's state, once it's set from `FREE` to `BUSY` by a filler thread, can only be
 * changed by that same thread or by the drainer.  The drainer can only set a `BUSY` entry
 * to `INVALID`, so it needs to be set to `FREE` again by the original filler thread.
 */

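/*
 * A minimal stand-alone model of this protocol (illustrative only, not part of
 * SGen; it uses C11 atomics in place of the Interlocked and mono_memory_*
 * primitives used below, and it omits the backed-off waiting and the overflow
 * handling of the real `add_stage_entry()`).
 */
#if 0
#include <stdatomic.h>

enum { S_FREE, S_BUSY, S_USED, S_INVALID };

typedef struct {
	_Atomic int state;
	void *obj;
} ModelEntry;

#define MODEL_NUM_ENTRIES 4

static ModelEntry model_entries [MODEL_NUM_ENTRIES];
static _Atomic int model_next = 0;

/* Filler side: returns the slot index on success, or -1 if the caller must
 * retry (slot race, invalidation, drain in progress) or drain the queue. */
static int
model_add (void *obj)
{
	int index = atomic_load (&model_next);
	if (index < 0 || index >= MODEL_NUM_ENTRIES)
		return -1;	/* draining or full */

	/* FREE -> BUSY: claim the slot. */
	int expected = S_FREE;
	if (!atomic_compare_exchange_strong (&model_entries [index].state, &expected, S_BUSY))
		return -1;	/* lost the race for the slot */

	/* Increment `model_next` only after the slot is BUSY; back off if a
	 * drain reset the index below ours in the meantime. */
	int expected_index = index;
	if (!atomic_compare_exchange_strong (&model_next, &expected_index, index + 1) && expected_index < index) {
		atomic_store (&model_entries [index].state, S_FREE);
		return -1;
	}

	model_entries [index].obj = obj;

	/* BUSY -> USED commits the entry; if the drainer made it INVALID,
	 * undo and let the caller retry. */
	expected = S_BUSY;
	if (atomic_compare_exchange_strong (&model_entries [index].state, &expected, S_USED))
		return index;

	model_entries [index].obj = NULL;
	atomic_store (&model_entries [index].state, S_FREE);	/* INVALID -> FREE */
	return -1;
}
#endif
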
#define STAGE_ENTRY_FREE	0
#define STAGE_ENTRY_BUSY	1
#define STAGE_ENTRY_USED	2
#define STAGE_ENTRY_INVALID	3

typedef struct {
	volatile gint32 state;
	GCObject *obj;
	void *user_data;
} StageEntry;

#define NUM_FIN_STAGE_ENTRIES	1024

static volatile gint32 next_fin_stage_entry = 0;
static StageEntry fin_stage_entries [NUM_FIN_STAGE_ENTRIES];

/*
 * This is used to lock the stage when processing is forced, i.e. when it's triggered by a
 * garbage collection.  In that case, the world is already stopped and there's only one
 * thread operating on the queue.
 */
static void
lock_stage_for_processing (volatile gint32 *next_entry)
{
	*next_entry = -1;
}

/*
 * When processing is triggered by an overflow, we don't want to take the GC lock
 * immediately, and then set `next_entry` to `-1`, because another thread might have drained
 * the queue in the mean time.  Instead, we make sure the overflow is still there, we
 * atomically set `next_entry`, and only once that happened do we take the GC lock.
 */
static gboolean
try_lock_stage_for_processing (int num_entries, volatile gint32 *next_entry)
{
	gint32 old = *next_entry;
	if (old < num_entries)
		return FALSE;
	return InterlockedCompareExchange (next_entry, -1, old) == old;
}

/* LOCKING: requires that the GC lock is held */
static void
process_stage_entries (int num_entries, volatile gint32 *next_entry, StageEntry *entries, void (*process_func) (GCObject*, void*, int))
{
	int i;

	/*
	 * This can happen if after setting `next_entry` to `-1` in
	 * `try_lock_stage_for_processing()`, a GC was triggered, which then drained the
	 * queue and reset `next_entry`.
	 *
	 * We have the GC lock now, so if it's still `-1`, we can't be interrupted by a GC.
	 */
	if (*next_entry != -1)
		return;

	for (i = 0; i < num_entries; ++i) {
		gint32 state;
	retry:
		state = entries [i].state;
		switch (state) {
		case STAGE_ENTRY_FREE:
		case STAGE_ENTRY_INVALID:
			continue;
		case STAGE_ENTRY_BUSY:
			/* BUSY -> INVALID */
			/*
			 * This must be done atomically, because the filler thread can set
			 * the entry to `USED`, in which case we must process it, so we must
			 * detect that eventuality.
			 */
			if (InterlockedCompareExchange (&entries [i].state, STAGE_ENTRY_INVALID, STAGE_ENTRY_BUSY) != STAGE_ENTRY_BUSY)
				goto retry;
			continue;
		case STAGE_ENTRY_USED:
			break;
		default:
			SGEN_ASSERT (0, FALSE, "Invalid stage entry state");
			break;
		}

		process_func (entries [i].obj, entries [i].user_data, i);

		entries [i].obj = NULL;
		entries [i].user_data = NULL;
		mono_memory_write_barrier ();

		/* USED -> FREE */
		/*
		 * This transition only happens here, so we don't have to do it atomically.
		 */
		entries [i].state = STAGE_ENTRY_FREE;
	}

	mono_memory_write_barrier ();

	*next_entry = 0;
}

#ifdef HEAVY_STATISTICS
static guint64 stat_overflow_abort = 0;
static guint64 stat_wait_for_processing = 0;
static guint64 stat_increment_other_thread = 0;
static guint64 stat_index_decremented = 0;
static guint64 stat_entry_invalidated = 0;
static guint64 stat_success = 0;
#endif

static int
add_stage_entry (int num_entries, volatile gint32 *next_entry, StageEntry *entries, GCObject *obj, void *user_data)
{
	gint32 index, new_next_entry, old_next_entry;
	gint32 previous_state;

 retry:
	for (;;) {
		index = *next_entry;
		if (index >= num_entries) {
			HEAVY_STAT (++stat_overflow_abort);
			return -1;
		}
		if (index < 0) {
			/*
			 * Backed-off waiting is way more efficient than even using a
			 * dedicated lock for this.
			 */
			while ((index = *next_entry) < 0) {
				/*
				 * This seems like a good value.  Determined by timing
				 * sgen-weakref-stress.exe.
				 */
				g_usleep (200);
				HEAVY_STAT (++stat_wait_for_processing);
			}
			continue;
		}
		/* FREE -> BUSY */
		if (entries [index].state != STAGE_ENTRY_FREE ||
				InterlockedCompareExchange (&entries [index].state, STAGE_ENTRY_BUSY, STAGE_ENTRY_FREE) != STAGE_ENTRY_FREE) {
			/*
			 * If we can't get the entry it must be because another thread got
			 * it first.  We don't want to wait for that thread to increment
			 * `next_entry`, so we try to do it ourselves.  Whether we succeed
			 * or not, we start over.
			 */
			if (*next_entry == index) {
				InterlockedCompareExchange (next_entry, index + 1, index);
				//g_print ("tried increment for other thread\n");
				HEAVY_STAT (++stat_increment_other_thread);
			}
			continue;
		}
		break;
	}

	/* state is BUSY now */
	mono_memory_write_barrier ();

	/*
	 * Incrementing `next_entry` must happen after setting the state to `BUSY`.
	 * If it were the other way around, it would be possible that after a filler
	 * incremented the index, other threads fill up the queue, the queue is
	 * drained, the original filler finally fills in the slot, but `next_entry`
	 * ends up at the start of the queue, and new entries are written in the
	 * queue in front of, not behind, the original filler's entry.
	 *
	 * We don't actually require that the CAS succeeds, but we do require that
	 * the value of `next_entry` is not lower than our index.  Since the drainer
	 * sets it to `-1`, that also takes care of the case that the drainer is
	 * draining the queue.
	 */
	old_next_entry = InterlockedCompareExchange (next_entry, index + 1, index);
	if (old_next_entry < index) {
		/* BUSY -> FREE */
		/* INVALID -> FREE */
		/*
		 * The state might still be `BUSY`, or the drainer could have set it
		 * to `INVALID`.  In either case, there's no point in CASing.  Set
		 * it to `FREE` and start over.
		 */
		entries [index].state = STAGE_ENTRY_FREE;
		HEAVY_STAT (++stat_index_decremented);
		goto retry;
	}

	SGEN_ASSERT (0, index >= 0 && index < num_entries, "Invalid index");

	entries [index].obj = obj;
	entries [index].user_data = user_data;

	mono_memory_write_barrier ();

	new_next_entry = *next_entry;
	mono_memory_read_barrier ();

	/* BUSY -> USED */
	/*
	 * A `BUSY` entry will either still be `BUSY` or the drainer will have set it to
	 * `INVALID`.  In the former case, we set it to `USED` and we're finished.  In the
	 * latter case, we reset it to `FREE` and start over.
	 */
	previous_state = InterlockedCompareExchange (&entries [index].state, STAGE_ENTRY_USED, STAGE_ENTRY_BUSY);
	if (previous_state == STAGE_ENTRY_BUSY) {
		SGEN_ASSERT (0, new_next_entry >= index || new_next_entry < 0, "Invalid next entry index - as long as we're busy, other thread can only increment or invalidate it");
		HEAVY_STAT (++stat_success);
		return index;
	}

	SGEN_ASSERT (0, previous_state == STAGE_ENTRY_INVALID, "Invalid state transition - other thread can only make busy state invalid");
	entries [index].obj = NULL;
	entries [index].user_data = NULL;
	mono_memory_write_barrier ();
	/* INVALID -> FREE */
	entries [index].state = STAGE_ENTRY_FREE;

	HEAVY_STAT (++stat_entry_invalidated);

	goto retry;
}

/* LOCKING: requires that the GC lock is held */
static void
process_fin_stage_entry (GCObject *obj, void *user_data, int index)
{
	if (ptr_in_nursery (obj))
		register_for_finalization (obj, user_data, GENERATION_NURSERY);
	else
		register_for_finalization (obj, user_data, GENERATION_OLD);
}

/* LOCKING: requires that the GC lock is held */
void
sgen_process_fin_stage_entries (void)
{
	lock_stage_for_processing (&next_fin_stage_entry);
	process_stage_entries (NUM_FIN_STAGE_ENTRIES, &next_fin_stage_entry, fin_stage_entries, process_fin_stage_entry);
}

void
sgen_object_register_for_finalization (GCObject *obj, void *user_data)
{
	while (add_stage_entry (NUM_FIN_STAGE_ENTRIES, &next_fin_stage_entry, fin_stage_entries, obj, user_data) == -1) {
		if (try_lock_stage_for_processing (NUM_FIN_STAGE_ENTRIES, &next_fin_stage_entry)) {
			LOCK_GC;
			process_stage_entries (NUM_FIN_STAGE_ENTRIES, &next_fin_stage_entry, fin_stage_entries, process_fin_stage_entry);
			UNLOCK_GC;
		}
	}
}

/* LOCKING: requires that the GC lock is held */
static int
finalizers_with_predicate (SgenObjectPredicateFunc predicate, void *user_data, GCObject **out_array, int out_size, SgenHashTable *hash_table)
{
	GCObject *object;
	gpointer dummy G_GNUC_UNUSED;
	int count = 0;

	if (no_finalize || !out_size || !out_array)
		return 0;

	SGEN_HASH_TABLE_FOREACH (hash_table, object, dummy) {
		object = tagged_object_get_object (object);

		if (predicate (object, user_data)) {
			/* remove and put in out_array */
			SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);
			out_array [count ++] = object;
			SGEN_LOG (5, "Collecting object for finalization: %p (%s) (%d)", object, sgen_client_vtable_get_name (SGEN_LOAD_VTABLE (object)), sgen_hash_table_num_entries (hash_table));
			if (count == out_size)
				break;
		}
	} SGEN_HASH_TABLE_FOREACH_END;

	return count;
}

/**
 * sgen_gather_finalizers_if:
 * @predicate: predicate function
 * @user_data: predicate function data argument
 * @out_array: output array
 * @out_size: size of output array
 *
 * Store inside @out_array up to @out_size objects that match @predicate.  Returns the number
 * of stored items.  Can be called repeatedly until it returns 0.
 *
 * The items are removed from the finalizer data structure, so the caller is supposed
 * to finalize them.
 *
 * @out_array must be on the stack, or registered as a root, to allow the GC to know the
 * objects are still alive.
 */
int
sgen_gather_finalizers_if (SgenObjectPredicateFunc predicate, void *user_data, GCObject **out_array, int out_size)
{
	int result;

	LOCK_GC;
	sgen_process_fin_stage_entries ();
	result = finalizers_with_predicate (predicate, user_data, (GCObject**)out_array, out_size, &minor_finalizable_hash);
	if (result < out_size) {
		result += finalizers_with_predicate (predicate, user_data, (GCObject**)out_array + result, out_size - result,
			&major_finalizable_hash);
	}
	UNLOCK_GC;
	return result;
}

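/*
 * Example usage (sketch; `is_in_unloading_domain` and `run_finalizer` are
 * hypothetical client-side helpers, not part of this file):
 *
 *	GCObject *objs [64];
 *	int i, n;
 *	while ((n = sgen_gather_finalizers_if (is_in_unloading_domain, domain, objs, 64)) > 0) {
 *		for (i = 0; i < n; ++i)
 *			run_finalizer (objs [i]);
 *	}
 */
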
void
sgen_remove_finalizers_if (SgenObjectPredicateFunc predicate, void *user_data, int generation)
{
	SgenHashTable *hash_table = get_finalize_entry_hash_table (generation);
	GCObject *object;
	gpointer dummy G_GNUC_UNUSED;

	SGEN_HASH_TABLE_FOREACH (hash_table, object, dummy) {
		object = tagged_object_get_object (object);

		if (predicate (object, user_data))
			SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);
	} SGEN_HASH_TABLE_FOREACH_END;
}

#ifdef HEAVY_STATISTICS
static volatile guint64 stat_gc_handles_allocated = 0;
static volatile guint64 stat_gc_handles_max_allocated = 0;
#endif

#define BUCKETS (32 - MONO_GC_HANDLE_TYPE_SHIFT)
#define MIN_BUCKET_BITS (5)
#define MIN_BUCKET_SIZE (1 << MIN_BUCKET_BITS)

/*
 * A table of GC handle data, implementing a simple lock-free bitmap allocator.
 *
 * 'entries' is an array of pointers to buckets of increasing size.  The first
 * bucket has size 'MIN_BUCKET_SIZE', and each bucket is twice the size of the
 * previous one:
 *
 *           |-------|-- MIN_BUCKET_SIZE
 *    [0] -> xxxxxxxx
 *    [1] -> xxxxxxxxxxxxxxxx
 *    [2] -> xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
 *    ...
 *
 * The size of the spine, 'BUCKETS', is chosen so that the maximum number of
 * entries is no less than the maximum index value of a GC handle.
 *
 * Each entry in a bucket is a pointer with two tag bits: if
 * 'GC_HANDLE_OCCUPIED' returns true for a slot, then the slot is occupied; if
 * so, then 'GC_HANDLE_VALID' gives whether the entry refers to a valid (1) or
 * NULL (0) object reference.  If the reference is valid, then the pointer is an
 * object pointer.  If the reference is NULL, and 'GC_HANDLE_TYPE_IS_WEAK' is
 * true for 'type', then the pointer is a metadata pointer--this allows us to
 * retrieve the domain ID of an expired weak reference in Mono.
 *
 * Finally, 'slot_hint' denotes the position of the last allocation, so that the
 * whole array needn't be searched on every allocation.
 */
typedef struct {
	volatile gpointer *volatile entries [BUCKETS];
	volatile guint32 capacity;
	volatile guint32 slot_hint;
	guint8 type;
} HandleData;

static inline guint
bucket_size (guint index)
{
	return 1 << (index + MIN_BUCKET_BITS);
}

/* Computes floor(log2(index + MIN_BUCKET_SIZE)) - MIN_BUCKET_BITS, giving the index
 * of the bucket containing a slot.
 */
static inline guint
index_bucket (guint index)
{
#ifdef __GNUC__
	return CHAR_BIT * sizeof (index) - __builtin_clz (index + MIN_BUCKET_SIZE) - 1 - MIN_BUCKET_BITS;
#else
	guint count = 0;
	index += MIN_BUCKET_SIZE;
	while (index) {
		++count;
		index >>= 1;
	}
	return count - 1 - MIN_BUCKET_BITS;
#endif
}

/* Maps a slot index to its bucket index and offset within that bucket. */
static inline void
bucketize (guint index, guint *bucket, guint *offset)
{
	*bucket = index_bucket (index);
	*offset = index - bucket_size (*bucket) + MIN_BUCKET_SIZE;
}

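/*
 * Worked example (illustrative): with MIN_BUCKET_BITS == 5, the buckets hold
 * 32, 64, 128, ... slots, so slot indices map as follows:
 *
 *	index  0 -> bucket 0, offset  0
 *	index 31 -> bucket 0, offset 31
 *	index 32 -> bucket 1, offset  0
 *	index 95 -> bucket 1, offset 63
 *	index 96 -> bucket 2, offset  0
 */
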
static inline gboolean
try_set_slot (volatile gpointer *slot, GCObject *obj, gpointer old, GCHandleType type)
{
	if (obj)
		return InterlockedCompareExchangePointer (slot, MONO_GC_HANDLE_OBJECT_POINTER (obj, GC_HANDLE_TYPE_IS_WEAK (type)), old) == old;
	return InterlockedCompareExchangePointer (slot, MONO_GC_HANDLE_METADATA_POINTER (sgen_client_default_metadata (), GC_HANDLE_TYPE_IS_WEAK (type)), old) == old;
}

/* Try to claim a slot by setting its occupied bit. */
static inline gboolean
try_occupy_slot (HandleData *handles, guint bucket, guint offset, GCObject *obj, gboolean track)
{
	volatile gpointer *link_addr = &(handles->entries [bucket] [offset]);
	if (MONO_GC_HANDLE_OCCUPIED (*link_addr))
		return FALSE;
	return try_set_slot (link_addr, obj, NULL, handles->type);
}

#define EMPTY_HANDLE_DATA(type) { { NULL }, 0, 0, (type) }

/* weak and weak-track arrays will be allocated in malloc memory */
static HandleData gc_handles [] = {
	EMPTY_HANDLE_DATA (HANDLE_WEAK),
	EMPTY_HANDLE_DATA (HANDLE_WEAK_TRACK),
	EMPTY_HANDLE_DATA (HANDLE_NORMAL),
	EMPTY_HANDLE_DATA (HANDLE_PINNED)
};

static HandleData*
gc_handles_for_type (GCHandleType type)
{
	g_assert (type < HANDLE_TYPE_MAX);
	return &gc_handles [type];
}

/* This assumes that the world is stopped. */
void
sgen_mark_normal_gc_handles (void *addr, SgenUserMarkFunc mark_func, void *gc_data)
{
	HandleData *handles = gc_handles_for_type (HANDLE_NORMAL);
	size_t bucket, offset;
	const guint max_bucket = index_bucket (handles->capacity);
	for (bucket = 0; bucket < max_bucket; ++bucket) {
		volatile gpointer *entries = handles->entries [bucket];
		for (offset = 0; offset < bucket_size (bucket); ++offset) {
			volatile gpointer *entry = &entries [offset];
			gpointer hidden = *entry;
			gpointer revealed = MONO_GC_REVEAL_POINTER (hidden, FALSE);
			if (!MONO_GC_HANDLE_IS_OBJECT_POINTER (hidden))
				continue;
			mark_func ((MonoObject **)&revealed, gc_data);
			*entry = MONO_GC_HANDLE_OBJECT_POINTER (revealed, FALSE);
		}
	}
}

/* Returns the index of the first unoccupied slot in [begin, end), or -1 if there is none. */
static gint32
handle_data_find_unset (HandleData *handles, guint32 begin, guint32 end)
{
	guint32 index;
	gint delta = begin < end ? +1 : -1;
	for (index = begin; index < end; index += delta) {
		guint bucket, offset;
		volatile gpointer *entries;
		bucketize (index, &bucket, &offset);
		entries = handles->entries [bucket];
		if (!MONO_GC_HANDLE_OCCUPIED (entries [offset]))
			return index;
	}
	return -1;
}

/* Adds a bucket if necessary and possible. */
static void
handle_data_grow (HandleData *handles, guint32 old_capacity)
{
	const guint new_bucket = index_bucket (old_capacity);
	const guint32 growth = bucket_size (new_bucket);
	const guint32 new_capacity = old_capacity + growth;
	const size_t new_bucket_size = sizeof (**handles->entries) * growth;
	gpointer *entries;
	if (handles->capacity >= new_capacity)
		return;
	entries = g_malloc0 (new_bucket_size);
	if (handles->type == HANDLE_PINNED)
		sgen_register_root ((char *)entries, new_bucket_size, SGEN_DESCRIPTOR_NULL, ROOT_TYPE_PINNED, MONO_ROOT_SOURCE_GC_HANDLE, "pinned gc handles");
	if (InterlockedCompareExchangePointer ((volatile gpointer *)&handles->entries [new_bucket], entries, NULL) == NULL) {
		if (InterlockedCompareExchange ((volatile gint32 *)&handles->capacity, new_capacity, old_capacity) != old_capacity)
			g_assert_not_reached ();
		handles->slot_hint = old_capacity;
		mono_memory_write_barrier ();
		return;
	}
	/* Someone beat us to the allocation. */
	if (handles->type == HANDLE_PINNED)
		sgen_deregister_root ((char *)entries);
	g_free (entries);
}

static guint32
alloc_handle (HandleData *handles, GCObject *obj, gboolean track)
{
	gint32 index;
	guint32 res;
	guint bucket, offset;
	guint32 capacity, slot_hint;

	if (!handles->capacity)
		handle_data_grow (handles, 0);

retry:
	capacity = handles->capacity;
	slot_hint = handles->slot_hint;
	index = handle_data_find_unset (handles, slot_hint, capacity);
	if (index == -1)
		index = handle_data_find_unset (handles, 0, slot_hint);
	if (index == -1) {
		handle_data_grow (handles, capacity);
		goto retry;
	}

	handles->slot_hint = index;
	bucketize (index, &bucket, &offset);
	if (!try_occupy_slot (handles, bucket, offset, obj, track))
		goto retry;

#ifdef HEAVY_STATISTICS
	InterlockedIncrement64 ((volatile gint64 *)&stat_gc_handles_allocated);
	if (stat_gc_handles_allocated > stat_gc_handles_max_allocated)
		stat_gc_handles_max_allocated = stat_gc_handles_allocated;
#endif
	if (obj && MONO_GC_HANDLE_TYPE_IS_WEAK (handles->type))
		binary_protocol_dislink_add ((gpointer)&handles->entries [bucket] [offset], obj, track);
	/* Ensure that a GC handle cannot be given to another thread without the slot having been set. */
	mono_memory_write_barrier ();
	res = MONO_GC_HANDLE (index, handles->type);
	sgen_client_gchandle_created (handles->type, obj, res);
	return res;
}

static gboolean
object_older_than (GCObject *object, int generation)
{
	return generation == GENERATION_NURSERY && !sgen_ptr_in_nursery (object);
}

/*
 * Maps a function over all GC handles.
 * This assumes that the world is stopped!
 */
void
sgen_gchandle_iterate (GCHandleType handle_type, int max_generation, gpointer callback (gpointer, GCHandleType, int, gpointer), gpointer user)
{
	HandleData *handle_data = gc_handles_for_type (handle_type);
	size_t bucket, offset;
	guint max_bucket = index_bucket (handle_data->capacity);
	/* If a new bucket has been allocated, but the capacity has not yet been
	 * increased, nothing can yet have been allocated in the bucket because the
	 * world is stopped, so we shouldn't miss any handles during iteration.
	 */
	for (bucket = 0; bucket < max_bucket; ++bucket) {
		volatile gpointer *entries = handle_data->entries [bucket];
		for (offset = 0; offset < bucket_size (bucket); ++offset) {
			gpointer hidden = entries [offset];
			gpointer result;
			/* Table must contain no garbage pointers. */
			gboolean occupied = MONO_GC_HANDLE_OCCUPIED (hidden);
			g_assert (hidden ? occupied : !occupied);
			if (!occupied) // || !MONO_GC_HANDLE_VALID (hidden))
				continue;
			result = callback (hidden, handle_type, max_generation, user);
			if (result) {
				SGEN_ASSERT (0, MONO_GC_HANDLE_OCCUPIED (result), "Why did the callback return an unoccupied entry?");
				// FIXME: add the dislink_update protocol call here
			} else {
				// FIXME: enable this for weak links
				//binary_protocol_dislink_remove ((gpointer)&handles->entries [bucket] [offset], handles->type == HANDLE_WEAK_TRACK);
				HEAVY_STAT (InterlockedDecrement64 ((volatile gint64 *)&stat_gc_handles_allocated));
			}
			entries [offset] = result;
		}
	}
}

/**
 * mono_gchandle_new:
 * @obj: managed object to get a handle for
 * @pinned: whether the object should be pinned
 *
 * This returns a handle that wraps the object; it is used to keep a
 * reference to a managed object from the unmanaged world, preventing the
 * object from being disposed.
 *
 * If @pinned is false, the address of the object can not be obtained; if it
 * is true, the address of the object can be obtained.  This will also pin
 * the object, so a moving garbage collector will not be able to move the
 * object.
 *
 * Returns: a handle that can be used to access the object from
 * unmanaged code.
 */
guint32
mono_gchandle_new (GCObject *obj, gboolean pinned)
{
	return alloc_handle (gc_handles_for_type (pinned ? HANDLE_PINNED : HANDLE_NORMAL), obj, FALSE);
}

/**
 * mono_gchandle_new_weakref:
 * @obj: managed object to get a handle for
 * @track_resurrection: whether the handle should track the object through finalization
 *
 * This returns a weak handle that wraps the object; it is used to
 * keep a reference to a managed object from the unmanaged world.
 * Unlike mono_gchandle_new, the object can be reclaimed by the
 * garbage collector.  In this case the value of the GCHandle will be
 * set to zero.
 *
 * If @track_resurrection is true, the handle remains valid while the
 * object is being finalized and only expires once the object has been
 * collected for good.
 *
 * Returns: a handle that can be used to access the object from
 * unmanaged code.
 */
guint32
mono_gchandle_new_weakref (GCObject *obj, gboolean track_resurrection)
{
	return alloc_handle (gc_handles_for_type (track_resurrection ? HANDLE_WEAK_TRACK : HANDLE_WEAK), obj, track_resurrection);
}

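/*
 * Typical weak-handle lifecycle (sketch; `use_object` is a hypothetical
 * client-side helper):
 *
 *	guint32 handle = mono_gchandle_new_weakref (obj, FALSE);
 *	...
 *	GCObject *target = mono_gchandle_get_target (handle);
 *	if (target)			// NULL once the object has been collected
 *		use_object (target);
 *	mono_gchandle_free (handle);
 */
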
static GCObject*
link_get (volatile gpointer *link_addr, gboolean is_weak)
{
	void *volatile *link_addr_volatile;
	void *ptr;
	GCObject *obj;

retry:
	link_addr_volatile = link_addr;
	ptr = (void*)*link_addr_volatile;
	/*
	 * At this point we have a hidden pointer.  If the GC runs
	 * here, it will not recognize the hidden pointer as a
	 * reference, and if the object behind it is not referenced
	 * elsewhere, it will be freed.  Once the world is restarted
	 * we reveal the pointer, giving us a pointer to a freed
	 * object.  To make sure we don't return it, we load the
	 * hidden pointer again.  If it's still the same, we can be
	 * sure the object reference is valid.
	 */
	if (ptr && MONO_GC_HANDLE_IS_OBJECT_POINTER (ptr))
		obj = (GCObject *)MONO_GC_REVEAL_POINTER (ptr, is_weak);
	else
		return NULL;

	/* Note [dummy use]:
	 *
	 * If a GC happens here, obj needs to be on the stack or in a
	 * register, so we need to prevent this from being reordered
	 * wrt the check.
	 */
	mono_gc_dummy_use (obj);
	mono_memory_barrier ();

	if (is_weak)
		sgen_client_ensure_weak_gchandles_accessible ();

	if ((void*)*link_addr_volatile != ptr)
		goto retry;

	return obj;
}

/**
 * mono_gchandle_get_target:
 * @gchandle: a GCHandle's handle.
 *
 * The handle was previously created by calling mono_gchandle_new or
 * mono_gchandle_new_weakref.
 *
 * Returns a pointer to the MonoObject represented by the handle or
 * NULL for a collected object if using a weakref handle.
 */
GCObject*
mono_gchandle_get_target (guint32 gchandle)
{
	guint index = MONO_GC_HANDLE_SLOT (gchandle);
	guint type = MONO_GC_HANDLE_TYPE (gchandle);
	HandleData *handles = gc_handles_for_type (type);
	guint bucket, offset;
	g_assert (index < handles->capacity);
	bucketize (index, &bucket, &offset);
	return link_get (&handles->entries [bucket] [offset], MONO_GC_HANDLE_TYPE_IS_WEAK (type));
}

void
sgen_gchandle_set_target (guint32 gchandle, GCObject *obj)
{
	guint index = MONO_GC_HANDLE_SLOT (gchandle);
	guint type = MONO_GC_HANDLE_TYPE (gchandle);
	HandleData *handles = gc_handles_for_type (type);
	gboolean track = handles->type == HANDLE_WEAK_TRACK;
	guint bucket, offset;
	gpointer slot;

	g_assert (index < handles->capacity);
	bucketize (index, &bucket, &offset);

retry:
	slot = handles->entries [bucket] [offset];
	g_assert (MONO_GC_HANDLE_OCCUPIED (slot));
	if (!try_set_slot (&handles->entries [bucket] [offset], obj, slot, MONO_GC_HANDLE_TYPE_IS_WEAK (handles->type)))
		goto retry;
	if (MONO_GC_HANDLE_IS_OBJECT_POINTER (slot))
		binary_protocol_dislink_remove ((gpointer)&handles->entries [bucket] [offset], track);
	if (obj)
		binary_protocol_dislink_add ((gpointer)&handles->entries [bucket] [offset], obj, track);
}

static gpointer
mono_gchandle_slot_metadata (volatile gpointer *slot_addr, gboolean is_weak)
{
	gpointer slot;
	gpointer metadata;

retry:
	slot = *slot_addr;
	if (!MONO_GC_HANDLE_OCCUPIED (slot))
		return NULL;
	if (MONO_GC_HANDLE_IS_OBJECT_POINTER (slot)) {
		GCObject *obj = MONO_GC_REVEAL_POINTER (slot, is_weak);
		/* See note [dummy use]. */
		mono_gc_dummy_use (obj);
		/*
		 * FIXME: The compiler could technically not carry a reference to obj around
		 * at this point and recompute it later, in which case we would still use
		 * a stale pointer.
		 */
		if (*slot_addr != slot)
			goto retry;
		return sgen_client_metadata_for_object (obj);
	}
	metadata = MONO_GC_REVEAL_POINTER (slot, is_weak);
	/* See note [dummy use]. */
	mono_gc_dummy_use (metadata);
	if (*slot_addr != slot)
		goto retry;
	return metadata;
}

gpointer
sgen_gchandle_get_metadata (guint32 gchandle)
{
	guint index = MONO_GC_HANDLE_SLOT (gchandle);
	guint type = MONO_GC_HANDLE_TYPE (gchandle);
	HandleData *handles = gc_handles_for_type (type);
	guint bucket, offset;
	if (index >= handles->capacity)
		return NULL;
	bucketize (index, &bucket, &offset);
	return mono_gchandle_slot_metadata (&handles->entries [bucket] [offset], MONO_GC_HANDLE_TYPE_IS_WEAK (type));
}

/**
 * mono_gchandle_free:
 * @gchandle: a GCHandle's handle.
 *
 * Frees the @gchandle handle.  If there are no outstanding
 * references, the garbage collector can reclaim the memory of the
 * object wrapped.
 */
void
mono_gchandle_free (guint32 gchandle)
{
	guint index = MONO_GC_HANDLE_SLOT (gchandle);
	guint type = MONO_GC_HANDLE_TYPE (gchandle);
	HandleData *handles = gc_handles_for_type (type);
	guint bucket, offset;
	bucketize (index, &bucket, &offset);
	if (index < handles->capacity && MONO_GC_HANDLE_OCCUPIED (handles->entries [bucket] [offset])) {
		if (MONO_GC_HANDLE_TYPE_IS_WEAK (handles->type))
			binary_protocol_dislink_remove ((gpointer)&handles->entries [bucket] [offset], handles->type == HANDLE_WEAK_TRACK);
		handles->entries [bucket] [offset] = NULL;
		HEAVY_STAT (InterlockedDecrement64 ((volatile gint64 *)&stat_gc_handles_allocated));
	} else {
		/* print a warning? */
	}
	sgen_client_gchandle_destroyed (handles->type, gchandle);
}

/*
 * Returns whether to remove the link from its hash.
 */
static gpointer
null_link_if_necessary (gpointer hidden, GCHandleType handle_type, int max_generation, gpointer user)
{
	const gboolean is_weak = GC_HANDLE_TYPE_IS_WEAK (handle_type);
	ScanCopyContext *ctx = (ScanCopyContext *)user;
	GCObject *obj;
	GCObject *copy;

	if (!MONO_GC_HANDLE_VALID (hidden))
		return hidden;

	obj = MONO_GC_REVEAL_POINTER (hidden, MONO_GC_HANDLE_TYPE_IS_WEAK (handle_type));
	SGEN_ASSERT (0, obj, "Why is the hidden pointer NULL?");

	if (object_older_than (obj, max_generation))
		return hidden;

	if (major_collector.is_object_live (obj))
		return hidden;

	/* Clear link if object is ready for finalization.  This check may be redundant wrt is_object_live(). */
	if (sgen_gc_is_object_ready_for_finalization (obj))
		return MONO_GC_HANDLE_METADATA_POINTER (sgen_client_metadata_for_object (obj), is_weak);

	copy = obj;
	ctx->ops->copy_or_mark_object (&copy, ctx->queue);

	/* binary_protocol_dislink_update (hidden_entry, copy, handle_type == HANDLE_WEAK_TRACK); */

	ctx->ops->copy_or_mark_object (&copy, ctx->queue);
	SGEN_ASSERT (0, copy, "Why couldn't we copy the object?");
	/* Update link if object was moved. */
	return MONO_GC_HANDLE_OBJECT_POINTER (copy, is_weak);
}

/* LOCKING: requires that the GC lock is held */
void
sgen_null_link_in_range (int generation, ScanCopyContext ctx, gboolean track)
{
	sgen_gchandle_iterate (track ? HANDLE_WEAK_TRACK : HANDLE_WEAK, generation, null_link_if_necessary, &ctx);
}

typedef struct {
	SgenObjectPredicateFunc predicate;
	gpointer data;
} WeakLinkAlivePredicateClosure;

static gpointer
null_link_if (gpointer hidden, GCHandleType handle_type, int max_generation, gpointer user)
{
	/* Strictly speaking, function pointers are not guaranteed to have the same size as data pointers. */
	WeakLinkAlivePredicateClosure *closure = (WeakLinkAlivePredicateClosure *)user;
	GCObject *obj;

	if (!MONO_GC_HANDLE_VALID (hidden))
		return hidden;

	obj = MONO_GC_REVEAL_POINTER (hidden, MONO_GC_HANDLE_TYPE_IS_WEAK (handle_type));
	SGEN_ASSERT (0, obj, "Why is the hidden pointer NULL?");

	if (object_older_than (obj, max_generation))
		return hidden;

	if (closure->predicate (obj, closure->data))
		return MONO_GC_HANDLE_METADATA_POINTER (sgen_client_default_metadata (), GC_HANDLE_TYPE_IS_WEAK (handle_type));

	return hidden;
}

/* LOCKING: requires that the GC lock is held */
void
sgen_null_links_if (SgenObjectPredicateFunc predicate, void *data, int generation, gboolean track)
{
	WeakLinkAlivePredicateClosure closure = { predicate, data };
	sgen_gchandle_iterate (track ? HANDLE_WEAK_TRACK : HANDLE_WEAK, generation, null_link_if, &closure);
}

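/*
 * Example usage (sketch; `object_is_in_domain` is a hypothetical predicate):
 * null out both the non-tracking and the tracking weak links to objects of an
 * unloading domain:
 *
 *	sgen_null_links_if (object_is_in_domain, domain, GENERATION_OLD, FALSE);
 *	sgen_null_links_if (object_is_in_domain, domain, GENERATION_OLD, TRUE);
 */
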
void
sgen_init_fin_weak_hash (void)
{
#ifdef HEAVY_STATISTICS
	mono_counters_register ("FinWeak Successes", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_success);
	mono_counters_register ("FinWeak Overflow aborts", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_overflow_abort);
	mono_counters_register ("FinWeak Wait for processing", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_wait_for_processing);
	mono_counters_register ("FinWeak Increment other thread", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_increment_other_thread);
	mono_counters_register ("FinWeak Index decremented", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_index_decremented);
	mono_counters_register ("FinWeak Entry invalidated", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_entry_invalidated);

	mono_counters_register ("GC handles allocated", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_gc_handles_allocated);
	mono_counters_register ("max GC handles allocated", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_gc_handles_max_allocated);
#endif
}

#endif /* HAVE_SGEN_GC */