2 * metadata/gc.c: GC icalls.
4 * Author: Paolo Molaro <lupus@ximian.com>
6 * Copyright 2002-2003 Ximian, Inc (http://www.ximian.com)
7 * Copyright 2004-2009 Novell, Inc (http://www.novell.com)
8 * Copyright 2012 Xamarin Inc (http://www.xamarin.com)
15 #include <mono/metadata/gc-internal.h>
16 #include <mono/metadata/mono-gc.h>
17 #include <mono/metadata/threads.h>
18 #include <mono/metadata/tabledefs.h>
19 #include <mono/metadata/exception.h>
20 #include <mono/metadata/profiler-private.h>
21 #include <mono/metadata/domain-internals.h>
22 #include <mono/metadata/class-internals.h>
23 #include <mono/metadata/metadata-internals.h>
24 #include <mono/metadata/mono-mlist.h>
25 #include <mono/metadata/threads-types.h>
26 #include <mono/metadata/threadpool-ms.h>
27 #include <mono/sgen/sgen-conf.h>
28 #include <mono/sgen/sgen-gc.h>
29 #include <mono/utils/mono-logger-internal.h>
30 #include <mono/metadata/gc-internal.h>
31 #include <mono/metadata/marshal.h> /* for mono_delegate_free_ftnptr () */
32 #include <mono/metadata/attach.h>
33 #include <mono/metadata/console-io.h>
34 #include <mono/utils/mono-semaphore.h>
35 #include <mono/utils/mono-memory-model.h>
36 #include <mono/utils/mono-counters.h>
37 #include <mono/utils/mono-time.h>
38 #include <mono/utils/dtrace.h>
39 #include <mono/utils/mono-threads.h>
40 #include <mono/utils/atomic.h>
/* NOTE(review): lines appear elided throughout this chunk (struct fields, braces,
 * some statements); comments below describe only what is visible here. */
/* Pending request to finalize all objects of one appdomain; queued on
 * domains_to_finalize and consumed by the finalizer thread. */
46 typedef struct DomainFinalizationReq {
49 } DomainFinalizationReq;
/* TRUE when GC/finalization has been disabled -- presumably at startup; initialization not visible here, confirm. */
51 static gboolean gc_disabled = FALSE;
/* TRUE while the root appdomain is being finalized (set in mono_domain_finalize). */
53 static gboolean finalizing_root_domain = FALSE;
/* Debug switches: log finalizer activity / skip running finalizers entirely. */
55 gboolean log_finalizers = FALSE;
56 gboolean do_not_finalize = FALSE;
/* finalizer_mutex protects finalizer-related shared state
 * (threads_to_finalize, domains_to_finalize, exited_cond). */
58 #define mono_finalizer_lock() mono_mutex_lock (&finalizer_mutex)
59 #define mono_finalizer_unlock() mono_mutex_unlock (&finalizer_mutex)
60 static mono_mutex_t finalizer_mutex;
61 static mono_mutex_t reference_queue_mutex;
/* Appdomains queued for finalization (list of DomainFinalizationReq*) and
 * threadpool threads whose finalization is deferred to shutdown. */
63 static GSList *domains_to_finalize= NULL;
64 static MonoMList *threads_to_finalize = NULL;
66 static gboolean finalizer_thread_exited;
67 /* Uses finalizer_mutex */
68 static mono_cond_t exited_cond;
/* The dedicated finalizer thread; NULL until it has been started. */
70 static MonoInternalThread *gc_thread;
/* Forward declarations for static helpers defined later in this file. */
72 static void object_register_finalizer (MonoObject *obj, void (*callback)(void *, void*));
74 static void mono_gchandle_set_target (guint32 gchandle, MonoObject *obj);
76 static void reference_queue_proccess_all (void);
77 static void mono_reference_queue_cleanup (void);
78 static void reference_queue_clear_for_domain (MonoDomain *domain);
79 static HANDLE pending_done_event;
/* Waits on a win32-style event handle while marked as blocking, so the GC/thread
 * suspend machinery does not stall on this thread.
 * NOTE(review): return type, braces and the trailing return are elided in this view. */
82 guarded_wait (HANDLE handle, guint32 timeout, gboolean alertable)
86 MONO_PREPARE_BLOCKING;
87 result = WaitForSingleObjectEx (handle, timeout, alertable);
/* Queues a threadpool thread so its finalizer runs at shutdown instead of now
 * (drained by mono_gc_finalize_threadpool_threads). The list is registered as a
 * GC root on first use so queued threads stay alive until then. */
94 add_thread_to_finalize (MonoInternalThread *thread)
96 mono_finalizer_lock ();
97 if (!threads_to_finalize)
98 MONO_GC_REGISTER_ROOT_SINGLE (threads_to_finalize, MONO_ROOT_SOURCE_FINALIZER_QUEUE, "finalizable threads list");
99 threads_to_finalize = mono_mlist_append (threads_to_finalize, (MonoObject*)thread);
100 mono_finalizer_unlock ();
103 static gboolean suspend_finalizers = FALSE;
/*
 * Runs the finalizer of a single object. Called by the GC for each finalizable
 * object; 'data' is the byte offset from the GC-visible address to the real
 * object start (see the note above object_register_finalizer).
 * NOTE(review): several lines (braces, early returns, variable declarations)
 * are elided in this view.
 */
105 * actually, we might want to queue the finalize requests in a separate thread,
106 * but we need to be careful about the execution domain of the thread...
109 mono_gc_run_finalize (void *obj, void *data)
114 MonoObject *exc = NULL;
119 MonoMethod* finalizer = NULL;
120 MonoDomain *caller_domain = mono_domain_get ();
122 RuntimeInvokeFunction runtime_invoke;
124 // This function is called from the innards of the GC, so our best alternative for now is to do polling here
125 MONO_SUSPEND_CHECK ();
/* Recover the real object pointer from the GC-supplied base + offset. */
127 o = (MonoObject*)((char*)obj + GPOINTER_TO_UINT (data));
130 g_log ("mono-gc-finalizers", G_LOG_LEVEL_DEBUG, "<%s at %p> Starting finalizer checks.", o->vtable->klass->name, o);
132 if (suspend_finalizers)
135 domain = o->vtable->domain;
/* Check the per-domain hash to see whether this object still has a pending finalizer. */
138 mono_domain_finalizers_lock (domain);
140 o2 = g_hash_table_lookup (domain->finalizable_objects_hash, o);
142 mono_domain_finalizers_unlock (domain);
145 /* Already finalized somehow */
149 /* make sure the finalizer is not called again if the object is resurrected */
150 object_register_finalizer (obj, NULL);
153 g_log ("mono-gc-finalizers", G_LOG_LEVEL_MESSAGE, "<%s at %p> Registered finalizer as processed.", o->vtable->klass->name, o);
155 if (o->vtable->klass == mono_defaults.internal_thread_class) {
156 MonoInternalThread *t = (MonoInternalThread*)o;
158 if (mono_gc_is_finalizer_internal_thread (t))
159 /* Avoid finalizing ourselves */
162 if (t->threadpool_thread && finalizing_root_domain) {
163 /* Don't finalize threadpool threads when
164 shutting down - they're finalized when the
165 threadpool shuts down. */
166 add_thread_to_finalize (t);
171 if (o->vtable->klass->image == mono_defaults.corlib && !strcmp (o->vtable->klass->name, "DynamicMethod") && finalizing_root_domain) {
173 * These can't be finalized during unloading/shutdown, since that would
174 * free the native code which can still be referenced by other
176 * FIXME: This is not perfect, objects dying at the same time as
177 * dynamic methods can still reference them even when !shutdown.
182 if (mono_runtime_get_no_exec ())
185 /* speedup later... and use a timeout */
186 /* g_print ("Finalize run on %p %s.%s\n", o, mono_object_class (o)->name_space, mono_object_class (o)->name); */
188 /* Use _internal here, since this thread can enter a doomed appdomain */
189 mono_domain_set_internal (mono_object_domain (o));
191 /* delegates that have a native function pointer allocated are
192 * registered for finalization, but they don't have a Finalize
193 * method, because in most cases it's not needed and it's just a waste.
195 if (o->vtable->klass->delegate) {
196 MonoDelegate* del = (MonoDelegate*)o;
197 if (del->delegate_trampoline)
198 mono_delegate_free_ftnptr ((MonoDelegate*)o);
199 mono_domain_set_internal (caller_domain);
203 finalizer = mono_class_get_finalizer (o->vtable->klass);
205 /* If object has a CCW but has no finalizer, it was only
206 * registered for finalization in order to free the CCW.
207 * Else it needs the regular finalizer run.
208 * FIXME: what to do about resurrection and suppression
209 * of finalizer on object with CCW.
211 if (mono_marshal_free_ccw (o) && !finalizer) {
212 mono_domain_set_internal (caller_domain);
217 * To avoid the locking plus the other overhead of mono_runtime_invoke (),
218 * create and precompile a wrapper which calls the finalize method using
222 g_log ("mono-gc-finalizers", G_LOG_LEVEL_MESSAGE, "<%s at %p> Compiling finalizer.", o->vtable->klass->name, o);
/* Lazily compile a shared runtime-invoke wrapper for Object.Finalize, cached per-domain. */
224 if (!domain->finalize_runtime_invoke) {
225 MonoMethod *invoke = mono_marshal_get_runtime_invoke (mono_class_get_method_from_name_flags (mono_defaults.object_class, "Finalize", 0, 0), TRUE);
227 domain->finalize_runtime_invoke = mono_compile_method (invoke);
230 runtime_invoke = domain->finalize_runtime_invoke;
232 mono_runtime_class_init (o->vtable);
/* DTrace probe for finalizer invocation, if enabled. */
234 if (G_UNLIKELY (MONO_GC_FINALIZE_INVOKE_ENABLED ())) {
235 MONO_GC_FINALIZE_INVOKE ((unsigned long)o, mono_object_get_size (o),
236 o->vtable->klass->name_space, o->vtable->klass->name);
240 g_log ("mono-gc-finalizers", G_LOG_LEVEL_MESSAGE, "<%s at %p> Calling finalizer.", o->vtable->klass->name, o);
242 runtime_invoke (o, NULL, &exc, NULL);
245 g_log ("mono-gc-finalizers", G_LOG_LEVEL_MESSAGE, "<%s at %p> Returned from finalizer.", o->vtable->klass->name, o);
/* An exception escaping a finalizer is reported as an unhandled exception. */
248 mono_thread_internal_unhandled_exception (exc);
/* Restore the domain of the thread that triggered the finalization. */
250 mono_domain_set_internal (caller_domain);
/* Drains threads_to_finalize at shutdown: forces each deferred threadpool thread
 * through its finalizer now (see add_thread_to_finalize). */
254 mono_gc_finalize_threadpool_threads (void)
256 while (threads_to_finalize) {
257 MonoInternalThread *thread = (MonoInternalThread*) mono_mlist_get_data (threads_to_finalize);
259 /* Force finalization of the thread. */
260 thread->threadpool_thread = FALSE;
261 mono_object_register_finalizer ((MonoObject*)thread);
263 mono_gc_run_finalize (thread, NULL);
265 threads_to_finalize = mono_mlist_next (threads_to_finalize);
/* Raises the pre-allocated OutOfMemoryException for the current domain.
 * The exception object is created at domain setup so no allocation is needed here. */
270 mono_gc_out_of_memory (size_t size)
273 * we could allocate at program startup some memory that we could release
274 * back to the system at this point if we're really low on memory (ie, size is
275 * lower than the memory we set apart)
277 mono_raise_exception (mono_domain_get ()->out_of_memory_ex);
283 * Some of our objects may point to a different address than the address returned by GC_malloc()
284 * (because of the GetHashCode hack), but we need to pass the real address to register_finalizer.
285 * This also means that in the callback we need to adjust the pointer to get back the real
287 * We also need to be consistent in the use of the GC_debug* variants of malloc and register_finalizer,
288 * since that, too, can cause the underlying pointer to be offset.
/* Registers (callback != NULL) or unregisters (callback == NULL) 'obj' for
 * finalization with the active GC, and keeps the per-domain finalizable-objects
 * hash in sync. Raises ArgumentNullException when obj is NULL.
 * NOTE(review): #ifdef structure (Boehm vs SGen branches) is partly elided here. */
291 object_register_finalizer (MonoObject *obj, void (*callback)(void *, void*))
296 mono_raise_exception (mono_get_exception_argument_null ("obj"));
298 domain = obj->vtable->domain;
301 if (mono_domain_is_unloading (domain) && (callback != NULL))
303 * Can't register finalizers in a dying appdomain, since they
304 * could be invoked after the appdomain has been unloaded.
308 mono_domain_finalizers_lock (domain);
311 g_hash_table_insert (domain->finalizable_objects_hash, obj, obj);
313 g_hash_table_remove (domain->finalizable_objects_hash, obj);
315 mono_domain_finalizers_unlock (domain);
317 mono_gc_register_for_finalization (obj, callback);
318 #elif defined(HAVE_SGEN_GC)
320 * If we register finalizers for domains that are unloading we might
321 * end up running them while or after the domain is being cleared, so
322 * the objects will not be valid anymore.
324 if (!mono_domain_is_unloading (domain)) {
326 mono_gc_register_for_finalization (obj, callback);
327 MONO_FINISH_TRY_BLOCKING;
333 * mono_object_register_finalizer:
334 * @obj: object to register
336 * Records that object @obj has a finalizer, this will call the
337 * Finalize method when the garbage collector disposes the object.
/* Thin public wrapper: hooks up mono_gc_run_finalize as the GC callback. */
341 mono_object_register_finalizer (MonoObject *obj)
343 /* g_print ("Registered finalizer on %p %s.%s\n", obj, mono_object_class (obj)->name_space, mono_object_class (obj)->name); */
344 object_register_finalizer (obj, mono_gc_run_finalize);
348 * mono_domain_finalize:
349 * @domain: the domain to finalize
350 * @timeout: msecs to wait for the finalization to complete, -1 to wait indefinitely
352 * Request finalization of all finalizable objects inside @domain. Wait
353 * @timeout msecs for the finalization to complete.
355 * Returns: TRUE if succeeded, FALSE if there was a timeout
/* NOTE(review): several return statements and #ifdef/brace lines are elided in this view. */
359 mono_domain_finalize (MonoDomain *domain, guint32 timeout)
361 DomainFinalizationReq *req;
364 MonoInternalThread *thread = mono_thread_internal_current ();
366 #if defined(__native_client__)
370 if (mono_thread_internal_current () == gc_thread)
371 /* We are called from inside a finalizer, not much we can do here */
375 * No need to create another thread 'cause the finalizer thread
376 * is still working and will take care of running the finalizers
382 /* We don't support domain finalization without a GC */
383 if (mono_gc_is_null ())
/* Force a full collection so every dead finalizable object in the domain is queued. */
386 mono_gc_collect (mono_gc_max_generation ());
388 done_event = CreateEvent (NULL, TRUE, FALSE, NULL);
389 if (done_event == NULL) {
/* Queue the request; the finalizer thread signals done_event when finished. */
393 req = g_new0 (DomainFinalizationReq, 1);
394 req->domain = domain;
395 req->done_event = done_event;
397 if (domain == mono_get_root_domain ())
398 finalizing_root_domain = TRUE;
400 mono_finalizer_lock ();
402 domains_to_finalize = g_slist_append (domains_to_finalize, req);
404 mono_finalizer_unlock ();
406 /* Tell the finalizer thread to finalize this appdomain */
407 mono_gc_finalize_notify ();
413 res = guarded_wait (done_event, timeout, TRUE);
414 /* printf ("WAIT RES: %d.\n", res); */
416 if (res == WAIT_IO_COMPLETION) {
417 if ((thread->state & (ThreadState_StopRequested | ThreadState_SuspendRequested)) != 0)
419 } else if (res == WAIT_TIMEOUT) {
420 /* We leak the handle here */
427 CloseHandle (done_event);
/* Root domain: also tear down the threadpool and run deferred thread finalizers. */
429 if (domain == mono_get_root_domain ()) {
430 mono_threadpool_ms_cleanup ();
431 mono_gc_finalize_threadpool_threads ();
/* icall: System.GC.Collect(generation). */
438 ves_icall_System_GC_InternalCollect (int generation)
440 mono_gc_collect (generation);
/* icall: System.GC.GetTotalMemory. Optionally collects first, then reports heap usage.
 * NOTE(review): the forceCollection branch condition is elided in this view. */
444 ves_icall_System_GC_GetTotalMemory (MonoBoolean forceCollection)
447 mono_gc_collect (mono_gc_max_generation ());
448 return mono_gc_get_used_size ();
/* icall: System.GC.KeepAlive. Intentionally empty: the mere presence of the
 * call keeps 'obj' live up to this point. */
452 ves_icall_System_GC_KeepAlive (MonoObject *obj)
/* icall: System.GC.ReRegisterForFinalize — re-arms the object's finalizer. */
460 ves_icall_System_GC_ReRegisterForFinalize (MonoObject *obj)
462 MONO_CHECK_ARG_NULL (obj,);
464 object_register_finalizer (obj, mono_gc_run_finalize);
/* icall: System.GC.SuppressFinalize — unregisters the object's finalizer. */
468 ves_icall_System_GC_SuppressFinalize (MonoObject *obj)
470 MONO_CHECK_ARG_NULL (obj,);
472 /* delegates have no finalizers, but we register them to deal with the
473 * unmanaged->managed trampoline. We don't let the user suppress it
474 * otherwise we'd leak it.
476 if (obj->vtable->klass->delegate)
479 /* FIXME: Need to handle case where obj has COM Callable Wrapper
480 * generated for it that needs cleaned up, but user wants to suppress
481 * their derived object finalizer. */
483 object_register_finalizer (obj, NULL);
/* icall: System.GC.WaitForPendingFinalizers. Kicks the finalizer thread and
 * blocks on pending_done_event until the queue is drained. */
487 ves_icall_System_GC_WaitForPendingFinalizers (void)
489 if (mono_gc_is_null ())
492 if (!mono_gc_pending_finalizers ())
/* Calling this from the finalizer thread itself would deadlock. */
495 if (mono_thread_internal_current () == gc_thread)
496 /* Avoid deadlocks */
500 If the finalizer thread is not live, lets pretend no finalizers are pending since the current thread might
501 be the one responsible for starting it up.
503 if (gc_thread == NULL)
506 ResetEvent (pending_done_event);
507 mono_gc_finalize_notify ();
508 /* g_print ("Waiting for pending finalizers....\n"); */
509 guarded_wait (pending_done_event, INFINITE, TRUE);
510 /* g_print ("Done pending....\n"); */
/* icall: registers a ConditionalWeakTable ephemeron array with the GC;
 * sets a pending OutOfMemoryException on failure. */
514 ves_icall_System_GC_register_ephemeron_array (MonoObject *array)
517 if (!mono_gc_ephemeron_array_add (array)) {
518 mono_set_pending_exception (mono_object_domain (array)->out_of_memory_ex);
/* icall: returns the per-domain sentinel object marking dead ephemeron entries. */
525 ves_icall_System_GC_get_ephemeron_tombstone (void)
527 return mono_domain_get ()->ephemeron_tombstone;
530 #define mono_allocator_lock() mono_mutex_lock (&allocator_section)
531 #define mono_allocator_unlock() mono_mutex_unlock (&allocator_section)
532 static mono_mutex_t allocator_section;
/* GC handle encoding: the slot index is shifted left by GC_HANDLE_TYPE_SHIFT and
 * the low bits store (type + 1). The +1/-1 bias guarantees that 0 is never a
 * valid handle value. */
534 #define GC_HANDLE_TYPE_SHIFT (3)
535 #define GC_HANDLE_TYPE_MASK ((1 << GC_HANDLE_TYPE_SHIFT) - 1)
536 #define GC_HANDLE_TYPE(x) (((x) & GC_HANDLE_TYPE_MASK) - 1)
537 #define GC_HANDLE_INDEX(x) ((x) >> GC_HANDLE_TYPE_SHIFT)
538 #define GC_HANDLE(index, type) (((index) << GC_HANDLE_TYPE_SHIFT) | (((type) & GC_HANDLE_TYPE_MASK) + 1))
/* icall: System.Runtime.InteropServices.GCHandle target accessor. */
541 ves_icall_System_GCHandle_GetTarget (guint32 handle)
543 return mono_gchandle_get_target (handle);
547 * if type == -1, change the target of the handle, otherwise allocate a new handle.
/* NOTE(review): the switch(type) header and some case labels are elided in this view;
 * the visible returns correspond to the weak / weak-track / normal / pinned cases. */
550 ves_icall_System_GCHandle_GetTargetHandle (MonoObject *obj, guint32 handle, gint32 type)
553 mono_gchandle_set_target (handle, obj);
554 /* the handle doesn't change */
559 return mono_gchandle_new_weakref (obj, FALSE);
560 case HANDLE_WEAK_TRACK:
561 return mono_gchandle_new_weakref (obj, TRUE);
563 return mono_gchandle_new (obj, FALSE);
565 return mono_gchandle_new (obj, TRUE);
567 g_assert_not_reached ();
/* icall: GCHandle.Free. */
573 ves_icall_System_GCHandle_FreeHandle (guint32 handle)
575 mono_gchandle_free (handle);
/* icall: GCHandle.AddrOfPinnedObject. Returns the address of the pinned data:
 * first char for strings, first element for arrays, or past the object header
 * for explicit/sequential-layout objects. */
579 ves_icall_System_GCHandle_GetAddrOfPinnedObject (guint32 handle)
583 if (GC_HANDLE_TYPE (handle) != HANDLE_PINNED)
585 obj = mono_gchandle_get_target (handle);
587 MonoClass *klass = mono_object_class (obj);
588 if (klass == mono_defaults.string_class) {
589 return mono_string_chars ((MonoString*)obj);
590 } else if (klass->rank) {
591 return mono_array_addr ((MonoArray*)obj, char, 0);
593 /* the C# code will check and throw the exception */
594 /* FIXME: missing !klass->blittable test, see bug #61134 */
595 if ((klass->flags & TYPE_ATTRIBUTE_LAYOUT_MASK) == TYPE_ATTRIBUTE_AUTO_LAYOUT)
597 return (char*)obj + sizeof (MonoObject);
/* icall: toggles whether the GC may run synchronous major collections. */
604 ves_icall_Mono_Runtime_SetGCAllowSynchronousMajor (MonoBoolean flag)
606 return mono_gc_set_allow_synchronous_major (flag);
/* Spine size: with buckets doubling from MIN_BUCKET_SIZE, 32 - shift buckets
 * cover every index a 32-bit handle can encode. */
609 #define BUCKETS (32 - GC_HANDLE_TYPE_SHIFT)
610 #define MIN_BUCKET_BITS (5)
611 #define MIN_BUCKET_SIZE (1 << MIN_BUCKET_BITS)
614 * A table of GC handle data, implementing a simple lock-free bitmap allocator.
616 * 'entries' is an array of pointers to buckets of increasing size. The first
617 * bucket has size 'MIN_BUCKET_SIZE', and each bucket is twice the size of the
620 * |-------|-- MIN_BUCKET_SIZE
622 * [1] -> xxxxxxxxxxxxxxxx
623 * [2] -> xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
626 * The size of the spine, 'BUCKETS', is chosen so that the maximum number of
627 * entries is no less than the maximum index value of a GC handle.
629 * Each entry in a bucket is a pointer with two tag bits: if
630 * 'GC_HANDLE_OCCUPIED' returns true for a slot, then the slot is occupied; if
631 * so, then 'GC_HANDLE_VALID' gives whether the entry refers to a valid (1) or
632 * NULL (0) object reference. If the reference is valid, then the pointer is an
633 * object pointer. If the reference is NULL, and 'GC_HANDLE_TYPE_IS_WEAK' is
634 * true for 'type', then the pointer is a domain pointer--this allows us to
635 * retrieve the domain ID of an expired weak reference.
637 * Finally, 'slot_hint' denotes the position of the last allocation, so that the
638 * whole array needn't be searched on every allocation.
/* NOTE(review): the struct header and trailing 'type' field appear elided in this view. */
642 volatile gpointer *volatile entries [BUCKETS];
643 volatile guint32 capacity;
644 volatile guint32 slot_hint;
/* Number of slots in bucket 'index' (doubles per bucket, starting at MIN_BUCKET_SIZE). */
649 bucket_size (guint index)
651 return 1 << (index + MIN_BUCKET_BITS);
654 /* Computes floor(log2(index + MIN_BUCKET_SIZE)) - 1, giving the index
655 * of the bucket containing a slot.
/* NOTE(review): the non-__builtin_clz fallback loop is partly elided in this view. */
658 index_bucket (guint index)
661 return CHAR_BIT * sizeof (index) - __builtin_clz (index + MIN_BUCKET_SIZE) - 1 - MIN_BUCKET_BITS;
664 index += MIN_BUCKET_SIZE;
669 return count - 1 - MIN_BUCKET_BITS;
/* Splits a flat slot index into (bucket, offset-within-bucket). */
674 bucketize (guint index, guint *bucket, guint *offset)
676 *bucket = index_bucket (index);
677 *offset = index - bucket_size (*bucket) + MIN_BUCKET_SIZE;
/* Atomically stores a tagged pointer into a handle slot via CAS against 'old'.
 * When obj is non-NULL an object pointer is stored; otherwise (weak handles)
 * the current domain pointer is stored so the domain of an expired weakref can
 * still be recovered. NOTE(review): the if (obj) guard between the two returns
 * appears elided in this view. */
680 static inline gboolean
681 try_set_slot (volatile gpointer *slot, MonoObject *obj, gpointer old, GCHandleType type)
684 return InterlockedCompareExchangePointer (slot, MONO_GC_HANDLE_OBJECT_POINTER (obj, GC_HANDLE_TYPE_IS_WEAK (type)), old) == old;
685 return InterlockedCompareExchangePointer (slot, MONO_GC_HANDLE_DOMAIN_POINTER (mono_domain_get (), GC_HANDLE_TYPE_IS_WEAK (type)), old) == old;
688 /* Try to claim a slot by setting its occupied bit. */
689 static inline gboolean
690 try_occupy_slot (HandleData *handles, guint bucket, guint offset, MonoObject *obj, gboolean track)
692 volatile gpointer *link_addr = &(handles->entries [bucket] [offset]);
693 if (MONO_GC_HANDLE_OCCUPIED (*link_addr))
695 return try_set_slot (link_addr, obj, NULL, handles->type);
698 #define EMPTY_HANDLE_DATA(type) { { NULL }, 0, 0, (type) }
700 /* weak and weak-track arrays will be allocated in malloc memory
/* One handle table per GCHandleType; array order must match the enum values,
 * since gc_handles_for_type indexes directly by type. */
702 static HandleData gc_handles [] = {
703 EMPTY_HANDLE_DATA (HANDLE_WEAK),
704 EMPTY_HANDLE_DATA (HANDLE_WEAK_TRACK),
705 EMPTY_HANDLE_DATA (HANDLE_NORMAL),
706 EMPTY_HANDLE_DATA (HANDLE_PINNED)
/* Returns the handle table for 'type'; asserts the type is in range. */
710 gc_handles_for_type (GCHandleType type)
712 g_assert (type < HANDLE_TYPE_MAX);
713 return &gc_handles [type];
/* GC root-marking callback for the HANDLE_NORMAL table: reveals each occupied
 * object pointer, lets the GC mark (and possibly relocate) it, then re-hides
 * the updated pointer back into the slot. */
717 mark_gc_handles (void *addr, MonoGCMarkFunc mark_func, void *gc_data)
719 HandleData *handles = gc_handles_for_type (HANDLE_NORMAL);
720 size_t bucket, offset;
721 const guint max_bucket = index_bucket (handles->capacity);
722 for (bucket = 0; bucket < max_bucket; ++bucket) {
723 volatile gpointer *entries = handles->entries [bucket];
724 for (offset = 0; offset < bucket_size (bucket); ++offset) {
725 volatile gpointer *entry = &entries [offset];
726 gpointer hidden = *entry;
727 gpointer revealed = MONO_GC_REVEAL_POINTER (hidden, FALSE);
728 if (!MONO_GC_HANDLE_IS_OBJECT_POINTER (hidden))
/* mark_func may move the object; write the (possibly updated) pointer back. */
730 mark_func ((MonoObject **)&revealed, gc_data);
732 *entry = MONO_GC_HANDLE_OBJECT_POINTER (revealed, FALSE);
/* Scans slots in [begin, end) for one that is not occupied; returns its index
 * (return paths elided in this view).
 * NOTE(review): the 'delta = -1' branch looks dead — when begin >= end the loop
 * condition 'index < end' is false immediately, so no backward scan happens.
 * Confirm against the callers in alloc_handle. */
738 handle_data_find_unset (HandleData *handles, guint32 begin, guint32 end)
741 gint delta = begin < end ? +1 : -1;
742 for (index = begin; index < end; index += delta) {
743 guint bucket, offset;
744 volatile gpointer *entries;
745 bucketize (index, &bucket, &offset);
746 entries = handles->entries [bucket];
748 if (!MONO_GC_HANDLE_OCCUPIED (entries [offset]))
754 /* Adds a bucket if necessary and possible. */
/* Lock-free growth: allocate the next bucket, register it as a GC root
 * (precise for normal handles, conservative/null descriptor for pinned; weak
 * buckets are plain malloc memory), then CAS it into the spine. The loser of
 * the race frees its bucket again. */
756 handle_data_grow (HandleData *handles, guint32 old_capacity)
758 const guint new_bucket = index_bucket (old_capacity);
759 const guint32 growth = bucket_size (new_bucket);
760 const guint32 new_capacity = old_capacity + growth;
762 const size_t new_bucket_size = sizeof (**handles->entries) * growth;
/* Another thread already grew the table past the requested capacity. */
763 if (handles->capacity >= new_capacity)
765 entries = g_malloc0 (new_bucket_size);
767 if (!GC_HANDLE_TYPE_IS_WEAK (handles->type))
768 mono_gc_register_root ((char *)entries, new_bucket_size, handles->type == HANDLE_PINNED ? NULL : mono_gc_make_root_descr_all_refs (new_bucket_size * CHAR_BIT), MONO_ROOT_SOURCE_GC_HANDLE, "gc handles table");
771 if (handles->type == HANDLE_PINNED)
772 mono_gc_register_root ((char *)entries, new_bucket_size, MONO_GC_DESCRIPTOR_NULL, MONO_ROOT_SOURCE_GC_HANDLE, "gc handles table");
774 if (InterlockedCompareExchangePointer ((volatile gpointer *)&handles->entries [new_bucket], entries, NULL) == NULL) {
775 if (InterlockedCompareExchange ((volatile gint32 *)&handles->capacity, new_capacity, old_capacity) != old_capacity)
776 g_assert_not_reached ();
777 handles->slot_hint = old_capacity;
778 mono_memory_write_barrier ();
781 /* Someone beat us to the allocation. */
783 mono_gc_deregister_root ((char *)entries);
/* Allocates a new GC handle for 'obj' in the given table: search from the slot
 * hint, wrap around, grow the table if full, then CAS-claim the slot. Weak
 * handles are additionally registered as GC weak links.
 * NOTE(review): the retry loop structure and return statement are elided in this view. */
789 alloc_handle (HandleData *handles, MonoObject *obj, gboolean track)
793 guint bucket, offset;
796 if (!handles->capacity)
797 handle_data_grow (handles, 0);
799 capacity = handles->capacity;
800 slot_hint = handles->slot_hint;
801 index = handle_data_find_unset (handles, slot_hint, capacity);
/* Nothing free after the hint: wrap to the start, and grow if still full. */
803 index = handle_data_find_unset (handles, 0, slot_hint);
805 handle_data_grow (handles, capacity);
808 handles->slot_hint = index;
809 bucketize (index, &bucket, &offset);
810 if (!try_occupy_slot (handles, bucket, offset, obj, track))
812 if (obj && GC_HANDLE_TYPE_IS_WEAK (handles->type))
813 mono_gc_weak_link_register (&handles->entries [bucket] [offset], obj, track);
814 /* Ensure that a GC handle cannot be given to another thread without the slot having been set. */
815 mono_memory_write_barrier ();
816 #ifndef DISABLE_PERFCOUNTERS
817 mono_perfcounters->gc_num_handles++;
819 res = GC_HANDLE (index, handles->type);
820 mono_profiler_gc_handle (MONO_PROFILER_GC_HANDLE_CREATED, handles->type, res, obj);
825 * Maps a function over all GC handles.
826 * This assumes that the world is stopped!
/* The callback receives each live revealed pointer and returns the new tagged
 * slot value to store back (it must keep the occupied bit set). */
829 mono_gchandle_iterate (GCHandleType handle_type, int max_generation, gpointer callback(gpointer, GCHandleType, gpointer), gpointer user)
831 HandleData *handle_data = gc_handles_for_type (handle_type);
832 size_t bucket, offset;
833 guint max_bucket = index_bucket (handle_data->capacity);
834 /* If a new bucket has been allocated, but the capacity has not yet been
835 * increased, nothing can yet have been allocated in the bucket because the
836 * world is stopped, so we shouldn't miss any handles during iteration.
838 for (bucket = 0; bucket < max_bucket; ++bucket) {
839 volatile gpointer *entries = handle_data->entries [bucket];
840 for (offset = 0; offset < bucket_size (bucket); ++offset) {
841 gpointer hidden = entries [offset];
844 /* Table must contain no garbage pointers. */
845 gboolean occupied = MONO_GC_HANDLE_OCCUPIED (hidden);
846 g_assert (hidden ? occupied : !occupied);
847 if (!occupied || !MONO_GC_HANDLE_VALID (hidden))
849 revealed = MONO_GC_REVEAL_POINTER (hidden, GC_HANDLE_TYPE_IS_WEAK (handle_type));
/* Skip objects older than the collected generation — they cannot have moved/died. */
851 if (mono_gc_object_older_than (revealed, max_generation))
853 result = callback (revealed, handle_type, user);
855 g_assert (MONO_GC_HANDLE_OCCUPIED (result));
856 entries [offset] = result;
863 * @obj: managed object to get a handle for
864 * @pinned: whether the object should be pinned
866 * This returns a handle that wraps the object, this is used to keep a
867 * reference to a managed object from the unmanaged world and preventing the
868 * object from being disposed.
870 * If @pinned is false the address of the object can not be obtained, if it is
871 * true the address of the object can be obtained. This will also pin the
872 * object so it will not be possible by a moving garbage collector to move the
875 * Returns: a handle that can be used to access the object from
879 mono_gchandle_new (MonoObject *obj, gboolean pinned)
881 return alloc_handle (gc_handles_for_type (pinned ? HANDLE_PINNED : HANDLE_NORMAL), obj, FALSE);
885 * mono_gchandle_new_weakref:
886 * @obj: managed object to get a handle for
887 * @track_resurrection: whether the handle should track the object through resurrection
889 * This returns a weak handle that wraps the object, this is used to
890 * keep a reference to a managed object from the unmanaged world.
891 * Unlike the mono_gchandle_new the object can be reclaimed by the
892 * garbage collector. In this case the value of the GCHandle will be
895 * If @pinned is false the address of the object can not be obtained, if it is
896 * true the address of the object can be obtained. This will also pin the
897 * object so it will not be possible by a moving garbage collector to move the
900 * Returns: a handle that can be used to access the object from
904 mono_gchandle_new_weakref (MonoObject *obj, gboolean track_resurrection)
906 guint32 handle = alloc_handle (gc_handles_for_type (track_resurrection ? HANDLE_WEAK_TRACK : HANDLE_WEAK), obj, track_resurrection);
/* Safely dereferences a (possibly weak) handle slot: reveal the hidden pointer,
 * then re-read the slot to make sure the GC did not collect the object in
 * between. NOTE(review): the retry loop and return statement are elided in this view. */
912 link_get (volatile gpointer *link_addr, gboolean is_weak)
914 void *volatile *link_addr_volatile;
918 link_addr_volatile = link_addr;
919 ptr = (void*)*link_addr_volatile;
921 * At this point we have a hidden pointer. If the GC runs
922 * here, it will not recognize the hidden pointer as a
923 * reference, and if the object behind it is not referenced
924 * elsewhere, it will be freed. Once the world is restarted
925 * we reveal the pointer, giving us a pointer to a freed
926 * object. To make sure we don't return it, we load the
927 * hidden pointer again. If it's still the same, we can be
928 * sure the object reference is valid.
930 if (ptr && MONO_GC_HANDLE_IS_OBJECT_POINTER (ptr))
931 obj = (MonoObject *)MONO_GC_REVEAL_POINTER (ptr, is_weak);
937 * If a GC happens here, obj needs to be on the stack or in a
938 * register, so we need to prevent this from being reordered
941 mono_gc_dummy_use (obj);
942 mono_memory_barrier ();
945 mono_gc_ensure_weak_links_accessible ();
/* Slot changed under us: retry the read from scratch. */
947 if ((void*)*link_addr_volatile != ptr)
954 * mono_gchandle_get_target:
955 * @gchandle: a GCHandle's handle.
957 * The handle was previously created by calling mono_gchandle_new or
958 * mono_gchandle_new_weakref.
960 * Returns a pointer to the MonoObject represented by the handle or
961 * NULL for a collected object if using a weakref handle.
964 mono_gchandle_get_target (guint32 gchandle)
966 guint index = GC_HANDLE_INDEX (gchandle);
967 guint type = GC_HANDLE_TYPE (gchandle);
968 HandleData *handles = gc_handles_for_type (type);
969 guint bucket, offset;
970 g_assert (index < handles->capacity);
971 bucketize (index, &bucket, &offset);
972 return link_get (&handles->entries [bucket] [offset], GC_HANDLE_TYPE_IS_WEAK (type));
/* Repoints an existing handle at a new target, keeping the weak-link
 * registration with the GC in sync with the old/new slot contents.
 * NOTE(review): retry loop lines are elided; also the 4th argument to
 * try_set_slot here is a gboolean while the parameter is a GCHandleType —
 * verify against try_set_slot/GC_HANDLE_TYPE_IS_WEAK that this is intended. */
976 mono_gchandle_set_target (guint32 gchandle, MonoObject *obj)
978 guint index = GC_HANDLE_INDEX (gchandle);
979 guint type = GC_HANDLE_TYPE (gchandle);
980 HandleData *handles = gc_handles_for_type (type);
981 gboolean track = handles->type == HANDLE_WEAK_TRACK;
982 guint bucket, offset;
985 g_assert (index < handles->capacity);
986 bucketize (index, &bucket, &offset);
989 slot = handles->entries [bucket] [offset];
990 g_assert (MONO_GC_HANDLE_OCCUPIED (slot));
991 if (!try_set_slot (&handles->entries [bucket] [offset], obj, slot, GC_HANDLE_TYPE_IS_WEAK (handles->type)))
993 if (MONO_GC_HANDLE_IS_OBJECT_POINTER (slot))
994 mono_gc_weak_link_unregister (&handles->entries [bucket] [offset], track)
996 mono_gc_weak_link_register (&handles->entries [bucket] [offset], obj, track);
/* Returns the domain a handle slot belongs to: from the object for live slots,
 * from the stored domain pointer for expired weak slots. Re-reads the slot to
 * detect concurrent changes (retry/return lines elided in this view). */
1000 mono_gchandle_slot_domain (volatile gpointer *slot_addr, gboolean is_weak)
1006 if (!MONO_GC_HANDLE_OCCUPIED (slot))
1008 if (MONO_GC_HANDLE_IS_OBJECT_POINTER (slot)) {
1009 MonoObject *obj = MONO_GC_REVEAL_POINTER (slot, is_weak);
1010 /* See note [dummy use]. */
1011 mono_gc_dummy_use (obj);
1012 if (*slot_addr != slot)
1014 return mono_object_domain (obj);
1016 domain = MONO_GC_REVEAL_POINTER (slot, is_weak);
1017 /* See note [dummy use]. */
1018 mono_gc_dummy_use (domain);
1019 if (*slot_addr != slot)
/* Looks up the owning domain of a handle; the out-of-range index path
 * presumably returns NULL (return elided in this view). */
1025 gchandle_domain (guint32 gchandle) {
1026 guint index = GC_HANDLE_INDEX (gchandle);
1027 guint type = GC_HANDLE_TYPE (gchandle);
1028 HandleData *handles = gc_handles_for_type (type);
1029 guint bucket, offset;
1030 if (index >= handles->capacity)
1032 bucketize (index, &bucket, &offset);
1033 return mono_gchandle_slot_domain (&handles->entries [bucket] [offset], GC_HANDLE_TYPE_IS_WEAK (type));
1037 * mono_gchandle_is_in_domain:
1038 * @gchandle: a GCHandle's handle.
1039 * @domain: An application domain.
1041 * Returns: true if the object wrapped by the @gchandle belongs to the specific @domain.
/* NOTE(review): gchandle_domain() appears to be able to return NULL (invalid
 * index / unoccupied slot), which would crash here — confirm callers guarantee
 * a valid handle. */
1044 mono_gchandle_is_in_domain (guint32 gchandle, MonoDomain *domain)
1046 return domain->domain_id == gchandle_domain (gchandle)->domain_id;
1050 * mono_gchandle_free:
1051 * @gchandle: a GCHandle's handle.
1053 * Frees the @gchandle handle. If there are no outstanding
1054 * references, the garbage collector can reclaim the memory of the
1058 mono_gchandle_free (guint32 gchandle)
1060 guint index = GC_HANDLE_INDEX (gchandle);
1061 guint type = GC_HANDLE_TYPE (gchandle);
1062 HandleData *handles = gc_handles_for_type (type);
1063 guint bucket, offset;
1064 bucketize (index, &bucket, &offset);
/* Only clear slots that are in range and actually occupied; weak handles must
 * also drop their GC weak-link registration. */
1065 if (index < handles->capacity && MONO_GC_HANDLE_OCCUPIED (handles->entries [bucket] [offset])) {
1066 if (GC_HANDLE_TYPE_IS_WEAK (handles->type))
1067 mono_gc_weak_link_unregister (&handles->entries [bucket] [offset], handles->type == HANDLE_WEAK_TRACK);
1068 handles->entries [bucket] [offset] = NULL;
1070 /* print a warning? */
1072 #ifndef DISABLE_PERFCOUNTERS
1073 mono_perfcounters->gc_num_handles--;
1075 mono_profiler_gc_handle (MONO_PROFILER_GC_HANDLE_DESTROYED, handles->type, gchandle, NULL);
1079 * mono_gchandle_free_domain:
1080 * @unloading: domain that is unloading
1082 * Function used internally to cleanup any GC handle for objects belonging
1083 * to the specified domain during appdomain unload.
/* NOTE(review): slot read, retry, and slot-clearing lines are elided in this view. */
1086 mono_gchandle_free_domain (MonoDomain *unloading)
1089 /* All non-pinned handle types. */
1090 for (type = HANDLE_TYPE_MIN; type < HANDLE_PINNED; ++type) {
1091 const gboolean is_weak = GC_HANDLE_TYPE_IS_WEAK (type);
1093 HandleData *handles = gc_handles_for_type (type);
1094 guint32 capacity = handles->capacity;
1095 for (index = 0; index < capacity; ++index) {
1096 guint bucket, offset;
1098 bucketize (index, &bucket, &offset);
1099 MonoObject *obj = NULL;
1101 volatile gpointer *slot_addr = &handles->entries [bucket] [offset];
1102 /* NB: This should have the same behavior as mono_gchandle_slot_domain(). */
1105 if (!MONO_GC_HANDLE_OCCUPIED (slot))
1107 if (MONO_GC_HANDLE_IS_OBJECT_POINTER (slot)) {
1108 obj = MONO_GC_REVEAL_POINTER (slot, is_weak);
1109 if (*slot_addr != slot)
1111 domain = mono_object_domain (obj);
/* Expired weak slot: the stored pointer is the domain itself. */
1113 domain = MONO_GC_REVEAL_POINTER (slot, is_weak);
1115 if (unloading->domain_id == domain->domain_id) {
1116 if (GC_HANDLE_TYPE_IS_WEAK (type) && MONO_GC_REVEAL_POINTER (slot, is_weak))
1117 mono_gc_weak_link_unregister (&handles->entries [bucket] [offset], handles->type == HANDLE_WEAK_TRACK);
1120 /* See note [dummy use]. */
1121 mono_gc_dummy_use (obj);
/* icall helper: is the handle's target in the calling thread's current domain? */
1128 mono_gc_GCHandle_CheckCurrentDomain (guint32 gchandle)
1130 return mono_gchandle_is_in_domain (gchandle, mono_domain_get ());
1133 #ifdef MONO_HAS_SEMAPHORES
1134 static MonoSemType finalizer_sem;
1136 static HANDLE finalizer_event;
1137 static volatile gboolean finished=FALSE;
/*
 * mono_gc_finalize_notify:
 *
 * Wake the finalizer thread so that pending finalizers get executed.
 */
mono_gc_finalize_notify (void)
	g_message ( "%s: prodding finalizer", __func__);

	/* The null GC has no finalization support, so there is nothing to wake. */
	if (mono_gc_is_null ())

#ifdef MONO_HAS_SEMAPHORES
	MONO_SEM_POST (&finalizer_sem);
	/* Event-based fallback wake-up (non-semaphore build). */
	SetEvent (finalizer_event);
#ifdef HAVE_BOEHM_GC
/* g_hash_table_foreach callback: append each hash-table key (a finalizable
 * object) to the GPtrArray passed through @user_data. */
collect_objects (gpointer key, gpointer value, gpointer user_data)
	GPtrArray *arr = (GPtrArray*)user_data;
	g_ptr_array_add (arr, key);
/*
 * finalize_domain_objects:
 *
 * Run the finalizers of all finalizable objects in req->domain, then
 * signal req->done_event so the requesting thread can proceed with the
 * appdomain unload.
 */
finalize_domain_objects (DomainFinalizationReq *req)
	MonoDomain *domain = req->domain;

/* Batch size used when draining SGen's per-domain finalizable set. */
#define NUM_FOBJECTS 64
	MonoObject *to_finalize [NUM_FOBJECTS];

	/* Process finalizers which are already in the queue */
	mono_gc_invoke_finalizers ();

#ifdef HAVE_BOEHM_GC
	while (g_hash_table_size (domain->finalizable_objects_hash) > 0) {
		/*
		 * Since the domain is unloading, nobody is allowed to put
		 * new entries into the hash table. But finalize_object might
		 * remove entries from the hash table, so we make a copy.
		 */
		objs = g_ptr_array_new ();
		g_hash_table_foreach (domain->finalizable_objects_hash, collect_objects, objs);
		/* printf ("FINALIZING %d OBJECTS.\n", objs->len); */
		for (i = 0; i < objs->len; ++i) {
			MonoObject *o = (MonoObject*)g_ptr_array_index (objs, i);
			/* FIXME: Avoid finalizing threads, etc */
			mono_gc_run_finalize (o, 0);
		g_ptr_array_free (objs, TRUE);
#elif defined(HAVE_SGEN_GC)
	/* SGen hands back the domain's finalizable objects in batches of
	 * NUM_FOBJECTS until none remain. */
	while ((count = mono_gc_finalizers_for_domain (domain, to_finalize, NUM_FOBJECTS))) {
		for (i = 0; i < count; ++i) {
			mono_gc_run_finalize (to_finalize [i], 0);

	/* cleanup the reference queue */
	reference_queue_clear_for_domain (domain);

	/* printf ("DONE.\n"); */
	SetEvent (req->done_event);

	/* The event is closed in mono_domain_finalize if we get here */
/*
 * finalizer_thread:
 *
 * Main loop of the dedicated finalizer thread: sleep until prodded by
 * mono_gc_finalize_notify (), then run queued finalizers, service any
 * pending appdomain-unload finalization requests and process the
 * reference queues.  On exit, signals exited_cond for mono_gc_cleanup ().
 */
finalizer_thread (gpointer unused)
	gboolean wait = TRUE;

	/* Wait to be notified that there's at least one finalizer to run
	 * (original comment truncated in this excerpt). */
	g_assert (mono_domain_get () == mono_get_root_domain ());
	/* While blocked below, this thread must be skipped by GC suspends. */
	mono_gc_set_skip_thread (TRUE);
	MONO_PREPARE_BLOCKING;

	/* An alertable wait is required so this thread can be suspended on windows */
#ifdef MONO_HAS_SEMAPHORES
	MONO_SEM_WAIT_ALERTABLE (&finalizer_sem, TRUE);
	WaitForSingleObjectEx (finalizer_event, INFINITE, TRUE);

	MONO_FINISH_BLOCKING;
	mono_gc_set_skip_thread (FALSE);

	mono_threads_perform_thread_dump ();

	mono_console_handle_async_ops ();

	mono_attach_maybe_start ();

	if (domains_to_finalize) {
		mono_finalizer_lock ();
		/* Re-check under the lock before dequeuing the head request. */
		if (domains_to_finalize) {
			DomainFinalizationReq *req = domains_to_finalize->data;
			domains_to_finalize = g_slist_remove (domains_to_finalize, req);
			mono_finalizer_unlock ();

			/* Run the unloading domain's finalizers outside the lock. */
			finalize_domain_objects (req);
			mono_finalizer_unlock ();

	/* If finished == TRUE, mono_gc_cleanup has been called (from mono_runtime_cleanup),
	 * before the domain is unloaded. */
	mono_gc_invoke_finalizers ();

	mono_threads_join_threads ();

	reference_queue_proccess_all ();

#ifdef MONO_HAS_SEMAPHORES
	/* Avoid posting the pending done event until there are pending finalizers */
	if (MONO_SEM_TIMEDWAIT (&finalizer_sem, 0) == 0)
		/* Don't wait again at the start of the loop */
		SetEvent (pending_done_event);
	SetEvent (pending_done_event);

	/* Hand-shake with mono_gc_cleanup (): announce that this thread exited. */
	mono_finalizer_lock ();
	finalizer_thread_exited = TRUE;
	mono_cond_signal (&exited_cond);
	mono_finalizer_unlock ();
#ifndef LAZY_GC_THREAD_CREATION
/* NOTE(review): the #ifndef above likely guards a linkage qualifier or the
 * eager-creation variant; the adjacent lines appear elided from this
 * excerpt — confirm against the full source. */
/* Create the finalizer thread in the root domain and give it a
 * recognizable name for debuggers and profilers. */
mono_gc_init_finalizer_thread (void)
	gc_thread = mono_thread_create_internal (mono_domain_get (), finalizer_thread, NULL, FALSE, 0);
	ves_icall_System_Threading_Thread_SetName_internal (gc_thread, mono_string_new (mono_domain_get (), "Finalizer"));
/* GC icall-layer initialization (NOTE(review): the enclosing function
 * header is not visible in this excerpt; this appears to be the body of
 * mono_gc_init — confirm against the full source). */
mono_mutex_init_recursive (&allocator_section);

mono_mutex_init_recursive (&finalizer_mutex);
mono_mutex_init_recursive (&reference_queue_mutex);

/* Publish GC statistics through the counters infrastructure. */
mono_counters_register ("Minor GC collections", MONO_COUNTER_GC | MONO_COUNTER_UINT, &gc_stats.minor_gc_count);
mono_counters_register ("Major GC collections", MONO_COUNTER_GC | MONO_COUNTER_UINT, &gc_stats.major_gc_count);
mono_counters_register ("Minor GC time", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &gc_stats.minor_gc_time);
mono_counters_register ("Major GC time", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &gc_stats.major_gc_time);
mono_counters_register ("Major GC time concurrent", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &gc_stats.major_gc_time_concurrent);

mono_gc_base_init ();

/* The GC-handle table is scanned with a custom mark routine. */
mono_gc_register_root ((char *)&gc_handles [0], sizeof (gc_handles), mono_gc_make_root_descr_user (mark_gc_handles), MONO_ROOT_SOURCE_GC_HANDLE, "gc handles table");

if (mono_gc_is_disabled ()) {

/* Events used to hand-shake with the finalizer thread. */
finalizer_event = CreateEvent (NULL, FALSE, FALSE, NULL);
g_assert (finalizer_event);
pending_done_event = CreateEvent (NULL, TRUE, FALSE, NULL);
g_assert (pending_done_event);
mono_cond_init (&exited_cond, 0);
#ifdef MONO_HAS_SEMAPHORES
MONO_SEM_INIT (&finalizer_sem, 0);

#ifndef LAZY_GC_THREAD_CREATION
/* Unless creation is lazy, start the finalizer thread right away. */
mono_gc_init_finalizer_thread ();
/*
 * mono_gc_cleanup:
 *
 * Runtime-shutdown teardown of the GC/finalizer machinery: asks the
 * finalizer thread to exit, waits for it (bounded), stops it forcibly if
 * needed, then destroys the GC-related locks.
 */
mono_gc_cleanup (void)
	g_message ("%s: cleaning up finalizer", __func__);

	/* Nothing was initialized for the null GC. */
	if (mono_gc_is_null ())

	/* Only another thread can wait for the finalizer thread to exit. */
	if (mono_thread_internal_current () != gc_thread) {
		gboolean timed_out = FALSE;
		guint32 start_ticks = mono_msec_ticks ();
		guint32 end_ticks = start_ticks + 2000;

		mono_gc_finalize_notify ();
		/* Finishing the finalizer thread, so wait a little bit... */
		/* MS seems to wait for about 2 seconds */
		while (!finalizer_thread_exited) {
			guint32 current_ticks = mono_msec_ticks ();

			/* Deadline reached.  NOTE(review): guint32 tick wraparound
			 * would trip this early — confirm mono_msec_ticks range. */
			if (current_ticks >= end_ticks)

			timeout = end_ticks - current_ticks;
			MONO_PREPARE_BLOCKING;
			mono_finalizer_lock ();
			/* Re-check under the lock, then sleep on the exit condition. */
			if (!finalizer_thread_exited)
				mono_cond_timedwait_ms (&exited_cond, &finalizer_mutex, timeout);
			mono_finalizer_unlock ();
			MONO_FINISH_BLOCKING;

		if (!finalizer_thread_exited) {
			/* Set a flag which the finalizer thread can check */
			suspend_finalizers = TRUE;

			/* Try to abort the thread, in the hope that it is running managed code */
			mono_thread_internal_stop (gc_thread);

			/* Wait for it to stop */
			ret = guarded_wait (gc_thread->handle, 100, TRUE);

			if (ret == WAIT_TIMEOUT) {
				/*
				 * The finalizer thread refused to die. There is not much we
				 * can do here, since the runtime is shutting down so the
				 * state the finalizer thread depends on will vanish.
				 */
				g_warning ("Shutting down finalizer thread timed out.");

		/* Wait for the thread to actually exit */
		ret = guarded_wait (gc_thread->handle, INFINITE, TRUE);
		g_assert (ret == WAIT_OBJECT_0);

		mono_thread_join (GUINT_TO_POINTER (gc_thread->tid));

	mono_gc_base_cleanup ();

	mono_reference_queue_cleanup ();

	/* All users of these locks are gone by now. */
	mono_mutex_destroy (&allocator_section);
	mono_mutex_destroy (&finalizer_mutex);
	mono_mutex_destroy (&reference_queue_mutex);
1430 mono_gc_is_finalizer_internal_thread (MonoInternalThread *thread)
1432 return thread == gc_thread;
1436 * mono_gc_is_finalizer_thread:
1437 * @thread: the thread to test.
1439 * In Mono objects are finalized asynchronously on a separate thread.
1440 * This routine tests whether the @thread argument represents the
1441 * finalization thread.
1443 * Returns true if @thread is the finalization thread.
1446 mono_gc_is_finalizer_thread (MonoThread *thread)
1448 return mono_gc_is_finalizer_internal_thread (thread->internal_thread);
#if defined(__MACH__)
/* The thread that handles Mach exceptions; registered here so other parts
 * of the runtime can identify it (consumer is outside this chunk —
 * presumably the suspend machinery; confirm against the full source). */
static pthread_t mach_exception_thread;

/* Record @thread as the Mach exception handler thread. */
mono_gc_register_mach_exception_thread (pthread_t thread)
	mach_exception_thread = thread;

/* Return the previously registered Mach exception handler thread. */
mono_gc_get_mach_exception_thread (void)
	return mach_exception_thread;
#ifndef HAVE_SGEN_GC
/* Without SGen there is no separate mature (old-generation) allocation
 * path, so fall back to a regular allocation for @vtable. */
mono_gc_alloc_mature (MonoVTable *vtable)
	return mono_object_new_specific (vtable);
/* Global singly-linked list of all live reference queues; guarded by
 * reference_queue_mutex for structural changes. */
static MonoReferenceQueue *ref_queues;

/* Unlink @element from the lock-free list whose head link is @prev, using
 * a CAS loop that retries while the predecessor changes concurrently. */
ref_list_remove_element (RefQueueEntry **prev, RefQueueEntry *element)
	/* Guard if head is changed concurrently. */
	while (*prev != element)
		prev = &(*prev)->next;
} while (prev && InterlockedCompareExchangePointer ((void*)prev, element->next, element) != element);
/* Lock-free push of @value onto the list at @head via a CAS loop.
 * NOTE(review): the read of *head into `current` appears elided from this
 * excerpt — confirm against the full source. */
ref_list_push (RefQueueEntry **head, RefQueueEntry *value)
	RefQueueEntry *current;
	value->next = current;
	STORE_STORE_FENCE; /*Must make sure the previous store is visible before the CAS. */
} while (InterlockedCompareExchangePointer ((void*)head, value, current) != current);
/* Walk @queue and, for each entry whose target object has been collected
 * (or unconditionally when the queue is marked for deletion), free its GC
 * handle, unlink the entry and invoke the user callback. */
reference_queue_proccess (MonoReferenceQueue *queue)
	RefQueueEntry **iter = &queue->queue;
	RefQueueEntry *entry;
	while ((entry = *iter)) {
		if (queue->should_be_deleted || !mono_gchandle_get_target (entry->gchandle)) {
			mono_gchandle_free ((guint32)entry->gchandle);
			ref_list_remove_element (iter, entry);
			queue->callback (entry->user_data);
		/* Entry still live: advance to its next link. */
		iter = &entry->next;
/* Process every reference queue, then — under reference_queue_mutex —
 * unlink and drain queues that their owners marked for deletion. */
reference_queue_proccess_all (void)
	MonoReferenceQueue **iter;
	MonoReferenceQueue *queue = ref_queues;
	/* First pass: process entries of all queues without taking the lock. */
	for (; queue; queue = queue->next)
		reference_queue_proccess (queue);

	/* Second pass: reap queues flagged by mono_gc_reference_queue_free.
	 * NOTE(review): a `queue = *iter;` assignment appears elided from this
	 * excerpt — confirm against the full source. */
	mono_mutex_lock (&reference_queue_mutex);
	for (iter = &ref_queues; *iter;) {
		if (!queue->should_be_deleted) {
			iter = &queue->next;
		/* Drop the lock while running user callbacks. */
		mono_mutex_unlock (&reference_queue_mutex);
		reference_queue_proccess (queue);
		/* Unlink the dead queue from the global list. */
		*iter = queue->next;
	mono_mutex_unlock (&reference_queue_mutex);
/*
 * mono_reference_queue_cleanup:
 *
 * Shutdown path: mark every reference queue for deletion and run one
 * processing pass so their entries and callbacks are flushed.
 */
mono_reference_queue_cleanup (void)
	MonoReferenceQueue *queue = ref_queues;
	for (; queue; queue = queue->next)
		queue->should_be_deleted = TRUE;
	reference_queue_proccess_all ();
/* Appdomain-unload support: for every queue, drop each entry whose object
 * belongs to @domain — free its GC handle, unlink it and invoke the queue
 * callback so the user data is released. */
reference_queue_clear_for_domain (MonoDomain *domain)
	MonoReferenceQueue *queue = ref_queues;
	for (; queue; queue = queue->next) {
		RefQueueEntry **iter = &queue->queue;
		RefQueueEntry *entry;
		while ((entry = *iter)) {
			if (entry->domain == domain) {
				mono_gchandle_free ((guint32)entry->gchandle);
				ref_list_remove_element (iter, entry);
				queue->callback (entry->user_data);
			/* Entry belongs to another domain: advance. */
			iter = &entry->next;
/**
 * mono_gc_reference_queue_new:
 * @callback: callback used when processing collected entries.
 *
 * Create a new reference queue used to process collected objects.
 * A reference queue lets you add a pair of (managed object, user data)
 * using the mono_gc_reference_queue_add method.
 *
 * Once the managed object is collected @callback will be called
 * in the finalizer thread with 'user data' as argument.
 *
 * The callback is called from the finalizer thread without any locks held.
 * When an AppDomain is unloaded, all callbacks for objects belonging to it
 * are invoked (NOTE(review): tail of the original sentence elided here —
 * confirm wording against the full source).
 *
 * @returns the new queue.
 */
mono_gc_reference_queue_new (mono_reference_queue_callback callback)
	MonoReferenceQueue *res = g_new0 (MonoReferenceQueue, 1);
	res->callback = callback;

	/* Link the new queue at the head of the global list under the lock. */
	mono_mutex_lock (&reference_queue_mutex);
	res->next = ref_queues;
	mono_mutex_unlock (&reference_queue_mutex);
/**
 * mono_gc_reference_queue_add:
 * @queue: the queue to add the reference to.
 * @obj: the object to be watched for collection
 * @user_data: parameter to be passed to the queue callback
 *
 * Queue an object to be watched for collection; when @obj is
 * collected, the callback that was registered for the @queue will
 * be invoked with @user_data as argument.
 *
 * @returns false if the queue is scheduled to be freed.
 */
mono_gc_reference_queue_add (MonoReferenceQueue *queue, MonoObject *obj, void *user_data)
	RefQueueEntry *entry;
	/* Refuse new entries once the queue has been scheduled for deletion. */
	if (queue->should_be_deleted)

	entry = g_new0 (RefQueueEntry, 1);
	entry->user_data = user_data;
	entry->domain = mono_object_domain (obj);

	/* Track @obj with a resurrection-tracking weak handle and make sure it
	 * is registered for finalization so the queue gets processed. */
	entry->gchandle = mono_gchandle_new_weakref (obj, TRUE);
	mono_object_register_finalizer (obj);

	ref_list_push (&queue->queue, entry);
1633 * mono_gc_reference_queue_free:
1634 * @queue the queue that should be freed.
1636 * This operation signals that @queue should be freed. This operation is deferred
1637 * as it happens on the finalizer thread.
1639 * After this call, no further objects can be queued. It's the responsibility of the
1640 * caller to make sure that no further attempt to access queue will be made.
1643 mono_gc_reference_queue_free (MonoReferenceQueue *queue)
1645 queue->should_be_deleted = TRUE;