2 * metadata/gc.c: GC icalls.
4 * Author: Paolo Molaro <lupus@ximian.com>
6 * Copyright 2002-2003 Ximian, Inc (http://www.ximian.com)
7 * Copyright 2004-2009 Novell, Inc (http://www.novell.com)
8 * Copyright 2012 Xamarin Inc (http://www.xamarin.com)
16 #include <mono/metadata/gc-internal.h>
17 #include <mono/metadata/mono-gc.h>
18 #include <mono/metadata/threads.h>
19 #include <mono/metadata/tabledefs.h>
20 #include <mono/metadata/exception.h>
21 #include <mono/metadata/profiler-private.h>
22 #include <mono/metadata/domain-internals.h>
23 #include <mono/metadata/class-internals.h>
24 #include <mono/metadata/metadata-internals.h>
25 #include <mono/metadata/mono-mlist.h>
26 #include <mono/metadata/threadpool.h>
27 #include <mono/metadata/threadpool-internals.h>
28 #include <mono/metadata/threads-types.h>
29 #include <mono/utils/mono-logger-internal.h>
30 #include <mono/metadata/gc-internal.h>
31 #include <mono/metadata/marshal.h> /* for mono_delegate_free_ftnptr () */
32 #include <mono/metadata/attach.h>
33 #include <mono/metadata/console-io.h>
34 #include <mono/utils/mono-semaphore.h>
35 #include <mono/utils/mono-memory-model.h>
36 #include <mono/utils/mono-counters.h>
37 #include <mono/utils/dtrace.h>
38 #include <mono/utils/mono-threads.h>
39 #include <mono/utils/atomic.h>
45 typedef struct DomainFinalizationReq {
48 } DomainFinalizationReq;
50 #ifdef PLATFORM_WINCE /* FIXME: add accessors to gc.dll API */
51 extern void (*__imp_GC_finalizer_notifier)(void);
52 #define GC_finalizer_notifier __imp_GC_finalizer_notifier
53 extern int __imp_GC_finalize_on_demand;
54 #define GC_finalize_on_demand __imp_GC_finalize_on_demand
57 static gboolean gc_disabled = FALSE;
59 static gboolean finalizing_root_domain = FALSE;
61 #define mono_finalizer_lock() EnterCriticalSection (&finalizer_mutex)
62 #define mono_finalizer_unlock() LeaveCriticalSection (&finalizer_mutex)
63 static CRITICAL_SECTION finalizer_mutex;
64 static CRITICAL_SECTION reference_queue_mutex;
66 static GSList *domains_to_finalize= NULL;
67 static MonoMList *threads_to_finalize = NULL;
69 static MonoInternalThread *gc_thread;
71 static void object_register_finalizer (MonoObject *obj, void (*callback)(void *, void*));
73 static void mono_gchandle_set_target (guint32 gchandle, MonoObject *obj);
75 static void reference_queue_proccess_all (void);
76 static void mono_reference_queue_cleanup (void);
77 static void reference_queue_clear_for_domain (MonoDomain *domain);
79 static HANDLE pending_done_event;
80 static HANDLE shutdown_event;
/*
 * add_thread_to_finalize:
 * Queue @thread for deferred finalization on the threads_to_finalize MonoMList.
 * The list head is registered as a GC root the first time it is used, and the
 * whole operation happens under the finalizer lock.
 */
86 add_thread_to_finalize (MonoInternalThread *thread)
88 mono_finalizer_lock ();
89 if (!threads_to_finalize)
90 MONO_GC_REGISTER_ROOT_SINGLE (threads_to_finalize);
91 threads_to_finalize = mono_mlist_append (threads_to_finalize, (MonoObject*)thread);
92 mono_finalizer_unlock ();
95 static gboolean suspend_finalizers = FALSE;
97 * actually, we might want to queue the finalize requests in a separate thread,
98 * but we need to be careful about the execution domain of the thread...
/*
 * mono_gc_run_finalize:
 * @obj: base address handed to us by the GC (may be offset from the MonoObject)
 * @data: the offset to add to @obj to recover the real MonoObject pointer
 *
 * Finalization callback invoked by the GC for each finalizable object.
 * Switches into the object's domain, runs its Finalize method through a
 * precompiled runtime-invoke wrapper, then restores the caller's domain.
 * Special-cases: internal threads, DynamicMethod during shutdown, delegates
 * with native trampolines, and objects that only carry a CCW.
 */
101 mono_gc_run_finalize (void *obj, void *data)
103 MonoObject *exc = NULL;
108 MonoMethod* finalizer = NULL;
109 MonoDomain *caller_domain = mono_domain_get ();
111 RuntimeInvokeFunction runtime_invoke;
/* recover the real object pointer: the GC may hand us an offset base address */
113 o = (MonoObject*)((char*)obj + GPOINTER_TO_UINT (data));
115 if (suspend_finalizers)
118 domain = o->vtable->domain;
121 mono_domain_finalizers_lock (domain);
123 o2 = g_hash_table_lookup (domain->finalizable_objects_hash, o);
125 mono_domain_finalizers_unlock (domain);
128 /* Already finalized somehow */
132 /* make sure the finalizer is not called again if the object is resurrected */
133 object_register_finalizer (obj, NULL);
135 if (o->vtable->klass == mono_defaults.internal_thread_class) {
136 MonoInternalThread *t = (MonoInternalThread*)o;
138 if (mono_gc_is_finalizer_internal_thread (t))
139 /* Avoid finalizing ourselves */
142 if (t->threadpool_thread && finalizing_root_domain) {
143 /* Don't finalize threadpool threads when
144 shutting down - they're finalized when the
145 threadpool shuts down. */
146 add_thread_to_finalize (t);
151 if (o->vtable->klass->image == mono_defaults.corlib && !strcmp (o->vtable->klass->name, "DynamicMethod") && finalizing_root_domain) {
153 * These can't be finalized during unloading/shutdown, since that would
154 * free the native code which can still be referenced by other
156 * FIXME: This is not perfect, objects dying at the same time as
157 * dynamic methods can still reference them even when !shutdown.
162 if (mono_runtime_get_no_exec ())
165 /* speedup later... and use a timeout */
166 /* g_print ("Finalize run on %p %s.%s\n", o, mono_object_class (o)->name_space, mono_object_class (o)->name); */
168 /* Use _internal here, since this thread can enter a doomed appdomain */
169 mono_domain_set_internal (mono_object_domain (o));
171 /* delegates that have a native function pointer allocated are
172 * registered for finalization, but they don't have a Finalize
173 * method, because in most cases it's not needed and it's just a waste.
175 if (o->vtable->klass->delegate) {
176 MonoDelegate* del = (MonoDelegate*)o;
177 if (del->delegate_trampoline)
178 mono_delegate_free_ftnptr ((MonoDelegate*)o);
179 mono_domain_set_internal (caller_domain);
183 finalizer = mono_class_get_finalizer (o->vtable->klass);
186 /* If object has a CCW but has no finalizer, it was only
187 * registered for finalization in order to free the CCW.
188 * Else it needs the regular finalizer run.
189 * FIXME: what to do about resurrection and suppression
190 * of finalizer on object with CCW.
192 if (mono_marshal_free_ccw (o) && !finalizer) {
193 mono_domain_set_internal (caller_domain);
199 * To avoid the locking plus the other overhead of mono_runtime_invoke (),
200 * create and precompile a wrapper which calls the finalize method using
/* the compiled wrapper is cached per-domain in finalize_runtime_invoke */
203 if (!domain->finalize_runtime_invoke) {
204 MonoMethod *invoke = mono_marshal_get_runtime_invoke (mono_class_get_method_from_name_flags (mono_defaults.object_class, "Finalize", 0, 0), TRUE);
206 domain->finalize_runtime_invoke = mono_compile_method (invoke);
209 runtime_invoke = domain->finalize_runtime_invoke;
/* ensure the class cctor has run before the finalizer executes */
211 mono_runtime_class_init (o->vtable);
213 if (G_UNLIKELY (MONO_GC_FINALIZE_INVOKE_ENABLED ())) {
214 MONO_GC_FINALIZE_INVOKE ((unsigned long)o, mono_object_get_size (o),
215 o->vtable->klass->name_space, o->vtable->klass->name);
218 runtime_invoke (o, NULL, &exc, NULL);
/* an exception escaping a finalizer is reported as unhandled */
221 mono_internal_thread_unhandled_exception (exc);
223 mono_domain_set_internal (caller_domain);
/*
 * mono_gc_finalize_threadpool_threads:
 * Drain the threads_to_finalize list, forcing finalization of each queued
 * threadpool thread (their threadpool flag is cleared first so
 * mono_gc_run_finalize will not re-queue them). Called during shutdown.
 */
227 mono_gc_finalize_threadpool_threads (void)
229 while (threads_to_finalize) {
230 MonoInternalThread *thread = (MonoInternalThread*) mono_mlist_get_data (threads_to_finalize);
232 /* Force finalization of the thread. */
233 thread->threadpool_thread = FALSE;
234 mono_object_register_finalizer ((MonoObject*)thread);
236 mono_gc_run_finalize (thread, NULL);
238 threads_to_finalize = mono_mlist_next (threads_to_finalize);
/*
 * mono_gc_out_of_memory:
 * @size: the allocation size that failed (currently unused here)
 *
 * Raise the domain's preallocated OutOfMemoryException. Does not return.
 */
243 mono_gc_out_of_memory (size_t size)
246 * we could allocate at program startup some memory that we could release
247 * back to the system at this point if we're really low on memory (ie, size is
248 * lower than the memory we set apart)
250 mono_raise_exception (mono_domain_get ()->out_of_memory_ex);
256 * Some of our objects may point to a different address than the address returned by GC_malloc()
257 * (because of the GetHashCode hack), but we need to pass the real address to register_finalizer.
258 * This also means that in the callback we need to adjust the pointer to get back the real
260 * We also need to be consistent in the use of the GC_debug* variants of malloc and register_finalizer,
261 * since that, too, can cause the underlying pointer to be offset.
/*
 * object_register_finalizer:
 * @obj: the object to (un)register; NULL raises ArgumentNullException
 * @callback: the GC finalization callback, or NULL to unregister
 *
 * Backend-specific registration of @obj for finalization. Registration is
 * refused for unloading domains, since the finalizer could otherwise run
 * against freed domain state. The Boehm path also mirrors the registration
 * into domain->finalizable_objects_hash under the domain finalizer lock.
 */
264 object_register_finalizer (MonoObject *obj, void (*callback)(void *, void*))
271 mono_raise_exception (mono_get_exception_argument_null ("obj"));
273 domain = obj->vtable->domain;
276 /* This assertion is not valid when GC_DEBUG is defined */
277 g_assert (GC_base (obj) == (char*)obj - offset);
280 if (mono_domain_is_unloading (domain) && (callback != NULL))
282 * Can't register finalizers in a dying appdomain, since they
283 * could be invoked after the appdomain has been unloaded.
287 mono_domain_finalizers_lock (domain);
290 g_hash_table_insert (domain->finalizable_objects_hash, obj, obj);
292 g_hash_table_remove (domain->finalizable_objects_hash, obj);
294 mono_domain_finalizers_unlock (domain);
296 GC_REGISTER_FINALIZER_NO_ORDER ((char*)obj - offset, callback, GUINT_TO_POINTER (offset), NULL, NULL);
297 #elif defined(HAVE_SGEN_GC)
299 mono_raise_exception (mono_get_exception_argument_null ("obj"));
302 * If we register finalizers for domains that are unloading we might
303 * end up running them while or after the domain is being cleared, so
304 * the objects will not be valid anymore.
306 if (!mono_domain_is_unloading (obj->vtable->domain))
307 mono_gc_register_for_finalization (obj, callback);
312 * mono_object_register_finalizer:
313 * @obj: object to register
315 * Records that object @obj has a finalizer, this will call the
316 * Finalize method when the garbage collector disposes the object.
320 mono_object_register_finalizer (MonoObject *obj)
322 /* g_print ("Registered finalizer on %p %s.%s\n", obj, mono_object_class (obj)->name_space, mono_object_class (obj)->name); */
/* thin public wrapper: always registers the standard run-finalize callback */
323 object_register_finalizer (obj, mono_gc_run_finalize);
327 * mono_domain_finalize:
328 * @domain: the domain to finalize
329 * @timeout: msecs to wait for the finalization to complete, -1 to wait indefinitely
331 * Request finalization of all finalizable objects inside @domain. Wait
332 * @timeout msecs for the finalization to complete.
334 * Returns: TRUE if succeeded, FALSE if there was a timeout
338 mono_domain_finalize (MonoDomain *domain, guint32 timeout)
340 DomainFinalizationReq *req;
343 MonoInternalThread *thread = mono_thread_internal_current ();
345 #if defined(__native_client__)
/* re-entrancy guard: cannot wait on the finalizer thread from itself */
349 if (mono_thread_internal_current () == gc_thread)
350 /* We are called from inside a finalizer, not much we can do here */
354 * No need to create another thread 'cause the finalizer thread
355 * is still working and will take care of running the finalizers
/* collect first so the finalization queue is as complete as possible */
362 mono_gc_collect (mono_gc_max_generation ());
364 done_event = CreateEvent (NULL, TRUE, FALSE, NULL);
365 if (done_event == NULL) {
369 req = g_new0 (DomainFinalizationReq, 1);
370 req->domain = domain;
371 req->done_event = done_event;
373 if (domain == mono_get_root_domain ())
374 finalizing_root_domain = TRUE;
376 mono_finalizer_lock ();
378 domains_to_finalize = g_slist_append (domains_to_finalize, req);
380 mono_finalizer_unlock ();
382 /* Tell the finalizer thread to finalize this appdomain */
383 mono_gc_finalize_notify ();
/* alertable wait so thread interruption/APCs can break us out */
389 res = WaitForSingleObjectEx (done_event, timeout, TRUE);
390 /* printf ("WAIT RES: %d.\n", res); */
392 if (res == WAIT_IO_COMPLETION) {
393 if ((thread->state & (ThreadState_StopRequested | ThreadState_SuspendRequested)) != 0)
395 } else if (res == WAIT_TIMEOUT) {
396 /* We leak the handle here */
403 CloseHandle (done_event);
405 if (domain == mono_get_root_domain ()) {
406 mono_thread_pool_cleanup ();
407 mono_gc_finalize_threadpool_threads ();
412 /* We don't support domain finalization without a GC */
/* icall: System.GC.InternalCollect — force a collection of @generation */
418 ves_icall_System_GC_InternalCollect (int generation)
420 mono_gc_collect (generation);
/* icall: System.GC.GetTotalMemory — optionally collect, then report used size */
424 ves_icall_System_GC_GetTotalMemory (MonoBoolean forceCollection)
429 mono_gc_collect (mono_gc_max_generation ());
430 return mono_gc_get_used_size ();
/* icall: System.GC.KeepAlive — intentionally empty; the call itself keeps @obj live */
434 ves_icall_System_GC_KeepAlive (MonoObject *obj)
/* icall: System.GC.ReRegisterForFinalize — re-arm the standard finalizer for @obj */
444 ves_icall_System_GC_ReRegisterForFinalize (MonoObject *obj)
447 mono_raise_exception (mono_get_exception_argument_null ("obj"));
449 object_register_finalizer (obj, mono_gc_run_finalize);
/* icall: System.GC.SuppressFinalize — unregister @obj's finalizer (except delegates) */
453 ves_icall_System_GC_SuppressFinalize (MonoObject *obj)
456 mono_raise_exception (mono_get_exception_argument_null ("obj"));
458 /* delegates have no finalizers, but we register them to deal with the
459 * unmanaged->managed trampoline. We don't let the user suppress it
460 * otherwise we'd leak it.
462 if (obj->vtable->klass->delegate)
465 /* FIXME: Need to handle case where obj has COM Callable Wrapper
466 * generated for it that needs to be cleaned up, but user wants to suppress
467 * their derived object finalizer. */
469 object_register_finalizer (obj, NULL);
/* icall: System.GC.WaitForPendingFinalizers — block until the queue drains */
473 ves_icall_System_GC_WaitForPendingFinalizers (void)
476 if (!mono_gc_pending_finalizers ())
/* deadlock guard: the finalizer thread must not wait on itself */
479 if (mono_thread_internal_current () == gc_thread)
480 /* Avoid deadlocks */
484 If the finalizer thread is not live, lets pretend no finalizers are pending since the current thread might
485 be the one responsible for starting it up.
487 if (gc_thread == NULL)
490 ResetEvent (pending_done_event);
491 mono_gc_finalize_notify ();
492 /* g_print ("Waiting for pending finalizers....\n"); */
493 WaitForSingleObjectEx (pending_done_event, INFINITE, TRUE);
494 /* g_print ("Done pending....\n"); */
/* icall: register an ephemeron array with the GC; OOM raises the preallocated exception */
499 ves_icall_System_GC_register_ephemeron_array (MonoObject *array)
502 if (!mono_gc_ephemeron_array_add (array))
503 mono_raise_exception (mono_object_domain (array)->out_of_memory_ex);
/* icall: return the per-domain ephemeron tombstone sentinel object */
508 ves_icall_System_GC_get_ephemeron_tombstone (void)
510 return mono_domain_get ()->ephemeron_tombstone;
513 #define mono_allocator_lock() EnterCriticalSection (&allocator_section)
514 #define mono_allocator_unlock() LeaveCriticalSection (&allocator_section)
515 static CRITICAL_SECTION allocator_section;
516 static CRITICAL_SECTION handle_section;
525 static HandleType mono_gchandle_get_type (guint32 gchandle);
/* icall: System.Runtime.InteropServices.GCHandle.GetTarget */
528 ves_icall_System_GCHandle_GetTarget (guint32 handle)
530 return mono_gchandle_get_target (handle);
534 * if type == -1, change the target of the handle, otherwise allocate a new handle.
537 ves_icall_System_GCHandle_GetTargetHandle (MonoObject *obj, guint32 handle, gint32 type)
540 mono_gchandle_set_target (handle, obj);
541 /* the handle doesn't change */
546 return mono_gchandle_new_weakref (obj, FALSE);
547 case HANDLE_WEAK_TRACK:
548 return mono_gchandle_new_weakref (obj, TRUE);
550 return mono_gchandle_new (obj, FALSE);
552 return mono_gchandle_new (obj, TRUE);
/* unknown handle type: indicates a managed/runtime mismatch */
554 g_assert_not_reached ();
/* icall: GCHandle.Free */
560 ves_icall_System_GCHandle_FreeHandle (guint32 handle)
562 mono_gchandle_free (handle);
/* icall: GCHandle.AddrOfPinnedObject — only valid for pinned handles;
 * strings return their character buffer, arrays their first element,
 * other objects the address just past the MonoObject header. */
566 ves_icall_System_GCHandle_GetAddrOfPinnedObject (guint32 handle)
570 if (mono_gchandle_get_type (handle) != HANDLE_PINNED)
572 obj = mono_gchandle_get_target (handle);
574 MonoClass *klass = mono_object_class (obj);
575 if (klass == mono_defaults.string_class) {
576 return mono_string_chars ((MonoString*)obj);
577 } else if (klass->rank) {
578 return mono_array_addr ((MonoArray*)obj, char, 0);
580 /* the C# code will check and throw the exception */
581 /* FIXME: missing !klass->blittable test, see bug #61134 */
582 if ((klass->flags & TYPE_ATTRIBUTE_LAYOUT_MASK) == TYPE_ATTRIBUTE_AUTO_LAYOUT)
584 return (char*)obj + sizeof (MonoObject);
/* icall: Mono.Runtime.SetGCAllowSynchronousMajor */
591 ves_icall_Mono_Runtime_SetGCAllowSynchronousMajor (MonoBoolean flag)
593 return mono_gc_set_allow_synchronous_major (flag);
601 guint slot_hint : 24; /* starting slot for search */
602 /* 2^16 appdomains should be enough for everyone (though I know I'll regret this in 20 years) */
603 /* we alloc this only for weak refs, since we can get the domain directly in the other cases */
607 /* weak and weak-track arrays will be allocated in malloc memory
/* one HandleData bucket per handle type; the handle type is recoverable
 * from the low 3 bits of the public guint32 handle value */
609 static HandleData gc_handles [] = {
610 {NULL, NULL, 0, HANDLE_WEAK, 0},
611 {NULL, NULL, 0, HANDLE_WEAK_TRACK, 0},
612 {NULL, NULL, 0, HANDLE_NORMAL, 0},
613 {NULL, NULL, 0, HANDLE_PINNED, 0}
/* a single global mutex guards all four handle buckets */
616 #define lock_handles(handles) EnterCriticalSection (&handle_section)
617 #define unlock_handles(handles) LeaveCriticalSection (&handle_section)
/* find_first_unset: index of the first 0 bit in @bitmap, scanning from bit 0 */
620 find_first_unset (guint32 bitmap)
623 for (i = 0; i < 32; ++i) {
624 if (!(bitmap & (1 << i)))
/* make_root_descr_all_refs: build a GC root descriptor covering @numbits
 * reference slots; behavior differs for pinned roots in the full version */
631 make_root_descr_all_refs (int numbits, gboolean pinned)
637 return mono_gc_make_root_descr_all_refs (numbits);
/*
 * alloc_handle:
 * @handles: the per-type HandleData bucket to allocate from
 * @obj: the object the new handle should reference (may be NULL)
 * @track: for weak handles, whether to track resurrection
 *
 * Allocate a slot in @handles (growing the bitmap/entries/domain_ids arrays
 * by doubling when full) and encode it as a public handle value:
 * (slot << 3) | (type + 1). Strong/pinned entries live in GC-visible fixed
 * memory; weak entries live in malloc memory and use GC weak links, with a
 * parallel domain_ids array so the owning domain survives object collection.
 */
641 alloc_handle (HandleData *handles, MonoObject *obj, gboolean track)
645 lock_handles (handles);
/* first use: allocate the initial arrays for this bucket */
646 if (!handles->size) {
648 if (handles->type > HANDLE_WEAK_TRACK) {
649 handles->entries = mono_gc_alloc_fixed (sizeof (gpointer) * handles->size, make_root_descr_all_refs (handles->size, handles->type == HANDLE_PINNED));
651 handles->entries = g_malloc0 (sizeof (gpointer) * handles->size);
652 handles->domain_ids = g_malloc0 (sizeof (guint16) * handles->size);
654 handles->bitmap = g_malloc0 (handles->size / 8);
/* search for a free bit starting from the slot hint, then wrap around */
657 for (slot = handles->slot_hint; slot < handles->size / 32; ++slot) {
658 if (handles->bitmap [slot] != 0xffffffff) {
659 i = find_first_unset (handles->bitmap [slot]);
660 handles->slot_hint = slot;
664 if (i == -1 && handles->slot_hint != 0) {
665 for (slot = 0; slot < handles->slot_hint; ++slot) {
666 if (handles->bitmap [slot] != 0xffffffff) {
667 i = find_first_unset (handles->bitmap [slot]);
668 handles->slot_hint = slot;
/* no free slot: double the bucket and migrate existing entries */
675 guint32 new_size = handles->size * 2; /* always double: we memset to 0 based on this below */
677 /* resize and copy the bitmap */
678 new_bitmap = g_malloc0 (new_size / 8);
679 memcpy (new_bitmap, handles->bitmap, handles->size / 8);
680 g_free (handles->bitmap);
681 handles->bitmap = new_bitmap;
683 /* resize and copy the entries */
684 if (handles->type > HANDLE_WEAK_TRACK) {
687 entries = mono_gc_alloc_fixed (sizeof (gpointer) * new_size, make_root_descr_all_refs (new_size, handles->type == HANDLE_PINNED));
688 mono_gc_memmove (entries, handles->entries, sizeof (gpointer) * handles->size);
690 mono_gc_free_fixed (handles->entries);
691 handles->entries = entries;
/* weak buckets: weak links must be removed and re-added at the new addresses */
695 domain_ids = g_malloc0 (sizeof (guint16) * new_size);
696 entries = g_malloc0 (sizeof (gpointer) * new_size);
697 memcpy (domain_ids, handles->domain_ids, sizeof (guint16) * handles->size);
698 for (i = 0; i < handles->size; ++i) {
699 MonoObject *obj = mono_gc_weak_link_get (&(handles->entries [i]));
701 mono_gc_weak_link_add (&(entries [i]), obj, track);
702 mono_gc_weak_link_remove (&(handles->entries [i]), track);
704 g_assert (!handles->entries [i]);
707 g_free (handles->entries);
708 g_free (handles->domain_ids);
709 handles->entries = entries;
710 handles->domain_ids = domain_ids;
713 /* set i and slot to the next free position */
715 slot = (handles->size + 1) / 32;
716 handles->slot_hint = handles->size + 1;
717 handles->size = new_size;
/* claim the slot and install the object */
719 handles->bitmap [slot] |= 1 << i;
720 slot = slot * 32 + i;
721 handles->entries [slot] = NULL;
722 if (handles->type <= HANDLE_WEAK_TRACK) {
723 /*FIXME, what to use when obj == null?*/
724 handles->domain_ids [slot] = (obj ? mono_object_get_domain (obj) : mono_domain_get ())->domain_id;
726 mono_gc_weak_link_add (&(handles->entries [slot]), obj, track);
728 handles->entries [slot] = obj;
731 #ifndef DISABLE_PERFCOUNTERS
732 mono_perfcounters->gc_num_handles++;
734 unlock_handles (handles);
735 /*g_print ("allocated entry %d of type %d to object %p (in slot: %p)\n", slot, handles->type, obj, handles->entries [slot]);*/
/* encode: slot in the high bits, (type + 1) in the low 3 bits */
736 res = (slot << 3) | (handles->type + 1);
737 mono_profiler_gc_handle (MONO_PROFILER_GC_HANDLE_CREATED, handles->type, res, obj);
743 * @obj: managed object to get a handle for
744 * @pinned: whether the object should be pinned
746 * This returns a handle that wraps the object, this is used to keep a
747 * reference to a managed object from the unmanaged world and preventing the
748 * object from being disposed.
750 * If @pinned is false the address of the object can not be obtained, if it is
751 * true the address of the object can be obtained. This will also pin the
752 * object so it will not be possible by a moving garbage collector to move the
755 * Returns: a handle that can be used to access the object from
759 mono_gchandle_new (MonoObject *obj, gboolean pinned)
761 return alloc_handle (&gc_handles [pinned? HANDLE_PINNED: HANDLE_NORMAL], obj, FALSE);
765 * mono_gchandle_new_weakref:
766 * @obj: managed object to get a handle for
767 * @pinned: whether the object should be pinned
769 * This returns a weak handle that wraps the object, this is used to
770 * keep a reference to a managed object from the unmanaged world.
771 * Unlike the mono_gchandle_new the object can be reclaimed by the
772 * garbage collector. In this case the value of the GCHandle will be
775 * If @pinned is false the address of the object can not be obtained, if it is
776 * true the address of the object can be obtained. This will also pin the
777 * object so it will not be possible by a moving garbage collector to move the
780 * Returns: a handle that can be used to access the object from
784 mono_gchandle_new_weakref (MonoObject *obj, gboolean track_resurrection)
786 guint32 handle = alloc_handle (&gc_handles [track_resurrection? HANDLE_WEAK_TRACK: HANDLE_WEAK], obj, track_resurrection);
/* mono_gchandle_get_type: decode the HandleType from the low 3 bits of @gchandle */
792 mono_gchandle_get_type (guint32 gchandle)
794 guint type = (gchandle & 7) - 1;
800 * mono_gchandle_get_target:
801 * @gchandle: a GCHandle's handle.
803 * The handle was previously created by calling mono_gchandle_new or
804 * mono_gchandle_new_weakref.
806 * Returns a pointer to the MonoObject represented by the handle or
807 * NULL for a collected object if using a weakref handle.
810 mono_gchandle_get_target (guint32 gchandle)
/* decode slot (high bits) and type (low 3 bits, biased by 1) */
812 guint slot = gchandle >> 3;
813 guint type = (gchandle & 7) - 1;
814 HandleData *handles = &gc_handles [type];
815 MonoObject *obj = NULL;
818 lock_handles (handles);
/* validate the slot is in range and actually allocated */
819 if (slot < handles->size && (handles->bitmap [slot / 32] & (1 << (slot % 32)))) {
820 if (handles->type <= HANDLE_WEAK_TRACK) {
/* weak handle: go through the GC weak link, may yield NULL if collected */
821 obj = mono_gc_weak_link_get (&handles->entries [slot]);
823 obj = handles->entries [slot];
826 /* print a warning? */
828 unlock_handles (handles);
829 /*g_print ("get target of entry %d of type %d: %p\n", slot, handles->type, obj);*/
/*
 * mono_gchandle_set_target:
 * @gchandle: an existing handle value
 * @obj: the new target object (may be NULL)
 *
 * Repoint @gchandle at @obj. Weak handles swap the GC weak link and refresh
 * the recorded domain id; strong/pinned handles store the pointer directly.
 */
834 mono_gchandle_set_target (guint32 gchandle, MonoObject *obj)
836 guint slot = gchandle >> 3;
837 guint type = (gchandle & 7) - 1;
838 HandleData *handles = &gc_handles [type];
839 MonoObject *old_obj = NULL;
843 lock_handles (handles);
844 if (slot < handles->size && (handles->bitmap [slot / 32] & (1 << (slot % 32)))) {
845 if (handles->type <= HANDLE_WEAK_TRACK) {
846 old_obj = handles->entries [slot];
847 if (handles->entries [slot])
848 mono_gc_weak_link_remove (&handles->entries [slot], handles->type == HANDLE_WEAK_TRACK);
850 mono_gc_weak_link_add (&handles->entries [slot], obj, handles->type == HANDLE_WEAK_TRACK);
851 /*FIXME, what to use when obj == null?*/
852 handles->domain_ids [slot] = (obj ? mono_object_get_domain (obj) : mono_domain_get ())->domain_id;
854 handles->entries [slot] = obj;
857 /* print a warning? */
859 /*g_print ("changed entry %d of type %d to object %p (in slot: %p)\n", slot, handles->type, obj, handles->entries [slot]);*/
860 unlock_handles (handles);
864 * mono_gchandle_is_in_domain:
865 * @gchandle: a GCHandle's handle.
866 * @domain: An application domain.
868 * Returns: true if the object wrapped by the @gchandle belongs to the specific @domain.
871 mono_gchandle_is_in_domain (guint32 gchandle, MonoDomain *domain)
873 guint slot = gchandle >> 3;
874 guint type = (gchandle & 7) - 1;
875 HandleData *handles = &gc_handles [type];
876 gboolean result = FALSE;
879 lock_handles (handles);
880 if (slot < handles->size && (handles->bitmap [slot / 32] & (1 << (slot % 32)))) {
881 if (handles->type <= HANDLE_WEAK_TRACK) {
/* weak handles: compare against the recorded domain id, since the target
 * object may already have been collected */
882 result = domain->domain_id == handles->domain_ids [slot];
885 obj = handles->entries [slot];
889 result = domain == mono_object_domain (obj);
892 /* print a warning? */
894 unlock_handles (handles);
899 * mono_gchandle_free:
900 * @gchandle: a GCHandle's handle.
902 * Frees the @gchandle handle. If there are no outstanding
903 * references, the garbage collector can reclaim the memory of the
907 mono_gchandle_free (guint32 gchandle)
909 guint slot = gchandle >> 3;
910 guint type = (gchandle & 7) - 1;
911 HandleData *handles = &gc_handles [type];
915 lock_handles (handles);
916 if (slot < handles->size && (handles->bitmap [slot / 32] & (1 << (slot % 32)))) {
917 if (handles->type <= HANDLE_WEAK_TRACK) {
/* weak handles: tear down the GC weak link before releasing the slot */
918 if (handles->entries [slot])
919 mono_gc_weak_link_remove (&handles->entries [slot], handles->type == HANDLE_WEAK_TRACK);
921 handles->entries [slot] = NULL;
/* release the slot back to the bitmap */
923 handles->bitmap [slot / 32] &= ~(1 << (slot % 32));
925 /* print a warning? */
927 #ifndef DISABLE_PERFCOUNTERS
928 mono_perfcounters->gc_num_handles--;
930 /*g_print ("freed entry %d of type %d\n", slot, handles->type);*/
931 unlock_handles (handles);
932 mono_profiler_gc_handle (MONO_PROFILER_GC_HANDLE_DESTROYED, handles->type, gchandle, NULL);
936 * mono_gchandle_free_domain:
937 * @domain: domain that is unloading
939 * Function used internally to cleanup any GC handle for objects belonging
940 * to the specified domain during appdomain unload.
943 mono_gchandle_free_domain (MonoDomain *domain)
/* NOTE(review): the loop bound of 3 appears to skip the HANDLE_PINNED
 * bucket — confirm against the full source whether that is intentional */
947 for (type = 0; type < 3; ++type) {
949 HandleData *handles = &gc_handles [type];
950 lock_handles (handles);
951 for (slot = 0; slot < handles->size; ++slot) {
952 if (!(handles->bitmap [slot / 32] & (1 << (slot % 32))))
954 if (type <= HANDLE_WEAK_TRACK) {
/* weak handles record the owning domain id separately */
955 if (domain->domain_id == handles->domain_ids [slot]) {
956 handles->bitmap [slot / 32] &= ~(1 << (slot % 32));
957 if (handles->entries [slot])
958 mono_gc_weak_link_remove (&handles->entries [slot], handles->type == HANDLE_WEAK_TRACK);
961 if (handles->entries [slot] && mono_object_domain (handles->entries [slot]) == domain) {
962 handles->bitmap [slot / 32] &= ~(1 << (slot % 32));
963 handles->entries [slot] = NULL;
967 unlock_handles (handles);
/* icall helper: does @gchandle's target belong to the current domain? */
973 GCHandle_CheckCurrentDomain (guint32 gchandle)
975 return mono_gchandle_is_in_domain (gchandle, mono_domain_get ());
980 #ifdef MONO_HAS_SEMAPHORES
981 static MonoSemType finalizer_sem;
983 static HANDLE finalizer_event;
984 static volatile gboolean finished=FALSE;
/*
 * mono_gc_finalize_notify:
 * Wake the finalizer thread, via semaphore post where available, otherwise
 * via the finalizer event.
 */
987 mono_gc_finalize_notify (void)
990 g_message ( "%s: prodding finalizer", __func__);
993 #ifdef MONO_HAS_SEMAPHORES
994 MONO_SEM_POST (&finalizer_sem);
996 SetEvent (finalizer_event);
1000 #ifdef HAVE_BOEHM_GC
/* GHashTable foreach callback: accumulate each key (object) into a GPtrArray */
1003 collect_objects (gpointer key, gpointer value, gpointer user_data)
1005 GPtrArray *arr = (GPtrArray*)user_data;
1006 g_ptr_array_add (arr, key);
1012 * finalize_domain_objects:
1014 * Run the finalizers of all finalizable objects in req->domain.
1017 finalize_domain_objects (DomainFinalizationReq *req)
1019 MonoDomain *domain = req->domain;
1022 #define NUM_FOBJECTS 64
1023 MonoObject *to_finalize [NUM_FOBJECTS];
1027 /* Process finalizers which are already in the queue */
1028 mono_gc_invoke_finalizers ();
1030 #ifdef HAVE_BOEHM_GC
/* Boehm path: repeatedly snapshot and finalize until the hash is empty */
1031 while (g_hash_table_size (domain->finalizable_objects_hash) > 0) {
1035 * Since the domain is unloading, nobody is allowed to put
1036 * new entries into the hash table. But finalize_object might
1037 * remove entries from the hash table, so we make a copy.
1039 objs = g_ptr_array_new ();
1040 g_hash_table_foreach (domain->finalizable_objects_hash, collect_objects, objs);
1041 /* printf ("FINALIZING %d OBJECTS.\n", objs->len); */
1043 for (i = 0; i < objs->len; ++i) {
1044 MonoObject *o = (MonoObject*)g_ptr_array_index (objs, i);
1045 /* FIXME: Avoid finalizing threads, etc */
1046 mono_gc_run_finalize (o, 0);
1049 g_ptr_array_free (objs, TRUE);
1051 #elif defined(HAVE_SGEN_GC)
/* SGen path: drain the domain's finalizers in fixed-size batches */
1052 while ((count = mono_gc_finalizers_for_domain (domain, to_finalize, NUM_FOBJECTS))) {
1054 for (i = 0; i < count; ++i) {
1055 mono_gc_run_finalize (to_finalize [i], 0);
1060 /* cleanup the reference queue */
1061 reference_queue_clear_for_domain (domain);
1063 /* printf ("DONE.\n"); */
/* signal the requester blocked in mono_domain_finalize */
1064 SetEvent (req->done_event);
1066 /* The event is closed in mono_domain_finalize if we get here */
/*
 * finalizer_thread:
 * Main loop of the dedicated finalizer thread: wait (alertably) for work,
 * service thread dumps / console ops / attach requests, run any pending
 * appdomain finalization requests, invoke queued finalizers, process the
 * reference queue, and signal waiters. Exits when mono_gc_cleanup sets the
 * finished flag, signalling shutdown_event on the way out.
 */
1071 finalizer_thread (gpointer unused)
1074 /* Wait to be notified that there's at least one
1078 g_assert (mono_domain_get () == mono_get_root_domain ());
1080 /* An alertable wait is required so this thread can be suspended on windows */
1081 #ifdef MONO_HAS_SEMAPHORES
1082 MONO_SEM_WAIT_ALERTABLE (&finalizer_sem, TRUE);
1084 WaitForSingleObjectEx (finalizer_event, INFINITE, TRUE);
1087 mono_threads_perform_thread_dump ();
1089 mono_console_handle_async_ops ();
1091 #ifndef DISABLE_ATTACH
1092 mono_attach_maybe_start ();
/* double-checked under the finalizer lock: pop one domain request per pass */
1095 if (domains_to_finalize) {
1096 mono_finalizer_lock ();
1097 if (domains_to_finalize) {
1098 DomainFinalizationReq *req = domains_to_finalize->data;
1099 domains_to_finalize = g_slist_remove (domains_to_finalize, req);
1100 mono_finalizer_unlock ();
1102 finalize_domain_objects (req);
1104 mono_finalizer_unlock ();
1108 /* If finished == TRUE, mono_gc_cleanup has been called (from mono_runtime_cleanup),
1109 * before the domain is unloaded.
1111 mono_gc_invoke_finalizers ();
1113 reference_queue_proccess_all ();
/* release anyone blocked in WaitForPendingFinalizers */
1115 SetEvent (pending_done_event);
1118 SetEvent (shutdown_event);
1122 #ifndef LAZY_GC_THREAD_CREATION
/* mono_gc_init_finalizer_thread: spawn the internal finalizer thread in the
 * current (root) domain and name it "Finalizer" */
1126 mono_gc_init_finalizer_thread (void)
1128 gc_thread = mono_thread_create_internal (mono_domain_get (), finalizer_thread, NULL, FALSE, TRUE, 0);
1129 ves_icall_System_Threading_Thread_SetName_internal (gc_thread, mono_string_new (mono_domain_get (), "Finalizer"));
/* GC subsystem initialization: locks, GC roots for strong handle buckets,
 * performance counters, events/semaphore, and (unless lazy) the finalizer
 * thread itself. Skips finalizer setup entirely when the GC is disabled. */
1135 InitializeCriticalSection (&handle_section);
1136 InitializeCriticalSection (&allocator_section);
1138 InitializeCriticalSection (&finalizer_mutex);
1139 InitializeCriticalSection (&reference_queue_mutex);
/* only strong/pinned entries arrays are GC-visible roots; weak buckets use weak links */
1141 MONO_GC_REGISTER_ROOT_FIXED (gc_handles [HANDLE_NORMAL].entries);
1142 MONO_GC_REGISTER_ROOT_FIXED (gc_handles [HANDLE_PINNED].entries);
1144 mono_counters_register ("Created object count", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &mono_stats.new_object_count);
1145 mono_counters_register ("Minor GC collections", MONO_COUNTER_GC | MONO_COUNTER_INT, &gc_stats.minor_gc_count);
1146 mono_counters_register ("Major GC collections", MONO_COUNTER_GC | MONO_COUNTER_INT, &gc_stats.major_gc_count);
1147 mono_counters_register ("Minor GC time", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &gc_stats.minor_gc_time_usecs);
1148 mono_counters_register ("Major GC time", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &gc_stats.major_gc_time_usecs);
1150 mono_gc_base_init ();
1152 if (mono_gc_is_disabled ()) {
1157 finalizer_event = CreateEvent (NULL, FALSE, FALSE, NULL);
1158 pending_done_event = CreateEvent (NULL, TRUE, FALSE, NULL);
1159 shutdown_event = CreateEvent (NULL, TRUE, FALSE, NULL);
1160 if (finalizer_event == NULL || pending_done_event == NULL || shutdown_event == NULL) {
1161 g_assert_not_reached ();
1163 #ifdef MONO_HAS_SEMAPHORES
1164 MONO_SEM_INIT (&finalizer_sem, 0);
1167 #ifndef LAZY_GC_THREAD_CREATION
1168 mono_gc_init_finalizer_thread ();
/*
 * mono_gc_cleanup:
 * Shutdown-time teardown: asks the finalizer thread to exit and waits ~2s
 * for it; on timeout it suspends further finalization, aborts the thread,
 * and waits again (then joins on POSIX so the thread has truly exited).
 * Finally clears the Boehm notifier, cleans the reference queue, and
 * deletes all critical sections.
 */
1173 mono_gc_cleanup (void)
1176 g_message ("%s: cleaning up finalizer", __func__);
1180 ResetEvent (shutdown_event);
/* if we ARE the finalizer thread there is nobody to hand off to */
1182 if (mono_thread_internal_current () != gc_thread) {
1183 mono_gc_finalize_notify ();
1184 /* Finishing the finalizer thread, so wait a little bit... */
1185 /* MS seems to wait for about 2 seconds */
1186 if (WaitForSingleObjectEx (shutdown_event, 2000, FALSE) == WAIT_TIMEOUT) {
1189 /* Set a flag which the finalizer thread can check */
1190 suspend_finalizers = TRUE;
1192 /* Try to abort the thread, in the hope that it is running managed code */
1193 mono_thread_internal_stop (gc_thread);
1195 /* Wait for it to stop */
1196 ret = WaitForSingleObjectEx (gc_thread->handle, 100, TRUE);
1198 if (ret == WAIT_TIMEOUT) {
1200 * The finalizer thread refused to die. There is not much we
1201 * can do here, since the runtime is shutting down so the
1202 * state the finalizer thread depends on will vanish.
1204 g_warning ("Shutting down finalizer thread timed out.");
1207 * FIXME: On unix, when the above wait returns, the thread
1208 * might still be running io-layer code, or pthreads code.
1215 /* Wait for the thread to actually exit */
1216 ret = WaitForSingleObjectEx (gc_thread->handle, INFINITE, TRUE);
1217 g_assert (ret == WAIT_OBJECT_0);
1221 * The above wait only waits for the exited event to be signalled, the thread might still be running. To fix this race, we
1222 * create the finalizer thread without calling pthread_detach () on it, so we can wait for it manually.
1224 ret = pthread_join ((MonoNativeThreadId)(gpointer)(gsize)gc_thread->tid, NULL);
1225 g_assert (ret == 0);
1230 #ifdef HAVE_BOEHM_GC
1231 GC_finalizer_notifier = NULL;
1235 mono_reference_queue_cleanup ();
1237 DeleteCriticalSection (&handle_section);
1238 DeleteCriticalSection (&allocator_section);
1239 DeleteCriticalSection (&finalizer_mutex);
1240 DeleteCriticalSection (&reference_queue_mutex);
1245 /* Null GC dummy functions */
/* No GC configured: there is no finalizer thread to notify, so this is a no-op. */
1247 mono_gc_finalize_notify (void)
/* Null-GC init: only the handle lock is needed; no finalizer thread is created. */
1251 void mono_gc_init (void)
1253 InitializeCriticalSection (&handle_section);
/* Nothing to tear down for the null GC. */
1256 void mono_gc_cleanup (void)
/*
 * mono_gc_is_finalizer_internal_thread:
 * @thread: the internal thread to test.
 *
 * Returns TRUE if @thread is the runtime's finalizer thread (gc_thread).
 */
1263 mono_gc_is_finalizer_internal_thread (MonoInternalThread *thread)
1265 return thread == gc_thread;
1269 * mono_gc_is_finalizer_thread:
1270 * @thread: the thread to test.
1272 * In Mono objects are finalized asynchronously on a separate thread.
1273 * This routine tests whether the @thread argument represents the
1274 * finalization thread.
1276 * Returns true if @thread is the finalization thread.
1279 mono_gc_is_finalizer_thread (MonoThread *thread)
/* MonoThread is a wrapper; delegate to the MonoInternalThread comparison. */
1281 return mono_gc_is_finalizer_internal_thread (thread->internal_thread);
1284 #if defined(__MACH__)
/* The thread that handles Mach exceptions; set once via the register call below. */
1285 static pthread_t mach_exception_thread;
/* Record @thread as the Mach exception handler thread. */
1288 mono_gc_register_mach_exception_thread (pthread_t thread)
1290 mach_exception_thread = thread;
/* Return the previously registered Mach exception handler thread. */
1294 mono_gc_get_mach_exception_thread (void)
1296 return mach_exception_thread;
1301 * mono_gc_parse_environment_string_extract_number:
1303 * @str: points to the first digit of the number
1304 * @out: pointer to the variable that will receive the value
1306 * Tries to extract a number from the passed string, taking into account m, k
1309 * Returns true if parsing was successful
1312 mono_gc_parse_environment_string_extract_number (const char *str, glong *out)
1315 int len = strlen (str), shift = 0;
1317 gboolean is_suffix = FALSE;
/* Inspect the last character for a size suffix (k/m/g, case-insensitive). */
1323 suffix = str [len - 1];
/* No recognized suffix: the string must then end in a digit. */
1338 if (!isdigit (suffix))
/* strtol with the standard errno/endptr error-checking idiom. */
1344 val = strtol (str, &endptr, 10);
1346 if ((errno == ERANGE && (val == LONG_MAX || val == LONG_MIN))
1347 || (errno != 0 && val == 0) || (endptr == str))
/* Suffixed value: validate, then left-shift by the suffix magnitude. */
1353 if (val < 0) /* negative numbers cannot be suffixed */
1355 if (*(endptr + 1)) /* Invalid string. */
1358 unshifted = (gulong)val;
/* The shifted value must round-trip; otherwise the input was too large. */
1360 if (val < 0) /* overflow */
1362 if (((gulong)val >> shift) != unshifted) /* value too large */
1370 #ifndef HAVE_SGEN_GC
/*
 * mono_gc_alloc_mature:
 *
 * Non-SGen fallback: without a nursery there is no "mature" space, so this
 * is simply a normal allocation of an object for @vtable's class.
 */
1372 mono_gc_alloc_mature (MonoVTable *vtable)
1374 return mono_object_new_specific (vtable);
/* Global singly-linked list of all live reference queues; list mutations
 * happen under reference_queue_mutex (see mono_gc_reference_queue_new). */
1379 static MonoReferenceQueue *ref_queues;
/*
 * ref_list_remove_element:
 *
 * Lock-free removal of @element from the list rooted at @prev: walk to the
 * link that points at @element, CAS it over to element->next, and retry the
 * walk if a concurrent mutation made the CAS fail.
 */
1382 ref_list_remove_element (RefQueueEntry **prev, RefQueueEntry *element)
1385 /* Guard if head is changed concurrently. */
1386 while (*prev != element)
1387 prev = &(*prev)->next;
1388 } while (prev && InterlockedCompareExchangePointer ((void*)prev, element->next, element) != element);
/*
 * ref_list_push:
 *
 * Lock-free push of @value onto the front of the list at @head,
 * using the classic CAS-retry loop.
 */
1392 ref_list_push (RefQueueEntry **head, RefQueueEntry *value)
1394 RefQueueEntry *current;
1397 value->next = current;
1398 STORE_STORE_FENCE; /*Must make sure the previous store is visible before the CAS. */
1399 } while (InterlockedCompareExchangePointer ((void*)head, value, current) != current);
/*
 * reference_queue_proccess:
 *
 * Walk @queue and, for every entry whose target object has been collected
 * (or unconditionally when the queue is flagged should_be_deleted), release
 * the weak reference, unlink the entry and invoke the queue callback with
 * the entry's user_data.  Entries whose target is still alive are skipped.
 */
1403 reference_queue_proccess (MonoReferenceQueue *queue)
1405 RefQueueEntry **iter = &queue->queue;
1406 RefQueueEntry *entry;
1407 while ((entry = *iter)) {
/* NOTE(review): both a weak-link path and a gchandle path appear here;
 * presumably selected by a GC-backend #ifdef (SGen vs. Boehm) — confirm. */
1409 if (queue->should_be_deleted || !mono_gc_weak_link_get (&entry->dis_link)) {
1410 mono_gc_weak_link_remove (&entry->dis_link, TRUE);
1412 if (queue->should_be_deleted || !mono_gchandle_get_target (entry->gchandle)) {
1413 mono_gchandle_free ((guint32)entry->gchandle);
1415 ref_list_remove_element (iter, entry);
/* Per the API contract, the callback runs without any locks held. */
1416 queue->callback (entry->user_data);
1419 iter = &entry->next;
/*
 * reference_queue_proccess_all:
 *
 * Process every registered reference queue, then prune from the global
 * list the queues that were flagged for deletion.
 */
1425 reference_queue_proccess_all (void)
1427 MonoReferenceQueue **iter;
1428 MonoReferenceQueue *queue = ref_queues;
1429 for (; queue; queue = queue->next)
1430 reference_queue_proccess (queue);
/* Second pass under the registry lock: unlink should_be_deleted queues. */
1433 EnterCriticalSection (&reference_queue_mutex);
1434 for (iter = &ref_queues; *iter;) {
1436 if (!queue->should_be_deleted) {
1437 iter = &queue->next;
/* Dying queue: drop the lock before the final process call (callbacks
 * must run unlocked), then unlink it from the list. */
1441 LeaveCriticalSection (&reference_queue_mutex);
1442 reference_queue_proccess (queue);
1445 *iter = queue->next;
1448 LeaveCriticalSection (&reference_queue_mutex);
/*
 * mono_reference_queue_cleanup:
 *
 * Runtime-shutdown hook (called from mono_gc_cleanup): flag every queue
 * for deletion and process them all so pending callbacks still fire.
 */
1452 mono_reference_queue_cleanup (void)
1454 MonoReferenceQueue *queue = ref_queues;
1455 for (; queue; queue = queue->next)
1456 queue->should_be_deleted = TRUE;
1457 reference_queue_proccess_all ();
/*
 * reference_queue_clear_for_domain:
 *
 * Domain-unload hook: purge from every queue the entries whose target
 * object belongs to @domain, releasing the weak reference and firing the
 * queue callback for each purged entry.
 */
1461 reference_queue_clear_for_domain (MonoDomain *domain)
1463 MonoReferenceQueue *queue = ref_queues;
1464 for (; queue; queue = queue->next) {
1465 RefQueueEntry **iter = &queue->queue;
1466 RefQueueEntry *entry;
1467 while ((entry = *iter)) {
/* NOTE(review): weak-link vs. gchandle lookup — presumably two GC-backend
 * #ifdef branches (SGen vs. Boehm); confirm against the full source. */
1470 obj = mono_gc_weak_link_get (&entry->dis_link);
1471 if (obj && mono_object_domain (obj) == domain) {
1472 mono_gc_weak_link_remove (&entry->dis_link, TRUE);
1474 obj = mono_gchandle_get_target (entry->gchandle);
1475 if (obj && mono_object_domain (obj) == domain) {
1476 mono_gchandle_free ((guint32)entry->gchandle);
1478 ref_list_remove_element (iter, entry);
1479 queue->callback (entry->user_data);
1482 iter = &entry->next;
1488 * mono_gc_reference_queue_new:
1489 * @callback callback used when processing dead entries.
1491 * Create a new reference queue used to process collected objects.
1492 * A reference queue lets you queue a pair (managed object, user data)
1493 * using the mono_gc_reference_queue_add method.
1495 * Once the managed object is collected @callback will be called
1496 * in the finalizer thread with 'user data' as argument.
1498 * The callback is called without any locks held.
1501 mono_gc_reference_queue_new (mono_reference_queue_callback callback)
1503 MonoReferenceQueue *res = g_new0 (MonoReferenceQueue, 1);
1504 res->callback = callback;
/* Prepend the new queue to the global ref_queues list under the registry lock. */
1506 EnterCriticalSection (&reference_queue_mutex);
1507 res->next = ref_queues;
1509 LeaveCriticalSection (&reference_queue_mutex);
1515 * mono_gc_reference_queue_add:
1516 * @queue the queue to add the reference to.
1517 * @obj the object to be watched for collection
1518 * @user_data parameter to be passed to the queue callback
1520 * Queue an object to be watched for collection, when the @obj is
1521 * collected, the callback that was registered for the @queue will
1522 * be invoked with the @obj and @user_data arguments.
1524 * @returns false if the queue is scheduled to be freed.
1527 mono_gc_reference_queue_add (MonoReferenceQueue *queue, MonoObject *obj, void *user_data)
1529 RefQueueEntry *entry;
/* Refuse new entries once the queue has been flagged for deletion. */
1530 if (queue->should_be_deleted)
1533 entry = g_new0 (RefQueueEntry, 1);
1534 entry->user_data = user_data;
/* Track @obj weakly: a GC weak link on one backend, a weak gchandle on the
 * other (GC-specific #ifdef branches, elided in this view). */
1537 mono_gc_weak_link_add (&entry->dis_link, obj, TRUE);
1539 entry->gchandle = mono_gchandle_new_weakref (obj, TRUE);
/* NOTE(review): presumably registers a finalizer so the queue gets
 * processed when @obj dies on this backend — confirm. */
1540 mono_object_register_finalizer (obj);
/* Lock-free publish of the new entry onto the queue. */
1543 ref_list_push (&queue->queue, entry);
1548 * mono_gc_reference_queue_free:
1549 * @queue the queue that should be deleted.
1551 * This operation signals that @queue should be deleted. This operation is deferred
1552 * as it happens on the finalizer thread.
1554 * After this call, no further objects can be queued. It's the responsibility of the
1555 * caller to make sure that no further attempt to access queue will be made.
1558 mono_gc_reference_queue_free (MonoReferenceQueue *queue)
/* The actual unlinking/freeing happens later, in reference_queue_proccess_all. */
1560 queue->should_be_deleted = TRUE;
/* Word-alignment helpers for the GC-safe memory routines below. */
1563 #define ptr_mask ((sizeof (void*) - 1))
1564 #define _toi(ptr) ((size_t)ptr)
/* Bytes by which PTR overshoots the previous word boundary (0 if aligned). */
1565 #define unaligned_bytes(ptr) (_toi(ptr) & ptr_mask)
1566 #define align_down(ptr) ((void*)(_toi(ptr) & ~ptr_mask))
1567 #define align_up(ptr) ((void*) ((_toi(ptr) + ptr_mask) & ~ptr_mask))
/* Zero WORDS pointer-sized words at DEST using word-wide stores, so a
 * concurrent GC never observes a torn (partially written) pointer. */
1569 #define BZERO_WORDS(dest,words) do { \
1571 for (__i = 0; __i < (words); ++__i) \
1572 ((void **)(dest))[__i] = 0; \
1577 * @dest: address to start to clear
1578 * @size: size of the region to clear
1580 * Zero @size bytes starting at @dest.
1582 * Use this to zero memory that can hold managed pointers.
1584 * FIXME borrow faster code from some BSD libc or bionic
1587 mono_gc_bzero (void *dest, size_t size)
1589 char *d = (char*)dest;
1590 size_t tail_bytes, word_bytes;
1593 If we're copying less than a word, just use memset.
1595 We cannot bail out early if both are aligned because some implementations
1596 use byte copying for sizes smaller than 16. OSX, on this case.
1598 if (size < sizeof(void*)) {
1599 memset (dest, 0, size);
1603 /*align to word boundary */
1604 while (unaligned_bytes (d) && size) {
/* Bulk of the region: zero whole words at a time. */
1609 /* copy all words with memmove */
1610 word_bytes = (size_t)align_down (size);
/* Small word counts take the unrolled BZERO_WORDS stores; presumably the
 * default case falls back to memset (case bodies elided in this view). */
1611 switch (word_bytes) {
1612 case sizeof (void*) * 1:
1615 case sizeof (void*) * 2:
1618 case sizeof (void*) * 3:
1621 case sizeof (void*) * 4:
1625 memset (d, 0, word_bytes);
/* Zero any remaining tail bytes one at a time. */
1628 tail_bytes = unaligned_bytes (size);
1633 } while (--tail_bytes);
1639 * @dest: destination of the move
1641 * @size: size of the block to move
1643 * Move @size bytes from @src to @dest.
1644 * size MUST be a multiple of sizeof (gpointer)
1648 mono_gc_memmove (void *dest, const void *src, size_t size)
1651 If we're copying less than a word we don't need to worry about word tearing
1652 so we bailout to memmove early.
1654 If both dest is aligned and size is a multiple of word size, we can go straigh
/* NOTE(review): masking with sizeof (void*) tests a single bit, not the low
 * alignment bits — should this be ptr_mask ((sizeof (void*) - 1))? confirm. */
1658 if (size < sizeof(void*) || !((_toi (dest) | (size)) & sizeof (void*))) {
1659 memmove (dest, src, size);
1664 * A bit of explanation on why we align only dest before doing word copies.
1665 * Pointers to managed objects must always be stored in word aligned addresses, so
1666 * even if dest is misaligned, src will be by the same amount - this ensure proper atomicity of reads.
1668 * We don't need to case when source and destination have different alignments since we only do word stores
1669 * using memmove, which must handle it.
/* Overlapping forward move: copy from the end backwards to avoid clobbering
 * source bytes before they are read. */
1671 if (dest > src && ((size_t)((char*)dest - (char*)src) < size)) { /*backward copy*/
1672 char *p = (char*)dest + size;
1673 char *s = (char*)src + size;
1674 char *start = (char*)dest;
1675 char *align_end = MAX((char*)dest, (char*)align_down (p));
1677 size_t bytes_to_memmove;
/* Byte-copy the unaligned tail down to a word boundary... */
1679 while (p > align_end)
/* ...then let memmove handle the remaining word-aligned middle. */
1682 word_start = align_up (start);
1683 bytes_to_memmove = p - word_start;
1684 p -= bytes_to_memmove;
1685 s -= bytes_to_memmove;
1686 memmove (p, s, bytes_to_memmove);
/* Non-overlapping (or backward-overlap) case: forward copy. */
1691 char *d = (char*)dest;
1692 const char *s = (const char*)src;
1695 /*align to word boundary */
1696 while (unaligned_bytes (d)) {
1701 /* copy all words with memmove */
1702 memmove (d, s, (size_t)align_down (size));
1704 tail_bytes = unaligned_bytes (size);
1706 d += (size_t)align_down (size);
1707 s += (size_t)align_down (size);
1710 } while (--tail_bytes);