2 * sgen-mono.c: SGen features specific to Mono.
4 * Copyright (C) 2014 Xamarin Inc
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Library General Public
8 * License 2.0 as published by the Free Software Foundation;
10 * This library is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Library General Public License for more details.
15 * You should have received a copy of the GNU Library General Public
16 * License 2.0 along with this library; if not, write to the Free
17 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
23 #include "metadata/sgen-gc.h"
24 #include "metadata/sgen-protocol.h"
25 #include "metadata/monitor.h"
26 #include "metadata/sgen-layout-stats.h"
27 #include "metadata/sgen-client.h"
28 #include "metadata/sgen-cardtable.h"
29 #include "metadata/sgen-pinning.h"
30 #include "metadata/marshal.h"
31 #include "metadata/method-builder.h"
32 #include "metadata/abi-details.h"
33 #include "metadata/mono-gc.h"
34 #include "metadata/runtime.h"
35 #include "metadata/sgen-bridge-internal.h"
36 #include "metadata/gc-internal.h"
37 #include "utils/mono-memory-model.h"
38 #include "utils/mono-logger-internal.h"
/* NOTE(review): this file is a line-numbered LISTING of sgen-mono.c; the
 * leading integer on each line is the original file's line number, and many
 * original lines are elided.  Comments below annotate the code as-is. */
40 /* If set, mark stacks conservatively, even if precise marking is possible */
41 static gboolean conservative_stack_mark = FALSE;
42 /* If set, check that there are no references to the domain left at domain unload */
43 gboolean sgen_mono_xdomain_checks = FALSE;
45 /* Functions supplied by the runtime to be called by the GC */
46 static MonoGCCallbacks gc_callbacks;
/* Fast TLS slot for the current thread's SGen bookkeeping. */
49 __thread SgenThreadInfo *sgen_thread_info;
/* NOTE(review): presumably the OS-level TLS key for the same per-thread
 * info on platforms without __thread — confirm against thread setup code. */
51 MonoNativeTlsKey thread_info_key;
/* Round `val` up to a multiple of `align` (align must be a power of two). */
54 #define ALIGN_TO(val,align) ((((guint64)val) + ((align) - 1)) & ~((align) - 1))
56 #define OPDEF(a,b,c,d,e,f,g,h,i,j) \
60 #include "mono/cil/opcode.def"
/* Whether `ptr` lies on the calling thread's stack: between the address of a
 * fresh local (an approximation of the current stack top) and the thread's
 * registered stack_end.  (Return statements elided in this listing.) */
71 ptr_on_stack (void *ptr)
73 gpointer stack_start = &stack_start;
74 SgenThreadInfo *info = mono_thread_info_current ();
76 if (ptr >= stack_start && ptr < (gpointer)info->client_info.stack_end)
81 #ifdef SGEN_HEAVY_BINARY_PROTOCOL
/* HANDLE_PTR is the per-reference-slot hook consumed by sgen-scan-object.h:
 * it logs a heavy-binary-protocol wbarrier record for the slot's mirror
 * position inside `dest`.  (Macro continuation lines elided in listing.) */
83 #define HANDLE_PTR(ptr,obj) do { \
84 gpointer o = *(gpointer*)(ptr); \
86 gpointer d = ((char*)dest) + ((char*)(ptr) - (char*)(obj)); \
87 binary_protocol_wbarrier (d, o, (gpointer) SGEN_LOAD_VTABLE (o)); \
/* Walks the reference slots of the object at `start` (descriptor `desc`)
 * and records wbarrier protocol events for its copy at `dest`; the scan
 * loop body is generated by including sgen-scan-object.h. */
92 scan_object_for_binary_protocol_copy_wbarrier (gpointer dest, char *start, mword desc)
94 #define SCAN_OBJECT_NOVTABLE
95 #include "sgen-scan-object.h"
/* Write barrier for copying `count` valuetype instances of `klass` from
 * `src` to `dest`.  Fast path: when `dest` is in the nursery, on the stack,
 * or the class descriptor carries no references, a plain memmove suffices
 * and no remembered-set entry is needed.  (Some lines elided in listing.) */
100 mono_gc_wbarrier_value_copy (gpointer dest, gpointer src, int count, MonoClass *klass)
102 HEAVY_STAT (++stat_wbarrier_value_copy);
103 g_assert (klass->valuetype);
105 SGEN_LOG (8, "Adding value remset at %p, count %d, descr %p for class %s (%p)", dest, count, klass->gc_descr, klass->name, klass);
107 if (sgen_ptr_in_nursery (dest) || ptr_on_stack (dest) || !sgen_gc_descr_has_references ((mword)klass->gc_descr)) {
108 size_t element_size = mono_class_value_size (klass, NULL);
109 size_t size = count * element_size;
110 mono_gc_memmove_atomic (dest, src, size);
/* Heavy binary protocol: log each element copy individually.  Note the
 * `- sizeof (MonoObject)` on src: the scanner expects object-start, while
 * valuetype data has no header. */
114 #ifdef SGEN_HEAVY_BINARY_PROTOCOL
115 if (binary_protocol_is_heavy_enabled ()) {
116 size_t element_size = mono_class_value_size (klass, NULL);
118 for (i = 0; i < count; ++i) {
119 scan_object_for_binary_protocol_copy_wbarrier ((char*)dest + i * element_size,
120 (char*)src + i * element_size - sizeof (MonoObject),
121 (mword) klass->gc_descr);
/* Slow path: delegate to the active remembered-set implementation. */
126 sgen_get_remset ()->wbarrier_value_copy (dest, src, count, mono_class_value_size (klass, NULL));
130 * mono_gc_wbarrier_object_copy:
132 * Write barrier to call when obj is the result of a clone or copy of an object.
/* Fast path: destination in nursery or on the stack needs no remset; copy
 * the payload (everything after the MonoObject header) directly. */
135 mono_gc_wbarrier_object_copy (MonoObject* obj, MonoObject *src)
139 HEAVY_STAT (++stat_wbarrier_object_copy);
141 if (sgen_ptr_in_nursery (obj) || ptr_on_stack (obj)) {
142 size = mono_object_class (obj)->instance_size;
143 mono_gc_memmove_aligned ((char*)obj + sizeof (MonoObject), (char*)src + sizeof (MonoObject),
144 size - sizeof (MonoObject));
148 #ifdef SGEN_HEAVY_BINARY_PROTOCOL
149 if (binary_protocol_is_heavy_enabled ())
150 scan_object_for_binary_protocol_copy_wbarrier (obj, (char*)src, (mword) src->vtable->gc_descr);
/* Otherwise remember the whole-object copy via the remset implementation. */
153 sgen_get_remset ()->wbarrier_object_copy (obj, src);
/* Write barrier for storing object reference `value` into array slot
 * `slot_ptr`.  Nursery slots are stored directly (no remset needed);
 * otherwise the store is routed through the remset's set_field barrier.
 * (Some lines elided in this listing.) */
157 mono_gc_wbarrier_set_arrayref (MonoArray *arr, gpointer slot_ptr, MonoObject* value)
159 HEAVY_STAT (++stat_wbarrier_set_arrayref);
160 if (sgen_ptr_in_nursery (slot_ptr)) {
161 *(void**)slot_ptr = value;
164 SGEN_LOG (8, "Adding remset at %p", slot_ptr);
166 binary_protocol_wbarrier (slot_ptr, value, value->vtable);
168 sgen_get_remset ()->wbarrier_set_field ((GCObject*)arr, slot_ptr, value);
/* Thin wrapper: bitmap-driven valuetype copy barrier, forwarded to sgen. */
172 mono_gc_wbarrier_value_copy_bitmap (gpointer _dest, gpointer _src, int size, unsigned bitmap)
174 sgen_wbarrier_value_copy_bitmap (_dest, _src, size, bitmap);
/* Cached managed write-barrier wrappers, one for the concurrent collector
 * and one for the non-concurrent case (lazily built below). */
177 static MonoMethod *write_barrier_conc_method;
178 static MonoMethod *write_barrier_noconc_method;
/* A method is "critical" if interrupting it mid-body is unsafe for the GC:
 * the write-barrier wrappers and the managed allocators. */
181 sgen_is_critical_method (MonoMethod *method)
183 return (method == write_barrier_conc_method || method == write_barrier_noconc_method || sgen_is_managed_allocator (method));
/* Whether any critical method has been created yet. */
187 sgen_has_critical_method (void)
189 return write_barrier_conc_method || write_barrier_noconc_method || sgen_has_managed_allocator ();
/* Emits IL into `mb` that branches out (labels returned via
 * nursery_check_return_labels) when the barrier can be skipped:
 *  [0] taken when the destination pointer itself is in the nursery;
 *  [1] (non-concurrent only) taken when the stored value is NOT in the
 *      nursery.  The nursery test compares (ptr >> DEFAULT_NURSERY_BITS)
 *      against the pre-shifted nursery start. */
195 emit_nursery_check (MonoMethodBuilder *mb, int *nursery_check_return_labels, gboolean is_concurrent)
197 int shifted_nursery_start = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
199 memset (nursery_check_return_labels, 0, sizeof (int) * 2);
200 // if (ptr_in_nursery (ptr)) return;
202 * Masking out the bits might be faster, but we would have to use 64 bit
203 * immediates, which might be slower.
/* shifted_nursery_start = nursery_start >> DEFAULT_NURSERY_BITS */
205 mono_mb_emit_byte (mb, MONO_CUSTOM_PREFIX);
206 mono_mb_emit_byte (mb, CEE_MONO_LDPTR_NURSERY_START);
207 mono_mb_emit_icon (mb, DEFAULT_NURSERY_BITS);
208 mono_mb_emit_byte (mb, CEE_SHR_UN);
209 mono_mb_emit_stloc (mb, shifted_nursery_start);
/* if ((arg0 >> DEFAULT_NURSERY_BITS) == shifted_nursery_start) goto ret */
211 mono_mb_emit_ldarg (mb, 0);
212 mono_mb_emit_icon (mb, DEFAULT_NURSERY_BITS);
213 mono_mb_emit_byte (mb, CEE_SHR_UN);
214 mono_mb_emit_ldloc (mb, shifted_nursery_start);
215 nursery_check_return_labels [0] = mono_mb_emit_branch (mb, CEE_BEQ);
217 if (!is_concurrent) {
218 // if (!ptr_in_nursery (*ptr)) return;
219 mono_mb_emit_ldarg (mb, 0);
220 mono_mb_emit_byte (mb, CEE_LDIND_I);
221 mono_mb_emit_icon (mb, DEFAULT_NURSERY_BITS);
222 mono_mb_emit_byte (mb, CEE_SHR_UN);
223 mono_mb_emit_ldloc (mb, shifted_nursery_start);
224 nursery_check_return_labels [1] = mono_mb_emit_branch (mb, CEE_BNE_UN);
/* Builds (or returns the cached) managed write-barrier wrapper for the
 * concurrent or non-concurrent collector.  Signature: void (intptr ptr).
 * With MANAGED_WBARRIER the IL marks the card for `ptr`; otherwise (and on
 * the slow path) it falls back to the mono_gc_wbarrier_generic_nostore
 * icall.  (Many lines elided in this listing.) */
230 mono_gc_get_specific_write_barrier (gboolean is_concurrent)
233 MonoMethodBuilder *mb;
234 MonoMethodSignature *sig;
235 MonoMethod **write_barrier_method_addr;
236 #ifdef MANAGED_WBARRIER
237 int i, nursery_check_labels [2];
240 // FIXME: Maybe create a separate version for ctors (the branch would be
241 // correctly predicted more times)
/* Pick the cache slot for the requested variant. */
243 write_barrier_method_addr = &write_barrier_conc_method;
245 write_barrier_method_addr = &write_barrier_noconc_method;
247 if (*write_barrier_method_addr)
248 return *write_barrier_method_addr;
250 /* Create the IL version of mono_gc_barrier_generic_store () */
251 sig = mono_metadata_signature_alloc (mono_defaults.corlib, 1);
252 sig->ret = &mono_defaults.void_class->byval_arg;
253 sig->params [0] = &mono_defaults.int_class->byval_arg;
256 mb = mono_mb_new (mono_defaults.object_class, "wbarrier_conc", MONO_WRAPPER_WRITE_BARRIER);
258 mb = mono_mb_new (mono_defaults.object_class, "wbarrier_noconc", MONO_WRAPPER_WRITE_BARRIER);
261 #ifdef MANAGED_WBARRIER
262 emit_nursery_check (mb, nursery_check_labels, is_concurrent);
264 addr = sgen_cardtable + ((address >> CARD_BITS) & CARD_MASK)
268 LDC_PTR sgen_cardtable
274 if (SGEN_HAVE_OVERLAPPING_CARDS) {
275 LDC_PTR card_table_mask
/* Emit: card address = cardtable + ((arg0 >> CARD_BITS) [& CARD_MASK]) */
282 mono_mb_emit_byte (mb, MONO_CUSTOM_PREFIX);
283 mono_mb_emit_byte (mb, CEE_MONO_LDPTR_CARD_TABLE);
284 mono_mb_emit_ldarg (mb, 0);
285 mono_mb_emit_icon (mb, CARD_BITS);
286 mono_mb_emit_byte (mb, CEE_SHR_UN);
287 mono_mb_emit_byte (mb, CEE_CONV_I);
288 #ifdef SGEN_HAVE_OVERLAPPING_CARDS
289 #if SIZEOF_VOID_P == 8
290 mono_mb_emit_icon8 (mb, CARD_MASK);
292 mono_mb_emit_icon (mb, CARD_MASK);
294 mono_mb_emit_byte (mb, CEE_CONV_I);
295 mono_mb_emit_byte (mb, CEE_AND);
/* *card = 1 */
297 mono_mb_emit_byte (mb, CEE_ADD);
298 mono_mb_emit_icon (mb, 1);
299 mono_mb_emit_byte (mb, CEE_STIND_I1);
/* Patch the early-out branches from emit_nursery_check to land on RET. */
302 for (i = 0; i < 2; ++i) {
303 if (nursery_check_labels [i])
304 mono_mb_patch_branch (mb, nursery_check_labels [i]);
306 mono_mb_emit_byte (mb, CEE_RET);
/* Non-managed-wbarrier fallback: call the generic nostore icall. */
308 mono_mb_emit_ldarg (mb, 0);
309 mono_mb_emit_icall (mb, mono_gc_wbarrier_generic_nostore);
310 mono_mb_emit_byte (mb, CEE_RET);
313 res = mono_mb_create_method (mb, sig, 16);
/* Publish with double-checked locking; discard our copy if we lost. */
317 if (*write_barrier_method_addr) {
318 /* Already created */
319 mono_free_method (res);
321 /* double-checked locking */
322 mono_memory_barrier ();
323 *write_barrier_method_addr = res;
327 return *write_barrier_method_addr;
/* Returns the write-barrier wrapper matching the active major collector's
 * concurrency mode. */
331 mono_gc_get_write_barrier (void)
333 return mono_gc_get_specific_write_barrier (major_collector.is_concurrent);
337 * Dummy filler objects
340 /* Vtable of the objects used to fill out nursery fragments before a collection */
341 static GCVTable *array_fill_vtable;
/* Lazily builds a fake byte-array vtable (backed by static storage, aligned
 * to 8) used to stamp dummy filler objects over unused nursery space so the
 * heap stays parseable.  Not thread-safe by itself; NOTE(review): presumably
 * callers hold the GC lock — confirm.  (Some lines elided in listing.) */
344 get_array_fill_vtable (void)
346 if (!array_fill_vtable) {
347 static MonoClass klass;
348 static char _vtable[sizeof(MonoVTable)+8];
349 MonoVTable* vtable = (MonoVTable*) ALIGN_TO(_vtable, 8);
352 MonoDomain *domain = mono_get_root_domain ();
/* Fake class: a byte array with 1-byte elements. */
355 klass.element_class = mono_defaults.byte_class;
357 klass.instance_size = sizeof (MonoArray);
358 klass.sizes.element_size = 1;
359 klass.name = "array_filler_type";
361 vtable->klass = &klass;
363 vtable->gc_descr = mono_gc_make_descr_for_array (TRUE, &bmap, 0, 1);
366 array_fill_vtable = (GCVTable*)vtable;
368 return array_fill_vtable;
/* Fills [start, start+size) with a dummy byte-array object so the nursery
 * remains walkable; ranges smaller than a MonoArray header are just zeroed.
 * synchronisation == -1 marks the filler as not a real object. */
372 sgen_client_array_fill_range (char *start, size_t size)
376 if (size < sizeof (MonoArray)) {
377 memset (start, 0, size);
381 o = (MonoArray*)start;
382 o->obj.vtable = (MonoVTable*)get_array_fill_vtable ();
383 /* Mark this as not a real object */
384 o->obj.synchronisation = GINT_TO_POINTER (-1);
386 o->max_length = (mono_array_size_t)(size - sizeof (MonoArray));
/* Re-zeroes the filler header at `p` so the TLAB segment is clean for
 * allocation; for sub-header sizes it only asserts the memory is zero. */
392 sgen_client_zero_array_fill_header (void *p, size_t size)
394 if (size >= sizeof (MonoArray)) {
395 memset (p, 0, sizeof (MonoArray));
397 static guint8 zeros [sizeof (MonoArray)];
399 SGEN_ASSERT (0, !memcmp (p, zeros, size), "TLAB segment must be zeroed out.");
/* Finalization-awareness callbacks registered by the embedding host. */
407 static MonoGCFinalizerCallbacks fin_callbacks;
/* Computes the SGen gc_bits for a class's vtable: bridge-object bits when
 * bridge processing is active, plus the finalizer-aware bit when the host
 * callback says so.  NOTE(review): the switch cases have no visible breaks
 * because those lines are elided in this listing. */
410 mono_gc_get_vtable_bits (MonoClass *class)
413 /* FIXME move this to the bridge code */
414 if (sgen_need_bridge_processing ()) {
415 switch (sgen_bridge_class_kind (class)) {
416 case GC_BRIDGE_TRANSPARENT_BRIDGE_CLASS:
417 case GC_BRIDGE_OPAQUE_BRIDGE_CLASS:
418 res = SGEN_GC_BIT_BRIDGE_OBJECT;
420 case GC_BRIDGE_OPAQUE_CLASS:
421 res = SGEN_GC_BIT_BRIDGE_OPAQUE_OBJECT;
423 case GC_BRIDGE_TRANSPARENT_CLASS:
427 if (fin_callbacks.is_class_finalization_aware) {
428 if (fin_callbacks.is_class_finalization_aware (class))
429 res |= SGEN_GC_BIT_FINALIZER_AWARE;
/* Whether the object's vtable carries the finalizer-aware gc bit. */
435 is_finalization_aware (MonoObject *obj)
437 MonoVTable *vt = ((MonoVTable*)SGEN_LOAD_VTABLE (obj));
438 return (vt->gc_bits & SGEN_GC_BIT_FINALIZER_AWARE) == SGEN_GC_BIT_FINALIZER_AWARE;
/* Hook run when `obj` is queued for finalization: notifies the host
 * callback (for finalizer-aware classes) and fires the DTrace/probe
 * MONO_GC_FINALIZE_ENQUEUE event when enabled. */
442 sgen_client_object_queued_for_finalization (GCObject *obj)
444 if (fin_callbacks.object_queued_for_finalization && is_finalization_aware (obj))
445 fin_callbacks.object_queued_for_finalization (obj);
448 if (G_UNLIKELY (MONO_GC_FINALIZE_ENQUEUE_ENABLED ())) {
449 int gen = sgen_ptr_in_nursery (obj) ? GENERATION_NURSERY : GENERATION_OLD;
450 GCVTable *vt = (GCVTable*)SGEN_LOAD_VTABLE (obj);
451 MONO_GC_FINALIZE_ENQUEUE ((mword)obj, sgen_safe_object_get_size (obj),
452 sgen_client_vtable_get_namespace (vt), sgen_client_vtable_get_name (vt), gen,
453 sgen_client_object_has_critical_finalizer (obj));
/* Registers host finalization callbacks; aborts on version mismatch. */
459 mono_gc_register_finalizer_callbacks (MonoGCFinalizerCallbacks *callbacks)
461 if (callbacks->version != MONO_GC_FINALIZER_EXTENSION_VERSION)
462 g_error ("Invalid finalizer callback version. Expected %d but got %d\n", MONO_GC_FINALIZER_EXTENSION_VERSION, callbacks->version);
464 fin_callbacks = *callbacks;
/* Thin client-glue wrappers between the SGen core and the Mono runtime's
 * finalization machinery. */
468 sgen_client_run_finalize (MonoObject *obj)
470 mono_gc_run_finalize (obj, NULL);
474 mono_gc_invoke_finalizers (void)
476 return sgen_gc_invoke_finalizers ();
480 mono_gc_pending_finalizers (void)
482 return sgen_have_pending_finalizers ();
486 sgen_client_finalize_notify (void)
488 mono_gc_finalize_notify ();
492 mono_gc_register_for_finalization (MonoObject *obj, void *user_data)
494 sgen_object_register_for_finalization (obj, user_data);
/* Predicate: true when `obj` belongs to the appdomain passed in user_data;
 * used to filter finalizers/links during domain unload. */
498 object_in_domain_predicate (MonoObject *obj, void *user_data)
500 MonoDomain *domain = user_data;
501 if (mono_object_domain (obj) == domain) {
502 SGEN_LOG (5, "Unregistering finalizer for object: %p (%s)", obj, sgen_client_vtable_get_name (SGEN_LOAD_VTABLE (obj)));
509 * mono_gc_finalizers_for_domain:
510 * @domain: the unloading appdomain
511 * @out_array: output array
512 * @out_size: size of output array
514 * Store inside @out_array up to @out_size objects that belong to the unloading
515 * appdomain @domain. Returns the number of stored items. Can be called repeatedly
516 * until it returns 0.
517 * The items are removed from the finalizer data structure, so the caller is supposed
519 * @out_array should be on the stack to allow the GC to know the objects are still alive.
522 mono_gc_finalizers_for_domain (MonoDomain *domain, MonoObject **out_array, int out_size)
524 return sgen_gather_finalizers_if (object_in_domain_predicate, domain, out_array, out_size);
/* Singly-linked registry of all live ephemeron arrays
 * (ConditionalWeakTable backing stores). */
531 typedef struct _EphemeronLinkNode EphemeronLinkNode;
533 struct _EphemeronLinkNode {
534 EphemeronLinkNode *next;
543 static EphemeronLinkNode *ephemeron_list;
545 /* LOCKING: requires that the GC lock is held */
/* Unlinks and frees every ephemeron registration whose array belongs to the
 * unloading `domain`.  (Some list-surgery lines elided in this listing.) */
547 null_ephemerons_for_domain (MonoDomain *domain)
549 EphemeronLinkNode *current = ephemeron_list, *prev = NULL;
552 MonoObject *object = (MonoObject*)current->array;
555 SGEN_ASSERT (0, object->vtable, "Can't have objects without vtables.");
557 if (object && object->vtable->domain == domain) {
558 EphemeronLinkNode *tmp = current;
/* Splice the node out (head vs. middle of list), then free it. */
561 prev->next = current->next;
563 ephemeron_list = current->next;
565 current = current->next;
566 sgen_free_internal (tmp, INTERNAL_MEM_EPHEMERON_LINK);
569 current = current->next;
574 /* LOCKING: requires that the GC lock is held */
/* Second phase of ephemeron processing: drops registrations whose array
 * died, updates forwarded array pointers via copy_func, and tombstones
 * entries whose key is unreachable in the current generation.
 * (Some lines elided in this listing.) */
576 sgen_client_clear_unreachable_ephemerons (ScanCopyContext ctx)
578 CopyOrMarkObjectFunc copy_func = ctx.ops->copy_or_mark_object;
579 SgenGrayQueue *queue = ctx.queue;
580 EphemeronLinkNode *current = ephemeron_list, *prev = NULL;
582 Ephemeron *cur, *array_end;
586 char *object = current->array;
/* Array itself is dead: unlink and free its registration node. */
588 if (!sgen_is_object_alive_for_current_gen (object)) {
589 EphemeronLinkNode *tmp = current;
591 SGEN_LOG (5, "Dead Ephemeron array at %p", object);
594 prev->next = current->next;
596 ephemeron_list = current->next;
598 current = current->next;
599 sgen_free_internal (tmp, INTERNAL_MEM_EPHEMERON_LINK);
/* Array is alive: chase forwarding (copy_func may move it) and store
 * the possibly-updated pointer back into the node. */
604 copy_func ((void**)&object, queue);
605 current->array = object;
607 SGEN_LOG (5, "Clearing unreachable entries for ephemeron array at %p", object);
609 array = (MonoArray*)object;
610 cur = mono_array_addr (array, Ephemeron, 0);
611 array_end = cur + mono_array_length_fast (array);
612 tombstone = (char*)((MonoVTable*)SGEN_LOAD_VTABLE (object))->domain->ephemeron_tombstone;
614 for (; cur < array_end; ++cur) {
615 char *key = (char*)cur->key;
/* Empty or already-tombstoned slots need no work. */
617 if (!key || key == tombstone)
620 SGEN_LOG (5, "[%td] key %p (%s) value %p (%s)", cur - mono_array_addr (array, Ephemeron, 0),
621 key, sgen_is_object_alive_for_current_gen (key) ? "reachable" : "unreachable",
622 cur->value, cur->value && sgen_is_object_alive_for_current_gen (cur->value) ? "reachable" : "unreachable");
/* Dead key: replace with the tombstone so the slot can be reused. */
624 if (!sgen_is_object_alive_for_current_gen (key)) {
625 cur->key = tombstone;
631 current = current->next;
636 LOCKING: requires that the GC lock is held
638 Limitations: We scan all ephemerons on every collection since the current design doesn't allow for a simple nursery/mature split.
/* First phase of ephemeron processing: for every live ephemeron array, mark
 * values whose keys are reachable.  Returns TRUE when a fixed point has been
 * reached (no previously-dead value was newly marked), so callers iterate
 * until this returns TRUE.  (Some lines elided in this listing.) */
641 sgen_client_mark_ephemerons (ScanCopyContext ctx)
643 CopyOrMarkObjectFunc copy_func = ctx.ops->copy_or_mark_object;
644 SgenGrayQueue *queue = ctx.queue;
645 gboolean nothing_marked = TRUE;
646 EphemeronLinkNode *current = ephemeron_list;
648 Ephemeron *cur, *array_end;
651 for (current = ephemeron_list; current; current = current->next) {
652 char *object = current->array;
653 SGEN_LOG (5, "Ephemeron array at %p", object);
655 /*It has to be alive*/
656 if (!sgen_is_object_alive_for_current_gen (object)) {
657 SGEN_LOG (5, "\tnot reachable");
/* Chase forwarding: the array may have been moved by the collector. */
661 copy_func ((void**)&object, queue);
663 array = (MonoArray*)object;
664 cur = mono_array_addr (array, Ephemeron, 0);
665 array_end = cur + mono_array_length_fast (array);
666 tombstone = (char*)((MonoVTable*)SGEN_LOAD_VTABLE (object))->domain->ephemeron_tombstone;
668 for (; cur < array_end; ++cur) {
669 char *key = cur->key;
671 if (!key || key == tombstone)
674 SGEN_LOG (5, "[%td] key %p (%s) value %p (%s)", cur - mono_array_addr (array, Ephemeron, 0),
675 key, sgen_is_object_alive_for_current_gen (key) ? "reachable" : "unreachable",
676 cur->value, cur->value && sgen_is_object_alive_for_current_gen (cur->value) ? "reachable" : "unreachable");
/* Live key keeps its value alive; a newly-marked value means another
 * fixed-point iteration is required (nothing_marked = FALSE). */
678 if (sgen_is_object_alive_for_current_gen (key)) {
679 char *value = cur->value;
681 copy_func ((void**)&cur->key, queue);
683 if (!sgen_is_object_alive_for_current_gen (value))
684 nothing_marked = FALSE;
685 copy_func ((void**)&cur->value, queue);
691 SGEN_LOG (5, "Ephemeron run finished. Is it done %d", nothing_marked);
692 return nothing_marked;
/* Registers `obj` (an ephemeron array) by pushing a new node onto the
 * global ephemeron_list.  (Lock/return lines elided in this listing;
 * NOTE(review): presumably runs under the GC lock — confirm.) */
696 mono_gc_ephemeron_array_add (MonoObject *obj)
698 EphemeronLinkNode *node;
702 node = sgen_alloc_internal (INTERNAL_MEM_EPHEMERON_LINK);
707 node->array = (char*)obj;
708 node->next = ephemeron_list;
709 ephemeron_list = node;
711 SGEN_LOG (5, "Registered ephemeron array %p", obj);
/* Records the appdomain the current thread was stopped in, for use while
 * the world is stopped. */
722 mono_gc_set_current_thread_appdomain (MonoDomain *domain)
724 SgenThreadInfo *info = mono_thread_info_current ();
726 /* Could be called from sgen_thread_unregister () with a NULL info */
729 info->client_info.stopped_domain = domain;
/* True when the object at `start` belongs to the unloading `domain` and so
 * must be removed; also logs a cleanup protocol record for it. */
734 need_remove_object_for_domain (char *start, MonoDomain *domain)
736 if (mono_object_domain (start) == domain) {
737 SGEN_LOG (4, "Need to cleanup object %p", start);
738 binary_protocol_cleanup (start, (gpointer)SGEN_LOAD_VTABLE (start), sgen_safe_object_get_size ((GCObject*)start));
/* Pre-cleanup pass for one object during domain unload: asserts internal
 * threads live in the root domain, and nulls out a remoting proxy's
 * unwrapped_server when the server belongs to the dying domain (or was
 * already zeroed). */
745 process_object_for_domain_clearing (char *start, MonoDomain *domain)
747 MonoVTable *vt = (MonoVTable*)SGEN_LOAD_VTABLE (start);
748 if (vt->klass == mono_defaults.internal_thread_class)
749 g_assert (mono_object_domain (start) == mono_get_root_domain ());
750 /* The object could be a proxy for an object in the domain
752 #ifndef DISABLE_REMOTING
753 if (mono_defaults.real_proxy_class->supertypes && mono_class_has_parent_fast (vt->klass, mono_defaults.real_proxy_class)) {
754 MonoObject *server = ((MonoRealProxy*)start)->unwrapped_server;
756 /* The server could already have been zeroed out, so
757 we need to check for that, too. */
758 if (server && (!SGEN_LOAD_VTABLE (server) || mono_object_domain (server) == domain)) {
759 SGEN_LOG (4, "Cleaning up remote pointer in %p to object %p", start, server);
760 ((MonoRealProxy*)start)->unwrapped_server = NULL;
/* Combines the proxy-clearing pass with the domain-membership test; when
 * the object is being removed and has a monitor, also drops the monitor's
 * weak link.  (Return lines elided in this listing.) */
767 clear_domain_process_object (char *obj, MonoDomain *domain)
771 process_object_for_domain_clearing (obj, domain);
772 remove = need_remove_object_for_domain (obj, domain);
774 if (remove && ((MonoObject*)obj)->synchronisation) {
775 void **dislink = mono_monitor_get_object_monitor_weak_link ((MonoObject*)obj);
777 sgen_register_disappearing_link (NULL, dislink, FALSE, TRUE);
/* Nursery variant: objects to remove are memset to zero in place. */
784 clear_domain_process_minor_object_callback (char *obj, size_t size, MonoDomain *domain)
786 if (clear_domain_process_object (obj, domain)) {
787 CANARIFY_SIZE (size);
788 memset (obj, 0, size);
/* Major-heap first pass: process only (freeing happens in a later pass). */
793 clear_domain_process_major_object_callback (char *obj, size_t size, MonoDomain *domain)
795 clear_domain_process_object (obj, domain);
/* Major-heap second pass: free non-pinned / pinned objects of the domain. */
799 clear_domain_free_major_non_pinned_object_callback (char *obj, size_t size, MonoDomain *domain)
801 if (need_remove_object_for_domain (obj, domain))
802 major_collector.free_non_pinned_object (obj, size);
806 clear_domain_free_major_pinned_object_callback (char *obj, size_t size, MonoDomain *domain)
808 if (need_remove_object_for_domain (obj, domain))
809 major_collector.free_pinned_object (obj, size);
813 * When appdomains are unloaded we can easily remove objects that have finalizers,
814 * but all the others could still be present in random places on the heap.
815 * We need a sweep to get rid of them even though it's going to be costly
817 * The reason we need to remove them is because we access the vtable and class
818 * structures to know the object size and the reference bitmap: once the domain is
819 * unloaded they point to random memory.
/* Full heap sweep removing all objects that belong to the unloading domain:
 * nursery, major heap (two passes) and LOS.  NOTE(review): the matching
 * stop-the-world call and several declarations are elided in this listing;
 * the restart_world at the end implies the world is stopped throughout. */
822 mono_gc_clear_domain (MonoDomain * domain)
824 LOSObject *bigobj, *prev;
829 binary_protocol_domain_unload_begin (domain);
/* A concurrent collection would race with the sweep: force it to finish. */
833 if (sgen_concurrent_collection_in_progress ())
834 sgen_perform_collection (0, GENERATION_OLD, "clear domain", TRUE);
835 SGEN_ASSERT (0, !sgen_concurrent_collection_in_progress (), "We just ordered a synchronous collection. Why are we collecting concurrently?");
837 major_collector.finish_sweeping ();
839 sgen_process_fin_stage_entries ();
840 sgen_process_dislink_stage_entries ();
842 sgen_clear_nursery_fragments ();
/* Optional debug pass: verify no cross-domain references remain. */
844 if (sgen_mono_xdomain_checks && domain != mono_get_root_domain ()) {
845 sgen_scan_for_registered_roots_in_domain (domain, ROOT_TYPE_NORMAL);
846 sgen_scan_for_registered_roots_in_domain (domain, ROOT_TYPE_WBARRIER);
847 sgen_check_for_xdomain_refs ();
850 /*Ephemerons and dislinks must be processed before LOS since they might end up pointing
851 to memory returned to the OS.*/
852 null_ephemerons_for_domain (domain);
854 for (i = GENERATION_NURSERY; i < GENERATION_MAX; ++i)
855 sgen_null_links_if (object_in_domain_predicate, domain, i);
857 for (i = GENERATION_NURSERY; i < GENERATION_MAX; ++i)
858 sgen_remove_finalizers_if (object_in_domain_predicate, domain, i);
860 sgen_scan_area_with_callback (nursery_section->data, nursery_section->end_data,
861 (IterateObjectCallbackFunc)clear_domain_process_minor_object_callback, domain, FALSE);
863 /* We need two passes over major and large objects because
864 freeing such objects might give their memory back to the OS
865 (in the case of large objects) or obliterate its vtable
866 (pinned objects with major-copying or pinned and non-pinned
867 objects with major-mark&sweep), but we might need to
868 dereference a pointer from an object to another object if
869 the first object is a proxy. */
870 major_collector.iterate_objects (ITERATE_OBJECTS_SWEEP_ALL, (IterateObjectCallbackFunc)clear_domain_process_major_object_callback, domain);
871 for (bigobj = los_object_list; bigobj; bigobj = bigobj->next)
872 clear_domain_process_object (bigobj->data, domain);
/* LOS second pass: unlink and free dying large objects. */
875 for (bigobj = los_object_list; bigobj;) {
876 if (need_remove_object_for_domain (bigobj->data, domain)) {
877 LOSObject *to_free = bigobj;
879 prev->next = bigobj->next;
881 los_object_list = bigobj->next;
882 bigobj = bigobj->next;
883 SGEN_LOG (4, "Freeing large object %p", bigobj->data);
884 sgen_los_free_object (to_free);
888 bigobj = bigobj->next;
890 major_collector.iterate_objects (ITERATE_OBJECTS_SWEEP_NON_PINNED, (IterateObjectCallbackFunc)clear_domain_free_major_non_pinned_object_callback, domain);
891 major_collector.iterate_objects (ITERATE_OBJECTS_SWEEP_PINNED, (IterateObjectCallbackFunc)clear_domain_free_major_pinned_object_callback, domain);
/* Root domain unload == runtime shutdown: dump accumulated stats. */
893 if (domain == mono_get_root_domain ()) {
894 sgen_pin_stats_print_class_stats ();
895 sgen_object_layout_dump (stdout);
898 sgen_restart_world (0, NULL);
900 binary_protocol_domain_unload_end (domain);
901 binary_protocol_flush_buffers (FALSE);
/* Public allocation entry points, thin wrappers over the SGen core. */
911 mono_gc_alloc_obj (MonoVTable *vtable, size_t size)
913 return sgen_alloc_obj (vtable, size);
917 mono_gc_alloc_pinned_obj (MonoVTable *vtable, size_t size)
919 return sgen_alloc_obj_pinned (vtable, size);
/* Allocates directly in the old generation; finalizable objects must be
 * registered explicitly since the managed fast path is bypassed. */
923 mono_gc_alloc_mature (MonoVTable *vtable)
925 MonoObject *obj = sgen_alloc_obj_mature (vtable, vtable->klass->instance_size);
926 if (obj && G_UNLIKELY (obj->vtable->klass->has_finalize))
927 mono_object_register_finalizer (obj);
/* Allocates zeroed non-moving memory and registers it as a GC root with
 * descriptor `descr`.  (Error-path lines elided in this listing.) */
932 mono_gc_alloc_fixed (size_t size, void *descr)
934 /* FIXME: do a single allocation */
935 void *res = calloc (1, size);
938 if (!mono_gc_register_root (res, size, descr)) {
/* Deregisters the root; NOTE(review): the matching free() of the calloc'd
 * memory is in elided lines — confirm. */
946 mono_gc_free_fixed (void* addr)
948 mono_gc_deregister_root (addr);
/* Cache of managed allocator wrappers, one per allocator type (ATYPE_*). */
956 static MonoMethod* alloc_method_cache [ATYPE_NUM];
957 static gboolean use_managed_allocator = TRUE;
/* TLS-access IL emitters used by create_allocator: push the address of the
 * current thread's tlab_next (NEXT_ADDR) or the tlab_temp_end value
 * (TEMP_END).  Three variants: fast __thread TLS keys, SgenThreadInfo
 * field offsets (Apple/Windows), or a hard error for pthread TLS.
 * (Macro continuation lines elided in this listing.) */
959 #ifdef MANAGED_ALLOCATION
961 #ifdef HAVE_KW_THREAD
963 #define EMIT_TLS_ACCESS_NEXT_ADDR(mb) do { \
964 mono_mb_emit_byte ((mb), MONO_CUSTOM_PREFIX); \
965 mono_mb_emit_byte ((mb), CEE_MONO_TLS); \
966 mono_mb_emit_i4 ((mb), TLS_KEY_SGEN_TLAB_NEXT_ADDR); \
969 #define EMIT_TLS_ACCESS_TEMP_END(mb) do { \
970 mono_mb_emit_byte ((mb), MONO_CUSTOM_PREFIX); \
971 mono_mb_emit_byte ((mb), CEE_MONO_TLS); \
972 mono_mb_emit_i4 ((mb), TLS_KEY_SGEN_TLAB_TEMP_END); \
977 #if defined(__APPLE__) || defined (HOST_WIN32)
978 #define EMIT_TLS_ACCESS_NEXT_ADDR(mb) do { \
979 mono_mb_emit_byte ((mb), MONO_CUSTOM_PREFIX); \
980 mono_mb_emit_byte ((mb), CEE_MONO_TLS); \
981 mono_mb_emit_i4 ((mb), TLS_KEY_SGEN_THREAD_INFO); \
982 mono_mb_emit_icon ((mb), MONO_STRUCT_OFFSET (SgenThreadInfo, tlab_next_addr)); \
983 mono_mb_emit_byte ((mb), CEE_ADD); \
984 mono_mb_emit_byte ((mb), CEE_LDIND_I); \
987 #define EMIT_TLS_ACCESS_TEMP_END(mb) do { \
988 mono_mb_emit_byte ((mb), MONO_CUSTOM_PREFIX); \
989 mono_mb_emit_byte ((mb), CEE_MONO_TLS); \
990 mono_mb_emit_i4 ((mb), TLS_KEY_SGEN_THREAD_INFO); \
991 mono_mb_emit_icon ((mb), MONO_STRUCT_OFFSET (SgenThreadInfo, tlab_temp_end)); \
992 mono_mb_emit_byte ((mb), CEE_ADD); \
993 mono_mb_emit_byte ((mb), CEE_LDIND_I); \
997 #define EMIT_TLS_ACCESS_NEXT_ADDR(mb) do { g_error ("sgen is not supported when using --with-tls=pthread.\n"); } while (0)
998 #define EMIT_TLS_ACCESS_TEMP_END(mb) do { g_error ("sgen is not supported when using --with-tls=pthread.\n"); } while (0)
1003 /* FIXME: Do this in the JIT, where specialized allocation sequences can be created
1004 * for each class. This is currently not easy to do, as it is hard to generate basic
1005 * blocks + branches, but it is easy with the linear IL codebase.
1007 * For this to work we'd need to solve the TLAB race, first. Now we
1008 * require the allocator to be in a few known methods to make sure
1009 * that they are executed atomically via the restart mechanism.
1012 create_allocator (int atype)
1014 int p_var, size_var;
1015 guint32 slowpath_branch, max_size_branch;
1016 MonoMethodBuilder *mb;
1018 MonoMethodSignature *csig;
1019 static gboolean registered = FALSE;
1020 int tlab_next_addr_var, new_next_var;
1022 const char *name = NULL;
1023 AllocatorWrapperInfo *info;
1026 mono_register_jit_icall (mono_gc_alloc_obj, "mono_gc_alloc_obj", mono_create_icall_signature ("object ptr int"), FALSE);
1027 mono_register_jit_icall (mono_gc_alloc_vector, "mono_gc_alloc_vector", mono_create_icall_signature ("object ptr int int"), FALSE);
1028 mono_register_jit_icall (mono_gc_alloc_string, "mono_gc_alloc_string", mono_create_icall_signature ("object ptr int int32"), FALSE);
1032 if (atype == ATYPE_SMALL) {
1034 name = "AllocSmall";
1035 } else if (atype == ATYPE_NORMAL) {
1038 } else if (atype == ATYPE_VECTOR) {
1040 name = "AllocVector";
1041 } else if (atype == ATYPE_STRING) {
1043 name = "AllocString";
1045 g_assert_not_reached ();
1048 csig = mono_metadata_signature_alloc (mono_defaults.corlib, num_params);
1049 if (atype == ATYPE_STRING) {
1050 csig->ret = &mono_defaults.string_class->byval_arg;
1051 csig->params [0] = &mono_defaults.int_class->byval_arg;
1052 csig->params [1] = &mono_defaults.int32_class->byval_arg;
1054 csig->ret = &mono_defaults.object_class->byval_arg;
1055 for (i = 0; i < num_params; ++i)
1056 csig->params [i] = &mono_defaults.int_class->byval_arg;
1059 mb = mono_mb_new (mono_defaults.object_class, name, MONO_WRAPPER_ALLOC);
1062 size_var = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
1063 if (atype == ATYPE_SMALL) {
1064 /* size_var = size_arg */
1065 mono_mb_emit_ldarg (mb, 1);
1066 mono_mb_emit_stloc (mb, size_var);
1067 } else if (atype == ATYPE_NORMAL) {
1068 /* size = vtable->klass->instance_size; */
1069 mono_mb_emit_ldarg (mb, 0);
1070 mono_mb_emit_icon (mb, MONO_STRUCT_OFFSET (MonoVTable, klass));
1071 mono_mb_emit_byte (mb, CEE_ADD);
1072 mono_mb_emit_byte (mb, CEE_LDIND_I);
1073 mono_mb_emit_icon (mb, MONO_STRUCT_OFFSET (MonoClass, instance_size));
1074 mono_mb_emit_byte (mb, CEE_ADD);
1075 /* FIXME: assert instance_size stays a 4 byte integer */
1076 mono_mb_emit_byte (mb, CEE_LDIND_U4);
1077 mono_mb_emit_byte (mb, CEE_CONV_I);
1078 mono_mb_emit_stloc (mb, size_var);
1079 } else if (atype == ATYPE_VECTOR) {
1080 MonoExceptionClause *clause;
1081 int pos, pos_leave, pos_error;
1082 MonoClass *oom_exc_class;
1086 * n > MONO_ARRAY_MAX_INDEX => OutOfMemoryException
1087 * n < 0 => OverflowException
1089 * We can do an unsigned comparison to catch both cases, then in the error
1090 * case compare signed to distinguish between them.
1092 mono_mb_emit_ldarg (mb, 1);
1093 mono_mb_emit_icon (mb, MONO_ARRAY_MAX_INDEX);
1094 mono_mb_emit_byte (mb, CEE_CONV_U);
1095 pos = mono_mb_emit_short_branch (mb, CEE_BLE_UN_S);
1097 mono_mb_emit_byte (mb, MONO_CUSTOM_PREFIX);
1098 mono_mb_emit_byte (mb, CEE_MONO_NOT_TAKEN);
1099 mono_mb_emit_ldarg (mb, 1);
1100 mono_mb_emit_icon (mb, 0);
1101 pos_error = mono_mb_emit_short_branch (mb, CEE_BLT_S);
1102 mono_mb_emit_exception (mb, "OutOfMemoryException", NULL);
1103 mono_mb_patch_short_branch (mb, pos_error);
1104 mono_mb_emit_exception (mb, "OverflowException", NULL);
1106 mono_mb_patch_short_branch (mb, pos);
1108 clause = mono_image_alloc0 (mono_defaults.corlib, sizeof (MonoExceptionClause));
1109 clause->try_offset = mono_mb_get_label (mb);
1111 /* vtable->klass->sizes.element_size */
1112 mono_mb_emit_ldarg (mb, 0);
1113 mono_mb_emit_icon (mb, MONO_STRUCT_OFFSET (MonoVTable, klass));
1114 mono_mb_emit_byte (mb, CEE_ADD);
1115 mono_mb_emit_byte (mb, CEE_LDIND_I);
1116 mono_mb_emit_icon (mb, MONO_STRUCT_OFFSET (MonoClass, sizes));
1117 mono_mb_emit_byte (mb, CEE_ADD);
1118 mono_mb_emit_byte (mb, CEE_LDIND_U4);
1119 mono_mb_emit_byte (mb, CEE_CONV_I);
1122 mono_mb_emit_ldarg (mb, 1);
1123 mono_mb_emit_byte (mb, CEE_MUL_OVF_UN);
1124 /* + sizeof (MonoArray) */
1125 mono_mb_emit_icon (mb, sizeof (MonoArray));
1126 mono_mb_emit_byte (mb, CEE_ADD_OVF_UN);
1127 mono_mb_emit_stloc (mb, size_var);
1129 pos_leave = mono_mb_emit_branch (mb, CEE_LEAVE);
1132 clause->flags = MONO_EXCEPTION_CLAUSE_NONE;
1133 clause->try_len = mono_mb_get_pos (mb) - clause->try_offset;
1134 clause->data.catch_class = mono_class_from_name (mono_defaults.corlib,
1135 "System", "OverflowException");
1136 g_assert (clause->data.catch_class);
1137 clause->handler_offset = mono_mb_get_label (mb);
1139 oom_exc_class = mono_class_from_name (mono_defaults.corlib,
1140 "System", "OutOfMemoryException");
1141 g_assert (oom_exc_class);
1142 ctor = mono_class_get_method_from_name (oom_exc_class, ".ctor", 0);
1145 mono_mb_emit_byte (mb, CEE_POP);
1146 mono_mb_emit_op (mb, CEE_NEWOBJ, ctor);
1147 mono_mb_emit_byte (mb, CEE_THROW);
1149 clause->handler_len = mono_mb_get_pos (mb) - clause->handler_offset;
1150 mono_mb_set_clauses (mb, 1, clause);
1151 mono_mb_patch_branch (mb, pos_leave);
1153 } else if (atype == ATYPE_STRING) {
1157 * a string allocator method takes the args: (vtable, len)
1159 * bytes = offsetof (MonoString, chars) + ((len + 1) * 2)
1163 * bytes <= INT32_MAX - (SGEN_ALLOC_ALIGN - 1)
1167 * offsetof (MonoString, chars) + ((len + 1) * 2) <= INT32_MAX - (SGEN_ALLOC_ALIGN - 1)
1168 * len <= (INT32_MAX - (SGEN_ALLOC_ALIGN - 1) - offsetof (MonoString, chars)) / 2 - 1
1170 mono_mb_emit_ldarg (mb, 1);
1171 mono_mb_emit_icon (mb, (INT32_MAX - (SGEN_ALLOC_ALIGN - 1) - MONO_STRUCT_OFFSET (MonoString, chars)) / 2 - 1);
1172 pos = mono_mb_emit_short_branch (mb, MONO_CEE_BLE_UN_S);
1174 mono_mb_emit_byte (mb, MONO_CUSTOM_PREFIX);
1175 mono_mb_emit_byte (mb, CEE_MONO_NOT_TAKEN);
1176 mono_mb_emit_exception (mb, "OutOfMemoryException", NULL);
1177 mono_mb_patch_short_branch (mb, pos);
1179 mono_mb_emit_ldarg (mb, 1);
1180 mono_mb_emit_icon (mb, 1);
1181 mono_mb_emit_byte (mb, MONO_CEE_SHL);
1182 //WE manually fold the above + 2 here
1183 mono_mb_emit_icon (mb, MONO_STRUCT_OFFSET (MonoString, chars) + 2);
1184 mono_mb_emit_byte (mb, CEE_ADD);
1185 mono_mb_emit_stloc (mb, size_var);
1187 g_assert_not_reached ();
1190 if (atype != ATYPE_SMALL) {
1191 /* size += ALLOC_ALIGN - 1; */
1192 mono_mb_emit_ldloc (mb, size_var);
1193 mono_mb_emit_icon (mb, SGEN_ALLOC_ALIGN - 1);
1194 mono_mb_emit_byte (mb, CEE_ADD);
1195 /* size &= ~(ALLOC_ALIGN - 1); */
1196 mono_mb_emit_icon (mb, ~(SGEN_ALLOC_ALIGN - 1));
1197 mono_mb_emit_byte (mb, CEE_AND);
1198 mono_mb_emit_stloc (mb, size_var);
1201 /* if (size > MAX_SMALL_OBJ_SIZE) goto slowpath */
1202 if (atype != ATYPE_SMALL) {
1203 mono_mb_emit_ldloc (mb, size_var);
1204 mono_mb_emit_icon (mb, SGEN_MAX_SMALL_OBJ_SIZE);
1205 max_size_branch = mono_mb_emit_short_branch (mb, MONO_CEE_BGT_UN_S);
1209 * We need to modify tlab_next, but the JIT only supports reading, so we read
1210 * another tls var holding its address instead.
1213 /* tlab_next_addr (local) = tlab_next_addr (TLS var) */
1214 tlab_next_addr_var = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
1215 EMIT_TLS_ACCESS_NEXT_ADDR (mb);
1216 mono_mb_emit_stloc (mb, tlab_next_addr_var);
1218 /* p = (void**)tlab_next; */
1219 p_var = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
1220 mono_mb_emit_ldloc (mb, tlab_next_addr_var);
1221 mono_mb_emit_byte (mb, CEE_LDIND_I);
1222 mono_mb_emit_stloc (mb, p_var);
1224 /* new_next = (char*)p + size; */
1225 new_next_var = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
1226 mono_mb_emit_ldloc (mb, p_var);
1227 mono_mb_emit_ldloc (mb, size_var);
1228 mono_mb_emit_byte (mb, CEE_CONV_I);
1229 mono_mb_emit_byte (mb, CEE_ADD);
1230 mono_mb_emit_stloc (mb, new_next_var);
1232 /* if (G_LIKELY (new_next < tlab_temp_end)) */
1233 mono_mb_emit_ldloc (mb, new_next_var);
1234 EMIT_TLS_ACCESS_TEMP_END (mb);
1235 slowpath_branch = mono_mb_emit_short_branch (mb, MONO_CEE_BLT_UN_S);
1238 if (atype != ATYPE_SMALL)
1239 mono_mb_patch_short_branch (mb, max_size_branch);
1241 mono_mb_emit_byte (mb, MONO_CUSTOM_PREFIX);
1242 mono_mb_emit_byte (mb, CEE_MONO_NOT_TAKEN);
1244 /* FIXME: mono_gc_alloc_obj takes a 'size_t' as an argument, not an int32 */
1245 mono_mb_emit_ldarg (mb, 0);
1246 mono_mb_emit_ldloc (mb, size_var);
1247 if (atype == ATYPE_NORMAL || atype == ATYPE_SMALL) {
1248 mono_mb_emit_icall (mb, mono_gc_alloc_obj);
1249 } else if (atype == ATYPE_VECTOR) {
1250 mono_mb_emit_ldarg (mb, 1);
1251 mono_mb_emit_icall (mb, mono_gc_alloc_vector);
1252 } else if (atype == ATYPE_STRING) {
1253 mono_mb_emit_ldarg (mb, 1);
1254 mono_mb_emit_icall (mb, mono_gc_alloc_string);
1256 g_assert_not_reached ();
1258 mono_mb_emit_byte (mb, CEE_RET);
1261 mono_mb_patch_short_branch (mb, slowpath_branch);
1263 /* FIXME: Memory barrier */
1265 /* tlab_next = new_next */
1266 mono_mb_emit_ldloc (mb, tlab_next_addr_var);
1267 mono_mb_emit_ldloc (mb, new_next_var);
1268 mono_mb_emit_byte (mb, CEE_STIND_I);
1270 /*The tlab store must be visible before the the vtable store. This could be replaced with a DDS but doing it with IL would be tricky. */
1271 mono_mb_emit_byte (mb, MONO_CUSTOM_PREFIX);
1272 mono_mb_emit_byte (mb, CEE_MONO_MEMORY_BARRIER);
1273 mono_mb_emit_i4 (mb, MONO_MEMORY_BARRIER_REL);
1276 mono_mb_emit_ldloc (mb, p_var);
1277 mono_mb_emit_ldarg (mb, 0);
1278 mono_mb_emit_byte (mb, CEE_STIND_I);
1280 if (atype == ATYPE_VECTOR) {
1281 /* arr->max_length = max_length; */
1282 mono_mb_emit_ldloc (mb, p_var);
1283 mono_mb_emit_ldflda (mb, MONO_STRUCT_OFFSET (MonoArray, max_length));
1284 mono_mb_emit_ldarg (mb, 1);
1285 #ifdef MONO_BIG_ARRAYS
1286 mono_mb_emit_byte (mb, CEE_STIND_I);
1288 mono_mb_emit_byte (mb, CEE_STIND_I4);
1290 } else if (atype == ATYPE_STRING) {
1291 /* need to set length and clear the last char */
1292 /* s->length = len; */
1293 mono_mb_emit_ldloc (mb, p_var);
1294 mono_mb_emit_icon (mb, MONO_STRUCT_OFFSET (MonoString, length));
1295 mono_mb_emit_byte (mb, MONO_CEE_ADD);
1296 mono_mb_emit_ldarg (mb, 1);
1297 mono_mb_emit_byte (mb, MONO_CEE_STIND_I4);
1298 /* s->chars [len] = 0; */
1299 mono_mb_emit_ldloc (mb, p_var);
1300 mono_mb_emit_ldloc (mb, size_var);
1301 mono_mb_emit_icon (mb, 2);
1302 mono_mb_emit_byte (mb, MONO_CEE_SUB);
1303 mono_mb_emit_byte (mb, MONO_CEE_ADD);
1304 mono_mb_emit_icon (mb, 0);
1305 mono_mb_emit_byte (mb, MONO_CEE_STIND_I2);
1309 We must make sure both vtable and max_length are globaly visible before returning to managed land.
1311 mono_mb_emit_byte (mb, MONO_CUSTOM_PREFIX);
1312 mono_mb_emit_byte (mb, CEE_MONO_MEMORY_BARRIER);
1313 mono_mb_emit_i4 (mb, MONO_MEMORY_BARRIER_REL);
1316 mono_mb_emit_ldloc (mb, p_var);
1317 mono_mb_emit_byte (mb, CEE_RET);
1320 res = mono_mb_create_method (mb, csig, 8);
1322 mono_method_get_header (res)->init_locals = FALSE;
1324 info = mono_image_alloc0 (mono_defaults.corlib, sizeof (AllocatorWrapperInfo));
1325 info->gc_name = "sgen";
1326 info->alloc_type = atype;
1327 mono_marshal_set_wrapper_info (res, info);
/*
 * Round SIZE up to the SGen allocation alignment (SGEN_ALLOC_ALIGN,
 * a power of two).  Used so callers can predict the size an object
 * will actually occupy once allocated.
 */
1334 mono_gc_get_aligned_size_for_allocator (int size)
1336 int aligned_size = size;
/* classic power-of-two round-up: add (align - 1), then mask it off */
1337 aligned_size += SGEN_ALLOC_ALIGN - 1;
1338 aligned_size &= ~(SGEN_ALLOC_ALIGN - 1);
1339 return aligned_size;
1343 * Generate an allocator method implementing the fast path of mono_gc_alloc_obj ().
1344 * The signature of the called method is:
1345 * 	object allocate (MonoVTable *vtable)
/*
 * Pick (or refuse) a managed fast-path allocator for KLASS.  Each early
 * `if` below bails out to the unmanaged slow path (the bail-out bodies are
 * on lines not shown here); only classes that are small, finalizer-free,
 * non-remoted and not being profiled get a managed allocator.
 */
1348 mono_gc_get_managed_allocator (MonoClass *klass, gboolean for_box, gboolean known_instance_size)
1350 #ifdef MANAGED_ALLOCATION
/* per-allocation collection (debug mode) defeats the fast path */
1351 	if (collect_before_allocs)
/* the managed allocator reads TLAB pointers through TLS fast access */
1353 	if (!mono_runtime_has_tls_get ())
/* object would never fit in a thread-local allocation buffer */
1355 	if (klass->instance_size > tlab_size)
1357 	if (known_instance_size && ALIGN_TO (klass->instance_size, SGEN_ALLOC_ALIGN) >= SGEN_MAX_SMALL_OBJ_SIZE)
/* finalizers, remoting proxies and allocation profiling all need the slow path */
1359 	if (klass->has_finalize || mono_class_is_marshalbyref (klass) || (mono_profiler_get_events () & MONO_PROFILE_ALLOCATIONS))
1363 	if (klass->byval_arg.type == MONO_TYPE_STRING)
1364 		return mono_gc_get_managed_allocator_by_type (ATYPE_STRING);
1365 	/* Generic classes have dynamic field and can go above MAX_SMALL_OBJ_SIZE. */
1366 	if (known_instance_size)
1367 		return mono_gc_get_managed_allocator_by_type (ATYPE_SMALL);
1369 		return mono_gc_get_managed_allocator_by_type (ATYPE_NORMAL);
/*
 * Return a managed fast-path allocator for one-dimensional arrays of
 * KLASS, or bail to the slow path (bail-out bodies not visible here)
 * when profiling or per-allocation actions make the fast path invalid.
 */
1376 mono_gc_get_managed_array_allocator (MonoClass *klass)
1378 #ifdef MANAGED_ALLOCATION
/* only szarrays (rank 1, zero lower bound) have a fast path */
1379 	if (klass->rank != 1)
1381 	if (!mono_runtime_has_tls_get ())
1383 	if (mono_profiler_get_events () & MONO_PROFILE_ALLOCATIONS)
1385 	if (has_per_allocation_action)
/* arrays never have finalizers or remoting proxies */
1387 	g_assert (!mono_class_has_finalizer (klass) && !mono_class_is_marshalbyref (klass));
1389 	return mono_gc_get_managed_allocator_by_type (ATYPE_VECTOR);
/* Globally enable/disable use of the managed allocators. */
1396 sgen_set_use_managed_allocator (gboolean flag)
1398 	use_managed_allocator = flag;
/*
 * Return the cached allocator wrapper method for ATYPE, creating it on
 * first use.  Creation may race: if another thread populated the cache
 * first, the freshly built method is discarded and the cached one wins.
 */
1402 mono_gc_get_managed_allocator_by_type (int atype)
1404 #ifdef MANAGED_ALLOCATION
1407 	if (!use_managed_allocator)
1410 	if (!mono_runtime_has_tls_get ())
/* fast path: already built (read without lock; publication below uses a barrier) */
1413 	res = alloc_method_cache [atype];
1417 	res = create_allocator (atype);
/* lost the race — keep the winner's method, free ours */
1419 	if (alloc_method_cache [atype]) {
1420 		mono_free_method (res);
1421 		res = alloc_method_cache [atype];
/* make the method body visible before publishing the pointer */
1423 	mono_memory_barrier ();
1424 	alloc_method_cache [atype] = res;
1435 mono_gc_get_managed_allocator_types (void)
/* TRUE iff METHOD is one of the cached managed allocator wrappers. */
1441 sgen_is_managed_allocator (MonoMethod *method)
1445 	for (i = 0; i < ATYPE_NUM; ++i)
1446 		if (method == alloc_method_cache [i])
/* TRUE iff any managed allocator wrapper has been created so far. */
1452 sgen_has_managed_allocator (void)
1456 	for (i = 0; i < ATYPE_NUM; ++i)
1457 		if (alloc_method_cache [i])
1463 * Cardtable scanning
/* mask for aligning a pointer down to a machine-word boundary */
1466 #define MWORD_MASK (sizeof (mword) - 1)
/*
 * Given a word of card bytes with at least one nonzero byte, return the
 * byte index of the first marked card.  Uses find-first-set intrinsics
 * where available; the portable fallback scans byte by byte.
 */
1469 find_card_offset (mword card)
1471 	/*XXX Use assembly as this generates some pretty bad code */
1472 #if defined(__i386__) && defined(__GNUC__)
1473 	return  (__builtin_ffs (card) - 1) / 8;
1474 #elif defined(__x86_64__) && defined(__GNUC__)
1475 	return (__builtin_ffsll (card) - 1) / 8;
1476 #elif defined(__s390x__)
/* s390x is big-endian: byte-swap so ffs counts from the low-address byte */
1477 	return (__builtin_ffsll (GUINT64_TO_LE(card)) - 1) / 8;
1480 	guint8 *ptr = (guint8 *) &card;
1481 	for (i = 0; i < sizeof (mword); ++i) {
/*
 * Return the address of the first marked (nonzero) card byte in
 * [card_data, end), or `end` if none.  Scans unaligned head bytes,
 * then whole machine words, then the unaligned tail.
 */
1490 find_next_card (guint8 *card_data, guint8 *end)
1492 	mword *cards, *cards_end;
/* head: advance byte-wise until word-aligned (early-return bodies not shown) */
1495 	while ((((mword)card_data) & MWORD_MASK) && card_data < end) {
1501 	if (card_data == end)
/* middle: scan a full machine word of cards at a time */
1504 	cards = (mword*)card_data;
1505 	cards_end = (mword*)((mword)end & ~MWORD_MASK);
1506 	while (cards < cards_end) {
1509 			return (guint8*)cards + find_card_offset (card);
/* tail: remaining bytes after the last whole word */
1513 	card_data = (guint8*)cards_end;
1514 	while (card_data < end) {
/* index of the array element containing address PTR, given the element size */
1523 #define ARRAY_OBJ_INDEX(ptr,array,elem_size) (((char*)(ptr) - ((char*)(array) + G_STRUCT_OFFSET (MonoArray, vector))) / (elem_size))
/*
 * Scan the dirty cards covering OBJ (an array, as handled by the visible
 * code) and process the references found there: value-type elements are
 * scanned with the context's scan_vtype op, reference elements are
 * copied/marked word by word.  MOD_UNION selects mod-union semantics
 * (concurrent mark), in which case any non-NULL slot is processed.
 */
1526 sgen_client_cardtable_scan_object (char *obj, mword block_obj_size, guint8 *cards, gboolean mod_union, ScanCopyContext ctx)
1528 	MonoVTable *vt = (MonoVTable*)SGEN_LOAD_VTABLE (obj);
1529 	MonoClass *klass = vt->klass;
1531 	SGEN_ASSERT (0, SGEN_VTABLE_HAS_REFERENCES ((GCVTable*)vt), "Why would we ever call this on reference-free objects?");
1534 		guint8 *card_data, *card_base;
1535 		guint8 *card_data_end;
/* card-aligned start of the object — card index 0 maps here */
1536 		char *obj_start = sgen_card_table_align_pointer (obj);
1537 		mword obj_size = sgen_client_par_object_get_size (vt, (GCObject*)obj);
1538 		char *obj_end = obj + obj_size;
1540 		size_t extra_idx = 0;
1542 		MonoArray *arr = (MonoArray*)obj;
/* descriptor of the element class, used when scanning value-type elements */
1543 		mword desc = (mword)klass->element_class->gc_descr;
1544 		int elem_size = mono_array_element_size (klass);
1546 #ifdef SGEN_HAVE_OVERLAPPING_CARDS
1547 		guint8 *overflow_scan_end = NULL;
1550 #ifdef SGEN_OBJECT_LAYOUT_STATISTICS
1551 		if (klass->element_class->valuetype)
1552 			sgen_object_layout_scanned_vtype_array ();
1554 			sgen_object_layout_scanned_ref_array ();
1560 			card_data = sgen_card_table_get_card_scan_address ((mword)obj);
1562 		card_base = card_data;
1563 		card_count = sgen_card_table_number_of_cards_in_range ((mword)obj, obj_size);
1564 		card_data_end = card_data + card_count;
1567 #ifdef SGEN_HAVE_OVERLAPPING_CARDS
1568 		/*Check for overflow and if so, setup to scan in two steps*/
/* the card range wraps past the shadow table end; second pass set up below */
1569 		if (!cards && card_data_end >= SGEN_SHADOW_CARDTABLE_END) {
1570 			overflow_scan_end = sgen_shadow_cardtable + (card_data_end - SGEN_SHADOW_CARDTABLE_END);
1571 			card_data_end = SGEN_SHADOW_CARDTABLE_END;
/* iterate over marked cards only, skipping clean runs via find_next_card */
1577 		card_data = find_next_card (card_data, card_data_end);
1578 		for (; card_data < card_data_end; card_data = find_next_card (card_data + 1, card_data_end)) {
1580 			size_t idx = (card_data - card_base) + extra_idx;
/* memory range covered by this card, clipped to the object below */
1581 			char *start = (char*)(obj_start + idx * CARD_SIZE_IN_BYTES);
1582 			char *card_end = start + CARD_SIZE_IN_BYTES;
1583 			char *first_elem, *elem;
1585 			HEAVY_STAT (++los_marked_cards);
1588 				sgen_card_table_prepare_card_for_scanning (card_data);
1590 			card_end = MIN (card_end, obj_end);
1592 			if (start <= (char*)arr->vector)
1595 				index = ARRAY_OBJ_INDEX (start, obj, elem_size);
1597 			elem = first_elem = (char*)mono_array_addr_with_size_fast ((MonoArray*)obj, elem_size, index);
1598 			if (klass->element_class->valuetype) {
/* value-type elements: scan each struct in the card with scan_vtype */
1599 				ScanVTypeFunc scan_vtype_func = ctx.ops->scan_vtype;
1601 				for (; elem < card_end; elem += elem_size)
1602 					scan_vtype_func (obj, elem, desc, ctx.queue BINARY_PROTOCOL_ARG (elem_size));
/* reference elements: walk pointer-sized slots */
1604 				CopyOrMarkObjectFunc copy_func = ctx.ops->copy_or_mark_object;
1606 				HEAVY_STAT (++los_array_cards);
1607 				for (; elem < card_end; elem += SIZEOF_VOID_P) {
1608 					gpointer new, old = *(gpointer*)elem;
/* mod-union: any non-NULL slot; otherwise only slots pointing into the nursery */
1609 					if ((mod_union && old) || G_UNLIKELY (sgen_ptr_in_nursery (old))) {
1610 						HEAVY_STAT (++los_array_remsets);
1611 						copy_func ((void**)elem, ctx.queue);
1612 						new = *(gpointer*)elem;
/* if the target is still in the nursery after copying, keep it remembered */
1613 						if (G_UNLIKELY (sgen_ptr_in_nursery (new)))
1614 							sgen_add_to_global_remset (elem, new);
1619 				binary_protocol_card_scan (first_elem, elem - first_elem);
1622 #ifdef SGEN_HAVE_OVERLAPPING_CARDS
/* wrapped range: restart the loop at the beginning of the shadow table */
1623 		if (overflow_scan_end) {
1624 			extra_idx = card_data - card_base;
1625 			card_base = card_data = sgen_shadow_cardtable;
1626 			card_data_end = overflow_scan_end;
1627 			overflow_scan_end = NULL;
1638 * Array and string allocation
/*
 * Allocate a one-dimensional array (vector) of SIZE bytes with
 * MAX_LENGTH elements.  Tries the lock-free critical-region fast path
 * first, then falls back to the locked slow path; returns the OOM
 * result on allocation failure (failure body partially not shown).
 */
1642 mono_gc_alloc_vector (MonoVTable *vtable, size_t size, uintptr_t max_length)
/* size must be representable after alignment round-up */
1647 	if (!SGEN_CAN_ALIGN_UP (size))
1650 #ifndef DISABLE_CRITICAL_REGION
1651 	ENTER_CRITICAL_REGION;
1652 	arr = sgen_try_alloc_obj_nolock ((GCVTable*)vtable, size);
1654 		/*This doesn't require fencing since EXIT_CRITICAL_REGION already does it for us*/
1655 		arr->max_length = (mono_array_size_t)max_length;
1656 		EXIT_CRITICAL_REGION;
1659 	EXIT_CRITICAL_REGION;
/* slow path: allocate under the GC lock */
1664 	arr = sgen_alloc_obj_nolock ((GCVTable*)vtable, size);
1665 	if (G_UNLIKELY (!arr)) {
1667 		return mono_gc_out_of_memory (size);
1670 	arr->max_length = (mono_array_size_t)max_length;
1675 	SGEN_ASSERT (6, SGEN_ALIGN_UP (size) == SGEN_ALIGN_UP (sgen_client_par_object_get_size ((GCVTable*)vtable, (GCObject*)arr)), "Vector has incorrect size.");
/*
 * Allocate a multi-dimensional array: like mono_gc_alloc_vector but the
 * MonoArrayBounds block of BOUNDS_SIZE bytes is carved out of the tail
 * of the same allocation and linked via arr->bounds.
 */
1680 mono_gc_alloc_array (MonoVTable *vtable, size_t size, uintptr_t max_length, uintptr_t bounds_size)
1683 	MonoArrayBounds *bounds;
1686 	if (!SGEN_CAN_ALIGN_UP (size))
1689 #ifndef DISABLE_CRITICAL_REGION
1690 	ENTER_CRITICAL_REGION;
1691 	arr = sgen_try_alloc_obj_nolock ((GCVTable*)vtable, size);
1693 		/*This doesn't require fencing since EXIT_CRITICAL_REGION already does it for us*/
1694 		arr->max_length = (mono_array_size_t)max_length;
/* bounds live in the last bounds_size bytes of the object itself */
1696 			bounds = (MonoArrayBounds*)((char*)arr + size - bounds_size);
1697 			arr->bounds = bounds;
1698 		EXIT_CRITICAL_REGION;
1701 	EXIT_CRITICAL_REGION;
/* slow path: allocate under the GC lock */
1706 	arr = sgen_alloc_obj_nolock ((GCVTable*)vtable, size);
1707 	if (G_UNLIKELY (!arr)) {
1709 		return mono_gc_out_of_memory (size);
1712 	arr->max_length = (mono_array_size_t)max_length;
1714 		bounds = (MonoArrayBounds*)((char*)arr + size - bounds_size);
1715 		arr->bounds = bounds;
1720 	SGEN_ASSERT (6, SGEN_ALIGN_UP (size) == SGEN_ALIGN_UP (sgen_client_par_object_get_size ((GCVTable*)vtable, (GCObject*)arr)), "Array has incorrect size.");
/*
 * Allocate a string object of SIZE bytes for LEN characters, using the
 * same critical-region fast path / locked slow path structure as the
 * array allocators above (length store lines not visible here).
 */
1725 mono_gc_alloc_string (MonoVTable *vtable, size_t size, gint32 len)
1730 	if (!SGEN_CAN_ALIGN_UP (size))
1733 #ifndef DISABLE_CRITICAL_REGION
1734 	ENTER_CRITICAL_REGION;
1735 	str = sgen_try_alloc_obj_nolock ((GCVTable*)vtable, size);
1737 		/*This doesn't require fencing since EXIT_CRITICAL_REGION already does it for us*/
1739 		EXIT_CRITICAL_REGION;
1742 	EXIT_CRITICAL_REGION;
/* slow path: allocate under the GC lock */
1747 	str = sgen_alloc_obj_nolock ((GCVTable*)vtable, size);
1748 	if (G_UNLIKELY (!str)) {
1750 		return mono_gc_out_of_memory (size);
/*
 * Truncate STR in place to NEW_LENGTH characters.  The discarded tail is
 * zeroed (which also NUL-terminates the string and lets SGen reclaim the
 * space); with nursery canaries enabled the canary is re-written right
 * after the new terminator.
 */
1765 mono_gc_set_string_length (MonoString *str, gint32 new_length)
1767 	mono_unichar2 *new_end = str->chars + new_length;
1769 	/* zero the discarded string. This null-delimits the string and allows
1770 	 * the space to be reclaimed by SGen. */
1772 	if (nursery_canaries_enabled () && sgen_ptr_in_nursery (str)) {
1773 		CHECK_CANARY_FOR_OBJECT (str);
/* also clear the old canary bytes past the former end of the string */
1774 		memset (new_end, 0, (str->length - new_length + 1) * sizeof (mono_unichar2) + CANARY_SIZE);
1775 		memcpy (new_end + 1 , CANARY_STRING, CANARY_SIZE);
1777 		memset (new_end, 0, (str->length - new_length + 1) * sizeof (mono_unichar2));
1780 	str->length = new_length;
/* batch size for reporting GC roots to the profiler */
1787 #define GC_ROOT_NUM 32
/* fixed-size buffer of roots accumulated before flushing to the profiler */
1789 	int count;		/* must be the first field */
1790 	void *objects [GC_ROOT_NUM];
1791 	int root_types [GC_ROOT_NUM];
1792 	uintptr_t extra_info [GC_ROOT_NUM];
/* Flush the accumulated roots in REPORT to the profiler. */
1796 notify_gc_roots (GCRootReport *report)
1800 	mono_profiler_gc_roots (report->count, report->objects, report->root_types, report->extra_info);
/*
 * Append one root to REPORT, flushing first if the buffer is full.
 * extra_info is overwritten with the object's class pointer.
 */
1805 add_profile_gc_root (GCRootReport *report, void *object, int rtype, uintptr_t extra_info)
1807 	if (report->count == GC_ROOT_NUM)
1808 		notify_gc_roots (report);
1809 	report->objects [report->count] = object;
1810 	report->root_types [report->count] = rtype;
1811 	report->extra_info [report->count++] = (uintptr_t)((MonoVTable*)SGEN_LOAD_VTABLE (object))->klass;
/* Report each pinned nursery object as a PINNING|MISC root, if profiling roots. */
1815 sgen_client_nursery_objects_pinned (void **definitely_pinned, int count)
1817 	if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS) {
1818 		GCRootReport report;
1821 		for (idx = 0; idx < count; ++idx)
1822 			add_profile_gc_root (&report, definitely_pinned [idx], MONO_PROFILE_GC_ROOT_PINNING | MONO_PROFILE_GC_ROOT_MISC, 0);
1823 		notify_gc_roots (&report);
/* Report every object in a finalizer queue as a FINALIZER root. */
1828 report_finalizer_roots_from_queue (SgenPointerQueue *queue)
1830 	GCRootReport report;
1834 	for (i = 0; i < queue->next_slot; ++i) {
1835 		void *obj = queue->data [i];
1838 		add_profile_gc_root (&report, obj, MONO_PROFILE_GC_ROOT_FINALIZER, 0);
1840 	notify_gc_roots (&report);
/* Report both the ready and the critical finalizer queues. */
1844 report_finalizer_roots (SgenPointerQueue *fin_ready_queue, SgenPointerQueue *critical_fin_queue)
1846 	report_finalizer_roots_from_queue (fin_ready_queue);
1847 	report_finalizer_roots_from_queue (critical_fin_queue);
/* report being filled by the user-marker callback below (single-threaded use) */
1850 static GCRootReport *root_report;
/* Callback passed to user root markers; records *obj as an OTHER root. */
1853 single_arg_report_root (void **obj, void *gc_data)
1856 		add_profile_gc_root (root_report, *obj, MONO_PROFILE_GC_ROOT_OTHER, 0);
/*
 * Walk a registered root region [start_root, end_root) according to its
 * descriptor DESC and report every non-NULL reference slot found.
 */
1860 precisely_report_roots_from (GCRootReport *report, void** start_root, void** end_root, mword desc)
1862 	switch (desc & ROOT_DESC_TYPE_MASK) {
/* bitmap descriptor: one bit per pointer-sized slot */
1863 	case ROOT_DESC_BITMAP:
1864 		desc >>= ROOT_DESC_TYPE_SHIFT;
1866 			if ((desc & 1) && *start_root) {
1867 				add_profile_gc_root (report, *start_root, MONO_PROFILE_GC_ROOT_OTHER, 0);
/* complex descriptor: out-of-line multi-word bitmap */
1873 	case ROOT_DESC_COMPLEX: {
1874 		gsize *bitmap_data = sgen_get_complex_descriptor_bitmap (desc);
/* first word of the bitmap data is its length (in words) + 1 */
1875 		gsize bwords = (*bitmap_data) - 1;
1876 		void **start_run = start_root;
1878 		while (bwords-- > 0) {
1879 			gsize bmap = *bitmap_data++;
1880 			void **objptr = start_run;
1882 				if ((bmap & 1) && *objptr) {
1883 					add_profile_gc_root (report, *objptr, MONO_PROFILE_GC_ROOT_OTHER, 0);
1888 			start_run += GC_BITS_PER_WORD;
/* user descriptor: delegate enumeration to the registered marker function */
1892 	case ROOT_DESC_USER: {
1893 		MonoGCRootMarkFunc marker = sgen_get_user_descriptor_func (desc);
1894 		root_report = report;
1895 		marker (start_root, single_arg_report_root, NULL);
1898 	case ROOT_DESC_RUN_LEN:
1899 		g_assert_not_reached ();
1901 		g_assert_not_reached ();
/* Report every registered root region of the given ROOT_TYPE. */
1906 report_registered_roots_by_type (int root_type)
1908 	GCRootReport report;
1912 	SGEN_HASH_TABLE_FOREACH (&roots_hash [root_type], start_root, root) {
1913 		SGEN_LOG (6, "Precise root scan %p-%p (desc: %p)", start_root, root->end_root, (void*)root->root_desc);
1914 		precisely_report_roots_from (&report, start_root, (void**)root->end_root, root->root_desc);
1915 	} SGEN_HASH_TABLE_FOREACH_END;
1916 	notify_gc_roots (&report);
/* Report NORMAL and WBARRIER roots (PINNED roots are conservative, not walked here). */
1920 report_registered_roots (void)
1922 	report_registered_roots_by_type (ROOT_TYPE_NORMAL);
1923 	report_registered_roots_by_type (ROOT_TYPE_WBARRIER);
/* Minor-collection hook: report registered and finalizer roots when profiling. */
1927 sgen_client_collecting_minor (SgenPointerQueue *fin_ready_queue, SgenPointerQueue *critical_fin_queue)
1929 	if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS)
1930 		report_registered_roots ();
1931 	if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS)
1932 		report_finalizer_roots (fin_ready_queue, critical_fin_queue);
/* report accumulated across the phases of a major collection */
1935 static GCRootReport major_root_report;
/* snapshot of "are we profiling roots" taken at the start of the major cycle */
1936 static gboolean profile_roots;
/* Major-collection phase 1: latch the profiling flag and reset the report. */
1939 sgen_client_collecting_major_1 (void)
1941 	profile_roots = mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS;
1942 	memset (&major_root_report, 0, sizeof (GCRootReport));
/* Called for each LOS object pinned during a major collection. */
1946 sgen_client_pinned_los_object (char *obj)
1949 		add_profile_gc_root (&major_root_report, obj, MONO_PROFILE_GC_ROOT_PINNING | MONO_PROFILE_GC_ROOT_MISC, 0);
/* Major-collection phase 2: flush pinned-object roots, then registered roots. */
1953 sgen_client_collecting_major_2 (void)
1956 		notify_gc_roots (&major_root_report);
1958 	if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS)
1959 		report_registered_roots ();
/* Major-collection phase 3: report finalizer-queue roots. */
1963 sgen_client_collecting_major_3 (SgenPointerQueue *fin_ready_queue, SgenPointerQueue *critical_fin_queue)
1965 	if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS)
1966 		report_finalizer_roots (fin_ready_queue, critical_fin_queue);
/* buffered (from, to) pairs of moved objects for the moves profiler event */
1969 #define MOVED_OBJECTS_NUM 64
1970 static void *moved_objects [MOVED_OBJECTS_NUM];
1971 static int moved_objects_idx = 0;
/*
 * Record that OBJ was moved to DESTINATION; pairs are buffered and
 * flushed to mono_profiler_gc_moves when the buffer fills.
 */
1974 mono_sgen_register_moved_object (void *obj, void *destination)
1976 	g_assert (mono_profiler_events & MONO_PROFILE_GC_MOVES);
1978 	if (moved_objects_idx == MOVED_OBJECTS_NUM) {
1979 		mono_profiler_gc_moves (moved_objects, moved_objects_idx);
1980 		moved_objects_idx = 0;
/* entries are stored as consecutive (obj, destination) pairs */
1982 	moved_objects [moved_objects_idx++] = obj;
1983 	moved_objects [moved_objects_idx++] = destination;
/* Flush any buffered move pairs at a GC event boundary. */
1987 mono_sgen_gc_event_moves (void)
1989 	if (moved_objects_idx) {
1990 		mono_profiler_gc_moves (moved_objects, moved_objects_idx);
1991 		moved_objects_idx = 0;
/* per-object buffer size for references reported during a heap walk */
1999 #define REFS_SIZE 128
/* state threaded through the heap walk: user callback plus a refs buffer */
2002 	MonoGCReferences callback;
2006 	MonoObject *refs [REFS_SIZE];
2007 	uintptr_t offsets [REFS_SIZE];
/*
 * HANDLE_PTR is the per-reference hook consumed by sgen-scan-object.h
 * below: buffer each reference and its offset, flushing to the user
 * callback when the buffer fills (size is 0 on all but the first call).
 */
2011 #define HANDLE_PTR(ptr,obj)	do {	\
2013 		if (hwi->count == REFS_SIZE) {	\
2014 			hwi->callback ((MonoObject*)start, mono_object_class (start), hwi->called? 0: size, hwi->count, hwi->refs, hwi->offsets, hwi->data);	\
2018 		hwi->offsets [hwi->count] = (char*)(ptr)-(char*)start;	\
2019 		hwi->refs [hwi->count++] = *(ptr);	\
/* Collect the references of one object via the generic scan template. */
2024 collect_references (HeapWalkInfo *hwi, char *start, size_t size)
2026 	mword desc = sgen_obj_get_descriptor (start);
/* expands to a scan loop that invokes HANDLE_PTR for every reference slot */
2028 #include "sgen-scan-object.h"
/* Per-object callback for the iteration functions used by mono_gc_walk_heap. */
2032 walk_references (char *start, size_t size, void *data)
2034 	HeapWalkInfo *hwi = data;
2037 	collect_references (hwi, start, size);
/* deliver remaining buffered refs, or at least one call per object */
2038 	if (hwi->count || !hwi->called)
2039 		hwi->callback ((MonoObject*)start, mono_object_class (start), hwi->called? 0: size, hwi->count, hwi->refs, hwi->offsets, hwi->data);
2043  * mono_gc_walk_heap:
2044  * @flags: flags for future use
2045  * @callback: a function pointer called for each object in the heap
2046  * @data: a user data pointer that is passed to callback
2048  * This function can be used to iterate over all the live objects in the heap:
2049  * for each object, @callback is invoked, providing info about the object's
2050  * location in memory, its class, its size and the objects it references.
2051  * For each referenced object it's offset from the object address is
2052  * reported in the offsets array.
2053  * The object references may be buffered, so the callback may be invoked
2054  * multiple times for the same object: in all but the first call, the size
2055  * argument will be zero.
2056  * Note that this function can be only called in the #MONO_GC_EVENT_PRE_START_WORLD
2057  * profiler event handler.
2059  * Returns: a non-zero value if the GC doesn't support heap walking
2062 mono_gc_walk_heap (int flags, MonoGCReferences callback, void *data)
2067 	hwi.callback = callback;
/* walk nursery, major heap, and large-object space in turn */
2070 	sgen_clear_nursery_fragments ();
2071 	sgen_scan_area_with_callback (nursery_section->data, nursery_section->end_data, walk_references, &hwi, FALSE);
2073 	major_collector.iterate_objects (ITERATE_OBJECTS_SWEEP_ALL, walk_references, &hwi);
2074 	sgen_los_iterate_objects (walk_references, &hwi);
/* Install the runtime-supplied callback table (copied by value). */
2084 mono_gc_set_gc_callbacks (MonoGCCallbacks *callbacks)
2086 	gc_callbacks = *callbacks;
/* Return the installed runtime callback table. */
2090 mono_gc_get_gc_callbacks ()
2092 	return &gc_callbacks;
/*
 * Initialize the client-side portion of a newly registered thread:
 * publish INFO via TLS, reset its per-thread GC state, and compute the
 * stack bounds used for conservative scanning.  STACK_BOTTOM_FALLBACK
 * is used when the platform cannot report the stack bounds itself.
 */
2096 sgen_client_thread_register (SgenThreadInfo* info, void *stack_bottom_fallback)
2099 	guint8 *staddr = NULL;
2101 #ifndef HAVE_KW_THREAD
2102 	g_assert (!mono_native_tls_get_value (thread_info_key));
2103 	mono_native_tls_set_value (thread_info_key, info);
/* fast TLS path: __thread variable instead of a TLS key */
2105 	sgen_thread_info = info;
2108 	info->client_info.skip = 0;
2109 	info->client_info.stopped_ip = NULL;
2110 	info->client_info.stopped_domain = NULL;
2112 	info->client_info.stack_start = NULL;
2114 #ifdef SGEN_POSIX_STW
2115 	info->client_info.stop_count = -1;
2116 	info->client_info.signal = 0;
2119 	/* On win32, stack_start_limit should be 0, since the stack can grow dynamically */
2120 	mono_thread_info_get_stack_bounds (&staddr, &stsize);
2123 		info->client_info.stack_start_limit = staddr;
2125 		info->client_info.stack_end = staddr + stsize;
/* no platform stack bounds: page-align the fallback pointer upward */
2127 		gsize stack_bottom = (gsize)stack_bottom_fallback;
2128 		stack_bottom += 4095;
2129 		stack_bottom &= ~4095;
2130 		info->client_info.stack_end = (char*)stack_bottom;
2134 	memset (&info->client_info.ctx, 0, sizeof (MonoContext));
2136 	memset (&info->client_info.regs, 0, sizeof (info->regs));
/* give the runtime a chance to attach its own per-thread data */
2139 	if (mono_gc_get_gc_callbacks ()->thread_attach_func)
2140 		info->client_info.runtime_data = mono_gc_get_gc_callbacks ()->thread_attach_func ();
2142 	binary_protocol_thread_register ((gpointer)mono_thread_info_get_tid (info));
2144 	SGEN_LOG (3, "registered thread %p (%p) stack end %p", info, (gpointer)mono_thread_info_get_tid (info), info->client_info.stack_end);
/*
 * Tear down the client-side state of a thread that is detaching: clear
 * TLS, hand runtime threads to the joinable-thread machinery, and let
 * the runtime detach its per-thread data.
 */
2148 sgen_client_thread_unregister (SgenThreadInfo *p)
2150 	MonoNativeThreadId tid;
2152 #ifndef HAVE_KW_THREAD
2153 	mono_native_tls_set_value (thread_info_key, NULL);
2155 	sgen_thread_info = NULL;
2158 	tid = mono_thread_info_get_tid (p);
/* runtime threads must be joined later to avoid leaking their resources */
2160 	if (p->client_info.info.runtime_thread)
2161 		mono_threads_add_joinable_thread ((gpointer)tid);
2163 	if (mono_gc_get_gc_callbacks ()->thread_detach_func) {
2164 		mono_gc_get_gc_callbacks ()->thread_detach_func (p->client_info.runtime_data);
2165 		p->client_info.runtime_data = NULL;
2168 	binary_protocol_thread_unregister ((gpointer)tid);
2169 	SGEN_LOG (3, "unregister thread %p (%p)", p, (gpointer)tid);
/* Exclude (or re-include) the current thread from GC stack scanning. */
2173 mono_gc_set_skip_thread (gboolean skip)
2175 	SgenThreadInfo *info = mono_thread_info_current ();
2178 	info->client_info.gc_disabled = skip;
/* TRUE if METHOD must never be interrupted by the GC. */
2183 is_critical_method (MonoMethod *method)
2185 	return mono_runtime_is_critical_method (method) || sgen_is_critical_method (method);
/* TRUE if the thread is inside an allocator critical region. */
2189 thread_in_critical_region (SgenThreadInfo *info)
2191 	return info->client_info.in_critical_region;
/* Lazily attach runtime per-thread data when a thread first touches the GC. */
2195 sgen_thread_attach (SgenThreadInfo *info)
2197 	if (mono_gc_get_gc_callbacks ()->thread_attach_func && !info->client_info.runtime_data)
2198 		info->client_info.runtime_data = mono_gc_get_gc_callbacks ()->thread_attach_func ();
2202 sgen_thread_detach (SgenThreadInfo *p)
2204 	/* If a delegate is passed to native code and invoked on a thread we dont
2205 	 * know about, the jit will register it with mono_jit_thread_attach, but
2206 	 * we have no way of knowing when that thread goes away.  SGen has a TSD
2207 	 * so we assume that if the domain is still registered, we can detach
2210 	if (mono_domain_get ())
2211 		mono_thread_detach_internal (mono_thread_internal_current ());
/* Public entry point: attach the calling thread to the thread machinery. */
2215 mono_gc_register_thread (void *baseptr)
2217 	return mono_thread_info_attach (baseptr) != NULL;
/* TRUE if the calling thread is known to the GC's thread machinery. */
2221 mono_gc_is_gc_thread (void)
2225 	result = mono_thread_info_current () != NULL;
/* Register a GC worker thread (needs a small id for lock-free structures). */
2231 sgen_client_thread_register_worker (void)
2233 	mono_thread_info_register_small_id ();
2236 /* Variables holding start/end nursery so it won't have to be passed at every call */
2237 static void *scan_area_arg_start, *scan_area_arg_end;
/* Conservatively pin everything in [start, end) that looks like a nursery pointer. */
2240 mono_gc_conservatively_scan_area (void *start, void *end)
2242 	sgen_conservatively_pin_objects_from (start, end, scan_area_arg_start, scan_area_arg_end, PIN_TYPE_STACK);
/* Precise-mark callback: copy/mark a single object reference. */
2246 mono_gc_scan_object (void *obj, void *gc_data)
2248 	ScanCopyContext *ctx = gc_data;
2249 	ctx->ops->copy_or_mark_object (&obj, ctx->queue);
2254  * Mark from thread stacks and registers.
/*
 * Scan every registered thread's stack and saved register state,
 * preferring the runtime's precise marker and falling back to
 * conservative pinning (and permanently disabling precise marking
 * if the runtime cannot support it).
 */
2257 sgen_client_scan_thread_data (void *start_nursery, void *end_nursery, gboolean precise, ScanCopyContext ctx)
2259 	SgenThreadInfo *info;
/* stash nursery bounds for mono_gc_conservatively_scan_area above */
2261 	scan_area_arg_start = start_nursery;
2262 	scan_area_arg_end = end_nursery;
2264 	FOREACH_THREAD (info) {
2265 		if (info->client_info.skip) {
2266 			SGEN_LOG (3, "Skipping dead thread %p, range: %p-%p, size: %td", info, info->client_info.stack_start, info->client_info.stack_end, (char*)info->client_info.stack_end - (char*)info->client_info.stack_start);
2269 		if (info->client_info.gc_disabled) {
2270 			SGEN_LOG (3, "GC disabled for thread %p, range: %p-%p, size: %td", info, info->client_info.stack_start, info->client_info.stack_end, (char*)info->client_info.stack_end - (char*)info->client_info.stack_start);
2273 		if (!mono_thread_info_is_live (info)) {
2274 			SGEN_LOG (3, "Skipping non-running thread %p, range: %p-%p, size: %td (state %x)", info, info->client_info.stack_start, info->client_info.stack_end, (char*)info->client_info.stack_end - (char*)info->client_info.stack_start, info->client_info.info.thread_state);
2277 		g_assert (info->client_info.suspend_done);
2278 		SGEN_LOG (3, "Scanning thread %p, range: %p-%p, size: %td, pinned=%zd", info, info->client_info.stack_start, info->client_info.stack_end, (char*)info->client_info.stack_end - (char*)info->client_info.stack_start, sgen_get_pinned_count ());
2279 		if (mono_gc_get_gc_callbacks ()->thread_mark_func && !conservative_stack_mark) {
2280 			mono_gc_get_gc_callbacks ()->thread_mark_func (info->client_info.runtime_data, info->client_info.stack_start, info->client_info.stack_end, precise, &ctx);
2281 		} else if (!precise) {
/* precise marking unavailable: switch to conservative from now on */
2282 			if (!conservative_stack_mark) {
2283 				fprintf (stderr, "Precise stack mark not supported - disabling.\n");
2284 				conservative_stack_mark = TRUE;
2286 			sgen_conservatively_pin_objects_from (info->client_info.stack_start, info->client_info.stack_end, start_nursery, end_nursery, PIN_TYPE_STACK);
/* also pin from the saved register context of the suspended thread */
2291 			sgen_conservatively_pin_objects_from ((void**)&info->client_info.ctx, (void**)&info->client_info.ctx + ARCH_NUM_REGS,
2292 				start_nursery, end_nursery, PIN_TYPE_STACK);
2294 			sgen_conservatively_pin_objects_from ((void**)&info->client_info.regs, (void**)&info->client_info.regs + ARCH_NUM_REGS,
2295 				start_nursery, end_nursery, PIN_TYPE_STACK);
2298 	} END_FOREACH_THREAD
2302  * mono_gc_set_stack_end:
2304  *   Set the end of the current threads stack to STACK_END. The stack space between 
2305  * STACK_END and the real end of the threads stack will not be scanned during collections.
2308 mono_gc_set_stack_end (void *stack_end)
2310 	SgenThreadInfo *info;
2313 	info = mono_thread_info_current ();
/* only shrinking the scanned range is allowed */
2315 		SGEN_ASSERT (0, stack_end < info->client_info.stack_end, "Can only lower stack end");
2316 		info->client_info.stack_end = stack_end;
/* Register a root region; a NULL descriptor means conservative (pinned) scanning. */
2326 mono_gc_register_root (char *start, size_t size, void *descr)
2328 	return sgen_register_root (start, size, descr, descr ? ROOT_TYPE_NORMAL : ROOT_TYPE_PINNED);
/* Register a root region whose stores go through the write barrier. */
2332 mono_gc_register_root_wbarrier (char *start, size_t size, void *descr)
2334 	return sgen_register_root (start, size, descr, ROOT_TYPE_WBARRIER);
2338 mono_gc_deregister_root (char* addr)
2340 	sgen_deregister_root (addr);
2347 #if USE_PTHREAD_INTERCEPT
/* pthread intercepts: SGen needs no special handling, just forward the calls */
2350 mono_gc_pthread_create (pthread_t *new_thread, const pthread_attr_t *attr, void *(*start_routine)(void *), void *arg)
2352 	return pthread_create (new_thread, attr, start_routine, arg);
2356 mono_gc_pthread_join (pthread_t thread, void **retval)
2358 	return pthread_join (thread, retval);
2362 mono_gc_pthread_detach (pthread_t thread)
2364 	return pthread_detach (thread);
/* detach from the thread machinery before the thread truly exits */
2368 mono_gc_pthread_exit (void *retval)
2370 	mono_thread_info_detach ();
2371 	pthread_exit (retval);
2372 	g_assert_not_reached ();
2375 #endif /* USE_PTHREAD_INTERCEPT */
2382 sgen_client_total_allocated_heap_changed (size_t allocated_heap)
2384 mono_runtime_resource_check_limit (MONO_RESOURCE_GC_HEAP, allocated_heap);
2388 mono_gc_user_markers_supported (void)
2394 mono_object_is_alive (MonoObject* o)
2400 mono_gc_get_generation (MonoObject *obj)
2402 if (sgen_ptr_in_nursery (obj))
2408 mono_gc_enable_events (void)
2413 mono_gc_get_gc_name (void)
2419 mono_gc_get_description (void)
2421 return g_strdup ("sgen");
2425 mono_gc_set_desktop_mode (void)
2430 mono_gc_is_moving (void)
2436 mono_gc_is_disabled (void)
2442 BOOL APIENTRY mono_gc_dllmain (HMODULE module_handle, DWORD reason, LPVOID reserved)
2449 mono_gc_max_generation (void)
2455 mono_gc_precise_stack_mark_enabled (void)
2457 return !conservative_stack_mark;
2461 mono_gc_collect (int generation)
2463 sgen_gc_collect (generation);
2467 mono_gc_collection_count (int generation)
2469 return sgen_gc_collection_count (generation);
2473 mono_gc_get_used_size (void)
2475 return (int64_t)sgen_gc_get_used_size ();
2479 mono_gc_get_heap_size (void)
2481 return (int64_t)sgen_gc_get_total_heap_allocation ();
/* Wraps a user-supplied mark callback into a root descriptor that SGen
 * invokes when scanning roots registered with it. */
2485 mono_gc_make_root_descr_user (MonoGCRootMarkFunc marker)
2487 return sgen_make_user_root_descriptor (marker);
/* Strings are covered by one fixed descriptor, so the per-object bitmap
 * arguments are ignored here. */
2491 mono_gc_make_descr_for_string (gsize *bitmap, int numbits)
2493 return (void*)SGEN_DESC_STRING;
/* Reports nursery geometry for the JIT's inlined fast paths: the out
 * parameters receive the shift bits and byte size, the return value is
 * the nursery start address. */
2497 mono_gc_get_nursery (int *shift_bits, size_t *size)
2499 *size = sgen_nursery_size;
2500 *shift_bits = DEFAULT_NURSERY_BITS;
2501 return sgen_get_nursery_start ();
/* Size threshold above which objects go to the large-object space. */
2505 mono_gc_get_los_limit (void)
2507 return SGEN_MAX_SMALL_OBJ_SIZE;
/* Registers a weak reference: *link_addr tracks obj and is cleared by the
 * GC when the object dies; `track` selects resurrection-tracking
 * (long-weak) semantics. */
2511 mono_gc_weak_link_add (void **link_addr, MonoObject *obj, gboolean track)
2513 sgen_register_disappearing_link (obj, link_addr, track, FALSE);
/* Unregisters a weak reference; passing NULL as the object removes the
 * disappearing link for link_addr. */
2517 mono_gc_weak_link_remove (void **link_addr, gboolean track)
2519 sgen_register_disappearing_link (NULL, link_addr, track, FALSE);
/* Reads the current target of a weak link in a GC-safe way. */
2523 mono_gc_weak_link_get (void **link_addr)
2525 return sgen_weak_link_get (link_addr);
/* Toggles whether a synchronous major collection may be requested;
 * forwards to and returns sgen_set_allow_synchronous_major's result. */
2529 mono_gc_set_allow_synchronous_major (gboolean flag)
2531 return sgen_set_allow_synchronous_major (flag);
/* Runs func (data) while holding the GC lock; interruption is re-enabled
 * afterwards (UNLOCK_INTERRUPTION) and the callback's result is returned. */
2535 mono_gc_invoke_with_gc_lock (MonoGCLockedCallbackFunc func, void *data)
2539 result = func (data);
2540 UNLOCK_INTERRUPTION;
/* Signal-altstack registration hook (no visible body in this excerpt —
 * presumably a no-op for SGen; confirm upstream). */
2545 mono_gc_register_altstack (gpointer stack, gint32 stack_size, gpointer altstack, gint32 altstack_size)
/* SGen out-of-memory hook: delegates to the runtime's OOM handling. */
2551 sgen_client_out_of_memory (size_t size)
2553 mono_gc_out_of_memory (size);
/* Exposes the card-table configuration (shift and mask out-params) so the
 * JIT can inline write barriers. */
2557 mono_gc_get_card_table (int *shift_bits, gpointer *mask)
2559 return sgen_get_card_table_configuration (shift_bits, mask);
/* Nursery card-table checking is only valid when the major collector does
 * not run concurrently. */
2563 mono_gc_card_table_nursery_check (void)
2565 return !sgen_get_major_collector ()->is_concurrent;
2568 /* Negative value to remove */
/* Public hook for reporting unmanaged memory pressure; deliberately left
 * unimplemented (see FIXME). */
2570 mono_gc_add_memory_pressure (gint64 value)
2572 /* FIXME: Implement at some point? */
/* Called when an allocation is satisfied in degraded mode (directly from
 * the major heap instead of the nursery).  Emits a rate-limited warning:
 * at most once per major collection, and only on the 1st/3rd degraded
 * allocation (first message) or the 10th (stronger message).  The static
 * counters persist across calls. */
2580 sgen_client_degraded_allocation (size_t size)
2582 static int last_major_gc_warned = -1;
2583 static int num_degraded = 0;
2585 if (last_major_gc_warned < gc_stats.major_gc_count) {
2587 if (num_degraded == 1 || num_degraded == 3)
2588 mono_trace (G_LOG_LEVEL_INFO, MONO_TRACE_GC, "Warning: Degraded allocation. Consider increasing nursery-size if the warning persists.");
2589 else if (num_degraded == 10)
2590 mono_trace (G_LOG_LEVEL_INFO, MONO_TRACE_GC, "Warning: Repeated degraded allocation. Consider increasing nursery-size.");
2591 last_major_gc_warned = gc_stats.major_gc_count;
/* Emits the one-line GC_MAJOR / GC_MINOR timing summary through
 * mono_trace.  Raw timestamps are divided by 10000.0 to get milliseconds,
 * i.e. they are assumed to be in 100ns ticks — TODO confirm units. */
2596 sgen_client_log_timing (GGTimingInfo *info, mword last_major_num_sections, mword last_los_memory_usage)
2598 SgenMajorCollector *major_collector = sgen_get_major_collector ();
2599 mword num_major_sections = major_collector->get_num_major_sections ();
2600 char full_timing_buff [1024];
2601 full_timing_buff [0] = '\0';
/* Overflow collections get no stw/bridge breakdown in the message. */
2603 if (!info->is_overflow)
2604 sprintf (full_timing_buff, "total %.2fms, bridge %.2fms", info->stw_time / 10000.0f, (int)info->bridge_time / 10000.0f);
2605 if (info->generation == GENERATION_OLD)
2606 mono_trace (G_LOG_LEVEL_INFO, MONO_TRACE_GC, "GC_MAJOR%s: (%s) pause %.2fms, %s major %dK/%dK los %dK/%dK",
2607 info->is_overflow ? "_OVERFLOW" : "",
2608 info->reason ? info->reason : "",
2609 (int)info->total_time / 10000.0f,
/* current/previous major-section usage, then current/previous LOS usage */
2611 major_collector->section_size * num_major_sections / 1024,
2612 major_collector->section_size * last_major_num_sections / 1024,
2613 los_memory_usage / 1024,
2614 last_los_memory_usage / 1024);
/* Minor collections additionally report the amount promoted to major. */
2616 mono_trace (G_LOG_LEVEL_INFO, MONO_TRACE_GC, "GC_MINOR%s: (%s) pause %.2fms, %s promoted %dK major %dK los %dK",
2617 info->is_overflow ? "_OVERFLOW" : "",
2618 info->reason ? info->reason : "",
2619 (int)info->total_time / 10000.0f,
2621 (num_major_sections - last_major_num_sections) * major_collector->section_size / 1024,
2622 major_collector->section_size * num_major_sections / 1024,
2623 los_memory_usage / 1024);
/* Maps Mono-specific internal-memory type ids to human-readable names for
 * debug/statistics output (only the ephemeron case is visible in this
 * excerpt). */
2631 sgen_client_description_for_internal_mem_type (int type)
2634 case INTERNAL_MEM_EPHEMERON_LINK: return "ephemeron-link";
/* Debug hook run before each collection.  When the `xdomain-checks` debug
 * option is enabled, clears nursery fragments and verifies that no stale
 * cross-appdomain references remain. */
2641 sgen_client_pre_collection_checks (void)
2643 if (sgen_mono_xdomain_checks) {
2644 sgen_clear_nursery_fragments ();
2645 sgen_check_for_xdomain_refs ();
/* Returns the namespace of the class behind a GC vtable (used by SGen for
 * logging/diagnostics). */
2650 sgen_client_vtable_get_namespace (GCVTable *gc_vtable)
2652 MonoVTable *vt = (MonoVTable*)gc_vtable;
2653 return vt->klass->name_space;
/* Returns the class name behind a GC vtable (logging/diagnostics). */
2657 sgen_client_vtable_get_name (GCVTable *gc_vtable)
2659 MonoVTable *vt = (MonoVTable*)gc_vtable;
2660 return vt->klass->name;
/* Mono-side SGen initialization: installs the thread-lifecycle callbacks
 * into the threading subsystem, forces the stack-marking mode, registers
 * internal allocator types, sets up stop-the-world and TLS, and finally
 * registers the current thread with the GC. */
2668 sgen_client_init (void)
2671 MonoThreadInfoCallbacks cb;
2673 cb.thread_register = sgen_thread_register;
2674 cb.thread_detach = sgen_thread_detach;
2675 cb.thread_unregister = sgen_thread_unregister;
2676 cb.thread_attach = sgen_thread_attach;
2677 cb.mono_method_is_critical = (gpointer)is_critical_method;
2678 cb.mono_thread_in_critical_region = thread_in_critical_region;
/* pthread hooks — only meaningful when USE_PTHREAD_INTERCEPT is built in. */
2680 cb.thread_exit = mono_gc_pthread_exit;
2681 cb.mono_gc_pthread_create = (gpointer)mono_gc_pthread_create;
2684 mono_threads_init (&cb, sizeof (SgenThreadInfo));
2686 ///* Keep this the default for now */
2687 /* Precise marking is broken on all supported targets. Disable until fixed. */
2688 conservative_stack_mark = TRUE;
2690 sgen_register_fixed_internal_mem_type (INTERNAL_MEM_EPHEMERON_LINK, sizeof (EphemeronLinkNode));
/* Stop-the-world machinery must exist before any thread can be suspended. */
2692 mono_sgen_init_stw ();
/* Without compiler __thread support, thread info lives in a native TLS
 * slot; its key/offset is published for CEE_MONO_TLS. */
2694 #ifndef HAVE_KW_THREAD
2695 mono_native_tls_alloc (&thread_info_key, NULL);
2696 #if defined(__APPLE__) || defined (HOST_WIN32)
2698 * CEE_MONO_TLS requires the tls offset, not the key, so the code below only works on darwin,
2699 * where the two are the same.
2701 mono_tls_key_set_offset (TLS_KEY_SGEN_THREAD_INFO, thread_info_key);
2705 int tls_offset = -1;
2706 MONO_THREAD_VAR_OFFSET (sgen_thread_info, tls_offset);
2707 mono_tls_key_set_offset (TLS_KEY_SGEN_THREAD_INFO, tls_offset);
2712 * This needs to happen before any internal allocations because
2713 * it inits the small id which is required for hazard pointer
2718 mono_gc_register_thread (&dummy);
/* Parses one Mono-specific MONO_GC_PARAMS option.  Recognizes
 * `stack-mark=precise|conservative`, `bridge-implementation=NAME` and
 * `toggleref-test`; presumably returns whether the option was handled —
 * confirm against the caller (return statements not visible here). */
2722 sgen_client_handle_gc_param (const char *opt)
2724 if (g_str_has_prefix (opt, "stack-mark=")) {
2725 opt = strchr (opt, '=') + 1;
2726 if (!strcmp (opt, "precise")) {
2727 conservative_stack_mark = FALSE;
2728 } else if (!strcmp (opt, "conservative")) {
2729 conservative_stack_mark = TRUE;
/* Unknown value: warn and keep whatever mode is currently active. */
2731 sgen_env_var_error (MONO_GC_PARAMS_NAME, conservative_stack_mark ? "Using `conservative`." : "Using `precise`.",
2732 "Invalid value `%s` for `stack-mark` option, possible values are: `precise`, `conservative`.", opt);
2734 } else if (g_str_has_prefix (opt, "bridge-implementation=")) {
2735 opt = strchr (opt, '=') + 1;
2736 sgen_set_bridge_implementation (opt);
2737 } else if (g_str_has_prefix (opt, "toggleref-test")) {
2738 /* FIXME: This should probably in MONO_GC_DEBUG */
2739 sgen_register_test_toggleref_callback ();
/* Prints usage text for the Mono-specific MONO_GC_PARAMS options. */
2747 sgen_client_print_gc_params_usage (void)
2749 fprintf (stderr, " stack-mark=MARK-METHOD (where MARK-METHOD is 'precise' or 'conservative')\n");
/* Parses one Mono-specific MONO_GC_DEBUG option; anything not handled
 * directly is offered to the bridge's debug-option handler. */
2753 sgen_client_handle_gc_debug (const char *opt)
2755 if (!strcmp (opt, "xdomain-checks")) {
2756 sgen_mono_xdomain_checks = TRUE;
2757 } else if (!strcmp (opt, "do-not-finalize")) {
2758 do_not_finalize = TRUE;
2759 } else if (!strcmp (opt, "log-finalizers")) {
2760 log_finalizers = TRUE;
2761 } else if (!strcmp (opt, "no-managed-allocator")) {
2762 sgen_set_use_managed_allocator (FALSE);
2763 } else if (!sgen_bridge_handle_gc_debug (opt)) {
/* Prints usage text for the Mono-specific MONO_GC_DEBUG options, then
 * lets the bridge print its own. */
2770 sgen_client_print_gc_debug_usage (void)
2772 fprintf (stderr, " xdomain-checks\n");
2773 fprintf (stderr, " do-not-finalize\n");
2774 fprintf (stderr, " log-finalizers\n");
2775 fprintf (stderr, " no-managed-allocator\n");
2776 sgen_bridge_print_gc_debug_usage ();
2780 mono_gc_base_init (void)
2782 mono_counters_init ();
2786 if (nursery_canaries_enabled ())
2787 sgen_set_use_managed_allocator (FALSE);