/*
2 * sgen-mono.c: SGen features specific to Mono.
4 * Copyright (C) 2014 Xamarin Inc
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Library General Public
8 * License 2.0 as published by the Free Software Foundation;
10 * This library is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Library General Public License for more details.
15 * You should have received a copy of the GNU Library General Public
16 * License 2.0 along with this library; if not, write to the Free
17 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
23 #include "metadata/sgen-gc.h"
24 #include "metadata/sgen-protocol.h"
25 #include "metadata/monitor.h"
26 #include "metadata/sgen-layout-stats.h"
27 #include "metadata/sgen-client.h"
28 #include "metadata/sgen-cardtable.h"
29 #include "metadata/sgen-pinning.h"
30 #include "metadata/marshal.h"
31 #include "metadata/method-builder.h"
32 #include "metadata/abi-details.h"
33 #include "metadata/mono-gc.h"
34 #include "metadata/runtime.h"
35 #include "metadata/sgen-bridge-internal.h"
36 #include "metadata/gc-internal.h"
37 #include "utils/mono-memory-model.h"
38 #include "utils/mono-logger-internal.h"
/*
 * File-scope GC configuration, runtime callbacks and helper macros.
 * NOTE(review): this extraction has dropped lines throughout the file
 * (blank lines, braces, #else/#endif, some statements); compare every
 * block against upstream sgen-mono.c before acting on it.
 */
40 /* If set, mark stacks conservatively, even if precise marking is possible */
41 static gboolean conservative_stack_mark = FALSE;
42 /* If set, check that there are no references to the domain left at domain unload */
43 gboolean sgen_mono_xdomain_checks = FALSE;
45 /* Functions supplied by the runtime to be called by the GC */
46 static MonoGCCallbacks gc_callbacks;
/* Fast per-thread SGen thread-info pointer (compiler TLS). */
49 __thread SgenThreadInfo *sgen_thread_info;
/* Portable TLS key fallback for the same thread info. */
51 MonoNativeTlsKey thread_info_key;
/* Round val up to the next multiple of align (align must be a power of two). */
54 #define ALIGN_TO(val,align) ((((guint64)val) + ((align) - 1)) & ~((align) - 1))
/* NOTE(review): the OPDEF expansion body and the matching #undef are
 * missing from this extraction; only the macro head and the include of
 * the opcode table survive. */
56 #define OPDEF(a,b,c,d,e,f,g,h,i,j) \
60 #include "mono/cil/opcode.def"
/*
 * ptr_on_stack: test whether ptr lies within the current thread's stack,
 * i.e. between the address of a fresh local and the registered stack_end.
 * NOTE(review): return type, braces and the return statements were lost in
 * extraction — the visible body is incomplete.
 */
71 ptr_on_stack (void *ptr)
/* Address of a local approximates the current top of stack. */
73 gpointer stack_start = &stack_start;
74 SgenThreadInfo *info = mono_thread_info_current ();
76 if (ptr >= stack_start && ptr < (gpointer)info->client_info.stack_end)
/*
 * Heavy-binary-protocol instrumentation: scan a copied object and record a
 * wbarrier protocol entry for each reference slot. HANDLE_PTR is the
 * per-reference hook consumed by the sgen-scan-object.h template include.
 * NOTE(review): parts of the macro body (continuation lines, #undef) and
 * the function's braces are missing from this extraction.
 */
81 #ifdef SGEN_HEAVY_BINARY_PROTOCOL
83 #define HANDLE_PTR(ptr,obj) do { \
84 gpointer o = *(gpointer*)(ptr); \
/* d = corresponding slot address inside the destination copy. */
86 gpointer d = ((char*)dest) + ((char*)(ptr) - (char*)(obj)); \
87 binary_protocol_wbarrier (d, o, (gpointer) SGEN_LOAD_VTABLE (o)); \
92 scan_object_for_binary_protocol_copy_wbarrier (gpointer dest, char *start, mword desc)
/* Template include expands a scan loop driven by HANDLE_PTR above. */
94 #define SCAN_OBJECT_NOVTABLE
95 #include "sgen-scan-object.h"
/*
 * mono_gc_wbarrier_value_copy: write barrier for copying `count` value-type
 * instances of class `klass` from src to dest. If dest is in the nursery,
 * on the stack, or the type has no references, a plain memmove suffices
 * (no remembered-set entry needed); otherwise the remset wbarrier is used.
 * NOTE(review): braces/returns are missing in this extraction — the early
 * fast path presumably returns before reaching the remset call; confirm
 * against upstream.
 */
100 mono_gc_wbarrier_value_copy (gpointer dest, gpointer src, int count, MonoClass *klass)
102 HEAVY_STAT (++stat_wbarrier_value_copy);
104 g_assert (klass->valuetype);
105 SGEN_LOG (8, "Adding value remset at %p, count %d, descr %p for class %s (%p)", dest, count, klass->gc_descr, klass->name, klass);
107 if (sgen_ptr_in_nursery (dest) || ptr_on_stack (dest) || !sgen_gc_descr_has_references ((mword)klass->gc_descr)) {
108 size_t element_size = mono_class_value_size (klass, NULL);
109 size_t size = count * element_size;
110 mono_gc_memmove_atomic (dest, src, size);
114 #ifdef SGEN_HEAVY_BINARY_PROTOCOL
115 if (binary_protocol_is_heavy_enabled ()) {
116 size_t element_size = mono_class_value_size (klass, NULL);
118 for (i = 0; i < count; ++i) {
/* src is biased by sizeof (MonoObject) because the scan template
 * expects object-start addresses while values are headerless. */
119 scan_object_for_binary_protocol_copy_wbarrier ((char*)dest + i * element_size,
120 (char*)src + i * element_size - sizeof (MonoObject),
121 (mword) klass->gc_descr);
126 sgen_get_remset ()->wbarrier_value_copy (dest, src, count, mono_class_value_size (klass, NULL));
130 * mono_gc_wbarrier_object_copy:
132 * Write barrier to call when obj is the result of a clone or copy of an object.
/* NOTE(review): braces, the `size` declaration and the early return of the
 * fast path are missing from this extraction. */
135 mono_gc_wbarrier_object_copy (MonoObject* obj, MonoObject *src)
139 HEAVY_STAT (++stat_wbarrier_object_copy);
/* Fast path: no remset entry needed — copy the body (past the header). */
141 if (sgen_ptr_in_nursery (obj) || ptr_on_stack (obj) || !SGEN_OBJECT_HAS_REFERENCES (src)) {
142 size = mono_object_class (obj)->instance_size;
143 mono_gc_memmove_aligned ((char*)obj + sizeof (MonoObject), (char*)src + sizeof (MonoObject),
144 size - sizeof (MonoObject));
148 #ifdef SGEN_HEAVY_BINARY_PROTOCOL
149 if (binary_protocol_is_heavy_enabled ())
150 scan_object_for_binary_protocol_copy_wbarrier (obj, (char*)src, (mword) src->vtable->gc_descr);
153 sgen_get_remset ()->wbarrier_object_copy (obj, src);
/*
 * mono_gc_wbarrier_set_arrayref: store `value` into an array slot with the
 * required write barrier. Nursery slots get a plain store (presumably with
 * an early return — the brace/return lines are missing here); others go
 * through the remembered set.
 */
157 mono_gc_wbarrier_set_arrayref (MonoArray *arr, gpointer slot_ptr, MonoObject* value)
159 HEAVY_STAT (++stat_wbarrier_set_arrayref);
160 if (sgen_ptr_in_nursery (slot_ptr)) {
161 *(void**)slot_ptr = value;
164 SGEN_LOG (8, "Adding remset at %p", slot_ptr);
166 binary_protocol_wbarrier (slot_ptr, value, value->vtable);
168 sgen_get_remset ()->wbarrier_set_field ((GCObject*)arr, slot_ptr, value);
/* Field stores reuse the arrayref barrier — the slot handling is identical. */
172 mono_gc_wbarrier_set_field (MonoObject *obj, gpointer field_ptr, MonoObject* value)
174 mono_gc_wbarrier_set_arrayref ((MonoArray*)obj, field_ptr, value);
/* Bitmap-driven value copy: thin wrapper over the sgen implementation. */
178 mono_gc_wbarrier_value_copy_bitmap (gpointer _dest, gpointer _src, int size, unsigned bitmap)
180 sgen_wbarrier_value_copy_bitmap (_dest, _src, size, bitmap);
/* Cached managed write-barrier wrappers (concurrent / non-concurrent). */
183 static MonoMethod *write_barrier_conc_method;
184 static MonoMethod *write_barrier_noconc_method;
/*
 * sgen_is_critical_method: true if `method` is one of the GC-critical
 * managed methods (a write-barrier wrapper or a managed allocator) that
 * must not be interrupted mid-execution.
 */
187 sgen_is_critical_method (MonoMethod *method)
189 return (method == write_barrier_conc_method || method == write_barrier_noconc_method || sgen_is_managed_allocator (method));
/* sgen_has_critical_method: true if any critical method has been created. */
193 sgen_has_critical_method (void)
195 return write_barrier_conc_method || write_barrier_noconc_method || sgen_has_managed_allocator ();
/*
 * emit_nursery_check: emit IL that returns early from the write barrier
 * when the barrier is unnecessary. Compares (ptr >> DEFAULT_NURSERY_BITS)
 * against (nursery_start >> DEFAULT_NURSERY_BITS):
 *   [0] branch taken when the written-to address is inside the nursery;
 *   [1] (non-concurrent only) branch taken when the stored value is NOT
 *       in the nursery.
 * The caller patches the returned branch labels to the RET site.
 */
201 emit_nursery_check (MonoMethodBuilder *mb, int *nursery_check_return_labels, gboolean is_concurrent)
203 int shifted_nursery_start = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
205 memset (nursery_check_return_labels, 0, sizeof (int) * 2);
206 // if (ptr_in_nursery (ptr)) return;
208 * Masking out the bits might be faster, but we would have to use 64 bit
209 * immediates, which might be slower.
/* shifted_nursery_start = nursery_start >> DEFAULT_NURSERY_BITS */
211 mono_mb_emit_byte (mb, MONO_CUSTOM_PREFIX);
212 mono_mb_emit_byte (mb, CEE_MONO_LDPTR_NURSERY_START);
213 mono_mb_emit_icon (mb, DEFAULT_NURSERY_BITS);
214 mono_mb_emit_byte (mb, CEE_SHR_UN);
215 mono_mb_emit_stloc (mb, shifted_nursery_start);
/* if ((arg0 >> bits) == shifted_nursery_start) goto return-site */
217 mono_mb_emit_ldarg (mb, 0);
218 mono_mb_emit_icon (mb, DEFAULT_NURSERY_BITS);
219 mono_mb_emit_byte (mb, CEE_SHR_UN);
220 mono_mb_emit_ldloc (mb, shifted_nursery_start);
221 nursery_check_return_labels [0] = mono_mb_emit_branch (mb, CEE_BEQ);
223 if (!is_concurrent) {
224 // if (!ptr_in_nursery (*ptr)) return;
225 mono_mb_emit_ldarg (mb, 0);
226 mono_mb_emit_byte (mb, CEE_LDIND_I);
227 mono_mb_emit_icon (mb, DEFAULT_NURSERY_BITS);
228 mono_mb_emit_byte (mb, CEE_SHR_UN);
229 mono_mb_emit_ldloc (mb, shifted_nursery_start);
230 nursery_check_return_labels [1] = mono_mb_emit_branch (mb, CEE_BNE_UN);
/*
 * mono_gc_get_specific_write_barrier: build (or fetch the cached) managed
 * write-barrier wrapper for the concurrent or non-concurrent collector.
 * With MANAGED_WBARRIER the fast path marks the card for the written
 * address; the slow path (and the non-managed build) falls back to the
 * mono_gc_wbarrier_generic_nostore icall. Publication uses the
 * double-checked-locking shape visible below (memory barrier before the
 * cached-pointer store).
 * NOTE(review): several lines (declarations of res/i, #else/#endif pairs,
 * lock statements, closing braces) are missing from this extraction.
 */
236 mono_gc_get_specific_write_barrier (gboolean is_concurrent)
239 MonoMethodBuilder *mb;
240 MonoMethodSignature *sig;
241 MonoMethod **write_barrier_method_addr;
242 #ifdef MANAGED_WBARRIER
243 int i, nursery_check_labels [2];
246 // FIXME: Maybe create a separate version for ctors (the branch would be
247 // correctly predicted more times)
249 write_barrier_method_addr = &write_barrier_conc_method;
251 write_barrier_method_addr = &write_barrier_noconc_method;
253 if (*write_barrier_method_addr)
254 return *write_barrier_method_addr;
256 /* Create the IL version of mono_gc_barrier_generic_store () */
257 sig = mono_metadata_signature_alloc (mono_defaults.corlib, 1);
258 sig->ret = &mono_defaults.void_class->byval_arg;
259 sig->params [0] = &mono_defaults.int_class->byval_arg;
262 mb = mono_mb_new (mono_defaults.object_class, "wbarrier_conc", MONO_WRAPPER_WRITE_BARRIER);
264 mb = mono_mb_new (mono_defaults.object_class, "wbarrier_noconc", MONO_WRAPPER_WRITE_BARRIER);
267 #ifdef MANAGED_WBARRIER
268 emit_nursery_check (mb, nursery_check_labels, is_concurrent);
/* Pseudocode for the card-marking sequence emitted below: */
270 addr = sgen_cardtable + ((address >> CARD_BITS) & CARD_MASK)
274 LDC_PTR sgen_cardtable
280 if (SGEN_HAVE_OVERLAPPING_CARDS) {
281 LDC_PTR card_table_mask
/* card_table + ((arg0 >> CARD_BITS) [& CARD_MASK]) — then store 1. */
288 mono_mb_emit_byte (mb, MONO_CUSTOM_PREFIX);
289 mono_mb_emit_byte (mb, CEE_MONO_LDPTR_CARD_TABLE);
290 mono_mb_emit_ldarg (mb, 0);
291 mono_mb_emit_icon (mb, CARD_BITS);
292 mono_mb_emit_byte (mb, CEE_SHR_UN);
293 mono_mb_emit_byte (mb, CEE_CONV_I);
294 #ifdef SGEN_HAVE_OVERLAPPING_CARDS
295 #if SIZEOF_VOID_P == 8
296 mono_mb_emit_icon8 (mb, CARD_MASK);
298 mono_mb_emit_icon (mb, CARD_MASK);
300 mono_mb_emit_byte (mb, CEE_CONV_I);
301 mono_mb_emit_byte (mb, CEE_AND);
303 mono_mb_emit_byte (mb, CEE_ADD);
304 mono_mb_emit_icon (mb, 1);
305 mono_mb_emit_byte (mb, CEE_STIND_I1);
/* Patch the nursery-check early-exit branches to the RET below. */
308 for (i = 0; i < 2; ++i) {
309 if (nursery_check_labels [i])
310 mono_mb_patch_branch (mb, nursery_check_labels [i]);
312 mono_mb_emit_byte (mb, CEE_RET);
/* Non-managed-wbarrier build: delegate to the C slow path. */
314 mono_mb_emit_ldarg (mb, 0);
315 mono_mb_emit_icall (mb, mono_gc_wbarrier_generic_nostore);
316 mono_mb_emit_byte (mb, CEE_RET);
319 res = mono_mb_create_method (mb, sig, 16);
323 if (*write_barrier_method_addr) {
324 /* Already created */
325 mono_free_method (res);
327 /* double-checked locking */
328 mono_memory_barrier ();
329 *write_barrier_method_addr = res;
333 return *write_barrier_method_addr;
/* Convenience entry point: pick the variant matching the major collector. */
337 mono_gc_get_write_barrier (void)
339 return mono_gc_get_specific_write_barrier (major_collector.is_concurrent);
343 * Dummy filler objects
346 /* Vtable of the objects used to fill out nursery fragments before a collection */
347 static GCVTable *array_fill_vtable;
/*
 * get_array_fill_vtable: lazily build a fake byte-array vtable used to
 * stamp filler objects over unused nursery space. Uses statically
 * allocated storage (no GC allocation) aligned to 8 bytes.
 * NOTE(review): memset of the statics, the bmap declaration and the
 * closing brace are missing from this extraction.
 */
350 get_array_fill_vtable (void)
352 if (!array_fill_vtable) {
353 static MonoClass klass;
/* +8 slack so the vtable can be realigned inside the buffer. */
354 static char _vtable[sizeof(MonoVTable)+8];
355 MonoVTable* vtable = (MonoVTable*) ALIGN_TO(_vtable, 8);
358 MonoDomain *domain = mono_get_root_domain ();
361 klass.element_class = mono_defaults.byte_class;
363 klass.instance_size = sizeof (MonoArray);
364 klass.sizes.element_size = 1;
365 klass.name = "array_filler_type";
367 vtable->klass = &klass;
369 vtable->gc_descr = mono_gc_make_descr_for_array (TRUE, &bmap, 0, 1);
372 array_fill_vtable = (GCVTable*)vtable;
374 return array_fill_vtable;
/*
 * sgen_client_array_fill_range: overwrite [start, start+size) with a dummy
 * byte-array object so heap walkers can skip the range. Ranges smaller
 * than a MonoArray header are just zeroed instead.
 * NOTE(review): the `MonoArray *o` declaration, early return and closing
 * braces are missing from this extraction.
 */
378 sgen_client_array_fill_range (char *start, size_t size)
382 if (size < sizeof (MonoArray)) {
383 memset (start, 0, size);
387 o = (MonoArray*)start;
388 o->obj.vtable = (MonoVTable*)get_array_fill_vtable ();
389 /* Mark this as not a real object */
390 o->obj.synchronisation = GINT_TO_POINTER (-1);
392 o->max_length = (mono_array_size_t)(size - sizeof (MonoArray));
/*
 * sgen_client_zero_array_fill_header: undo the filler header before the
 * space is reused; smaller ranges are asserted to already be zero.
 */
398 sgen_client_zero_array_fill_header (void *p, size_t size)
400 if (size >= sizeof (MonoArray)) {
401 memset (p, 0, sizeof (MonoArray));
403 static guint8 zeros [sizeof (MonoArray)];
405 SGEN_ASSERT (0, !memcmp (p, zeros, size), "TLAB segment must be zeroed out.");
/* Embedder-supplied finalization-awareness callbacks (see registration below). */
413 static MonoGCFinalizerCallbacks fin_callbacks;
/*
 * mono_gc_get_vtable_bits: compute the SGen gc_bits for a class — bridge
 * classification plus the finalizer-aware flag.
 * NOTE(review): the `guint res = 0` declaration, break statements, closing
 * braces and final return are missing from this extraction; the switch
 * cases presumably each end in a break — confirm upstream.
 */
416 mono_gc_get_vtable_bits (MonoClass *class)
419 /* FIXME move this to the bridge code */
420 if (sgen_need_bridge_processing ()) {
421 switch (sgen_bridge_class_kind (class)) {
422 case GC_BRIDGE_TRANSPARENT_BRIDGE_CLASS:
423 case GC_BRIDGE_OPAQUE_BRIDGE_CLASS:
424 res = SGEN_GC_BIT_BRIDGE_OBJECT;
426 case GC_BRIDGE_OPAQUE_CLASS:
427 res = SGEN_GC_BIT_BRIDGE_OPAQUE_OBJECT;
429 case GC_BRIDGE_TRANSPARENT_CLASS:
433 if (fin_callbacks.is_class_finalization_aware) {
434 if (fin_callbacks.is_class_finalization_aware (class))
435 res |= SGEN_GC_BIT_FINALIZER_AWARE;
/* is_finalization_aware: check the finalizer-aware bit on the object's vtable. */
441 is_finalization_aware (MonoObject *obj)
443 MonoVTable *vt = ((MonoVTable*)SGEN_LOAD_VTABLE (obj));
444 return (vt->gc_bits & SGEN_GC_BIT_FINALIZER_AWARE) == SGEN_GC_BIT_FINALIZER_AWARE;
/*
 * sgen_client_object_queued_for_finalization: hook invoked when an object
 * is enqueued for finalization — notifies the embedder callback and fires
 * the DTrace/probe event when enabled.
 */
448 sgen_client_object_queued_for_finalization (GCObject *obj)
450 if (fin_callbacks.object_queued_for_finalization && is_finalization_aware (obj))
451 fin_callbacks.object_queued_for_finalization (obj);
454 if (G_UNLIKELY (MONO_GC_FINALIZE_ENQUEUE_ENABLED ())) {
455 int gen = sgen_ptr_in_nursery (obj) ? GENERATION_NURSERY : GENERATION_OLD;
456 GCVTable *vt = (GCVTable*)SGEN_LOAD_VTABLE (obj);
457 MONO_GC_FINALIZE_ENQUEUE ((mword)obj, sgen_safe_object_get_size (obj),
458 sgen_client_vtable_get_namespace (vt), sgen_client_vtable_get_name (vt), gen,
459 sgen_client_object_has_critical_finalizer (obj));
/*
 * mono_gc_register_finalizer_callbacks: install the embedder's
 * finalization-awareness callbacks after validating the struct version.
 * Aborts (g_error) on a version mismatch.
 */
465 mono_gc_register_finalizer_callbacks (MonoGCFinalizerCallbacks *callbacks)
467 if (callbacks->version != MONO_GC_FINALIZER_EXTENSION_VERSION)
468 g_error ("Invalid finalizer callback version. Expected %d but got %d\n", MONO_GC_FINALIZER_EXTENSION_VERSION, callbacks->version);
/* Copy by value — the caller's struct need not stay alive. */
470 fin_callbacks = *callbacks;
/* Thin wrappers bridging the public mono_gc_* finalization API to sgen. */
474 sgen_client_run_finalize (MonoObject *obj)
476 mono_gc_run_finalize (obj, NULL);
/* Run all pending finalizers; returns the count (per sgen contract). */
480 mono_gc_invoke_finalizers (void)
482 return sgen_gc_invoke_finalizers ();
486 mono_gc_pending_finalizers (void)
488 return sgen_have_pending_finalizers ();
/* Wake the finalizer thread. */
492 sgen_client_finalize_notify (void)
494 mono_gc_finalize_notify ();
498 mono_gc_register_for_finalization (MonoObject *obj, void *user_data)
500 sgen_object_register_for_finalization (obj, user_data);
/*
 * object_in_domain_predicate: filter used during domain unload — true for
 * objects belonging to `user_data` (a MonoDomain*).
 * NOTE(review): the `return TRUE/FALSE` lines are missing from this
 * extraction.
 */
504 object_in_domain_predicate (MonoObject *obj, void *user_data)
506 MonoDomain *domain = user_data;
507 if (mono_object_domain (obj) == domain) {
508 SGEN_LOG (5, "Unregistering finalizer for object: %p (%s)", obj, sgen_client_vtable_get_name (SGEN_LOAD_VTABLE (obj)));
515 * mono_gc_finalizers_for_domain:
516 * @domain: the unloading appdomain
517 * @out_array: output array
518 * @out_size: size of output array
520 * Store inside @out_array up to @out_size objects that belong to the unloading
521 * appdomain @domain. Returns the number of stored items. Can be called repeteadly
522 * until it returns 0.
523 * The items are removed from the finalizer data structure, so the caller is supposed
525 * @out_array should be on the stack to allow the GC to know the objects are still alive.
528 mono_gc_finalizers_for_domain (MonoDomain *domain, MonoObject **out_array, int out_size)
530 return sgen_gather_finalizers_if (object_in_domain_predicate, domain, out_array, out_size);
/* Singly-linked list of registered ephemeron arrays (ConditionalWeakTable). */
537 typedef struct _EphemeronLinkNode EphemeronLinkNode;
539 struct _EphemeronLinkNode {
540 EphemeronLinkNode *next;
549 static EphemeronLinkNode *ephemeron_list;
551 /* LOCKING: requires that the GC lock is held */
/*
 * null_ephemerons_for_domain: during domain unload, unlink (and free) the
 * list nodes whose ephemeron array belongs to `domain`.
 * NOTE(review): the surrounding while-loop head, else branches and closing
 * braces are missing from this extraction — the prev/current threading is
 * only partially visible.
 */
553 null_ephemerons_for_domain (MonoDomain *domain)
555 EphemeronLinkNode *current = ephemeron_list, *prev = NULL;
558 MonoObject *object = (MonoObject*)current->array;
561 SGEN_ASSERT (0, object->vtable, "Can't have objects without vtables.");
563 if (object && object->vtable->domain == domain) {
564 EphemeronLinkNode *tmp = current;
/* Unlink: fix prev->next, or the list head when removing the first node. */
567 prev->next = current->next;
569 ephemeron_list = current->next;
571 current = current->next;
572 sgen_free_internal (tmp, INTERNAL_MEM_EPHEMERON_LINK);
575 current = current->next;
580 /* LOCKING: requires that the GC lock is held */
/*
 * sgen_client_clear_unreachable_ephemerons: after marking, walk every
 * registered ephemeron array. Dead arrays are unlinked and their list
 * nodes freed; live arrays are forwarded via copy_func and each entry
 * whose key is unreachable gets its key replaced by the domain's
 * tombstone (the value is presumably cleared too — those lines are
 * missing from this extraction, along with loop heads and braces).
 */
582 sgen_client_clear_unreachable_ephemerons (ScanCopyContext ctx)
584 CopyOrMarkObjectFunc copy_func = ctx.ops->copy_or_mark_object;
585 SgenGrayQueue *queue = ctx.queue;
586 EphemeronLinkNode *current = ephemeron_list, *prev = NULL;
588 Ephemeron *cur, *array_end;
592 char *object = current->array;
594 if (!sgen_is_object_alive_for_current_gen (object)) {
595 EphemeronLinkNode *tmp = current;
597 SGEN_LOG (5, "Dead Ephemeron array at %p", object);
600 prev->next = current->next;
602 ephemeron_list = current->next;
604 current = current->next;
605 sgen_free_internal (tmp, INTERNAL_MEM_EPHEMERON_LINK);
/* Array survived: update the stored pointer to the forwarded copy. */
610 copy_func ((void**)&object, queue);
611 current->array = object;
613 SGEN_LOG (5, "Clearing unreachable entries for ephemeron array at %p", object);
615 array = (MonoArray*)object;
616 cur = mono_array_addr (array, Ephemeron, 0);
617 array_end = cur + mono_array_length_fast (array);
618 tombstone = (char*)((MonoVTable*)SGEN_LOAD_VTABLE (object))->domain->ephemeron_tombstone;
620 for (; cur < array_end; ++cur) {
621 char *key = (char*)cur->key;
/* Empty or already-tombstoned slot: nothing to clear. */
623 if (!key || key == tombstone)
626 SGEN_LOG (5, "[%td] key %p (%s) value %p (%s)", cur - mono_array_addr (array, Ephemeron, 0),
627 key, sgen_is_object_alive_for_current_gen (key) ? "reachable" : "unreachable",
628 cur->value, cur->value && sgen_is_object_alive_for_current_gen (cur->value) ? "reachable" : "unreachable");
630 if (!sgen_is_object_alive_for_current_gen (key)) {
631 cur->key = tombstone;
637 current = current->next;
642 LOCKING: requires that the GC lock is held
644 Limitations: We scan all ephemerons on every collection since the current design doesn't allow for a simple nursery/mature split.
/*
 * sgen_client_mark_ephemerons: one iteration of the ephemeron fixpoint.
 * For every live ephemeron array, any entry whose key is reachable gets
 * its key and value forwarded/marked; returns TRUE ("nothing_marked")
 * when no previously-unreachable value was marked, i.e. the fixpoint has
 * been reached. NOTE(review): `continue` statements, some declarations
 * and closing braces are missing from this extraction.
 */
647 sgen_client_mark_ephemerons (ScanCopyContext ctx)
649 CopyOrMarkObjectFunc copy_func = ctx.ops->copy_or_mark_object;
650 SgenGrayQueue *queue = ctx.queue;
651 gboolean nothing_marked = TRUE;
652 EphemeronLinkNode *current = ephemeron_list;
654 Ephemeron *cur, *array_end;
657 for (current = ephemeron_list; current; current = current->next) {
658 char *object = current->array;
659 SGEN_LOG (5, "Ephemeron array at %p", object);
661 /*It has to be alive*/
662 if (!sgen_is_object_alive_for_current_gen (object)) {
663 SGEN_LOG (5, "\tnot reachable");
/* Forward the array itself before scanning its entries. */
667 copy_func ((void**)&object, queue);
669 array = (MonoArray*)object;
670 cur = mono_array_addr (array, Ephemeron, 0);
671 array_end = cur + mono_array_length_fast (array);
672 tombstone = (char*)((MonoVTable*)SGEN_LOAD_VTABLE (object))->domain->ephemeron_tombstone;
674 for (; cur < array_end; ++cur) {
675 char *key = cur->key;
677 if (!key || key == tombstone)
680 SGEN_LOG (5, "[%td] key %p (%s) value %p (%s)", cur - mono_array_addr (array, Ephemeron, 0),
681 key, sgen_is_object_alive_for_current_gen (key) ? "reachable" : "unreachable",
682 cur->value, cur->value && sgen_is_object_alive_for_current_gen (cur->value) ? "reachable" : "unreachable");
684 if (sgen_is_object_alive_for_current_gen (key)) {
685 char *value = cur->value;
687 copy_func ((void**)&cur->key, queue);
/* A value that was dead before this pass got marked: not at fixpoint. */
689 if (!sgen_is_object_alive_for_current_gen (value))
690 nothing_marked = FALSE;
691 copy_func ((void**)&cur->value, queue);
697 SGEN_LOG (5, "Ephemeron run finished. Is it done %d", nothing_marked);
698 return nothing_marked;
/*
 * mono_gc_ephemeron_array_add: register an Ephemeron[] with the GC by
 * prepending a node to ephemeron_list. NOTE(review): the lock/unlock
 * calls, allocation-failure handling and return value are missing from
 * this extraction.
 */
702 mono_gc_ephemeron_array_add (MonoObject *obj)
704 EphemeronLinkNode *node;
708 node = sgen_alloc_internal (INTERNAL_MEM_EPHEMERON_LINK);
713 node->array = (char*)obj;
714 node->next = ephemeron_list;
715 ephemeron_list = node;
717 SGEN_LOG (5, "Registered ephemeron array %p", obj);
/*
 * mono_gc_set_current_thread_appdomain: record the domain the current
 * thread was stopped in (used while the world is stopped).
 */
728 mono_gc_set_current_thread_appdomain (MonoDomain *domain)
730 SgenThreadInfo *info = mono_thread_info_current ();
732 /* Could be called from sgen_thread_unregister () with a NULL info */
735 info->client_info.stopped_domain = domain;
/*
 * need_remove_object_for_domain: true if `start` belongs to the unloading
 * domain and must be wiped; also logs a cleanup protocol entry.
 * NOTE(review): return statements are missing from this extraction.
 */
740 need_remove_object_for_domain (char *start, MonoDomain *domain)
742 if (mono_object_domain (start) == domain) {
743 SGEN_LOG (4, "Need to cleanup object %p", start);
744 binary_protocol_cleanup (start, (gpointer)SGEN_LOAD_VTABLE (start), sgen_safe_object_get_size ((GCObject*)start));
/*
 * process_object_for_domain_clearing: per-object pre-pass during domain
 * unload. Asserts internal threads live in the root domain, and nulls a
 * remoting real-proxy's pointer to a server object that belongs to (or
 * was already wiped from) the dying domain.
 */
751 process_object_for_domain_clearing (char *start, MonoDomain *domain)
753 MonoVTable *vt = (MonoVTable*)SGEN_LOAD_VTABLE (start);
754 if (vt->klass == mono_defaults.internal_thread_class)
755 g_assert (mono_object_domain (start) == mono_get_root_domain ());
756 /* The object could be a proxy for an object in the domain
758 #ifndef DISABLE_REMOTING
759 if (mono_defaults.real_proxy_class->supertypes && mono_class_has_parent_fast (vt->klass, mono_defaults.real_proxy_class)) {
760 MonoObject *server = ((MonoRealProxy*)start)->unwrapped_server;
762 /* The server could already have been zeroed out, so
763 we need to check for that, too. */
764 if (server && (!SGEN_LOAD_VTABLE (server) || mono_object_domain (server) == domain)) {
765 SGEN_LOG (4, "Cleaning up remote pointer in %p to object %p", start, server);
766 ((MonoRealProxy*)start)->unwrapped_server = NULL;
/*
 * clear_domain_process_object: run the pre-pass, decide whether the
 * object must be removed, and unregister its monitor weak link if it has
 * a synchronization block. NOTE(review): the `gboolean remove`
 * declaration, the dislink null-check and the return are missing from
 * this extraction.
 */
773 clear_domain_process_object (char *obj, MonoDomain *domain)
777 process_object_for_domain_clearing (obj, domain);
778 remove = need_remove_object_for_domain (obj, domain);
780 if (remove && ((MonoObject*)obj)->synchronisation) {
781 void **dislink = mono_monitor_get_object_monitor_weak_link ((MonoObject*)obj);
783 sgen_register_disappearing_link (NULL, dislink, FALSE, TRUE);
/*
 * Iterate-objects callbacks used by mono_gc_clear_domain below.
 * Minor (nursery) objects from the dying domain are zeroed in place;
 * major objects are processed (proxy fixups / dislink removal) in one
 * pass, then freed via the collector in a second pass.
 */
790 clear_domain_process_minor_object_callback (char *obj, size_t size, MonoDomain *domain)
792 if (clear_domain_process_object (obj, domain)) {
793 CANARIFY_SIZE (size);
794 memset (obj, 0, size);
799 clear_domain_process_major_object_callback (char *obj, size_t size, MonoDomain *domain)
801 clear_domain_process_object (obj, domain);
805 clear_domain_free_major_non_pinned_object_callback (char *obj, size_t size, MonoDomain *domain)
807 if (need_remove_object_for_domain (obj, domain))
808 major_collector.free_non_pinned_object (obj, size);
812 clear_domain_free_major_pinned_object_callback (char *obj, size_t size, MonoDomain *domain)
814 if (need_remove_object_for_domain (obj, domain))
815 major_collector.free_pinned_object (obj, size);
819 * When appdomains are unloaded we can easily remove objects that have finalizers,
820 * but all the others could still be present in random places on the heap.
821 * We need a sweep to get rid of them even though it's going to be costly
823 * The reason we need to remove them is because we access the vtable and class
824 * structures to know the object size and the reference bitmap: once the domain is
825 * unloaded the point to random memory.
/*
 * NOTE(review): several lines are missing from this extraction — the
 * `int i` declaration, the stop-the-world call that must pair with the
 * sgen_restart_world at the end, lock/unlock, else branches and closing
 * braces. The visible sequence: flush concurrent collection, drain
 * staged fin/dislink entries, optional xdomain checks, null ephemerons
 * and weak links, wipe nursery objects, two passes over major/LOS (see
 * the comment at line 869), then restart the world.
 */
828 mono_gc_clear_domain (MonoDomain * domain)
830 LOSObject *bigobj, *prev;
835 binary_protocol_domain_unload_begin (domain);
/* A concurrent collection cannot survive domain teardown — finish it. */
839 if (sgen_concurrent_collection_in_progress ())
840 sgen_perform_collection (0, GENERATION_OLD, "clear domain", TRUE);
841 SGEN_ASSERT (0, !sgen_concurrent_collection_in_progress (), "We just ordered a synchronous collection. Why are we collecting concurrently?");
843 major_collector.finish_sweeping ();
845 sgen_process_fin_stage_entries ();
846 sgen_process_dislink_stage_entries ();
848 sgen_clear_nursery_fragments ();
850 if (sgen_mono_xdomain_checks && domain != mono_get_root_domain ()) {
851 sgen_scan_for_registered_roots_in_domain (domain, ROOT_TYPE_NORMAL);
852 sgen_scan_for_registered_roots_in_domain (domain, ROOT_TYPE_WBARRIER);
853 sgen_check_for_xdomain_refs ();
856 /*Ephemerons and dislinks must be processed before LOS since they might end up pointing
857 to memory returned to the OS.*/
858 null_ephemerons_for_domain (domain);
860 for (i = GENERATION_NURSERY; i < GENERATION_MAX; ++i)
861 sgen_null_links_if (object_in_domain_predicate, domain, i);
863 for (i = GENERATION_NURSERY; i < GENERATION_MAX; ++i)
864 sgen_remove_finalizers_if (object_in_domain_predicate, domain, i);
866 sgen_scan_area_with_callback (nursery_section->data, nursery_section->end_data,
867 (IterateObjectCallbackFunc)clear_domain_process_minor_object_callback, domain, FALSE);
869 /* We need two passes over major and large objects because
870 freeing such objects might give their memory back to the OS
871 (in the case of large objects) or obliterate its vtable
872 (pinned objects with major-copying or pinned and non-pinned
873 objects with major-mark&sweep), but we might need to
874 dereference a pointer from an object to another object if
875 the first object is a proxy. */
876 major_collector.iterate_objects (ITERATE_OBJECTS_SWEEP_ALL, (IterateObjectCallbackFunc)clear_domain_process_major_object_callback, domain);
877 for (bigobj = los_object_list; bigobj; bigobj = bigobj->next)
878 clear_domain_process_object (bigobj->data, domain);
881 for (bigobj = los_object_list; bigobj;) {
882 if (need_remove_object_for_domain (bigobj->data, domain)) {
883 LOSObject *to_free = bigobj;
/* Unlink from the LOS list (head case when prev is NULL) before freeing. */
885 prev->next = bigobj->next;
887 los_object_list = bigobj->next;
888 bigobj = bigobj->next;
/* NOTE(review): this log reads bigobj->data after advancing bigobj —
 * upstream logs to_free->data before the advance; confirm ordering. */
889 SGEN_LOG (4, "Freeing large object %p", bigobj->data);
890 sgen_los_free_object (to_free);
894 bigobj = bigobj->next;
896 major_collector.iterate_objects (ITERATE_OBJECTS_SWEEP_NON_PINNED, (IterateObjectCallbackFunc)clear_domain_free_major_non_pinned_object_callback, domain);
897 major_collector.iterate_objects (ITERATE_OBJECTS_SWEEP_PINNED, (IterateObjectCallbackFunc)clear_domain_free_major_pinned_object_callback, domain);
899 if (domain == mono_get_root_domain ()) {
900 sgen_pin_stats_print_class_stats ();
901 sgen_object_layout_dump (stdout);
904 sgen_restart_world (0, NULL);
906 binary_protocol_domain_unload_end (domain);
907 binary_protocol_flush_buffers (FALSE);
/* Public allocation entry points — thin wrappers over the sgen allocators. */
917 mono_gc_alloc_obj (MonoVTable *vtable, size_t size)
919 return sgen_alloc_obj (vtable, size);
923 mono_gc_alloc_pinned_obj (MonoVTable *vtable, size_t size)
925 return sgen_alloc_obj_pinned (vtable, size);
/*
 * mono_gc_alloc_mature: allocate directly in the old generation and
 * register a finalizer if the class has one. NOTE(review): the `return
 * obj` line is missing from this extraction.
 */
929 mono_gc_alloc_mature (MonoVTable *vtable)
931 MonoObject *obj = sgen_alloc_obj_mature (vtable, vtable->klass->instance_size);
932 if (obj && G_UNLIKELY (obj->vtable->klass->has_finalize))
933 mono_object_register_finalizer (obj);
/*
 * mono_gc_alloc_fixed: non-moving, GC-visible allocation — malloc'd
 * memory registered as a GC root with descriptor `descr`.
 * NOTE(review): the failure branch (free + return NULL) and the final
 * return are missing from this extraction.
 */
938 mono_gc_alloc_fixed (size_t size, void *descr)
940 /* FIXME: do a single allocation */
941 void *res = calloc (1, size);
944 if (!mono_gc_register_root (res, size, descr)) {
/* mono_gc_free_fixed: deregister the root (the free call is missing here). */
952 mono_gc_free_fixed (void* addr)
954 mono_gc_deregister_root (addr);
/* Cache of generated managed allocator wrappers, one per allocator type. */
962 static MonoMethod* alloc_method_cache [ATYPE_NUM];
963 static gboolean use_managed_allocator = TRUE;
965 #ifdef MANAGED_ALLOCATION
/*
 * TLS access for the managed allocator's fast path. Three variants:
 *  - HAVE_KW_THREAD: direct CEE_MONO_TLS loads of the TLAB slots;
 *  - __APPLE__/HOST_WIN32: load the SgenThreadInfo pointer, then offset
 *    into its tlab_next_addr / tlab_temp_end fields;
 *  - otherwise: managed allocation unsupported (g_error at emit time).
 * NOTE(review): the #else/#endif lines separating the variants are
 * missing from this extraction.
 */
967 #ifdef HAVE_KW_THREAD
969 #define EMIT_TLS_ACCESS_NEXT_ADDR(mb) do { \
970 mono_mb_emit_byte ((mb), MONO_CUSTOM_PREFIX); \
971 mono_mb_emit_byte ((mb), CEE_MONO_TLS); \
972 mono_mb_emit_i4 ((mb), TLS_KEY_SGEN_TLAB_NEXT_ADDR); \
975 #define EMIT_TLS_ACCESS_TEMP_END(mb) do { \
976 mono_mb_emit_byte ((mb), MONO_CUSTOM_PREFIX); \
977 mono_mb_emit_byte ((mb), CEE_MONO_TLS); \
978 mono_mb_emit_i4 ((mb), TLS_KEY_SGEN_TLAB_TEMP_END); \
983 #if defined(__APPLE__) || defined (HOST_WIN32)
984 #define EMIT_TLS_ACCESS_NEXT_ADDR(mb) do { \
985 mono_mb_emit_byte ((mb), MONO_CUSTOM_PREFIX); \
986 mono_mb_emit_byte ((mb), CEE_MONO_TLS); \
987 mono_mb_emit_i4 ((mb), TLS_KEY_SGEN_THREAD_INFO); \
988 mono_mb_emit_icon ((mb), MONO_STRUCT_OFFSET (SgenThreadInfo, tlab_next_addr)); \
989 mono_mb_emit_byte ((mb), CEE_ADD); \
990 mono_mb_emit_byte ((mb), CEE_LDIND_I); \
993 #define EMIT_TLS_ACCESS_TEMP_END(mb) do { \
994 mono_mb_emit_byte ((mb), MONO_CUSTOM_PREFIX); \
995 mono_mb_emit_byte ((mb), CEE_MONO_TLS); \
996 mono_mb_emit_i4 ((mb), TLS_KEY_SGEN_THREAD_INFO); \
997 mono_mb_emit_icon ((mb), MONO_STRUCT_OFFSET (SgenThreadInfo, tlab_temp_end)); \
998 mono_mb_emit_byte ((mb), CEE_ADD); \
999 mono_mb_emit_byte ((mb), CEE_LDIND_I); \
1003 #define EMIT_TLS_ACCESS_NEXT_ADDR(mb) do { g_error ("sgen is not supported when using --with-tls=pthread.\n"); } while (0)
1004 #define EMIT_TLS_ACCESS_TEMP_END(mb) do { g_error ("sgen is not supported when using --with-tls=pthread.\n"); } while (0)
1009 /* FIXME: Do this in the JIT, where specialized allocation sequences can be created
1010 * for each class. This is currently not easy to do, as it is hard to generate basic
1011 * blocks + branches, but it is easy with the linear IL codebase.
1013 * For this to work we'd need to solve the TLAB race, first. Now we
1014 * require the allocator to be in a few known methods to make sure
1015 * that they are executed atomically via the restart mechanism.
1018 create_allocator (int atype)
1020 int p_var, size_var;
1021 guint32 slowpath_branch, max_size_branch;
1022 MonoMethodBuilder *mb;
1024 MonoMethodSignature *csig;
1025 static gboolean registered = FALSE;
1026 int tlab_next_addr_var, new_next_var;
1028 const char *name = NULL;
1029 AllocatorWrapperInfo *info;
1032 mono_register_jit_icall (mono_gc_alloc_obj, "mono_gc_alloc_obj", mono_create_icall_signature ("object ptr int"), FALSE);
1033 mono_register_jit_icall (mono_gc_alloc_vector, "mono_gc_alloc_vector", mono_create_icall_signature ("object ptr int int"), FALSE);
1034 mono_register_jit_icall (mono_gc_alloc_string, "mono_gc_alloc_string", mono_create_icall_signature ("object ptr int int32"), FALSE);
1038 if (atype == ATYPE_SMALL) {
1040 name = "AllocSmall";
1041 } else if (atype == ATYPE_NORMAL) {
1044 } else if (atype == ATYPE_VECTOR) {
1046 name = "AllocVector";
1047 } else if (atype == ATYPE_STRING) {
1049 name = "AllocString";
1051 g_assert_not_reached ();
1054 csig = mono_metadata_signature_alloc (mono_defaults.corlib, num_params);
1055 if (atype == ATYPE_STRING) {
1056 csig->ret = &mono_defaults.string_class->byval_arg;
1057 csig->params [0] = &mono_defaults.int_class->byval_arg;
1058 csig->params [1] = &mono_defaults.int32_class->byval_arg;
1060 csig->ret = &mono_defaults.object_class->byval_arg;
1061 for (i = 0; i < num_params; ++i)
1062 csig->params [i] = &mono_defaults.int_class->byval_arg;
1065 mb = mono_mb_new (mono_defaults.object_class, name, MONO_WRAPPER_ALLOC);
1068 size_var = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
1069 if (atype == ATYPE_SMALL) {
1070 /* size_var = size_arg */
1071 mono_mb_emit_ldarg (mb, 1);
1072 mono_mb_emit_stloc (mb, size_var);
1073 } else if (atype == ATYPE_NORMAL) {
1074 /* size = vtable->klass->instance_size; */
1075 mono_mb_emit_ldarg (mb, 0);
1076 mono_mb_emit_icon (mb, MONO_STRUCT_OFFSET (MonoVTable, klass));
1077 mono_mb_emit_byte (mb, CEE_ADD);
1078 mono_mb_emit_byte (mb, CEE_LDIND_I);
1079 mono_mb_emit_icon (mb, MONO_STRUCT_OFFSET (MonoClass, instance_size));
1080 mono_mb_emit_byte (mb, CEE_ADD);
1081 /* FIXME: assert instance_size stays a 4 byte integer */
1082 mono_mb_emit_byte (mb, CEE_LDIND_U4);
1083 mono_mb_emit_byte (mb, CEE_CONV_I);
1084 mono_mb_emit_stloc (mb, size_var);
1085 } else if (atype == ATYPE_VECTOR) {
1086 MonoExceptionClause *clause;
1087 int pos, pos_leave, pos_error;
1088 MonoClass *oom_exc_class;
1092 * n > MONO_ARRAY_MAX_INDEX => OutOfMemoryException
1093 * n < 0 => OverflowException
1095 * We can do an unsigned comparison to catch both cases, then in the error
1096 * case compare signed to distinguish between them.
1098 mono_mb_emit_ldarg (mb, 1);
1099 mono_mb_emit_icon (mb, MONO_ARRAY_MAX_INDEX);
1100 mono_mb_emit_byte (mb, CEE_CONV_U);
1101 pos = mono_mb_emit_short_branch (mb, CEE_BLE_UN_S);
1103 mono_mb_emit_byte (mb, MONO_CUSTOM_PREFIX);
1104 mono_mb_emit_byte (mb, CEE_MONO_NOT_TAKEN);
1105 mono_mb_emit_ldarg (mb, 1);
1106 mono_mb_emit_icon (mb, 0);
1107 pos_error = mono_mb_emit_short_branch (mb, CEE_BLT_S);
1108 mono_mb_emit_exception (mb, "OutOfMemoryException", NULL);
1109 mono_mb_patch_short_branch (mb, pos_error);
1110 mono_mb_emit_exception (mb, "OverflowException", NULL);
1112 mono_mb_patch_short_branch (mb, pos);
1114 clause = mono_image_alloc0 (mono_defaults.corlib, sizeof (MonoExceptionClause));
1115 clause->try_offset = mono_mb_get_label (mb);
1117 /* vtable->klass->sizes.element_size */
1118 mono_mb_emit_ldarg (mb, 0);
1119 mono_mb_emit_icon (mb, MONO_STRUCT_OFFSET (MonoVTable, klass));
1120 mono_mb_emit_byte (mb, CEE_ADD);
1121 mono_mb_emit_byte (mb, CEE_LDIND_I);
1122 mono_mb_emit_icon (mb, MONO_STRUCT_OFFSET (MonoClass, sizes));
1123 mono_mb_emit_byte (mb, CEE_ADD);
1124 mono_mb_emit_byte (mb, CEE_LDIND_U4);
1125 mono_mb_emit_byte (mb, CEE_CONV_I);
1128 mono_mb_emit_ldarg (mb, 1);
1129 mono_mb_emit_byte (mb, CEE_MUL_OVF_UN);
1130 /* + sizeof (MonoArray) */
1131 mono_mb_emit_icon (mb, sizeof (MonoArray));
1132 mono_mb_emit_byte (mb, CEE_ADD_OVF_UN);
1133 mono_mb_emit_stloc (mb, size_var);
1135 pos_leave = mono_mb_emit_branch (mb, CEE_LEAVE);
1138 clause->flags = MONO_EXCEPTION_CLAUSE_NONE;
1139 clause->try_len = mono_mb_get_pos (mb) - clause->try_offset;
1140 clause->data.catch_class = mono_class_from_name (mono_defaults.corlib,
1141 "System", "OverflowException");
1142 g_assert (clause->data.catch_class);
1143 clause->handler_offset = mono_mb_get_label (mb);
1145 oom_exc_class = mono_class_from_name (mono_defaults.corlib,
1146 "System", "OutOfMemoryException");
1147 g_assert (oom_exc_class);
1148 ctor = mono_class_get_method_from_name (oom_exc_class, ".ctor", 0);
1151 mono_mb_emit_byte (mb, CEE_POP);
1152 mono_mb_emit_op (mb, CEE_NEWOBJ, ctor);
1153 mono_mb_emit_byte (mb, CEE_THROW);
1155 clause->handler_len = mono_mb_get_pos (mb) - clause->handler_offset;
1156 mono_mb_set_clauses (mb, 1, clause);
1157 mono_mb_patch_branch (mb, pos_leave);
1159 } else if (atype == ATYPE_STRING) {
1163 * a string allocator method takes the args: (vtable, len)
1165 * bytes = offsetof (MonoString, chars) + ((len + 1) * 2)
1169 * bytes <= INT32_MAX - (SGEN_ALLOC_ALIGN - 1)
1173 * offsetof (MonoString, chars) + ((len + 1) * 2) <= INT32_MAX - (SGEN_ALLOC_ALIGN - 1)
1174 * len <= (INT32_MAX - (SGEN_ALLOC_ALIGN - 1) - offsetof (MonoString, chars)) / 2 - 1
1176 mono_mb_emit_ldarg (mb, 1);
1177 mono_mb_emit_icon (mb, (INT32_MAX - (SGEN_ALLOC_ALIGN - 1) - MONO_STRUCT_OFFSET (MonoString, chars)) / 2 - 1);
1178 pos = mono_mb_emit_short_branch (mb, MONO_CEE_BLE_UN_S);
1180 mono_mb_emit_byte (mb, MONO_CUSTOM_PREFIX);
1181 mono_mb_emit_byte (mb, CEE_MONO_NOT_TAKEN);
1182 mono_mb_emit_exception (mb, "OutOfMemoryException", NULL);
1183 mono_mb_patch_short_branch (mb, pos);
1185 mono_mb_emit_ldarg (mb, 1);
1186 mono_mb_emit_icon (mb, 1);
1187 mono_mb_emit_byte (mb, MONO_CEE_SHL);
1188 //WE manually fold the above + 2 here
1189 mono_mb_emit_icon (mb, MONO_STRUCT_OFFSET (MonoString, chars) + 2);
1190 mono_mb_emit_byte (mb, CEE_ADD);
1191 mono_mb_emit_stloc (mb, size_var);
1193 g_assert_not_reached ();
1196 if (atype != ATYPE_SMALL) {
1197 /* size += ALLOC_ALIGN - 1; */
1198 mono_mb_emit_ldloc (mb, size_var);
1199 mono_mb_emit_icon (mb, SGEN_ALLOC_ALIGN - 1);
1200 mono_mb_emit_byte (mb, CEE_ADD);
1201 /* size &= ~(ALLOC_ALIGN - 1); */
1202 mono_mb_emit_icon (mb, ~(SGEN_ALLOC_ALIGN - 1));
1203 mono_mb_emit_byte (mb, CEE_AND);
1204 mono_mb_emit_stloc (mb, size_var);
1207 /* if (size > MAX_SMALL_OBJ_SIZE) goto slowpath */
1208 if (atype != ATYPE_SMALL) {
1209 mono_mb_emit_ldloc (mb, size_var);
1210 mono_mb_emit_icon (mb, SGEN_MAX_SMALL_OBJ_SIZE);
1211 max_size_branch = mono_mb_emit_short_branch (mb, MONO_CEE_BGT_UN_S);
1215 * We need to modify tlab_next, but the JIT only supports reading, so we read
1216 * another tls var holding its address instead.
1219 /* tlab_next_addr (local) = tlab_next_addr (TLS var) */
1220 tlab_next_addr_var = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
1221 EMIT_TLS_ACCESS_NEXT_ADDR (mb);
1222 mono_mb_emit_stloc (mb, tlab_next_addr_var);
1224 /* p = (void**)tlab_next; */
1225 p_var = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
1226 mono_mb_emit_ldloc (mb, tlab_next_addr_var);
1227 mono_mb_emit_byte (mb, CEE_LDIND_I);
1228 mono_mb_emit_stloc (mb, p_var);
1230 /* new_next = (char*)p + size; */
1231 new_next_var = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
1232 mono_mb_emit_ldloc (mb, p_var);
1233 mono_mb_emit_ldloc (mb, size_var);
1234 mono_mb_emit_byte (mb, CEE_CONV_I);
1235 mono_mb_emit_byte (mb, CEE_ADD);
1236 mono_mb_emit_stloc (mb, new_next_var);
1238 /* if (G_LIKELY (new_next < tlab_temp_end)) */
1239 mono_mb_emit_ldloc (mb, new_next_var);
1240 EMIT_TLS_ACCESS_TEMP_END (mb);
1241 slowpath_branch = mono_mb_emit_short_branch (mb, MONO_CEE_BLT_UN_S);
1244 if (atype != ATYPE_SMALL)
1245 mono_mb_patch_short_branch (mb, max_size_branch);
1247 mono_mb_emit_byte (mb, MONO_CUSTOM_PREFIX);
1248 mono_mb_emit_byte (mb, CEE_MONO_NOT_TAKEN);
1250 /* FIXME: mono_gc_alloc_obj takes a 'size_t' as an argument, not an int32 */
1251 mono_mb_emit_ldarg (mb, 0);
1252 mono_mb_emit_ldloc (mb, size_var);
1253 if (atype == ATYPE_NORMAL || atype == ATYPE_SMALL) {
1254 mono_mb_emit_icall (mb, mono_gc_alloc_obj);
1255 } else if (atype == ATYPE_VECTOR) {
1256 mono_mb_emit_ldarg (mb, 1);
1257 mono_mb_emit_icall (mb, mono_gc_alloc_vector);
1258 } else if (atype == ATYPE_STRING) {
1259 mono_mb_emit_ldarg (mb, 1);
1260 mono_mb_emit_icall (mb, mono_gc_alloc_string);
1262 g_assert_not_reached ();
1264 mono_mb_emit_byte (mb, CEE_RET);
1267 mono_mb_patch_short_branch (mb, slowpath_branch);
1269 /* FIXME: Memory barrier */
1271 /* tlab_next = new_next */
1272 mono_mb_emit_ldloc (mb, tlab_next_addr_var);
1273 mono_mb_emit_ldloc (mb, new_next_var);
1274 mono_mb_emit_byte (mb, CEE_STIND_I);
1276 /*The tlab store must be visible before the vtable store. This could be replaced with a DDS but doing it with IL would be tricky. */
1277 mono_mb_emit_byte (mb, MONO_CUSTOM_PREFIX);
1278 mono_mb_emit_byte (mb, CEE_MONO_MEMORY_BARRIER);
1279 mono_mb_emit_i4 (mb, MONO_MEMORY_BARRIER_REL);
1282 mono_mb_emit_ldloc (mb, p_var);
1283 mono_mb_emit_ldarg (mb, 0);
1284 mono_mb_emit_byte (mb, CEE_STIND_I);
1286 if (atype == ATYPE_VECTOR) {
1287 /* arr->max_length = max_length; */
1288 mono_mb_emit_ldloc (mb, p_var);
1289 mono_mb_emit_ldflda (mb, MONO_STRUCT_OFFSET (MonoArray, max_length));
1290 mono_mb_emit_ldarg (mb, 1);
1291 #ifdef MONO_BIG_ARRAYS
1292 mono_mb_emit_byte (mb, CEE_STIND_I);
1294 mono_mb_emit_byte (mb, CEE_STIND_I4);
1296 } else if (atype == ATYPE_STRING) {
1297 /* need to set length and clear the last char */
1298 /* s->length = len; */
1299 mono_mb_emit_ldloc (mb, p_var);
1300 mono_mb_emit_icon (mb, MONO_STRUCT_OFFSET (MonoString, length));
1301 mono_mb_emit_byte (mb, MONO_CEE_ADD);
1302 mono_mb_emit_ldarg (mb, 1);
1303 mono_mb_emit_byte (mb, MONO_CEE_STIND_I4);
1304 /* s->chars [len] = 0; */
1305 mono_mb_emit_ldloc (mb, p_var);
1306 mono_mb_emit_ldloc (mb, size_var);
1307 mono_mb_emit_icon (mb, 2);
1308 mono_mb_emit_byte (mb, MONO_CEE_SUB);
1309 mono_mb_emit_byte (mb, MONO_CEE_ADD);
1310 mono_mb_emit_icon (mb, 0);
1311 mono_mb_emit_byte (mb, MONO_CEE_STIND_I2);
1315 We must make sure both vtable and max_length are globally visible before returning to managed land.
1317 mono_mb_emit_byte (mb, MONO_CUSTOM_PREFIX);
1318 mono_mb_emit_byte (mb, CEE_MONO_MEMORY_BARRIER);
1319 mono_mb_emit_i4 (mb, MONO_MEMORY_BARRIER_REL);
1322 mono_mb_emit_ldloc (mb, p_var);
1323 mono_mb_emit_byte (mb, CEE_RET);
1326 res = mono_mb_create_method (mb, csig, 8);
1328 mono_method_get_header (res)->init_locals = FALSE;
1330 info = mono_image_alloc0 (mono_defaults.corlib, sizeof (AllocatorWrapperInfo));
1331 info->gc_name = "sgen";
1332 info->alloc_type = atype;
1333 mono_marshal_set_wrapper_info (res, info);
/*
 * Round SIZE up to a multiple of SGEN_ALLOC_ALIGN.  This mirrors the
 * "size += ALLOC_ALIGN - 1; size &= ~(ALLOC_ALIGN - 1)" sequence that the
 * IL fast-path allocators emit, so the JIT can precompute aligned sizes.
 */
1340 mono_gc_get_aligned_size_for_allocator (int size)
1342 	int aligned_size = size;
1343 	aligned_size += SGEN_ALLOC_ALIGN - 1;
1344 	aligned_size &= ~(SGEN_ALLOC_ALIGN - 1);
1345 	return aligned_size;
1349 * Generate an allocator method implementing the fast path of mono_gc_alloc_obj ().
1350 * The signature of the called method is:
1351 * object allocate (MonoVTable *vtable)
/*
 * Select the managed (IL fast-path) allocator wrapper appropriate for KLASS.
 * A chain of guards disqualifies classes that cannot use the inline fast
 * path; the early-return lines following each guard are elided in this view
 * (presumably each returns NULL to fall back to the slow path — confirm
 * against the full source).
 */
1354 mono_gc_get_managed_allocator (MonoClass *klass, gboolean for_box, gboolean known_instance_size)
1356 #ifdef MANAGED_ALLOCATION
/* Disqualifiers: per-allocation collection, no fast TLS access, objects
 * larger than a TLAB or the small-object limit, finalizable/remoted
 * classes, and active allocation profiling. */
1357 	if (collect_before_allocs)
1359 	if (!mono_runtime_has_tls_get ())
1361 	if (klass->instance_size > tlab_size)
1363 	if (known_instance_size && ALIGN_TO (klass->instance_size, SGEN_ALLOC_ALIGN) >= SGEN_MAX_SMALL_OBJ_SIZE)
1365 	if (klass->has_finalize || mono_class_is_marshalbyref (klass) || (mono_profiler_get_events () & MONO_PROFILE_ALLOCATIONS))
/* Strings get a dedicated allocator shape (vtable, len). */
1369 	if (klass->byval_arg.type == MONO_TYPE_STRING)
1370 		return mono_gc_get_managed_allocator_by_type (ATYPE_STRING);
1371 	/* Generic classes have dynamic field and can go above MAX_SMALL_OBJ_SIZE. */
1372 	if (known_instance_size)
1373 		return mono_gc_get_managed_allocator_by_type (ATYPE_SMALL);
1375 	return mono_gc_get_managed_allocator_by_type (ATYPE_NORMAL);
/*
 * Select the managed fast-path allocator for array (vector) allocation.
 * Only single-dimension, zero-based arrays qualify; the early-return lines
 * after each guard are elided in this view.
 */
1382 mono_gc_get_managed_array_allocator (MonoClass *klass)
1384 #ifdef MANAGED_ALLOCATION
1385 	if (klass->rank != 1)
1387 	if (!mono_runtime_has_tls_get ())
1389 	if (mono_profiler_get_events () & MONO_PROFILE_ALLOCATIONS)
1391 	if (has_per_allocation_action)
/* Arrays are never finalizable or remoted, so those guards are asserts. */
1393 	g_assert (!mono_class_has_finalizer (klass) && !mono_class_is_marshalbyref (klass));
1395 	return mono_gc_get_managed_allocator_by_type (ATYPE_VECTOR);
/* Enable/disable use of the managed (inline IL) allocators at runtime. */
1402 sgen_set_use_managed_allocator (gboolean flag)
1404 	use_managed_allocator = flag;
/*
 * Return (creating and caching on first use) the allocator wrapper method
 * for ATYPE.  Uses an unlocked check plus a re-check after creation: if
 * another thread raced us and already published a wrapper, ours is freed
 * and the cached one is returned instead.
 */
1408 mono_gc_get_managed_allocator_by_type (int atype)
1410 #ifdef MANAGED_ALLOCATION
1413 	if (!use_managed_allocator)
1416 	if (!mono_runtime_has_tls_get ())
1419 	res = alloc_method_cache [atype];
1423 	res = create_allocator (atype);
/* Lost the race: discard our wrapper, keep the published one. */
1425 	if (alloc_method_cache [atype]) {
1426 		mono_free_method (res);
1427 		res = alloc_method_cache [atype];
/* Barrier so the fully-built method is visible before the cache pointer. */
1429 	mono_memory_barrier ();
1430 	alloc_method_cache [atype] = res;
/* Number of distinct managed allocator types (body elided in this view;
 * presumably returns ATYPE_NUM — confirm against the full source). */
1441 mono_gc_get_managed_allocator_types (void)
/* True iff METHOD is one of the cached managed allocator wrappers. */
1447 sgen_is_managed_allocator (MonoMethod *method)
1451 	for (i = 0; i < ATYPE_NUM; ++i)
1452 		if (method == alloc_method_cache [i])
/* True iff any managed allocator wrapper has been created so far. */
1458 sgen_has_managed_allocator (void)
1462 	for (i = 0; i < ATYPE_NUM; ++i)
1463 		if (alloc_method_cache [i])
1469 * Cardtable scanning
1472 #define MWORD_MASK (sizeof (mword) - 1)
/*
 * Given a non-zero mword CARD holding one or more marked card bytes,
 * return the byte offset of the first (lowest-addressed) marked card.
 * ffs returns a 1-based bit index, hence the -1 before dividing by 8
 * to convert a bit position into a byte position.
 */
1475 find_card_offset (mword card)
1477 /*XXX Use assembly as this generates some pretty bad code */
1478 #if defined(__i386__) && defined(__GNUC__)
1479 	return  (__builtin_ffs (card) - 1) / 8;
1480 #elif defined(__x86_64__) && defined(__GNUC__)
1481 	return (__builtin_ffsll (card) - 1) / 8;
1482 #elif defined(__s390x__)
/* s390x is big-endian: byte-swap so ffs finds the lowest-addressed byte. */
1483 	return (__builtin_ffsll (GUINT64_TO_LE(card)) - 1) / 8;
/* Portable fallback: scan the word byte by byte. */
1486 	guint8 *ptr = (guint8 *) &card;
1487 	for (i = 0; i < sizeof (mword); ++i) {
/*
 * Scan [card_data, end) for the first marked card byte, reading a whole
 * mword at a time in the middle for speed.  Three phases: unaligned head
 * byte-by-byte, aligned body word-by-word, then the unaligned tail.
 */
1496 find_next_card (guint8 *card_data, guint8 *end)
1498 	mword *cards, *cards_end;
/* Head: advance byte-wise until card_data is mword-aligned. */
1501 	while ((((mword)card_data) & MWORD_MASK) && card_data < end) {
1507 	if (card_data == end)
/* Body: whole words; a non-zero word contains at least one marked card. */
1510 	cards = (mword*)card_data;
1511 	cards_end = (mword*)((mword)end & ~MWORD_MASK);
1512 	while (cards < cards_end) {
1515 			return (guint8*)cards + find_card_offset (card);
/* Tail: remaining bytes past the last full word. */
1519 	card_data = (guint8*)cards_end;
1520 	while (card_data < end) {
1529 #define ARRAY_OBJ_INDEX(ptr,array,elem_size) (((char*)(ptr) - ((char*)(array) + G_STRUCT_OFFSET (MonoArray, vector))) / (elem_size))
/*
 * Scan the marked card-table cards covering array/string object OBJ and
 * process the references found there with CTX's scan/copy functions.
 * CARDS, when non-NULL, points at a mod-union card snapshot; otherwise the
 * live (shadow) card table is consulted.  Only called for objects whose
 * vtable reports references (asserted below).
 */
1532 sgen_client_cardtable_scan_object (char *obj, mword block_obj_size, guint8 *cards, gboolean mod_union, ScanCopyContext ctx)
1534 	MonoVTable *vt = (MonoVTable*)SGEN_LOAD_VTABLE (obj);
1535 	MonoClass *klass = vt->klass;
1537 	SGEN_ASSERT (0, SGEN_VTABLE_HAS_REFERENCES ((GCVTable*)vt), "Why would we ever call this on reference-free objects?");
1540 		guint8 *card_data, *card_base;
1541 		guint8 *card_data_end;
1542 		char *obj_start = sgen_card_table_align_pointer (obj);
1543 		mword obj_size = sgen_client_par_object_get_size (vt, (GCObject*)obj);
1544 		char *obj_end = obj + obj_size;
1546 		size_t extra_idx = 0;
1548 		MonoArray *arr = (MonoArray*)obj;
1549 		mword desc = (mword)klass->element_class->gc_descr;
1550 		int elem_size = mono_array_element_size (klass);
1552 #ifdef SGEN_HAVE_OVERLAPPING_CARDS
1553 		guint8 *overflow_scan_end = NULL;
1556 #ifdef SGEN_OBJECT_LAYOUT_STATISTICS
1557 		if (klass->element_class->valuetype)
1558 			sgen_object_layout_scanned_vtype_array ();
1560 			sgen_object_layout_scanned_ref_array ();
/* Compute the card range covering [obj, obj + obj_size). */
1566 			card_data = sgen_card_table_get_card_scan_address ((mword)obj);
1568 		card_base = card_data;
1569 		card_count = sgen_card_table_number_of_cards_in_range ((mword)obj, obj_size);
1570 		card_data_end = card_data + card_count;
1573 #ifdef SGEN_HAVE_OVERLAPPING_CARDS
1574 		/*Check for overflow and if so, setup to scan in two steps*/
1575 		if (!cards && card_data_end >= SGEN_SHADOW_CARDTABLE_END) {
1576 			overflow_scan_end = sgen_shadow_cardtable + (card_data_end - SGEN_SHADOW_CARDTABLE_END);
1577 			card_data_end = SGEN_SHADOW_CARDTABLE_END;
/* Iterate only over marked cards, skipping clean ones in bulk. */
1583 		card_data = find_next_card (card_data, card_data_end);
1584 		for (; card_data < card_data_end; card_data = find_next_card (card_data + 1, card_data_end)) {
1586 			size_t idx = (card_data - card_base) + extra_idx;
1587 			char *start = (char*)(obj_start + idx * CARD_SIZE_IN_BYTES);
1588 			char *card_end = start + CARD_SIZE_IN_BYTES;
1589 			char *first_elem, *elem;
1591 			HEAVY_STAT (++los_marked_cards);
1594 				sgen_card_table_prepare_card_for_scanning (card_data);
1596 			card_end = MIN (card_end, obj_end);
/* Clamp the scan window to the first array element. */
1598 			if (start <= (char*)arr->vector)
1601 				index = ARRAY_OBJ_INDEX (start, obj, elem_size);
1603 			elem = first_elem = (char*)mono_array_addr_with_size_fast ((MonoArray*)obj, elem_size, index);
/* Value-type elements: scan each element with the element descriptor. */
1604 			if (klass->element_class->valuetype) {
1605 				ScanVTypeFunc scan_vtype_func = ctx.ops->scan_vtype;
1607 				for (; elem < card_end; elem += elem_size)
1608 					scan_vtype_func (obj, elem, desc, ctx.queue BINARY_PROTOCOL_ARG (elem_size));
/* Reference elements: copy/mark each slot, re-remembering cross-space refs. */
1610 				CopyOrMarkObjectFunc copy_func = ctx.ops->copy_or_mark_object;
1612 				HEAVY_STAT (++los_array_cards);
1613 				for (; elem < card_end; elem += SIZEOF_VOID_P) {
1614 					gpointer new, old = *(gpointer*)elem;
1615 					if ((mod_union && old) || G_UNLIKELY (sgen_ptr_in_nursery (old))) {
1616 						HEAVY_STAT (++los_array_remsets);
1617 						copy_func ((void**)elem, ctx.queue);
1618 						new = *(gpointer*)elem;
/* Target still (or now) in the nursery: keep it in the global remset. */
1619 						if (G_UNLIKELY (sgen_ptr_in_nursery (new)))
1620 							sgen_add_to_global_remset (elem, new);
1625 				binary_protocol_card_scan (first_elem, elem - first_elem);
1628 #ifdef SGEN_HAVE_OVERLAPPING_CARDS
/* Second step of the wrapped-around shadow-cardtable scan. */
1629 		if (overflow_scan_end) {
1630 			extra_idx = card_data - card_base;
1631 			card_base = card_data = sgen_shadow_cardtable;
1632 			card_data_end = overflow_scan_end;
1633 			overflow_scan_end = NULL;
1644 * Array and string allocation
/*
 * Allocate a vector (one-dimensional, zero-based array) of SIZE bytes with
 * MAX_LENGTH elements.  Tries a lock-free critical-region fast path first,
 * then falls back to the locked slow path; returns NULL-driven OOM via
 * mono_gc_out_of_memory.
 */
1648 mono_gc_alloc_vector (MonoVTable *vtable, size_t size, uintptr_t max_length)
1653 	if (!SGEN_CAN_ALIGN_UP (size))
1656 #ifndef DISABLE_CRITICAL_REGION
1657 	ENTER_CRITICAL_REGION;
1658 	arr = sgen_try_alloc_obj_nolock ((GCVTable*)vtable, size);
1660 		/*This doesn't require fencing since EXIT_CRITICAL_REGION already does it for us*/
1661 		arr->max_length = (mono_array_size_t)max_length;
1662 		EXIT_CRITICAL_REGION;
1665 	EXIT_CRITICAL_REGION;
/* Slow path: allocate under the GC lock (locking elided in this view). */
1670 	arr = sgen_alloc_obj_nolock ((GCVTable*)vtable, size);
1671 	if (G_UNLIKELY (!arr)) {
1673 		return mono_gc_out_of_memory (size);
1676 	arr->max_length = (mono_array_size_t)max_length;
1681 	SGEN_ASSERT (6, SGEN_ALIGN_UP (size) == SGEN_ALIGN_UP (sgen_client_par_object_get_size ((GCVTable*)vtable, (GCObject*)arr)), "Vector has incorrect size.");
/*
 * Allocate a multi-dimensional (or non-zero-based) array.  Same fast/slow
 * path structure as mono_gc_alloc_vector, but additionally initializes the
 * bounds block, which lives in the last BOUNDS_SIZE bytes of the object.
 */
1686 mono_gc_alloc_array (MonoVTable *vtable, size_t size, uintptr_t max_length, uintptr_t bounds_size)
1689 	MonoArrayBounds *bounds;
1692 	if (!SGEN_CAN_ALIGN_UP (size))
1695 #ifndef DISABLE_CRITICAL_REGION
1696 	ENTER_CRITICAL_REGION;
1697 	arr = sgen_try_alloc_obj_nolock ((GCVTable*)vtable, size);
1699 		/*This doesn't require fencing since EXIT_CRITICAL_REGION already does it for us*/
1700 		arr->max_length = (mono_array_size_t)max_length;
/* Bounds are stored inline at the tail of the allocation. */
1702 		bounds = (MonoArrayBounds*)((char*)arr + size - bounds_size);
1703 		arr->bounds = bounds;
1704 		EXIT_CRITICAL_REGION;
1707 	EXIT_CRITICAL_REGION;
/* Slow path under the GC lock (locking elided in this view). */
1712 	arr = sgen_alloc_obj_nolock ((GCVTable*)vtable, size);
1713 	if (G_UNLIKELY (!arr)) {
1715 		return mono_gc_out_of_memory (size);
1718 	arr->max_length = (mono_array_size_t)max_length;
1720 	bounds = (MonoArrayBounds*)((char*)arr + size - bounds_size);
1721 	arr->bounds = bounds;
1726 	SGEN_ASSERT (6, SGEN_ALIGN_UP (size) == SGEN_ALIGN_UP (sgen_client_par_object_get_size ((GCVTable*)vtable, (GCObject*)arr)), "Array has incorrect size.");
/*
 * Allocate a string object of SIZE bytes holding LEN characters.  Same
 * critical-region fast path / locked slow path pattern as the array
 * allocators above; the length assignment and return are elided in this
 * view.
 */
1731 mono_gc_alloc_string (MonoVTable *vtable, size_t size, gint32 len)
1736 	if (!SGEN_CAN_ALIGN_UP (size))
1739 #ifndef DISABLE_CRITICAL_REGION
1740 	ENTER_CRITICAL_REGION;
1741 	str = sgen_try_alloc_obj_nolock ((GCVTable*)vtable, size);
1743 		/*This doesn't require fencing since EXIT_CRITICAL_REGION already does it for us*/
1745 		EXIT_CRITICAL_REGION;
1748 	EXIT_CRITICAL_REGION;
1753 	str = sgen_alloc_obj_nolock ((GCVTable*)vtable, size);
1754 	if (G_UNLIKELY (!str)) {
1756 		return mono_gc_out_of_memory (size);
/*
 * Truncate STR in place to NEW_LENGTH characters, zeroing the discarded
 * tail so SGen can reclaim the space and the string stays NUL-delimited.
 * When nursery canaries are enabled the canary after the shortened string
 * is re-written as well.
 */
1771 mono_gc_set_string_length (MonoString *str, gint32 new_length)
1773 	mono_unichar2 *new_end = str->chars + new_length;
1775 	/* zero the discarded string. This null-delimits the string and allows
1776 	 * the space to be reclaimed by SGen. */
1778 	if (nursery_canaries_enabled () && sgen_ptr_in_nursery (str)) {
1779 		CHECK_CANARY_FOR_OBJECT (str);
/* +1 covers the NUL terminator; CANARY_SIZE covers the old canary bytes. */
1780 		memset (new_end, 0, (str->length - new_length + 1) * sizeof (mono_unichar2) + CANARY_SIZE);
1781 		memcpy (new_end + 1 , CANARY_STRING, CANARY_SIZE);
1783 		memset (new_end, 0, (str->length - new_length + 1) * sizeof (mono_unichar2));
1786 	str->length = new_length;
/* Profiler root reporting: roots are batched GC_ROOT_NUM at a time and
 * flushed to the profiler via mono_profiler_gc_roots. */
1793 #define GC_ROOT_NUM 32
1795 	int count;		/* must be the first field */
1796 	void *objects [GC_ROOT_NUM];
1797 	int root_types [GC_ROOT_NUM];
1798 	uintptr_t extra_info [GC_ROOT_NUM];
/* Flush the accumulated batch to the profiler (reset elided in this view). */
1802 notify_gc_roots (GCRootReport *report)
1806 	mono_profiler_gc_roots (report->count, report->objects, report->root_types, report->extra_info);
/* Append one root to REPORT, flushing first if the batch is full.
 * extra_info records the object's class. */
1811 add_profile_gc_root (GCRootReport *report, void *object, int rtype, uintptr_t extra_info)
1813 	if (report->count == GC_ROOT_NUM)
1814 		notify_gc_roots (report);
1815 	report->objects [report->count] = object;
1816 	report->root_types [report->count] = rtype;
1817 	report->extra_info [report->count++] = (uintptr_t)((MonoVTable*)SGEN_LOAD_VTABLE (object))->klass;
/*
 * Report all objects pinned during nursery scanning to the profiler as
 * pinning roots, when root profiling is enabled.
 */
1821 sgen_client_nursery_objects_pinned (void **definitely_pinned, int count)
1823 	if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS) {
1824 		GCRootReport report;
1827 		for (idx = 0; idx < count; ++idx)
1828 			add_profile_gc_root (&report, definitely_pinned [idx], MONO_PROFILE_GC_ROOT_PINNING | MONO_PROFILE_GC_ROOT_MISC, 0);
1829 		notify_gc_roots (&report);
/* Report every object queued for finalization in QUEUE as a finalizer root. */
1834 report_finalizer_roots_from_queue (SgenPointerQueue *queue)
1836 	GCRootReport report;
1840 	for (i = 0; i < queue->next_slot; ++i) {
1841 		void *obj = queue->data [i];
1844 		add_profile_gc_root (&report, obj, MONO_PROFILE_GC_ROOT_FINALIZER, 0);
1846 	notify_gc_roots (&report);
/* Report both the ready and critical finalizer queues. */
1850 report_finalizer_roots (SgenPointerQueue *fin_ready_queue, SgenPointerQueue *critical_fin_queue)
1852 	report_finalizer_roots_from_queue (fin_ready_queue);
1853 	report_finalizer_roots_from_queue (critical_fin_queue);
/* Current report for user-marker callbacks; set by precisely_report_roots_from. */
1856 static GCRootReport *root_report;
/* MonoGCMarkFunc-shaped shim: forwards one root slot into root_report. */
1859 single_arg_report_root (void **obj, void *gc_data)
1862 		add_profile_gc_root (root_report, *obj, MONO_PROFILE_GC_ROOT_OTHER, 0);
/*
 * Walk the root range [start_root, end_root) according to its descriptor
 * DESC and report each non-NULL reference slot to REPORT.  Mirrors the
 * descriptor decoding used by the precise root scanner.
 */
1866 precisely_report_roots_from (GCRootReport *report, void** start_root, void** end_root, mword desc)
1868 	switch (desc & ROOT_DESC_TYPE_MASK) {
/* Small bitmap: one bit per pointer-sized slot. */
1869 	case ROOT_DESC_BITMAP:
1870 		desc >>= ROOT_DESC_TYPE_SHIFT;
1872 			if ((desc & 1) && *start_root) {
1873 				add_profile_gc_root (report, *start_root, MONO_PROFILE_GC_ROOT_OTHER, 0);
/* Large bitmap stored out of line, GC_BITS_PER_WORD slots per word. */
1879 	case ROOT_DESC_COMPLEX: {
1880 		gsize *bitmap_data = sgen_get_complex_descriptor_bitmap (desc);
1881 		gsize bwords = (*bitmap_data) - 1;
1882 		void **start_run = start_root;
1884 		while (bwords-- > 0) {
1885 			gsize bmap = *bitmap_data++;
1886 			void **objptr = start_run;
1888 				if ((bmap & 1) && *objptr) {
1889 					add_profile_gc_root (report, *objptr, MONO_PROFILE_GC_ROOT_OTHER, 0);
1894 			start_run += GC_BITS_PER_WORD;
/* User-supplied marker: route each slot through single_arg_report_root. */
1898 	case ROOT_DESC_USER: {
1899 		MonoGCRootMarkFunc marker = sgen_get_user_descriptor_func (desc);
1900 		root_report = report;
1901 		marker (start_root, single_arg_report_root, NULL);
/* Run-length descriptors are not expected here. */
1904 	case ROOT_DESC_RUN_LEN:
1905 		g_assert_not_reached ();
1907 		g_assert_not_reached ();
/* Report all registered roots of ROOT_TYPE to the profiler. */
1912 report_registered_roots_by_type (int root_type)
1914 	GCRootReport report;
1918 	SGEN_HASH_TABLE_FOREACH (&roots_hash [root_type], start_root, root) {
1919 		SGEN_LOG (6, "Precise root scan %p-%p (desc: %p)", start_root, root->end_root, (void*)root->root_desc);
1920 		precisely_report_roots_from (&report, start_root, (void**)root->end_root, root->root_desc);
1921 	} SGEN_HASH_TABLE_FOREACH_END;
1922 	notify_gc_roots (&report);
/* Report both normal and write-barrier root sets. */
1926 report_registered_roots (void)
1928 	report_registered_roots_by_type (ROOT_TYPE_NORMAL);
1929 	report_registered_roots_by_type (ROOT_TYPE_WBARRIER);
/* Minor-collection hook: emit root/finalizer reports when profiled. */
1933 sgen_client_collecting_minor (SgenPointerQueue *fin_ready_queue, SgenPointerQueue *critical_fin_queue)
1935 	if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS)
1936 		report_registered_roots ();
1937 	if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS)
1938 		report_finalizer_roots (fin_ready_queue, critical_fin_queue);
/* State for major-collection root reporting; populated across the three
 * sgen_client_collecting_major_* phases below. */
1941 static GCRootReport major_root_report;
1942 static gboolean profile_roots;
/* Phase 1: latch whether root profiling is on, reset the report. */
1945 sgen_client_collecting_major_1 (void)
1947 	profile_roots = mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS;
1948 	memset (&major_root_report, 0, sizeof (GCRootReport));
/* Called for each pinned LOS object; recorded as a pinning root. */
1952 sgen_client_pinned_los_object (char *obj)
1955 		add_profile_gc_root (&major_root_report, obj, MONO_PROFILE_GC_ROOT_PINNING | MONO_PROFILE_GC_ROOT_MISC, 0);
/* Phase 2: flush the pinned-object report, then report registered roots. */
1959 sgen_client_collecting_major_2 (void)
1962 		notify_gc_roots (&major_root_report);
1964 	if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS)
1965 		report_registered_roots ();
/* Phase 3: report finalizer queues. */
1969 sgen_client_collecting_major_3 (SgenPointerQueue *fin_ready_queue, SgenPointerQueue *critical_fin_queue)
1971 	if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS)
1972 		report_finalizer_roots (fin_ready_queue, critical_fin_queue);
/* Object-move events are buffered as (source, destination) pairs and
 * flushed to the profiler in batches. */
1975 #define MOVED_OBJECTS_NUM 64
1976 static void *moved_objects [MOVED_OBJECTS_NUM];
1977 static int moved_objects_idx = 0;
/* Record one object move; flushes the batch to the profiler when full. */
1980 mono_sgen_register_moved_object (void *obj, void *destination)
1982 	g_assert (mono_profiler_events & MONO_PROFILE_GC_MOVES);
1984 	if (moved_objects_idx == MOVED_OBJECTS_NUM) {
1985 		mono_profiler_gc_moves (moved_objects, moved_objects_idx);
1986 		moved_objects_idx = 0;
1988 	moved_objects [moved_objects_idx++] = obj;
1989 	moved_objects [moved_objects_idx++] = destination;
/* Flush any pending move events (called at GC event boundaries). */
1993 mono_sgen_gc_event_moves (void)
1995 	if (moved_objects_idx) {
1996 		mono_profiler_gc_moves (moved_objects, moved_objects_idx);
1997 		moved_objects_idx = 0;
/* Heap-walking support: references found in an object are buffered in a
 * HeapWalkInfo and delivered to the user callback in batches of REFS_SIZE. */
2005 #define REFS_SIZE 128
2008 	MonoGCReferences callback;
2012 	MonoObject *refs [REFS_SIZE];
2013 	uintptr_t offsets [REFS_SIZE];
/* Invoked by sgen-scan-object.h for each reference slot: buffer the slot's
 * offset and target, flushing via the callback when the buffer fills.
 * After the first flush, size is reported as 0 (hwi->called). */
2017 #define HANDLE_PTR(ptr,obj)	do {	\
2019 		if (hwi->count == REFS_SIZE) {	\
2020 			hwi->callback ((MonoObject*)start, mono_object_class (start), hwi->called? 0: size, hwi->count, hwi->refs, hwi->offsets, hwi->data);	\
2024 		hwi->offsets [hwi->count] = (char*)(ptr)-(char*)start;	\
2025 		hwi->refs [hwi->count++] = *(ptr);	\
/* Scan one object with the generic scan template, collecting its refs. */
2030 collect_references (HeapWalkInfo *hwi, char *start, size_t size)
2032 	mword desc = sgen_obj_get_descriptor (start);
2034 #include "sgen-scan-object.h"
/* Per-object iteration callback: collect refs, then report the object
 * (also reported when it has no refs, so every object is seen once). */
2038 walk_references (char *start, size_t size, void *data)
2040 	HeapWalkInfo *hwi = data;
2043 	collect_references (hwi, start, size);
2044 	if (hwi->count || !hwi->called)
2045 		hwi->callback ((MonoObject*)start, mono_object_class (start), hwi->called? 0: size, hwi->count, hwi->refs, hwi->offsets, hwi->data);
2049 * mono_gc_walk_heap:
2050 * @flags: flags for future use
2051 * @callback: a function pointer called for each object in the heap
2052 * @data: a user data pointer that is passed to callback
2054 * This function can be used to iterate over all the live objects in the heap:
2055 * for each object, @callback is invoked, providing info about the object's
2056 * location in memory, its class, its size and the objects it references.
2057  * For each referenced object its offset from the object address is
2058 * reported in the offsets array.
2059 * The object references may be buffered, so the callback may be invoked
2060 * multiple times for the same object: in all but the first call, the size
2061 * argument will be zero.
2062 * Note that this function can be only called in the #MONO_GC_EVENT_PRE_START_WORLD
2063 * profiler event handler.
2065 * Returns: a non-zero value if the GC doesn't support heap walking
/* See the doc comment above: iterate every live object (nursery, major
 * heap, LOS), invoking CALLBACK for each via walk_references. */
2068 mono_gc_walk_heap (int flags, MonoGCReferences callback, void *data)
2073 	hwi.callback = callback;
/* Clear fragment zero-fill so the nursery can be linearly scanned. */
2076 	sgen_clear_nursery_fragments ();
2077 	sgen_scan_area_with_callback (nursery_section->data, nursery_section->end_data, walk_references, &hwi, FALSE);
2079 	major_collector.iterate_objects (ITERATE_OBJECTS_SWEEP_ALL, walk_references, &hwi);
2080 	sgen_los_iterate_objects (walk_references, &hwi);
/* Install the runtime-supplied GC callbacks (copied by value). */
2090 mono_gc_set_gc_callbacks (MonoGCCallbacks *callbacks)
2092 	gc_callbacks = *callbacks;
/* Accessor for the installed callbacks. */
2096 mono_gc_get_gc_callbacks ()
2098 	return &gc_callbacks;
/*
 * Client-side thread registration: publish INFO in TLS, initialize its
 * client fields, determine the thread's stack bounds (falling back to a
 * page-aligned STACK_BOTTOM_FALLBACK when the platform cannot report
 * them), and invoke the runtime's thread_attach callback.
 */
2102 sgen_client_thread_register (SgenThreadInfo* info, void *stack_bottom_fallback)
2105 	guint8 *staddr = NULL;
/* Publish the thread info via TLS key or __thread, per platform. */
2107 #ifndef HAVE_KW_THREAD
2108 	g_assert (!mono_native_tls_get_value (thread_info_key));
2109 	mono_native_tls_set_value (thread_info_key, info);
2111 	sgen_thread_info = info;
2114 	info->client_info.skip = 0;
2115 	info->client_info.stopped_ip = NULL;
2116 	info->client_info.stopped_domain = NULL;
2118 	info->client_info.stack_start = NULL;
2120 #ifdef SGEN_POSIX_STW
2121 	info->client_info.stop_count = -1;
2122 	info->client_info.signal = 0;
2125 	/* On win32, stack_start_limit should be 0, since the stack can grow dynamically */
2126 	mono_thread_info_get_stack_bounds (&staddr, &stsize);
2129 		info->client_info.stack_start_limit = staddr;
2131 		info->client_info.stack_end = staddr + stsize;
/* No reliable bounds: round the fallback pointer up to a page boundary. */
2133 		gsize stack_bottom = (gsize)stack_bottom_fallback;
2134 		stack_bottom += 4095;
2135 		stack_bottom &= ~4095;
2136 		info->client_info.stack_end = (char*)stack_bottom;
2140 	memset (&info->client_info.ctx, 0, sizeof (MonoContext));
2142 	memset (&info->client_info.regs, 0, sizeof (info->regs));
2145 	if (mono_gc_get_gc_callbacks ()->thread_attach_func)
2146 		info->client_info.runtime_data = mono_gc_get_gc_callbacks ()->thread_attach_func ();
2148 	binary_protocol_thread_register ((gpointer)mono_thread_info_get_tid (info));
2150 	SGEN_LOG (3, "registered thread %p (%p) stack end %p", info, (gpointer)mono_thread_info_get_tid (info), info->client_info.stack_end);
/*
 * Client-side thread unregistration: clear the TLS slot, hand runtime
 * threads over to the joinable-thread machinery, and run the runtime's
 * thread_detach callback.
 */
2154 sgen_client_thread_unregister (SgenThreadInfo *p)
2156 	MonoNativeThreadId tid;
2158 #ifndef HAVE_KW_THREAD
2159 	mono_native_tls_set_value (thread_info_key, NULL);
2161 	sgen_thread_info = NULL;
2164 	tid = mono_thread_info_get_tid (p);
/* Runtime threads are joined later rather than detached immediately. */
2166 	if (p->client_info.info.runtime_thread)
2167 		mono_threads_add_joinable_thread ((gpointer)tid);
2169 	if (mono_gc_get_gc_callbacks ()->thread_detach_func) {
2170 		mono_gc_get_gc_callbacks ()->thread_detach_func (p->client_info.runtime_data);
2171 		p->client_info.runtime_data = NULL;
2174 	binary_protocol_thread_unregister ((gpointer)tid);
2175 	SGEN_LOG (3, "unregister thread %p (%p)", p, (gpointer)tid);
/* Mark/unmark the current thread as excluded from GC stack scanning. */
2179 mono_gc_set_skip_thread (gboolean skip)
2181 	SgenThreadInfo *info = mono_thread_info_current ();
2184 	info->client_info.gc_disabled = skip;
/* True for methods that must not be interrupted by the GC. */
2189 is_critical_method (MonoMethod *method)
2191 	return mono_runtime_is_critical_method (method) || sgen_is_critical_method (method);
/* True while the thread is inside an allocator critical region. */
2195 thread_in_critical_region (SgenThreadInfo *info)
2197 	return info->client_info.in_critical_region;
/* Late attach hook: run the runtime attach callback if not yet done. */
2201 sgen_thread_attach (SgenThreadInfo *info)
2203 	if (mono_gc_get_gc_callbacks ()->thread_attach_func && !info->client_info.runtime_data)
2204 		info->client_info.runtime_data = mono_gc_get_gc_callbacks ()->thread_attach_func ();
/* Detach the mono runtime thread state when the OS thread exits. */
2208 sgen_thread_detach (SgenThreadInfo *p)
2210 	/* If a delegate is passed to native code and invoked on a thread we don't
2211 	 * know about, the jit will register it with mono_jit_thread_attach, but
2212 	 * we have no way of knowing when that thread goes away.  SGen has a TSD
2213 	 * so we assume that if the domain is still registered, we can detach
/* Only detach when a domain is still set for this thread. */
2216 	if (mono_domain_get ())
2217 		mono_thread_detach_internal (mono_thread_internal_current ());
/* Attach the current thread to the thread-info machinery; non-zero on success. */
2221 mono_gc_register_thread (void *baseptr)
2223 	return mono_thread_info_attach (baseptr) != NULL;
/* True when the calling thread is known to the GC's thread registry. */
2227 mono_gc_is_gc_thread (void)
2231 	result = mono_thread_info_current () != NULL;
/* Worker-thread registration: only a small id is needed. */
2237 sgen_client_thread_register_worker (void)
2239 	mono_thread_info_register_small_id ();
2242 /* Variables holding start/end nursery so it won't have to be passed at every call */
2243 static void *scan_area_arg_start, *scan_area_arg_end;
/* Conservatively pin anything in [start, end) that looks like a nursery pointer. */
2246 mono_gc_conservatively_scan_area (void *start, void *end)
2248 	sgen_conservatively_pin_objects_from (start, end, scan_area_arg_start, scan_area_arg_end, PIN_TYPE_STACK);
/* Precise-scan callback: copy/mark a single object reference. */
2252 mono_gc_scan_object (void *obj, void *gc_data)
2254 	ScanCopyContext *ctx = gc_data;
2255 	ctx->ops->copy_or_mark_object (&obj, ctx->queue);
2260 * Mark from thread stacks and registers.
2263 sgen_client_scan_thread_data (void *start_nursery, void *end_nursery, gboolean precise, ScanCopyContext ctx)
2265 SgenThreadInfo *info;
2267 scan_area_arg_start = start_nursery;
2268 scan_area_arg_end = end_nursery;
2270 FOREACH_THREAD (info) {
2271 int skip_reason = 0;
2272 if (info->client_info.skip) {
2273 SGEN_LOG (3, "Skipping dead thread %p, range: %p-%p, size: %td", info, info->client_info.stack_start, info->client_info.stack_end, (char*)info->client_info.stack_end - (char*)info->client_info.stack_start);
2275 } else if (info->client_info.gc_disabled) {
2276 SGEN_LOG (3, "GC disabled for thread %p, range: %p-%p, size: %td", info, info->client_info.stack_start, info->client_info.stack_end, (char*)info->client_info.stack_end - (char*)info->client_info.stack_start);
2278 } else if (!mono_thread_info_is_live (info)) {
2279 SGEN_LOG (3, "Skipping non-running thread %p, range: %p-%p, size: %td (state %x)", info, info->client_info.stack_start, info->client_info.stack_end, (char*)info->client_info.stack_end - (char*)info->client_info.stack_start, info->client_info.info.thread_state);
2283 binary_protocol_scan_stack ((gpointer)mono_thread_info_get_tid (info), info->client_info.stack_start, info->client_info.stack_end, skip_reason);
2288 g_assert (info->client_info.suspend_done);
2289 SGEN_LOG (3, "Scanning thread %p, range: %p-%p, size: %td, pinned=%zd", info, info->client_info.stack_start, info->client_info.stack_end, (char*)info->client_info.stack_end - (char*)info->client_info.stack_start, sgen_get_pinned_count ());
2290 if (mono_gc_get_gc_callbacks ()->thread_mark_func && !conservative_stack_mark) {
2291 mono_gc_get_gc_callbacks ()->thread_mark_func (info->client_info.runtime_data, info->client_info.stack_start, info->client_info.stack_end, precise, &ctx);
2292 } else if (!precise) {
2293 if (!conservative_stack_mark) {
2294 fprintf (stderr, "Precise stack mark not supported - disabling.\n");
2295 conservative_stack_mark = TRUE;
2297 sgen_conservatively_pin_objects_from (info->client_info.stack_start, info->client_info.stack_end, start_nursery, end_nursery, PIN_TYPE_STACK);
2302 sgen_conservatively_pin_objects_from ((void**)&info->client_info.ctx, (void**)&info->client_info.ctx + ARCH_NUM_REGS,
2303 start_nursery, end_nursery, PIN_TYPE_STACK);
2305 sgen_conservatively_pin_objects_from ((void**)&info->client_info.regs, (void**)&info->client_info.regs + ARCH_NUM_REGS,
2306 start_nursery, end_nursery, PIN_TYPE_STACK);
2309 } END_FOREACH_THREAD
2313 * mono_gc_set_stack_end:
2315  * Set the end of the current thread's stack to STACK_END. The stack space between
2316  * STACK_END and the real end of the thread's stack will not be scanned during collections.
/* See the doc comment above; only lowering the recorded stack end is allowed. */
2319 mono_gc_set_stack_end (void *stack_end)
2321 	SgenThreadInfo *info;
2324 	info = mono_thread_info_current ();
2326 		SGEN_ASSERT (0, stack_end < info->client_info.stack_end, "Can only lower stack end");
2327 		info->client_info.stack_end = stack_end;
/* Register a root range; a NULL descriptor means pin conservatively. */
2337 mono_gc_register_root (char *start, size_t size, void *descr)
2339 	return sgen_register_root (start, size, descr, descr ? ROOT_TYPE_NORMAL : ROOT_TYPE_PINNED);
/* Register a root range that is tracked via the write barrier. */
2343 mono_gc_register_root_wbarrier (char *start, size_t size, void *descr)
2345 	return sgen_register_root (start, size, descr, ROOT_TYPE_WBARRIER);
/* Remove a previously registered root starting at ADDR. */
2349 mono_gc_deregister_root (char* addr)
2351 	sgen_deregister_root (addr);
/* Thin pthread wrappers, compiled only when pthread interception is enabled;
 * they forward directly to libpthread (SGen needs no interposition here). */
2358 #if USE_PTHREAD_INTERCEPT
2361 mono_gc_pthread_create (pthread_t *new_thread, const pthread_attr_t *attr, void *(*start_routine)(void *), void *arg)
2363 	return pthread_create (new_thread, attr, start_routine, arg);
2367 mono_gc_pthread_join (pthread_t thread, void **retval)
2369 	return pthread_join (thread, retval);
2373 mono_gc_pthread_detach (pthread_t thread)
2375 	return pthread_detach (thread);
/* Detach from the thread-info machinery before the thread exits for good. */
2379 mono_gc_pthread_exit (void *retval)
2381 	mono_thread_info_detach ();
2382 	pthread_exit (retval);
2383 	g_assert_not_reached ();
2386 #endif /* USE_PTHREAD_INTERCEPT */
/* Notify the resource-limit machinery of the new total heap size. */
2393 sgen_client_total_allocated_heap_changed (size_t allocated_heap)
2395 	mono_runtime_resource_check_limit (MONO_RESOURCE_GC_HEAP, allocated_heap);
/* Simple capability/state accessors (bodies largely elided in this view). */
2399 mono_gc_user_markers_supported (void)
2405 mono_object_is_alive (MonoObject* o)
/* Generation of OBJ: nursery pointers are gen 0 (older-gen return elided). */
2411 mono_gc_get_generation (MonoObject *obj)
2413 	if (sgen_ptr_in_nursery (obj))
2419 mono_gc_enable_events (void)
2424 mono_gc_get_gc_name (void)
2430 mono_gc_get_description (void)
2432 	return g_strdup ("sgen");
2436 mono_gc_set_desktop_mode (void)
2441 mono_gc_is_moving (void)
2447 mono_gc_is_disabled (void)
/* Win32 DLL entry hook (body elided in this view). */
2453 BOOL APIENTRY mono_gc_dllmain (HMODULE module_handle, DWORD reason, LPVOID reserved)
/* Highest generation number (body elided in this view). */
2460 mono_gc_max_generation (void)
/* Precise stack marking is available unless conservative mode was forced. */
2466 mono_gc_precise_stack_mark_enabled (void)
2468 	return !conservative_stack_mark;
/* Trigger a collection of GENERATION. */
2472 mono_gc_collect (int generation)
2474 	sgen_gc_collect (generation);
/* Number of collections performed for GENERATION so far. */
2478 mono_gc_collection_count (int generation)
2480 	return sgen_gc_collection_count (generation);
/* Bytes currently in use by live objects. */
2484 mono_gc_get_used_size (void)
2486 	return (int64_t)sgen_gc_get_used_size ();
/* Total bytes of heap memory allocated from the OS. */
2490 mono_gc_get_heap_size (void)
2492 	return (int64_t)sgen_gc_get_total_heap_allocation ();
/* Build a root descriptor that invokes a user-supplied marking callback. */
2496 mono_gc_make_root_descr_user (MonoGCRootMarkFunc marker)
2498 return sgen_make_user_root_descriptor (marker);
/* Strings all share one fixed descriptor; bitmap/numbits are ignored. */
2502 mono_gc_make_descr_for_string (gsize *bitmap, int numbits)
2504 return (void*)SGEN_DESC_STRING;
/* Out-params: nursery size and its alignment shift; returns nursery start. */
2508 mono_gc_get_nursery (int *shift_bits, size_t *size)
2510 *size = sgen_nursery_size;
2511 *shift_bits = DEFAULT_NURSERY_BITS;
2512 return sgen_get_nursery_start ();
/* Size threshold above which objects go to the large-object space. */
2516 mono_gc_get_los_limit (void)
2518 return SGEN_MAX_SMALL_OBJ_SIZE;
/*
 * Weak reference ("disappearing link") API.  `track` selects whether the
 * link tracks resurrection.  Removal is expressed as registering NULL for
 * the same link address.
 */
2522 mono_gc_weak_link_add (void **link_addr, MonoObject *obj, gboolean track)
2524 sgen_register_disappearing_link (obj, link_addr, track, FALSE);
2528 mono_gc_weak_link_remove (void **link_addr, gboolean track)
2530 sgen_register_disappearing_link (NULL, link_addr, track, FALSE);
/* Dereference a weak link; returns the target or NULL if collected. */
2534 mono_gc_weak_link_get (void **link_addr)
2536 return sgen_weak_link_get (link_addr);
/* Toggle whether major collections may run synchronously; returns the
 * previous/effective setting per sgen_set_allow_synchronous_major. */
2540 mono_gc_set_allow_synchronous_major (gboolean flag)
2542 return sgen_set_allow_synchronous_major (flag);
/*
 * Run `func(data)` while holding the GC lock.  NOTE(review): the matching
 * LOCK_INTERRUPTION line was dropped by the extraction; only the call and
 * UNLOCK_INTERRUPTION survive here.
 */
2546 mono_gc_invoke_with_gc_lock (MonoGCLockedCallbackFunc func, void *data)
2550 result = func (data);
2551 UNLOCK_INTERRUPTION;
/* Signal-altstack registration hook — body not visible (likely a no-op). */
2556 mono_gc_register_altstack (gpointer stack, gint32 stack_size, gpointer altstack, gint32 altstack_size)
/* SGen->runtime callback on allocation failure: raise the runtime OOM path. */
2562 sgen_client_out_of_memory (size_t size)
2564 mono_gc_out_of_memory (size);
/* Expose the card table base plus the shift/mask needed to index it. */
2568 mono_gc_get_card_table (int *shift_bits, gpointer *mask)
2570 return sgen_get_card_table_configuration (shift_bits, mask);
/* Card-table nursery checks are valid only for non-concurrent majors. */
2574 mono_gc_card_table_nursery_check (void)
2576 return !sgen_get_major_collector ()->is_concurrent;
2579 /* Negative value to remove */
/* Intentionally unimplemented — see FIXME below. */
2581 mono_gc_add_memory_pressure (gint64 value)
2583 /* FIXME: Implement at some point? */
/*
 * Called when an allocation had to be satisfied in degraded mode (directly
 * in the major heap because the nursery could not serve it).  Emits advice
 * to grow the nursery, rate-limited to at most one warning per major GC
 * cycle, with the message escalating as occurrences accumulate.
 * NOTE(review): the increment of num_degraded was dropped by the extraction;
 * presumably it happens inside the guarded block — verify against upstream.
 */
2591 sgen_client_degraded_allocation (size_t size)
2593 static int last_major_gc_warned = -1;
2594 static int num_degraded = 0;
/* Only warn once per major-GC cycle. */
2596 if (last_major_gc_warned < gc_stats.major_gc_count) {
2598 if (num_degraded == 1 || num_degraded == 3)
2599 mono_trace (G_LOG_LEVEL_INFO, MONO_TRACE_GC, "Warning: Degraded allocation. Consider increasing nursery-size if the warning persists.");
2600 else if (num_degraded == 10)
2601 mono_trace (G_LOG_LEVEL_INFO, MONO_TRACE_GC, "Warning: Repeated degraded allocation. Consider increasing nursery-size.");
2602 last_major_gc_warned = gc_stats.major_gc_count;
/*
 * Emit a one-line trace summarizing the collection that just finished:
 * GC_MAJOR or GC_MINOR depending on info->generation, with pause time and
 * heap/LOS deltas.  Times are stored in 100ns ticks, hence the /10000.0f
 * conversions to milliseconds.
 * NOTE(review): the "%dK" specifiers are fed mword-sized expressions
 * (section_size * num_major_sections / 1024); on LP64 that is a
 * format/argument width mismatch — confirm against upstream, where casts
 * or a different specifier may be present on the dropped lines.
 */
2607 sgen_client_log_timing (GGTimingInfo *info, mword last_major_num_sections, mword last_los_memory_usage)
2609 SgenMajorCollector *major_collector = sgen_get_major_collector ();
2610 mword num_major_sections = major_collector->get_num_major_sections ();
/* Fixed-size scratch buffer for the optional STW/bridge breakdown. */
2611 char full_timing_buff [1024];
2612 full_timing_buff [0] = '\0';
/* Overflow collections have no meaningful STW/bridge breakdown. */
2614 if (!info->is_overflow)
2615 sprintf (full_timing_buff, "total %.2fms, bridge %.2fms", info->stw_time / 10000.0f, (int)info->bridge_time / 10000.0f);
2616 if (info->generation == GENERATION_OLD)
2617 mono_trace (G_LOG_LEVEL_INFO, MONO_TRACE_GC, "GC_MAJOR%s: (%s) pause %.2fms, %s major %dK/%dK los %dK/%dK",
2618 info->is_overflow ? "_OVERFLOW" : "",
2619 info->reason ? info->reason : "",
2620 (int)info->total_time / 10000.0f,
2622 major_collector->section_size * num_major_sections / 1024,
2623 major_collector->section_size * last_major_num_sections / 1024,
2624 los_memory_usage / 1024,
2625 last_los_memory_usage / 1024);
/* Minor (nursery) collection summary; "promoted" is the growth in major
 * sections since the last report. */
2627 mono_trace (G_LOG_LEVEL_INFO, MONO_TRACE_GC, "GC_MINOR%s: (%s) pause %.2fms, %s promoted %dK major %dK los %dK",
2628 info->is_overflow ? "_OVERFLOW" : "",
2629 info->reason ? info->reason : "",
2630 (int)info->total_time / 10000.0f,
2632 (num_major_sections - last_major_num_sections) * major_collector->section_size / 1024,
2633 major_collector->section_size * num_major_sections / 1024,
2634 los_memory_usage / 1024);
/*
 * Map a Mono-specific internal-memory type id to a human-readable name for
 * diagnostics.  Only one switch case survives this extraction; the default
 * path is not visible here.
 */
2642 sgen_client_description_for_internal_mem_type (int type)
2645 case INTERNAL_MEM_EPHEMERON_LINK: return "ephemeron-link";
/*
 * Optional debug checks run before a collection: when xdomain-checks is
 * enabled, clear nursery fragments first (so the heap is walkable) and then
 * verify no cross-appdomain references remain.
 */
2652 sgen_client_pre_collection_checks (void)
2654 if (sgen_mono_xdomain_checks) {
2655 sgen_clear_nursery_fragments ();
2656 sgen_check_for_xdomain_refs ();
/*
 * SGen->Mono adapters: an opaque GCVTable is really a MonoVTable; these
 * expose the bits of the owning class that SGen needs for diagnostics.
 */
2661 sgen_client_vtable_is_inited (GCVTable *gc_vtable)
2663 MonoVTable *vt = (MonoVTable*)gc_vtable;
2664 return vt->klass->inited;
/* Namespace of the class behind the vtable. */
2668 sgen_client_vtable_get_namespace (GCVTable *gc_vtable)
2670 MonoVTable *vt = (MonoVTable*)gc_vtable;
2671 return vt->klass->name_space;
/* Simple name of the class behind the vtable. */
2675 sgen_client_vtable_get_name (GCVTable *gc_vtable)
2677 MonoVTable *vt = (MonoVTable*)gc_vtable;
2678 return vt->klass->name;
/*
 * Mono-side GC initialization: wire thread lifecycle callbacks into the
 * threads layer, force conservative stack marking, register internal memory
 * types, set up stop-the-world support and the thread-info TLS slot, and
 * finally register the current thread with the GC.
 */
2686 sgen_client_init (void)
2689 MonoThreadInfoCallbacks cb;
/* NOTE(review): cb is not visibly zero-initialized in this extraction —
 * any unset callback fields would be indeterminate; verify upstream. */
2691 cb.thread_register = sgen_thread_register;
2692 cb.thread_detach = sgen_thread_detach;
2693 cb.thread_unregister = sgen_thread_unregister;
2694 cb.thread_attach = sgen_thread_attach;
2695 cb.mono_method_is_critical = (gpointer)is_critical_method;
2696 cb.mono_thread_in_critical_region = thread_in_critical_region;
/* pthread interception hooks (see USE_PTHREAD_INTERCEPT wrappers above). */
2698 cb.thread_exit = mono_gc_pthread_exit;
2699 cb.mono_gc_pthread_create = (gpointer)mono_gc_pthread_create;
2702 mono_threads_init (&cb, sizeof (SgenThreadInfo));
2704 ///* Keep this the default for now */
2705 /* Precise marking is broken on all supported targets. Disable until fixed. */
2706 conservative_stack_mark = TRUE;
2708 sgen_register_fixed_internal_mem_type (INTERNAL_MEM_EPHEMERON_LINK, sizeof (EphemeronLinkNode));
/* Stop-the-world machinery must exist before threads can be suspended. */
2710 mono_sgen_init_stw ();
/* Without __thread support, fall back to an explicit native TLS key. */
2712 #ifndef HAVE_KW_THREAD
2713 mono_native_tls_alloc (&thread_info_key, NULL);
2714 #if defined(__APPLE__) || defined (HOST_WIN32)
2716 * CEE_MONO_TLS requires the tls offset, not the key, so the code below only works on darwin,
2717 * where the two are the same.
2719 mono_tls_key_set_offset (TLS_KEY_SGEN_THREAD_INFO, thread_info_key);
/* __thread case: resolve the variable's TLS offset for the JIT. */
2723 int tls_offset = -1;
2724 MONO_THREAD_VAR_OFFSET (sgen_thread_info, tls_offset);
2725 mono_tls_key_set_offset (TLS_KEY_SGEN_THREAD_INFO, tls_offset);
2730 * This needs to happen before any internal allocations because
2731 * it inits the small id which is required for hazard pointer
/* Register the initializing thread; &dummy presumably anchors its stack —
 * the dummy declaration itself was dropped by the extraction. */
2736 mono_gc_register_thread (&dummy);
/*
 * Parse one Mono-specific MONO_GC_PARAMS option.  Recognizes:
 *   stack-mark=precise|conservative  - stack scanning strategy
 *   bridge-implementation=NAME       - select bridge processor
 *   toggleref-test                   - install the test toggleref callback
 * Unrecognized values for stack-mark emit an env-var error and keep the
 * current setting.  (The return statements were dropped by the extraction.)
 */
2740 sgen_client_handle_gc_param (const char *opt)
2742 if (g_str_has_prefix (opt, "stack-mark=")) {
2743 opt = strchr (opt, '=') + 1;
2744 if (!strcmp (opt, "precise")) {
2745 conservative_stack_mark = FALSE;
2746 } else if (!strcmp (opt, "conservative")) {
2747 conservative_stack_mark = TRUE;
/* Bad value: report the error and state which setting stays in effect. */
2749 sgen_env_var_error (MONO_GC_PARAMS_NAME, conservative_stack_mark ? "Using `conservative`." : "Using `precise`.",
2750 "Invalid value `%s` for `stack-mark` option, possible values are: `precise`, `conservative`.", opt);
2752 } else if (g_str_has_prefix (opt, "bridge-implementation=")) {
2753 opt = strchr (opt, '=') + 1;
2754 sgen_set_bridge_implementation (opt);
2755 } else if (g_str_has_prefix (opt, "toggleref-test")) {
2756 /* FIXME: This should probably in MONO_GC_DEBUG */
2757 sgen_register_test_toggleref_callback ();
/* Print the Mono-specific MONO_GC_PARAMS options to stderr. */
2765 sgen_client_print_gc_params_usage (void)
2767 fprintf (stderr, "  stack-mark=MARK-METHOD (where MARK-METHOD is 'precise' or 'conservative')\n");
/*
 * Parse one Mono-specific MONO_GC_DEBUG option.  Falls through to the
 * bridge's own option handler for anything not matched here; what happens
 * after sgen_bridge_handle_gc_debug() rejects an option is not visible in
 * this extraction.
 */
2771 sgen_client_handle_gc_debug (const char *opt)
2773 if (!strcmp (opt, "xdomain-checks")) {
2774 sgen_mono_xdomain_checks = TRUE;
2775 } else if (!strcmp (opt, "do-not-finalize")) {
2776 do_not_finalize = TRUE;
2777 } else if (!strcmp (opt, "log-finalizers")) {
2778 log_finalizers = TRUE;
2779 } else if (!strcmp (opt, "no-managed-allocator")) {
2780 sgen_set_use_managed_allocator (FALSE);
2781 } else if (!sgen_bridge_handle_gc_debug (opt)) {
/* Print the Mono-specific MONO_GC_DEBUG options, then the bridge's own. */
2788 sgen_client_print_gc_debug_usage (void)
2790 fprintf (stderr, "  xdomain-checks\n");
2791 fprintf (stderr, "  do-not-finalize\n");
2792 fprintf (stderr, "  log-finalizers\n");
2793 fprintf (stderr, "  no-managed-allocator\n");
2794 sgen_bridge_print_gc_debug_usage ();
/*
 * Base GC initialization entry point.  NOTE(review): this definition
 * continues beyond the visible chunk — only the first statements appear
 * here.  Counters are initialized first; nursery canaries force the
 * unmanaged allocator since the managed fast path does not place canaries.
 */
2798 mono_gc_base_init (void)
2800 mono_counters_init ();
2804 if (nursery_canaries_enabled ())
2805 sgen_set_use_managed_allocator (FALSE);