2 * sgen-gc.c: Simple generational GC.
5 * Paolo Molaro (lupus@ximian.com)
6 * Rodrigo Kumpera (kumpera@gmail.com)
8 * Copyright 2005-2011 Novell, Inc (http://www.novell.com)
9 * Copyright 2011 Xamarin Inc (http://www.xamarin.com)
11 * Thread start/stop adapted from Boehm's GC:
12 * Copyright (c) 1994 by Xerox Corporation. All rights reserved.
13 * Copyright (c) 1996 by Silicon Graphics. All rights reserved.
14 * Copyright (c) 1998 by Fergus Henderson. All rights reserved.
15 * Copyright (c) 2000-2004 by Hewlett-Packard Company. All rights reserved.
16 * Copyright 2001-2003 Ximian, Inc
17 * Copyright 2003-2010 Novell, Inc.
18 * Copyright 2011 Xamarin, Inc.
19 * Copyright (C) 2012 Xamarin Inc
21 * This library is free software; you can redistribute it and/or
22 * modify it under the terms of the GNU Library General Public
23 * License 2.0 as published by the Free Software Foundation;
25 * This library is distributed in the hope that it will be useful,
26 * but WITHOUT ANY WARRANTY; without even the implied warranty of
27 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
28 * Library General Public License for more details.
30 * You should have received a copy of the GNU Library General Public
31 * License 2.0 along with this library; if not, write to the Free
32 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
34 * Important: allocation provides always zeroed memory, having to do
35 * a memset after allocation is deadly for performance.
36 * Memory usage at startup is currently as follows:
38 * 64 KB internal space
40 * We should provide a small memory config with half the sizes
42 * We currently try to make as few mono assumptions as possible:
43 * 1) 2-word header with no GC pointers in it (first vtable, second to store the
45 * 2) gc descriptor is the second word in the vtable (first word in the class)
46 * 3) 8 byte alignment is the minimum and enough (not true for special structures (SIMD), FIXME)
47 * 4) there is a function to get an object's size and the number of
48 * elements in an array.
49 * 5) we know the special way bounds are allocated for complex arrays
50 * 6) we know about proxies and how to treat them when domains are unloaded
52 * Always try to keep stack usage to a minimum: no recursive behaviour
53 * and no large stack allocs.
55 * General description.
56 * Objects are initially allocated in a nursery using a fast bump-pointer technique.
57 * When the nursery is full we start a nursery collection: this is performed with a
59 * When the old generation is full we start a copying GC of the old generation as well:
60 * this will be changed to mark&sweep with copying when fragmentation becomes too severe
61 * in the future. Maybe we'll even do both during the same collection like IMMIX.
63 * The things that complicate this description are:
64 * *) pinned objects: we can't move them so we need to keep track of them
65 * *) no precise info of the thread stacks and registers: we need to be able to
66 * quickly find the objects that may be referenced conservatively and pin them
67 * (this makes the first issues more important)
68 * *) large objects are too expensive to be dealt with using copying GC: we handle them
69 * with mark/sweep during major collections
70 * *) some objects need to not move even if they are small (interned strings, Type handles):
71 * we use mark/sweep for them, too: they are not allocated in the nursery, but inside
72 * PinnedChunks regions
78 *) we could have a function pointer in MonoClass to implement
79 customized write barriers for value types
81 *) investigate the stuff needed to advance a thread to a GC-safe
82 point (single-stepping, read from unmapped memory etc) and implement it.
83 This would enable us to inline allocations and write barriers, for example,
84 or at least parts of them, like the write barrier checks.
85 We may need this also for handling precise info on stacks, even simple things
86 as having uninitialized data on the stack and having to wait for the prolog
87 to zero it. Not an issue for the last frame that we scan conservatively.
88 We could always not trust the value in the slots anyway.
90 *) modify the jit to save info about references in stack locations:
91 this can be done just for locals as a start, so that at least
92 part of the stack is handled precisely.
94 *) test/fix endianness issues
96 *) Implement a card table as the write barrier instead of remembered
97 sets? Card tables are not easy to implement with our current
98 memory layout. We have several different kinds of major heap
99 objects: Small objects in regular blocks, small objects in pinned
100 chunks and LOS objects. If we just have a pointer we have no way
101 to tell which kind of object it points into, therefore we cannot
102 know where its card table is. The least we have to do to make
103 this happen is to get rid of write barriers for indirect stores.
106 *) Get rid of write barriers for indirect stores. We can do this by
107 telling the GC to wbarrier-register an object once we do an ldloca
108 or ldelema on it, and to unregister it once it's not used anymore
109 (it can only travel downwards on the stack). The problem with
110 unregistering is that it needs to happen eventually no matter
111 what, even if exceptions are thrown, the thread aborts, etc.
112 Rodrigo suggested that we could do only the registering part and
113 let the collector find out (pessimistically) when it's safe to
114 unregister, namely when the stack pointer of the thread that
115 registered the object is higher than it was when the registering
116 happened. This might make for a good first implementation to get
117 some data on performance.
119 *) Some sort of blacklist support? Blacklists is a concept from the
120 Boehm GC: if during a conservative scan we find pointers to an
121 area which we might use as heap, we mark that area as unusable, so
122 pointer retention by random pinning pointers is reduced.
124 *) experiment with max small object size (very small right now - 2kb,
125 because it's tied to the max freelist size)
127 *) add an option to mmap the whole heap in one chunk: it makes for many
128 simplifications in the checks (put the nursery at the top and just use a single
129 check for inclusion/exclusion): the issue this has is that on 32 bit systems it's
130 not flexible (too much of the address space may be used by default or we can't
131 increase the heap as needed) and we'd need a race-free mechanism to return memory
132 back to the system (mprotect(PROT_NONE) will still keep the memory allocated if it
133 was written to, munmap is needed, but the following mmap may not find the same segment
136 *) memzero the major fragments after restarting the world and optionally a smaller
139 *) investigate having fragment zeroing threads
141 *) separate locks for finalization and other minor stuff to reduce
144 *) try a different copying order to improve memory locality
146 *) a thread abort after a store but before the write barrier will
147 prevent the write barrier from executing
149 *) specialized dynamically generated markers/copiers
151 *) Dynamically adjust TLAB size to the number of threads. If we have
152 too many threads that do allocation, we might need smaller TLABs,
153 and we might get better performance with larger TLABs if we only
154 have a handful of threads. We could sum up the space left in all
155 assigned TLABs and if that's more than some percentage of the
156 nursery size, reduce the TLAB size.
158 *) Explore placing unreachable objects on unused nursery memory.
159 Instead of memset'ng a region to zero, place an int[] covering it.
160 A good place to start is add_nursery_frag. The tricky thing here is
161 placing those objects atomically outside of a collection.
163 *) Allocation should use asymmetric Dekker synchronization:
164 http://blogs.oracle.com/dave/resource/Asymmetric-Dekker-Synchronization.txt
165 This should help weak consistency archs.
172 #define _XOPEN_SOURCE
173 #define _DARWIN_C_SOURCE
179 #ifdef HAVE_PTHREAD_H
182 #ifdef HAVE_PTHREAD_NP_H
183 #include <pthread_np.h>
185 #ifdef HAVE_SEMAPHORE_H
186 #include <semaphore.h>
193 #include "metadata/sgen-gc.h"
194 #include "metadata/metadata-internals.h"
195 #include "metadata/class-internals.h"
196 #include "metadata/gc-internal.h"
197 #include "metadata/object-internals.h"
198 #include "metadata/threads.h"
199 #include "metadata/sgen-cardtable.h"
200 #include "metadata/sgen-protocol.h"
201 #include "metadata/sgen-archdep.h"
202 #include "metadata/sgen-bridge.h"
203 #include "metadata/sgen-memory-governor.h"
204 #include "metadata/sgen-hash-table.h"
205 #include "metadata/mono-gc.h"
206 #include "metadata/method-builder.h"
207 #include "metadata/profiler-private.h"
208 #include "metadata/monitor.h"
209 #include "metadata/mempool-internals.h"
210 #include "metadata/marshal.h"
211 #include "metadata/runtime.h"
212 #include "metadata/sgen-cardtable.h"
213 #include "metadata/sgen-pinning.h"
214 #include "metadata/sgen-workers.h"
215 #include "metadata/sgen-layout-stats.h"
216 #include "utils/mono-mmap.h"
217 #include "utils/mono-time.h"
218 #include "utils/mono-semaphore.h"
219 #include "utils/mono-counters.h"
220 #include "utils/mono-proclib.h"
221 #include "utils/mono-memory-model.h"
222 #include "utils/mono-logger-internal.h"
223 #include "utils/dtrace.h"
225 #include <mono/utils/mono-logger-internal.h>
226 #include <mono/utils/memcheck.h>
228 #if defined(__MACH__)
229 #include "utils/mach-support.h"
232 #define OPDEF(a,b,c,d,e,f,g,h,i,j) \
236 #include "mono/cil/opcode.def"
242 #undef pthread_create
244 #undef pthread_detach
247 * ######################################################################
248 * ######## Types and constants used by the GC.
249 * ######################################################################
252 /* 0 means not initialized, 1 is initialized, -1 means in progress */
253 static int gc_initialized = 0;
254 /* If set, check if we need to do something every X allocations */
255 gboolean has_per_allocation_action;
256 /* If set, do a heap check every X allocation */
257 guint32 verify_before_allocs = 0;
258 /* If set, do a minor collection before every X allocation */
259 guint32 collect_before_allocs = 0;
260 /* If set, do a whole heap check before each collection */
261 static gboolean whole_heap_check_before_collection = FALSE;
262 /* If set, do a heap consistency check before each minor collection */
263 static gboolean consistency_check_at_minor_collection = FALSE;
264 /* If set, do a mod union consistency check before each finishing collection pause */
265 static gboolean mod_union_consistency_check = FALSE;
266 /* If set, check whether mark bits are consistent after major collections */
267 static gboolean check_mark_bits_after_major_collection = FALSE;
268 /* If set, check that all nursery objects are pinned/not pinned, depending on context */
269 static gboolean check_nursery_objects_pinned = FALSE;
270 /* If set, do a few checks when the concurrent collector is used */
271 static gboolean do_concurrent_checks = FALSE;
272 /* If set, check that there are no references to the domain left at domain unload */
273 static gboolean xdomain_checks = FALSE;
274 /* If not null, dump the heap after each collection into this file */
275 static FILE *heap_dump_file = NULL;
276 /* If set, mark stacks conservatively, even if precise marking is possible */
277 static gboolean conservative_stack_mark = FALSE;
278 /* If set, do a plausibility check on the scan_starts before and after
280 static gboolean do_scan_starts_check = FALSE;
282 * If the major collector is concurrent and this is FALSE, we will
283 * never initiate a synchronous major collection, unless requested via
286 static gboolean allow_synchronous_major = TRUE;
287 static gboolean disable_minor_collections = FALSE;
288 static gboolean disable_major_collections = FALSE;
289 gboolean do_pin_stats = FALSE;
290 static gboolean do_verify_nursery = FALSE;
291 static gboolean do_dump_nursery_content = FALSE;
292 static gboolean enable_nursery_canaries = FALSE;
294 #ifdef HEAVY_STATISTICS
295 guint64 stat_objects_alloced_degraded = 0;
296 guint64 stat_bytes_alloced_degraded = 0;
298 guint64 stat_copy_object_called_nursery = 0;
299 guint64 stat_objects_copied_nursery = 0;
300 guint64 stat_copy_object_called_major = 0;
301 guint64 stat_objects_copied_major = 0;
303 guint64 stat_scan_object_called_nursery = 0;
304 guint64 stat_scan_object_called_major = 0;
306 guint64 stat_slots_allocated_in_vain;
308 guint64 stat_nursery_copy_object_failed_from_space = 0;
309 guint64 stat_nursery_copy_object_failed_forwarded = 0;
310 guint64 stat_nursery_copy_object_failed_pinned = 0;
311 guint64 stat_nursery_copy_object_failed_to_space = 0;
313 static int stat_wbarrier_add_to_global_remset = 0;
314 static int stat_wbarrier_set_field = 0;
315 static int stat_wbarrier_set_arrayref = 0;
316 static int stat_wbarrier_arrayref_copy = 0;
317 static int stat_wbarrier_generic_store = 0;
318 static int stat_wbarrier_generic_store_atomic = 0;
319 static int stat_wbarrier_set_root = 0;
320 static int stat_wbarrier_value_copy = 0;
321 static int stat_wbarrier_object_copy = 0;
324 static guint64 stat_pinned_objects = 0;
326 static guint64 time_minor_pre_collection_fragment_clear = 0;
327 static guint64 time_minor_pinning = 0;
328 static guint64 time_minor_scan_remsets = 0;
329 static guint64 time_minor_scan_pinned = 0;
330 static guint64 time_minor_scan_registered_roots = 0;
331 static guint64 time_minor_scan_thread_data = 0;
332 static guint64 time_minor_finish_gray_stack = 0;
333 static guint64 time_minor_fragment_creation = 0;
335 static guint64 time_major_pre_collection_fragment_clear = 0;
336 static guint64 time_major_pinning = 0;
337 static guint64 time_major_scan_pinned = 0;
338 static guint64 time_major_scan_registered_roots = 0;
339 static guint64 time_major_scan_thread_data = 0;
340 static guint64 time_major_scan_alloc_pinned = 0;
341 static guint64 time_major_scan_finalized = 0;
342 static guint64 time_major_scan_big_objects = 0;
343 static guint64 time_major_finish_gray_stack = 0;
344 static guint64 time_major_free_bigobjs = 0;
345 static guint64 time_major_los_sweep = 0;
346 static guint64 time_major_sweep = 0;
347 static guint64 time_major_fragment_creation = 0;
349 static guint64 time_max = 0;
351 static SGEN_TV_DECLARE (time_major_conc_collection_start);
352 static SGEN_TV_DECLARE (time_major_conc_collection_end);
354 static SGEN_TV_DECLARE (last_minor_collection_start_tv);
355 static SGEN_TV_DECLARE (last_minor_collection_end_tv);
357 int gc_debug_level = 0;
360 static MonoGCFinalizerCallbacks fin_callbacks;
/* Flush the GC debug log stream so buffered diagnostics become visible
 * immediately (e.g. when inspecting output from a debugger).
 * NOTE(review): the return-type line and braces are stripped from this excerpt. */
364 mono_gc_flush_info (void)
366 fflush (gc_debug_file);
370 #define TV_DECLARE SGEN_TV_DECLARE
371 #define TV_GETTIME SGEN_TV_GETTIME
372 #define TV_ELAPSED SGEN_TV_ELAPSED
374 SGEN_TV_DECLARE (sgen_init_timestamp);
376 #define ALIGN_TO(val,align) ((((guint64)val) + ((align) - 1)) & ~((align) - 1))
378 NurseryClearPolicy nursery_clear_policy = CLEAR_AT_TLAB_CREATION;
380 #define object_is_forwarded SGEN_OBJECT_IS_FORWARDED
381 #define object_is_pinned SGEN_OBJECT_IS_PINNED
382 #define pin_object SGEN_PIN_OBJECT
384 #define ptr_in_nursery sgen_ptr_in_nursery
386 #define LOAD_VTABLE SGEN_LOAD_VTABLE
/* Return the class name of OBJ, read through its vtable.  Only meaningful for
 * a valid object header (LOAD_VTABLE masks any tag bits per SGEN_LOAD_VTABLE).
 * NOTE(review): the return-type line is not visible in this excerpt. */
389 safe_name (void* obj)
391 MonoVTable *vt = (MonoVTable*)LOAD_VTABLE (obj);
392 return vt->klass->name;
/* True when nursery canary checking was switched on (see the
 * enable_nursery_canaries flag declared above). */
396 nursery_canaries_enabled (void)
398 return enable_nursery_canaries;
401 #define safe_object_get_size sgen_safe_object_get_size
/* Non-static wrapper around safe_name () so other sgen modules can use it. */
404 sgen_safe_name (void* obj)
406 return safe_name (obj);
410 * ######################################################################
411 * ######## Global data.
412 * ######################################################################
414 LOCK_DECLARE (gc_mutex);
415 gboolean sgen_try_free_some_memory;
417 #define SCAN_START_SIZE SGEN_SCAN_START_SIZE
419 static mword pagesize = 4096;
420 size_t degraded_mode = 0;
422 static mword bytes_pinned_from_failed_allocation = 0;
424 GCMemSection *nursery_section = NULL;
425 static volatile mword lowest_heap_address = ~(mword)0;
426 static volatile mword highest_heap_address = 0;
428 LOCK_DECLARE (sgen_interruption_mutex);
430 typedef struct _FinalizeReadyEntry FinalizeReadyEntry;
431 struct _FinalizeReadyEntry {
432 FinalizeReadyEntry *next;
436 typedef struct _EphemeronLinkNode EphemeronLinkNode;
438 struct _EphemeronLinkNode {
439 EphemeronLinkNode *next;
448 int current_collection_generation = -1;
449 volatile gboolean concurrent_collection_in_progress = FALSE;
451 /* objects that are ready to be finalized */
452 static FinalizeReadyEntry *fin_ready_list = NULL;
453 static FinalizeReadyEntry *critical_fin_list = NULL;
455 static EphemeronLinkNode *ephemeron_list;
457 /* registered roots: the key to the hash is the root start address */
459 * Different kinds of roots are kept separate to speed up pin_from_roots () for example.
461 SgenHashTable roots_hash [ROOT_TYPE_NUM] = {
462 SGEN_HASH_TABLE_INIT (INTERNAL_MEM_ROOTS_TABLE, INTERNAL_MEM_ROOT_RECORD, sizeof (RootRecord), mono_aligned_addr_hash, NULL),
463 SGEN_HASH_TABLE_INIT (INTERNAL_MEM_ROOTS_TABLE, INTERNAL_MEM_ROOT_RECORD, sizeof (RootRecord), mono_aligned_addr_hash, NULL),
464 SGEN_HASH_TABLE_INIT (INTERNAL_MEM_ROOTS_TABLE, INTERNAL_MEM_ROOT_RECORD, sizeof (RootRecord), mono_aligned_addr_hash, NULL)
466 static mword roots_size = 0; /* amount of memory in the root set */
468 #define GC_ROOT_NUM 32
470 int count; /* must be the first field */
471 void *objects [GC_ROOT_NUM];
472 int root_types [GC_ROOT_NUM];
473 uintptr_t extra_info [GC_ROOT_NUM];
/* Report the accumulated batch of roots to the profiler.
 * NOTE(review): the surrounding lines are stripped from this excerpt;
 * presumably report->count is reset here after the call — confirm, since
 * add_profile_gc_root () relies on that to avoid overflowing the
 * fixed-size GC_ROOT_NUM arrays. */
477 notify_gc_roots (GCRootReport *report)
481 mono_profiler_gc_roots (report->count, report->objects, report->root_types, report->extra_info);
/* Append one root to REPORT for later profiler notification.  When the
 * fixed-size buffer (GC_ROOT_NUM entries) is full, flush it first via
 * notify_gc_roots ().  Note the extra_info parameter is ignored: the slot is
 * always filled with the object's class pointer (line below stores ->klass). */
486 add_profile_gc_root (GCRootReport *report, void *object, int rtype, uintptr_t extra_info)
488 if (report->count == GC_ROOT_NUM)
489 notify_gc_roots (report);
490 report->objects [report->count] = object;
491 report->root_types [report->count] = rtype;
492 report->extra_info [report->count++] = (uintptr_t)((MonoVTable*)LOAD_VTABLE (object))->klass;
495 MonoNativeTlsKey thread_info_key;
497 #ifdef HAVE_KW_THREAD
498 __thread SgenThreadInfo *sgen_thread_info;
499 __thread char *stack_end;
502 /* The size of a TLAB */
503 /* The bigger the value, the less often we have to go to the slow path to allocate a new
504 * one, but the more space is wasted by threads not allocating much memory.
506 * FIXME: Make this self-tuning for each thread.
508 guint32 tlab_size = (1024 * 4);
510 #define MAX_SMALL_OBJ_SIZE SGEN_MAX_SMALL_OBJ_SIZE
512 /* Functions supplied by the runtime to be called by the GC */
513 static MonoGCCallbacks gc_callbacks;
515 #define ALLOC_ALIGN SGEN_ALLOC_ALIGN
517 #define ALIGN_UP SGEN_ALIGN_UP
519 #define MOVED_OBJECTS_NUM 64
520 static void *moved_objects [MOVED_OBJECTS_NUM];
521 static int moved_objects_idx = 0;
523 /* Vtable of the objects used to fill out nursery fragments before a collection */
524 static MonoVTable *array_fill_vtable;
526 #ifdef SGEN_DEBUG_INTERNAL_ALLOC
527 MonoNativeThreadId main_gc_thread = NULL;
530 /*Object was pinned during the current collection*/
531 static mword objects_pinned;
534 * ######################################################################
535 * ######## Macros and function declarations.
536 * ######################################################################
/* Round PTR up to the next pointer-size boundary.
 * NOTE(review): the return statement is stripped from this excerpt;
 * presumably it returns (void*)p — confirm against the full source. */
540 align_pointer (void *ptr)
542 mword p = (mword)ptr;
543 p += sizeof (gpointer) - 1;
544 p &= ~ (sizeof (gpointer) - 1);
548 typedef SgenGrayQueue GrayQueue;
550 /* forward declarations */
551 static void scan_thread_data (void *start_nursery, void *end_nursery, gboolean precise, GrayQueue *queue);
552 static void scan_from_registered_roots (char *addr_start, char *addr_end, int root_type, ScanCopyContext ctx);
553 static void scan_finalizer_entries (FinalizeReadyEntry *list, ScanCopyContext ctx);
554 static void report_finalizer_roots (void);
555 static void report_registered_roots (void);
557 static void pin_from_roots (void *start_nursery, void *end_nursery, GrayQueue *queue);
558 static void finish_gray_stack (int generation, GrayQueue *queue);
560 void mono_gc_scan_for_specific_ref (MonoObject *key, gboolean precise);
563 static void init_stats (void);
565 static int mark_ephemerons_in_range (ScanCopyContext ctx);
566 static void clear_unreachable_ephemerons (ScanCopyContext ctx);
567 static void null_ephemerons_for_domain (MonoDomain *domain);
569 SgenObjectOperations current_object_ops;
570 SgenMajorCollector major_collector;
571 SgenMinorCollector sgen_minor_collector;
572 static GrayQueue gray_queue;
574 static SgenRemeberedSet remset;
576 /* The gray queue to use from the main collection thread. */
577 #define WORKERS_DISTRIBUTE_GRAY_QUEUE (&gray_queue)
580 * The gray queue a worker job must use. If we're not parallel or
581 * concurrent, we use the main gray queue.
583 static SgenGrayQueue*
584 sgen_workers_get_job_gray_queue (WorkerData *worker_data)
586 return worker_data ? &worker_data->private_gray_queue : WORKERS_DISTRIBUTE_GRAY_QUEUE;
/* Redirect the contents of QUEUE to the workers' section gray queue: dequeue
 * whole sections and hand them to the section queue stored as the queue's
 * alloc_prepare_data.  Only legal while a concurrent collection is in
 * progress (asserted below); wakes the workers if they have been started.
 * NOTE(review): the loop structure and the code setting/consuming the `wake`
 * flag are stripped from this excerpt — the control flow shown here is
 * incomplete. */
590 gray_queue_redirect (SgenGrayQueue *queue)
592 gboolean wake = FALSE;
595 GrayQueueSection *section = sgen_gray_object_dequeue_section (queue);
598 sgen_section_gray_queue_enqueue (queue->alloc_prepare_data, section);
603 g_assert (concurrent_collection_in_progress);
604 if (sgen_workers_have_started ()) {
605 sgen_workers_ensure_awake ();
607 if (concurrent_collection_in_progress)
608 g_assert (current_collection_generation == -1);
/* Install gray_queue_redirect () as QUEUE's alloc-prepare hook, pointed at the
 * workers' distribute section gray queue, then drain what is already queued.
 * Does nothing outside of a concurrent collection (the stripped line after the
 * `if` is presumably an early return — confirm against the full source). */
614 gray_queue_enable_redirect (SgenGrayQueue *queue)
616 if (!concurrent_collection_in_progress)
619 sgen_gray_queue_set_alloc_prepare (queue, gray_queue_redirect, sgen_workers_get_distribute_section_gray_queue ());
620 gray_queue_redirect (queue);
/* Walk every object in [START, END) and invoke CALLBACK (obj, size, data) on
 * each.  Zero words are skipped (unused/cleared nursery space); forwarded
 * objects are followed via SGEN_OBJECT_IS_FORWARDED.  Objects whose vtable is
 * array_fill_vtable are nursery filler arrays: their size is still computed
 * (so the walk can advance) but, per the visible branch, they are treated
 * differently from real objects — canary checks only run on non-filler
 * objects.  NOTE(review): several control-flow lines (else branches, the
 * advance of `start`, use of allow_flags) are stripped from this excerpt. */
624 sgen_scan_area_with_callback (char *start, char *end, IterateObjectCallbackFunc callback, void *data, gboolean allow_flags)
626 while (start < end) {
630 if (!*(void**)start) {
631 start += sizeof (void*); /* should be ALLOC_ALIGN, really */
636 if (!(obj = SGEN_OBJECT_IS_FORWARDED (start)))
642 if ((MonoVTable*)SGEN_LOAD_VTABLE (obj) != array_fill_vtable) {
643 CHECK_CANARY_FOR_OBJECT (obj);
644 size = ALIGN_UP (safe_object_get_size ((MonoObject*)obj));
645 callback (obj, size, data);
646 CANARIFY_SIZE (size);
648 size = ALIGN_UP (safe_object_get_size ((MonoObject*)obj));
/* Decide whether the object at START belongs to DOMAIN and therefore must be
 * removed during domain unload; logs and records the cleanup in the binary
 * protocol when it does.  NOTE(review): the return statements are stripped
 * from this excerpt — presumably TRUE inside the branch, FALSE otherwise. */
656 need_remove_object_for_domain (char *start, MonoDomain *domain)
658 if (mono_object_domain (start) == domain) {
659 SGEN_LOG (4, "Need to cleanup object %p", start);
660 binary_protocol_cleanup (start, (gpointer)LOAD_VTABLE (start), safe_object_get_size ((MonoObject*)start));
/* Null out remoting back-references into a dying DOMAIN before the object
 * sweep: if the object is a real proxy whose unwrapped server lives in DOMAIN
 * (or has already been obliterated), clear the unwrapped_server pointer so the
 * proxy cannot dangle.  Internal-thread objects are asserted to live in the
 * root domain only. */
667 process_object_for_domain_clearing (char *start, MonoDomain *domain)
669 GCVTable *vt = (GCVTable*)LOAD_VTABLE (start);
670 if (vt->klass == mono_defaults.internal_thread_class)
671 g_assert (mono_object_domain (start) == mono_get_root_domain ());
672 /* The object could be a proxy for an object in the domain
674 #ifndef DISABLE_REMOTING
675 if (mono_defaults.real_proxy_class->supertypes && mono_class_has_parent_fast (vt->klass, mono_defaults.real_proxy_class)) {
676 MonoObject *server = ((MonoRealProxy*)start)->unwrapped_server;
678 /* The server could already have been zeroed out, so
679 we need to check for that, too. */
680 if (server && (!LOAD_VTABLE (server) || mono_object_domain (server) == domain)) {
681 SGEN_LOG (4, "Cleaning up remote pointer in %p to object %p", start, server);
682 ((MonoRealProxy*)start)->unwrapped_server = NULL;
/* Per-object step of domain clearing: first fix up proxy references, then
 * check whether OBJ itself must be removed.  If it will be removed and has a
 * monitor (synchronisation block), clear the monitor's weak link so the
 * monitor does not keep a dangling pointer.  NOTE(review): the declaration of
 * `remove` and the return statement are stripped from this excerpt —
 * presumably the function returns `remove`. */
689 clear_domain_process_object (char *obj, MonoDomain *domain)
693 process_object_for_domain_clearing (obj, domain);
694 remove = need_remove_object_for_domain (obj, domain);
696 if (remove && ((MonoObject*)obj)->synchronisation) {
697 void **dislink = mono_monitor_get_object_monitor_weak_link ((MonoObject*)obj);
699 sgen_register_disappearing_link (NULL, dislink, FALSE, TRUE);
/* Nursery-object callback for domain clearing: objects belonging to the dying
 * domain are zeroed in place (after CANARIFY_SIZE adjusts the size for any
 * trailing canary) so the nursery never holds references to unloaded
 * metadata. */
706 clear_domain_process_minor_object_callback (char *obj, size_t size, MonoDomain *domain)
708 if (clear_domain_process_object (obj, domain)) {
709 CANARIFY_SIZE (size);
710 memset (obj, 0, size);
/* Major-heap callback for the first clearing pass: only fixes up/marks the
 * object (proxies, monitor links).  Actual freeing happens in the later
 * ITERATE_OBJECTS_SWEEP_* passes — see the two-pass comment in
 * mono_gc_clear_domain (). */
715 clear_domain_process_major_object_callback (char *obj, size_t size, MonoDomain *domain)
717 clear_domain_process_object (obj, domain);
/* Second-pass callback: free a non-pinned major-heap object that belongs to
 * the domain being unloaded. */
721 clear_domain_free_major_non_pinned_object_callback (char *obj, size_t size, MonoDomain *domain)
723 if (need_remove_object_for_domain (obj, domain))
724 major_collector.free_non_pinned_object (obj, size);
/* Second-pass callback: free a pinned major-heap object that belongs to the
 * domain being unloaded. */
728 clear_domain_free_major_pinned_object_callback (char *obj, size_t size, MonoDomain *domain)
730 if (need_remove_object_for_domain (obj, domain))
731 major_collector.free_pinned_object (obj, size);
735 * When appdomains are unloaded we can easily remove objects that have finalizers,
736 * but all the others could still be present in random places on the heap.
737 * We need a sweep to get rid of them even though it's going to be costly
739 * The reason we need to remove them is because we access the vtable and class
740 * structures to know the object size and the reference bitmap: once the domain is
741 * unloaded they point to random memory.
/* Remove every object belonging to DOMAIN from all generations.  Runs with the
 * world stopped (the sgen_stop_world call is stripped from this excerpt; the
 * matching restart is visible near the end).  NOTE(review): several lines
 * (`int i;`, braces, `continue`/`else` lines, prev bookkeeping) are stripped
 * here — the visible lines are kept byte-identical. */
744 mono_gc_clear_domain (MonoDomain * domain)
746 LOSObject *bigobj, *prev;
751 binary_protocol_domain_unload_begin (domain);
/* A concurrent major collection must be finished first: we are about to free
 * objects it may still be scanning. */
755 if (concurrent_collection_in_progress)
756 sgen_perform_collection (0, GENERATION_OLD, "clear domain", TRUE);
757 g_assert (!concurrent_collection_in_progress);
/* Flush staged finalizer/dislink registrations so the tables below are
 * complete before we null entries for this domain. */
759 sgen_process_fin_stage_entries ();
760 sgen_process_dislink_stage_entries ();
762 sgen_clear_nursery_fragments ();
/* Optional debug pass: verify no roots or cross-domain refs still point into
 * the dying domain. */
764 if (xdomain_checks && domain != mono_get_root_domain ()) {
765 sgen_scan_for_registered_roots_in_domain (domain, ROOT_TYPE_NORMAL);
766 sgen_scan_for_registered_roots_in_domain (domain, ROOT_TYPE_WBARRIER);
767 sgen_check_for_xdomain_refs ();
770 /*Ephemerons and dislinks must be processed before LOS since they might end up pointing
771 to memory returned to the OS.*/
772 null_ephemerons_for_domain (domain);
774 for (i = GENERATION_NURSERY; i < GENERATION_MAX; ++i)
775 sgen_null_links_for_domain (domain, i);
777 for (i = GENERATION_NURSERY; i < GENERATION_MAX; ++i)
778 sgen_remove_finalizers_for_domain (domain, i);
/* Nursery: zero dead objects in place. */
780 sgen_scan_area_with_callback (nursery_section->data, nursery_section->end_data,
781 (IterateObjectCallbackFunc)clear_domain_process_minor_object_callback, domain, FALSE);
783 /* We need two passes over major and large objects because
784 freeing such objects might give their memory back to the OS
785 (in the case of large objects) or obliterate its vtable
786 (pinned objects with major-copying or pinned and non-pinned
787 objects with major-mark&sweep), but we might need to
788 dereference a pointer from an object to another object if
789 the first object is a proxy. */
790 major_collector.iterate_objects (ITERATE_OBJECTS_SWEEP_ALL, (IterateObjectCallbackFunc)clear_domain_process_major_object_callback, domain);
791 for (bigobj = los_object_list; bigobj; bigobj = bigobj->next)
792 clear_domain_process_object (bigobj->data, domain);
/* Second pass over LOS: unlink and free large objects owned by DOMAIN. */
795 for (bigobj = los_object_list; bigobj;) {
796 if (need_remove_object_for_domain (bigobj->data, domain)) {
797 LOSObject *to_free = bigobj;
799 prev->next = bigobj->next;
801 los_object_list = bigobj->next;
802 bigobj = bigobj->next;
/* NOTE(review): this logs bigobj->data AFTER bigobj has advanced, so it prints
 * the NEXT object's data — presumably to_free->data was intended — and would
 * dereference NULL if the freed object was last in the list (only when debug
 * level >= 4).  TODO confirm and fix upstream. */
803 SGEN_LOG (4, "Freeing large object %p", bigobj->data);
804 sgen_los_free_object (to_free);
808 bigobj = bigobj->next;
/* Second pass over the major heap: actually free the dead objects. */
810 major_collector.iterate_objects (ITERATE_OBJECTS_SWEEP_NON_PINNED, (IterateObjectCallbackFunc)clear_domain_free_major_non_pinned_object_callback, domain);
811 major_collector.iterate_objects (ITERATE_OBJECTS_SWEEP_PINNED, (IterateObjectCallbackFunc)clear_domain_free_major_pinned_object_callback, domain);
/* Root-domain unload means runtime shutdown: dump accumulated stats. */
813 if (domain == mono_get_root_domain ()) {
814 if (G_UNLIKELY (do_pin_stats))
815 sgen_pin_stats_print_class_stats ();
816 sgen_object_layout_dump (stdout);
819 sgen_restart_world (0, NULL);
821 binary_protocol_domain_unload_end (domain);
822 binary_protocol_flush_buffers (FALSE);
828 * sgen_add_to_global_remset:
830 * The global remset contains locations which point into newspace after
831 * a minor collection. This can happen if the objects they point to are pinned.
833 * LOCKING: If called from a parallel collector, the global remset
834 * lock must be held. For serial collectors that is not necessary.
/* Record PTR (a location pointing at nursery object OBJ) in the global remset
 * so the next minor collection can treat it as a root.  The assertions encode
 * when this is legal: during a pause for non-concurrent majors, or from the
 * concurrent collector outside pauses; a non-pinned target may only stay in
 * the nursery with a split nursery or a concurrent collection in flight.
 * If the object gets cemented (sgen_cement_lookup_or_register returns
 * non-zero) the stripped line after the else-if presumably returns early
 * without recording — confirm against the full source. */
837 sgen_add_to_global_remset (gpointer ptr, gpointer obj)
839 SGEN_ASSERT (5, sgen_ptr_in_nursery (obj), "Target pointer of global remset must be in the nursery");
841 HEAVY_STAT (++stat_wbarrier_add_to_global_remset);
843 if (!major_collector.is_concurrent) {
844 SGEN_ASSERT (5, current_collection_generation != -1, "Global remsets can only be added during collections");
846 if (current_collection_generation == -1)
847 SGEN_ASSERT (5, sgen_concurrent_collection_in_progress (), "Global remsets outside of collection pauses can only be added by the concurrent collector");
850 if (!object_is_pinned (obj))
851 SGEN_ASSERT (5, sgen_minor_collector.is_split || sgen_concurrent_collection_in_progress (), "Non-pinned objects can only remain in nursery if it is a split nursery");
852 else if (sgen_cement_lookup_or_register (obj))
/* Delegate the actual recording to the active remembered-set implementation. */
855 remset.record_pointer (ptr);
857 if (G_UNLIKELY (do_pin_stats))
858 sgen_pin_stats_register_global_remset (obj);
860 SGEN_LOG (8, "Adding global remset for %p", ptr);
861 binary_protocol_global_remset (ptr, obj, (gpointer)SGEN_LOAD_VTABLE (obj));
/* DTrace probe for global-remset additions. */
865 if (G_UNLIKELY (MONO_GC_GLOBAL_REMSET_ADD_ENABLED ())) {
866 MonoVTable *vt = (MonoVTable*)LOAD_VTABLE (obj);
867 MONO_GC_GLOBAL_REMSET_ADD ((mword)ptr, (mword)obj, sgen_safe_object_get_size (obj),
868 vt->klass->name_space, vt->klass->name);
874 * sgen_drain_gray_stack:
876 * Scan objects in the gray stack until the stack is empty. This should be called
877 * frequently after each object is copied, to achieve better locality and cache
880 * max_objs is the maximum number of objects to scan, or -1 to scan until the stack is
/* Scan up to MAX_OBJS objects from the gray stack (all of them when
 * max_objs < 0 — the do/while below keeps repeating the inner for-loop in
 * that case).  During a major collection, defer to the major collector's own
 * drain routine when it provides one.  NOTE(review): the loop opening
 * (`do {`), the dequeue-empty check, and the return statements are stripped
 * from this excerpt; the visible lines are kept byte-identical. */
884 sgen_drain_gray_stack (int max_objs, ScanCopyContext ctx)
886 ScanObjectFunc scan_func = ctx.scan_func;
887 GrayQueue *queue = ctx.queue;
889 if (current_collection_generation == GENERATION_OLD && major_collector.drain_gray_stack)
890 return major_collector.drain_gray_stack (ctx);
894 for (i = 0; i != max_objs; ++i) {
897 GRAY_OBJECT_DEQUEUE (queue, &obj, &desc);
900 SGEN_LOG (9, "Precise gray object scan %p (%s)", obj, safe_name (obj));
901 scan_func (obj, desc, queue);
903 } while (max_objs < 0);
/*
 * pin_objects_from_nursery_pin_queue:
 *
 * Resolves each conservatively-found address in the (sorted) nursery pin
 * queue to the object containing it, pins that object, scans it, and
 * enqueues it on the gray queue.  The pin queue array is compacted in
 * place (via definitely_pinned) to hold only the objects actually pinned;
 * the count feeds stat_pinned_objects and optional profiler root reports.
 *
 * NOTE(review): this excerpt is missing several original lines
 * (declarations of addr/last/count/idx/desc, various braces, the final
 * return) — verify against upstream mono sgen-gc.c before building.
 */
908 * Addresses in the pin queue are already sorted. This function finds
909 * the object header for each address and pins the object. The
910 * addresses must be inside the nursery section. The (start of the)
911 * address array is overwritten with the addresses of the actually
912 * pinned objects. Return the number of pinned objects.
915 pin_objects_from_nursery_pin_queue (ScanCopyContext ctx)
917 GCMemSection *section = nursery_section;
918 void **start = sgen_pinning_get_entry (section->pin_queue_first_entry);
919 void **end = sgen_pinning_get_entry (section->pin_queue_last_entry);
920 void *start_nursery = section->data;
921 void *end_nursery = section->next_data;
/* pinning_front marks how far into the nursery we have already walked;
 * addresses below it were handled by a previous queue entry. */
926 void *pinning_front = start_nursery;
928 void **definitely_pinned = start;
929 ScanObjectFunc scan_func = ctx.scan_func;
930 SgenGrayQueue *queue = ctx.queue;
932 sgen_nursery_allocator_prepare_for_pinning ();
934 while (start < end) {
935 void *obj_to_pin = NULL;
936 size_t obj_to_pin_size = 0;
941 SGEN_ASSERT (0, addr >= start_nursery && addr < end_nursery, "Potential pinning address out of range");
942 SGEN_ASSERT (0, addr >= last, "Pin queue not sorted");
949 SGEN_LOG (5, "Considering pinning addr %p", addr);
950 /* We've already processed everything up to pinning_front. */
951 if (addr < pinning_front) {
957 * Find the closest scan start <= addr. We might search backward in the
958 * scan_starts array because entries might be NULL. In the worst case we
959 * start at start_nursery.
961 idx = ((char*)addr - (char*)section->data) / SCAN_START_SIZE;
962 SGEN_ASSERT (0, idx < section->num_scan_start, "Scan start index out of range");
963 search_start = (void*)section->scan_starts [idx];
964 if (!search_start || search_start > addr) {
967 search_start = section->scan_starts [idx];
968 if (search_start && search_start <= addr)
971 if (!search_start || search_start > addr)
972 search_start = start_nursery;
976 * If the pinning front is closer than the scan start we found, start
977 * searching at the front.
979 if (search_start < pinning_front)
980 search_start = pinning_front;
983 * Now addr should be in an object a short distance from search_start.
985 * search_start must point to zeroed mem or point to an object.
988 size_t obj_size, canarified_obj_size;
/* Skip zeroed (unallocated) memory word by word until an object header
 * is found; the loop bound below keeps us from running past addr. */
991 if (!*(void**)search_start) {
992 search_start = (void*)ALIGN_UP ((mword)search_start + sizeof (gpointer));
993 /* The loop condition makes sure we don't overrun addr. */
997 canarified_obj_size = obj_size = ALIGN_UP (safe_object_get_size ((MonoObject*)search_start));
1000 * Filler arrays are marked by an invalid sync word. We don't
1001 * consider them for pinning. They are not delimited by canaries,
1004 if (((MonoObject*)search_start)->synchronisation != GINT_TO_POINTER (-1)) {
1005 CHECK_CANARY_FOR_OBJECT (search_start);
1006 CANARIFY_SIZE (canarified_obj_size);
1008 if (addr >= search_start && (char*)addr < (char*)search_start + obj_size) {
1009 /* This is the object we're looking for. */
1010 obj_to_pin = search_start;
1011 obj_to_pin_size = canarified_obj_size;
1016 /* Skip to the next object */
1017 search_start = (void*)((char*)search_start + canarified_obj_size);
1018 } while (search_start <= addr);
1020 /* We've searched past the address we were looking for. */
1022 pinning_front = search_start;
1023 goto next_pin_queue_entry;
1027 * We've found an object to pin. It might still be a dummy array, but we
1028 * can advance the pinning front in any case.
1030 pinning_front = (char*)obj_to_pin + obj_to_pin_size;
1033 * If this is a dummy array marking the beginning of a nursery
1034 * fragment, we don't pin it.
1036 if (((MonoObject*)obj_to_pin)->synchronisation == GINT_TO_POINTER (-1))
1037 goto next_pin_queue_entry;
1040 * Finally - pin the object!
1042 desc = sgen_obj_get_descriptor_safe (obj_to_pin);
1044 scan_func (obj_to_pin, desc, queue);
1046 SGEN_LOG (4, "Pinned object %p, vtable %p (%s), count %d\n",
1047 obj_to_pin, *(void**)obj_to_pin, safe_name (obj_to_pin), count);
1048 binary_protocol_pin (obj_to_pin,
1049 (gpointer)LOAD_VTABLE (obj_to_pin),
1050 safe_object_get_size (obj_to_pin));
1052 #ifdef ENABLE_DTRACE
1053 if (G_UNLIKELY (MONO_GC_OBJ_PINNED_ENABLED ())) {
1054 int gen = sgen_ptr_in_nursery (obj_to_pin) ? GENERATION_NURSERY : GENERATION_OLD;
1055 MonoVTable *vt = (MonoVTable*)LOAD_VTABLE (obj_to_pin);
1056 MONO_GC_OBJ_PINNED ((mword)obj_to_pin,
1057 sgen_safe_object_get_size (obj_to_pin),
1058 vt->klass->name_space, vt->klass->name, gen);
1062 pin_object (obj_to_pin);
1063 GRAY_OBJECT_ENQUEUE (queue, obj_to_pin, desc);
1064 if (G_UNLIKELY (do_pin_stats))
1065 sgen_pin_stats_register_object (obj_to_pin, obj_to_pin_size);
1066 definitely_pinned [count] = obj_to_pin;
1070 next_pin_queue_entry:
1074 //printf ("effective pinned: %d (at the end: %d)\n", count, (char*)end_nursery - (char*)last);
/* Report each pinned object as a GC root to the profiler, if enabled. */
1075 if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS) {
1076 GCRootReport report;
1078 for (idx = 0; idx < count; ++idx)
1079 add_profile_gc_root (&report, definitely_pinned [idx], MONO_PROFILE_GC_ROOT_PINNING | MONO_PROFILE_GC_ROOT_MISC, 0);
1080 notify_gc_roots (&report);
1082 stat_pinned_objects += count;
/*
 * pin_objects_in_nursery:
 *
 * Pins all objects referenced by the nursery pin queue, then shrinks the
 * queue to the entries that were actually pinned (the last-entry index is
 * rewritten from the reduced count).  No-op when the queue is empty.
 *
 * NOTE(review): braces/return and possibly an early-out line are missing
 * from this excerpt — verify against upstream sgen-gc.c.
 */
1087 pin_objects_in_nursery (ScanCopyContext ctx)
1091 if (nursery_section->pin_queue_first_entry == nursery_section->pin_queue_last_entry)
1094 reduced_to = pin_objects_from_nursery_pin_queue (ctx);
1095 nursery_section->pin_queue_last_entry = nursery_section->pin_queue_first_entry + reduced_to;
/*
 * sgen_pin_object:
 *
 * Pins a single object: sets the pinned bit, stages the pointer in the pin
 * queue, enqueues the object on the gray queue for scanning, and records the
 * pin in the binary protocol, pin statistics (if enabled) and the DTrace
 * probe (if enabled).
 *
 * NOTE(review): opening/closing braces are missing from this excerpt —
 * verify against upstream sgen-gc.c.
 */
1100 sgen_pin_object (void *object, GrayQueue *queue)
1102 SGEN_PIN_OBJECT (object);
1103 sgen_pin_stage_ptr (object);
1105 if (G_UNLIKELY (do_pin_stats))
1106 sgen_pin_stats_register_object (object, safe_object_get_size (object));
1108 GRAY_OBJECT_ENQUEUE (queue, object, sgen_obj_get_descriptor_safe (object));
1109 binary_protocol_pin (object, (gpointer)LOAD_VTABLE (object), safe_object_get_size (object));
1111 #ifdef ENABLE_DTRACE
1112 if (G_UNLIKELY (MONO_GC_OBJ_PINNED_ENABLED ())) {
1113 int gen = sgen_ptr_in_nursery (object) ? GENERATION_NURSERY : GENERATION_OLD;
1114 MonoVTable *vt = (MonoVTable*)LOAD_VTABLE (object);
1115 MONO_GC_OBJ_PINNED ((mword)object, sgen_safe_object_get_size (object), vt->klass->name_space, vt->klass->name, gen);
/*
 * sgen_parallel_pin_or_update:
 *
 * Parallel-collector helper: tries to pin *ptr's target.  For nursery
 * objects it races a CAS that tags the vtable word as pinned; for major
 * heap objects it delegates to the major collector.  If the CAS loses
 * because another worker forwarded the object, the caller's slot is
 * updated to the forwarded address instead.
 *
 * NOTE(review): loop structure and several braces/returns are missing from
 * this excerpt — verify against upstream sgen-gc.c.
 */
1121 sgen_parallel_pin_or_update (void **ptr, void *obj, MonoVTable *vt, SgenGrayQueue *queue)
1125 gboolean major_pinned = FALSE;
1127 if (sgen_ptr_in_nursery (obj)) {
1128 if (SGEN_CAS_PTR (obj, SGEN_POINTER_TAG_PINNED (vt), vt) == vt) {
1129 sgen_pin_object (obj, queue);
1133 major_collector.pin_major_object (obj, queue);
1134 major_pinned = TRUE;
/* Re-read the header to see what another worker did to it. */
1137 vtable_word = *(mword*)obj;
1138 /*someone else forwarded it, update the pointer and bail out*/
1139 if (SGEN_POINTER_IS_TAGGED_FORWARDED (vtable_word)) {
1140 *ptr = SGEN_POINTER_UNTAG_VTABLE (vtable_word);
1144 /*someone pinned it, nothing to do.*/
1145 if (SGEN_POINTER_IS_TAGGED_PINNED (vtable_word) || major_pinned)
/*
 * sgen_sort_addresses:
 *
 * In-place heap sort of an address array into increasing order: first a
 * sift-up phase builds a max-heap, then the usual extract-max/sift-down
 * phase produces the sorted array.
 *
 * NOTE(review): loop bodies are partially missing from this excerpt
 * (child/root updates, swap temporaries, braces) — verify against
 * upstream sgen-gc.c.
 */
1150 /* Sort the addresses in array in increasing order.
1151 * Done using a by-the book heap sort. Which has decent and stable performance, is pretty cache efficient.
1154 sgen_sort_addresses (void **array, size_t size)
1159 for (i = 1; i < size; ++i) {
1162 size_t parent = (child - 1) / 2;
1164 if (array [parent] >= array [child])
1167 tmp = array [parent];
1168 array [parent] = array [child];
1169 array [child] = tmp;
1175 for (i = size - 1; i > 0; --i) {
1178 array [i] = array [0];
1184 while (root * 2 + 1 <= end) {
1185 size_t child = root * 2 + 1;
1187 if (child < end && array [child] < array [child + 1])
1189 if (array [root] >= array [child])
1193 array [root] = array [child];
1194 array [child] = tmp;
/*
 * conservatively_pin_objects_from:
 *
 * Conservatively scans [start, end) for words that look like pointers into
 * [start_nursery, end_nursery); each candidate is aligned down to
 * ALLOC_ALIGN and staged in the pin queue for later sorting/uniquing.
 * pin_type classifies the source (stack, static data, other) for pin stats.
 *
 * NOTE(review): start++ advance, count update and closing braces are
 * missing from this excerpt — verify against upstream sgen-gc.c.
 */
1202 * Scan the memory between start and end and queue values which could be pointers
1203 * to the area between start_nursery and end_nursery for later consideration.
1204 * Typically used for thread stacks.
1207 conservatively_pin_objects_from (void **start, void **end, void *start_nursery, void *end_nursery, int pin_type)
1211 #ifdef VALGRIND_MAKE_MEM_DEFINED_IF_ADDRESSABLE
1212 VALGRIND_MAKE_MEM_DEFINED_IF_ADDRESSABLE (start, (char*)end - (char*)start);
1215 while (start < end) {
1216 if (*start >= start_nursery && *start < end_nursery) {
1218 * *start can point to the middle of an object
1219 * note: should we handle pointing at the end of an object?
1220 * pinning in C# code disallows pointing at the end of an object
1221 * but there is some small chance that an optimizing C compiler
1222 * may keep the only reference to an object by pointing
1223 * at the end of it. We ignore this small chance for now.
1224 * Pointers to the end of an object are indistinguishable
1225 * from pointers to the start of the next object in memory
1226 * so if we allow that we'd need to pin two objects...
1227 * We queue the pointer in an array, the
1228 * array will then be sorted and uniqued. This way
1229 * we can coalesce several pinning pointers and it should
1230 * be faster since we'd do a memory scan with increasing
1231 * addresses. Note: we can align the address to the allocation
1232 * alignment, so the unique process is more effective.
1234 mword addr = (mword)*start;
1235 addr &= ~(ALLOC_ALIGN - 1);
1236 if (addr >= (mword)start_nursery && addr < (mword)end_nursery) {
1237 SGEN_LOG (6, "Pinning address %p from %p", (void*)addr, start);
1238 sgen_pin_stage_ptr ((void*)addr);
1239 binary_protocol_pin_stage (start, (void*)addr);
1242 if (G_UNLIKELY (do_pin_stats)) {
1243 if (ptr_in_nursery ((void*)addr))
1244 sgen_pin_stats_register_address ((char*)addr, pin_type);
1250 SGEN_LOG (7, "found %d potential pinned heap pointers", count);
/*
 * pin_from_roots:
 *
 * First phase of a collection: conservatively scans all memory regions that
 * may contain unmanaged pointers to GC objects — API-pinned root ranges and
 * the thread stacks/registers — staging candidate addresses for pinning.
 */
1254 * The first thing we do in a collection is to identify pinned objects.
1255 * This function considers all the areas of memory that need to be
1256 * conservatively scanned.
1259 pin_from_roots (void *start_nursery, void *end_nursery, GrayQueue *queue)
1263 SGEN_LOG (2, "Scanning pinned roots (%d bytes, %d/%d entries)", (int)roots_size, roots_hash [ROOT_TYPE_NORMAL].num_entries, roots_hash [ROOT_TYPE_PINNED].num_entries);
1264 /* objects pinned from the API are inside these roots */
1265 SGEN_HASH_TABLE_FOREACH (&roots_hash [ROOT_TYPE_PINNED], start_root, root) {
1266 SGEN_LOG (6, "Pinned roots %p-%p", start_root, root->end_root);
1267 conservatively_pin_objects_from (start_root, (void**)root->end_root, start_nursery, end_nursery, PIN_TYPE_OTHER);
1268 } SGEN_HASH_TABLE_FOREACH_END;
1269 /* now deal with the thread stacks
1270 * in the future we should be able to conservatively scan only:
1271 * *) the cpu registers
1272 * *) the unmanaged stack frames
1273 * *) the _last_ managed stack frame
1274 * *) pointers slots in managed frames
1276 scan_thread_data (start_nursery, end_nursery, FALSE, queue);
/*
 * unpin_objects_from_queue:
 *
 * Drains a gray queue of previously pinned objects and clears the pinned
 * bit on each (asserting it was set).
 *
 * NOTE(review): the loop header and exit condition are missing from this
 * excerpt — verify against upstream sgen-gc.c.
 */
1280 unpin_objects_from_queue (SgenGrayQueue *queue)
1285 GRAY_OBJECT_DEQUEUE (queue, &addr, &desc);
1288 g_assert (SGEN_OBJECT_IS_PINNED (addr));
1289 SGEN_UNPIN_OBJECT (addr);
/*
 * UserCopyOrMarkData bundles a copy/mark callback with its gray queue so it
 * can be threaded through a single-argument user-root marker callback.
 *
 * NOTE(review): the "typedef struct" opener and the queue field line are
 * missing from this excerpt — verify against upstream sgen-gc.c.
 */
1294 CopyOrMarkObjectFunc func;
1296 } UserCopyOrMarkData;
/* Adapter: invoked per root slot by a user-registered root marker; forwards
 * the slot to the copy/mark function captured in gc_data. */
1299 single_arg_user_copy_or_mark (void **obj, void *gc_data)
1301 UserCopyOrMarkData *data = gc_data;
1303 data->func (obj, data->queue);
/*
 * precisely_scan_objects_from:
 *
 * Precisely scans a root range [start_root, end_root) whose pointer layout
 * is encoded in @desc: a small inline bitmap (ROOT_DESC_BITMAP), an
 * out-of-line bitmap (ROOT_DESC_COMPLEX), or a user callback
 * (ROOT_DESC_USER).  Each non-NULL slot selected by the descriptor is
 * passed to ctx.copy_func, which may update the slot in place.
 *
 * NOTE(review): inner loop headers (bit iteration, slot advance) are
 * missing from this excerpt — verify against upstream sgen-gc.c.
 */
1307 * The memory area from start_root to end_root contains pointers to objects.
1308 * Their position is precisely described by @desc (this means that the pointer
1309 * can be either NULL or the pointer to the start of an object).
1310 * This functions copies them to to_space updates them.
1312 * This function is not thread-safe!
1315 precisely_scan_objects_from (void** start_root, void** end_root, char* n_start, char *n_end, mword desc, ScanCopyContext ctx)
1317 CopyOrMarkObjectFunc copy_func = ctx.copy_func;
1318 SgenGrayQueue *queue = ctx.queue;
1320 switch (desc & ROOT_DESC_TYPE_MASK) {
1321 case ROOT_DESC_BITMAP:
1322 desc >>= ROOT_DESC_TYPE_SHIFT;
1324 if ((desc & 1) && *start_root) {
1325 copy_func (start_root, queue);
1326 SGEN_LOG (9, "Overwrote root at %p with %p", start_root, *start_root);
1332 case ROOT_DESC_COMPLEX: {
1333 gsize *bitmap_data = sgen_get_complex_descriptor_bitmap (desc);
1334 gsize bwords = (*bitmap_data) - 1;
1335 void **start_run = start_root;
1337 while (bwords-- > 0) {
1338 gsize bmap = *bitmap_data++;
1339 void **objptr = start_run;
1341 if ((bmap & 1) && *objptr) {
1342 copy_func (objptr, queue);
1343 SGEN_LOG (9, "Overwrote root at %p with %p", objptr, *objptr);
1348 start_run += GC_BITS_PER_WORD;
1352 case ROOT_DESC_USER: {
1353 UserCopyOrMarkData data = { copy_func, queue };
1354 MonoGCRootMarkFunc marker = sgen_get_user_descriptor_func (desc);
1355 marker (start_root, single_arg_user_copy_or_mark, &data);
1358 case ROOT_DESC_RUN_LEN:
1359 g_assert_not_reached ();
1361 g_assert_not_reached ();
/*
 * reset_heap_boundaries:
 *
 * Resets the tracked heap address range to "empty": lowest to all-ones,
 * highest to zero, so the next sgen_update_heap_boundaries call
 * re-establishes both bounds.
 */
1366 reset_heap_boundaries (void)
1368 lowest_heap_address = ~(mword)0;
1369 highest_heap_address = 0;
/*
 * sgen_update_heap_boundaries:
 *
 * Lock-free widening of the global [lowest_heap_address,
 * highest_heap_address] range to include [low, high].  Each bound is
 * updated with a CAS retry loop so concurrent callers cannot lose updates.
 *
 * NOTE(review): the do-loop headers and the early-exit comparisons inside
 * each loop are missing from this excerpt — verify against upstream
 * sgen-gc.c.
 */
1373 sgen_update_heap_boundaries (mword low, mword high)
1378 old = lowest_heap_address;
1381 } while (SGEN_CAS_PTR ((gpointer*)&lowest_heap_address, (gpointer)low, (gpointer)old) != (gpointer)old);
1384 old = highest_heap_address;
1387 } while (SGEN_CAS_PTR ((gpointer*)&highest_heap_address, (gpointer)high, (gpointer)old) != (gpointer)old);
/*
 * alloc_nursery:
 *
 * Allocates the nursery memory (via the major collector's heap allocator,
 * aligned when SGEN_ALIGN_NURSERY is set), sets up its GCMemSection with
 * scan-start bookkeeping, publishes it as nursery_section, and registers
 * the bounds with the heap-boundary tracker and the nursery allocator.
 * Idempotent: returns early if the nursery already exists.  Aborts
 * (g_assert) if the memory governor cannot grant the space.
 */
1391 * Allocate and setup the data structures needed to be able to allocate objects
1392 * in the nursery. The nursery is stored in nursery_section.
1395 alloc_nursery (void)
1397 GCMemSection *section;
1402 if (nursery_section)
1404 SGEN_LOG (2, "Allocating nursery size: %zu", (size_t)sgen_nursery_size);
1405 /* later we will alloc a larger area for the nursery but only activate
1406 * what we need. The rest will be used as expansion if we have too many pinned
1407 * objects in the existing nursery.
1409 /* FIXME: handle OOM */
1410 section = sgen_alloc_internal (INTERNAL_MEM_SECTION);
1412 alloc_size = sgen_nursery_size;
1414 /* If there isn't enough space even for the nursery we should simply abort. */
1415 g_assert (sgen_memgov_try_alloc_space (alloc_size, SPACE_NURSERY));
1417 #ifdef SGEN_ALIGN_NURSERY
1418 data = major_collector.alloc_heap (alloc_size, alloc_size, DEFAULT_NURSERY_BITS);
1420 data = major_collector.alloc_heap (alloc_size, 0, DEFAULT_NURSERY_BITS);
1422 sgen_update_heap_boundaries ((mword)data, (mword)(data + sgen_nursery_size));
1423 SGEN_LOG (4, "Expanding nursery size (%p-%p): %lu, total: %lu", data, data + alloc_size, (unsigned long)sgen_nursery_size, (unsigned long)mono_gc_get_heap_size ());
1424 section->data = section->next_data = data;
1425 section->size = alloc_size;
1426 section->end_data = data + sgen_nursery_size;
/* One scan-start slot per SCAN_START_SIZE chunk, rounded up. */
1427 scan_starts = (alloc_size + SCAN_START_SIZE - 1) / SCAN_START_SIZE;
1428 section->scan_starts = sgen_alloc_internal_dynamic (sizeof (char*) * scan_starts, INTERNAL_MEM_SCAN_STARTS, TRUE);
1429 section->num_scan_start = scan_starts;
1431 nursery_section = section;
1433 sgen_nursery_allocator_set_nursery_bounds (data, data + sgen_nursery_size);
/*
 * mono_gc_get_nursery:
 *
 * Out-params the nursery size and (when the nursery is aligned) its
 * alignment shift, returning the nursery start address.
 *
 * NOTE(review): the #else branch setting *shift_bits for the unaligned
 * case and the #endif are missing from this excerpt — verify against
 * upstream sgen-gc.c.
 */
1437 mono_gc_get_nursery (int *shift_bits, size_t *size)
1439 *size = sgen_nursery_size;
1440 #ifdef SGEN_ALIGN_NURSERY
1441 *shift_bits = DEFAULT_NURSERY_BITS;
1445 return sgen_get_nursery_start ();
/*
 * mono_gc_set_current_thread_appdomain:
 *
 * Records @domain as the current thread's stopped-domain in its
 * SgenThreadInfo, so the GC knows which appdomain the thread was executing
 * when stopped.
 *
 * NOTE(review): the NULL-info early return mentioned by the comment is
 * missing from this excerpt — verify against upstream sgen-gc.c.
 */
1449 mono_gc_set_current_thread_appdomain (MonoDomain *domain)
1451 SgenThreadInfo *info = mono_thread_info_current ();
1453 /* Could be called from sgen_thread_unregister () with a NULL info */
1456 info->stopped_domain = domain;
/* mono_gc_precise_stack_mark_enabled: TRUE when stacks are scanned
 * precisely, i.e. when conservative stack marking is disabled. */
1461 mono_gc_precise_stack_mark_enabled (void)
1463 return !conservative_stack_mark;
/* mono_gc_get_logfile: accessor for the GC debug log file handle. */
1467 mono_gc_get_logfile (void)
1469 return gc_debug_file;
/*
 * report_finalizer_roots_list:
 *
 * Reports every object on a finalize-ready list to the profiler as a
 * MONO_PROFILE_GC_ROOT_FINALIZER root, then flushes the report.
 */
1473 report_finalizer_roots_list (FinalizeReadyEntry *list)
1475 GCRootReport report;
1476 FinalizeReadyEntry *fin;
1479 for (fin = list; fin; fin = fin->next) {
1482 add_profile_gc_root (&report, fin->object, MONO_PROFILE_GC_ROOT_FINALIZER, 0);
1484 notify_gc_roots (&report);
/* report_finalizer_roots: reports both the normal and the critical
 * finalize-ready lists to the profiler. */
1488 report_finalizer_roots (void)
1490 report_finalizer_roots_list (fin_ready_list);
1491 report_finalizer_roots_list (critical_fin_list);
/* Report currently being filled by precisely_report_roots_from; used to
 * smuggle state into the single-argument user-marker callback below. */
1494 static GCRootReport *root_report;
/* Adapter for user root markers: adds each non-NULL slot's target to the
 * global root_report as a MONO_PROFILE_GC_ROOT_OTHER root. */
1497 single_arg_report_root (void **obj, void *gc_data)
1500 add_profile_gc_root (root_report, *obj, MONO_PROFILE_GC_ROOT_OTHER, 0);
/*
 * precisely_report_roots_from:
 *
 * Profiler-reporting analogue of precisely_scan_objects_from: walks a root
 * range using its descriptor (inline bitmap, complex bitmap, or user
 * callback) and adds each referenced object to @report instead of
 * copying/marking it.
 *
 * NOTE(review): inner bit-iteration loop headers are missing from this
 * excerpt — verify against upstream sgen-gc.c.
 */
1504 precisely_report_roots_from (GCRootReport *report, void** start_root, void** end_root, mword desc)
1506 switch (desc & ROOT_DESC_TYPE_MASK) {
1507 case ROOT_DESC_BITMAP:
1508 desc >>= ROOT_DESC_TYPE_SHIFT;
1510 if ((desc & 1) && *start_root) {
1511 add_profile_gc_root (report, *start_root, MONO_PROFILE_GC_ROOT_OTHER, 0);
1517 case ROOT_DESC_COMPLEX: {
1518 gsize *bitmap_data = sgen_get_complex_descriptor_bitmap (desc);
1519 gsize bwords = (*bitmap_data) - 1;
1520 void **start_run = start_root;
1522 while (bwords-- > 0) {
1523 gsize bmap = *bitmap_data++;
1524 void **objptr = start_run;
1526 if ((bmap & 1) && *objptr) {
1527 add_profile_gc_root (report, *objptr, MONO_PROFILE_GC_ROOT_OTHER, 0);
1532 start_run += GC_BITS_PER_WORD;
1536 case ROOT_DESC_USER: {
1537 MonoGCRootMarkFunc marker = sgen_get_user_descriptor_func (desc);
1538 root_report = report;
1539 marker (start_root, single_arg_report_root, NULL);
1542 case ROOT_DESC_RUN_LEN:
1543 g_assert_not_reached ();
1545 g_assert_not_reached ();
/*
 * report_registered_roots_by_type:
 *
 * Iterates all registered roots of @root_type and reports their referenced
 * objects to the profiler via precisely_report_roots_from.
 */
1550 report_registered_roots_by_type (int root_type)
1552 GCRootReport report;
1556 SGEN_HASH_TABLE_FOREACH (&roots_hash [root_type], start_root, root) {
1557 SGEN_LOG (6, "Precise root scan %p-%p (desc: %p)", start_root, root->end_root, (void*)root->root_desc);
1558 precisely_report_roots_from (&report, start_root, (void**)root->end_root, root->root_desc);
1559 } SGEN_HASH_TABLE_FOREACH_END;
1560 notify_gc_roots (&report);
/* report_registered_roots: reports NORMAL and WBARRIER root sets to the
 * profiler. */
1564 report_registered_roots (void)
1566 report_registered_roots_by_type (ROOT_TYPE_NORMAL);
1567 report_registered_roots_by_type (ROOT_TYPE_WBARRIER);
/*
 * scan_finalizer_entries:
 *
 * Applies ctx.copy_func to every object on a finalize-ready list, keeping
 * finalizable objects (and transitively what they reference) alive and
 * updating the list entries if the objects move.
 */
1571 scan_finalizer_entries (FinalizeReadyEntry *list, ScanCopyContext ctx)
1573 CopyOrMarkObjectFunc copy_func = ctx.copy_func;
1574 SgenGrayQueue *queue = ctx.queue;
1575 FinalizeReadyEntry *fin;
1577 for (fin = list; fin; fin = fin->next) {
1580 SGEN_LOG (5, "Scan of fin ready object: %p (%s)\n", fin->object, safe_name (fin->object));
1581 copy_func (&fin->object, queue);
/* generation_name: human-readable name for a generation constant;
 * asserts on any value other than GENERATION_NURSERY/GENERATION_OLD. */
1586 generation_name (int generation)
1588 switch (generation) {
1589 case GENERATION_NURSERY: return "nursery";
1590 case GENERATION_OLD: return "old";
1591 default: g_assert_not_reached ();
/* sgen_generation_name: public wrapper around generation_name. */
1596 sgen_generation_name (int generation)
1598 return generation_name (generation);
1601 SgenObjectOperations *
1602 sgen_get_current_object_ops (void){
1603 return ¤t_object_ops;
/*
 * finish_gray_stack:
 *
 * Completes a collection after the roots have been scanned: repeatedly
 * drains the gray stack while interleaving ephemeron marking, bridge
 * processing, weak-link nulling, finalizer-queue promotion and toggleref
 * handling, until the gray queue is provably empty.  Ordering here is
 * subtle — each drain/process step depends on the previous one having
 * fully settled liveness.
 *
 * NOTE(review): this excerpt is missing lines throughout (timer vars,
 * do-loop headers, braces, #ifdef guards) — verify against upstream
 * sgen-gc.c before relying on the exact control flow.
 */
1608 finish_gray_stack (int generation, GrayQueue *queue)
1612 int done_with_ephemerons, ephemeron_rounds = 0;
1613 CopyOrMarkObjectFunc copy_func = current_object_ops.copy_or_mark_object;
1614 ScanObjectFunc scan_func = current_object_ops.scan_object;
1615 ScanCopyContext ctx = { scan_func, copy_func, queue };
1616 char *start_addr = generation == GENERATION_NURSERY ? sgen_get_nursery_start () : NULL;
1617 char *end_addr = generation == GENERATION_NURSERY ? sgen_get_nursery_end () : (char*)-1;
1620 * We copied all the reachable objects. Now it's the time to copy
1621 * the objects that were not referenced by the roots, but by the copied objects.
1622 * we built a stack of objects pointed to by gray_start: they are
1623 * additional roots and we may add more items as we go.
1624 * We loop until gray_start == gray_objects which means no more objects have
1625 * been added. Note this is iterative: no recursion is involved.
1626 * We need to walk the LO list as well in search of marked big objects
1627 * (use a flag since this is needed only on major collections). We need to loop
1628 * here as well, so keep a counter of marked LO (increasing it in copy_object).
1629 * To achieve better cache locality and cache usage, we drain the gray stack
1630 * frequently, after each object is copied, and just finish the work here.
1632 sgen_drain_gray_stack (-1, ctx);
1634 SGEN_LOG (2, "%s generation done", generation_name (generation));
1637 Reset bridge data, we might have lingering data from a previous collection if this is a major
1638 collection trigged by minor overflow.
1640 We must reset the gathered bridges since their original block might be evacuated due to major
1641 fragmentation in the meanwhile and the bridge code should not have to deal with that.
1643 if (sgen_need_bridge_processing ())
1644 sgen_bridge_reset_data ();
1647 * Walk the ephemeron tables marking all values with reachable keys. This must be completely done
1648 * before processing finalizable objects and non-tracking weak links to avoid finalizing/clearing
1649 * objects that are in fact reachable.
1651 done_with_ephemerons = 0;
1653 done_with_ephemerons = mark_ephemerons_in_range (ctx);
1654 sgen_drain_gray_stack (-1, ctx);
1656 } while (!done_with_ephemerons);
1658 sgen_mark_togglerefs (start_addr, end_addr, ctx);
1660 if (sgen_need_bridge_processing ()) {
1661 /*Make sure the gray stack is empty before we process bridge objects so we get liveness right*/
1662 sgen_drain_gray_stack (-1, ctx);
1663 sgen_collect_bridge_objects (generation, ctx);
1664 if (generation == GENERATION_OLD)
1665 sgen_collect_bridge_objects (GENERATION_NURSERY, ctx);
1668 Do the first bridge step here, as the collector liveness state will become useless after that.
1670 An important optimization is to only proccess the possibly dead part of the object graph and skip
1671 over all live objects as we transitively know everything they point must be alive too.
1673 The above invariant is completely wrong if we let the gray queue be drained and mark/copy everything.
1675 This has the unfortunate side effect of making overflow collections perform the first step twice, but
1676 given we now have heuristics that perform major GC in anticipation of minor overflows this should not
1679 sgen_bridge_processing_stw_step ();
1683 Make sure we drain the gray stack before processing disappearing links and finalizers.
1684 If we don't make sure it is empty we might wrongly see a live object as dead.
1686 sgen_drain_gray_stack (-1, ctx);
1689 We must clear weak links that don't track resurrection before processing object ready for
1690 finalization so they can be cleared before that.
1692 sgen_null_link_in_range (generation, TRUE, ctx);
1693 if (generation == GENERATION_OLD)
1694 sgen_null_link_in_range (GENERATION_NURSERY, TRUE, ctx);
1697 /* walk the finalization queue and move also the objects that need to be
1698 * finalized: use the finalized objects as new roots so the objects they depend
1699 * on are also not reclaimed. As with the roots above, only objects in the nursery
1700 * are marked/copied.
1702 sgen_finalize_in_range (generation, ctx);
1703 if (generation == GENERATION_OLD)
1704 sgen_finalize_in_range (GENERATION_NURSERY, ctx);
1705 /* drain the new stack that might have been created */
1706 SGEN_LOG (6, "Precise scan of gray area post fin");
1707 sgen_drain_gray_stack (-1, ctx);
1710 * This must be done again after processing finalizable objects since CWL slots are cleared only after the key is finalized.
1712 done_with_ephemerons = 0;
1714 done_with_ephemerons = mark_ephemerons_in_range (ctx);
1715 sgen_drain_gray_stack (-1, ctx);
1717 } while (!done_with_ephemerons);
1720 * Clear ephemeron pairs with unreachable keys.
1721 * We pass the copy func so we can figure out if an array was promoted or not.
1723 clear_unreachable_ephemerons (ctx);
1726 * We clear togglerefs only after all possible chances of revival are done.
1727 * This is semantically more inline with what users expect and it allows for
1728 * user finalizers to correctly interact with TR objects.
1730 sgen_clear_togglerefs (start_addr, end_addr, ctx);
1733 SGEN_LOG (2, "Finalize queue handling scan for %s generation: %d usecs %d ephemeron rounds", generation_name (generation), TV_ELAPSED (atv, btv), ephemeron_rounds);
1736 * handle disappearing links
1737 * Note we do this after checking the finalization queue because if an object
1738 * survives (at least long enough to be finalized) we don't clear the link.
1739 * This also deals with a possible issue with the monitor reclamation: with the Boehm
1740 * GC a finalized object my lose the monitor because it is cleared before the finalizer is
1743 g_assert (sgen_gray_object_queue_is_empty (queue));
1745 sgen_null_link_in_range (generation, FALSE, ctx);
1746 if (generation == GENERATION_OLD)
1747 sgen_null_link_in_range (GENERATION_NURSERY, FALSE, ctx);
1748 if (sgen_gray_object_queue_is_empty (queue))
1750 sgen_drain_gray_stack (-1, ctx);
1753 g_assert (sgen_gray_object_queue_is_empty (queue));
1755 sgen_gray_object_queue_trim_free_list (queue);
/*
 * sgen_check_section_scan_starts:
 *
 * Debug check: every non-NULL scan-start entry of @section must point at a
 * well-formed object whose size lies in [sizeof (MonoObject),
 * MAX_SMALL_OBJ_SIZE].
 */
1759 sgen_check_section_scan_starts (GCMemSection *section)
1762 for (i = 0; i < section->num_scan_start; ++i) {
1763 if (section->scan_starts [i]) {
1764 mword size = safe_object_get_size ((MonoObject*) section->scan_starts [i]);
1765 g_assert (size >= sizeof (MonoObject) && size <= MAX_SMALL_OBJ_SIZE);
/* check_scan_starts: runs the scan-start debug checks on the nursery and
 * the major heap when do_scan_starts_check is enabled; no-op otherwise. */
1771 check_scan_starts (void)
1773 if (!do_scan_starts_check)
1775 sgen_check_section_scan_starts (nursery_section);
1776 major_collector.check_scan_starts ();
/*
 * scan_from_registered_roots:
 *
 * Precisely scans all registered roots of @root_type in [addr_start,
 * addr_end), dispatching each root range through its descriptor via
 * precisely_scan_objects_from.
 */
1780 scan_from_registered_roots (char *addr_start, char *addr_end, int root_type, ScanCopyContext ctx)
1784 SGEN_HASH_TABLE_FOREACH (&roots_hash [root_type], start_root, root) {
1785 SGEN_LOG (6, "Precise root scan %p-%p (desc: %p)", start_root, root->end_root, (void*)root->root_desc);
1786 precisely_scan_objects_from (start_root, (void**)root->end_root, addr_start, addr_end, root->root_desc, ctx);
1787 } SGEN_HASH_TABLE_FOREACH_END;
/* sgen_dump_occupied: writes an <occupied> XML element for the byte range
 * [start, end) relative to section_start into the heap dump file. */
1791 sgen_dump_occupied (char *start, char *end, char *section_start)
1793 fprintf (heap_dump_file, "<occupied offset=\"%td\" size=\"%td\"/>\n", start - section_start, end - start);
/*
 * sgen_dump_section:
 *
 * Writes a <section> XML element describing @section to the heap dump
 * file: walks the section word by word, coalescing live objects into
 * <occupied> ranges and emitting an <object> element per object.
 *
 * NOTE(review): several lines (occ_start bookkeeping, loop continuations,
 * braces) are missing from this excerpt — verify against upstream
 * sgen-gc.c.
 */
1797 sgen_dump_section (GCMemSection *section, const char *type)
1799 char *start = section->data;
1800 char *end = section->data + section->size;
1801 char *occ_start = NULL;
1803 char *old_start = NULL; /* just for debugging */
1805 fprintf (heap_dump_file, "<section type=\"%s\" size=\"%lu\">\n", type, (unsigned long)section->size);
1807 while (start < end) {
/* A NULL vtable word means unallocated space: flush any pending
 * occupied range and skip a pointer-sized word. */
1811 if (!*(void**)start) {
1813 sgen_dump_occupied (occ_start, start, section->data);
1816 start += sizeof (void*); /* should be ALLOC_ALIGN, really */
1819 g_assert (start < section->next_data);
1824 vt = (GCVTable*)LOAD_VTABLE (start);
1827 size = ALIGN_UP (safe_object_get_size ((MonoObject*) start));
1830 fprintf (heap_dump_file, "<object offset=\"%d\" class=\"%s.%s\" size=\"%d\"/>\n",
1831 start - section->data,
1832 vt->klass->name_space, vt->klass->name,
1840 sgen_dump_occupied (occ_start, start, section->data);
1842 fprintf (heap_dump_file, "</section>\n");
/*
 * dump_object:
 *
 * Writes an <object> XML element for @obj to the heap dump file,
 * sanitizing the class name (angle brackets and quotes stripped so the
 * dump stays parseable) and optionally tagging the object's location
 * (nursery / major / LOS).
 *
 * NOTE(review): loop-index increments, the "major"/"LOS" location
 * branches and braces are missing from this excerpt — verify against
 * upstream sgen-gc.c.
 */
1846 dump_object (MonoObject *obj, gboolean dump_location)
1848 static char class_name [1024];
1850 MonoClass *class = mono_object_class (obj);
1854 * Python's XML parser is too stupid to parse angle brackets
1855 * in strings, so we just ignore them;
1858 while (class->name [i] && j < sizeof (class_name) - 1) {
1859 if (!strchr ("<>\"", class->name [i]))
1860 class_name [j++] = class->name [i];
1863 g_assert (j < sizeof (class_name));
1866 fprintf (heap_dump_file, "<object class=\"%s.%s\" size=\"%zd\"",
1867 class->name_space, class_name,
1868 safe_object_get_size (obj));
1869 if (dump_location) {
1870 const char *location;
1871 if (ptr_in_nursery (obj))
1872 location = "nursery";
1873 else if (safe_object_get_size (obj) <= MAX_SMALL_OBJ_SIZE)
1877 fprintf (heap_dump_file, " location=\"%s\"", location);
1879 fprintf (heap_dump_file, "/>\n");
/*
 * dump_heap:
 *
 * Writes a complete <collection> XML record to the heap dump file:
 * mempool/internal memory usage, pin statistics, all pinned objects, the
 * nursery section, the major heap, and the large-object space.
 */
1883 dump_heap (const char *type, int num, const char *reason)
1888 fprintf (heap_dump_file, "<collection type=\"%s\" num=\"%d\"", type, num);
1890 fprintf (heap_dump_file, " reason=\"%s\"", reason);
1891 fprintf (heap_dump_file, ">\n");
1892 fprintf (heap_dump_file, "<other-mem-usage type=\"mempools\" size=\"%ld\"/>\n", mono_mempool_get_bytes_allocated ());
1893 sgen_dump_internal_mem_usage (heap_dump_file);
1894 fprintf (heap_dump_file, "<pinned type=\"stack\" bytes=\"%zu\"/>\n", sgen_pin_stats_get_pinned_byte_count (PIN_TYPE_STACK));
1895 /* fprintf (heap_dump_file, "<pinned type=\"static-data\" bytes=\"%d\"/>\n", pinned_byte_counts [PIN_TYPE_STATIC_DATA]); */
1896 fprintf (heap_dump_file, "<pinned type=\"other\" bytes=\"%zu\"/>\n", sgen_pin_stats_get_pinned_byte_count (PIN_TYPE_OTHER));
1898 fprintf (heap_dump_file, "<pinned-objects>\n");
1899 for (list = sgen_pin_stats_get_object_list (); list; list = list->next)
1900 dump_object (list->obj, TRUE);
1901 fprintf (heap_dump_file, "</pinned-objects>\n");
1903 sgen_dump_section (nursery_section, "nursery");
1905 major_collector.dump_heap (heap_dump_file);
1907 fprintf (heap_dump_file, "<los>\n");
1908 for (bigobj = los_object_list; bigobj; bigobj = bigobj->next)
1909 dump_object ((MonoObject*)bigobj->data, FALSE);
1910 fprintf (heap_dump_file, "</los>\n");
1912 fprintf (heap_dump_file, "</collection>\n");
/*
 * sgen_register_moved_object:
 *
 * Records an (old, new) address pair in the moved_objects buffer for the
 * profiler's GC-moves event; flushes the buffer to the profiler when full.
 * Only valid while MONO_PROFILE_GC_MOVES is enabled (asserted).
 */
1916 sgen_register_moved_object (void *obj, void *destination)
1918 g_assert (mono_profiler_events & MONO_PROFILE_GC_MOVES);
1920 if (moved_objects_idx == MOVED_OBJECTS_NUM) {
1921 mono_profiler_gc_moves (moved_objects, moved_objects_idx);
1922 moved_objects_idx = 0;
1924 moved_objects [moved_objects_idx++] = obj;
1925 moved_objects [moved_objects_idx++] = destination;
/*
 * Registers all GC timing and (under HEAVY_STATISTICS) heavy counters with
 * mono_counters; guarded by a static 'inited' flag so registration happens
 * once.
 *
 * NOTE(review): the function header is missing from this excerpt —
 * presumably this is the body of init_stats(); confirm against upstream
 * sgen-gc.c.
 */
1931 static gboolean inited = FALSE;
1936 mono_counters_register ("Collection max time", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME | MONO_COUNTER_MONOTONIC, &time_max);
1938 mono_counters_register ("Minor fragment clear", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &time_minor_pre_collection_fragment_clear);
1939 mono_counters_register ("Minor pinning", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &time_minor_pinning);
1940 mono_counters_register ("Minor scan remembered set", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &time_minor_scan_remsets);
1941 mono_counters_register ("Minor scan pinned", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &time_minor_scan_pinned);
1942 mono_counters_register ("Minor scan registered roots", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &time_minor_scan_registered_roots);
1943 mono_counters_register ("Minor scan thread data", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &time_minor_scan_thread_data);
1944 mono_counters_register ("Minor finish gray stack", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &time_minor_finish_gray_stack);
1945 mono_counters_register ("Minor fragment creation", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &time_minor_fragment_creation);
1947 mono_counters_register ("Major fragment clear", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &time_major_pre_collection_fragment_clear);
1948 mono_counters_register ("Major pinning", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &time_major_pinning);
1949 mono_counters_register ("Major scan pinned", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &time_major_scan_pinned);
1950 mono_counters_register ("Major scan registered roots", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &time_major_scan_registered_roots);
1951 mono_counters_register ("Major scan thread data", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &time_major_scan_thread_data);
1952 mono_counters_register ("Major scan alloc_pinned", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &time_major_scan_alloc_pinned);
1953 mono_counters_register ("Major scan finalized", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &time_major_scan_finalized);
1954 mono_counters_register ("Major scan big objects", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &time_major_scan_big_objects);
1955 mono_counters_register ("Major finish gray stack", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &time_major_finish_gray_stack);
1956 mono_counters_register ("Major free big objects", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &time_major_free_bigobjs);
1957 mono_counters_register ("Major LOS sweep", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &time_major_los_sweep);
1958 mono_counters_register ("Major sweep", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &time_major_sweep);
1959 mono_counters_register ("Major fragment creation", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &time_major_fragment_creation);
1961 mono_counters_register ("Number of pinned objects", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_pinned_objects);
1963 #ifdef HEAVY_STATISTICS
1964 mono_counters_register ("WBarrier remember pointer", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_add_to_global_remset);
1965 mono_counters_register ("WBarrier set field", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_set_field);
1966 mono_counters_register ("WBarrier set arrayref", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_set_arrayref);
1967 mono_counters_register ("WBarrier arrayref copy", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_arrayref_copy);
1968 mono_counters_register ("WBarrier generic store called", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_generic_store);
1969 mono_counters_register ("WBarrier generic atomic store called", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_generic_store_atomic);
1970 mono_counters_register ("WBarrier set root", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_set_root);
1971 mono_counters_register ("WBarrier value copy", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_value_copy);
1972 mono_counters_register ("WBarrier object copy", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_object_copy);
1974 mono_counters_register ("# objects allocated degraded", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_objects_alloced_degraded);
1975 mono_counters_register ("bytes allocated degraded", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_bytes_alloced_degraded);
1977 mono_counters_register ("# copy_object() called (nursery)", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_copy_object_called_nursery);
1978 mono_counters_register ("# objects copied (nursery)", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_objects_copied_nursery);
1979 mono_counters_register ("# copy_object() called (major)", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_copy_object_called_major);
1980 mono_counters_register ("# objects copied (major)", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_objects_copied_major);
1982 mono_counters_register ("# scan_object() called (nursery)", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_scan_object_called_nursery);
1983 mono_counters_register ("# scan_object() called (major)", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_scan_object_called_major);
1985 mono_counters_register ("Slots allocated in vain", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_slots_allocated_in_vain);
1987 mono_counters_register ("# nursery copy_object() failed from space", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_nursery_copy_object_failed_from_space);
1988 mono_counters_register ("# nursery copy_object() failed forwarded", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_nursery_copy_object_failed_forwarded);
1989 mono_counters_register ("# nursery copy_object() failed pinned", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_nursery_copy_object_failed_pinned);
1990 mono_counters_register ("# nursery copy_object() failed to space", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_nursery_copy_object_failed_to_space);
1992 sgen_nursery_allocator_init_heavy_stats ();
1993 sgen_alloc_init_heavy_stats ();
/*
 * Reset the running total of bytes that were pinned because an
 * allocation (evacuation) failed during a collection.
 * NOTE(review): this excerpt is missing lines (return type, braces).
 */
2001 reset_pinned_from_failed_allocation (void)
2003 bytes_pinned_from_failed_allocation = 0;
/*
 * Account for `objsize` bytes of an object that had to be pinned
 * because evacuating it failed (out of memory during collection).
 * NOTE(review): this excerpt is missing lines (return type, braces).
 */
2007 sgen_set_pinned_from_failed_allocation (mword objsize)
2009 bytes_pinned_from_failed_allocation += objsize;
/*
 * Whether the collection currently in progress is concurrent.
 * Dispatches on current_collection_generation: for GENERATION_OLD it
 * reflects concurrent_collection_in_progress; any other generation
 * value aborts via g_error.
 * NOTE(review): lines are missing from this excerpt — presumably the
 * GENERATION_NURSERY case returns FALSE and there is a default label;
 * confirm against the full source.
 */
2013 sgen_collection_is_concurrent (void)
2015 switch (current_collection_generation) {
2016 case GENERATION_NURSERY:
2018 case GENERATION_OLD:
2019 return concurrent_collection_in_progress;
2021 g_error ("Invalid current generation %d", current_collection_generation);
/*
 * Accessor: whether a concurrent (old-generation) collection is in
 * progress, regardless of which generation is being collected now.
 */
2026 sgen_concurrent_collection_in_progress (void)
2028 return concurrent_collection_in_progress;
/* Tail of the FinishRememberedSetScanJobData struct; its heap_start /
 * heap_end fields are referenced below but not visible in this excerpt. */
2035 } FinishRememberedSetScanJobData;
/*
 * Worker job: finish scanning the remembered sets over the job's
 * [heap_start, heap_end) range into this worker's gray queue, then
 * free the heap-allocated job data.
 */
2038 job_finish_remembered_set_scan (WorkerData *worker_data, void *job_data_untyped)
2040 FinishRememberedSetScanJobData *job_data = job_data_untyped;
2042 remset.finish_scan_remsets (job_data->heap_start, job_data->heap_end, sgen_workers_get_job_gray_queue (worker_data));
2043 sgen_free_internal_dynamic (job_data, sizeof (FinishRememberedSetScanJobData), INTERNAL_MEM_WORKER_JOB_DATA);
/* Job payload for scanning registered roots: the copy/mark and scan
 * callbacks plus (not all fields visible in this excerpt) the heap
 * range and root type to scan. */
2048 CopyOrMarkObjectFunc copy_or_mark_func;
2049 ScanObjectFunc scan_func;
2053 } ScanFromRegisteredRootsJobData;
/*
 * Worker job: scan all registered roots of job_data->root_type within
 * [heap_start, heap_end) using the supplied scan/copy functions, then
 * free the job data.
 */
2056 job_scan_from_registered_roots (WorkerData *worker_data, void *job_data_untyped)
2058 ScanFromRegisteredRootsJobData *job_data = job_data_untyped;
2059 ScanCopyContext ctx = { job_data->scan_func, job_data->copy_or_mark_func,
2060 sgen_workers_get_job_gray_queue (worker_data) };
2062 scan_from_registered_roots (job_data->heap_start, job_data->heap_end, job_data->root_type, ctx);
2063 sgen_free_internal_dynamic (job_data, sizeof (ScanFromRegisteredRootsJobData), INTERNAL_MEM_WORKER_JOB_DATA);
/* Tail of ScanThreadDataJobData; its heap_start / heap_end fields are
 * referenced below but not visible in this excerpt. */
2070 } ScanThreadDataJobData;
/*
 * Worker job: conservatively scan thread stacks/registers for pointers
 * into [heap_start, heap_end) (the TRUE argument's meaning is not
 * visible here — presumably "precise" or "do scan"; confirm against
 * scan_thread_data's signature), then free the job data.
 */
2073 job_scan_thread_data (WorkerData *worker_data, void *job_data_untyped)
2075 ScanThreadDataJobData *job_data = job_data_untyped;
2077 scan_thread_data (job_data->heap_start, job_data->heap_end, TRUE,
2078 sgen_workers_get_job_gray_queue (worker_data));
2079 sgen_free_internal_dynamic (job_data, sizeof (ScanThreadDataJobData), INTERNAL_MEM_WORKER_JOB_DATA);
/*
 * Worker job: mark/scan the objects on a finalizer-ready list so they
 * survive long enough to be finalized. The job data is the list head
 * itself and is NOT freed here (unlike the other jobs above, it is not
 * dynamically allocated job data).
 */
2083 job_scan_finalizer_entries (WorkerData *worker_data, void *job_data_untyped)
2085 FinalizeReadyEntry *list = job_data_untyped;
2086 ScanCopyContext ctx = { NULL, current_object_ops.copy_or_mark_object, sgen_workers_get_job_gray_queue (worker_data) };
2088 scan_finalizer_entries (list, ctx);
/*
 * Worker job: scan the major heap's mod-union card table (TRUE selects
 * the mod-union variant). Only valid while a concurrent collection is
 * in progress, as asserted.
 */
2092 job_scan_major_mod_union_cardtable (WorkerData *worker_data, void *job_data_untyped)
2094 g_assert (concurrent_collection_in_progress);
2095 major_collector.scan_card_table (TRUE, sgen_workers_get_job_gray_queue (worker_data));
/*
 * Worker job: scan the LOS (large object space) mod-union card table.
 * Only valid while a concurrent collection is in progress, as asserted.
 */
2099 job_scan_los_mod_union_cardtable (WorkerData *worker_data, void *job_data_untyped)
2101 g_assert (concurrent_collection_in_progress);
2102 sgen_los_scan_card_table (TRUE, sgen_workers_get_job_gray_queue (worker_data));
/*
 * Debug check: log any nursery scan-start entry that points strictly
 * inside the object [start, end) — a scan start must never land in the
 * middle of an object.
 */
2106 verify_scan_starts (char *start, char *end)
2110 for (i = 0; i < nursery_section->num_scan_start; ++i) {
2111 char *addr = nursery_section->scan_starts [i];
2112 if (addr > start && addr < end)
2113 SGEN_LOG (1, "NFC-BAD SCAN START [%zu] %p for obj [%p %p]", i, addr, start, end);
/*
 * Debug walk of the whole nursery: logs forwarded/pinned objects,
 * validates scan starts against each object's extent, optionally dumps
 * holes and object layout, and checks canaries when enabled.
 * Fix: reuse the size already fetched into `ss` instead of calling
 * safe_object_get_size() a second time on the same object.
 */
2118 verify_nursery (void)
2120 char *start, *end, *cur, *hole_start;
2122 if (!do_verify_nursery)
2125 if (nursery_canaries_enabled ())
2126 SGEN_LOG (1, "Checking nursery canaries...");
2128 /*This cleans up unused fragments */
2129 sgen_nursery_allocator_prepare_for_pinning ();
2131 hole_start = start = cur = sgen_get_nursery_start ();
2132 end = sgen_get_nursery_end ();
/* A NULL vtable word marks unallocated space: skip word by word. */
2137 if (!*(void**)cur) {
2138 cur += sizeof (void*);
2142 if (object_is_forwarded (cur))
2143 SGEN_LOG (1, "FORWARDED OBJ %p", cur);
2144 else if (object_is_pinned (cur))
2145 SGEN_LOG (1, "PINNED OBJ %p", cur);
2147 ss = safe_object_get_size ((MonoObject*)cur);
2148 size = ALIGN_UP (ss);
2149 verify_scan_starts (cur, cur + size);
2150 if (do_dump_nursery_content) {
2151 if (cur > hole_start)
2152 SGEN_LOG (1, "HOLE [%p %p %d]", hole_start, cur, (int)(cur - hole_start));
2153 SGEN_LOG (1, "OBJ [%p %p %d %d %s %d]", cur, cur + size, (int)size, (int)ss, sgen_safe_name ((MonoObject*)cur), (gpointer)LOAD_VTABLE (cur) == sgen_get_array_fill_vtable ());
/* Array-fill filler objects carry no canary; skip them. */
2155 if (nursery_canaries_enabled () && (MonoVTable*)SGEN_LOAD_VTABLE (cur) != array_fill_vtable) {
2156 CHECK_CANARY_FOR_OBJECT (cur);
2157 CANARIFY_SIZE (size);
/* (review) Fixed "fowarded" typo below and removed a redundant second
 * call to safe_object_get_size() by reusing `ss`. */
2165 * Checks that no objects in the nursery are forwarded or pinned. This
2166 * is a precondition to restarting the mutator while doing a
2167 * concurrent collection. Note that we don't clear fragments because
2168 * we depend on that having happened earlier.
2171 check_nursery_is_clean (void)
2173 char *start, *end, *cur;
2175 start = cur = sgen_get_nursery_start ();
2176 end = sgen_get_nursery_end ();
/* A NULL vtable word marks unallocated space: skip word by word. */
2181 if (!*(void**)cur) {
2182 cur += sizeof (void*);
2186 g_assert (!object_is_forwarded (cur));
2187 g_assert (!object_is_pinned (cur));
2189 ss = safe_object_get_size ((MonoObject*)cur);
2190 size = ALIGN_UP (ss);
2191 verify_scan_starts (cur, cur + size);
/*
 * Initialize the main gray queue for this collection; for concurrent
 * collections also set up the workers' distribute gray queue first.
 */
2198 init_gray_queue (void)
2200 if (sgen_collection_is_concurrent ())
2201 sgen_workers_init_distribute_gray_queue ();
2202 sgen_gray_object_queue_init (&gray_queue, NULL);
/*
 * NOTE(review): this excerpt is missing interleaved lines (braces,
 * returns, TV_GETTIME calls between the timing accumulations); the
 * code below is annotated as-is, do not infer missing control flow.
 */
2206 * Perform a nursery collection.
2208 * Return whether any objects were late-pinned due to being out of memory.
2211 collect_nursery (SgenGrayQueue *unpin_queue, gboolean finish_up_concurrent_mark)
2213 gboolean needs_major;
2214 size_t max_garbage_amount;
2216 FinishRememberedSetScanJobData *frssjd;
2217 ScanFromRegisteredRootsJobData *scrrjd_normal, *scrrjd_wbarrier;
2218 ScanThreadDataJobData *stdjd;
2219 mword fragment_total;
2220 ScanCopyContext ctx;
2224 if (disable_minor_collections)
2227 TV_GETTIME (last_minor_collection_start_tv);
2228 atv = last_minor_collection_start_tv;
2230 MONO_GC_BEGIN (GENERATION_NURSERY);
2231 binary_protocol_collection_begin (gc_stats.minor_gc_count, GENERATION_NURSERY);
2235 #ifndef DISABLE_PERFCOUNTERS
2236 mono_perfcounters->gc_collections0++;
/* Minor collections always use the minor collector's serial object ops. */
2239 current_collection_generation = GENERATION_NURSERY;
2240 current_object_ops = sgen_minor_collector.serial_ops;
2242 reset_pinned_from_failed_allocation ();
2244 check_scan_starts ();
2246 sgen_nursery_alloc_prepare_for_minor ();
2250 nursery_next = sgen_nursery_alloc_get_upper_alloc_bound ();
2251 /* FIXME: optimize later to use the higher address where an object can be present */
2252 nursery_next = MAX (nursery_next, sgen_get_nursery_end ());
2254 SGEN_LOG (1, "Start nursery collection %d %p-%p, size: %d", gc_stats.minor_gc_count, sgen_get_nursery_start (), nursery_next, (int)(nursery_next - sgen_get_nursery_start ()));
2255 max_garbage_amount = nursery_next - sgen_get_nursery_start ();
2256 g_assert (nursery_section->size >= max_garbage_amount);
2258 /* world must be stopped already */
2260 time_minor_pre_collection_fragment_clear += TV_ELAPSED (atv, btv);
2262 if (xdomain_checks) {
2263 sgen_clear_nursery_fragments ();
2264 sgen_check_for_xdomain_refs ();
2267 nursery_section->next_data = nursery_next;
2269 major_collector.start_nursery_collection ();
2271 sgen_memgov_minor_collection_start ();
2275 gc_stats.minor_gc_count ++;
/* Optional heavy debug checks before collecting. */
2277 if (whole_heap_check_before_collection) {
2278 sgen_clear_nursery_fragments ();
2279 sgen_check_whole_heap (finish_up_concurrent_mark);
2281 if (consistency_check_at_minor_collection)
2282 sgen_check_consistency ();
2284 MONO_GC_CHECKPOINT_1 (GENERATION_NURSERY);
2286 sgen_process_fin_stage_entries ();
2287 sgen_process_dislink_stage_entries ();
2289 MONO_GC_CHECKPOINT_2 (GENERATION_NURSERY);
/* Pinning phase: conservative roots, cemented objects, then queue setup. */
2291 /* pin from pinned handles */
2292 sgen_init_pinning ();
2293 mono_profiler_gc_event (MONO_GC_EVENT_MARK_START, 0);
2294 pin_from_roots (sgen_get_nursery_start (), nursery_next, WORKERS_DISTRIBUTE_GRAY_QUEUE);
2295 /* pin cemented objects */
2296 sgen_pin_cemented_objects ();
2297 /* identify pinned objects */
2298 sgen_optimize_pin_queue ();
2299 sgen_pinning_setup_section (nursery_section);
2300 ctx.scan_func = NULL;
2301 ctx.copy_func = NULL;
2302 ctx.queue = WORKERS_DISTRIBUTE_GRAY_QUEUE;
2303 pin_objects_in_nursery (ctx);
2304 sgen_pinning_trim_queue_to_section (nursery_section);
2307 time_minor_pinning += TV_ELAPSED (btv, atv);
2308 SGEN_LOG (2, "Finding pinned pointers: %zd in %d usecs", sgen_get_pinned_count (), TV_ELAPSED (btv, atv));
2309 SGEN_LOG (4, "Start scan with %zd pinned objects", sgen_get_pinned_count ());
2311 MONO_GC_CHECKPOINT_3 (GENERATION_NURSERY);
/* Remembered-set scan covers the whole nursery range. */
2313 frssjd = sgen_alloc_internal_dynamic (sizeof (FinishRememberedSetScanJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
2314 frssjd->heap_start = sgen_get_nursery_start ();
2315 frssjd->heap_end = nursery_next;
2316 sgen_workers_enqueue_job (job_finish_remembered_set_scan, frssjd);
2318 /* we don't have complete write barrier yet, so we scan all the old generation sections */
2320 time_minor_scan_remsets += TV_ELAPSED (atv, btv);
2321 SGEN_LOG (2, "Old generation scan: %d usecs", TV_ELAPSED (atv, btv));
2323 MONO_GC_CHECKPOINT_4 (GENERATION_NURSERY);
2325 /* FIXME: why is this here? */
2326 ctx.scan_func = current_object_ops.scan_object;
2327 ctx.copy_func = NULL;
2328 ctx.queue = &gray_queue;
2329 sgen_drain_gray_stack (-1, ctx);
2331 if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS)
2332 report_registered_roots ();
2333 if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS)
2334 report_finalizer_roots ();
2336 time_minor_scan_pinned += TV_ELAPSED (btv, atv);
2338 MONO_GC_CHECKPOINT_5 (GENERATION_NURSERY);
2340 /* registered roots, this includes static fields */
2341 scrrjd_normal = sgen_alloc_internal_dynamic (sizeof (ScanFromRegisteredRootsJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
2342 scrrjd_normal->copy_or_mark_func = current_object_ops.copy_or_mark_object;
2343 scrrjd_normal->scan_func = current_object_ops.scan_object;
2344 scrrjd_normal->heap_start = sgen_get_nursery_start ();
2345 scrrjd_normal->heap_end = nursery_next;
2346 scrrjd_normal->root_type = ROOT_TYPE_NORMAL;
2347 sgen_workers_enqueue_job (job_scan_from_registered_roots, scrrjd_normal);
/* Same scan again for write-barrier roots. */
2349 scrrjd_wbarrier = sgen_alloc_internal_dynamic (sizeof (ScanFromRegisteredRootsJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
2350 scrrjd_wbarrier->copy_or_mark_func = current_object_ops.copy_or_mark_object;
2351 scrrjd_wbarrier->scan_func = current_object_ops.scan_object;
2352 scrrjd_wbarrier->heap_start = sgen_get_nursery_start ();
2353 scrrjd_wbarrier->heap_end = nursery_next;
2354 scrrjd_wbarrier->root_type = ROOT_TYPE_WBARRIER;
2355 sgen_workers_enqueue_job (job_scan_from_registered_roots, scrrjd_wbarrier);
2358 time_minor_scan_registered_roots += TV_ELAPSED (atv, btv);
2360 MONO_GC_CHECKPOINT_6 (GENERATION_NURSERY);
/* Thread stacks and registers. */
2363 stdjd = sgen_alloc_internal_dynamic (sizeof (ScanThreadDataJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
2364 stdjd->heap_start = sgen_get_nursery_start ();
2365 stdjd->heap_end = nursery_next;
2366 sgen_workers_enqueue_job (job_scan_thread_data, stdjd);
2369 time_minor_scan_thread_data += TV_ELAPSED (btv, atv);
2372 MONO_GC_CHECKPOINT_7 (GENERATION_NURSERY);
2374 g_assert (!sgen_collection_is_concurrent ());
2376 /* Scan the list of objects ready for finalization. If */
2377 sgen_workers_enqueue_job (job_scan_finalizer_entries, fin_ready_list);
2378 sgen_workers_enqueue_job (job_scan_finalizer_entries, critical_fin_list);
2380 MONO_GC_CHECKPOINT_8 (GENERATION_NURSERY);
2382 finish_gray_stack (GENERATION_NURSERY, &gray_queue);
2384 time_minor_finish_gray_stack += TV_ELAPSED (btv, atv);
2385 mono_profiler_gc_event (MONO_GC_EVENT_MARK_END, 0);
2387 MONO_GC_CHECKPOINT_9 (GENERATION_NURSERY);
2390 * The (single-threaded) finalization code might have done
2391 * some copying/marking so we can only reset the GC thread's
2392 * worker data here instead of earlier when we joined the
2395 sgen_workers_reset_data ();
/* Objects pinned during finalization: redo pin-queue setup. */
2397 if (objects_pinned) {
2398 sgen_optimize_pin_queue ();
2399 sgen_pinning_setup_section (nursery_section);
2402 /* walk the pin_queue, build up the fragment list of free memory, unmark
2403 * pinned objects as we go, memzero() the empty fragments so they are ready for the
2406 mono_profiler_gc_event (MONO_GC_EVENT_RECLAIM_START, 0);
2407 fragment_total = sgen_build_nursery_fragments (nursery_section, unpin_queue);
2408 if (!fragment_total)
2411 /* Clear TLABs for all threads */
2412 sgen_clear_tlabs ();
2414 mono_profiler_gc_event (MONO_GC_EVENT_RECLAIM_END, 0);
2416 time_minor_fragment_creation += TV_ELAPSED (atv, btv);
2417 SGEN_LOG (2, "Fragment creation: %d usecs, %lu bytes available", TV_ELAPSED (atv, btv), (unsigned long)fragment_total);
2419 if (consistency_check_at_minor_collection)
2420 sgen_check_major_refs ();
2422 major_collector.finish_nursery_collection ();
2424 TV_GETTIME (last_minor_collection_end_tv);
2425 gc_stats.minor_gc_time += TV_ELAPSED (last_minor_collection_start_tv, last_minor_collection_end_tv);
2428 dump_heap ("minor", gc_stats.minor_gc_count - 1, NULL);
2430 /* prepare the pin queue for the next collection */
2431 sgen_finish_pinning ();
2432 if (fin_ready_list || critical_fin_list) {
2433 SGEN_LOG (4, "Finalizer-thread wakeup: ready %d", num_ready_finalizers);
2434 mono_gc_finalize_notify ();
2436 sgen_pin_stats_reset ();
2437 /* clear cemented hash */
2438 sgen_cement_clear_below_threshold ();
2440 g_assert (sgen_gray_object_queue_is_empty (&gray_queue));
2442 remset.finish_minor_collection ();
2444 check_scan_starts ();
2446 binary_protocol_flush_buffers (FALSE);
2448 sgen_memgov_minor_collection_end ();
2450 /*objects are late pinned because of lack of memory, so a major is a good call*/
2451 needs_major = objects_pinned > 0;
2452 current_collection_generation = -1;
2455 MONO_GC_END (GENERATION_NURSERY);
2456 binary_protocol_collection_end (gc_stats.minor_gc_count - 1, GENERATION_NURSERY, 0, 0);
2458 if (check_nursery_objects_pinned && !sgen_minor_collector.is_split)
2459 sgen_check_nursery_objects_pinned (unpin_queue != NULL);
/*
 * Per-object callback used by scan_nursery_objects: scan one nursery
 * object with the context's scan function.
 */
2465 scan_nursery_objects_callback (char *obj, size_t size, ScanCopyContext *ctx)
2468 * This is called on all objects in the nursery, including pinned ones, so we need
2469 * to use sgen_obj_get_descriptor_safe(), which masks out the vtable tag bits.
2471 ctx->scan_func (obj, sgen_obj_get_descriptor_safe (obj), ctx->queue);
/*
 * Scan every object in the nursery section (pinned or not) through
 * scan_nursery_objects_callback with the given context.
 */
2475 scan_nursery_objects (ScanCopyContext ctx)
2477 sgen_scan_area_with_callback (nursery_section->data, nursery_section->end_data,
2478 (IterateObjectCallbackFunc)scan_nursery_objects_callback, (void*)&ctx, FALSE);
/*
 * Root phase of a major collection: pin, then enqueue all the root
 * scanning jobs (registered roots, thread data, finalizer lists, and —
 * when finishing a concurrent mark — the mod-union card tables).
 * NOTE(review): this excerpt is missing interleaved lines (braces,
 * some TV_GETTIME calls, #endif lines); annotations describe only the
 * visible code.
 */
2482 major_copy_or_mark_from_roots (size_t *old_next_pin_slot, gboolean start_concurrent_mark, gboolean finish_up_concurrent_mark, gboolean scan_mod_union, gboolean scan_whole_nursery)
2487 /* FIXME: only use these values for the precise scan
2488 * note that to_space pointers should be excluded anyway...
2490 char *heap_start = NULL;
2491 char *heap_end = (char*)-1;
2492 gboolean profile_roots = mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS;
2493 GCRootReport root_report = { 0 };
2494 ScanFromRegisteredRootsJobData *scrrjd_normal, *scrrjd_wbarrier;
2495 ScanThreadDataJobData *stdjd;
2496 ScanCopyContext ctx;
2498 if (concurrent_collection_in_progress) {
2499 /*This cleans up unused fragments */
2500 sgen_nursery_allocator_prepare_for_pinning ();
2502 if (do_concurrent_checks)
2503 check_nursery_is_clean ();
2505 /* The concurrent collector doesn't touch the nursery. */
2506 sgen_nursery_alloc_prepare_for_major ();
2513 /* Pinning depends on this */
2514 sgen_clear_nursery_fragments ();
2516 if (whole_heap_check_before_collection)
2517 sgen_check_whole_heap (finish_up_concurrent_mark);
2520 time_major_pre_collection_fragment_clear += TV_ELAPSED (atv, btv);
2522 if (!sgen_collection_is_concurrent ())
2523 nursery_section->next_data = sgen_get_nursery_end ();
2524 /* we should also coalesce scanning from sections close to each other
2525 * and deal with pointers outside of the sections later.
2529 *major_collector.have_swept = FALSE;
2531 if (xdomain_checks) {
2532 sgen_clear_nursery_fragments ();
2533 sgen_check_for_xdomain_refs ();
2536 if (!concurrent_collection_in_progress) {
2537 /* Remsets are not useful for a major collection */
2538 remset.prepare_for_major_collection ();
2541 sgen_process_fin_stage_entries ();
2542 sgen_process_dislink_stage_entries ();
/* Pinning phase: collect candidate addresses from the whole heap range. */
2545 sgen_init_pinning ();
2546 SGEN_LOG (6, "Collecting pinned addresses");
2547 pin_from_roots ((void*)lowest_heap_address, (void*)highest_heap_address, WORKERS_DISTRIBUTE_GRAY_QUEUE);
2549 if (!concurrent_collection_in_progress || finish_up_concurrent_mark) {
2550 if (major_collector.is_concurrent) {
2552 * The concurrent major collector cannot evict
2553 * yet, so we need to pin cemented objects to
2554 * not break some asserts.
2556 * FIXME: We could evict now!
2558 sgen_pin_cemented_objects ();
2561 if (!concurrent_collection_in_progress)
2562 sgen_cement_reset ();
2565 sgen_optimize_pin_queue ();
2568 * pin_queue now contains all candidate pointers, sorted and
2569 * uniqued. We must do two passes now to figure out which
2570 * objects are pinned.
2572 * The first is to find within the pin_queue the area for each
2573 * section. This requires that the pin_queue be sorted. We
2574 * also process the LOS objects and pinned chunks here.
2576 * The second, destructive, pass is to reduce the section
2577 * areas to pointers to the actually pinned objects.
2579 SGEN_LOG (6, "Pinning from sections");
2580 /* first pass for the sections */
2581 sgen_find_section_pin_queue_start_end (nursery_section);
2582 major_collector.find_pin_queue_start_ends (WORKERS_DISTRIBUTE_GRAY_QUEUE);
2583 /* identify possible pointers to the inside of large objects */
2584 SGEN_LOG (6, "Pinning from large objects");
2585 for (bigobj = los_object_list; bigobj; bigobj = bigobj->next) {
2587 if (sgen_find_optimized_pin_queue_area (bigobj->data, (char*)bigobj->data + sgen_los_object_size (bigobj), &dummy, &dummy)) {
2588 binary_protocol_pin (bigobj->data, (gpointer)LOAD_VTABLE (bigobj->data), safe_object_get_size (((MonoObject*)(bigobj->data))));
2590 #ifdef ENABLE_DTRACE
2591 if (G_UNLIKELY (MONO_GC_OBJ_PINNED_ENABLED ())) {
2592 MonoVTable *vt = (MonoVTable*)LOAD_VTABLE (bigobj->data);
2593 MONO_GC_OBJ_PINNED ((mword)bigobj->data, sgen_safe_object_get_size ((MonoObject*)bigobj->data), vt->klass->name_space, vt->klass->name, GENERATION_OLD);
2597 if (sgen_los_object_is_pinned (bigobj->data)) {
2598 g_assert (finish_up_concurrent_mark);
2601 sgen_los_pin_object (bigobj->data);
2602 if (SGEN_OBJECT_HAS_REFERENCES (bigobj->data))
2603 GRAY_OBJECT_ENQUEUE (WORKERS_DISTRIBUTE_GRAY_QUEUE, bigobj->data, sgen_obj_get_descriptor (bigobj->data));
2604 if (G_UNLIKELY (do_pin_stats))
2605 sgen_pin_stats_register_object ((char*) bigobj->data, safe_object_get_size ((MonoObject*) bigobj->data));
2606 SGEN_LOG (6, "Marked large object %p (%s) size: %lu from roots", bigobj->data, safe_name (bigobj->data), (unsigned long)sgen_los_object_size (bigobj));
2609 add_profile_gc_root (&root_report, bigobj->data, MONO_PROFILE_GC_ROOT_PINNING | MONO_PROFILE_GC_ROOT_MISC, 0);
2613 notify_gc_roots (&root_report);
2614 /* second pass for the sections */
2615 ctx.scan_func = concurrent_collection_in_progress ? current_object_ops.scan_object : NULL;
2616 ctx.copy_func = NULL;
2617 ctx.queue = WORKERS_DISTRIBUTE_GRAY_QUEUE;
2620 * Concurrent mark never follows references into the nursery. In the start and
2621 * finish pauses we must scan live nursery objects, though.
2623 * In the finish pause we do this conservatively by scanning all nursery objects.
2624 * Previously we would only scan pinned objects here. We assumed that all objects
2625 * that were pinned during the nursery collection immediately preceding this finish
2626 * mark would be pinned again here. Due to the way we get the stack end for the GC
2627 * thread, however, that's not necessarily the case: we scan part of the stack used
2628 * by the GC itself, which changes constantly, so pinning isn't entirely
2631 * The split nursery also complicates things because non-pinned objects can survive
2632 * in the nursery. That's why we need to do a full scan of the nursery for it, too.
2634 * In the future we shouldn't do a preceding nursery collection at all and instead
2635 * do the finish pause with promotion from the nursery.
2637 * A further complication arises when we have late-pinned objects from the preceding
2638 * nursery collection. Those are the result of being out of memory when trying to
2639 * evacuate objects. They won't be found from the roots, so we just scan the whole
2642 * Non-concurrent mark evacuates from the nursery, so it's
2643 * sufficient to just scan pinned nursery objects.
2645 if (scan_whole_nursery || finish_up_concurrent_mark || (concurrent_collection_in_progress && sgen_minor_collector.is_split)) {
2646 scan_nursery_objects (ctx);
2648 pin_objects_in_nursery (ctx);
2649 if (check_nursery_objects_pinned && !sgen_minor_collector.is_split)
2650 sgen_check_nursery_objects_pinned (!concurrent_collection_in_progress || finish_up_concurrent_mark);
2653 major_collector.pin_objects (WORKERS_DISTRIBUTE_GRAY_QUEUE);
2654 if (old_next_pin_slot)
2655 *old_next_pin_slot = sgen_get_pinned_count ();
2658 time_major_pinning += TV_ELAPSED (atv, btv);
2659 SGEN_LOG (2, "Finding pinned pointers: %zd in %d usecs", sgen_get_pinned_count (), TV_ELAPSED (atv, btv));
2660 SGEN_LOG (4, "Start scan with %zd pinned objects", sgen_get_pinned_count ());
2662 major_collector.init_to_space ();
2665 * The concurrent collector doesn't move objects, neither on
2666 * the major heap nor in the nursery, so we can mark even
2667 * before pinning has finished. For the non-concurrent
2668 * collector we start the workers after pinning.
2670 if (start_concurrent_mark) {
2671 sgen_workers_start_all_workers ();
2672 gray_queue_enable_redirect (WORKERS_DISTRIBUTE_GRAY_QUEUE);
2675 #ifdef SGEN_DEBUG_INTERNAL_ALLOC
2676 main_gc_thread = mono_native_thread_self ();
2679 if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS)
2680 report_registered_roots ();
2682 time_major_scan_pinned += TV_ELAPSED (btv, atv);
2684 /* registered roots, this includes static fields */
2685 scrrjd_normal = sgen_alloc_internal_dynamic (sizeof (ScanFromRegisteredRootsJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
2686 scrrjd_normal->copy_or_mark_func = current_object_ops.copy_or_mark_object;
2687 scrrjd_normal->scan_func = current_object_ops.scan_object;
2688 scrrjd_normal->heap_start = heap_start;
2689 scrrjd_normal->heap_end = heap_end;
2690 scrrjd_normal->root_type = ROOT_TYPE_NORMAL;
2691 sgen_workers_enqueue_job (job_scan_from_registered_roots, scrrjd_normal);
/* Same scan again for write-barrier roots. */
2693 scrrjd_wbarrier = sgen_alloc_internal_dynamic (sizeof (ScanFromRegisteredRootsJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
2694 scrrjd_wbarrier->copy_or_mark_func = current_object_ops.copy_or_mark_object;
2695 scrrjd_wbarrier->scan_func = current_object_ops.scan_object;
2696 scrrjd_wbarrier->heap_start = heap_start;
2697 scrrjd_wbarrier->heap_end = heap_end;
2698 scrrjd_wbarrier->root_type = ROOT_TYPE_WBARRIER;
2699 sgen_workers_enqueue_job (job_scan_from_registered_roots, scrrjd_wbarrier);
2702 time_major_scan_registered_roots += TV_ELAPSED (atv, btv);
/* Thread stacks and registers. */
2705 stdjd = sgen_alloc_internal_dynamic (sizeof (ScanThreadDataJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
2706 stdjd->heap_start = heap_start;
2707 stdjd->heap_end = heap_end;
2708 sgen_workers_enqueue_job (job_scan_thread_data, stdjd);
2711 time_major_scan_thread_data += TV_ELAPSED (btv, atv);
2714 time_major_scan_alloc_pinned += TV_ELAPSED (atv, btv);
2716 if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS)
2717 report_finalizer_roots ();
2719 /* scan the list of objects ready for finalization */
2720 sgen_workers_enqueue_job (job_scan_finalizer_entries, fin_ready_list);
2721 sgen_workers_enqueue_job (job_scan_finalizer_entries, critical_fin_list);
2723 if (scan_mod_union) {
2724 g_assert (finish_up_concurrent_mark);
2726 /* Mod union card table */
2727 sgen_workers_enqueue_job (job_scan_major_mod_union_cardtable, NULL);
2728 sgen_workers_enqueue_job (job_scan_los_mod_union_cardtable, NULL);
2732 time_major_scan_finalized += TV_ELAPSED (btv, atv);
2733 SGEN_LOG (2, "Root scan: %d usecs", TV_ELAPSED (btv, atv));
2736 time_major_scan_big_objects += TV_ELAPSED (atv, btv);
/*
 * Post-root-phase cleanup for a concurrent major collection: wait for
 * the worker jobs, finish pinning, reset pin stats, and optionally
 * verify the nursery. Returns early (line missing in this excerpt)
 * when no concurrent collection is in progress.
 */
2740 major_finish_copy_or_mark (void)
2742 if (!concurrent_collection_in_progress)
2746 * Prepare the pin queue for the next collection. Since pinning runs on the worker
2747 * threads we must wait for the jobs to finish before we can reset it.
2749 sgen_workers_wait_for_jobs_finished ();
2750 sgen_finish_pinning ();
2752 sgen_pin_stats_reset ();
2754 if (do_concurrent_checks)
2755 check_nursery_is_clean ();
/*
 * Start a major collection: select serial vs. concurrent object ops,
 * bump stats, notify the major collector, and run the root
 * copy-or-mark phase. `concurrent` selects a concurrent start.
 * NOTE(review): lines are missing from this excerpt (braces, the
 * conditional guarding the concurrent setup, #endif).
 */
2759 major_start_collection (gboolean concurrent, size_t *old_next_pin_slot)
2761 MONO_GC_BEGIN (GENERATION_OLD);
2762 binary_protocol_collection_begin (gc_stats.major_gc_count, GENERATION_OLD);
2764 current_collection_generation = GENERATION_OLD;
2765 #ifndef DISABLE_PERFCOUNTERS
2766 mono_perfcounters->gc_collections1++;
2769 g_assert (sgen_section_gray_queue_is_empty (sgen_workers_get_distribute_section_gray_queue ()));
/* Concurrent start: requires a concurrent-capable major collector. */
2772 g_assert (major_collector.is_concurrent);
2773 concurrent_collection_in_progress = TRUE;
2775 sgen_cement_concurrent_start ();
2777 current_object_ops = major_collector.major_concurrent_ops;
2779 current_object_ops = major_collector.major_ops;
2782 reset_pinned_from_failed_allocation ();
2784 sgen_memgov_major_collection_start ();
2786 //count_ref_nonref_objs ();
2787 //consistency_check ();
2789 check_scan_starts ();
2792 SGEN_LOG (1, "Start major collection %d", gc_stats.major_gc_count);
2793 gc_stats.major_gc_count ++;
2795 if (major_collector.start_major_collection)
2796 major_collector.start_major_collection ();
2798 major_copy_or_mark_from_roots (old_next_pin_slot, concurrent, FALSE, FALSE, FALSE);
2799 major_finish_copy_or_mark ();
/*
 * Busy-wait until all worker threads report done. The loop body is not
 * visible in this excerpt (presumably a yield/distribute step — confirm
 * against the full source).
 */
2803 wait_for_workers_to_finish (void)
2805 while (!sgen_workers_all_done ())
/*
 * Finish a major collection: join workers (concurrent case), drain the
 * gray stack, rebuild nursery fragments, sweep LOS and the major heap,
 * and emit end-of-collection bookkeeping/protocol events.
 * NOTE(review): this excerpt is missing interleaved lines (braces,
 * TV_GETTIME calls, returns); annotations describe only visible code.
 */
2810 major_finish_collection (const char *reason, size_t old_next_pin_slot, gboolean scan_mod_union, gboolean scan_whole_nursery)
2812 ScannedObjectCounts counts;
2813 LOSObject *bigobj, *prevbo;
/* Concurrent finish: pause workers, redo the root phase with the
 * concurrent ops, then join. */
2819 if (concurrent_collection_in_progress) {
2820 sgen_workers_signal_start_nursery_collection_and_wait ();
2822 current_object_ops = major_collector.major_concurrent_ops;
2824 major_copy_or_mark_from_roots (NULL, FALSE, TRUE, scan_mod_union, scan_whole_nursery);
2826 sgen_workers_signal_finish_nursery_collection ();
2828 major_finish_copy_or_mark ();
2829 gray_queue_enable_redirect (WORKERS_DISTRIBUTE_GRAY_QUEUE);
2831 sgen_workers_join ();
2833 SGEN_ASSERT (0, sgen_gray_object_queue_is_empty (&gray_queue), "Why is the gray queue not empty after workers have finished working?");
2835 #ifdef SGEN_DEBUG_INTERNAL_ALLOC
2836 main_gc_thread = NULL;
2839 if (do_concurrent_checks)
2840 check_nursery_is_clean ();
2842 SGEN_ASSERT (0, !scan_whole_nursery, "scan_whole_nursery only applies to concurrent collections");
2843 current_object_ops = major_collector.major_ops;
2847 * The workers have stopped so we need to finish gray queue
2848 * work that might result from finalization in the main GC
2849 * thread. Redirection must therefore be turned off.
2851 sgen_gray_object_queue_disable_alloc_prepare (&gray_queue);
2852 g_assert (sgen_section_gray_queue_is_empty (sgen_workers_get_distribute_section_gray_queue ()));
2854 /* all the objects in the heap */
2855 finish_gray_stack (GENERATION_OLD, &gray_queue);
2857 time_major_finish_gray_stack += TV_ELAPSED (btv, atv);
2859 SGEN_ASSERT (0, sgen_workers_all_done (), "Can't have workers working after joining");
2862 * The (single-threaded) finalization code might have done
2863 * some copying/marking so we can only reset the GC thread's
2864 * worker data here instead of earlier when we joined the
2867 sgen_workers_reset_data ();
/* Late pins from finalization (OOM path): rebuild the pin queue. */
2869 if (objects_pinned) {
2870 g_assert (!concurrent_collection_in_progress);
2873 * This is slow, but we just OOM'd.
2875 * See comment at `sgen_pin_queue_clear_discarded_entries` for how the pin
2876 * queue is laid out at this point.
2878 sgen_pin_queue_clear_discarded_entries (nursery_section, old_next_pin_slot);
2880 * We need to reestablish all pinned nursery objects in the pin queue
2881 * because they're needed for fragment creation. Unpinning happens by
2882 * walking the whole queue, so it's not necessary to reestablish where major
2883 * heap block pins are - all we care is that they're still in there
2886 sgen_optimize_pin_queue ();
2887 sgen_find_section_pin_queue_start_end (nursery_section);
2891 reset_heap_boundaries ();
2892 sgen_update_heap_boundaries ((mword)sgen_get_nursery_start (), (mword)sgen_get_nursery_end ());
2894 if (!concurrent_collection_in_progress) {
2895 /* walk the pin_queue, build up the fragment list of free memory, unmark
2896 * pinned objects as we go, memzero() the empty fragments so they are ready for the
2899 if (!sgen_build_nursery_fragments (nursery_section, NULL))
2902 /* prepare the pin queue for the next collection */
2903 sgen_finish_pinning ();
2905 /* Clear TLABs for all threads */
2906 sgen_clear_tlabs ();
2908 sgen_pin_stats_reset ();
2911 if (concurrent_collection_in_progress)
2912 sgen_cement_concurrent_finish ();
2913 sgen_cement_clear_below_threshold ();
2915 if (check_mark_bits_after_major_collection)
2916 sgen_check_heap_marked (concurrent_collection_in_progress);
2919 time_major_fragment_creation += TV_ELAPSED (atv, btv);
2922 MONO_GC_SWEEP_BEGIN (GENERATION_OLD, !major_collector.sweeps_lazily);
2924 /* sweep the big objects list */
/* Unlink-and-free sweep of LOS: pinned objects survive (and are
 * unpinned); unpinned objects are removed from the list and freed. */
2926 for (bigobj = los_object_list; bigobj;) {
2927 g_assert (!object_is_pinned (bigobj->data));
2928 if (sgen_los_object_is_pinned (bigobj->data)) {
2929 sgen_los_unpin_object (bigobj->data);
2930 sgen_update_heap_boundaries ((mword)bigobj->data, (mword)bigobj->data + sgen_los_object_size (bigobj));
2933 /* not referenced anywhere, so we can free it */
2935 prevbo->next = bigobj->next;
2937 los_object_list = bigobj->next;
2939 bigobj = bigobj->next;
2940 sgen_los_free_object (to_free);
2944 bigobj = bigobj->next;
2948 time_major_free_bigobjs += TV_ELAPSED (btv, atv);
2953 time_major_los_sweep += TV_ELAPSED (atv, btv);
2955 major_collector.sweep ();
2957 MONO_GC_SWEEP_END (GENERATION_OLD, !major_collector.sweeps_lazily);
2960 time_major_sweep += TV_ELAPSED (btv, atv);
2963 dump_heap ("major", gc_stats.major_gc_count - 1, reason);
2965 if (fin_ready_list || critical_fin_list) {
2966 SGEN_LOG (4, "Finalizer-thread wakeup: ready %d", num_ready_finalizers);
2967 mono_gc_finalize_notify ();
2970 g_assert (sgen_gray_object_queue_is_empty (&gray_queue));
2972 sgen_memgov_major_collection_end ();
2973 current_collection_generation = -1;
2975 memset (&counts, 0, sizeof (ScannedObjectCounts));
2976 major_collector.finish_major_collection (&counts);
2978 g_assert (sgen_section_gray_queue_is_empty (sgen_workers_get_distribute_section_gray_queue ()));
2980 SGEN_ASSERT (0, sgen_workers_all_done (), "Can't have workers working after major collection has finished");
2981 if (concurrent_collection_in_progress)
2982 concurrent_collection_in_progress = FALSE;
2984 check_scan_starts ();
2986 binary_protocol_flush_buffers (FALSE);
2988 //consistency_check ();
2990 MONO_GC_END (GENERATION_OLD);
2991 binary_protocol_collection_end (gc_stats.major_gc_count - 1, GENERATION_OLD, counts.num_scanned_objects, counts.num_unique_scanned_objects);
2995 major_do_collection (const char *reason)
2997 TV_DECLARE (time_start);
2998 TV_DECLARE (time_end);
2999 size_t old_next_pin_slot;
3001 if (disable_major_collections)
3004 if (major_collector.get_and_reset_num_major_objects_marked) {
3005 long long num_marked = major_collector.get_and_reset_num_major_objects_marked ();
3006 g_assert (!num_marked);
3009 /* world must be stopped already */
3010 TV_GETTIME (time_start);
3012 major_start_collection (FALSE, &old_next_pin_slot);
3013 major_finish_collection (reason, old_next_pin_slot, FALSE, FALSE);
3015 TV_GETTIME (time_end);
3016 gc_stats.major_gc_time += TV_ELAPSED (time_start, time_end);
3018 /* FIXME: also report this to the user, preferably in gc-end. */
3019 if (major_collector.get_and_reset_num_major_objects_marked)
3020 major_collector.get_and_reset_num_major_objects_marked ();
3022 return bytes_pinned_from_failed_allocation > 0;
/*
 * Kick off a concurrent major collection: do the initial stop-the-world
 * marking phase, then hand the gray queue over to the worker threads.
 * NOTE(review): elided listing — braces and the early return after
 * disable_major_collections are not visible here.
 */
3026 major_start_concurrent_collection (const char *reason)
3028 TV_DECLARE (time_start);
3029 TV_DECLARE (time_end);
3030 long long num_objects_marked;
3032 if (disable_major_collections)
3035 TV_GETTIME (time_start);
3036 SGEN_TV_GETTIME (time_major_conc_collection_start);
/* The marked-object counter must start this cycle at zero. */
3038 num_objects_marked = major_collector.get_and_reset_num_major_objects_marked ();
3039 g_assert (num_objects_marked == 0);
3041 MONO_GC_CONCURRENT_START_BEGIN (GENERATION_OLD);
3042 binary_protocol_concurrent_start ();
3044 // FIXME: store reason and pass it when finishing
/* TRUE => concurrent start; no pin-slot tracking needed for this mode. */
3045 major_start_collection (TRUE, NULL);
/* Redirect remaining gray objects to the concurrent worker queues. */
3047 gray_queue_redirect (&gray_queue);
3049 num_objects_marked = major_collector.get_and_reset_num_major_objects_marked ();
3050 MONO_GC_CONCURRENT_START_END (GENERATION_OLD, num_objects_marked);
3052 TV_GETTIME (time_end);
/* Only the stop-the-world portion is charged to major_gc_time here. */
3053 gc_stats.major_gc_time += TV_ELAPSED (time_start, time_end);
3055 current_collection_generation = -1;
3059 * Returns whether the major collection has finished.
/* Finished means: nothing local to scan and all worker threads are done. */
3062 major_should_finish_concurrent_collection (void)
3064 SGEN_ASSERT (0, sgen_gray_object_queue_is_empty (&gray_queue), "Why is the gray queue not empty before we have started doing anything?");
3065 return sgen_workers_all_done ();
/*
 * Periodic update step while a concurrent major collection is running:
 * refresh the card-table mod-union views so concurrent marking sees
 * mutator writes. Runs during a stop-the-world pause (timed below).
 */
3069 major_update_concurrent_collection (void)
3071 TV_DECLARE (total_start);
3072 TV_DECLARE (total_end);
3074 TV_GETTIME (total_start);
3076 MONO_GC_CONCURRENT_UPDATE_FINISH_BEGIN (GENERATION_OLD, major_collector.get_and_reset_num_major_objects_marked ());
3077 binary_protocol_concurrent_update ();
/* Merge dirty cards into the mod-union tables for both major heap and LOS. */
3079 major_collector.update_cardtable_mod_union ();
3080 sgen_los_update_cardtable_mod_union ();
3082 MONO_GC_CONCURRENT_UPDATE_END (GENERATION_OLD, major_collector.get_and_reset_num_major_objects_marked ());
3084 TV_GETTIME (total_end);
3085 gc_stats.major_gc_time += TV_ELAPSED (total_start, total_end);
/*
 * Final stop-the-world phase of a concurrent major collection: wait for the
 * marking workers, fold in remaining card-table updates, run a finishing
 * nursery collection, then complete the major cycle.
 * NOTE(review): elided listing — braces and some statements are missing.
 */
3089 major_finish_concurrent_collection (void)
3091 TV_DECLARE (total_start);
3092 TV_DECLARE (total_end);
3093 gboolean late_pinned;
/* Queue of objects pinned only for this finishing pass; unpinned below. */
3094 SgenGrayQueue unpin_queue;
3095 memset (&unpin_queue, 0, sizeof (unpin_queue));
3097 TV_GETTIME (total_start);
3099 MONO_GC_CONCURRENT_UPDATE_FINISH_BEGIN (GENERATION_OLD, major_collector.get_and_reset_num_major_objects_marked ());
3100 binary_protocol_concurrent_finish ();
3103 * The major collector can add global remsets which are processed in the finishing
3104 * nursery collection, below. That implies that the workers must have finished
3105 * marking before the nursery collection is allowed to run, otherwise we might miss
3108 wait_for_workers_to_finish ();
3110 SGEN_TV_GETTIME (time_major_conc_collection_end);
3111 gc_stats.major_gc_time_concurrent += SGEN_TV_ELAPSED (time_major_conc_collection_start, time_major_conc_collection_end);
/* Pick up any cards dirtied since the last update step. */
3113 major_collector.update_cardtable_mod_union ();
3114 sgen_los_update_cardtable_mod_union ();
/* Finishing nursery collection; TRUE flags the concurrent-finish context. */
3116 late_pinned = collect_nursery (&unpin_queue, TRUE);
3118 if (mod_union_consistency_check)
3119 sgen_check_mod_union_consistency ();
3121 current_collection_generation = GENERATION_OLD;
3122 major_finish_collection ("finishing", -1, TRUE, late_pinned);
3124 if (whole_heap_check_before_collection)
3125 sgen_check_whole_heap (FALSE);
/* Release the temporary pins taken for the finishing pass. */
3127 unpin_objects_from_queue (&unpin_queue);
3128 sgen_gray_object_queue_deinit (&unpin_queue);
3130 MONO_GC_CONCURRENT_FINISH_END (GENERATION_OLD, major_collector.get_and_reset_num_major_objects_marked ());
3132 TV_GETTIME (total_end);
/* Subtract the embedded nursery collection's time so it isn't double-counted. */
3133 gc_stats.major_gc_time += TV_ELAPSED (total_start, total_end) - TV_ELAPSED (last_minor_collection_start_tv, last_minor_collection_end_tv);
3135 current_collection_generation = -1;
3139 * Ensure an allocation request for @size will succeed by freeing enough memory.
3141 * LOCKING: The GC lock MUST be held.
/* Decides which generation (if any) to collect so @size can be satisfied.
 * NOTE(review): elided listing — braces and some else branches are missing,
 * so the exact branch nesting must be confirmed against the full source. */
3144 sgen_ensure_free_space (size_t size)
3146 int generation_to_collect = -1;
3147 const char *reason = NULL;
/* Objects above the small-object threshold go to the LOS. */
3150 if (size > SGEN_MAX_SMALL_OBJ_SIZE) {
3151 if (sgen_need_major_collection (size)) {
3152 reason = "LOS overflow";
3153 generation_to_collect = GENERATION_OLD;
3156 if (degraded_mode) {
3157 if (sgen_need_major_collection (size)) {
3158 reason = "Degraded mode overflow";
3159 generation_to_collect = GENERATION_OLD;
3161 } else if (sgen_need_major_collection (size)) {
3162 reason = "Minor allowance";
3163 generation_to_collect = GENERATION_OLD;
/* Default small-object case: the nursery is full. */
3165 generation_to_collect = GENERATION_NURSERY;
3166 reason = "Nursery full";
/* If nothing is urgent but a concurrent major has finished marking,
 * use this opportunity to finish it. */
3170 if (generation_to_collect == -1) {
3171 if (concurrent_collection_in_progress && sgen_workers_all_done ()) {
3172 generation_to_collect = GENERATION_OLD;
3173 reason = "Finish concurrent collection";
3177 if (generation_to_collect == -1)
3179 sgen_perform_collection (size, generation_to_collect, reason, FALSE);
3183 * LOCKING: Assumes the GC lock is held.
/*
 * Top-level collection driver: stops the world, advances or finishes any
 * in-progress concurrent major collection, otherwise collects the requested
 * generation (possibly followed by an overflow collection), then restarts
 * the world and reports timing to the profiler.
 * NOTE(review): elided listing — braces, else keywords and goto/label lines
 * are missing from view; control flow below is partially inferred.
 */
3186 sgen_perform_collection (size_t requested_size, int generation_to_collect, const char *reason, gboolean wait_to_finish)
3188 TV_DECLARE (gc_start);
3189 TV_DECLARE (gc_end);
3190 TV_DECLARE (gc_total_start);
3191 TV_DECLARE (gc_total_end);
/* infos[0] = requested collection, infos[1] = optional overflow collection. */
3192 GGTimingInfo infos [2];
3193 int overflow_generation_to_collect = -1;
3194 int oldest_generation_collected = generation_to_collect;
3195 const char *overflow_reason = NULL;
3197 MONO_GC_REQUESTED (generation_to_collect, requested_size, wait_to_finish ? 1 : 0);
3199 binary_protocol_collection_force (generation_to_collect);
3201 SGEN_ASSERT (0, generation_to_collect == GENERATION_NURSERY || generation_to_collect == GENERATION_OLD, "What generation is this?");
3203 mono_profiler_gc_event (MONO_GC_EVENT_START, generation_to_collect);
3205 TV_GETTIME (gc_start);
3207 sgen_stop_world (generation_to_collect);
3209 TV_GETTIME (gc_total_start);
3211 if (concurrent_collection_in_progress) {
3213 * We update the concurrent collection. If it finished, we're done. If
3214 * not, and we've been asked to do a nursery collection, we do that.
3216 gboolean finish = major_should_finish_concurrent_collection () || (wait_to_finish && generation_to_collect == GENERATION_OLD);
3219 major_finish_concurrent_collection ();
3220 oldest_generation_collected = GENERATION_OLD;
/* Not finishing: pause the workers around the update/nursery work. */
3222 sgen_workers_signal_start_nursery_collection_and_wait ();
3224 major_update_concurrent_collection ();
3225 if (generation_to_collect == GENERATION_NURSERY)
3226 collect_nursery (NULL, FALSE);
3228 sgen_workers_signal_finish_nursery_collection ();
3235 * If we've been asked to do a major collection, and the major collector wants to
3236 * run synchronously (to evacuate), we set the flag to do that.
3238 if (generation_to_collect == GENERATION_OLD &&
3239 allow_synchronous_major &&
3240 major_collector.want_synchronous_collection &&
3241 *major_collector.want_synchronous_collection) {
3242 wait_to_finish = TRUE;
3245 SGEN_ASSERT (0, !concurrent_collection_in_progress, "Why did this not get handled above?");
3248 * There's no concurrent collection in progress. Collect the generation we're asked
3249 * to collect. If the major collector is concurrent and we're not forced to wait,
3250 * start a concurrent collection.
3252 // FIXME: extract overflow reason
3253 if (generation_to_collect == GENERATION_NURSERY) {
/* collect_nursery returning TRUE means excessive pinning: escalate to major. */
3254 if (collect_nursery (NULL, FALSE)) {
3255 overflow_generation_to_collect = GENERATION_OLD;
3256 overflow_reason = "Minor overflow";
3259 if (major_collector.is_concurrent && !wait_to_finish) {
/* Empty the nursery first so the concurrent major starts clean. */
3260 collect_nursery (NULL, FALSE);
3261 major_start_concurrent_collection (reason);
3262 // FIXME: set infos[0] properly
/* Synchronous major; TRUE return requests an overflow nursery collection. */
3266 if (major_do_collection (reason)) {
3267 overflow_generation_to_collect = GENERATION_NURSERY;
3268 overflow_reason = "Excessive pinning";
3272 TV_GETTIME (gc_end);
3274 memset (infos, 0, sizeof (infos));
3275 infos [0].generation = generation_to_collect;
3276 infos [0].reason = reason;
3277 infos [0].is_overflow = FALSE;
3278 infos [1].generation = -1;
3279 infos [0].total_time = SGEN_TV_ELAPSED (gc_start, gc_end);
3281 SGEN_ASSERT (0, !concurrent_collection_in_progress, "Why did this not get handled above?");
3283 if (overflow_generation_to_collect != -1) {
3285 * We need to do an overflow collection, either because we ran out of memory
3286 * or the nursery is fully pinned.
3289 mono_profiler_gc_event (MONO_GC_EVENT_START, overflow_generation_to_collect);
3290 infos [1].generation = overflow_generation_to_collect;
3291 infos [1].reason = overflow_reason;
3292 infos [1].is_overflow = TRUE;
/* Stashes the start timestamp; converted to an elapsed time below. */
3293 infos [1].total_time = gc_end;
3295 if (overflow_generation_to_collect == GENERATION_NURSERY)
3296 collect_nursery (NULL, FALSE);
3298 major_do_collection (overflow_reason);
3300 TV_GETTIME (gc_end);
3301 infos [1].total_time = SGEN_TV_ELAPSED (infos [1].total_time, gc_end);
3303 /* keep events symmetric */
3304 mono_profiler_gc_event (MONO_GC_EVENT_END, overflow_generation_to_collect);
3306 oldest_generation_collected = MAX (oldest_generation_collected, overflow_generation_to_collect);
3309 SGEN_LOG (2, "Heap size: %lu, LOS size: %lu", (unsigned long)mono_gc_get_heap_size (), (unsigned long)los_memory_usage);
3311 /* this also sets the proper pointers for the next allocation */
3312 if (generation_to_collect == GENERATION_NURSERY && !sgen_can_alloc_size (requested_size)) {
3313 /* TypeBuilder and MonoMethod are killing mcs with fragmentation */
3314 SGEN_LOG (1, "nursery collection didn't find enough room for %zd alloc (%zd pinned)", requested_size, sgen_get_pinned_count ());
3315 sgen_dump_pin_queue ();
3320 g_assert (sgen_gray_object_queue_is_empty (&gray_queue));
3322 TV_GETTIME (gc_total_end);
3323 time_max = MAX (time_max, TV_ELAPSED (gc_total_start, gc_total_end));
3325 sgen_restart_world (oldest_generation_collected, infos);
3327 mono_profiler_gc_event (MONO_GC_EVENT_END, generation_to_collect);
3331 * ######################################################################
3332 * ######## Memory allocation from the OS
3333 * ######################################################################
3334 * This section of code deals with getting memory from the OS and
3335 * allocating memory for GC-internal data structures.
3336 * Internal memory can be handled with a freelist for small objects.
/* Debug helper: dump GC-internal and pinned memory usage to stdout.
 * Marked unused because it is only called ad hoc (e.g. from a debugger). */
3342 G_GNUC_UNUSED static void
3343 report_internal_mem_usage (void)
3345 printf ("Internal memory usage:\n");
3346 sgen_report_internal_mem_usage ();
3347 printf ("Pinned memory usage:\n");
3348 major_collector.report_pinned_memory_usage ();
3352 * ######################################################################
3353 * ######## Finalization support
3354 * ######################################################################
/* Liveness test for an old-generation object: pinned or forwarded objects
 * are alive; LOS objects are alive only if pinned; otherwise defer to the
 * major collector's live-object check. */
3357 static inline gboolean
3358 sgen_major_is_object_alive (void *object)
3362 /* Oldgen objects can be pinned and forwarded too */
3363 if (SGEN_OBJECT_IS_PINNED (object) || SGEN_OBJECT_IS_FORWARDED (object))
3367 * FIXME: major_collector.is_object_live() also calculates the
3368 * size. Avoid the double calculation.
3370 objsize = SGEN_ALIGN_UP (sgen_safe_object_get_size ((MonoObject*)object));
/* Large objects live in the LOS, which tracks liveness via pinning. */
3371 if (objsize > SGEN_MAX_SMALL_OBJ_SIZE)
3372 return sgen_los_object_is_pinned (object);
3374 return major_collector.is_object_live (object);
3378 * If the object has been forwarded it means it's still referenced from a root.
3379 * If it is pinned it's still alive as well.
3380 * A LOS object is only alive if we have pinned it.
3381 * Return TRUE if @obj is ready to be finalized.
/* Dispatches to the nursery or major liveness check by object location. */
3383 static inline gboolean
3384 sgen_is_object_alive (void *object)
3386 if (ptr_in_nursery (object))
3387 return sgen_nursery_is_object_alive (object);
3389 return sgen_major_is_object_alive (object);
3393 * This function returns true if @object is either alive or it belongs to the old gen
3394 * and we're currently doing a minor collection.
3397 sgen_is_object_alive_for_current_gen (char *object)
3399 if (ptr_in_nursery (object))
3400 return sgen_nursery_is_object_alive (object);
/* During a minor collection old-gen objects are treated as alive
 * (the return on the elided line is not visible in this listing). */
3402 if (current_collection_generation == GENERATION_NURSERY)
3405 return sgen_major_is_object_alive (object);
3409 * This function returns true if @object is either alive and belongs to the
3410 * current collection - major collections are full heap, so old gen objects
3411 * are never alive during a minor collection.
3414 sgen_is_object_alive_and_on_current_collection (char *object)
3416 if (ptr_in_nursery (object))
3417 return sgen_nursery_is_object_alive (object);
/* Old-gen objects are outside the scope of a minor collection
 * (the return on the elided line is not visible in this listing). */
3419 if (current_collection_generation == GENERATION_NURSERY)
3422 return sgen_major_is_object_alive (object);
/* An object is ready for finalization exactly when it is no longer alive. */
3427 sgen_gc_is_object_ready_for_finalization (void *object)
3429 return !sgen_is_object_alive (object);
/* TRUE if @obj's class derives from CriticalFinalizerObject, i.e. its
 * finalizer must run after all ordinary finalizers. */
3433 has_critical_finalizer (MonoObject *obj)
3437 if (!mono_defaults.critical_finalizer_object)
3440 class = ((MonoVTable*)LOAD_VTABLE (obj))->klass;
3442 return mono_class_has_parent_fast (class, mono_defaults.critical_finalizer_object);
/* TRUE if the object's vtable opted into finalization-aware callbacks. */
3446 is_finalization_aware (MonoObject *obj)
3448 MonoVTable *vt = ((MonoVTable*)LOAD_VTABLE (obj));
3449 return (vt->gc_bits & SGEN_GC_BIT_FINALIZER_AWARE) == SGEN_GC_BIT_FINALIZER_AWARE;
/*
 * Enqueue @obj for finalization, on the critical list if its class has a
 * critical finalizer, on the ordinary ready list otherwise. Also notifies
 * finalization-aware callbacks and fires the DTrace enqueue probe.
 * NOTE(review): elided listing — the if/else around the two list inserts
 * is not fully visible here.
 */
3453 sgen_queue_finalization_entry (MonoObject *obj)
3455 FinalizeReadyEntry *entry = sgen_alloc_internal (INTERNAL_MEM_FINALIZE_READY_ENTRY);
3456 gboolean critical = has_critical_finalizer (obj);
3457 entry->object = obj;
/* Critical finalizers go on their own list (run after ordinary ones). */
3459 entry->next = critical_fin_list;
3460 critical_fin_list = entry;
3462 entry->next = fin_ready_list;
3463 fin_ready_list = entry;
3466 if (fin_callbacks.object_queued_for_finalization && is_finalization_aware (obj))
3467 fin_callbacks.object_queued_for_finalization (obj);
3469 #ifdef ENABLE_DTRACE
3470 if (G_UNLIKELY (MONO_GC_FINALIZE_ENQUEUE_ENABLED ())) {
3471 int gen = sgen_ptr_in_nursery (obj) ? GENERATION_NURSERY : GENERATION_OLD;
3472 MonoVTable *vt = (MonoVTable*)LOAD_VTABLE (obj);
3473 MONO_GC_FINALIZE_ENQUEUE ((mword)obj, sgen_safe_object_get_size (obj),
3474 vt->klass->name_space, vt->klass->name, gen, critical);
/* Public wrapper: liveness with respect to the current collection. */
3480 sgen_object_is_live (void *obj)
3482 return sgen_is_object_alive_and_on_current_collection (obj);
3485 /* LOCKING: requires that the GC lock is held */
/* Unlink and free every ephemeron registration whose array belongs to
 * @domain (used during appdomain unload). Standard singly-linked-list
 * removal with a trailing @prev pointer; elided lines hide the loop header
 * and prev updates. */
3487 null_ephemerons_for_domain (MonoDomain *domain)
3489 EphemeronLinkNode *current = ephemeron_list, *prev = NULL;
3492 MonoObject *object = (MonoObject*)current->array;
3495 SGEN_ASSERT (0, object->vtable, "Can't have objects without vtables.");
3497 if (object && object->vtable->domain == domain) {
3498 EphemeronLinkNode *tmp = current;
/* Unlink: fix head pointer or predecessor's next, then free the node. */
3501 prev->next = current->next;
3503 ephemeron_list = current->next;
3505 current = current->next;
3506 sgen_free_internal (tmp, INTERNAL_MEM_EPHEMERON_LINK);
3509 current = current->next;
3514 /* LOCKING: requires that the GC lock is held */
/*
 * Walk all registered ephemeron arrays: drop registrations whose array is
 * dead, and tombstone entries whose key did not survive the collection.
 * NOTE(review): elided listing — loop headers, braces and some else arms
 * are missing from view.
 */
3516 clear_unreachable_ephemerons (ScanCopyContext ctx)
3518 CopyOrMarkObjectFunc copy_func = ctx.copy_func;
3519 GrayQueue *queue = ctx.queue;
3520 EphemeronLinkNode *current = ephemeron_list, *prev = NULL;
3522 Ephemeron *cur, *array_end;
3526 char *object = current->array;
/* Whole ephemeron array is dead: unlink and free its registration node. */
3528 if (!sgen_is_object_alive_for_current_gen (object)) {
3529 EphemeronLinkNode *tmp = current;
3531 SGEN_LOG (5, "Dead Ephemeron array at %p", object);
3534 prev->next = current->next;
3536 ephemeron_list = current->next;
3538 current = current->next;
3539 sgen_free_internal (tmp, INTERNAL_MEM_EPHEMERON_LINK);
/* Array survived: update the stored pointer in case it was moved. */
3544 copy_func ((void**)&object, queue);
3545 current->array = object;
3547 SGEN_LOG (5, "Clearing unreachable entries for ephemeron array at %p", object);
3549 array = (MonoArray*)object;
3550 cur = mono_array_addr (array, Ephemeron, 0);
3551 array_end = cur + mono_array_length_fast (array);
/* Dead keys are replaced by the domain's tombstone sentinel object. */
3552 tombstone = (char*)((MonoVTable*)LOAD_VTABLE (object))->domain->ephemeron_tombstone;
3554 for (; cur < array_end; ++cur) {
3555 char *key = (char*)cur->key;
3557 if (!key || key == tombstone)
3560 SGEN_LOG (5, "[%td] key %p (%s) value %p (%s)", cur - mono_array_addr (array, Ephemeron, 0),
3561 key, sgen_is_object_alive_for_current_gen (key) ? "reachable" : "unreachable",
3562 cur->value, cur->value && sgen_is_object_alive_for_current_gen (cur->value) ? "reachable" : "unreachable");
3564 if (!sgen_is_object_alive_for_current_gen (key)) {
3565 cur->key = tombstone;
3571 current = current->next;
3576 LOCKING: requires that the GC lock is held
3578 Limitations: We scan all ephemerons on every collection since the current design doesn't allow for a simple nursery/mature split.
/*
 * One pass of ephemeron marking: for every live ephemeron array, copy/mark
 * the values whose keys are alive. Returns nonzero when the pass marked
 * nothing new, i.e. a fixed point has been reached (the caller iterates
 * until that happens).
 * NOTE(review): elided listing — braces, continue statements and the
 * nothing_marked update are not visible here.
 */
3581 mark_ephemerons_in_range (ScanCopyContext ctx)
3583 CopyOrMarkObjectFunc copy_func = ctx.copy_func;
3584 GrayQueue *queue = ctx.queue;
3585 int nothing_marked = 1;
3586 EphemeronLinkNode *current = ephemeron_list;
3588 Ephemeron *cur, *array_end;
3591 for (current = ephemeron_list; current; current = current->next) {
3592 char *object = current->array;
3593 SGEN_LOG (5, "Ephemeron array at %p", object);
3595 /*It has to be alive*/
3596 if (!sgen_is_object_alive_for_current_gen (object)) {
3597 SGEN_LOG (5, "\tnot reachable");
/* Array is alive: make sure we track its (possibly moved) location. */
3601 copy_func ((void**)&object, queue);
3603 array = (MonoArray*)object;
3604 cur = mono_array_addr (array, Ephemeron, 0);
3605 array_end = cur + mono_array_length_fast (array);
3606 tombstone = (char*)((MonoVTable*)LOAD_VTABLE (object))->domain->ephemeron_tombstone;
3608 for (; cur < array_end; ++cur) {
3609 char *key = cur->key;
/* Skip empty and tombstoned slots. */
3611 if (!key || key == tombstone)
3614 SGEN_LOG (5, "[%td] key %p (%s) value %p (%s)", cur - mono_array_addr (array, Ephemeron, 0),
3615 key, sgen_is_object_alive_for_current_gen (key) ? "reachable" : "unreachable",
3616 cur->value, cur->value && sgen_is_object_alive_for_current_gen (cur->value) ? "reachable" : "unreachable");
/* Live key keeps its value alive: copy/mark both slots. */
3618 if (sgen_is_object_alive_for_current_gen (key)) {
3619 char *value = cur->value;
3621 copy_func ((void**)&cur->key, queue);
3623 if (!sgen_is_object_alive_for_current_gen (value))
3625 copy_func ((void**)&cur->value, queue);
3631 SGEN_LOG (5, "Ephemeron run finished. Is it done %d", nothing_marked);
3632 return nothing_marked;
/*
 * Drain the finalization queues, running one finalizer per loop iteration.
 * Ordinary entries (fin_ready_list) are processed before critical ones
 * (critical_fin_list). Each entry is removed from its list only on the
 * iteration AFTER it was finalized, while the object itself is kept alive
 * on the stack during the call.
 * NOTE(review): elided listing — lock acquisition/release and braces are
 * not visible; the locking discipline must be confirmed in the full source.
 */
3636 mono_gc_invoke_finalizers (void)
3638 FinalizeReadyEntry *entry = NULL;
3639 gboolean entry_is_critical = FALSE;
3642 /* FIXME: batch to reduce lock contention */
3643 while (fin_ready_list || critical_fin_list) {
3647 FinalizeReadyEntry **list = entry_is_critical ? &critical_fin_list : &fin_ready_list;
3649 /* We have finalized entry in the last
3650 interation, now we need to remove it from
/* Fast path: entry is at the head; otherwise walk to its predecessor. */
3653 *list = entry->next;
3655 FinalizeReadyEntry *e = *list;
3656 while (e->next != entry)
3658 e->next = entry->next;
3660 sgen_free_internal (entry, INTERNAL_MEM_FINALIZE_READY_ENTRY);
3664 /* Now look for the first non-null entry. */
3665 for (entry = fin_ready_list; entry && !entry->object; entry = entry->next)
/* Fall back to the critical list once the ordinary list is exhausted. */
3668 entry_is_critical = FALSE;
3670 entry_is_critical = TRUE;
3671 for (entry = critical_fin_list; entry && !entry->object; entry = entry->next)
3676 g_assert (entry->object);
3677 num_ready_finalizers--;
3678 obj = entry->object;
/* Clear the slot so the entry is recognized as "already finalized". */
3679 entry->object = NULL;
3680 SGEN_LOG (7, "Finalizing object %p (%s)", obj, safe_name (obj));
3688 g_assert (entry->object == NULL);
3690 /* the object is on the stack so it is pinned */
3691 /*g_print ("Calling finalizer for object: %p (%s)\n", entry->object, safe_name (entry->object));*/
3692 mono_gc_run_finalize (obj, NULL);
/* TRUE when either finalization queue still has entries to process. */
3699 mono_gc_pending_finalizers (void)
3701 return fin_ready_list || critical_fin_list;
3705 * ######################################################################
3706 * ######## registered roots support
3707 * ######################################################################
3711 * We do not coalesce roots.
/*
 * Register the memory range [start, start+size) as a GC root of @root_type.
 * If the range start is already registered under any root type, its size
 * and descriptor are updated in place (used for thread statics etc.);
 * otherwise a new RootRecord is inserted into the per-type hash table.
 * NOTE(review): elided listing — braces, the lock, and return statements
 * are missing from view.
 */
3714 mono_gc_register_root_inner (char *start, size_t size, void *descr, int root_type)
3716 RootRecord new_root;
/* Check every root type: a range must not be re-registered under a
 * different type with an incompatible descriptor. */
3719 for (i = 0; i < ROOT_TYPE_NUM; ++i) {
3720 RootRecord *root = sgen_hash_table_lookup (&roots_hash [i], start);
3721 /* we allow changing the size and the descriptor (for thread statics etc) */
3723 size_t old_size = root->end_root - start;
3724 root->end_root = start + size;
/* Either both old and new have a descriptor, or neither does. */
3725 g_assert (((root->root_desc != 0) && (descr != NULL)) ||
3726 ((root->root_desc == 0) && (descr == NULL)));
3727 root->root_desc = (mword)descr;
/* Keep the global accounting consistent with the resize. */
3729 roots_size -= old_size;
3735 new_root.end_root = start + size;
3736 new_root.root_desc = (mword)descr;
3738 sgen_hash_table_replace (&roots_hash [root_type], start, &new_root, NULL);
3741 SGEN_LOG (3, "Added root for range: %p-%p, descr: %p (%d/%d bytes)", start, new_root.end_root, descr, (int)size, (int)roots_size);
/* Public root registration: precise (NORMAL) when a descriptor is given,
 * conservative (PINNED) otherwise. */
3748 mono_gc_register_root (char *start, size_t size, void *descr)
3750 return mono_gc_register_root_inner (start, size, descr, descr ? ROOT_TYPE_NORMAL : ROOT_TYPE_PINNED);
/* Register a root whose writes go through the write barrier. */
3754 mono_gc_register_root_wbarrier (char *start, size_t size, void *descr)
3756 return mono_gc_register_root_inner (start, size, descr, ROOT_TYPE_WBARRIER);
/* Remove @addr from every root-type hash table it appears in, updating the
 * global roots_size accounting for each removal. */
3760 mono_gc_deregister_root (char* addr)
3766 for (root_type = 0; root_type < ROOT_TYPE_NUM; ++root_type) {
3767 if (sgen_hash_table_remove (&roots_hash [root_type], addr, &root))
3768 roots_size -= (root.end_root - addr);
3774 * ######################################################################
3775 * ######## Thread handling (stop/start code)
3776 * ######################################################################
/* Monotonic counter of stop-the-world pauses (see stop/start code). */
3779 unsigned int sgen_global_stop_count = 0;
/* Generation being collected right now, or -1 outside a collection. */
3782 sgen_get_current_collection_generation (void)
3784 return current_collection_generation;
/* Install the runtime's GC callbacks (copied by value). */
3788 mono_gc_set_gc_callbacks (MonoGCCallbacks *callbacks)
3790 gc_callbacks = *callbacks;
/* Expose the installed GC callbacks (pointer to the static copy). */
3794 mono_gc_get_gc_callbacks ()
3796 return &gc_callbacks;
3799 /* Variables holding start/end nursery so it won't have to be passed at every call */
3800 static void *scan_area_arg_start, *scan_area_arg_end;
/* Conservatively pin anything in [start, end) that looks like a pointer
 * into the nursery range stashed in the globals above. */
3803 mono_gc_conservatively_scan_area (void *start, void *end)
3805 conservatively_pin_objects_from (start, end, scan_area_arg_start, scan_area_arg_end, PIN_TYPE_STACK);
/* Precise-scan callback: copy/mark one object reference, returning the
 * (possibly updated) pointer via the elided return line. */
3809 mono_gc_scan_object (void *obj, void *gc_data)
3811 UserCopyOrMarkData *data = gc_data;
3812 current_object_ops.copy_or_mark_object (&obj, data->queue);
3817 * Mark from thread stacks and registers.
/*
 * Scan every registered thread's stack and saved register context for GC
 * references. Precise scanning via the runtime's thread_mark_func when
 * available; otherwise conservative pinning of the whole stack range.
 * NOTE(review): elided listing — continue statements and braces after the
 * skip conditions are not visible here.
 */
3820 scan_thread_data (void *start_nursery, void *end_nursery, gboolean precise, GrayQueue *queue)
3822 SgenThreadInfo *info;
/* Stash the nursery bounds for mono_gc_conservatively_scan_area. */
3824 scan_area_arg_start = start_nursery;
3825 scan_area_arg_end = end_nursery;
3827 FOREACH_THREAD (info) {
3829 SGEN_LOG (3, "Skipping dead thread %p, range: %p-%p, size: %td", info, info->stack_start, info->stack_end, (char*)info->stack_end - (char*)info->stack_start);
3832 if (info->gc_disabled) {
3833 SGEN_LOG (3, "GC disabled for thread %p, range: %p-%p, size: %td", info, info->stack_start, info->stack_end, (char*)info->stack_end - (char*)info->stack_start);
3836 if (mono_thread_info_run_state (info) != STATE_RUNNING) {
3837 SGEN_LOG (3, "Skipping non-running thread %p, range: %p-%p, size: %td (state %d)", info, info->stack_start, info->stack_end, (char*)info->stack_end - (char*)info->stack_start, mono_thread_info_run_state (info));
3840 SGEN_LOG (3, "Scanning thread %p, range: %p-%p, size: %td, pinned=%zd", info, info->stack_start, info->stack_end, (char*)info->stack_end - (char*)info->stack_start, sgen_get_pinned_count ());
3841 if (gc_callbacks.thread_mark_func && !conservative_stack_mark) {
3842 UserCopyOrMarkData data = { NULL, queue };
3843 gc_callbacks.thread_mark_func (info->runtime_data, info->stack_start, info->stack_end, precise, &data);
3844 } else if (!precise) {
/* Precise marking was requested but is unsupported: degrade permanently. */
3845 if (!conservative_stack_mark) {
3846 fprintf (stderr, "Precise stack mark not supported - disabling.\n");
3847 conservative_stack_mark = TRUE;
3849 conservatively_pin_objects_from (info->stack_start, info->stack_end, start_nursery, end_nursery, PIN_TYPE_STACK);
/* Also scan the saved register context (ctx or regs, platform-dependent). */
3854 conservatively_pin_objects_from ((void**)&info->ctx, (void**)&info->ctx + ARCH_NUM_REGS,
3855 start_nursery, end_nursery, PIN_TYPE_STACK);
3857 conservatively_pin_objects_from ((void**)&info->regs, (void**)&info->regs + ARCH_NUM_REGS,
3858 start_nursery, end_nursery, PIN_TYPE_STACK);
3861 } END_FOREACH_THREAD
/* TRUE if @ptr lies within the current thread's live stack region:
 * between the address of this local (current stack depth) and stack_end. */
3865 ptr_on_stack (void *ptr)
3867 gpointer stack_start = &stack_start;
3868 SgenThreadInfo *info = mono_thread_info_current ();
3870 if (ptr >= stack_start && ptr < (gpointer)info->stack_end)
/*
 * Per-thread GC initialization: set up TLS, TLABs, stop/stop-count state,
 * and compute the thread's scannable stack bounds. @addr is an address on
 * the registering thread's stack, used as a fallback when the platform
 * cannot report stack bounds.
 * NOTE(review): elided listing — preprocessor else/endif lines and braces
 * are not visible here.
 */
3876 sgen_thread_register (SgenThreadInfo* info, void *addr)
3879 guint8 *staddr = NULL;
3881 #ifndef HAVE_KW_THREAD
3882 info->tlab_start = info->tlab_next = info->tlab_temp_end = info->tlab_real_end = NULL;
/* A thread must not register twice. */
3884 g_assert (!mono_native_tls_get_value (thread_info_key));
3885 mono_native_tls_set_value (thread_info_key, info);
3887 sgen_thread_info = info;
3890 #ifdef SGEN_POSIX_STW
3891 info->stop_count = -1;
3895 info->stack_start = NULL;
3896 info->stopped_ip = NULL;
3897 info->stopped_domain = NULL;
3899 memset (&info->ctx, 0, sizeof (MonoContext));
3901 memset (&info->regs, 0, sizeof (info->regs));
3904 sgen_init_tlab_info (info);
3906 binary_protocol_thread_register ((gpointer)mono_thread_info_get_tid (info));
3908 /* On win32, stack_start_limit should be 0, since the stack can grow dynamically */
3909 mono_thread_info_get_stack_bounds (&staddr, &stsize);
3912 info->stack_start_limit = staddr;
3914 info->stack_end = staddr + stsize;
/* Fallback: round the caller-supplied stack address up to a page boundary. */
3916 gsize stack_bottom = (gsize)addr;
3917 stack_bottom += 4095;
3918 stack_bottom &= ~4095;
3919 info->stack_end = (char*)stack_bottom;
3922 #ifdef HAVE_KW_THREAD
3923 stack_end = info->stack_end;
3926 SGEN_LOG (3, "registered thread %p (%p) stack end %p", info, (gpointer)mono_thread_info_get_tid (info), info->stack_end);
/* Let the runtime attach its per-thread data (e.g. MonoThread). */
3928 if (gc_callbacks.thread_attach_func)
3929 info->runtime_data = gc_callbacks.thread_attach_func ();
/* TSD destructor path: detach the managed thread if its domain is still
 * registered (see comment below for why this is needed). */
3934 sgen_thread_detach (SgenThreadInfo *p)
3936 /* If a delegate is passed to native code and invoked on a thread we dont
3937 * know about, the jit will register it with mono_jit_thread_attach, but
3938 * we have no way of knowing when that thread goes away. SGen has a TSD
3939 * so we assume that if the domain is still registered, we can detach
3942 if (mono_domain_get ())
3943 mono_thread_detach_internal (mono_thread_internal_current ());
/* Tear down per-thread GC state: clear TLS, hand runtime threads to the
 * joinable-thread machinery, and invoke the runtime's detach callback. */
3947 sgen_thread_unregister (SgenThreadInfo *p)
3949 MonoNativeThreadId tid;
3951 tid = mono_thread_info_get_tid (p);
3952 binary_protocol_thread_unregister ((gpointer)tid);
3953 SGEN_LOG (3, "unregister thread %p (%p)", p, (gpointer)tid);
3955 #ifndef HAVE_KW_THREAD
3956 mono_native_tls_set_value (thread_info_key, NULL);
3958 sgen_thread_info = NULL;
/* Runtime-created threads become joinable so the runtime can reap them. */
3961 if (p->info.runtime_thread)
3962 mono_threads_add_joinable_thread ((gpointer)tid);
3964 if (gc_callbacks.thread_detach_func) {
3965 gc_callbacks.thread_detach_func (p->runtime_data);
3966 p->runtime_data = NULL;
/* (Re)attach hook: ensure the runtime's per-thread data exists.
 * NOTE(review): an early-out guard appears elided before the check below. */
3972 sgen_thread_attach (SgenThreadInfo *info)
3975 /*this is odd, can we get attached before the gc is inited?*/
3979 if (gc_callbacks.thread_attach_func && !info->runtime_data)
3980 info->runtime_data = gc_callbacks.thread_attach_func ();
/* Public entry: attach the current thread to the GC; @baseptr is an
 * address on its stack. Returns nonzero on success. */
3983 mono_gc_register_thread (void *baseptr)
3985 return mono_thread_info_attach (baseptr) != NULL;
3989 * mono_gc_set_stack_end:
3991 * Set the end of the current threads stack to STACK_END. The stack space between
3992 * STACK_END and the real end of the threads stack will not be scanned during collections.
3995 mono_gc_set_stack_end (void *stack_end)
3997 SgenThreadInfo *info;
4000 info = mono_thread_info_current ();
/* Only allowed to shrink the scanned region, never extend it. */
4002 g_assert (stack_end < info->stack_end);
4003 info->stack_end = stack_end;
4008 #if USE_PTHREAD_INTERCEPT
/* Pthread-intercept shim: plain passthrough (SGen needs no wrapping here). */
4012 mono_gc_pthread_create (pthread_t *new_thread, const pthread_attr_t *attr, void *(*start_routine)(void *), void *arg)
4014 return pthread_create (new_thread, attr, start_routine, arg);
/* Pthread-intercept shim: plain passthrough. */
4018 mono_gc_pthread_join (pthread_t thread, void **retval)
4020 return pthread_join (thread, retval);
/* Pthread-intercept shim: plain passthrough. */
4024 mono_gc_pthread_detach (pthread_t thread)
4026 return pthread_detach (thread);
/* Pthread-intercept shim: detach from the GC's thread machinery before
 * terminating the thread; pthread_exit does not return. */
4030 mono_gc_pthread_exit (void *retval)
4032 mono_thread_info_detach ();
4033 pthread_exit (retval);
4034 g_assert_not_reached ();
4037 #endif /* USE_PTHREAD_INTERCEPT */
4040 * ######################################################################
4041 * ######## Write barriers
4042 * ######################################################################
4046 * Note: the write barriers first do the needed GC work and then do the actual store:
4047 * this way the value is visible to the conservative GC scan after the write barrier
4048 * itself. If a GC interrupts the barrier in the middle, value will be kept alive by
4049 * the conservative scan, otherwise by the remembered set scan.
/*
 * Write barrier for object-field stores. Nursery targets need no remset
 * (the elided lines store and return early); otherwise delegate to the
 * active remembered-set implementation, which performs the store.
 */
4052 mono_gc_wbarrier_set_field (MonoObject *obj, gpointer field_ptr, MonoObject* value)
4054 HEAVY_STAT (++stat_wbarrier_set_field);
4055 if (ptr_in_nursery (field_ptr)) {
4056 *(void**)field_ptr = value;
4059 SGEN_LOG (8, "Adding remset at %p", field_ptr);
4061 binary_protocol_wbarrier (field_ptr, value, value->vtable);
4063 remset.wbarrier_set_field (obj, field_ptr, value);
/* Write barrier for array-element stores; mirrors set_field above:
 * nursery slot => direct store, otherwise remset handles the store. */
4067 mono_gc_wbarrier_set_arrayref (MonoArray *arr, gpointer slot_ptr, MonoObject* value)
4069 HEAVY_STAT (++stat_wbarrier_set_arrayref);
4070 if (ptr_in_nursery (slot_ptr)) {
4071 *(void**)slot_ptr = value;
4074 SGEN_LOG (8, "Adding remset at %p", slot_ptr);
4076 binary_protocol_wbarrier (slot_ptr, value, value->vtable);
4078 remset.wbarrier_set_arrayref (arr, slot_ptr, value);
/*
 * Write barrier for bulk copies of @count object references into an array.
 * Destination in the nursery (or empty copy) => plain aligned memmove;
 * otherwise the remset implementation performs the barriered copy.
 */
4082 mono_gc_wbarrier_arrayref_copy (gpointer dest_ptr, gpointer src_ptr, int count)
4084 HEAVY_STAT (++stat_wbarrier_arrayref_copy);
4085 /*This check can be done without taking a lock since dest_ptr array is pinned*/
4086 if (ptr_in_nursery (dest_ptr) || count <= 0) {
4087 mono_gc_memmove_aligned (dest_ptr, src_ptr, count * sizeof (gpointer));
/* Heavy binary protocol: record one wbarrier event per copied slot. */
4091 #ifdef SGEN_HEAVY_BINARY_PROTOCOL
4092 if (binary_protocol_is_heavy_enabled ()) {
4094 for (i = 0; i < count; ++i) {
4095 gpointer dest = (gpointer*)dest_ptr + i;
4096 gpointer obj = *((gpointer*)src_ptr + i);
4098 binary_protocol_wbarrier (dest, obj, (gpointer)LOAD_VTABLE (obj));
4103 remset.wbarrier_arrayref_copy (dest_ptr, src_ptr, count);
/* Result slot for the heap walk below (debugger helper state). */
4106 static char *found_obj;
/* Heap-walk callback: record the object whose extent contains @user_data. */
4109 find_object_for_ptr_callback (char *obj, size_t size, void *user_data)
4111 char *ptr = user_data;
4113 if (ptr >= obj && ptr < obj + size) {
/* Each pointer must resolve to at most one object. */
4114 g_assert (!found_obj);
4119 /* for use in the debugger */
/* Resolve an interior pointer to the object containing it, searching the
 * nursery, then the LOS, then the major heap. Debugger-only; slow on purpose. */
4120 char* find_object_for_ptr (char *ptr);
4122 find_object_for_ptr (char *ptr)
4124 if (ptr >= nursery_section->data && ptr < nursery_section->end_data) {
4126 sgen_scan_area_with_callback (nursery_section->data, nursery_section->end_data,
4127 find_object_for_ptr_callback, ptr, TRUE);
4133 sgen_los_iterate_objects (find_object_for_ptr_callback, ptr);
4138 * Very inefficient, but this is debugging code, supposed to
4139 * be called from gdb, so we don't care.
4142 major_collector.iterate_objects (ITERATE_OBJECTS_SWEEP_ALL, find_object_for_ptr_callback, ptr);
/*
 * Write barrier to run AFTER a reference store has already happened at PTR.
 * Skips locations in the nursery or on the stack; also skips old->old
 * stores unless a concurrent collection is in progress (which must see
 * old->old mutations).  Otherwise records PTR with the remset backend.
 */
4147 mono_gc_wbarrier_generic_nostore (gpointer ptr)
4151 HEAVY_STAT (++stat_wbarrier_generic_store);
4153 #ifdef XDOMAIN_CHECKS_IN_WBARRIER
4154 /* FIXME: ptr_in_heap must be called with the GC lock held */
4155 if (xdomain_checks && *(MonoObject**)ptr && ptr_in_heap (ptr)) {
4156 char *start = find_object_for_ptr (ptr);
4157 MonoObject *value = *(MonoObject**)ptr;
4161 MonoObject *obj = (MonoObject*)start;
4162 if (obj->vtable->domain != value->vtable->domain)
4163 g_assert (is_xdomain_ref_allowed (ptr, start, obj->vtable->domain));
4169 obj = *(gpointer*)ptr;
4171 binary_protocol_wbarrier (ptr, obj, (gpointer)LOAD_VTABLE (obj));
4173 if (ptr_in_nursery (ptr) || ptr_on_stack (ptr)) {
4174 SGEN_LOG (8, "Skipping remset at %p", ptr);
4179 * We need to record old->old pointer locations for the
4180 * concurrent collector.
4182 if (!ptr_in_nursery (obj) && !concurrent_collection_in_progress) {
4183 SGEN_LOG (8, "Skipping remset at %p", ptr);
4187 SGEN_LOG (8, "Adding remset at %p", ptr);
4189 remset.wbarrier_generic_nostore (ptr);
/*
 * Generic store + barrier: writes VALUE to PTR, then runs the nostore
 * barrier only when the stored value lives in the nursery.  The dummy use
 * keeps VALUE observably live across the barrier.
 */
4193 mono_gc_wbarrier_generic_store (gpointer ptr, MonoObject* value)
4195 SGEN_LOG (8, "Wbarrier store at %p to %p (%s)", ptr, value, value ? safe_name (value) : "null");
4196 SGEN_UPDATE_REFERENCE_ALLOW_NULL (ptr, value);
4197 if (ptr_in_nursery (value))
4198 mono_gc_wbarrier_generic_nostore (ptr);
4199 sgen_dummy_use (value);
4202 /* Same as mono_gc_wbarrier_generic_store () but performs the store
4203 * as an atomic operation with release semantics.
4206 mono_gc_wbarrier_generic_store_atomic (gpointer ptr, MonoObject *value)
4208 HEAVY_STAT (++stat_wbarrier_generic_store_atomic);
4210 SGEN_LOG (8, "Wbarrier atomic store at %p to %p (%s)", ptr, value, value ? safe_name (value) : "null");
/* Atomic pointer write replaces the plain SGEN_UPDATE_REFERENCE store. */
4212 InterlockedWritePointer (ptr, value);
4214 if (ptr_in_nursery (value))
4215 mono_gc_wbarrier_generic_nostore (ptr);
4217 sgen_dummy_use (value);
/*
 * Copy a value type word by word; BITMAP flags which words hold object
 * references.  Flagged words go through the full write barrier, the rest
 * are raw reference updates.  (Loop structure is elided in this listing.)
 */
4220 void mono_gc_wbarrier_value_copy_bitmap (gpointer _dest, gpointer _src, int size, unsigned bitmap)
4222 mword *dest = _dest;
4227 mono_gc_wbarrier_generic_store (dest, (MonoObject*)*src);
4229 SGEN_UPDATE_REFERENCE_ALLOW_NULL (dest, *src);
4232 size -= SIZEOF_VOID_P;
4237 #ifdef SGEN_HEAVY_BINARY_PROTOCOL
/*
 * HANDLE_PTR is the per-reference hook consumed by sgen-scan-object.h:
 * for each reference slot in the scanned object it logs the corresponding
 * slot of DEST in the binary protocol.
 */
4239 #define HANDLE_PTR(ptr,obj) do { \
4240 gpointer o = *(gpointer*)(ptr); \
4242 gpointer d = ((char*)dest) + ((char*)(ptr) - (char*)(obj)); \
4243 binary_protocol_wbarrier (d, o, (gpointer) LOAD_VTABLE (o)); \
/* Scans the object at START (descriptor DESC) logging wbarrier entries
 * for DEST; the scan loop itself is generated by the include below. */
4248 scan_object_for_binary_protocol_copy_wbarrier (gpointer dest, char *start, mword desc)
4250 #define SCAN_OBJECT_NOVTABLE
4251 #include "sgen-scan-object.h"
/*
 * Write barrier for copying COUNT value-type instances of KLASS from SRC
 * to DEST.  Destinations in the nursery/stack, or classes with no
 * reference fields, take a plain atomic memmove; otherwise the remset
 * backend does the barriered copy.
 */
4256 mono_gc_wbarrier_value_copy (gpointer dest, gpointer src, int count, MonoClass *klass)
4258 HEAVY_STAT (++stat_wbarrier_value_copy);
4259 g_assert (klass->valuetype);
4261 SGEN_LOG (8, "Adding value remset at %p, count %d, descr %p for class %s (%p)", dest, count, klass->gc_descr, klass->name, klass);
4263 if (ptr_in_nursery (dest) || ptr_on_stack (dest) || !SGEN_CLASS_HAS_REFERENCES (klass)) {
4264 size_t element_size = mono_class_value_size (klass, NULL);
4265 size_t size = count * element_size;
4266 mono_gc_memmove_atomic (dest, src, size);
4270 #ifdef SGEN_HEAVY_BINARY_PROTOCOL
4271 if (binary_protocol_is_heavy_enabled ()) {
4272 size_t element_size = mono_class_value_size (klass, NULL);
4274 for (i = 0; i < count; ++i) {
/* Subtracting sizeof (MonoObject) fakes an object header so the
 * object scanner's field offsets line up with the bare value. */
4275 scan_object_for_binary_protocol_copy_wbarrier ((char*)dest + i * element_size,
4276 (char*)src + i * element_size - sizeof (MonoObject),
4277 (mword) klass->gc_descr);
4282 remset.wbarrier_value_copy (dest, src, count, klass);
4286 * mono_gc_wbarrier_object_copy:
4288 * Write barrier to call when obj is the result of a clone or copy of an object.
/*
 * Copies all fields (everything past the MonoObject header) from SRC into
 * OBJ.  A nursery/stack destination memmoves directly; otherwise the
 * remset backend performs the barriered copy.
 */
4291 mono_gc_wbarrier_object_copy (MonoObject* obj, MonoObject *src)
4295 HEAVY_STAT (++stat_wbarrier_object_copy);
4297 if (ptr_in_nursery (obj) || ptr_on_stack (obj)) {
4298 size = mono_object_class (obj)->instance_size;
4299 mono_gc_memmove_aligned ((char*)obj + sizeof (MonoObject), (char*)src + sizeof (MonoObject),
4300 size - sizeof (MonoObject));
4304 #ifdef SGEN_HEAVY_BINARY_PROTOCOL
4305 if (binary_protocol_is_heavy_enabled ())
4306 scan_object_for_binary_protocol_copy_wbarrier (obj, (char*)src, (mword) src->vtable->gc_descr);
4309 remset.wbarrier_object_copy (obj, src);
4314 * ######################################################################
4315 * ######## Other mono public interface functions.
4316 * ######################################################################
/* Max references buffered per callback invocation during a heap walk. */
4319 #define REFS_SIZE 128
/* Fields of the heap-walk state (struct header elided in this listing):
 * user callback plus fixed-size buffers of referenced objects and their
 * byte offsets within the object being reported. */
4322 MonoGCReferences callback;
4326 MonoObject *refs [REFS_SIZE];
4327 uintptr_t offsets [REFS_SIZE];
/*
 * Heap-walk per-reference hook for sgen-scan-object.h: buffers each
 * reference and its offset; when the buffer fills, flushes via the user
 * callback (size reported as 0 on all but the first call for an object).
 */
4331 #define HANDLE_PTR(ptr,obj) do { \
4333 if (hwi->count == REFS_SIZE) { \
4334 hwi->callback ((MonoObject*)start, mono_object_class (start), hwi->called? 0: size, hwi->count, hwi->refs, hwi->offsets, hwi->data); \
4338 hwi->offsets [hwi->count] = (char*)(ptr)-(char*)start; \
4339 hwi->refs [hwi->count++] = *(ptr); \
/* Collects all references of the object at START into HWI's buffers; the
 * scanning loop is generated by the sgen-scan-object.h include. */
4344 collect_references (HeapWalkInfo *hwi, char *start, size_t size)
4346 mword desc = sgen_obj_get_descriptor (start);
4348 #include "sgen-scan-object.h"
/* Per-object callback for the heap walk: gathers the object's references,
 * then reports it even when it has none (unless already reported). */
4352 walk_references (char *start, size_t size, void *data)
4354 HeapWalkInfo *hwi = data;
4357 collect_references (hwi, start, size);
4358 if (hwi->count || !hwi->called)
4359 hwi->callback ((MonoObject*)start, mono_object_class (start), hwi->called? 0: size, hwi->count, hwi->refs, hwi->offsets, hwi->data);
4363 * mono_gc_walk_heap:
4364 * @flags: flags for future use
4365 * @callback: a function pointer called for each object in the heap
4366 * @data: a user data pointer that is passed to callback
4368 * This function can be used to iterate over all the live objects in the heap:
4369 * for each object, @callback is invoked, providing info about the object's
4370 * location in memory, its class, its size and the objects it references.
4371 * For each referenced object it's offset from the object address is
4372 * reported in the offsets array.
4373 * The object references may be buffered, so the callback may be invoked
4374 * multiple times for the same object: in all but the first call, the size
4375 * argument will be zero.
4376 * Note that this function can be only called in the #MONO_GC_EVENT_PRE_START_WORLD
4377 * profiler event handler.
4379 * Returns: a non-zero value if the GC doesn't support heap walking
4382 mono_gc_walk_heap (int flags, MonoGCReferences callback, void *data)
4387 hwi.callback = callback;
/* Walk the three spaces in turn: nursery, major heap, then LOS. */
4390 sgen_clear_nursery_fragments ();
4391 sgen_scan_area_with_callback (nursery_section->data, nursery_section->end_data, walk_references, &hwi, FALSE);
4393 major_collector.iterate_objects (ITERATE_OBJECTS_SWEEP_ALL, walk_references, &hwi);
4394 sgen_los_iterate_objects (walk_references, &hwi);
/* Public entry point: trigger a blocking collection of GENERATION on
 * user request (0 bytes requested, wait-to-finish = TRUE). */
4400 mono_gc_collect (int generation)
4405 sgen_perform_collection (0, generation, "user request", TRUE);
/* Highest generation number supported by SGen (body elided here). */
4410 mono_gc_max_generation (void)
/* Number of collections performed so far for GENERATION: minor count for
 * generation 0, major count otherwise. */
4416 mono_gc_collection_count (int generation)
4418 if (generation == 0)
4419 return gc_stats.minor_gc_count;
4420 return gc_stats.major_gc_count;
/* Total bytes in use: LOS + bytes allocated in the nursery so far +
 * the major collector's accounting. */
4424 mono_gc_get_used_size (void)
4428 tot = los_memory_usage;
4429 tot += nursery_section->next_data - nursery_section->data;
4430 tot += major_collector.get_used_size ();
4431 /* FIXME: account for pinned objects */
/* Size threshold above which allocations go to the large object space. */
4437 mono_gc_get_los_limit (void)
4439 return MAX_SMALL_OBJ_SIZE;
/*
 * Truncate STR to NEW_LENGTH chars in place.  The discarded tail is
 * zeroed (which also NUL-delimits the string); with nursery canaries
 * enabled the canary is re-written after the new terminator.
 */
4443 mono_gc_set_string_length (MonoString *str, gint32 new_length)
4445 mono_unichar2 *new_end = str->chars + new_length;
4447 /* zero the discarded string. This null-delimits the string and allows
4448 * the space to be reclaimed by SGen. */
4450 if (nursery_canaries_enabled () && sgen_ptr_in_nursery (str)) {
4451 CHECK_CANARY_FOR_OBJECT (str);
4452 memset (new_end, 0, (str->length - new_length + 1) * sizeof (mono_unichar2) + CANARY_SIZE);
4453 memcpy (new_end + 1 , CANARY_STRING, CANARY_SIZE);
4455 memset (new_end, 0, (str->length - new_length + 1) * sizeof (mono_unichar2));
4458 str->length = new_length;
/* Whether user-defined mark routines are supported (body elided). */
4462 mono_gc_user_markers_supported (void)
/* Liveness query for O (body elided). */
4468 mono_object_is_alive (MonoObject* o)
/* Generation of OBJ: nursery objects are generation 0. */
4474 mono_gc_get_generation (MonoObject *obj)
4476 if (ptr_in_nursery (obj))
/* Enable GC profiler events (body elided). */
4482 mono_gc_enable_events (void)
/* Register a weak reference from *LINK_ADDR to OBJ; TRACK selects
 * resurrection-tracking semantics. */
4487 mono_gc_weak_link_add (void **link_addr, MonoObject *obj, gboolean track)
4489 sgen_register_disappearing_link (obj, link_addr, track, FALSE);
/* Unregister the weak reference at LINK_ADDR (NULL object = removal). */
4493 mono_gc_weak_link_remove (void **link_addr, gboolean track)
4495 sgen_register_disappearing_link (NULL, link_addr, track, FALSE);
/*
 * Dereference a weak link without taking the GC lock.  Reads the hidden
 * pointer, reveals it, then re-reads to detect a racing collection; also
 * blocks on in-flight bridge processing before trusting the value.
 */
4499 mono_gc_weak_link_get (void **link_addr)
4501 void * volatile *link_addr_volatile;
4505 link_addr_volatile = link_addr;
4506 ptr = (void*)*link_addr_volatile;
4508 * At this point we have a hidden pointer. If the GC runs
4509 * here, it will not recognize the hidden pointer as a
4510 * reference, and if the object behind it is not referenced
4511 * elsewhere, it will be freed. Once the world is restarted
4512 * we reveal the pointer, giving us a pointer to a freed
4513 * object. To make sure we don't return it, we load the
4514 * hidden pointer again. If it's still the same, we can be
4515 * sure the object reference is valid.
4518 obj = (MonoObject*) REVEAL_POINTER (ptr);
4522 mono_memory_barrier ();
4525 * During the second bridge processing step the world is
4526 * running again. That step processes all weak links once
4527 * more to null those that refer to dead objects. Before that
4528 * is completed, those links must not be followed, so we
4529 * conservatively wait for bridge processing when any weak
4530 * link is dereferenced.
4532 if (G_UNLIKELY (bridge_processing_in_progress))
4533 mono_gc_wait_for_bridge_processing ();
/* Re-read: if the hidden pointer changed, a GC ran in between. */
4535 if ((void*)*link_addr_volatile != ptr)
/* Register OBJ (an ephemeron array) by prepending a node to the global
 * ephemeron list, so the collector treats its entries specially. */
4542 mono_gc_ephemeron_array_add (MonoObject *obj)
4544 EphemeronLinkNode *node;
4548 node = sgen_alloc_internal (INTERNAL_MEM_EPHEMERON_LINK);
4553 node->array = (char*)obj;
4554 node->next = ephemeron_list;
4555 ephemeron_list = node;
4557 SGEN_LOG (5, "Registered ephemeron array %p", obj);
/* Toggle synchronous major collections; only meaningful when the major
 * collector is concurrent. */
4564 mono_gc_set_allow_synchronous_major (gboolean flag)
4566 if (!major_collector.is_concurrent)
4569 allow_synchronous_major = flag;
/* Run FUNC(DATA) while holding the GC interruption lock; returns FUNC's
 * result. */
4574 mono_gc_invoke_with_gc_lock (MonoGCLockedCallbackFunc func, void *data)
4578 result = func (data);
4579 UNLOCK_INTERRUPTION;
/* A thread counts as a GC thread when it is registered with
 * mono_thread_info (i.e. has a current thread-info record). */
4584 mono_gc_is_gc_thread (void)
4588 result = mono_thread_info_current () != NULL;
/* A method is GC-critical if either the runtime or SGen says so. */
4594 is_critical_method (MonoMethod *method)
4596 return mono_runtime_is_critical_method (method) || sgen_is_critical_method (method);
/*
 * Print a warning about a malformed value in environment variable ENV_VAR
 * to stderr, followed by the FALLBACK behavior notice (printf-style
 * DESCRIPTION_FORMAT + varargs describe the problem).
 */
4600 sgen_env_var_error (const char *env_var, const char *fallback, const char *description_format, ...)
4604 va_start (ap, description_format);
4606 fprintf (stderr, "Warning: In environment variable `%s': ", env_var);
4607 vfprintf (stderr, description_format, ap);
4609 fprintf (stderr, " - %s", fallback);
4610 fprintf (stderr, "\n");
/*
 * Parse OPT as a double and validate it lies within [MIN, MAX]; on any
 * failure a warning is emitted via sgen_env_var_error and the default is
 * kept (RESULT assignment lines are elided in this listing).
 */
4616 parse_double_in_interval (const char *env_var, const char *opt_name, const char *opt, double min, double max, double *result)
4619 double val = strtod (opt, &endptr);
4620 if (endptr == opt) {
4621 sgen_env_var_error (env_var, "Using default value.", "`%s` must be a number.", opt_name);
4624 else if (val < min || val > max) {
4625 sgen_env_var_error (env_var, "Using default value.", "`%s` must be between %.2f - %.2f.", opt_name, min, max);
/*
 * One-time SGen initialization: thread callbacks, allocators, TLS,
 * collector selection, and parsing of the MONO_GC_PARAMS / MONO_GC_DEBUG
 * environment variables.
 *
 * FIX(review): the `default-allowance-ratio` option previously passed
 * SGEN_MIN_ALLOWANCE_NURSERY_SIZE_RATIO as BOTH the lower and the upper
 * bound to parse_double_in_interval, rejecting every value above the
 * minimum; the upper bound is now SGEN_MAX_ALLOWANCE_NURSERY_SIZE_RATIO,
 * matching the range printed in the usage message below.
 */
4633 mono_gc_base_init (void)
4635 MonoThreadInfoCallbacks cb;
4638 char *major_collector_opt = NULL;
4639 char *minor_collector_opt = NULL;
4640 size_t max_heap = 0;
4641 size_t soft_limit = 0;
4644 gboolean debug_print_allowance = FALSE;
4645 double allowance_ratio = 0, save_target = 0;
4646 gboolean have_split_nursery = FALSE;
4647 gboolean cement_enabled = TRUE;
4649 mono_counters_init ();
4652 result = InterlockedCompareExchange (&gc_initialized, -1, 0);
4655 /* already inited */
4658 /* being inited by another thread */
4662 /* we will init it */
4665 g_assert_not_reached ();
4667 } while (result != 0);
4669 SGEN_TV_GETTIME (sgen_init_timestamp);
4671 LOCK_INIT (gc_mutex);
4673 pagesize = mono_pagesize ();
4674 gc_debug_file = stderr;
4676 cb.thread_register = sgen_thread_register;
4677 cb.thread_detach = sgen_thread_detach;
4678 cb.thread_unregister = sgen_thread_unregister;
4679 cb.thread_attach = sgen_thread_attach;
4680 cb.mono_method_is_critical = (gpointer)is_critical_method;
4682 cb.thread_exit = mono_gc_pthread_exit;
4683 cb.mono_gc_pthread_create = (gpointer)mono_gc_pthread_create;
4686 mono_threads_init (&cb, sizeof (SgenThreadInfo));
4688 LOCK_INIT (sgen_interruption_mutex);
4690 if ((env = g_getenv (MONO_GC_PARAMS_NAME))) {
4691 opts = g_strsplit (env, ",", -1);
4692 for (ptr = opts; *ptr; ++ptr) {
4694 if (g_str_has_prefix (opt, "major=")) {
4695 opt = strchr (opt, '=') + 1;
4696 major_collector_opt = g_strdup (opt);
4697 } else if (g_str_has_prefix (opt, "minor=")) {
4698 opt = strchr (opt, '=') + 1;
4699 minor_collector_opt = g_strdup (opt);
4707 sgen_init_internal_allocator ();
4708 sgen_init_nursery_allocator ();
4709 sgen_init_fin_weak_hash ();
4711 sgen_init_hash_table ();
4712 sgen_init_descriptors ();
4713 sgen_init_gray_queues ();
4715 sgen_register_fixed_internal_mem_type (INTERNAL_MEM_SECTION, SGEN_SIZEOF_GC_MEM_SECTION);
4716 sgen_register_fixed_internal_mem_type (INTERNAL_MEM_FINALIZE_READY_ENTRY, sizeof (FinalizeReadyEntry));
4717 sgen_register_fixed_internal_mem_type (INTERNAL_MEM_GRAY_QUEUE, sizeof (GrayQueueSection));
4718 sgen_register_fixed_internal_mem_type (INTERNAL_MEM_EPHEMERON_LINK, sizeof (EphemeronLinkNode));
4720 #ifndef HAVE_KW_THREAD
4721 mono_native_tls_alloc (&thread_info_key, NULL);
4722 #if defined(__APPLE__) || defined (HOST_WIN32)
4724 * CEE_MONO_TLS requires the tls offset, not the key, so the code below only works on darwin,
4725 * where the two are the same.
4727 mono_tls_key_set_offset (TLS_KEY_SGEN_THREAD_INFO, thread_info_key);
4731 int tls_offset = -1;
4732 MONO_THREAD_VAR_OFFSET (sgen_thread_info, tls_offset);
4733 mono_tls_key_set_offset (TLS_KEY_SGEN_THREAD_INFO, tls_offset);
4738 * This needs to happen before any internal allocations because
4739 * it inits the small id which is required for hazard pointer
4744 mono_thread_info_attach (&dummy);
4746 if (!minor_collector_opt) {
4747 sgen_simple_nursery_init (&sgen_minor_collector);
4749 if (!strcmp (minor_collector_opt, "simple")) {
4751 sgen_simple_nursery_init (&sgen_minor_collector);
4752 } else if (!strcmp (minor_collector_opt, "split")) {
4753 sgen_split_nursery_init (&sgen_minor_collector);
4754 have_split_nursery = TRUE;
4756 sgen_env_var_error (MONO_GC_PARAMS_NAME, "Using `simple` instead.", "Unknown minor collector `%s'.", minor_collector_opt);
4757 goto use_simple_nursery;
4761 if (!major_collector_opt || !strcmp (major_collector_opt, "marksweep")) {
4762 use_marksweep_major:
4763 sgen_marksweep_init (&major_collector);
4764 } else if (!major_collector_opt || !strcmp (major_collector_opt, "marksweep-conc")) {
4765 sgen_marksweep_conc_init (&major_collector);
4767 sgen_env_var_error (MONO_GC_PARAMS_NAME, "Using `marksweep` instead.", "Unknown major collector `%s'.", major_collector_opt);
4768 goto use_marksweep_major;
4771 ///* Keep this the default for now */
4772 /* Precise marking is broken on all supported targets. Disable until fixed. */
4773 conservative_stack_mark = TRUE;
4775 sgen_nursery_size = DEFAULT_NURSERY_SIZE;
4778 gboolean usage_printed = FALSE;
4780 for (ptr = opts; *ptr; ++ptr) {
4782 if (!strcmp (opt, ""))
4784 if (g_str_has_prefix (opt, "major="))
4786 if (g_str_has_prefix (opt, "minor="))
4788 if (g_str_has_prefix (opt, "max-heap-size=")) {
4789 size_t max_heap_candidate = 0;
4790 opt = strchr (opt, '=') + 1;
4791 if (*opt && mono_gc_parse_environment_string_extract_number (opt, &max_heap_candidate)) {
4792 max_heap = (max_heap_candidate + mono_pagesize () - 1) & ~(size_t)(mono_pagesize () - 1);
4793 if (max_heap != max_heap_candidate)
4794 sgen_env_var_error (MONO_GC_PARAMS_NAME, "Rounding up.", "`max-heap-size` size must be a multiple of %d.", mono_pagesize ());
4796 sgen_env_var_error (MONO_GC_PARAMS_NAME, NULL, "`max-heap-size` must be an integer.");
4800 if (g_str_has_prefix (opt, "soft-heap-limit=")) {
4801 opt = strchr (opt, '=') + 1;
4802 if (*opt && mono_gc_parse_environment_string_extract_number (opt, &soft_limit)) {
4803 if (soft_limit <= 0) {
4804 sgen_env_var_error (MONO_GC_PARAMS_NAME, NULL, "`soft-heap-limit` must be positive.");
4808 sgen_env_var_error (MONO_GC_PARAMS_NAME, NULL, "`soft-heap-limit` must be an integer.");
4812 if (g_str_has_prefix (opt, "stack-mark=")) {
4813 opt = strchr (opt, '=') + 1;
4814 if (!strcmp (opt, "precise")) {
4815 conservative_stack_mark = FALSE;
4816 } else if (!strcmp (opt, "conservative")) {
4817 conservative_stack_mark = TRUE;
4819 sgen_env_var_error (MONO_GC_PARAMS_NAME, conservative_stack_mark ? "Using `conservative`." : "Using `precise`.",
4820 "Invalid value `%s` for `stack-mark` option, possible values are: `precise`, `conservative`.", opt);
4824 if (g_str_has_prefix (opt, "bridge-implementation=")) {
4825 opt = strchr (opt, '=') + 1;
4826 sgen_set_bridge_implementation (opt);
4829 if (g_str_has_prefix (opt, "toggleref-test")) {
4830 sgen_register_test_toggleref_callback ();
4835 if (g_str_has_prefix (opt, "nursery-size=")) {
4837 opt = strchr (opt, '=') + 1;
4838 if (*opt && mono_gc_parse_environment_string_extract_number (opt, &val)) {
4839 #ifdef SGEN_ALIGN_NURSERY
4840 if ((val & (val - 1))) {
4841 sgen_env_var_error (MONO_GC_PARAMS_NAME, "Using default value.", "`nursery-size` must be a power of two.");
4845 if (val < SGEN_MAX_NURSERY_WASTE) {
4846 sgen_env_var_error (MONO_GC_PARAMS_NAME, "Using default value.",
4847 "`nursery-size` must be at least %d bytes.", SGEN_MAX_NURSERY_WASTE);
4851 sgen_nursery_size = val;
4852 sgen_nursery_bits = 0;
4853 while (ONE_P << (++ sgen_nursery_bits) != sgen_nursery_size)
4856 sgen_nursery_size = val;
4859 sgen_env_var_error (MONO_GC_PARAMS_NAME, "Using default value.", "`nursery-size` must be an integer.");
4865 if (g_str_has_prefix (opt, "save-target-ratio=")) {
4867 opt = strchr (opt, '=') + 1;
4868 if (parse_double_in_interval (MONO_GC_PARAMS_NAME, "save-target-ratio", opt,
4869 SGEN_MIN_SAVE_TARGET_RATIO, SGEN_MAX_SAVE_TARGET_RATIO, &val)) {
4874 if (g_str_has_prefix (opt, "default-allowance-ratio=")) {
4876 opt = strchr (opt, '=') + 1;
4877 if (parse_double_in_interval (MONO_GC_PARAMS_NAME, "default-allowance-ratio", opt,
4878 SGEN_MIN_ALLOWANCE_NURSERY_SIZE_RATIO, SGEN_MAX_ALLOWANCE_NURSERY_SIZE_RATIO, &val)) {
4879 allowance_ratio = val;
4883 if (g_str_has_prefix (opt, "allow-synchronous-major=")) {
4884 if (!major_collector.is_concurrent) {
4885 sgen_env_var_error (MONO_GC_PARAMS_NAME, "Ignoring.", "`allow-synchronous-major` is only valid for the concurrent major collector.");
4889 opt = strchr (opt, '=') + 1;
4891 if (!strcmp (opt, "yes")) {
4892 allow_synchronous_major = TRUE;
4893 } else if (!strcmp (opt, "no")) {
4894 allow_synchronous_major = FALSE;
4896 sgen_env_var_error (MONO_GC_PARAMS_NAME, "Using default value.", "`allow-synchronous-major` must be either `yes' or `no'.");
4901 if (!strcmp (opt, "cementing")) {
4902 cement_enabled = TRUE;
4905 if (!strcmp (opt, "no-cementing")) {
4906 cement_enabled = FALSE;
4910 if (major_collector.handle_gc_param && major_collector.handle_gc_param (opt))
4913 if (sgen_minor_collector.handle_gc_param && sgen_minor_collector.handle_gc_param (opt))
4916 sgen_env_var_error (MONO_GC_PARAMS_NAME, "Ignoring.", "Unknown option `%s`.", opt);
4921 fprintf (stderr, "\n%s must be a comma-delimited list of one or more of the following:\n", MONO_GC_PARAMS_NAME);
4922 fprintf (stderr, " max-heap-size=N (where N is an integer, possibly with a k, m or a g suffix)\n");
4923 fprintf (stderr, " soft-heap-limit=n (where N is an integer, possibly with a k, m or a g suffix)\n");
4924 fprintf (stderr, " nursery-size=N (where N is an integer, possibly with a k, m or a g suffix)\n");
4925 fprintf (stderr, " major=COLLECTOR (where COLLECTOR is `marksweep', `marksweep-conc', `marksweep-par')\n");
4926 fprintf (stderr, " minor=COLLECTOR (where COLLECTOR is `simple' or `split')\n");
4927 fprintf (stderr, " wbarrier=WBARRIER (where WBARRIER is `remset' or `cardtable')\n");
4928 fprintf (stderr, " stack-mark=MARK-METHOD (where MARK-METHOD is 'precise' or 'conservative')\n");
4929 fprintf (stderr, " [no-]cementing\n");
4930 if (major_collector.is_concurrent)
4931 fprintf (stderr, " allow-synchronous-major=FLAG (where FLAG is `yes' or `no')\n");
4932 if (major_collector.print_gc_param_usage)
4933 major_collector.print_gc_param_usage ();
4934 if (sgen_minor_collector.print_gc_param_usage)
4935 sgen_minor_collector.print_gc_param_usage ();
4936 fprintf (stderr, " Experimental options:\n");
4937 fprintf (stderr, " save-target-ratio=R (where R must be between %.2f - %.2f).\n", SGEN_MIN_SAVE_TARGET_RATIO, SGEN_MAX_SAVE_TARGET_RATIO);
4938 fprintf (stderr, " default-allowance-ratio=R (where R must be between %.2f - %.2f).\n", SGEN_MIN_ALLOWANCE_NURSERY_SIZE_RATIO, SGEN_MAX_ALLOWANCE_NURSERY_SIZE_RATIO);
4939 fprintf (stderr, "\n");
4941 usage_printed = TRUE;
4946 if (major_collector.is_concurrent)
4947 sgen_workers_init (1);
4949 if (major_collector_opt)
4950 g_free (major_collector_opt);
4952 if (minor_collector_opt)
4953 g_free (minor_collector_opt);
4957 sgen_cement_init (cement_enabled);
4959 if ((env = g_getenv (MONO_GC_DEBUG_NAME))) {
4960 gboolean usage_printed = FALSE;
4962 opts = g_strsplit (env, ",", -1);
4963 for (ptr = opts; ptr && *ptr; ptr ++) {
4965 if (!strcmp (opt, ""))
4967 if (opt [0] >= '0' && opt [0] <= '9') {
4968 gc_debug_level = atoi (opt);
4973 char *rf = g_strdup_printf ("%s.%d", opt, mono_process_current_pid ());
4974 gc_debug_file = fopen (rf, "wb");
4976 gc_debug_file = stderr;
4979 } else if (!strcmp (opt, "print-allowance")) {
4980 debug_print_allowance = TRUE;
4981 } else if (!strcmp (opt, "print-pinning")) {
4982 do_pin_stats = TRUE;
4983 } else if (!strcmp (opt, "verify-before-allocs")) {
4984 verify_before_allocs = 1;
4985 has_per_allocation_action = TRUE;
4986 } else if (g_str_has_prefix (opt, "verify-before-allocs=")) {
4987 char *arg = strchr (opt, '=') + 1;
4988 verify_before_allocs = atoi (arg);
4989 has_per_allocation_action = TRUE;
4990 } else if (!strcmp (opt, "collect-before-allocs")) {
4991 collect_before_allocs = 1;
4992 has_per_allocation_action = TRUE;
4993 } else if (g_str_has_prefix (opt, "collect-before-allocs=")) {
4994 char *arg = strchr (opt, '=') + 1;
4995 has_per_allocation_action = TRUE;
4996 collect_before_allocs = atoi (arg);
4997 } else if (!strcmp (opt, "verify-before-collections")) {
4998 whole_heap_check_before_collection = TRUE;
4999 } else if (!strcmp (opt, "check-at-minor-collections")) {
5000 consistency_check_at_minor_collection = TRUE;
5001 nursery_clear_policy = CLEAR_AT_GC;
5002 } else if (!strcmp (opt, "mod-union-consistency-check")) {
5003 if (!major_collector.is_concurrent) {
5004 sgen_env_var_error (MONO_GC_DEBUG_NAME, "Ignoring.", "`mod-union-consistency-check` only works with concurrent major collector.");
5007 mod_union_consistency_check = TRUE;
5008 } else if (!strcmp (opt, "check-mark-bits")) {
5009 check_mark_bits_after_major_collection = TRUE;
5010 } else if (!strcmp (opt, "check-nursery-pinned")) {
5011 check_nursery_objects_pinned = TRUE;
5012 } else if (!strcmp (opt, "xdomain-checks")) {
5013 xdomain_checks = TRUE;
5014 } else if (!strcmp (opt, "clear-at-gc")) {
5015 nursery_clear_policy = CLEAR_AT_GC;
5016 } else if (!strcmp (opt, "clear-nursery-at-gc")) {
5017 nursery_clear_policy = CLEAR_AT_GC;
5018 } else if (!strcmp (opt, "clear-at-tlab-creation")) {
5019 nursery_clear_policy = CLEAR_AT_TLAB_CREATION;
5020 } else if (!strcmp (opt, "debug-clear-at-tlab-creation")) {
5021 nursery_clear_policy = CLEAR_AT_TLAB_CREATION_DEBUG;
5022 } else if (!strcmp (opt, "check-scan-starts")) {
5023 do_scan_starts_check = TRUE;
5024 } else if (!strcmp (opt, "verify-nursery-at-minor-gc")) {
5025 do_verify_nursery = TRUE;
5026 } else if (!strcmp (opt, "check-concurrent")) {
5027 if (!major_collector.is_concurrent) {
5028 sgen_env_var_error (MONO_GC_DEBUG_NAME, "Ignoring.", "`check-concurrent` only works with concurrent major collectors.");
5031 do_concurrent_checks = TRUE;
5032 } else if (!strcmp (opt, "dump-nursery-at-minor-gc")) {
5033 do_dump_nursery_content = TRUE;
5034 } else if (!strcmp (opt, "no-managed-allocator")) {
5035 sgen_set_use_managed_allocator (FALSE);
5036 } else if (!strcmp (opt, "disable-minor")) {
5037 disable_minor_collections = TRUE;
5038 } else if (!strcmp (opt, "disable-major")) {
5039 disable_major_collections = TRUE;
5040 } else if (g_str_has_prefix (opt, "heap-dump=")) {
5041 char *filename = strchr (opt, '=') + 1;
5042 nursery_clear_policy = CLEAR_AT_GC;
5043 heap_dump_file = fopen (filename, "w");
5044 if (heap_dump_file) {
5045 fprintf (heap_dump_file, "<sgen-dump>\n");
5046 do_pin_stats = TRUE;
5048 } else if (g_str_has_prefix (opt, "binary-protocol=")) {
5049 char *filename = strchr (opt, '=') + 1;
5050 char *colon = strrchr (filename, ':');
5053 if (!mono_gc_parse_environment_string_extract_number (colon + 1, &limit)) {
5054 sgen_env_var_error (MONO_GC_DEBUG_NAME, "Ignoring limit.", "Binary protocol file size limit must be an integer.");
5059 binary_protocol_init (filename, (long long)limit);
5060 } else if (!strcmp (opt, "nursery-canaries")) {
5061 do_verify_nursery = TRUE;
5062 sgen_set_use_managed_allocator (FALSE);
5063 enable_nursery_canaries = TRUE;
5064 } else if (!sgen_bridge_handle_gc_debug (opt)) {
5065 sgen_env_var_error (MONO_GC_DEBUG_NAME, "Ignoring.", "Unknown option `%s`.", opt);
5070 fprintf (stderr, "\n%s must be of the format [<l>[:<filename>]|<option>]+ where <l> is a debug level 0-9.\n", MONO_GC_DEBUG_NAME);
5071 fprintf (stderr, "Valid <option>s are:\n");
5072 fprintf (stderr, " collect-before-allocs[=<n>]\n");
5073 fprintf (stderr, " verify-before-allocs[=<n>]\n");
5074 fprintf (stderr, " check-at-minor-collections\n");
5075 fprintf (stderr, " check-mark-bits\n");
5076 fprintf (stderr, " check-nursery-pinned\n");
5077 fprintf (stderr, " verify-before-collections\n");
5078 fprintf (stderr, " verify-nursery-at-minor-gc\n");
5079 fprintf (stderr, " dump-nursery-at-minor-gc\n");
5080 fprintf (stderr, " disable-minor\n");
5081 fprintf (stderr, " disable-major\n");
5082 fprintf (stderr, " xdomain-checks\n");
5083 fprintf (stderr, " check-concurrent\n");
5084 fprintf (stderr, " clear-[nursery-]at-gc\n");
5085 fprintf (stderr, " clear-at-tlab-creation\n");
5086 fprintf (stderr, " debug-clear-at-tlab-creation\n");
5087 fprintf (stderr, " check-scan-starts\n");
5088 fprintf (stderr, " no-managed-allocator\n");
5089 fprintf (stderr, " print-allowance\n");
5090 fprintf (stderr, " print-pinning\n");
5091 fprintf (stderr, " heap-dump=<filename>\n");
5092 fprintf (stderr, " binary-protocol=<filename>[:<file-size-limit>]\n");
5093 fprintf (stderr, " nursery-canaries\n");
5094 sgen_bridge_print_gc_debug_usage ();
5095 fprintf (stderr, "\n");
5097 usage_printed = TRUE;
5103 if (check_mark_bits_after_major_collection)
5104 nursery_clear_policy = CLEAR_AT_GC;
5106 if (major_collector.post_param_init)
5107 major_collector.post_param_init (&major_collector);
5109 sgen_memgov_init (max_heap, soft_limit, debug_print_allowance, allowance_ratio, save_target);
5111 memset (&remset, 0, sizeof (remset));
5113 sgen_card_table_init (&remset);
/* Name of the GC implementation (body elided). */
5119 mono_gc_get_gc_name (void)
/* Cached managed write-barrier wrapper, created lazily by
 * mono_gc_get_write_barrier () below. */
5124 static MonoMethod *write_barrier_method;
/* SGen-critical methods: the write-barrier wrapper and the managed
 * allocators. */
5127 sgen_is_critical_method (MonoMethod *method)
5129 return (method == write_barrier_method || sgen_is_managed_allocator (method));
/* TRUE once any SGen-critical method exists. */
5133 sgen_has_critical_method (void)
5135 return write_barrier_method || sgen_has_managed_allocator ();
/*
 * Emit the IL fast-path checks of the managed write barrier: early-return
 * branches for stores whose location (and, for non-concurrent majors,
 * whose value) does not need card marking.  Up to three branch labels are
 * returned in nursery_check_return_labels for the caller to patch.
 *
 * FIX(review): the locals were declared `label_continue1, label_continue2`
 * but every use below spells them `label_continue_1` / `label_continue_2`
 * — the declaration now matches the uses.
 */
5141 emit_nursery_check (MonoMethodBuilder *mb, int *nursery_check_return_labels)
5143 memset (nursery_check_return_labels, 0, sizeof (int) * 3);
5144 #ifdef SGEN_ALIGN_NURSERY
5145 // if (ptr_in_nursery (ptr)) return;
5147 * Masking out the bits might be faster, but we would have to use 64 bit
5148 * immediates, which might be slower.
5150 mono_mb_emit_ldarg (mb, 0);
5151 mono_mb_emit_icon (mb, DEFAULT_NURSERY_BITS);
5152 mono_mb_emit_byte (mb, CEE_SHR_UN);
5153 mono_mb_emit_ptr (mb, (gpointer)((mword)sgen_get_nursery_start () >> DEFAULT_NURSERY_BITS));
5154 nursery_check_return_labels [0] = mono_mb_emit_branch (mb, CEE_BEQ);
5156 if (!major_collector.is_concurrent) {
5157 // if (!ptr_in_nursery (*ptr)) return;
5158 mono_mb_emit_ldarg (mb, 0);
5159 mono_mb_emit_byte (mb, CEE_LDIND_I);
5160 mono_mb_emit_icon (mb, DEFAULT_NURSERY_BITS);
5161 mono_mb_emit_byte (mb, CEE_SHR_UN);
5162 mono_mb_emit_ptr (mb, (gpointer)((mword)sgen_get_nursery_start () >> DEFAULT_NURSERY_BITS));
5163 nursery_check_return_labels [1] = mono_mb_emit_branch (mb, CEE_BNE_UN);
5166 int label_continue_1, label_continue_2;
5167 int dereferenced_var;
5169 // if (ptr < (sgen_get_nursery_start ())) goto continue;
5170 mono_mb_emit_ldarg (mb, 0);
5171 mono_mb_emit_ptr (mb, (gpointer) sgen_get_nursery_start ());
5172 label_continue_1 = mono_mb_emit_branch (mb, CEE_BLT);
5174 // if (ptr >= sgen_get_nursery_end ())) goto continue;
5175 mono_mb_emit_ldarg (mb, 0);
5176 mono_mb_emit_ptr (mb, (gpointer) sgen_get_nursery_end ());
5177 label_continue_2 = mono_mb_emit_branch (mb, CEE_BGE);
5180 nursery_check_return_labels [0] = mono_mb_emit_branch (mb, CEE_BR);
5183 mono_mb_patch_branch (mb, label_continue_1);
5184 mono_mb_patch_branch (mb, label_continue_2);
5186 // Dereference and store in local var
5187 dereferenced_var = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
5188 mono_mb_emit_ldarg (mb, 0);
5189 mono_mb_emit_byte (mb, CEE_LDIND_I);
5190 mono_mb_emit_stloc (mb, dereferenced_var);
5192 if (!major_collector.is_concurrent) {
5193 // if (*ptr < sgen_get_nursery_start ()) return;
5194 mono_mb_emit_ldloc (mb, dereferenced_var);
5195 mono_mb_emit_ptr (mb, (gpointer) sgen_get_nursery_start ());
5196 nursery_check_return_labels [1] = mono_mb_emit_branch (mb, CEE_BLT);
5198 // if (*ptr >= sgen_get_nursery_end ()) return;
5199 mono_mb_emit_ldloc (mb, dereferenced_var);
5200 mono_mb_emit_ptr (mb, (gpointer) sgen_get_nursery_end ());
5201 nursery_check_return_labels [2] = mono_mb_emit_branch (mb, CEE_BGE);
/*
 * Lazily build and cache the managed (IL) write-barrier wrapper.  With
 * MANAGED_WBARRIER the IL inlines the nursery checks and the card-table
 * store; otherwise it falls back to an icall into
 * mono_gc_wbarrier_generic_nostore.  Publication uses double-checked
 * locking: a racing creation frees its duplicate method.
 */
5208 mono_gc_get_write_barrier (void)
5211 MonoMethodBuilder *mb;
5212 MonoMethodSignature *sig;
5213 #ifdef MANAGED_WBARRIER
5214 int i, nursery_check_labels [3];
5216 #ifdef HAVE_KW_THREAD
5217 int stack_end_offset = -1;
5219 MONO_THREAD_VAR_OFFSET (stack_end, stack_end_offset);
5220 g_assert (stack_end_offset != -1);
5224 // FIXME: Maybe create a separate version for ctors (the branch would be
5225 // correctly predicted more times)
5226 if (write_barrier_method)
5227 return write_barrier_method;
5229 /* Create the IL version of mono_gc_barrier_generic_store () */
5230 sig = mono_metadata_signature_alloc (mono_defaults.corlib, 1);
5231 sig->ret = &mono_defaults.void_class->byval_arg;
5232 sig->params [0] = &mono_defaults.int_class->byval_arg;
5234 mb = mono_mb_new (mono_defaults.object_class, "wbarrier", MONO_WRAPPER_WRITE_BARRIER);
5237 #ifdef MANAGED_WBARRIER
5238 emit_nursery_check (mb, nursery_check_labels);
5240 addr = sgen_cardtable + ((address >> CARD_BITS) & CARD_MASK)
5244 LDC_PTR sgen_cardtable
5246 address >> CARD_BITS
5250 if (SGEN_HAVE_OVERLAPPING_CARDS) {
5251 LDC_PTR card_table_mask
/* Emit: *(sgen_cardtable + ((arg0 >> CARD_BITS) [& CARD_MASK])) = 1 */
5258 mono_mb_emit_ptr (mb, sgen_cardtable);
5259 mono_mb_emit_ldarg (mb, 0);
5260 mono_mb_emit_icon (mb, CARD_BITS);
5261 mono_mb_emit_byte (mb, CEE_SHR_UN);
5262 #ifdef SGEN_HAVE_OVERLAPPING_CARDS
5263 mono_mb_emit_ptr (mb, (gpointer)CARD_MASK);
5264 mono_mb_emit_byte (mb, CEE_AND);
5266 mono_mb_emit_byte (mb, CEE_ADD);
5267 mono_mb_emit_icon (mb, 1);
5268 mono_mb_emit_byte (mb, CEE_STIND_I1);
/* Patch the early-return branches emitted by emit_nursery_check. */
5271 for (i = 0; i < 3; ++i) {
5272 if (nursery_check_labels [i])
5273 mono_mb_patch_branch (mb, nursery_check_labels [i]);
5275 mono_mb_emit_byte (mb, CEE_RET);
/* Non-managed-wbarrier fallback: icall into the C barrier. */
5277 mono_mb_emit_ldarg (mb, 0);
5278 mono_mb_emit_icall (mb, mono_gc_wbarrier_generic_nostore);
5279 mono_mb_emit_byte (mb, CEE_RET);
5282 res = mono_mb_create_method (mb, sig, 16);
5286 if (write_barrier_method) {
5287 /* Already created */
5288 mono_free_method (res);
5290 /* double-checked locking */
5291 mono_memory_barrier ();
5292 write_barrier_method = res;
5296 return write_barrier_method;
/* Returns a heap-allocated copy of this collector's name, "sgen" (via g_strdup). */
5300 mono_gc_get_description (void)
5302 return g_strdup ("sgen");
/* mono_gc_set_desktop_mode: body elided in this chunk — presumably a desktop-profile tuning hook; confirm against full source. */
5306 mono_gc_set_desktop_mode (void)
/* mono_gc_is_moving: reports whether this GC moves objects; body elided in this chunk. */
5311 mono_gc_is_moving (void)
/* mono_gc_is_disabled: reports whether collections are currently disabled; body elided in this chunk. */
5317 mono_gc_is_disabled (void)
/* Windows DllMain-style hook for GC attach/detach notifications; body elided in this chunk. */
5323 BOOL APIENTRY mono_gc_dllmain (HMODULE module_handle, DWORD reason, LPVOID reserved)
/* Accessor: returns the global nursery_clear_policy setting. */
5330 sgen_get_nursery_clear_policy (void)
5332 return nursery_clear_policy;
/*
 * sgen_get_array_fill_vtable:
 *
 * Lazily builds and caches (in array_fill_vtable) a synthetic vtable for a
 * fake byte-array class ("array_filler_type", element size 1).  Both the
 * class and the vtable buffer live in static storage; the vtable pointer is
 * 8-byte aligned within the oversized _vtable buffer.
 * NOTE(review): some lines are elided in this chunk — e.g. the declaration
 * of the `bmap` used at line 5355 and the assignment using `domain`.
 */
5336 sgen_get_array_fill_vtable (void)
5338 if (!array_fill_vtable) {
5339 static MonoClass klass;
/* Over-allocate by 8 so the vtable can be aligned inside the static buffer. */
5340 static char _vtable[sizeof(MonoVTable)+8];
5341 MonoVTable* vtable = (MonoVTable*) ALIGN_TO(_vtable, 8);
5344 MonoDomain *domain = mono_get_root_domain ();
/* Shape the fake class as a byte array: MonoArray header + 1-byte elements. */
5347 klass.element_class = mono_defaults.byte_class;
5349 klass.instance_size = sizeof (MonoArray);
5350 klass.sizes.element_size = 1;
5351 klass.name = "array_filler_type";
5353 vtable->klass = &klass;
/* GC descriptor for a pointer-free array (bmap declared on an elided line). */
5355 vtable->gc_descr = mono_gc_make_descr_for_array (TRUE, &bmap, 0, 1);
5358 array_fill_vtable = vtable;
5360 return array_fill_vtable;
/*
 * sgen_gc_unlock:
 *
 * Releases the global GC lock.  Snapshots and clears
 * sgen_try_free_some_memory while still holding the lock, unlocks, fires
 * the MONO_GC_UNLOCKED probe, then lets the hazard-pointer machinery
 * attempt deferred frees.
 * NOTE(review): the guard making the hazardous-free call conditional on
 * `try_free` appears to be on an elided line — confirm against full source.
 */
5370 sgen_gc_unlock (void)
5372 gboolean try_free = sgen_try_free_some_memory;
5373 sgen_try_free_some_memory = FALSE;
5374 mono_mutex_unlock (&gc_mutex);
5375 MONO_GC_UNLOCKED ();
5377 mono_thread_hazardous_try_free_some ();
/* Thin forwarder: delegates live-block-range iteration to the active major collector. */
5381 sgen_major_collector_iterate_live_block_ranges (sgen_cardtable_block_callback callback)
5383 major_collector.iterate_live_block_ranges (callback);
/* Thin forwarder: asks the major collector to scan its card table into `queue`.
   The FALSE first argument's meaning is not visible here — presumably "not mod-union"; confirm against the collector interface. */
5387 sgen_major_collector_scan_card_table (SgenGrayQueue *queue)
5389 major_collector.scan_card_table (FALSE, queue);
/* Accessor: returns a pointer to the file-scope major_collector instance. */
5393 sgen_get_major_collector (void)
5395 return &major_collector;
/* Sets the gc_disabled flag on the current thread's SgenThreadInfo — used to make the
   GC skip this thread; any lock/unlock surrounding the store is on elided lines. */
5398 void mono_gc_set_skip_thread (gboolean skip)
5400 SgenThreadInfo *info = mono_thread_info_current ();
5403 info->gc_disabled = skip;
/* sgen_get_remset: returns the active remembered-set implementation; body elided in this chunk. */
5408 sgen_get_remset (void)
/*
 * mono_gc_get_vtable_bits:
 *
 * Computes the per-class GC flag bits to be stored in the class's vtable:
 * a bridge bit when bridge processing is active and the class participates
 * in bridging, plus the finalizer-aware bit when the embedder's
 * is_class_finalization_aware callback says so.
 * NOTE(review): the declaration/initialization of `res`, the switch
 * `break`s, the default case and the final return are on elided lines.
 */
5414 mono_gc_get_vtable_bits (MonoClass *class)
5417 /* FIXME move this to the bridge code */
5418 if (sgen_need_bridge_processing ()) {
5419 switch (sgen_bridge_class_kind (class)) {
5420 case GC_BRIDGE_TRANSPARENT_BRIDGE_CLASS:
5421 case GC_BRIDGE_OPAQUE_BRIDGE_CLASS:
5422 res = SGEN_GC_BIT_BRIDGE_OBJECT;
5424 case GC_BRIDGE_OPAQUE_CLASS:
5425 res = SGEN_GC_BIT_BRIDGE_OPAQUE_OBJECT;
/* Only consult the embedder callback when one was actually registered. */
5429 if (fin_callbacks.is_class_finalization_aware) {
5430 if (fin_callbacks.is_class_finalization_aware (class))
5431 res |= SGEN_GC_BIT_FINALIZER_AWARE;
/* mono_gc_register_altstack: registers a thread's stack/altstack bounds with the GC; body elided in this chunk. */
5437 mono_gc_register_altstack (gpointer stack, gint32 stack_size, gpointer altstack, gint32 altstack_size)
/* Debug helper: stops the world, clears nursery fragments, runs a
   whole-heap consistency check (FALSE argument's meaning not visible here),
   then restarts the world. */
5444 sgen_check_whole_heap_stw (void)
5446 sgen_stop_world (0);
5447 sgen_clear_nursery_fragments ();
5448 sgen_check_whole_heap (FALSE);
5449 sgen_restart_world (0, NULL);
/* Flushes any buffered object-move records to the profiler and resets the buffer index. */
5453 sgen_gc_event_moves (void)
5455 if (moved_objects_idx) {
5456 mono_profiler_gc_moves (moved_objects, moved_objects_idx);
5457 moved_objects_idx = 0;
/* Returns the time elapsed since sgen_init_timestamp, in SGEN_TV_ELAPSED units. */
5462 sgen_timestamp (void)
5464 SGEN_TV_DECLARE (timestamp);
5465 SGEN_TV_GETTIME (timestamp);
5466 return SGEN_TV_ELAPSED (sgen_init_timestamp, timestamp);
/* Registers the embedder's finalizer-awareness callbacks: aborts via g_error
   on a version mismatch, otherwise copies the callback struct by value into
   the file-scope fin_callbacks. */
5470 mono_gc_register_finalizer_callbacks (MonoGCFinalizerCallbacks *callbacks)
5472 if (callbacks->version != MONO_GC_FINALIZER_EXTENSION_VERSION)
5473 g_error ("Invalid finalizer callback version. Expected %d but got %d\n", MONO_GC_FINALIZER_EXTENSION_VERSION, callbacks->version);
5475 fin_callbacks = *callbacks;
5482 #endif /* HAVE_SGEN_GC */