2 * sgen-gc.c: Simple generational GC.
5 * Paolo Molaro (lupus@ximian.com)
6 * Rodrigo Kumpera (kumpera@gmail.com)
8 * Copyright 2005-2011 Novell, Inc (http://www.novell.com)
9 * Copyright 2011 Xamarin Inc (http://www.xamarin.com)
11 * Thread start/stop adapted from Boehm's GC:
12 * Copyright (c) 1994 by Xerox Corporation. All rights reserved.
13 * Copyright (c) 1996 by Silicon Graphics. All rights reserved.
14 * Copyright (c) 1998 by Fergus Henderson. All rights reserved.
15 * Copyright (c) 2000-2004 by Hewlett-Packard Company. All rights reserved.
16 * Copyright 2001-2003 Ximian, Inc
17 * Copyright 2003-2010 Novell, Inc.
18 * Copyright 2011 Xamarin, Inc.
19 * Copyright (C) 2012 Xamarin Inc
21 * This library is free software; you can redistribute it and/or
22 * modify it under the terms of the GNU Library General Public
23 * License 2.0 as published by the Free Software Foundation;
25 * This library is distributed in the hope that it will be useful,
26 * but WITHOUT ANY WARRANTY; without even the implied warranty of
27 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
28 * Library General Public License for more details.
30 * You should have received a copy of the GNU Library General Public
31 * License 2.0 along with this library; if not, write to the Free
32 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
34 * Important: allocation provides always zeroed memory, having to do
35 * a memset after allocation is deadly for performance.
36 * Memory usage at startup is currently as follows:
38 * 64 KB internal space
40 * We should provide a small memory config with half the sizes
42 * We currently try to make as few mono assumptions as possible:
43 * 1) 2-word header with no GC pointers in it (first vtable, second to store the
45 * 2) gc descriptor is the second word in the vtable (first word in the class)
46 * 3) 8 byte alignment is the minimum and enough (not true for special structures (SIMD), FIXME)
47 * 4) there is a function to get an object's size and the number of
48 * elements in an array.
49 * 5) we know the special way bounds are allocated for complex arrays
50 * 6) we know about proxies and how to treat them when domains are unloaded
52 * Always try to keep stack usage to a minimum: no recursive behaviour
53 * and no large stack allocs.
55 * General description.
56 * Objects are initially allocated in a nursery using a fast bump-pointer technique.
57 * When the nursery is full we start a nursery collection: this is performed with a
59 * When the old generation is full we start a copying GC of the old generation as well:
60 * this will be changed to mark&sweep with copying when fragmentation becomes too severe
61 * in the future. Maybe we'll even do both during the same collection like IMMIX.
63 * The things that complicate this description are:
64 * *) pinned objects: we can't move them so we need to keep track of them
65 * *) no precise info of the thread stacks and registers: we need to be able to
66 * quickly find the objects that may be referenced conservatively and pin them
67 * (this makes the first issues more important)
68 * *) large objects are too expensive to be dealt with using copying GC: we handle them
69 * with mark/sweep during major collections
70 * *) some objects need to not move even if they are small (interned strings, Type handles):
71 * we use mark/sweep for them, too: they are not allocated in the nursery, but inside
72 * PinnedChunks regions
78 *) we could have a function pointer in MonoClass to implement
79 customized write barriers for value types
81 *) investigate the stuff needed to advance a thread to a GC-safe
82 point (single-stepping, read from unmapped memory etc) and implement it.
83 This would enable us to inline allocations and write barriers, for example,
84 or at least parts of them, like the write barrier checks.
85 We may need this also for handling precise info on stacks, even simple things
86 as having uninitialized data on the stack and having to wait for the prolog
87 to zero it. Not an issue for the last frame that we scan conservatively.
88 We could always not trust the value in the slots anyway.
90 *) modify the jit to save info about references in stack locations:
91 this can be done just for locals as a start, so that at least
92 part of the stack is handled precisely.
94 *) test/fix endianness issues
96 *) Implement a card table as the write barrier instead of remembered
97 sets? Card tables are not easy to implement with our current
98 memory layout. We have several different kinds of major heap
99 objects: Small objects in regular blocks, small objects in pinned
100 chunks and LOS objects. If we just have a pointer we have no way
101 to tell which kind of object it points into, therefore we cannot
102 know where its card table is. The least we have to do to make
103 this happen is to get rid of write barriers for indirect stores.
106 *) Get rid of write barriers for indirect stores. We can do this by
107 telling the GC to wbarrier-register an object once we do an ldloca
108 or ldelema on it, and to unregister it once it's not used anymore
109 (it can only travel downwards on the stack). The problem with
110 unregistering is that it needs to happen eventually no matter
111 what, even if exceptions are thrown, the thread aborts, etc.
112 Rodrigo suggested that we could do only the registering part and
113 let the collector find out (pessimistically) when it's safe to
114 unregister, namely when the stack pointer of the thread that
115 registered the object is higher than it was when the registering
116 happened. This might make for a good first implementation to get
117 some data on performance.
119 *) Some sort of blacklist support? Blacklists is a concept from the
120 Boehm GC: if during a conservative scan we find pointers to an
121 area which we might use as heap, we mark that area as unusable, so
122 pointer retention by random pinning pointers is reduced.
124 *) experiment with max small object size (very small right now - 2kb,
125 because it's tied to the max freelist size)
127 *) add an option to mmap the whole heap in one chunk: it makes for many
128 simplifications in the checks (put the nursery at the top and just use a single
129 check for inclusion/exclusion): the issue this has is that on 32 bit systems it's
130 not flexible (too much of the address space may be used by default or we can't
131 increase the heap as needed) and we'd need a race-free mechanism to return memory
132 back to the system (mprotect(PROT_NONE) will still keep the memory allocated if it
133 was written to, munmap is needed, but the following mmap may not find the same segment
136 *) memzero the major fragments after restarting the world and optionally a smaller
139 *) investigate having fragment zeroing threads
141 *) separate locks for finalization and other minor stuff to reduce
144 *) try a different copying order to improve memory locality
146 *) a thread abort after a store but before the write barrier will
147 prevent the write barrier from executing
149 *) specialized dynamically generated markers/copiers
151 *) Dynamically adjust TLAB size to the number of threads. If we have
152 too many threads that do allocation, we might need smaller TLABs,
153 and we might get better performance with larger TLABs if we only
154 have a handful of threads. We could sum up the space left in all
155 assigned TLABs and if that's more than some percentage of the
156 nursery size, reduce the TLAB size.
158 *) Explore placing unreachable objects on unused nursery memory.
159 Instead of memset'ng a region to zero, place an int[] covering it.
160 A good place to start is add_nursery_frag. The tricky thing here is
161 placing those objects atomically outside of a collection.
163 *) Allocation should use asymmetric Dekker synchronization:
164 http://blogs.oracle.com/dave/resource/Asymmetric-Dekker-Synchronization.txt
165 This should help weak consistency archs.
172 #define _XOPEN_SOURCE
173 #define _DARWIN_C_SOURCE
179 #ifdef HAVE_PTHREAD_H
182 #ifdef HAVE_PTHREAD_NP_H
183 #include <pthread_np.h>
185 #ifdef HAVE_SEMAPHORE_H
186 #include <semaphore.h>
193 #include "metadata/sgen-gc.h"
194 #include "metadata/metadata-internals.h"
195 #include "metadata/class-internals.h"
196 #include "metadata/gc-internal.h"
197 #include "metadata/object-internals.h"
198 #include "metadata/threads.h"
199 #include "metadata/sgen-cardtable.h"
200 #include "metadata/sgen-protocol.h"
201 #include "metadata/sgen-archdep.h"
202 #include "metadata/sgen-bridge.h"
203 #include "metadata/sgen-memory-governor.h"
204 #include "metadata/sgen-hash-table.h"
205 #include "metadata/mono-gc.h"
206 #include "metadata/method-builder.h"
207 #include "metadata/profiler-private.h"
208 #include "metadata/monitor.h"
209 #include "metadata/mempool-internals.h"
210 #include "metadata/marshal.h"
211 #include "metadata/runtime.h"
212 #include "metadata/sgen-cardtable.h"
213 #include "metadata/sgen-pinning.h"
214 #include "metadata/sgen-workers.h"
215 #include "metadata/sgen-layout-stats.h"
216 #include "utils/mono-mmap.h"
217 #include "utils/mono-time.h"
218 #include "utils/mono-semaphore.h"
219 #include "utils/mono-counters.h"
220 #include "utils/mono-proclib.h"
221 #include "utils/mono-memory-model.h"
222 #include "utils/mono-logger-internal.h"
223 #include "utils/dtrace.h"
225 #include <mono/utils/mono-logger-internal.h>
226 #include <mono/utils/memcheck.h>
228 #if defined(__MACH__)
229 #include "utils/mach-support.h"
232 #define OPDEF(a,b,c,d,e,f,g,h,i,j) \
236 #include "mono/cil/opcode.def"
242 #undef pthread_create
244 #undef pthread_detach
247 * ######################################################################
248 * ######## Types and constants used by the GC.
249 * ######################################################################
252 /* 0 means not initialized, 1 is initialized, -1 means in progress */
253 static int gc_initialized = 0;
254 /* If set, check if we need to do something every X allocations */
255 gboolean has_per_allocation_action;
256 /* If set, do a heap check every X allocation */
257 guint32 verify_before_allocs = 0;
258 /* If set, do a minor collection before every X allocation */
259 guint32 collect_before_allocs = 0;
260 /* If set, do a whole heap check before each collection */
261 static gboolean whole_heap_check_before_collection = FALSE;
262 /* If set, do a heap consistency check before each minor collection */
263 static gboolean consistency_check_at_minor_collection = FALSE;
264 /* If set, do a mod union consistency check before each finishing collection pause */
265 static gboolean mod_union_consistency_check = FALSE;
266 /* If set, check whether mark bits are consistent after major collections */
267 static gboolean check_mark_bits_after_major_collection = FALSE;
268 /* If set, check that all nursery objects are pinned/not pinned, depending on context */
269 static gboolean check_nursery_objects_pinned = FALSE;
270 /* If set, do a few checks when the concurrent collector is used */
271 static gboolean do_concurrent_checks = FALSE;
272 /* If set, check that there are no references to the domain left at domain unload */
273 static gboolean xdomain_checks = FALSE;
274 /* If not null, dump the heap after each collection into this file */
275 static FILE *heap_dump_file = NULL;
276 /* If set, mark stacks conservatively, even if precise marking is possible */
277 static gboolean conservative_stack_mark = FALSE;
278 /* If set, do a plausibility check on the scan_starts before and after
280 static gboolean do_scan_starts_check = FALSE;
282 * If the major collector is concurrent and this is FALSE, we will
283 * never initiate a synchronous major collection, unless requested via
286 static gboolean allow_synchronous_major = TRUE;
287 static gboolean disable_minor_collections = FALSE;
288 static gboolean disable_major_collections = FALSE;
289 gboolean do_pin_stats = FALSE;
290 static gboolean do_verify_nursery = FALSE;
291 static gboolean do_dump_nursery_content = FALSE;
292 static gboolean enable_nursery_canaries = FALSE;
294 #ifdef HEAVY_STATISTICS
295 guint64 stat_objects_alloced_degraded = 0;
296 guint64 stat_bytes_alloced_degraded = 0;
298 guint64 stat_copy_object_called_nursery = 0;
299 guint64 stat_objects_copied_nursery = 0;
300 guint64 stat_copy_object_called_major = 0;
301 guint64 stat_objects_copied_major = 0;
303 guint64 stat_scan_object_called_nursery = 0;
304 guint64 stat_scan_object_called_major = 0;
306 guint64 stat_slots_allocated_in_vain;
308 guint64 stat_nursery_copy_object_failed_from_space = 0;
309 guint64 stat_nursery_copy_object_failed_forwarded = 0;
310 guint64 stat_nursery_copy_object_failed_pinned = 0;
311 guint64 stat_nursery_copy_object_failed_to_space = 0;
313 static int stat_wbarrier_add_to_global_remset = 0;
314 static int stat_wbarrier_set_field = 0;
315 static int stat_wbarrier_set_arrayref = 0;
316 static int stat_wbarrier_arrayref_copy = 0;
317 static int stat_wbarrier_generic_store = 0;
318 static int stat_wbarrier_generic_store_atomic = 0;
319 static int stat_wbarrier_set_root = 0;
320 static int stat_wbarrier_value_copy = 0;
321 static int stat_wbarrier_object_copy = 0;
324 static guint64 stat_pinned_objects = 0;
326 static guint64 time_minor_pre_collection_fragment_clear = 0;
327 static guint64 time_minor_pinning = 0;
328 static guint64 time_minor_scan_remsets = 0;
329 static guint64 time_minor_scan_pinned = 0;
330 static guint64 time_minor_scan_registered_roots = 0;
331 static guint64 time_minor_scan_thread_data = 0;
332 static guint64 time_minor_finish_gray_stack = 0;
333 static guint64 time_minor_fragment_creation = 0;
335 static guint64 time_major_pre_collection_fragment_clear = 0;
336 static guint64 time_major_pinning = 0;
337 static guint64 time_major_scan_pinned = 0;
338 static guint64 time_major_scan_registered_roots = 0;
339 static guint64 time_major_scan_thread_data = 0;
340 static guint64 time_major_scan_alloc_pinned = 0;
341 static guint64 time_major_scan_finalized = 0;
342 static guint64 time_major_scan_big_objects = 0;
343 static guint64 time_major_finish_gray_stack = 0;
344 static guint64 time_major_free_bigobjs = 0;
345 static guint64 time_major_los_sweep = 0;
346 static guint64 time_major_sweep = 0;
347 static guint64 time_major_fragment_creation = 0;
349 static guint64 time_max = 0;
351 static SGEN_TV_DECLARE (time_major_conc_collection_start);
352 static SGEN_TV_DECLARE (time_major_conc_collection_end);
354 static SGEN_TV_DECLARE (last_minor_collection_start_tv);
355 static SGEN_TV_DECLARE (last_minor_collection_end_tv);
357 int gc_debug_level = 0;
360 static MonoGCFinalizerCallbacks fin_callbacks;
/* Flush the GC debug log stream so any buffered diagnostic output reaches the file. */
364 mono_gc_flush_info (void)
366 	fflush (gc_debug_file);
370 #define TV_DECLARE SGEN_TV_DECLARE
371 #define TV_GETTIME SGEN_TV_GETTIME
372 #define TV_ELAPSED SGEN_TV_ELAPSED
374 SGEN_TV_DECLARE (sgen_init_timestamp);
376 #define ALIGN_TO(val,align) ((((guint64)val) + ((align) - 1)) & ~((align) - 1))
378 NurseryClearPolicy nursery_clear_policy = CLEAR_AT_TLAB_CREATION;
380 #define object_is_forwarded SGEN_OBJECT_IS_FORWARDED
381 #define object_is_pinned SGEN_OBJECT_IS_PINNED
382 #define pin_object SGEN_PIN_OBJECT
384 #define ptr_in_nursery sgen_ptr_in_nursery
386 #define LOAD_VTABLE SGEN_LOAD_VTABLE
/*
 * Return the class name of OBJ, read via its vtable.
 * OBJ must be a valid managed object with an intact (non-forwarded-away) vtable.
 */
389 safe_name (void* obj)
391 	MonoVTable *vt = (MonoVTable*)LOAD_VTABLE (obj);
392 	return vt->klass->name;
/* Accessor for the nursery-canary debugging option (set at GC init time). */
396 nursery_canaries_enabled (void)
398 	return enable_nursery_canaries;
401 #define safe_object_get_size sgen_safe_object_get_size
/* Public wrapper around safe_name () for use by other SGen modules. */
404 sgen_safe_name (void* obj)
406 	return safe_name (obj);
410 * ######################################################################
411 * ######## Global data.
412 * ######################################################################
414 LOCK_DECLARE (gc_mutex);
415 gboolean sgen_try_free_some_memory;
417 #define SCAN_START_SIZE SGEN_SCAN_START_SIZE
419 static mword pagesize = 4096;
420 size_t degraded_mode = 0;
422 static mword bytes_pinned_from_failed_allocation = 0;
424 GCMemSection *nursery_section = NULL;
425 static volatile mword lowest_heap_address = ~(mword)0;
426 static volatile mword highest_heap_address = 0;
428 LOCK_DECLARE (sgen_interruption_mutex);
430 typedef struct _FinalizeReadyEntry FinalizeReadyEntry;
431 struct _FinalizeReadyEntry {
432 FinalizeReadyEntry *next;
436 typedef struct _EphemeronLinkNode EphemeronLinkNode;
438 struct _EphemeronLinkNode {
439 EphemeronLinkNode *next;
448 int current_collection_generation = -1;
449 volatile gboolean concurrent_collection_in_progress = FALSE;
451 /* objects that are ready to be finalized */
452 static FinalizeReadyEntry *fin_ready_list = NULL;
453 static FinalizeReadyEntry *critical_fin_list = NULL;
455 static EphemeronLinkNode *ephemeron_list;
457 /* registered roots: the key to the hash is the root start address */
459 * Different kinds of roots are kept separate to speed up pin_from_roots () for example.
461 SgenHashTable roots_hash [ROOT_TYPE_NUM] = {
462 SGEN_HASH_TABLE_INIT (INTERNAL_MEM_ROOTS_TABLE, INTERNAL_MEM_ROOT_RECORD, sizeof (RootRecord), mono_aligned_addr_hash, NULL),
463 SGEN_HASH_TABLE_INIT (INTERNAL_MEM_ROOTS_TABLE, INTERNAL_MEM_ROOT_RECORD, sizeof (RootRecord), mono_aligned_addr_hash, NULL),
464 SGEN_HASH_TABLE_INIT (INTERNAL_MEM_ROOTS_TABLE, INTERNAL_MEM_ROOT_RECORD, sizeof (RootRecord), mono_aligned_addr_hash, NULL)
466 static mword roots_size = 0; /* amount of memory in the root set */
468 #define GC_ROOT_NUM 32
470 int count; /* must be the first field */
471 void *objects [GC_ROOT_NUM];
472 int root_types [GC_ROOT_NUM];
473 uintptr_t extra_info [GC_ROOT_NUM];
/*
 * Flush the accumulated root entries of REPORT to the profiler.
 * NOTE(review): lines between the signature and the profiler call are not
 * visible here — presumably the report count is reset after notification;
 * confirm against the full source.
 */
477 notify_gc_roots (GCRootReport *report)
481 	mono_profiler_gc_roots (report->count, report->objects, report->root_types, report->extra_info);
/*
 * Append one root entry (object, root type) to REPORT, flushing to the
 * profiler first if the fixed-size buffer (GC_ROOT_NUM entries) is full.
 * NOTE(review): the extra_info parameter is not stored in the visible lines —
 * the object's klass pointer is recorded in its place; verify this is intended.
 */
486 add_profile_gc_root (GCRootReport *report, void *object, int rtype, uintptr_t extra_info)
488 	if (report->count == GC_ROOT_NUM)
489 	notify_gc_roots (report);
490 	report->objects [report->count] = object;
491 	report->root_types [report->count] = rtype;
492 	report->extra_info [report->count++] = (uintptr_t)((MonoVTable*)LOAD_VTABLE (object))->klass;
495 MonoNativeTlsKey thread_info_key;
497 #ifdef HAVE_KW_THREAD
498 __thread SgenThreadInfo *sgen_thread_info;
499 __thread char *stack_end;
502 /* The size of a TLAB */
503 /* The bigger the value, the less often we have to go to the slow path to allocate a new
504 * one, but the more space is wasted by threads not allocating much memory.
506 * FIXME: Make this self-tuning for each thread.
508 guint32 tlab_size = (1024 * 4);
510 #define MAX_SMALL_OBJ_SIZE SGEN_MAX_SMALL_OBJ_SIZE
512 /* Functions supplied by the runtime to be called by the GC */
513 static MonoGCCallbacks gc_callbacks;
515 #define ALLOC_ALIGN SGEN_ALLOC_ALIGN
517 #define ALIGN_UP SGEN_ALIGN_UP
519 #define MOVED_OBJECTS_NUM 64
520 static void *moved_objects [MOVED_OBJECTS_NUM];
521 static int moved_objects_idx = 0;
523 /* Vtable of the objects used to fill out nursery fragments before a collection */
524 static MonoVTable *array_fill_vtable;
526 #ifdef SGEN_DEBUG_INTERNAL_ALLOC
527 MonoNativeThreadId main_gc_thread = NULL;
530 /*Object was pinned during the current collection*/
531 static mword objects_pinned;
534 * ######################################################################
535 * ######## Macros and function declarations.
536 * ######################################################################
/*
 * Round PTR up to the next gpointer-aligned address
 * (add alignment-1, then mask the low bits).
 */
540 align_pointer (void *ptr)
542 	mword p = (mword)ptr;
543 	p += sizeof (gpointer) - 1;
544 	p &= ~ (sizeof (gpointer) - 1);
548 typedef SgenGrayQueue GrayQueue;
550 /* forward declarations */
551 static void scan_thread_data (void *start_nursery, void *end_nursery, gboolean precise, GrayQueue *queue);
552 static void scan_from_registered_roots (char *addr_start, char *addr_end, int root_type, ScanCopyContext ctx);
553 static void scan_finalizer_entries (FinalizeReadyEntry *list, ScanCopyContext ctx);
554 static void report_finalizer_roots (void);
555 static void report_registered_roots (void);
557 static void pin_from_roots (void *start_nursery, void *end_nursery, GrayQueue *queue);
558 static void finish_gray_stack (int generation, GrayQueue *queue);
560 void mono_gc_scan_for_specific_ref (MonoObject *key, gboolean precise);
563 static void init_stats (void);
565 static int mark_ephemerons_in_range (ScanCopyContext ctx);
566 static void clear_unreachable_ephemerons (ScanCopyContext ctx);
567 static void null_ephemerons_for_domain (MonoDomain *domain);
569 SgenObjectOperations current_object_ops;
570 SgenMajorCollector major_collector;
571 SgenMinorCollector sgen_minor_collector;
572 static GrayQueue gray_queue;
574 static SgenRemeberedSet remset;
576 /* The gray queue to use from the main collection thread. */
577 #define WORKERS_DISTRIBUTE_GRAY_QUEUE (&gray_queue)
580 * The gray queue a worker job must use. If we're not parallel or
581 * concurrent, we use the main gray queue.
/*
 * Pick the gray queue a worker job must use: the worker's private queue
 * when running on a worker thread, otherwise the main (distribute) queue.
 */
583 static SgenGrayQueue*
584 sgen_workers_get_job_gray_queue (WorkerData *worker_data)
586 	return worker_data ? &worker_data->private_gray_queue : WORKERS_DISTRIBUTE_GRAY_QUEUE;
/*
 * Move all sections of QUEUE onto the section gray queue stored in
 * queue->alloc_prepare_data, then wake the concurrent workers if any
 * sections were moved. Only legal while a concurrent collection is in
 * progress (asserted below). Several lines of this function are not
 * visible in this excerpt.
 */
590 gray_queue_redirect (SgenGrayQueue *queue)
592 	gboolean wake = FALSE;
595 	GrayQueueSection *section = sgen_gray_object_dequeue_section (queue);
598 	sgen_section_gray_queue_enqueue (queue->alloc_prepare_data, section);
603 	g_assert (concurrent_collection_in_progress);
604 	if (sgen_workers_have_started ()) {
605 	sgen_workers_ensure_awake ();
607 	if (concurrent_collection_in_progress)
608 	g_assert (current_collection_generation == -1);
/*
 * Install gray_queue_redirect () as QUEUE's alloc-prepare hook so newly
 * filled sections are handed to the concurrent workers, and redirect any
 * sections already queued. No-op unless a concurrent collection is running.
 */
614 gray_queue_enable_redirect (SgenGrayQueue *queue)
616 	if (!concurrent_collection_in_progress)
619 	sgen_gray_queue_set_alloc_prepare (queue, gray_queue_redirect, sgen_workers_get_distribute_section_gray_queue ());
620 	gray_queue_redirect (queue);
/*
 * Walk the objects in [start, end) and invoke CALLBACK (obj, size, data) on
 * each real object, skipping zeroed memory and nursery-filler arrays
 * (objects whose vtable is array_fill_vtable). Forwarded objects are scanned
 * through their forwarding pointer. Several lines (declarations, closing
 * braces, pointer advancement) are not visible in this excerpt.
 */
624 sgen_scan_area_with_callback (char *start, char *end, IterateObjectCallbackFunc callback, void *data, gboolean allow_flags)
626 	while (start < end) {
630 	if (!*(void**)start) {
631 	start += sizeof (void*); /* should be ALLOC_ALIGN, really */
636 	if (!(obj = SGEN_OBJECT_IS_FORWARDED (start)))
642 	if ((MonoVTable*)SGEN_LOAD_VTABLE (obj) != array_fill_vtable) {
643 	CHECK_CANARY_FOR_OBJECT (obj);
644 	size = ALIGN_UP (safe_object_get_size ((MonoObject*)obj));
645 	callback (obj, size, data);
646 	CANARIFY_SIZE (size);
648 	size = ALIGN_UP (safe_object_get_size ((MonoObject*)obj));
/*
 * Decide whether the object at START belongs to DOMAIN and therefore must be
 * removed during domain unload; logs and records the cleanup in the binary
 * protocol when it does. The return statements are not visible in this
 * excerpt — presumably TRUE inside the branch, FALSE otherwise.
 */
656 need_remove_object_for_domain (char *start, MonoDomain *domain)
658 	if (mono_object_domain (start) == domain) {
659 	SGEN_LOG (4, "Need to cleanup object %p", start);
660 	binary_protocol_cleanup (start, (gpointer)LOAD_VTABLE (start), safe_object_get_size ((MonoObject*)start));
/*
 * Per-object fixups needed before clearing DOMAIN: asserts that internal
 * thread objects live in the root domain, and nulls out a remoting proxy's
 * unwrapped_server pointer when the server object belongs to the dying
 * domain (or has already been obliterated).
 */
667 process_object_for_domain_clearing (char *start, MonoDomain *domain)
669 	GCVTable *vt = (GCVTable*)LOAD_VTABLE (start);
670 	if (vt->klass == mono_defaults.internal_thread_class)
671 	g_assert (mono_object_domain (start) == mono_get_root_domain ());
672 	/* The object could be a proxy for an object in the domain
673 	   being unloaded. */
674 	#ifndef DISABLE_REMOTING
675 	if (mono_defaults.real_proxy_class->supertypes && mono_class_has_parent_fast (vt->klass, mono_defaults.real_proxy_class)) {
676 	MonoObject *server = ((MonoRealProxy*)start)->unwrapped_server;
678 	/* The server could already have been zeroed out, so
679 	   we need to check for that, too. */
680 	if (server && (!LOAD_VTABLE (server) || mono_object_domain (server) == domain)) {
681 	SGEN_LOG (4, "Cleaning up remote pointer in %p to object %p", start, server);
682 	((MonoRealProxy*)start)->unwrapped_server = NULL;
/*
 * Run the domain-clearing fixups on OBJ and determine whether it must be
 * removed. If it is being removed and has a synchronisation block, its
 * monitor weak link is registered as a disappearing link so the monitor is
 * cleaned up. Return statement not visible in this excerpt — presumably
 * returns the `remove` decision.
 */
689 clear_domain_process_object (char *obj, MonoDomain *domain)
693 	process_object_for_domain_clearing (obj, domain);
694 	remove = need_remove_object_for_domain (obj, domain);
696 	if (remove && ((MonoObject*)obj)->synchronisation) {
697 	void **dislink = mono_monitor_get_object_monitor_weak_link ((MonoObject*)obj);
699 	sgen_register_disappearing_link (NULL, dislink, FALSE, TRUE);
/*
 * Nursery-object callback for domain clearing: objects of the dying domain
 * are zeroed in place (after canary adjustment) since nursery memory is
 * reused directly.
 */
706 clear_domain_process_minor_object_callback (char *obj, size_t size, MonoDomain *domain)
708 	if (clear_domain_process_object (obj, domain)) {
709 	CANARIFY_SIZE (size);
710 	memset (obj, 0, size);
/*
 * First-pass major-heap callback: only runs the clearing fixups; the actual
 * freeing happens in a later pass (see the two-pass comment in
 * mono_gc_clear_domain).
 */
715 clear_domain_process_major_object_callback (char *obj, size_t size, MonoDomain *domain)
717 	clear_domain_process_object (obj, domain);
/* Second-pass callback: free a non-pinned major-heap object of the dying domain. */
721 clear_domain_free_major_non_pinned_object_callback (char *obj, size_t size, MonoDomain *domain)
723 	if (need_remove_object_for_domain (obj, domain))
724 	major_collector.free_non_pinned_object (obj, size);
/* Second-pass callback: free a pinned major-heap object of the dying domain. */
728 clear_domain_free_major_pinned_object_callback (char *obj, size_t size, MonoDomain *domain)
730 	if (need_remove_object_for_domain (obj, domain))
731 	major_collector.free_pinned_object (obj, size);
735 * When appdomains are unloaded we can easily remove objects that have finalizers,
736 * but all the others could still be present in random places on the heap.
737 * We need a sweep to get rid of them even though it's going to be costly
739 * The reason we need to remove them is because we access the vtable and class
740 * structures to know the object size and the reference bitmap: once the domain is
741 * unloaded they point to random memory.
/*
 * Remove every object belonging to DOMAIN from the heap during appdomain
 * unload. Sequence (several connecting lines not visible in this excerpt,
 * including the stop-the-world call that must pair with sgen_restart_world
 * below): finish any concurrent collection, process staged finalizer/dislink
 * entries, run xdomain checks if enabled, null ephemerons and weak links,
 * drop finalizers, sweep the nursery, then do the two-pass sweep over major
 * and large objects described in the comment below.
 */
744 mono_gc_clear_domain (MonoDomain * domain)
746 	LOSObject *bigobj, *prev;
751 	binary_protocol_domain_unload_begin (domain);
755 	if (concurrent_collection_in_progress)
756 	sgen_perform_collection (0, GENERATION_OLD, "clear domain", TRUE);
757 	g_assert (!concurrent_collection_in_progress);
759 	sgen_process_fin_stage_entries ();
760 	sgen_process_dislink_stage_entries ();
762 	sgen_clear_nursery_fragments ();
764 	if (xdomain_checks && domain != mono_get_root_domain ()) {
765 	sgen_scan_for_registered_roots_in_domain (domain, ROOT_TYPE_NORMAL);
766 	sgen_scan_for_registered_roots_in_domain (domain, ROOT_TYPE_WBARRIER);
767 	sgen_check_for_xdomain_refs ();
770 	/*Ephemerons and dislinks must be processed before LOS since they might end up pointing
771 	to memory returned to the OS.*/
772 	null_ephemerons_for_domain (domain);
774 	for (i = GENERATION_NURSERY; i < GENERATION_MAX; ++i)
775 	sgen_null_links_for_domain (domain, i);
777 	for (i = GENERATION_NURSERY; i < GENERATION_MAX; ++i)
778 	sgen_remove_finalizers_for_domain (domain, i);
780 	sgen_scan_area_with_callback (nursery_section->data, nursery_section->end_data,
781 	(IterateObjectCallbackFunc)clear_domain_process_minor_object_callback, domain, FALSE);
783 	/* We need two passes over major and large objects because
784 	   freeing such objects might give their memory back to the OS
785 	   (in the case of large objects) or obliterate its vtable
786 	   (pinned objects with major-copying or pinned and non-pinned
787 	   objects with major-mark&sweep), but we might need to
788 	   dereference a pointer from an object to another object if
789 	   the first object is a proxy. */
790 	major_collector.iterate_objects (ITERATE_OBJECTS_SWEEP_ALL, (IterateObjectCallbackFunc)clear_domain_process_major_object_callback, domain);
791 	for (bigobj = los_object_list; bigobj; bigobj = bigobj->next)
792 	clear_domain_process_object (bigobj->data, domain);
795 	for (bigobj = los_object_list; bigobj;) {
796 	if (need_remove_object_for_domain (bigobj->data, domain)) {
797 	LOSObject *to_free = bigobj;
799 	prev->next = bigobj->next;
801 	los_object_list = bigobj->next;
802 	bigobj = bigobj->next;
	/* NOTE(review): bigobj has already been advanced here, so this logs the
	   NEXT object's data rather than the one being freed (to_free) — looks
	   like a pre-existing logging bug; confirm against the full source. */
803 	SGEN_LOG (4, "Freeing large object %p", bigobj->data);
804 	sgen_los_free_object (to_free);
808 	bigobj = bigobj->next;
810 	major_collector.iterate_objects (ITERATE_OBJECTS_SWEEP_NON_PINNED, (IterateObjectCallbackFunc)clear_domain_free_major_non_pinned_object_callback, domain);
811 	major_collector.iterate_objects (ITERATE_OBJECTS_SWEEP_PINNED, (IterateObjectCallbackFunc)clear_domain_free_major_pinned_object_callback, domain);
813 	if (domain == mono_get_root_domain ()) {
814 	if (G_UNLIKELY (do_pin_stats))
815 	sgen_pin_stats_print_class_stats ();
816 	sgen_object_layout_dump (stdout);
819 	sgen_restart_world (0, NULL);
821 	binary_protocol_domain_unload_end (domain);
822 	binary_protocol_flush_buffers (FALSE);
828 * sgen_add_to_global_remset:
830 * The global remset contains locations which point into newspace after
831 * a minor collection. This can happen if the objects they point to are pinned.
833 * LOCKING: If called from a parallel collector, the global remset
834 * lock must be held. For serial collectors that is not necessary.
/*
 * Record PTR (a location pointing at nursery object OBJ) in the global
 * remembered set. Sanity-asserts that OBJ is in the nursery and that we are
 * inside a collection pause (or running the concurrent collector). Pinned
 * objects may be cemented instead of remembered — the early-return after
 * sgen_cement_lookup_or_register () is not visible in this excerpt. Also
 * feeds pin stats, the binary protocol, and the DTrace probe.
 */
837 sgen_add_to_global_remset (gpointer ptr, gpointer obj)
839 	SGEN_ASSERT (5, sgen_ptr_in_nursery (obj), "Target pointer of global remset must be in the nursery");
841 	HEAVY_STAT (++stat_wbarrier_add_to_global_remset);
843 	if (!major_collector.is_concurrent) {
844 	SGEN_ASSERT (5, current_collection_generation != -1, "Global remsets can only be added during collections");
846 	if (current_collection_generation == -1)
847 	SGEN_ASSERT (5, sgen_concurrent_collection_in_progress (), "Global remsets outside of collection pauses can only be added by the concurrent collector");
850 	if (!object_is_pinned (obj))
851 	SGEN_ASSERT (5, sgen_minor_collector.is_split || sgen_concurrent_collection_in_progress (), "Non-pinned objects can only remain in nursery if it is a split nursery");
852 	else if (sgen_cement_lookup_or_register (obj))
855 	remset.record_pointer (ptr);
857 	if (G_UNLIKELY (do_pin_stats))
858 	sgen_pin_stats_register_global_remset (obj);
860 	SGEN_LOG (8, "Adding global remset for %p", ptr);
861 	binary_protocol_global_remset (ptr, obj, (gpointer)SGEN_LOAD_VTABLE (obj));
865 	if (G_UNLIKELY (MONO_GC_GLOBAL_REMSET_ADD_ENABLED ())) {
866 	MonoVTable *vt = (MonoVTable*)LOAD_VTABLE (obj);
867 	MONO_GC_GLOBAL_REMSET_ADD ((mword)ptr, (mword)obj, sgen_safe_object_get_size (obj),
868 	vt->klass->name_space, vt->klass->name);
874 * sgen_drain_gray_stack:
876 * Scan objects in the gray stack until the stack is empty. This should be called
877 * frequently after each object is copied, to achieve better locality and cache
880 * max_objs is the maximum number of objects to scan, or -1 to scan until the stack is
/*
 * Pop and scan objects from the gray queue until it is empty or max_objs
 * objects have been processed (max_objs < 0 means drain completely; the
 * enclosing do/while re-enters the bounded loop in that case). Major
 * collections with a collector-specific drain delegate to it. Loop-variable
 * declarations and the dequeue-failure break are not visible in this excerpt.
 */
884 sgen_drain_gray_stack (int max_objs, ScanCopyContext ctx)
886 	ScanObjectFunc scan_func = ctx.scan_func;
887 	GrayQueue *queue = ctx.queue;
889 	if (current_collection_generation == GENERATION_OLD && major_collector.drain_gray_stack)
890 	return major_collector.drain_gray_stack (ctx);
894 	for (i = 0; i != max_objs; ++i) {
897 	GRAY_OBJECT_DEQUEUE (queue, &obj, &desc);
900 	SGEN_LOG (9, "Precise gray object scan %p (%s)", obj, safe_name (obj));
901 	scan_func (obj, desc, queue);
903 	} while (max_objs < 0);
908 * Addresses in the pin queue are already sorted. This function finds
909 * the object header for each address and pins the object. The
910 * addresses must be inside the nursery section. The (start of the)
911 * address array is overwritten with the addresses of the actually
912 * pinned objects. Return the number of pinned objects.
/*
 * pin_objects_from_nursery_pin_queue:
 * Walk the (already sorted) nursery pin queue, locate the object that
 * contains each candidate address, pin it and enqueue it for scanning.
 * The start of the pin-queue array is overwritten with the addresses of
 * the objects actually pinned; returns the number of pinned objects.
 */
915 pin_objects_from_nursery_pin_queue (ScanCopyContext ctx)
917 GCMemSection *section = nursery_section;
918 void **start = sgen_pinning_get_entry (section->pin_queue_first_entry);
919 void **end = sgen_pinning_get_entry (section->pin_queue_last_entry);
920 void *start_nursery = section->data;
921 void *end_nursery = section->next_data;
/* Everything below pinning_front has already been examined; entries pointing
 * there can be discarded without a new search. */
926 void *pinning_front = start_nursery;
928 void **definitely_pinned = start;
929 ScanObjectFunc scan_func = ctx.scan_func;
930 SgenGrayQueue *queue = ctx.queue;
932 sgen_nursery_allocator_prepare_for_pinning ();
934 while (start < end) {
935 void *obj_to_pin = NULL;
936 size_t obj_to_pin_size = 0;
941 SGEN_ASSERT (0, addr >= start_nursery && addr < end_nursery, "Potential pinning address out of range");
942 SGEN_ASSERT (0, addr >= last, "Pin queue not sorted");
949 SGEN_LOG (5, "Considering pinning addr %p", addr);
950 /* We've already processed everything up to pinning_front. */
951 if (addr < pinning_front) {
957 * Find the closest scan start <= addr. We might search backward in the
958 * scan_starts array because entries might be NULL. In the worst case we
959 * start at start_nursery.
961 idx = ((char*)addr - (char*)section->data) / SCAN_START_SIZE;
962 SGEN_ASSERT (0, idx < section->num_scan_start, "Scan start index out of range");
963 search_start = (void*)section->scan_starts [idx];
964 if (!search_start || search_start > addr) {
967 search_start = section->scan_starts [idx];
968 if (search_start && search_start <= addr)
971 if (!search_start || search_start > addr)
972 search_start = start_nursery;
976 * If the pinning front is closer than the scan start we found, start
977 * searching at the front.
979 if (search_start < pinning_front)
980 search_start = pinning_front;
983 * Now addr should be in an object a short distance from search_start.
985 * search_start must point to zeroed mem or point to an object.
988 size_t obj_size, canarified_obj_size;
/* Skip zeroed (unallocated) words one pointer at a time until we hit an
 * object header. */
991 if (!*(void**)search_start) {
992 search_start = (void*)ALIGN_UP ((mword)search_start + sizeof (gpointer));
993 /* The loop condition makes sure we don't overrun addr. */
997 canarified_obj_size = obj_size = ALIGN_UP (safe_object_get_size ((MonoObject*)search_start));
1000 * Filler arrays are marked by an invalid sync word. We don't
1001 * consider them for pinning. They are not delimited by canaries,
1004 if (((MonoObject*)search_start)->synchronisation != GINT_TO_POINTER (-1)) {
1005 CHECK_CANARY_FOR_OBJECT (search_start);
1006 CANARIFY_SIZE (canarified_obj_size);
1008 if (addr >= search_start && (char*)addr < (char*)search_start + obj_size) {
1009 /* This is the object we're looking for. */
1010 obj_to_pin = search_start;
1011 obj_to_pin_size = canarified_obj_size;
1016 /* Skip to the next object */
1017 search_start = (void*)((char*)search_start + canarified_obj_size);
1018 } while (search_start <= addr);
1020 /* We've searched past the address we were looking for. */
1022 pinning_front = search_start;
1023 goto next_pin_queue_entry;
1027 * We've found an object to pin. It might still be a dummy array, but we
1028 * can advance the pinning front in any case.
1030 pinning_front = (char*)obj_to_pin + obj_to_pin_size;
1033 * If this is a dummy array marking the beginning of a nursery
1034 * fragment, we don't pin it.
1036 if (((MonoObject*)obj_to_pin)->synchronisation == GINT_TO_POINTER (-1))
1037 goto next_pin_queue_entry;
1040 * Finally - pin the object!
1042 desc = sgen_obj_get_descriptor_safe (obj_to_pin);
/* Scan the pinned object's fields right away (cache locality). */
1044 scan_func (obj_to_pin, desc, queue);
1046 SGEN_LOG (4, "Pinned object %p, vtable %p (%s), count %d\n",
1047 obj_to_pin, *(void**)obj_to_pin, safe_name (obj_to_pin), count);
1048 binary_protocol_pin (obj_to_pin,
1049 (gpointer)LOAD_VTABLE (obj_to_pin),
1050 safe_object_get_size (obj_to_pin));
1052 #ifdef ENABLE_DTRACE
1053 if (G_UNLIKELY (MONO_GC_OBJ_PINNED_ENABLED ())) {
1054 int gen = sgen_ptr_in_nursery (obj_to_pin) ? GENERATION_NURSERY : GENERATION_OLD;
1055 MonoVTable *vt = (MonoVTable*)LOAD_VTABLE (obj_to_pin);
1056 MONO_GC_OBJ_PINNED ((mword)obj_to_pin,
1057 sgen_safe_object_get_size (obj_to_pin),
1058 vt->klass->name_space, vt->klass->name, gen);
1062 pin_object (obj_to_pin);
1063 GRAY_OBJECT_ENQUEUE (queue, obj_to_pin, desc);
1064 if (G_UNLIKELY (do_pin_stats))
1065 sgen_pin_stats_register_object (obj_to_pin, obj_to_pin_size);
/* Compact the pin queue in place: record the actually pinned object. */
1066 definitely_pinned [count] = obj_to_pin;
1070 next_pin_queue_entry:
1074 //printf ("effective pinned: %d (at the end: %d)\n", count, (char*)end_nursery - (char*)last);
/* Report pinned objects as GC roots to the profiler, if requested. */
1075 if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS) {
1076 GCRootReport report;
1078 for (idx = 0; idx < count; ++idx)
1079 add_profile_gc_root (&report, definitely_pinned [idx], MONO_PROFILE_GC_ROOT_PINNING | MONO_PROFILE_GC_ROOT_MISC, 0);
1080 notify_gc_roots (&report);
1082 stat_pinned_objects += count;
/*
 * pin_objects_in_nursery:
 * Pin all objects referenced by the nursery pin queue, then shrink the
 * queue to cover only the entries that were actually pinned.
 */
1087 pin_objects_in_nursery (ScanCopyContext ctx)
/* Nothing to do if the pin queue is empty. */
1091 if (nursery_section->pin_queue_first_entry == nursery_section->pin_queue_last_entry)
1094 reduced_to = pin_objects_from_nursery_pin_queue (ctx);
1095 nursery_section->pin_queue_last_entry = nursery_section->pin_queue_first_entry + reduced_to;
/*
 * sgen_pin_object:
 * Pin a single object: set its pinned bit, stage the pointer for the pin
 * queue, record stats/protocol events and enqueue it for scanning.
 */
1100 sgen_pin_object (void *object, GrayQueue *queue)
1102 SGEN_PIN_OBJECT (object);
1103 sgen_pin_stage_ptr (object);
1105 if (G_UNLIKELY (do_pin_stats))
1106 sgen_pin_stats_register_object (object, safe_object_get_size (object));
1108 GRAY_OBJECT_ENQUEUE (queue, object, sgen_obj_get_descriptor_safe (object));
1109 binary_protocol_pin (object, (gpointer)LOAD_VTABLE (object), safe_object_get_size (object));
1111 #ifdef ENABLE_DTRACE
1112 if (G_UNLIKELY (MONO_GC_OBJ_PINNED_ENABLED ())) {
1113 int gen = sgen_ptr_in_nursery (object) ? GENERATION_NURSERY : GENERATION_OLD;
1114 MonoVTable *vt = (MonoVTable*)LOAD_VTABLE (object);
1115 MONO_GC_OBJ_PINNED ((mword)object, sgen_safe_object_get_size (object), vt->klass->name_space, vt->klass->name, gen);
/*
 * sgen_parallel_pin_or_update:
 * Racy pin used by the parallel collector: try to atomically tag the
 * object's vtable word as pinned; if another worker forwarded the object
 * in the meantime, update *ptr to the forwarded location instead.
 */
1121 sgen_parallel_pin_or_update (void **ptr, void *obj, MonoVTable *vt, SgenGrayQueue *queue)
1125 gboolean major_pinned = FALSE;
1127 if (sgen_ptr_in_nursery (obj)) {
/* CAS the vtable word: success means we won the race and may pin. */
1128 if (SGEN_CAS_PTR (obj, SGEN_POINTER_TAG_PINNED (vt), vt) == vt) {
1129 sgen_pin_object (obj, queue);
1133 major_collector.pin_major_object (obj, queue);
1134 major_pinned = TRUE;
1137 vtable_word = *(mword*)obj;
1138 /*someone else forwarded it, update the pointer and bail out*/
1139 if (SGEN_POINTER_IS_TAGGED_FORWARDED (vtable_word)) {
1140 *ptr = SGEN_POINTER_UNTAG_VTABLE (vtable_word);
1144 /*someone pinned it, nothing to do.*/
1145 if (SGEN_POINTER_IS_TAGGED_PINNED (vtable_word) || major_pinned)
1150 /* Sort the addresses in array in increasing order.
1151 * Done using a by-the-book heap sort, which has decent and stable performance and is pretty cache efficient.
/*
 * sgen_sort_addresses:
 * In-place heap sort of @array (ascending).  Phase 1 sifts each element
 * up to build a max-heap; phase 2 repeatedly swaps the root to the end
 * and sifts down.
 */
1154 sgen_sort_addresses (void **array, size_t size)
/* Build the max-heap: bubble each new element up towards the root. */
1159 for (i = 1; i < size; ++i) {
1162 size_t parent = (child - 1) / 2;
1164 if (array [parent] >= array [child])
1167 tmp = array [parent];
1168 array [parent] = array [child];
1169 array [child] = tmp;
/* Extract the maximum repeatedly, restoring the heap each time. */
1175 for (i = size - 1; i > 0; --i) {
1178 array [i] = array [0];
1184 while (root * 2 + 1 <= end) {
1185 size_t child = root * 2 + 1;
/* Pick the larger of the two children. */
1187 if (child < end && array [child] < array [child + 1])
1189 if (array [root] >= array [child])
1193 array [root] = array [child];
1194 array [child] = tmp;
1202 * Scan the memory between start and end and queue values which could be pointers
1203 * to the area between start_nursery and end_nursery for later consideration.
1204 * Typically used for thread stacks.
/*
 * conservatively_pin_objects_from:
 * Conservative scan of [start, end): any word that looks like a pointer
 * into [start_nursery, end_nursery) is aligned down to ALLOC_ALIGN and
 * staged as a pinning candidate.  Typically used for thread stacks and
 * registers.
 */
1207 conservatively_pin_objects_from (void **start, void **end, void *start_nursery, void *end_nursery, int pin_type)
1211 #ifdef VALGRIND_MAKE_MEM_DEFINED_IF_ADDRESSABLE
1212 VALGRIND_MAKE_MEM_DEFINED_IF_ADDRESSABLE (start, (char*)end - (char*)start);
1215 while (start < end) {
1216 if (*start >= start_nursery && *start < end_nursery) {
1218 * *start can point to the middle of an object
1219 * note: should we handle pointing at the end of an object?
1220 * pinning in C# code disallows pointing at the end of an object
1221 * but there is some small chance that an optimizing C compiler
1222 * may keep the only reference to an object by pointing
1223 * at the end of it. We ignore this small chance for now.
1224 * Pointers to the end of an object are indistinguishable
1225 * from pointers to the start of the next object in memory
1226 * so if we allow that we'd need to pin two objects...
1227 * We queue the pointer in an array, the
1228 * array will then be sorted and uniqued. This way
1229 * we can coalesce several pinning pointers and it should
1230 * be faster since we'd do a memory scan with increasing
1231 * addresses. Note: we can align the address to the allocation
1232 * alignment, so the unique process is more effective.
1234 mword addr = (mword)*start;
1235 addr &= ~(ALLOC_ALIGN - 1);
/* Re-check the range: alignment may have moved addr below start_nursery. */
1236 if (addr >= (mword)start_nursery && addr < (mword)end_nursery) {
1237 SGEN_LOG (6, "Pinning address %p from %p", (void*)addr, start);
1238 sgen_pin_stage_ptr ((void*)addr);
1239 binary_protocol_pin_stage (start, (void*)addr);
1242 if (G_UNLIKELY (do_pin_stats)) {
1243 if (ptr_in_nursery ((void*)addr))
1244 sgen_pin_stats_register_address ((char*)addr, pin_type);
1250 SGEN_LOG (7, "found %d potential pinned heap pointers", count);
1254 * The first thing we do in a collection is to identify pinned objects.
1255 * This function considers all the areas of memory that need to be
1256 * conservatively scanned.
/*
 * pin_from_roots:
 * First step of a collection: conservatively scan all memory areas that
 * may contain unmanaged pointers into the heap -- API-pinned roots and
 * thread stacks/registers -- and stage pinning candidates.
 */
1259 pin_from_roots (void *start_nursery, void *end_nursery, GrayQueue *queue)
1263 SGEN_LOG (2, "Scanning pinned roots (%d bytes, %d/%d entries)", (int)roots_size, roots_hash [ROOT_TYPE_NORMAL].num_entries, roots_hash [ROOT_TYPE_PINNED].num_entries);
1264 /* objects pinned from the API are inside these roots */
1265 SGEN_HASH_TABLE_FOREACH (&roots_hash [ROOT_TYPE_PINNED], start_root, root) {
1266 SGEN_LOG (6, "Pinned roots %p-%p", start_root, root->end_root);
1267 conservatively_pin_objects_from (start_root, (void**)root->end_root, start_nursery, end_nursery, PIN_TYPE_OTHER);
1268 } SGEN_HASH_TABLE_FOREACH_END;
1269 /* now deal with the thread stacks
1270 * in the future we should be able to conservatively scan only:
1271 * *) the cpu registers
1272 * *) the unmanaged stack frames
1273 * *) the _last_ managed stack frame
1274 * *) pointers slots in managed frames
1276 scan_thread_data (start_nursery, end_nursery, FALSE, queue);
/*
 * unpin_objects_from_queue:
 * Dequeue every object from @queue and clear its pinned bit.  Each entry
 * is expected to be pinned (asserted).
 */
1280 unpin_objects_from_queue (SgenGrayQueue *queue)
1285 GRAY_OBJECT_DEQUEUE (queue, &addr, &desc);
1288 g_assert (SGEN_OBJECT_IS_PINNED (addr));
1289 SGEN_UNPIN_OBJECT (addr);
/* Closure passed to user root-mark callbacks: the copy/mark function to
 * apply plus the gray queue it operates on. */
1294 CopyOrMarkObjectFunc func;
1296 } UserCopyOrMarkData;
/*
 * single_arg_user_copy_or_mark:
 * Adapter with the MonoGCRootMarkFunc signature: unpacks the
 * UserCopyOrMarkData closure and forwards to its copy/mark function.
 */
1299 single_arg_user_copy_or_mark (void **obj, void *gc_data)
1301 UserCopyOrMarkData *data = gc_data;
1303 data->func (obj, data->queue);
1307 * The memory area from start_root to end_root contains pointers to objects.
1308 * Their position is precisely described by @desc (this means that the pointer
1309 * can be either NULL or the pointer to the start of an object).
1310 * This function copies them to to_space and updates them.
1312 * This function is not thread-safe!
/*
 * precisely_scan_objects_from:
 * Precisely scan the root area [start_root, end_root) according to the
 * root descriptor @desc, applying the copy/mark function to every slot
 * the descriptor marks as a pointer.  Not thread-safe.
 */
1315 precisely_scan_objects_from (void** start_root, void** end_root, char* n_start, char *n_end, mword desc, ScanCopyContext ctx)
1317 CopyOrMarkObjectFunc copy_func = ctx.copy_func;
1318 SgenGrayQueue *queue = ctx.queue;
1320 switch (desc & ROOT_DESC_TYPE_MASK) {
/* Small root: a single bitmap word, one bit per pointer-sized slot. */
1321 case ROOT_DESC_BITMAP:
1322 desc >>= ROOT_DESC_TYPE_SHIFT;
1324 if ((desc & 1) && *start_root) {
1325 copy_func (start_root, queue);
1326 SGEN_LOG (9, "Overwrote root at %p with %p", start_root, *start_root);
/* Large root: out-of-line bitmap, first word is the word count + 1. */
1332 case ROOT_DESC_COMPLEX: {
1333 gsize *bitmap_data = sgen_get_complex_descriptor_bitmap (desc);
1334 gsize bwords = (*bitmap_data) - 1;
1335 void **start_run = start_root;
1337 while (bwords-- > 0) {
1338 gsize bmap = *bitmap_data++;
1339 void **objptr = start_run;
1341 if ((bmap & 1) && *objptr) {
1342 copy_func (objptr, queue);
1343 SGEN_LOG (9, "Overwrote root at %p with %p", objptr, *objptr);
1348 start_run += GC_BITS_PER_WORD;
/* User-supplied marker callback does the walking itself. */
1352 case ROOT_DESC_USER: {
1353 UserCopyOrMarkData data = { copy_func, queue };
1354 MonoGCRootMarkFunc marker = sgen_get_user_descriptor_func (desc);
1355 marker (start_root, single_arg_user_copy_or_mark, &data);
1358 case ROOT_DESC_RUN_LEN:
1359 g_assert_not_reached ();
1361 g_assert_not_reached ();
/* Reset the cached heap address range to "empty" (low = max, high = 0)
 * so the next sgen_update_heap_boundaries () call re-establishes it. */
1366 reset_heap_boundaries (void)
1368 lowest_heap_address = ~(mword)0;
1369 highest_heap_address = 0;
/*
 * sgen_update_heap_boundaries:
 * Widen the global [lowest_heap_address, highest_heap_address] range to
 * include [low, high].  Lock-free: each bound is updated with a CAS
 * retry loop so concurrent callers cannot lose an update.
 */
1373 sgen_update_heap_boundaries (mword low, mword high)
1378 old = lowest_heap_address;
1381 } while (SGEN_CAS_PTR ((gpointer*)&lowest_heap_address, (gpointer)low, (gpointer)old) != (gpointer)old);
1384 old = highest_heap_address;
1387 } while (SGEN_CAS_PTR ((gpointer*)&highest_heap_address, (gpointer)high, (gpointer)old) != (gpointer)old);
1391 * Allocate and setup the data structures needed to be able to allocate objects
1392 * in the nursery. The nursery is stored in nursery_section.
/*
 * alloc_nursery:
 * Allocate and initialize nursery_section: reserve the nursery memory
 * from the major collector's heap allocator, set up the scan-starts
 * table and publish the nursery bounds to the nursery allocator.
 * Idempotent: returns early if the nursery already exists.
 */
1395 alloc_nursery (void)
1397 GCMemSection *section;
1402 if (nursery_section)
1404 SGEN_LOG (2, "Allocating nursery size: %zu", (size_t)sgen_nursery_size);
1405 /* later we will alloc a larger area for the nursery but only activate
1406 * what we need. The rest will be used as expansion if we have too many pinned
1407 * objects in the existing nursery.
1409 /* FIXME: handle OOM */
1410 section = sgen_alloc_internal (INTERNAL_MEM_SECTION);
1412 alloc_size = sgen_nursery_size;
1414 /* If there isn't enough space even for the nursery we should simply abort. */
1415 g_assert (sgen_memgov_try_alloc_space (alloc_size, SPACE_NURSERY));
/* Aligned nurseries let ptr_in_nursery () be a cheap mask compare. */
1417 #ifdef SGEN_ALIGN_NURSERY
1418 data = major_collector.alloc_heap (alloc_size, alloc_size, DEFAULT_NURSERY_BITS);
1420 data = major_collector.alloc_heap (alloc_size, 0, DEFAULT_NURSERY_BITS);
1422 sgen_update_heap_boundaries ((mword)data, (mword)(data + sgen_nursery_size));
1423 SGEN_LOG (4, "Expanding nursery size (%p-%p): %lu, total: %lu", data, data + alloc_size, (unsigned long)sgen_nursery_size, (unsigned long)mono_gc_get_heap_size ());
1424 section->data = section->next_data = data;
1425 section->size = alloc_size;
1426 section->end_data = data + sgen_nursery_size;
/* One scan-start slot per SCAN_START_SIZE bytes, rounded up. */
1427 scan_starts = (alloc_size + SCAN_START_SIZE - 1) / SCAN_START_SIZE;
1428 section->scan_starts = sgen_alloc_internal_dynamic (sizeof (char*) * scan_starts, INTERNAL_MEM_SCAN_STARTS, TRUE);
1429 section->num_scan_start = scan_starts;
1431 nursery_section = section;
1433 sgen_nursery_allocator_set_nursery_bounds (data, data + sgen_nursery_size);
/*
 * mono_gc_get_nursery:
 * Return the nursery start address; out-params receive its size and, for
 * aligned nurseries, the shift usable for fast in-nursery checks.
 */
1437 mono_gc_get_nursery (int *shift_bits, size_t *size)
1439 *size = sgen_nursery_size;
1440 #ifdef SGEN_ALIGN_NURSERY
1441 *shift_bits = DEFAULT_NURSERY_BITS;
1445 return sgen_get_nursery_start ();
/*
 * mono_gc_set_current_thread_appdomain:
 * Record @domain as the current thread's stopped-in domain on its
 * SgenThreadInfo, if the thread is still registered.
 */
1449 mono_gc_set_current_thread_appdomain (MonoDomain *domain)
1451 SgenThreadInfo *info = mono_thread_info_current ();
1453 /* Could be called from sgen_thread_unregister () with a NULL info */
1456 info->stopped_domain = domain;
/* TRUE when stacks are scanned precisely rather than conservatively. */
1461 mono_gc_precise_stack_mark_enabled (void)
1463 return !conservative_stack_mark;
/* Accessor for the GC debug-log file handle. */
1467 mono_gc_get_logfile (void)
1469 return gc_debug_file;
/*
 * report_finalizer_roots_list:
 * Report every object on a finalize-ready list to the profiler as a
 * finalizer GC root.
 */
1473 report_finalizer_roots_list (FinalizeReadyEntry *list)
1475 GCRootReport report;
1476 FinalizeReadyEntry *fin;
1479 for (fin = list; fin; fin = fin->next) {
1482 add_profile_gc_root (&report, fin->object, MONO_PROFILE_GC_ROOT_FINALIZER, 0);
1484 notify_gc_roots (&report);
/* Report both the normal and critical finalization queues as GC roots. */
1488 report_finalizer_roots (void)
1490 report_finalizer_roots_list (fin_ready_list);
1491 report_finalizer_roots_list (critical_fin_list);
/* Report currently being filled by precisely_report_roots_from (); used
 * because the marker callback has no closure argument for it. */
1494 static GCRootReport *root_report;
/* MonoGCRootMarkFunc-shaped adapter: add the slot's object to root_report. */
1497 single_arg_report_root (void **obj, void *gc_data)
1500 add_profile_gc_root (root_report, *obj, MONO_PROFILE_GC_ROOT_OTHER, 0);
/*
 * precisely_report_roots_from:
 * Profiler-reporting twin of precisely_scan_objects_from (): walk the
 * root area per its descriptor and report each non-NULL slot's object
 * instead of copying/marking it.
 */
1504 precisely_report_roots_from (GCRootReport *report, void** start_root, void** end_root, mword desc)
1506 switch (desc & ROOT_DESC_TYPE_MASK) {
1507 case ROOT_DESC_BITMAP:
1508 desc >>= ROOT_DESC_TYPE_SHIFT;
1510 if ((desc & 1) && *start_root) {
1511 add_profile_gc_root (report, *start_root, MONO_PROFILE_GC_ROOT_OTHER, 0);
1517 case ROOT_DESC_COMPLEX: {
1518 gsize *bitmap_data = sgen_get_complex_descriptor_bitmap (desc);
1519 gsize bwords = (*bitmap_data) - 1;
1520 void **start_run = start_root;
1522 while (bwords-- > 0) {
1523 gsize bmap = *bitmap_data++;
1524 void **objptr = start_run;
1526 if ((bmap & 1) && *objptr) {
1527 add_profile_gc_root (report, *objptr, MONO_PROFILE_GC_ROOT_OTHER, 0);
1532 start_run += GC_BITS_PER_WORD;
1536 case ROOT_DESC_USER: {
1537 MonoGCRootMarkFunc marker = sgen_get_user_descriptor_func (desc);
/* Callback has no closure parameter, so pass the report via the
 * root_report global. */
1538 root_report = report;
1539 marker (start_root, single_arg_report_root, NULL);
1542 case ROOT_DESC_RUN_LEN:
1543 g_assert_not_reached ();
1545 g_assert_not_reached ();
/*
 * report_registered_roots_by_type:
 * Report all registered roots of @root_type to the profiler.
 */
1550 report_registered_roots_by_type (int root_type)
1552 GCRootReport report;
1556 SGEN_HASH_TABLE_FOREACH (&roots_hash [root_type], start_root, root) {
1557 SGEN_LOG (6, "Precise root scan %p-%p (desc: %p)", start_root, root->end_root, (void*)root->root_desc);
1558 precisely_report_roots_from (&report, start_root, (void**)root->end_root, root->root_desc);
1559 } SGEN_HASH_TABLE_FOREACH_END;
1560 notify_gc_roots (&report);
/* Report both normal and write-barrier registered roots. */
1564 report_registered_roots (void)
1566 report_registered_roots_by_type (ROOT_TYPE_NORMAL);
1567 report_registered_roots_by_type (ROOT_TYPE_WBARRIER);
/*
 * scan_finalizer_entries:
 * Apply the copy/mark function to every object on a finalize-ready list
 * so finalizable objects (and what they reference) survive collection.
 */
1571 scan_finalizer_entries (FinalizeReadyEntry *list, ScanCopyContext ctx)
1573 CopyOrMarkObjectFunc copy_func = ctx.copy_func;
1574 SgenGrayQueue *queue = ctx.queue;
1575 FinalizeReadyEntry *fin;
1577 for (fin = list; fin; fin = fin->next) {
1580 SGEN_LOG (5, "Scan of fin ready object: %p (%s)\n", fin->object, safe_name (fin->object));
/* copy_func may update fin->object in place if the object moves. */
1581 copy_func (&fin->object, queue);
/* Human-readable name for a generation constant; asserts on anything
 * other than GENERATION_NURSERY / GENERATION_OLD. */
1586 generation_name (int generation)
1588 switch (generation) {
1589 case GENERATION_NURSERY: return "nursery";
1590 case GENERATION_OLD: return "old";
1591 default: g_assert_not_reached ();
/* Public wrapper around the static generation_name () helper. */
1596 sgen_generation_name (int generation)
1598 return generation_name (generation);
/*
 * sgen_get_current_object_ops:
 * Return a pointer to the object-operations vtable (copy/scan functions)
 * for the collection currently in progress.
 */
1601 SgenObjectOperations *
1602 sgen_get_current_object_ops (void){
/* Fixed mojibake: "&current_object_ops" had been corrupted to
 * "¤t_object_ops" by an HTML-entity round-trip (& + "curren" -> &curren; -> ¤). */
1603 return &current_object_ops;
/*
 * finish_gray_stack:
 * Drain the gray stack to completion and run the end-of-collection
 * protocol: ephemerons, togglerefs, bridge processing, weak links and
 * finalization, re-draining after each step until the queue is empty.
 */
1608 finish_gray_stack (int generation, GrayQueue *queue)
1612 int done_with_ephemerons, ephemeron_rounds = 0;
1613 CopyOrMarkObjectFunc copy_func = current_object_ops.copy_or_mark_object;
1614 ScanObjectFunc scan_func = current_object_ops.scan_object;
1615 ScanCopyContext ctx = { scan_func, copy_func, queue };
1616 char *start_addr = generation == GENERATION_NURSERY ? sgen_get_nursery_start () : NULL;
1617 char *end_addr = generation == GENERATION_NURSERY ? sgen_get_nursery_end () : (char*)-1;
1620 * We copied all the reachable objects. Now it's the time to copy
1621 * the objects that were not referenced by the roots, but by the copied objects.
1622 * we built a stack of objects pointed to by gray_start: they are
1623 * additional roots and we may add more items as we go.
1624 * We loop until gray_start == gray_objects which means no more objects have
1625 * been added. Note this is iterative: no recursion is involved.
1626 * We need to walk the LO list as well in search of marked big objects
1627 * (use a flag since this is needed only on major collections). We need to loop
1628 * here as well, so keep a counter of marked LO (increasing it in copy_object).
1629 * To achieve better cache locality and cache usage, we drain the gray stack
1630 * frequently, after each object is copied, and just finish the work here.
1632 sgen_drain_gray_stack (-1, ctx);
1634 SGEN_LOG (2, "%s generation done", generation_name (generation));
1637 Reset bridge data, we might have lingering data from a previous collection if this is a major
1638 collection triggered by minor overflow.
1640 We must reset the gathered bridges since their original block might be evacuated due to major
1641 fragmentation in the meanwhile and the bridge code should not have to deal with that.
1643 if (sgen_need_bridge_processing ())
1644 sgen_bridge_reset_data ();
1647 * Walk the ephemeron tables marking all values with reachable keys. This must be completely done
1648 * before processing finalizable objects and non-tracking weak links to avoid finalizing/clearing
1649 * objects that are in fact reachable.
1651 done_with_ephemerons = 0;
1653 done_with_ephemerons = mark_ephemerons_in_range (ctx);
1654 sgen_drain_gray_stack (-1, ctx);
1656 } while (!done_with_ephemerons);
1658 sgen_mark_togglerefs (start_addr, end_addr, ctx);
1660 if (sgen_need_bridge_processing ()) {
1661 /*Make sure the gray stack is empty before we process bridge objects so we get liveness right*/
1662 sgen_drain_gray_stack (-1, ctx);
1663 sgen_collect_bridge_objects (generation, ctx);
/* A major collection must also cover nursery bridge objects. */
1664 if (generation == GENERATION_OLD)
1665 sgen_collect_bridge_objects (GENERATION_NURSERY, ctx);
1668 Do the first bridge step here, as the collector liveness state will become useless after that.
1670 An important optimization is to only process the possibly dead part of the object graph and skip
1671 over all live objects as we transitively know everything they point to must be alive too.
1673 The above invariant is completely wrong if we let the gray queue be drained and mark/copy everything.
1675 This has the unfortunate side effect of making overflow collections perform the first step twice, but
1676 given we now have heuristics that perform major GC in anticipation of minor overflows this should not
1679 sgen_bridge_processing_stw_step ();
1683 Make sure we drain the gray stack before processing disappearing links and finalizers.
1684 If we don't make sure it is empty we might wrongly see a live object as dead.
1686 sgen_drain_gray_stack (-1, ctx);
1689 We must clear weak links that don't track resurrection before processing object ready for
1690 finalization so they can be cleared before that.
1692 sgen_null_link_in_range (generation, TRUE, ctx);
1693 if (generation == GENERATION_OLD)
1694 sgen_null_link_in_range (GENERATION_NURSERY, TRUE, ctx);
1697 /* walk the finalization queue and move also the objects that need to be
1698 * finalized: use the finalized objects as new roots so the objects they depend
1699 * on are also not reclaimed. As with the roots above, only objects in the nursery
1700 * are marked/copied.
1702 sgen_finalize_in_range (generation, ctx);
1703 if (generation == GENERATION_OLD)
1704 sgen_finalize_in_range (GENERATION_NURSERY, ctx);
1705 /* drain the new stack that might have been created */
1706 SGEN_LOG (6, "Precise scan of gray area post fin");
1707 sgen_drain_gray_stack (-1, ctx);
1710 * This must be done again after processing finalizable objects since CWL slots are cleared only after the key is finalized.
1712 done_with_ephemerons = 0;
1714 done_with_ephemerons = mark_ephemerons_in_range (ctx);
1715 sgen_drain_gray_stack (-1, ctx);
1717 } while (!done_with_ephemerons);
1720 * Clear ephemeron pairs with unreachable keys.
1721 * We pass the copy func so we can figure out if an array was promoted or not.
1723 clear_unreachable_ephemerons (ctx);
1726 * We clear togglerefs only after all possible chances of revival are done.
1727 * This is semantically more inline with what users expect and it allows for
1728 * user finalizers to correctly interact with TR objects.
1730 sgen_clear_togglerefs (start_addr, end_addr, ctx);
1733 SGEN_LOG (2, "Finalize queue handling scan for %s generation: %d usecs %d ephemeron rounds", generation_name (generation), TV_ELAPSED (atv, btv), ephemeron_rounds);
1736 * handle disappearing links
1737 * Note we do this after checking the finalization queue because if an object
1738 * survives (at least long enough to be finalized) we don't clear the link.
1739 * This also deals with a possible issue with the monitor reclamation: with the Boehm
1740 * GC a finalized object may lose the monitor because it is cleared before the finalizer is
1743 g_assert (sgen_gray_object_queue_is_empty (queue));
1745 sgen_null_link_in_range (generation, FALSE, ctx);
1746 if (generation == GENERATION_OLD)
1747 sgen_null_link_in_range (GENERATION_NURSERY, FALSE, ctx);
/* Nulling links may have resurrected work; loop until the queue stays empty. */
1748 if (sgen_gray_object_queue_is_empty (queue))
1750 sgen_drain_gray_stack (-1, ctx);
1753 g_assert (sgen_gray_object_queue_is_empty (queue));
1755 sgen_gray_object_queue_trim_free_list (queue);
/*
 * sgen_check_section_scan_starts:
 * Debug check: every non-NULL scan-start entry must point at an object
 * with a plausible size (>= MonoObject, <= max small-object size).
 */
1759 sgen_check_section_scan_starts (GCMemSection *section)
1762 for (i = 0; i < section->num_scan_start; ++i) {
1763 if (section->scan_starts [i]) {
1764 mword size = safe_object_get_size ((MonoObject*) section->scan_starts [i]);
1765 g_assert (size >= sizeof (MonoObject) && size <= MAX_SMALL_OBJ_SIZE);
/* Run the scan-starts debug check on the nursery and the major heap,
 * gated on the do_scan_starts_check debug option. */
1771 check_scan_starts (void)
1773 if (!do_scan_starts_check)
1775 sgen_check_section_scan_starts (nursery_section);
1776 major_collector.check_scan_starts ();
/*
 * scan_from_registered_roots:
 * Precisely scan every registered root of @root_type within
 * [addr_start, addr_end) using each root's descriptor.
 */
1780 scan_from_registered_roots (char *addr_start, char *addr_end, int root_type, ScanCopyContext ctx)
1784 SGEN_HASH_TABLE_FOREACH (&roots_hash [root_type], start_root, root) {
1785 SGEN_LOG (6, "Precise root scan %p-%p (desc: %p)", start_root, root->end_root, (void*)root->root_desc);
1786 precisely_scan_objects_from (start_root, (void**)root->end_root, addr_start, addr_end, root->root_desc, ctx);
1787 } SGEN_HASH_TABLE_FOREACH_END;
/* Emit one <occupied> heap-dump element for the range [start, end),
 * with the offset relative to the section start. */
1791 sgen_dump_occupied (char *start, char *end, char *section_start)
1793 fprintf (heap_dump_file, "<occupied offset=\"%td\" size=\"%td\"/>\n", start - section_start, end - start);
/*
 * sgen_dump_section:
 * Write an XML <section> element for @section to the heap dump file,
 * walking its memory object-by-object and coalescing occupied ranges.
 */
1797 sgen_dump_section (GCMemSection *section, const char *type)
1799 char *start = section->data;
1800 char *end = section->data + section->size;
1801 char *occ_start = NULL;
1803 char *old_start = NULL; /* just for debugging */
1805 fprintf (heap_dump_file, "<section type=\"%s\" size=\"%lu\">\n", type, (unsigned long)section->size);
1807 while (start < end) {
/* A zero word means unallocated space: flush the current occupied run. */
1811 if (!*(void**)start) {
1813 sgen_dump_occupied (occ_start, start, section->data);
1816 start += sizeof (void*); /* should be ALLOC_ALIGN, really */
1819 g_assert (start < section->next_data);
1824 vt = (GCVTable*)LOAD_VTABLE (start);
1827 size = ALIGN_UP (safe_object_get_size ((MonoObject*) start));
1830 fprintf (heap_dump_file, "<object offset=\"%d\" class=\"%s.%s\" size=\"%d\"/>\n",
1831 start - section->data,
1832 vt->klass->name_space, vt->klass->name,
/* Flush any trailing occupied run before closing the element. */
1840 sgen_dump_occupied (occ_start, start, section->data);
1842 fprintf (heap_dump_file, "</section>\n");
/*
 * dump_object:
 * Write an XML <object> element for @obj to the heap dump file; when
 * @dump_location is set, also emit which heap region it lives in.
 */
1846 dump_object (MonoObject *obj, gboolean dump_location)
1848 static char class_name [1024];
1850 MonoClass *class = mono_object_class (obj);
1854 * Python's XML parser is too stupid to parse angle brackets
1855 * in strings, so we just ignore them;
/* Copy the class name, dropping characters that would break the XML. */
1858 while (class->name [i] && j < sizeof (class_name) - 1) {
1859 if (!strchr ("<>\"", class->name [i]))
1860 class_name [j++] = class->name [i];
1863 g_assert (j < sizeof (class_name));
1866 fprintf (heap_dump_file, "<object class=\"%s.%s\" size=\"%zd\"",
1867 class->name_space, class_name,
1868 safe_object_get_size (obj));
1869 if (dump_location) {
1870 const char *location;
1871 if (ptr_in_nursery (obj))
1872 location = "nursery";
1873 else if (safe_object_get_size (obj) <= MAX_SMALL_OBJ_SIZE)
1877 fprintf (heap_dump_file, " location=\"%s\"", location);
1879 fprintf (heap_dump_file, "/>\n");
/*
 * dump_heap:
 * Write a full <collection> heap-dump record: memory usage, pin stats,
 * pinned objects, the nursery section, the major heap and the LOS list.
 */
1883 dump_heap (const char *type, int num, const char *reason)
1888 fprintf (heap_dump_file, "<collection type=\"%s\" num=\"%d\"", type, num);
1890 fprintf (heap_dump_file, " reason=\"%s\"", reason);
1891 fprintf (heap_dump_file, ">\n");
1892 fprintf (heap_dump_file, "<other-mem-usage type=\"mempools\" size=\"%ld\"/>\n", mono_mempool_get_bytes_allocated ());
1893 sgen_dump_internal_mem_usage (heap_dump_file);
1894 fprintf (heap_dump_file, "<pinned type=\"stack\" bytes=\"%zu\"/>\n", sgen_pin_stats_get_pinned_byte_count (PIN_TYPE_STACK));
1895 /* fprintf (heap_dump_file, "<pinned type=\"static-data\" bytes=\"%d\"/>\n", pinned_byte_counts [PIN_TYPE_STATIC_DATA]); */
1896 fprintf (heap_dump_file, "<pinned type=\"other\" bytes=\"%zu\"/>\n", sgen_pin_stats_get_pinned_byte_count (PIN_TYPE_OTHER));
1898 fprintf (heap_dump_file, "<pinned-objects>\n");
1899 for (list = sgen_pin_stats_get_object_list (); list; list = list->next)
1900 dump_object (list->obj, TRUE);
1901 fprintf (heap_dump_file, "</pinned-objects>\n");
1903 sgen_dump_section (nursery_section, "nursery");
1905 major_collector.dump_heap (heap_dump_file);
/* Large-object space entries are dumped without location info. */
1907 fprintf (heap_dump_file, "<los>\n");
1908 for (bigobj = los_object_list; bigobj; bigobj = bigobj->next)
1909 dump_object ((MonoObject*)bigobj->data, FALSE);
1910 fprintf (heap_dump_file, "</los>\n");
1912 fprintf (heap_dump_file, "</collection>\n");
/*
 * sgen_register_moved_object:
 * Record an (old, new) address pair for the profiler's GC-moves event,
 * flushing the buffer to the profiler when it fills up.
 */
1916 sgen_register_moved_object (void *obj, void *destination)
1918 g_assert (mono_profiler_events & MONO_PROFILE_GC_MOVES);
1920 if (moved_objects_idx == MOVED_OBJECTS_NUM) {
1921 mono_profiler_gc_moves (moved_objects, moved_objects_idx);
1922 moved_objects_idx = 0;
/* Pairs are stored flat: source followed by destination. */
1924 moved_objects [moved_objects_idx++] = obj;
1925 moved_objects [moved_objects_idx++] = destination;
/* NOTE(review): the enclosing function header is not visible in this
 * fragment; this appears to be the body of the one-time GC counters
 * registration routine -- confirm against the full file. */
1931 static gboolean inited = FALSE;
/* Collection phase timing counters. */
1936 mono_counters_register ("Collection max time", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME | MONO_COUNTER_MONOTONIC, &time_max);
1938 mono_counters_register ("Minor fragment clear", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &time_minor_pre_collection_fragment_clear);
1939 mono_counters_register ("Minor pinning", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &time_minor_pinning);
1940 mono_counters_register ("Minor scan remembered set", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &time_minor_scan_remsets);
1941 mono_counters_register ("Minor scan pinned", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &time_minor_scan_pinned);
1942 mono_counters_register ("Minor scan registered roots", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &time_minor_scan_registered_roots);
1943 mono_counters_register ("Minor scan thread data", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &time_minor_scan_thread_data);
1944 mono_counters_register ("Minor finish gray stack", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &time_minor_finish_gray_stack);
1945 mono_counters_register ("Minor fragment creation", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &time_minor_fragment_creation);
1947 mono_counters_register ("Major fragment clear", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &time_major_pre_collection_fragment_clear);
1948 mono_counters_register ("Major pinning", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &time_major_pinning);
1949 mono_counters_register ("Major scan pinned", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &time_major_scan_pinned);
1950 mono_counters_register ("Major scan registered roots", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &time_major_scan_registered_roots);
1951 mono_counters_register ("Major scan thread data", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &time_major_scan_thread_data);
1952 mono_counters_register ("Major scan alloc_pinned", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &time_major_scan_alloc_pinned);
1953 mono_counters_register ("Major scan finalized", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &time_major_scan_finalized);
1954 mono_counters_register ("Major scan big objects", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &time_major_scan_big_objects);
1955 mono_counters_register ("Major finish gray stack", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &time_major_finish_gray_stack);
1956 mono_counters_register ("Major free big objects", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &time_major_free_bigobjs);
1957 mono_counters_register ("Major LOS sweep", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &time_major_los_sweep);
1958 mono_counters_register ("Major sweep", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &time_major_sweep);
1959 mono_counters_register ("Major fragment creation", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &time_major_fragment_creation);
1961 mono_counters_register ("Number of pinned objects", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_pinned_objects);
/* Write-barrier and copy/scan statistics, only with HEAVY_STATISTICS. */
1963 #ifdef HEAVY_STATISTICS
1964 mono_counters_register ("WBarrier remember pointer", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_add_to_global_remset);
1965 mono_counters_register ("WBarrier set field", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_set_field);
1966 mono_counters_register ("WBarrier set arrayref", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_set_arrayref);
1967 mono_counters_register ("WBarrier arrayref copy", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_arrayref_copy);
1968 mono_counters_register ("WBarrier generic store called", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_generic_store);
1969 mono_counters_register ("WBarrier generic atomic store called", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_generic_store_atomic);
1970 mono_counters_register ("WBarrier set root", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_set_root);
1971 mono_counters_register ("WBarrier value copy", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_value_copy);
1972 mono_counters_register ("WBarrier object copy", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_object_copy);
1974 mono_counters_register ("# objects allocated degraded", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_objects_alloced_degraded);
1975 mono_counters_register ("bytes allocated degraded", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_bytes_alloced_degraded);
1977 mono_counters_register ("# copy_object() called (nursery)", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_copy_object_called_nursery);
1978 mono_counters_register ("# objects copied (nursery)", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_objects_copied_nursery);
1979 mono_counters_register ("# copy_object() called (major)", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_copy_object_called_major);
1980 mono_counters_register ("# objects copied (major)", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_objects_copied_major);
1982 mono_counters_register ("# scan_object() called (nursery)", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_scan_object_called_nursery);
1983 mono_counters_register ("# scan_object() called (major)", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_scan_object_called_major);
1985 mono_counters_register ("Slots allocated in vain", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_slots_allocated_in_vain);
1987 mono_counters_register ("# nursery copy_object() failed from space", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_nursery_copy_object_failed_from_space);
1988 mono_counters_register ("# nursery copy_object() failed forwarded", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_nursery_copy_object_failed_forwarded);
1989 mono_counters_register ("# nursery copy_object() failed pinned", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_nursery_copy_object_failed_pinned);
1990 mono_counters_register ("# nursery copy_object() failed to space", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_nursery_copy_object_failed_to_space);
1992 sgen_nursery_allocator_init_heavy_stats ();
1993 sgen_alloc_init_heavy_stats ();
/* Reset the per-collection tally of bytes pinned because an allocation
 * failed during evacuation (accumulated via
 * sgen_set_pinned_from_failed_allocation below). Called at the start of a
 * collection; return type and braces were lost in extraction. */
2001 reset_pinned_from_failed_allocation (void)
2003 bytes_pinned_from_failed_allocation = 0;
/* Record that `objsize` bytes had to stay pinned because copying the object
 * failed for lack of memory. major_do_collection later uses the accumulated
 * total to decide whether a follow-up major collection is warranted. */
2007 sgen_set_pinned_from_failed_allocation (mword objsize)
2009 bytes_pinned_from_failed_allocation += objsize;
/* Return whether the collection currently in progress is concurrent:
 * nursery collections never are; for old-generation collections this is
 * whatever concurrent_collection_in_progress says. Any other generation
 * value is a fatal error. NOTE(review): the nursery case's return statement
 * and the switch braces were elided by extraction. */
2013 sgen_collection_is_concurrent (void)
2015 switch (current_collection_generation) {
2016 case GENERATION_NURSERY:
2018 case GENERATION_OLD:
2019 return concurrent_collection_in_progress;
2021 g_error ("Invalid current generation %d", current_collection_generation);
/* Trivial accessor: expose the concurrent-collection-in-progress flag to
 * other modules without exporting the global itself. */
2026 sgen_concurrent_collection_in_progress (void)
2028 return concurrent_collection_in_progress;
/* NOTE(review): the struct's field declarations (presumably heap_start /
 * heap_end) were elided by extraction; only the closing line survives. */
2035 } FinishRememberedSetScanJobData;
/* Worker-thread job: finish scanning the remembered sets over the given heap
 * range, pushing grayed objects onto this worker's job gray queue, then free
 * the job-data record that was allocated by the enqueuing collector. */
2038 job_finish_remembered_set_scan (WorkerData *worker_data, void *job_data_untyped)
2040 FinishRememberedSetScanJobData *job_data = job_data_untyped;
2042 remset.finish_scan_remsets (job_data->heap_start, job_data->heap_end, sgen_workers_get_job_gray_queue (worker_data));
2043 sgen_free_internal_dynamic (job_data, sizeof (FinishRememberedSetScanJobData), INTERNAL_MEM_WORKER_JOB_DATA);
/* Job data for scanning registered roots: the copy/scan callbacks to use,
 * plus (elided by extraction) heap range and root-type fields. */
2048 CopyOrMarkObjectFunc copy_or_mark_func;
2049 ScanObjectFunc scan_func;
2053 } ScanFromRegisteredRootsJobData;
/* Worker-thread job: scan all registered roots of job_data->root_type within
 * [heap_start, heap_end) using the supplied scan/copy functions, then free
 * the job-data record. */
2056 job_scan_from_registered_roots (WorkerData *worker_data, void *job_data_untyped)
2058 ScanFromRegisteredRootsJobData *job_data = job_data_untyped;
2059 ScanCopyContext ctx = { job_data->scan_func, job_data->copy_or_mark_func,
2060 sgen_workers_get_job_gray_queue (worker_data) };
2062 scan_from_registered_roots (job_data->heap_start, job_data->heap_end, job_data->root_type, ctx);
2063 sgen_free_internal_dynamic (job_data, sizeof (ScanFromRegisteredRootsJobData), INTERNAL_MEM_WORKER_JOB_DATA);
/* NOTE(review): heap_start/heap_end field declarations elided by extraction. */
2070 } ScanThreadDataJobData;
/* Worker-thread job: conservatively scan thread stacks and registers for
 * pointers into [heap_start, heap_end) (TRUE presumably selects precise
 * scanning of managed frames — confirm against scan_thread_data's
 * signature), then free the job-data record. */
2073 job_scan_thread_data (WorkerData *worker_data, void *job_data_untyped)
2075 ScanThreadDataJobData *job_data = job_data_untyped;
2077 scan_thread_data (job_data->heap_start, job_data->heap_end, TRUE,
2078 sgen_workers_get_job_gray_queue (worker_data));
2079 sgen_free_internal_dynamic (job_data, sizeof (ScanThreadDataJobData), INTERNAL_MEM_WORKER_JOB_DATA);
/* Worker-thread job: mark the objects on a finalize-ready list so they
 * survive this collection. The job data is the list head itself, so nothing
 * is freed here. The context has no scan function — only copy_or_mark. */
2083 job_scan_finalizer_entries (WorkerData *worker_data, void *job_data_untyped)
2085 FinalizeReadyEntry *list = job_data_untyped;
2086 ScanCopyContext ctx = { NULL, current_object_ops.copy_or_mark_object, sgen_workers_get_job_gray_queue (worker_data) };
2088 scan_finalizer_entries (list, ctx);
/* Worker-thread job: scan the major heap's mod-union card table (the cards
 * dirtied while concurrent mark ran). Only valid during a concurrent
 * collection, hence the assert. */
2092 job_scan_major_mod_union_cardtable (WorkerData *worker_data, void *job_data_untyped)
2094 g_assert (concurrent_collection_in_progress);
2095 major_collector.scan_card_table (TRUE, sgen_workers_get_job_gray_queue (worker_data));
/* Worker-thread job: same as the major mod-union scan above, but for the
 * large-object space. Concurrent collections only. */
2099 job_scan_los_mod_union_cardtable (WorkerData *worker_data, void *job_data_untyped)
2101 g_assert (concurrent_collection_in_progress);
2102 sgen_los_scan_card_table (TRUE, sgen_workers_get_job_gray_queue (worker_data));
/* Debug helper: report any nursery scan-start entry that points strictly
 * inside (start, end) — i.e. into the middle of an object — which would
 * indicate a corrupt scan-starts table. Logging only; does not abort. */
2106 verify_scan_starts (char *start, char *end)
2110 for (i = 0; i < nursery_section->num_scan_start; ++i) {
2111 char *addr = nursery_section->scan_starts [i];
2112 if (addr > start && addr < end)
2113 SGEN_LOG (1, "NFC-BAD SCAN START [%zu] %p for obj [%p %p]", i, addr, start, end);
/* Debug walk of the whole nursery (gated by do_verify_nursery): logs
 * forwarded/pinned objects, validates scan starts per object, optionally
 * dumps holes and object extents, and checks canaries when enabled.
 * NOTE(review): the enclosing walk loop, `ss`/`size` declarations and
 * several continue/brace lines were elided by extraction. */
2118 verify_nursery (void)
2120 char *start, *end, *cur, *hole_start;
2122 if (!do_verify_nursery)
2125 if (nursery_canaries_enabled ())
2126 SGEN_LOG (1, "Checking nursery canaries...");
2128 /* This cleans up unused fragments */
2129 sgen_nursery_allocator_prepare_for_pinning ();
2131 hole_start = start = cur = sgen_get_nursery_start ();
2132 end = sgen_get_nursery_end ();
/* A null word marks a hole left by fragment clearing; skip word by word. */
2137 if (!*(void**)cur) {
2138 cur += sizeof (void*);
2142 if (object_is_forwarded (cur))
2143 SGEN_LOG (1, "FORWARDED OBJ %p", cur);
2144 else if (object_is_pinned (cur))
2145 SGEN_LOG (1, "PINNED OBJ %p", cur);
2147 ss = safe_object_get_size ((MonoObject*)cur);
2148 size = ALIGN_UP (safe_object_get_size ((MonoObject*)cur));
2149 verify_scan_starts (cur, cur + size);
2150 if (do_dump_nursery_content) {
2151 if (cur > hole_start)
2152 SGEN_LOG (1, "HOLE [%p %p %d]", hole_start, cur, (int)(cur - hole_start));
2153 SGEN_LOG (1, "OBJ [%p %p %d %d %s %d]", cur, cur + size, (int)size, (int)ss, sgen_safe_name ((MonoObject*)cur), (gpointer)LOAD_VTABLE (cur) == sgen_get_array_fill_vtable ());
/* Array-fill filler objects carry no canary; skip them. */
2155 if (nursery_canaries_enabled () && (MonoVTable*)SGEN_LOAD_VTABLE (cur) != array_fill_vtable) {
2156 CHECK_CANARY_FOR_OBJECT (cur);
2157 CANARIFY_SIZE (size);
2165 * Checks that no objects in the nursery are forwarded or pinned. This
2166 * is a precondition to restarting the mutator while doing a
2167 * concurrent collection. Note that we don't clear fragments because
2168 * we depend on that having happened earlier.
2171 check_nursery_is_clean (void)
2173 char *start, *end, *cur;
2175 start = cur = sgen_get_nursery_start ();
2176 end = sgen_get_nursery_end ();
/* Null words are holes between fragments; step over them a word at a time.
 * NOTE(review): the surrounding walk loop and size declarations were elided
 * by extraction. */
2181 if (!*(void**)cur) {
2182 cur += sizeof (void*);
2186 g_assert (!object_is_forwarded (cur));
2187 g_assert (!object_is_pinned (cur));
2189 ss = safe_object_get_size ((MonoObject*)cur);
2190 size = ALIGN_UP (safe_object_get_size ((MonoObject*)cur));
2191 verify_scan_starts (cur, cur + size);
/* Prepare the gray queues for a collection: for concurrent collections also
 * initialize the workers' distribute gray queue, then init the main-thread
 * queue (no alloc-prepare callback). */
2198 init_gray_queue (void)
2200 if (sgen_collection_is_concurrent ())
2201 sgen_workers_init_distribute_gray_queue ();
2202 sgen_gray_object_queue_init (&gray_queue, NULL);
2206 * Perform a nursery collection.
2208 * Return whether any objects were late-pinned due to being out of memory.
/* NOTE(review): extraction dropped many lines from this function (braces,
 * TV_GETTIME calls, early returns, #endif, etc.); comments below only state
 * what the surviving lines show. */
2211 collect_nursery (SgenGrayQueue *unpin_queue, gboolean finish_up_concurrent_mark)
2213 gboolean needs_major;
2214 size_t max_garbage_amount;
2216 FinishRememberedSetScanJobData *frssjd;
2217 ScanFromRegisteredRootsJobData *scrrjd_normal, *scrrjd_wbarrier;
2218 ScanThreadDataJobData *stdjd;
2219 mword fragment_total;
2220 ScanCopyContext ctx;
2224 if (disable_minor_collections)
2227 TV_GETTIME (last_minor_collection_start_tv);
2228 atv = last_minor_collection_start_tv;
2230 MONO_GC_BEGIN (GENERATION_NURSERY);
2231 binary_protocol_collection_begin (gc_stats.minor_gc_count, GENERATION_NURSERY);
2235 #ifndef DISABLE_PERFCOUNTERS
2236 mono_perfcounters->gc_collections0++;
/* Minor collections always use the minor collector's serial object ops. */
2239 current_collection_generation = GENERATION_NURSERY;
2240 current_object_ops = sgen_minor_collector.serial_ops;
2242 reset_pinned_from_failed_allocation ();
2244 check_scan_starts ();
2246 sgen_nursery_alloc_prepare_for_minor ();
2250 nursery_next = sgen_nursery_alloc_get_upper_alloc_bound ();
2251 /* FIXME: optimize later to use the higher address where an object can be present */
2252 nursery_next = MAX (nursery_next, sgen_get_nursery_end ());
2254 SGEN_LOG (1, "Start nursery collection %d %p-%p, size: %d", gc_stats.minor_gc_count, sgen_get_nursery_start (), nursery_next, (int)(nursery_next - sgen_get_nursery_start ()));
2255 max_garbage_amount = nursery_next - sgen_get_nursery_start ();
2256 g_assert (nursery_section->size >= max_garbage_amount);
2258 /* world must be stopped already */
2260 time_minor_pre_collection_fragment_clear += TV_ELAPSED (atv, btv);
/* Optional cross-domain reference check; requires cleared fragments. */
2262 if (xdomain_checks) {
2263 sgen_clear_nursery_fragments ();
2264 sgen_check_for_xdomain_refs ();
2267 nursery_section->next_data = nursery_next;
2269 major_collector.start_nursery_collection ();
2271 sgen_memgov_minor_collection_start ();
2275 gc_stats.minor_gc_count ++;
2277 if (whole_heap_check_before_collection) {
2278 sgen_clear_nursery_fragments ();
2279 sgen_check_whole_heap (finish_up_concurrent_mark);
2281 if (consistency_check_at_minor_collection)
2282 sgen_check_consistency ();
2284 MONO_GC_CHECKPOINT_1 (GENERATION_NURSERY);
2286 sgen_process_fin_stage_entries ();
2287 sgen_process_dislink_stage_entries ();
2289 MONO_GC_CHECKPOINT_2 (GENERATION_NURSERY);
2291 /* pin from pinned handles */
2292 sgen_init_pinning ();
2293 mono_profiler_gc_event (MONO_GC_EVENT_MARK_START, 0);
2294 pin_from_roots (sgen_get_nursery_start (), nursery_next, WORKERS_DISTRIBUTE_GRAY_QUEUE);
2295 /* pin cemented objects */
2296 sgen_pin_cemented_objects ();
2297 /* identify pinned objects */
2298 sgen_optimize_pin_queue ();
2299 sgen_pinning_setup_section (nursery_section);
/* Pinning pass only: no scan/copy callbacks, just enqueue to workers. */
2300 ctx.scan_func = NULL;
2301 ctx.copy_func = NULL;
2302 ctx.queue = WORKERS_DISTRIBUTE_GRAY_QUEUE;
2303 pin_objects_in_nursery (ctx);
2304 sgen_pinning_trim_queue_to_section (nursery_section);
2307 time_minor_pinning += TV_ELAPSED (btv, atv);
2308 SGEN_LOG (2, "Finding pinned pointers: %zd in %d usecs", sgen_get_pinned_count (), TV_ELAPSED (btv, atv));
2309 SGEN_LOG (4, "Start scan with %zd pinned objects", sgen_get_pinned_count ());
2311 MONO_GC_CHECKPOINT_3 (GENERATION_NURSERY);
/* Enqueue remembered-set scan over the live nursery range; the job frees
 * frssjd itself. */
2313 frssjd = sgen_alloc_internal_dynamic (sizeof (FinishRememberedSetScanJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
2314 frssjd->heap_start = sgen_get_nursery_start ();
2315 frssjd->heap_end = nursery_next;
2316 sgen_workers_enqueue_job (job_finish_remembered_set_scan, frssjd);
2318 /* we don't have complete write barrier yet, so we scan all the old generation sections */
2320 time_minor_scan_remsets += TV_ELAPSED (atv, btv);
2321 SGEN_LOG (2, "Old generation scan: %d usecs", TV_ELAPSED (atv, btv));
2323 MONO_GC_CHECKPOINT_4 (GENERATION_NURSERY);
2325 /* FIXME: why is this here? */
2326 ctx.scan_func = current_object_ops.scan_object;
2327 ctx.copy_func = NULL;
2328 ctx.queue = &gray_queue;
2329 sgen_drain_gray_stack (-1, ctx);
2331 if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS)
2332 report_registered_roots ();
2333 if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS)
2334 report_finalizer_roots ();
2336 time_minor_scan_pinned += TV_ELAPSED (btv, atv);
2338 MONO_GC_CHECKPOINT_5 (GENERATION_NURSERY);
2340 /* registered roots, this includes static fields */
2341 scrrjd_normal = sgen_alloc_internal_dynamic (sizeof (ScanFromRegisteredRootsJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
2342 scrrjd_normal->copy_or_mark_func = current_object_ops.copy_or_mark_object;
2343 scrrjd_normal->scan_func = current_object_ops.scan_object;
2344 scrrjd_normal->heap_start = sgen_get_nursery_start ();
2345 scrrjd_normal->heap_end = nursery_next;
2346 scrrjd_normal->root_type = ROOT_TYPE_NORMAL;
2347 sgen_workers_enqueue_job (job_scan_from_registered_roots, scrrjd_normal);
/* Same scan again for write-barrier roots. */
2349 scrrjd_wbarrier = sgen_alloc_internal_dynamic (sizeof (ScanFromRegisteredRootsJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
2350 scrrjd_wbarrier->copy_or_mark_func = current_object_ops.copy_or_mark_object;
2351 scrrjd_wbarrier->scan_func = current_object_ops.scan_object;
2352 scrrjd_wbarrier->heap_start = sgen_get_nursery_start ();
2353 scrrjd_wbarrier->heap_end = nursery_next;
2354 scrrjd_wbarrier->root_type = ROOT_TYPE_WBARRIER;
2355 sgen_workers_enqueue_job (job_scan_from_registered_roots, scrrjd_wbarrier);
2358 time_minor_scan_registered_roots += TV_ELAPSED (atv, btv);
2360 MONO_GC_CHECKPOINT_6 (GENERATION_NURSERY);
/* Thread stacks and registers. */
2363 stdjd = sgen_alloc_internal_dynamic (sizeof (ScanThreadDataJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
2364 stdjd->heap_start = sgen_get_nursery_start ();
2365 stdjd->heap_end = nursery_next;
2366 sgen_workers_enqueue_job (job_scan_thread_data, stdjd);
2369 time_minor_scan_thread_data += TV_ELAPSED (btv, atv);
2372 MONO_GC_CHECKPOINT_7 (GENERATION_NURSERY);
2374 g_assert (!sgen_collection_is_concurrent ());
2376 /* Scan the list of objects ready for finalization. If */
2377 sgen_workers_enqueue_job (job_scan_finalizer_entries, fin_ready_list);
2378 sgen_workers_enqueue_job (job_scan_finalizer_entries, critical_fin_list);
2380 MONO_GC_CHECKPOINT_8 (GENERATION_NURSERY);
2382 finish_gray_stack (GENERATION_NURSERY, &gray_queue);
2384 time_minor_finish_gray_stack += TV_ELAPSED (btv, atv);
2385 mono_profiler_gc_event (MONO_GC_EVENT_MARK_END, 0);
2387 MONO_GC_CHECKPOINT_9 (GENERATION_NURSERY);
2390 * The (single-threaded) finalization code might have done
2391 * some copying/marking so we can only reset the GC thread's
2392 * worker data here instead of earlier when we joined the
2395 sgen_workers_reset_data ();
/* Late pinning (OOM during evacuation) requires re-sorting the pin queue. */
2397 if (objects_pinned) {
2398 sgen_optimize_pin_queue ();
2399 sgen_pinning_setup_section (nursery_section);
2402 /* walk the pin_queue, build up the fragment list of free memory, unmark
2403 * pinned objects as we go, memzero() the empty fragments so they are ready for the
2406 mono_profiler_gc_event (MONO_GC_EVENT_RECLAIM_START, 0);
2407 fragment_total = sgen_build_nursery_fragments (nursery_section, unpin_queue);
2408 if (!fragment_total)
2411 /* Clear TLABs for all threads */
2412 sgen_clear_tlabs ();
2414 mono_profiler_gc_event (MONO_GC_EVENT_RECLAIM_END, 0);
2416 time_minor_fragment_creation += TV_ELAPSED (atv, btv);
2417 SGEN_LOG (2, "Fragment creation: %d usecs, %lu bytes available", TV_ELAPSED (atv, btv), (unsigned long)fragment_total);
2419 if (consistency_check_at_minor_collection)
2420 sgen_check_major_refs ();
2422 major_collector.finish_nursery_collection ();
2424 TV_GETTIME (last_minor_collection_end_tv);
2425 gc_stats.minor_gc_time += TV_ELAPSED (last_minor_collection_start_tv, last_minor_collection_end_tv);
2428 dump_heap ("minor", gc_stats.minor_gc_count - 1, NULL);
2430 /* prepare the pin queue for the next collection */
2431 sgen_finish_pinning ();
2432 if (fin_ready_list || critical_fin_list) {
2433 SGEN_LOG (4, "Finalizer-thread wakeup: ready %d", num_ready_finalizers);
2434 mono_gc_finalize_notify ();
2436 sgen_pin_stats_reset ();
2437 /* clear cemented hash */
2438 sgen_cement_clear_below_threshold ();
2440 g_assert (sgen_gray_object_queue_is_empty (&gray_queue));
2442 remset.finish_minor_collection ();
2444 check_scan_starts ();
2446 binary_protocol_flush_buffers (FALSE);
2448 sgen_memgov_minor_collection_end ();
2450 /* objects are late pinned because of lack of memory, so a major is a good call */
2451 needs_major = objects_pinned > 0;
2452 current_collection_generation = -1;
2455 MONO_GC_END (GENERATION_NURSERY);
2456 binary_protocol_collection_end (gc_stats.minor_gc_count - 1, GENERATION_NURSERY, 0, 0);
2458 if (check_nursery_objects_pinned && !sgen_minor_collector.is_split)
2459 sgen_check_nursery_objects_pinned (unpin_queue != NULL);
/* Per-object callback used by scan_nursery_objects: scan one nursery object
 * with the context's scan function. */
2465 scan_nursery_objects_callback (char *obj, size_t size, ScanCopyContext *ctx)
2468 * This is called on all objects in the nursery, including pinned ones, so we need
2469 * to use sgen_obj_get_descriptor_safe(), which masks out the vtable tag bits.
2471 ctx->scan_func (obj, sgen_obj_get_descriptor_safe (obj), ctx->queue);
/* Scan every object in the nursery section (pinned or not) via
 * scan_nursery_objects_callback; the FALSE flag is passed through to
 * sgen_scan_area_with_callback. */
2475 scan_nursery_objects (ScanCopyContext ctx)
2477 sgen_scan_area_with_callback (nursery_section->data, nursery_section->end_data,
2478 (IterateObjectCallbackFunc)scan_nursery_objects_callback, (void*)&ctx, FALSE);
/* Root marking phase shared by major-collection start and finish pauses:
 * pins from roots, marks pinned nursery/LOS/major objects, then enqueues
 * worker jobs for registered roots, thread data, finalizer entries and (when
 * finishing a concurrent mark) the mod-union card tables.
 * NOTE(review): extraction dropped braces, TV_GETTIME calls and several
 * statements throughout; comments only state what surviving lines show. */
2482 major_copy_or_mark_from_roots (size_t *old_next_pin_slot, gboolean start_concurrent_mark, gboolean finish_up_concurrent_mark, gboolean scan_mod_union, gboolean scan_whole_nursery)
2487 /* FIXME: only use these values for the precise scan
2488 * note that to_space pointers should be excluded anyway...
2490 char *heap_start = NULL;
2491 char *heap_end = (char*)-1;
2492 gboolean profile_roots = mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS;
2493 GCRootReport root_report = { 0 };
2494 ScanFromRegisteredRootsJobData *scrrjd_normal, *scrrjd_wbarrier;
2495 ScanThreadDataJobData *stdjd;
2496 ScanCopyContext ctx;
2498 if (concurrent_collection_in_progress) {
2499 /* This cleans up unused fragments */
2500 sgen_nursery_allocator_prepare_for_pinning ();
2502 if (do_concurrent_checks)
2503 check_nursery_is_clean ();
2505 /* The concurrent collector doesn't touch the nursery. */
2506 sgen_nursery_alloc_prepare_for_major ();
2513 /* Pinning depends on this */
2514 sgen_clear_nursery_fragments ();
2516 if (whole_heap_check_before_collection)
2517 sgen_check_whole_heap (finish_up_concurrent_mark);
2520 time_major_pre_collection_fragment_clear += TV_ELAPSED (atv, btv);
2522 if (!sgen_collection_is_concurrent ())
2523 nursery_section->next_data = sgen_get_nursery_end ();
2524 /* we should also coalesce scanning from sections close to each other
2525 * and deal with pointers outside of the sections later.
2529 *major_collector.have_swept = FALSE;
2531 if (xdomain_checks) {
2532 sgen_clear_nursery_fragments ();
2533 sgen_check_for_xdomain_refs ();
2536 if (!concurrent_collection_in_progress) {
2537 /* Remsets are not useful for a major collection */
2538 remset.prepare_for_major_collection ();
2541 sgen_process_fin_stage_entries ();
2542 sgen_process_dislink_stage_entries ();
2545 sgen_init_pinning ();
2546 SGEN_LOG (6, "Collecting pinned addresses");
2547 pin_from_roots ((void*)lowest_heap_address, (void*)highest_heap_address, WORKERS_DISTRIBUTE_GRAY_QUEUE);
2549 if (!concurrent_collection_in_progress || finish_up_concurrent_mark) {
2550 if (major_collector.is_concurrent) {
2552 * The concurrent major collector cannot evict
2553 * yet, so we need to pin cemented objects to
2554 * not break some asserts.
2556 * FIXME: We could evict now!
2558 sgen_pin_cemented_objects ();
2561 if (!concurrent_collection_in_progress)
2562 sgen_cement_reset ();
2565 sgen_optimize_pin_queue ();
2568 * pin_queue now contains all candidate pointers, sorted and
2569 * uniqued. We must do two passes now to figure out which
2570 * objects are pinned.
2572 * The first is to find within the pin_queue the area for each
2573 * section. This requires that the pin_queue be sorted. We
2574 * also process the LOS objects and pinned chunks here.
2576 * The second, destructive, pass is to reduce the section
2577 * areas to pointers to the actually pinned objects.
2579 SGEN_LOG (6, "Pinning from sections");
2580 /* first pass for the sections */
2581 sgen_find_section_pin_queue_start_end (nursery_section);
2582 major_collector.find_pin_queue_start_ends (WORKERS_DISTRIBUTE_GRAY_QUEUE);
2583 /* identify possible pointers to the inside of large objects */
2584 SGEN_LOG (6, "Pinning from large objects");
2585 for (bigobj = los_object_list; bigobj; bigobj = bigobj->next) {
2587 if (sgen_find_optimized_pin_queue_area (bigobj->data, (char*)bigobj->data + sgen_los_object_size (bigobj), &dummy, &dummy)) {
2588 binary_protocol_pin (bigobj->data, (gpointer)LOAD_VTABLE (bigobj->data), safe_object_get_size (((MonoObject*)(bigobj->data))));
2590 #ifdef ENABLE_DTRACE
2591 if (G_UNLIKELY (MONO_GC_OBJ_PINNED_ENABLED ())) {
2592 MonoVTable *vt = (MonoVTable*)LOAD_VTABLE (bigobj->data);
2593 MONO_GC_OBJ_PINNED ((mword)bigobj->data, sgen_safe_object_get_size ((MonoObject*)bigobj->data), vt->klass->name_space, vt->klass->name, GENERATION_OLD);
/* A LOS object can only already be pinned when finishing concurrent mark. */
2597 if (sgen_los_object_is_pinned (bigobj->data)) {
2598 g_assert (finish_up_concurrent_mark);
2601 sgen_los_pin_object (bigobj->data);
2602 if (SGEN_OBJECT_HAS_REFERENCES (bigobj->data))
2603 GRAY_OBJECT_ENQUEUE (WORKERS_DISTRIBUTE_GRAY_QUEUE, bigobj->data, sgen_obj_get_descriptor (bigobj->data));
2604 if (G_UNLIKELY (do_pin_stats))
2605 sgen_pin_stats_register_object ((char*) bigobj->data, safe_object_get_size ((MonoObject*) bigobj->data));
2606 SGEN_LOG (6, "Marked large object %p (%s) size: %lu from roots", bigobj->data, safe_name (bigobj->data), (unsigned long)sgen_los_object_size (bigobj));
2609 add_profile_gc_root (&root_report, bigobj->data, MONO_PROFILE_GC_ROOT_PINNING | MONO_PROFILE_GC_ROOT_MISC, 0);
2613 notify_gc_roots (&root_report);
2614 /* second pass for the sections */
2615 ctx.scan_func = concurrent_collection_in_progress ? current_object_ops.scan_object : NULL;
2616 ctx.copy_func = NULL;
2617 ctx.queue = WORKERS_DISTRIBUTE_GRAY_QUEUE;
2620 * Concurrent mark never follows references into the nursery. In the start and
2621 * finish pauses we must scan live nursery objects, though.
2623 * In the finish pause we do this conservatively by scanning all nursery objects.
2624 * Previously we would only scan pinned objects here. We assumed that all objects
2625 * that were pinned during the nursery collection immediately preceding this finish
2626 * mark would be pinned again here. Due to the way we get the stack end for the GC
2627 * thread, however, that's not necessarily the case: we scan part of the stack used
2628 * by the GC itself, which changes constantly, so pinning isn't entirely
2631 * The split nursery also complicates things because non-pinned objects can survive
2632 * in the nursery. That's why we need to do a full scan of the nursery for it, too.
2634 * In the future we shouldn't do a preceding nursery collection at all and instead
2635 * do the finish pause with promotion from the nursery.
2637 * A further complication arises when we have late-pinned objects from the preceding
2638 * nursery collection. Those are the result of being out of memory when trying to
2639 * evacuate objects. They won't be found from the roots, so we just scan the whole
2642 * Non-concurrent mark evacuates from the nursery, so it's
2643 * sufficient to just scan pinned nursery objects.
2645 if (scan_whole_nursery || finish_up_concurrent_mark || (concurrent_collection_in_progress && sgen_minor_collector.is_split)) {
2646 scan_nursery_objects (ctx);
2648 pin_objects_in_nursery (ctx);
2649 if (check_nursery_objects_pinned && !sgen_minor_collector.is_split)
2650 sgen_check_nursery_objects_pinned (!concurrent_collection_in_progress || finish_up_concurrent_mark);
2653 major_collector.pin_objects (WORKERS_DISTRIBUTE_GRAY_QUEUE);
2654 if (old_next_pin_slot)
2655 *old_next_pin_slot = sgen_get_pinned_count ();
2658 time_major_pinning += TV_ELAPSED (atv, btv);
2659 SGEN_LOG (2, "Finding pinned pointers: %zd in %d usecs", sgen_get_pinned_count (), TV_ELAPSED (atv, btv));
2660 SGEN_LOG (4, "Start scan with %zd pinned objects", sgen_get_pinned_count ());
2662 major_collector.init_to_space ();
2665 * The concurrent collector doesn't move objects, neither on
2666 * the major heap nor in the nursery, so we can mark even
2667 * before pinning has finished. For the non-concurrent
2668 * collector we start the workers after pinning.
2670 if (start_concurrent_mark) {
2671 sgen_workers_start_all_workers ();
2672 gray_queue_enable_redirect (WORKERS_DISTRIBUTE_GRAY_QUEUE);
2675 #ifdef SGEN_DEBUG_INTERNAL_ALLOC
2676 main_gc_thread = mono_native_thread_self ();
2679 if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS)
2680 report_registered_roots ();
2682 time_major_scan_pinned += TV_ELAPSED (btv, atv);
2684 /* registered roots, this includes static fields */
2685 scrrjd_normal = sgen_alloc_internal_dynamic (sizeof (ScanFromRegisteredRootsJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
2686 scrrjd_normal->copy_or_mark_func = current_object_ops.copy_or_mark_object;
2687 scrrjd_normal->scan_func = current_object_ops.scan_object;
2688 scrrjd_normal->heap_start = heap_start;
2689 scrrjd_normal->heap_end = heap_end;
2690 scrrjd_normal->root_type = ROOT_TYPE_NORMAL;
2691 sgen_workers_enqueue_job (job_scan_from_registered_roots, scrrjd_normal);
2693 scrrjd_wbarrier = sgen_alloc_internal_dynamic (sizeof (ScanFromRegisteredRootsJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
2694 scrrjd_wbarrier->copy_or_mark_func = current_object_ops.copy_or_mark_object;
2695 scrrjd_wbarrier->scan_func = current_object_ops.scan_object;
2696 scrrjd_wbarrier->heap_start = heap_start;
2697 scrrjd_wbarrier->heap_end = heap_end;
2698 scrrjd_wbarrier->root_type = ROOT_TYPE_WBARRIER;
2699 sgen_workers_enqueue_job (job_scan_from_registered_roots, scrrjd_wbarrier);
2702 time_major_scan_registered_roots += TV_ELAPSED (atv, btv);
/* Thread stacks and registers. */
2705 stdjd = sgen_alloc_internal_dynamic (sizeof (ScanThreadDataJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
2706 stdjd->heap_start = heap_start;
2707 stdjd->heap_end = heap_end;
2708 sgen_workers_enqueue_job (job_scan_thread_data, stdjd);
2711 time_major_scan_thread_data += TV_ELAPSED (btv, atv);
2714 time_major_scan_alloc_pinned += TV_ELAPSED (atv, btv);
2716 if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS)
2717 report_finalizer_roots ();
2719 /* scan the list of objects ready for finalization */
2720 sgen_workers_enqueue_job (job_scan_finalizer_entries, fin_ready_list);
2721 sgen_workers_enqueue_job (job_scan_finalizer_entries, critical_fin_list);
2723 if (scan_mod_union) {
2724 g_assert (finish_up_concurrent_mark);
2726 /* Mod union card table */
2727 sgen_workers_enqueue_job (job_scan_major_mod_union_cardtable, NULL);
2728 sgen_workers_enqueue_job (job_scan_los_mod_union_cardtable, NULL);
2732 time_major_scan_finalized += TV_ELAPSED (btv, atv);
2733 SGEN_LOG (2, "Root scan: %d usecs", TV_ELAPSED (btv, atv));
2736 time_major_scan_big_objects += TV_ELAPSED (atv, btv);
2738 if (concurrent_collection_in_progress) {
2739 /* prepare the pin queue for the next collection */
2740 sgen_finish_pinning ();
2742 sgen_pin_stats_reset ();
2744 if (do_concurrent_checks)
2745 check_nursery_is_clean ();
/* Begin a major collection: select serial vs. concurrent object ops, set up
 * memory-governor accounting and statistics, then run the shared root
 * copy/mark phase. NOTE(review): the branch structure around the
 * concurrent-setup lines was partially elided by extraction. */
2750 major_start_collection (gboolean concurrent, size_t *old_next_pin_slot)
2752 MONO_GC_BEGIN (GENERATION_OLD);
2753 binary_protocol_collection_begin (gc_stats.major_gc_count, GENERATION_OLD);
2755 current_collection_generation = GENERATION_OLD;
2756 #ifndef DISABLE_PERFCOUNTERS
2757 mono_perfcounters->gc_collections1++;
2760 g_assert (sgen_section_gray_queue_is_empty (sgen_workers_get_distribute_section_gray_queue ()));
/* Concurrent path: flag the collection and start cementing support. */
2763 g_assert (major_collector.is_concurrent);
2764 concurrent_collection_in_progress = TRUE;
2766 sgen_cement_concurrent_start ();
2768 current_object_ops = major_collector.major_concurrent_ops;
2770 current_object_ops = major_collector.major_ops;
2773 reset_pinned_from_failed_allocation ();
2775 sgen_memgov_major_collection_start ();
2777 //count_ref_nonref_objs ();
2778 //consistency_check ();
2780 check_scan_starts ();
2783 SGEN_LOG (1, "Start major collection %d", gc_stats.major_gc_count);
2784 gc_stats.major_gc_count ++;
2786 if (major_collector.start_major_collection)
2787 major_collector.start_major_collection ();
/* start_concurrent_mark = `concurrent`; not finishing, no mod-union scan,
 * no whole-nursery scan at collection start. */
2789 major_copy_or_mark_from_roots (old_next_pin_slot, concurrent, FALSE, FALSE, FALSE);
/* Busy-wait until all worker threads report done. NOTE(review): the loop
 * body (presumably a short sleep or usleep) was elided by extraction. */
2793 wait_for_workers_to_finish (void)
2795 while (!sgen_workers_all_done ())
/* Finish a major collection: (for concurrent) re-run root marking and join
 * the workers, drain the gray stack through finalization, rebuild nursery
 * fragments, sweep LOS and the major heap, wake the finalizer thread, and
 * close out statistics/protocol records. NOTE(review): extraction dropped
 * braces, TV_GETTIME calls and several statements; comments only describe
 * surviving lines. */
2800 major_finish_collection (const char *reason, size_t old_next_pin_slot, gboolean scan_mod_union, gboolean scan_whole_nursery)
2802 ScannedObjectCounts counts;
2803 LOSObject *bigobj, *prevbo;
/* Concurrent path: pause workers, redo the root marking as a finish pause
 * (finish_up_concurrent_mark = TRUE), then resume and join. */
2809 if (concurrent_collection_in_progress) {
2810 sgen_workers_signal_start_nursery_collection_and_wait ();
2812 current_object_ops = major_collector.major_concurrent_ops;
2814 major_copy_or_mark_from_roots (NULL, FALSE, TRUE, scan_mod_union, scan_whole_nursery);
2816 sgen_workers_signal_finish_nursery_collection ();
2817 gray_queue_enable_redirect (WORKERS_DISTRIBUTE_GRAY_QUEUE);
2819 sgen_workers_join ();
2821 SGEN_ASSERT (0, sgen_gray_object_queue_is_empty (&gray_queue), "Why is the gray queue not empty after workers have finished working?");
2823 #ifdef SGEN_DEBUG_INTERNAL_ALLOC
2824 main_gc_thread = NULL;
2827 if (do_concurrent_checks)
2828 check_nursery_is_clean ();
2830 SGEN_ASSERT (0, !scan_whole_nursery, "scan_whole_nursery only applies to concurrent collections");
2831 current_object_ops = major_collector.major_ops;
2835 * The workers have stopped so we need to finish gray queue
2836 * work that might result from finalization in the main GC
2837 * thread. Redirection must therefore be turned off.
2839 sgen_gray_object_queue_disable_alloc_prepare (&gray_queue);
2840 g_assert (sgen_section_gray_queue_is_empty (sgen_workers_get_distribute_section_gray_queue ()));
2842 /* all the objects in the heap */
2843 finish_gray_stack (GENERATION_OLD, &gray_queue);
2845 time_major_finish_gray_stack += TV_ELAPSED (btv, atv);
2847 SGEN_ASSERT (0, sgen_workers_all_done (), "Can't have workers working after joining");
2850 * The (single-threaded) finalization code might have done
2851 * some copying/marking so we can only reset the GC thread's
2852 * worker data here instead of earlier when we joined the
2855 sgen_workers_reset_data ();
/* Late pinning due to OOM: rebuild the nursery part of the pin queue. */
2857 if (objects_pinned) {
2858 g_assert (!concurrent_collection_in_progress);
2861 * This is slow, but we just OOM'd.
2863 * See comment at `sgen_pin_queue_clear_discarded_entries` for how the pin
2864 * queue is laid out at this point.
2866 sgen_pin_queue_clear_discarded_entries (nursery_section, old_next_pin_slot);
2868 * We need to reestablish all pinned nursery objects in the pin queue
2869 * because they're needed for fragment creation. Unpinning happens by
2870 * walking the whole queue, so it's not necessary to reestablish where major
2871 * heap block pins are - all we care is that they're still in there
2874 sgen_optimize_pin_queue ();
2875 sgen_find_section_pin_queue_start_end (nursery_section);
2879 reset_heap_boundaries ();
2880 sgen_update_heap_boundaries ((mword)sgen_get_nursery_start (), (mword)sgen_get_nursery_end ());
2882 if (!concurrent_collection_in_progress) {
2883 /* walk the pin_queue, build up the fragment list of free memory, unmark
2884 * pinned objects as we go, memzero() the empty fragments so they are ready for the
2887 if (!sgen_build_nursery_fragments (nursery_section, NULL))
2890 /* prepare the pin queue for the next collection */
2891 sgen_finish_pinning ();
2893 /* Clear TLABs for all threads */
2894 sgen_clear_tlabs ();
2896 sgen_pin_stats_reset ();
2899 if (concurrent_collection_in_progress)
2900 sgen_cement_concurrent_finish ();
2901 sgen_cement_clear_below_threshold ();
2903 if (check_mark_bits_after_major_collection)
2904 sgen_check_heap_marked (concurrent_collection_in_progress);
2907 time_major_fragment_creation += TV_ELAPSED (atv, btv);
2910 MONO_GC_SWEEP_BEGIN (GENERATION_OLD, !major_collector.sweeps_lazily);
2912 /* sweep the big objects list */
/* Pinned LOS objects survive (unpin + extend heap bounds); unpinned ones are
 * unlinked and freed. NOTE(review): the prevbo/to_free bookkeeping lines
 * were partially elided by extraction. */
2914 for (bigobj = los_object_list; bigobj;) {
2915 g_assert (!object_is_pinned (bigobj->data));
2916 if (sgen_los_object_is_pinned (bigobj->data)) {
2917 sgen_los_unpin_object (bigobj->data);
2918 sgen_update_heap_boundaries ((mword)bigobj->data, (mword)bigobj->data + sgen_los_object_size (bigobj));
2921 /* not referenced anywhere, so we can free it */
2923 prevbo->next = bigobj->next;
2925 los_object_list = bigobj->next;
2927 bigobj = bigobj->next;
2928 sgen_los_free_object (to_free);
2932 bigobj = bigobj->next;
2936 time_major_free_bigobjs += TV_ELAPSED (btv, atv);
2941 time_major_los_sweep += TV_ELAPSED (atv, btv);
2943 major_collector.sweep ();
2945 MONO_GC_SWEEP_END (GENERATION_OLD, !major_collector.sweeps_lazily);
2948 time_major_sweep += TV_ELAPSED (btv, atv);
2951 dump_heap ("major", gc_stats.major_gc_count - 1, reason);
2953 if (fin_ready_list || critical_fin_list) {
2954 SGEN_LOG (4, "Finalizer-thread wakeup: ready %d", num_ready_finalizers);
2955 mono_gc_finalize_notify ();
2958 g_assert (sgen_gray_object_queue_is_empty (&gray_queue));
2960 sgen_memgov_major_collection_end ();
2961 current_collection_generation = -1;
2963 memset (&counts, 0, sizeof (ScannedObjectCounts));
2964 major_collector.finish_major_collection (&counts);
2966 g_assert (sgen_section_gray_queue_is_empty (sgen_workers_get_distribute_section_gray_queue ()));
2968 SGEN_ASSERT (0, sgen_workers_all_done (), "Can't have workers working after major collection has finished");
2969 if (concurrent_collection_in_progress)
2970 concurrent_collection_in_progress = FALSE;
2972 check_scan_starts ();
2974 binary_protocol_flush_buffers (FALSE);
2976 //consistency_check ();
2978 MONO_GC_END (GENERATION_OLD);
2979 binary_protocol_collection_end (gc_stats.major_gc_count - 1, GENERATION_OLD, counts.num_scanned_objects, counts.num_unique_scanned_objects);
/*
 * Run a synchronous (stop-the-world) major collection.
 * Returns TRUE when pinning caused allocation failures during the collection,
 * signalling the caller to perform a nursery overflow collection.
 * NOTE(review): this chunk is a lossy extraction — lines are missing between
 * some statements (e.g. the early-return body after the disable check).
 */
2983 major_do_collection (const char *reason)
2985 	TV_DECLARE (time_start);
2986 	TV_DECLARE (time_end);
2987 	size_t old_next_pin_slot;
2989 	if (disable_major_collections)
/* Sanity check: no leftover mark counts from a previous cycle. */
2992 	if (major_collector.get_and_reset_num_major_objects_marked) {
2993 		long long num_marked = major_collector.get_and_reset_num_major_objects_marked ();
2994 		g_assert (!num_marked);
2997 	/* world must be stopped already */
2998 	TV_GETTIME (time_start);
3000 	major_start_collection (FALSE, &old_next_pin_slot);
3001 	major_finish_collection (reason, old_next_pin_slot, FALSE, FALSE);
3003 	TV_GETTIME (time_end);
3004 	gc_stats.major_gc_time += TV_ELAPSED (time_start, time_end);
3006 	/* FIXME: also report this to the user, preferably in gc-end. */
3007 	if (major_collector.get_and_reset_num_major_objects_marked)
3008 		major_collector.get_and_reset_num_major_objects_marked ();
3010 	return bytes_pinned_from_failed_allocation > 0;
/*
 * Kick off a concurrent major collection: do the initial (stop-the-world)
 * marking phase, hand the gray queue over to the worker threads, then let
 * the mutator resume while the workers mark concurrently.
 */
3014 major_start_concurrent_collection (const char *reason)
3016 	TV_DECLARE (time_start);
3017 	TV_DECLARE (time_end);
3018 	long long num_objects_marked;
3020 	if (disable_major_collections)
3023 	TV_GETTIME (time_start);
3024 	SGEN_TV_GETTIME (time_major_conc_collection_start);
/* Must start from a clean slate of mark statistics. */
3026 	num_objects_marked = major_collector.get_and_reset_num_major_objects_marked ();
3027 	g_assert (num_objects_marked == 0);
3029 	MONO_GC_CONCURRENT_START_BEGIN (GENERATION_OLD);
3030 	binary_protocol_concurrent_start ();
3032 	// FIXME: store reason and pass it when finishing
3033 	major_start_collection (TRUE, NULL);
/* Redirect gray-queue sections to the workers and wait for the initial jobs. */
3035 	gray_queue_redirect (&gray_queue);
3036 	sgen_workers_wait_for_jobs_finished ();
3038 	num_objects_marked = major_collector.get_and_reset_num_major_objects_marked ();
3039 	MONO_GC_CONCURRENT_START_END (GENERATION_OLD, num_objects_marked);
3041 	TV_GETTIME (time_end);
3042 	gc_stats.major_gc_time += TV_ELAPSED (time_start, time_end);
3044 	current_collection_generation = -1;
3048  * Returns whether the major collection has finished.
/* True when all concurrent marking workers have drained their queues. */
3051 major_should_finish_concurrent_collection (void)
3053 	SGEN_ASSERT (0, sgen_gray_object_queue_is_empty (&gray_queue), "Why is the gray queue not empty before we have started doing anything?");
3054 	return sgen_workers_all_done ();
/*
 * Periodic update step while a concurrent major collection is in progress:
 * refresh the card-table mod-union data for both the major heap and LOS so
 * mutator writes made since marking started are not lost.
 */
3058 major_update_concurrent_collection (void)
3060 	TV_DECLARE (total_start);
3061 	TV_DECLARE (total_end);
3063 	TV_GETTIME (total_start);
3065 	MONO_GC_CONCURRENT_UPDATE_FINISH_BEGIN (GENERATION_OLD, major_collector.get_and_reset_num_major_objects_marked ());
3066 	binary_protocol_concurrent_update ();
3068 	major_collector.update_cardtable_mod_union ();
3069 	sgen_los_update_cardtable_mod_union ();
3071 	MONO_GC_CONCURRENT_UPDATE_END (GENERATION_OLD, major_collector.get_and_reset_num_major_objects_marked ());
3073 	TV_GETTIME (total_end);
3074 	gc_stats.major_gc_time += TV_ELAPSED (total_start, total_end);
/*
 * Finish a concurrent major collection (stop-the-world finishing phase):
 * wait for marking workers, fold in the mod-union card data, run a finishing
 * nursery collection, then complete the major cycle.  Objects pinned only
 * during the finishing pause are tracked in @unpin_queue and unpinned at the
 * end so they don't stay pinned into the next cycle.
 */
3078 major_finish_concurrent_collection (void)
3080 	TV_DECLARE (total_start);
3081 	TV_DECLARE (total_end);
3082 	gboolean late_pinned;
3083 	SgenGrayQueue unpin_queue;
3084 	memset (&unpin_queue, 0, sizeof (unpin_queue));
3086 	TV_GETTIME (total_start);
3088 	MONO_GC_CONCURRENT_UPDATE_FINISH_BEGIN (GENERATION_OLD, major_collector.get_and_reset_num_major_objects_marked ());
3089 	binary_protocol_concurrent_finish ();
3092 	 * The major collector can add global remsets which are processed in the finishing
3093 	 * nursery collection, below.  That implies that the workers must have finished
3094 	 * marking before the nursery collection is allowed to run, otherwise we might miss
3097 	wait_for_workers_to_finish ();
3099 	SGEN_TV_GETTIME (time_major_conc_collection_end);
3100 	gc_stats.major_gc_time_concurrent += SGEN_TV_ELAPSED (time_major_conc_collection_start, time_major_conc_collection_end);
3102 	major_collector.update_cardtable_mod_union ();
3103 	sgen_los_update_cardtable_mod_union ();
/* Finishing nursery collection; returns whether late pinning occurred. */
3105 	late_pinned = collect_nursery (&unpin_queue, TRUE);
3107 	if (mod_union_consistency_check)
3108 		sgen_check_mod_union_consistency ();
3110 	current_collection_generation = GENERATION_OLD;
3111 	major_finish_collection ("finishing", -1, TRUE, late_pinned);
3113 	if (whole_heap_check_before_collection)
3114 		sgen_check_whole_heap (FALSE);
3116 	unpin_objects_from_queue (&unpin_queue);
3117 	sgen_gray_object_queue_deinit (&unpin_queue);
3119 	MONO_GC_CONCURRENT_FINISH_END (GENERATION_OLD, major_collector.get_and_reset_num_major_objects_marked ());
3121 	TV_GETTIME (total_end);
/* Subtract the embedded nursery pause so it isn't double-counted in major time. */
3122 	gc_stats.major_gc_time += TV_ELAPSED (total_start, total_end) - TV_ELAPSED (last_minor_collection_start_tv, last_minor_collection_end_tv);
3124 	current_collection_generation = -1;
3128  * Ensure an allocation request for @size will succeed by freeing enough memory.
3130  * LOCKING: The GC lock MUST be held.
/*
 * Decide which generation (if any) to collect so the pending allocation of
 * @size bytes can succeed, then trigger that collection.
 */
3133 sgen_ensure_free_space (size_t size)
3135 	int generation_to_collect = -1;
3136 	const char *reason = NULL;
/* Large objects only ever force a major collection. */
3139 	if (size > SGEN_MAX_SMALL_OBJ_SIZE) {
3140 		if (sgen_need_major_collection (size)) {
3141 			reason = "LOS overflow";
3142 			generation_to_collect = GENERATION_OLD;
3145 	if (degraded_mode) {
3146 		if (sgen_need_major_collection (size)) {
3147 			reason = "Degraded mode overflow";
3148 			generation_to_collect = GENERATION_OLD;
3150 	} else if (sgen_need_major_collection (size)) {
3151 		reason = "Minor allowance";
3152 		generation_to_collect = GENERATION_OLD;
3154 		generation_to_collect = GENERATION_NURSERY;
3155 		reason = "Nursery full";
/* No space pressure: opportunistically finish a done concurrent collection. */
3159 	if (generation_to_collect == -1) {
3160 		if (concurrent_collection_in_progress && sgen_workers_all_done ()) {
3161 			generation_to_collect = GENERATION_OLD;
3162 			reason = "Finish concurrent collection";
3166 	if (generation_to_collect == -1)
3168 	sgen_perform_collection (size, generation_to_collect, reason, FALSE);
3172  * LOCKING: Assumes the GC lock is held.
/*
 * Top-level collection driver.  Stops the world, performs (or advances) the
 * requested collection — updating/finishing a concurrent major cycle if one
 * is running — handles overflow collections, reports timing via @infos, and
 * restarts the world.  @wait_to_finish forces a fully synchronous major.
 */
3175 sgen_perform_collection (size_t requested_size, int generation_to_collect, const char *reason, gboolean wait_to_finish)
3177 	TV_DECLARE (gc_start);
3178 	TV_DECLARE (gc_end);
3179 	TV_DECLARE (gc_total_start);
3180 	TV_DECLARE (gc_total_end);
3181 	GGTimingInfo infos [2];
3182 	int overflow_generation_to_collect = -1;
3183 	int oldest_generation_collected = generation_to_collect;
3184 	const char *overflow_reason = NULL;
3186 	MONO_GC_REQUESTED (generation_to_collect, requested_size, wait_to_finish ? 1 : 0);
3188 		binary_protocol_collection_force (generation_to_collect);
3190 	SGEN_ASSERT (0, generation_to_collect == GENERATION_NURSERY || generation_to_collect == GENERATION_OLD, "What generation is this?");
3192 	mono_profiler_gc_event (MONO_GC_EVENT_START, generation_to_collect);
3194 	TV_GETTIME (gc_start);
3196 	sgen_stop_world (generation_to_collect);
3198 	TV_GETTIME (gc_total_start);
3200 	if (concurrent_collection_in_progress) {
3202 		 * We update the concurrent collection.  If it finished, we're done.  If
3203 		 * not, and we've been asked to do a nursery collection, we do that.
3205 		gboolean finish = major_should_finish_concurrent_collection () || (wait_to_finish && generation_to_collect == GENERATION_OLD);
3208 			major_finish_concurrent_collection ();
3209 			oldest_generation_collected = GENERATION_OLD;
/* Not finishing: pause the workers around the update/nursery work. */
3211 			sgen_workers_signal_start_nursery_collection_and_wait ();
3213 			major_update_concurrent_collection ();
3214 			if (generation_to_collect == GENERATION_NURSERY)
3215 				collect_nursery (NULL, FALSE);
3217 			sgen_workers_signal_finish_nursery_collection ();
3224 	 * If we've been asked to do a major collection, and the major collector wants to
3225 	 * run synchronously (to evacuate), we set the flag to do that.
3227 	if (generation_to_collect == GENERATION_OLD &&
3228 			allow_synchronous_major &&
3229 			major_collector.want_synchronous_collection &&
3230 			*major_collector.want_synchronous_collection) {
3231 		wait_to_finish = TRUE;
3234 	SGEN_ASSERT (0, !concurrent_collection_in_progress, "Why did this not get handled above?");
3237 	 * There's no concurrent collection in progress.  Collect the generation we're asked
3238 	 * to collect.  If the major collector is concurrent and we're not forced to wait,
3239 	 * start a concurrent collection.
3241 	// FIXME: extract overflow reason
3242 	if (generation_to_collect == GENERATION_NURSERY) {
3243 		if (collect_nursery (NULL, FALSE)) {
3244 			overflow_generation_to_collect = GENERATION_OLD;
3245 			overflow_reason = "Minor overflow";
3248 		if (major_collector.is_concurrent && !wait_to_finish) {
/* Concurrent major: clear the nursery first so workers scan a clean heap. */
3249 			collect_nursery (NULL, FALSE);
3250 			major_start_concurrent_collection (reason);
3251 			// FIXME: set infos[0] properly
3255 			if (major_do_collection (reason)) {
3256 				overflow_generation_to_collect = GENERATION_NURSERY;
3257 				overflow_reason = "Excessive pinning";
3261 	TV_GETTIME (gc_end);
3263 	memset (infos, 0, sizeof (infos));
3264 	infos [0].generation = generation_to_collect;
3265 	infos [0].reason = reason;
3266 	infos [0].is_overflow = FALSE;
3267 	infos [1].generation = -1;
3268 	infos [0].total_time = SGEN_TV_ELAPSED (gc_start, gc_end);
3270 	SGEN_ASSERT (0, !concurrent_collection_in_progress, "Why did this not get handled above?");
3272 	if (overflow_generation_to_collect != -1) {
3274 		 * We need to do an overflow collection, either because we ran out of memory
3275 		 * or the nursery is fully pinned.
3278 		mono_profiler_gc_event (MONO_GC_EVENT_START, overflow_generation_to_collect);
3279 		infos [1].generation = overflow_generation_to_collect;
3280 		infos [1].reason = overflow_reason;
3281 		infos [1].is_overflow = TRUE;
/* Stash the start timestamp in total_time; converted to elapsed below. */
3282 		infos [1].total_time = gc_end;
3284 		if (overflow_generation_to_collect == GENERATION_NURSERY)
3285 			collect_nursery (NULL, FALSE);
3287 			major_do_collection (overflow_reason);
3289 		TV_GETTIME (gc_end);
3290 		infos [1].total_time = SGEN_TV_ELAPSED (infos [1].total_time, gc_end);
3292 		/* keep events symmetric */
3293 		mono_profiler_gc_event (MONO_GC_EVENT_END, overflow_generation_to_collect);
3295 		oldest_generation_collected = MAX (oldest_generation_collected, overflow_generation_to_collect);
3298 	SGEN_LOG (2, "Heap size: %lu, LOS size: %lu", (unsigned long)mono_gc_get_heap_size (), (unsigned long)los_memory_usage);
3300 	/* this also sets the proper pointers for the next allocation */
3301 	if (generation_to_collect == GENERATION_NURSERY && !sgen_can_alloc_size (requested_size)) {
3302 		/* TypeBuilder and MonoMethod are killing mcs with fragmentation */
3303 		SGEN_LOG (1, "nursery collection didn't find enough room for %zd alloc (%zd pinned)", requested_size, sgen_get_pinned_count ());
3304 		sgen_dump_pin_queue ();
3309 	g_assert (sgen_gray_object_queue_is_empty (&gray_queue));
3311 	TV_GETTIME (gc_total_end);
3312 	time_max = MAX (time_max, TV_ELAPSED (gc_total_start, gc_total_end));
3314 	sgen_restart_world (oldest_generation_collected, infos);
3316 	mono_profiler_gc_event (MONO_GC_EVENT_END, generation_to_collect);
3320 * ######################################################################
3321 * ######## Memory allocation from the OS
3322 * ######################################################################
3323 * This section of code deals with getting memory from the OS and
3324 * allocating memory for GC-internal data structures.
3325 * Internal memory can be handled with a freelist for small objects.
/* Debugging aid: dump GC-internal and pinned memory usage to stdout. */
3331 G_GNUC_UNUSED static void
3332 report_internal_mem_usage (void)
3334 	printf ("Internal memory usage:\n");
3335 	sgen_report_internal_mem_usage ();
3336 	printf ("Pinned memory usage:\n");
3337 	major_collector.report_pinned_memory_usage ();
3341 * ######################################################################
3342 * ######## Finalization support
3343 * ######################################################################
/*
 * Liveness test for a non-nursery object: pinned/forwarded old-gen objects
 * are alive; LOS objects are alive iff pinned; otherwise defer to the major
 * collector's own liveness check.
 */
3346 static inline gboolean
3347 sgen_major_is_object_alive (void *object)
3351 	/* Oldgen objects can be pinned and forwarded too */
3352 	if (SGEN_OBJECT_IS_PINNED (object) || SGEN_OBJECT_IS_FORWARDED (object))
3356 	 * FIXME: major_collector.is_object_live() also calculates the
3357 	 * size.  Avoid the double calculation.
3359 	objsize = SGEN_ALIGN_UP (sgen_safe_object_get_size ((MonoObject*)object));
3360 	if (objsize > SGEN_MAX_SMALL_OBJ_SIZE)
3361 		return sgen_los_object_is_pinned (object);
3363 	return major_collector.is_object_live (object);
3367  * If the object has been forwarded it means it's still referenced from a root.
3368  * If it is pinned it's still alive as well.
3369  * A LOS object is only alive if we have pinned it.
3370  * Return TRUE if @obj is ready to be finalized.
/* Dispatch liveness test by generation: nursery vs. major heap. */
3372 static inline gboolean
3373 sgen_is_object_alive (void *object)
3375 	if (ptr_in_nursery (object))
3376 		return sgen_nursery_is_object_alive (object);
3378 	return sgen_major_is_object_alive (object);
3382  * This function returns true if @object is either alive or it belongs to the old gen
3383  * and we're currently doing a minor collection.
/* During a minor collection every old-gen object counts as alive. */
3386 sgen_is_object_alive_for_current_gen (char *object)
3388 	if (ptr_in_nursery (object))
3389 		return sgen_nursery_is_object_alive (object);
3391 	if (current_collection_generation == GENERATION_NURSERY)
3394 	return sgen_major_is_object_alive (object);
3398  * This function returns true if @object is either alive and belongs to the
3399  * current collection - major collections are full heap, so old gen objects
3400  * are never alive during a minor collection.
3403 sgen_is_object_alive_and_on_current_collection (char *object)
3405 	if (ptr_in_nursery (object))
3406 		return sgen_nursery_is_object_alive (object);
3408 	if (current_collection_generation == GENERATION_NURSERY)
3411 	return sgen_major_is_object_alive (object);
/* An object is ready for finalization exactly when it is no longer alive. */
3416 sgen_gc_is_object_ready_for_finalization (void *object)
3418 	return !sgen_is_object_alive (object);
/*
 * Returns whether @obj's class derives from CriticalFinalizerObject, in
 * which case its finalizer must run after all ordinary finalizers.
 */
3422 has_critical_finalizer (MonoObject *obj)
3426 	if (!mono_defaults.critical_finalizer_object)
3429 	class = ((MonoVTable*)LOAD_VTABLE (obj))->klass;
3431 	return mono_class_has_parent_fast (class, mono_defaults.critical_finalizer_object);
/* Checks the vtable's GC bits for the finalizer-aware flag on @obj. */
3435 is_finalization_aware (MonoObject *obj)
3437 	MonoVTable *vt = ((MonoVTable*)LOAD_VTABLE (obj));
3438 	return (vt->gc_bits & SGEN_GC_BIT_FINALIZER_AWARE) == SGEN_GC_BIT_FINALIZER_AWARE;
/*
 * Enqueue @obj for finalization: critical finalizers go on critical_fin_list,
 * the rest on fin_ready_list.  Also fires the finalization-aware callback and
 * the DTrace enqueue probe when enabled.
 */
3442 sgen_queue_finalization_entry (MonoObject *obj)
3444 	FinalizeReadyEntry *entry = sgen_alloc_internal (INTERNAL_MEM_FINALIZE_READY_ENTRY);
3445 	gboolean critical = has_critical_finalizer (obj);
3446 	entry->object = obj;
3448 		entry->next = critical_fin_list;
3449 		critical_fin_list = entry;
3451 		entry->next = fin_ready_list;
3452 		fin_ready_list = entry;
3455 	if (fin_callbacks.object_queued_for_finalization && is_finalization_aware (obj))
3456 		fin_callbacks.object_queued_for_finalization (obj);
3458 #ifdef ENABLE_DTRACE
3459 	if (G_UNLIKELY (MONO_GC_FINALIZE_ENQUEUE_ENABLED ())) {
3460 		int gen = sgen_ptr_in_nursery (obj) ? GENERATION_NURSERY : GENERATION_OLD;
3461 		MonoVTable *vt = (MonoVTable*)LOAD_VTABLE (obj);
3462 		MONO_GC_FINALIZE_ENQUEUE ((mword)obj, sgen_safe_object_get_size (obj),
3463 				vt->klass->name_space, vt->klass->name, gen, critical);
/* Public liveness query; see sgen_is_object_alive_and_on_current_collection. */
3469 sgen_object_is_live (void *obj)
3471 	return sgen_is_object_alive_and_on_current_collection (obj);
3474 /* LOCKING: requires that the GC lock is held */
/*
 * Unlink ephemeron entries whose array belongs to an unloaded domain
 * (recognizable by a NULLed vtable) from ephemeron_list and free the nodes.
 */
3476 null_ephemerons_for_domain (MonoDomain *domain)
3478 	EphemeronLinkNode *current = ephemeron_list, *prev = NULL;
3481 		MonoObject *object = (MonoObject*)current->array;
3483 		if (object && !object->vtable) {
3484 			EphemeronLinkNode *tmp = current;
3487 				prev->next = current->next;
3489 				ephemeron_list = current->next;
3491 			current = current->next;
3492 			sgen_free_internal (tmp, INTERNAL_MEM_EPHEMERON_LINK);
3495 			current = current->next;
3500 /* LOCKING: requires that the GC lock is held */
/*
 * Post-mark ephemeron pass: drop list nodes whose ephemeron array died, and
 * in the surviving arrays tombstone entries whose key is unreachable so the
 * value can be collected.
 */
3502 clear_unreachable_ephemerons (ScanCopyContext ctx)
3504 	CopyOrMarkObjectFunc copy_func = ctx.copy_func;
3505 	GrayQueue *queue = ctx.queue;
3506 	EphemeronLinkNode *current = ephemeron_list, *prev = NULL;
3508 	Ephemeron *cur, *array_end;
3512 		char *object = current->array;
3514 		if (!sgen_is_object_alive_for_current_gen (object)) {
3515 			EphemeronLinkNode *tmp = current;
3517 			SGEN_LOG (5, "Dead Ephemeron array at %p", object);
3520 				prev->next = current->next;
3522 				ephemeron_list = current->next;
3524 			current = current->next;
3525 			sgen_free_internal (tmp, INTERNAL_MEM_EPHEMERON_LINK);
/* Array is alive: update our pointer in case the collector moved it. */
3530 		copy_func ((void**)&object, queue);
3531 		current->array = object;
3533 		SGEN_LOG (5, "Clearing unreachable entries for ephemeron array at %p", object);
3535 		array = (MonoArray*)object;
3536 		cur = mono_array_addr (array, Ephemeron, 0);
3537 		array_end = cur + mono_array_length_fast (array);
3538 		tombstone = (char*)((MonoVTable*)LOAD_VTABLE (object))->domain->ephemeron_tombstone;
3540 		for (; cur < array_end; ++cur) {
3541 			char *key = (char*)cur->key;
3543 			if (!key || key == tombstone)
3546 			SGEN_LOG (5, "[%td] key %p (%s) value %p (%s)", cur - mono_array_addr (array, Ephemeron, 0),
3547 				key, sgen_is_object_alive_for_current_gen (key) ? "reachable" : "unreachable",
3548 				cur->value, cur->value && sgen_is_object_alive_for_current_gen (cur->value) ? "reachable" : "unreachable");
/* Dead key: replace it with the tombstone so the slot can be reused. */
3550 			if (!sgen_is_object_alive_for_current_gen (key)) {
3551 				cur->key = tombstone;
3557 		current = current->next;
3562 LOCKING: requires that the GC lock is held
3564 Limitations: We scan all ephemerons on every collection since the current design doesn't allow for a simple nursery/mature split.
/*
 * One iteration of the ephemeron fixpoint: for every live ephemeron array,
 * mark values whose keys are reachable.  Returns nonzero (nothing_marked)
 * when no new value was marked, i.e. the fixpoint has been reached.
 */
3567 mark_ephemerons_in_range (ScanCopyContext ctx)
3569 	CopyOrMarkObjectFunc copy_func = ctx.copy_func;
3570 	GrayQueue *queue = ctx.queue;
3571 	int nothing_marked = 1;
3572 	EphemeronLinkNode *current = ephemeron_list;
3574 	Ephemeron *cur, *array_end;
3577 	for (current = ephemeron_list; current; current = current->next) {
3578 		char *object = current->array;
3579 		SGEN_LOG (5, "Ephemeron array at %p", object);
3581 		/*It has to be alive*/
3582 		if (!sgen_is_object_alive_for_current_gen (object)) {
3583 			SGEN_LOG (5, "\tnot reachable");
/* Track the possibly-moved array object. */
3587 		copy_func ((void**)&object, queue);
3589 		array = (MonoArray*)object;
3590 		cur = mono_array_addr (array, Ephemeron, 0);
3591 		array_end = cur + mono_array_length_fast (array);
3592 		tombstone = (char*)((MonoVTable*)LOAD_VTABLE (object))->domain->ephemeron_tombstone;
3594 		for (; cur < array_end; ++cur) {
3595 			char *key = cur->key;
3597 			if (!key || key == tombstone)
3600 			SGEN_LOG (5, "[%td] key %p (%s) value %p (%s)", cur - mono_array_addr (array, Ephemeron, 0),
3601 				key, sgen_is_object_alive_for_current_gen (key) ? "reachable" : "unreachable",
3602 				cur->value, cur->value && sgen_is_object_alive_for_current_gen (cur->value) ? "reachable" : "unreachable");
/* Live key: keep the pair — mark the key, and the value if not yet live. */
3604 			if (sgen_is_object_alive_for_current_gen (key)) {
3605 				char *value = cur->value;
3607 				copy_func ((void**)&cur->key, queue);
3609 					if (!sgen_is_object_alive_for_current_gen (value))
3611 					copy_func ((void**)&cur->value, queue);
3617 	SGEN_LOG (5, "Ephemeron run finished. Is it done %d", nothing_marked);
3618 	return nothing_marked;
/*
 * Run pending finalizers from fin_ready_list / critical_fin_list one object
 * at a time.  Each iteration unlinks the entry finalized in the previous
 * iteration, picks the next non-null entry (ready list first, then critical),
 * and invokes its finalizer outside any list manipulation.
 */
3622 mono_gc_invoke_finalizers (void)
3624 	FinalizeReadyEntry *entry = NULL;
3625 	gboolean entry_is_critical = FALSE;
3628 	/* FIXME: batch to reduce lock contention */
3629 	while (fin_ready_list || critical_fin_list) {
3633 			FinalizeReadyEntry **list = entry_is_critical ? &critical_fin_list : &fin_ready_list;
3635 			/* We have finalized entry in the last
3636 			   interation, now we need to remove it from
3639 				*list = entry->next;
3641 				FinalizeReadyEntry *e = *list;
3642 				while (e->next != entry)
3644 				e->next = entry->next;
3646 			sgen_free_internal (entry, INTERNAL_MEM_FINALIZE_READY_ENTRY);
3650 		/* Now look for the first non-null entry. */
3651 		for (entry = fin_ready_list; entry && !entry->object; entry = entry->next)
3654 			entry_is_critical = FALSE;
3656 			entry_is_critical = TRUE;
3657 			for (entry = critical_fin_list; entry && !entry->object; entry = entry->next)
3662 			g_assert (entry->object);
3663 			num_ready_finalizers--;
/* Clear entry->object first so a nested GC can't re-finalize it. */
3664 			obj = entry->object;
3665 			entry->object = NULL;
3666 			SGEN_LOG (7, "Finalizing object %p (%s)", obj, safe_name (obj));
3674 		g_assert (entry->object == NULL);
3676 		/* the object is on the stack so it is pinned */
3677 		/*g_print ("Calling finalizer for object: %p (%s)\n", entry->object, safe_name (entry->object));*/
3678 		mono_gc_run_finalize (obj, NULL);
/* Returns nonzero when any finalizers are queued (ready or critical). */
3685 mono_gc_pending_finalizers (void)
3687 	return fin_ready_list || critical_fin_list;
3691 * ######################################################################
3692 * ######## registered roots support
3693 * ######################################################################
3697  * We do not coalesce roots.
/*
 * Register [start, start+size) as a GC root of @root_type with descriptor
 * @descr.  If @start is already registered under any root type, update that
 * record in place (size and descriptor may change, e.g. for thread statics)
 * instead of adding a duplicate.
 */
3700 mono_gc_register_root_inner (char *start, size_t size, void *descr, int root_type)
3702 	RootRecord new_root;
3705 	for (i = 0; i < ROOT_TYPE_NUM; ++i) {
3706 		RootRecord *root = sgen_hash_table_lookup (&roots_hash [i], start);
3707 		/* we allow changing the size and the descriptor (for thread statics etc) */
3709 			size_t old_size = root->end_root - start;
3710 			root->end_root = start + size;
/* A root must stay precise (descr != NULL) or conservative (descr == NULL). */
3711 			g_assert (((root->root_desc != 0) && (descr != NULL)) ||
3712 					  ((root->root_desc == 0) && (descr == NULL)));
3713 			root->root_desc = (mword)descr;
3715 			roots_size -= old_size;
3721 	new_root.end_root = start + size;
3722 	new_root.root_desc = (mword)descr;
3724 	sgen_hash_table_replace (&roots_hash [root_type], start, &new_root, NULL);
3727 	SGEN_LOG (3, "Added root for range: %p-%p, descr: %p  (%d/%d bytes)", start, new_root.end_root, descr, (int)size, (int)roots_size);
/* Public wrapper: a descriptor means a precise (NORMAL) root, none means pinned. */
3734 mono_gc_register_root (char *start, size_t size, void *descr)
3736 	return mono_gc_register_root_inner (start, size, descr, descr ? ROOT_TYPE_NORMAL : ROOT_TYPE_PINNED);
/* Register a root whose stores go through the write barrier. */
3740 mono_gc_register_root_wbarrier (char *start, size_t size, void *descr)
3742 	return mono_gc_register_root_inner (start, size, descr, ROOT_TYPE_WBARRIER);
/* Remove @addr from every root table it appears in and adjust roots_size. */
3746 mono_gc_deregister_root (char* addr)
3752 	for (root_type = 0; root_type < ROOT_TYPE_NUM; ++root_type) {
3753 		if (sgen_hash_table_remove (&roots_hash [root_type], addr, &root))
3754 			roots_size -= (root.end_root - addr);
3760 * ######################################################################
3761 * ######## Thread handling (stop/start code)
3762 * ######################################################################
3765 unsigned int sgen_global_stop_count = 0;
/* Generation currently being collected, or -1 when no collection is active. */
3768 sgen_get_current_collection_generation (void)
3770 	return current_collection_generation;
/* Install the embedder's GC callbacks (copied by value). */
3774 mono_gc_set_gc_callbacks (MonoGCCallbacks *callbacks)
3776 	gc_callbacks = *callbacks;
/* Accessor for the stored callback struct. */
3780 mono_gc_get_gc_callbacks ()
3782 	return &gc_callbacks;
3785 /* Variables holding start/end nursery so it won't have to be passed at every call */
3786 static void *scan_area_arg_start, *scan_area_arg_end;
/* Conservatively pin anything in [start, end) that looks like a nursery pointer. */
3789 mono_gc_conservatively_scan_area (void *start, void *end)
3791 	conservatively_pin_objects_from (start, end, scan_area_arg_start, scan_area_arg_end, PIN_TYPE_STACK);
/* Precise-stack-scan callback: copy/mark a single object reference. */
3795 mono_gc_scan_object (void *obj, void *gc_data)
3797 	UserCopyOrMarkData *data = gc_data;
3798 	current_object_ops.copy_or_mark_object (&obj, data->queue);
3803  * Mark from thread stacks and registers.
/*
 * Scan every registered thread's stack (and saved register state) for roots,
 * skipping dead, GC-disabled, and non-running threads.  Uses the embedder's
 * precise mark callback when available, otherwise falls back to conservative
 * pinning of the whole stack range.
 */
3806 scan_thread_data (void *start_nursery, void *end_nursery, gboolean precise, GrayQueue *queue)
3808 	SgenThreadInfo *info;
3810 	scan_area_arg_start = start_nursery;
3811 	scan_area_arg_end = end_nursery;
3813 	FOREACH_THREAD (info) {
3815 			SGEN_LOG (3, "Skipping dead thread %p, range: %p-%p, size: %td", info, info->stack_start, info->stack_end, (char*)info->stack_end - (char*)info->stack_start);
3818 		if (info->gc_disabled) {
3819 			SGEN_LOG (3, "GC disabled for thread %p, range: %p-%p, size: %td", info, info->stack_start, info->stack_end, (char*)info->stack_end - (char*)info->stack_start);
3822 		if (mono_thread_info_run_state (info) != STATE_RUNNING) {
3823 			SGEN_LOG (3, "Skipping non-running thread %p, range: %p-%p, size: %td (state %d)", info, info->stack_start, info->stack_end, (char*)info->stack_end - (char*)info->stack_start, mono_thread_info_run_state (info));
3826 		SGEN_LOG (3, "Scanning thread %p, range: %p-%p, size: %td, pinned=%zd", info, info->stack_start, info->stack_end, (char*)info->stack_end - (char*)info->stack_start, sgen_get_pinned_count ());
3827 		if (gc_callbacks.thread_mark_func && !conservative_stack_mark) {
3828 			UserCopyOrMarkData data = { NULL, queue };
3829 			gc_callbacks.thread_mark_func (info->runtime_data, info->stack_start, info->stack_end, precise, &data);
3830 		} else if (!precise) {
/* Precise marking unavailable: permanently fall back to conservative mode. */
3831 			if (!conservative_stack_mark) {
3832 				fprintf (stderr, "Precise stack mark not supported - disabling.\n");
3833 				conservative_stack_mark = TRUE;
3835 			conservatively_pin_objects_from (info->stack_start, info->stack_end, start_nursery, end_nursery, PIN_TYPE_STACK);
/* Also pin pointers held only in the suspended thread's saved registers. */
3840 				conservatively_pin_objects_from ((void**)&info->ctx, (void**)&info->ctx + ARCH_NUM_REGS,
3841 					start_nursery, end_nursery, PIN_TYPE_STACK);
3843 				conservatively_pin_objects_from ((void**)&info->regs, (void**)&info->regs + ARCH_NUM_REGS,
3844 					start_nursery, end_nursery, PIN_TYPE_STACK);
3847 	} END_FOREACH_THREAD
/*
 * Whether @ptr lies within the current thread's stack (between the address
 * of this frame's local and the registered stack end).
 */
3851 ptr_on_stack (void *ptr)
3853 	gpointer stack_start = &stack_start;
3854 	SgenThreadInfo *info = mono_thread_info_current ();
3856 	if (ptr >= stack_start && ptr < (gpointer)info->stack_end)
/*
 * Per-thread GC initialization when a thread attaches: publish @info via TLS,
 * reset TLAB and stop-the-world state, determine the stack bounds (from the
 * OS when possible, otherwise page-aligning @addr), and call the embedder's
 * thread-attach callback.
 */
3862 sgen_thread_register (SgenThreadInfo* info, void *addr)
3865 	guint8 *staddr = NULL;
3867 #ifndef HAVE_KW_THREAD
3868 	info->tlab_start = info->tlab_next = info->tlab_temp_end = info->tlab_real_end = NULL;
3870 	g_assert (!mono_native_tls_get_value (thread_info_key));
3871 	mono_native_tls_set_value (thread_info_key, info);
3873 	sgen_thread_info = info;
3876 #ifdef SGEN_POSIX_STW
3877 	info->stop_count = -1;
3881 	info->stack_start = NULL;
3882 	info->stopped_ip = NULL;
3883 	info->stopped_domain = NULL;
3885 	memset (&info->ctx, 0, sizeof (MonoContext));
3887 	memset (&info->regs, 0, sizeof (info->regs));
3890 	sgen_init_tlab_info (info);
3892 	binary_protocol_thread_register ((gpointer)mono_thread_info_get_tid (info));
3894 	/* On win32, stack_start_limit should be 0, since the stack can grow dynamically */
3895 	mono_thread_info_get_stack_bounds (&staddr, &stsize);
3898 		info->stack_start_limit = staddr;
3900 		info->stack_end = staddr + stsize;
/* Fallback: round the caller-provided address up to a 4 KiB page boundary. */
3902 		gsize stack_bottom = (gsize)addr;
3903 		stack_bottom += 4095;
3904 		stack_bottom &= ~4095;
3905 		info->stack_end = (char*)stack_bottom;
3908 #ifdef HAVE_KW_THREAD
3909 	stack_end = info->stack_end;
3912 	SGEN_LOG (3, "registered thread %p (%p) stack end %p", info, (gpointer)mono_thread_info_get_tid (info), info->stack_end);
3914 	if (gc_callbacks.thread_attach_func)
3915 		info->runtime_data = gc_callbacks.thread_attach_func ();
/* Detach the runtime thread if its domain is still registered; see comment. */
3920 sgen_thread_detach (SgenThreadInfo *p)
3922 	/* If a delegate is passed to native code and invoked on a thread we dont
3923 	 * know about, the jit will register it with mono_jit_thread_attach, but
3924 	 * we have no way of knowing when that thread goes away.  SGen has a TSD
3925 	 * so we assume that if the domain is still registered, we can detach
3928 	if (mono_domain_get ())
3929 		mono_thread_detach_internal (mono_thread_internal_current ());
/*
 * Per-thread GC teardown on thread exit: clear the TLS slot, queue runtime
 * threads as joinable, and invoke the embedder's thread-detach callback.
 */
3933 sgen_thread_unregister (SgenThreadInfo *p)
3935 	MonoNativeThreadId tid;
3937 	tid = mono_thread_info_get_tid (p);
3938 	binary_protocol_thread_unregister ((gpointer)tid);
3939 	SGEN_LOG (3, "unregister thread %p (%p)", p, (gpointer)tid);
3941 #ifndef HAVE_KW_THREAD
3942 	mono_native_tls_set_value (thread_info_key, NULL);
3944 	sgen_thread_info = NULL;
3947 	if (p->info.runtime_thread)
3948 		mono_threads_add_joinable_thread ((gpointer)tid);
3950 	if (gc_callbacks.thread_detach_func) {
3951 		gc_callbacks.thread_detach_func (p->runtime_data);
3952 		p->runtime_data = NULL;
/* Late attach hook: ensure the embedder's runtime data exists for @info. */
3958 sgen_thread_attach (SgenThreadInfo *info)
3961 	/*this is odd, can we get attached before the gc is inited?*/
3965 	if (gc_callbacks.thread_attach_func && !info->runtime_data)
3966 		info->runtime_data = gc_callbacks.thread_attach_func ();
/* Attach the current thread to the runtime; TRUE on success. */
3969 mono_gc_register_thread (void *baseptr)
3971 	return mono_thread_info_attach (baseptr) != NULL;
3975  * mono_gc_set_stack_end:
3977  *   Set the end of the current threads stack to STACK_END. The stack space between
3978  * STACK_END and the real end of the threads stack will not be scanned during collections.
3981 mono_gc_set_stack_end (void *stack_end)
3983 	SgenThreadInfo *info;
3986 	info = mono_thread_info_current ();
/* The new end may only shrink the scanned range, never grow it. */
3988 		g_assert (stack_end < info->stack_end);
3989 		info->stack_end = stack_end;
3994 #if USE_PTHREAD_INTERCEPT
/* Thin pass-through wrappers for intercepted pthread entry points. */
3998 mono_gc_pthread_create (pthread_t *new_thread, const pthread_attr_t *attr, void *(*start_routine)(void *), void *arg)
4000 	return pthread_create (new_thread, attr, start_routine, arg);
4004 mono_gc_pthread_join (pthread_t thread, void **retval)
4006 	return pthread_join (thread, retval);
4010 mono_gc_pthread_detach (pthread_t thread)
4012 	return pthread_detach (thread);
/* Detach from the runtime before the thread terminates; never returns. */
4016 mono_gc_pthread_exit (void *retval)
4018 	mono_thread_info_detach ();
4019 	pthread_exit (retval);
4020 	g_assert_not_reached ();
4023 #endif /* USE_PTHREAD_INTERCEPT */
4026 * ######################################################################
4027 * ######## Write barriers
4028 * ######################################################################
4032 * Note: the write barriers first do the needed GC work and then do the actual store:
4033 * this way the value is visible to the conservative GC scan after the write barrier
4034 * itself. If a GC interrupts the barrier in the middle, value will be kept alive by
4035 * the conservative scan, otherwise by the remembered set scan.
/*
 * Write barrier for an object-field store.  Stores into the nursery need no
 * remembered-set entry; otherwise the remset implementation records the slot.
 */
4038 mono_gc_wbarrier_set_field (MonoObject *obj, gpointer field_ptr, MonoObject* value)
4040 	HEAVY_STAT (++stat_wbarrier_set_field);
4041 	if (ptr_in_nursery (field_ptr)) {
4042 		*(void**)field_ptr = value;
4045 	SGEN_LOG (8, "Adding remset at %p", field_ptr);
4047 		binary_protocol_wbarrier (field_ptr, value, value->vtable);
4049 	remset.wbarrier_set_field (obj, field_ptr, value);
/* Write barrier for storing an object reference into an array slot. */
4053 mono_gc_wbarrier_set_arrayref (MonoArray *arr, gpointer slot_ptr, MonoObject* value)
4055 	HEAVY_STAT (++stat_wbarrier_set_arrayref);
4056 	if (ptr_in_nursery (slot_ptr)) {
4057 		*(void**)slot_ptr = value;
4060 	SGEN_LOG (8, "Adding remset at %p", slot_ptr);
4062 		binary_protocol_wbarrier (slot_ptr, value, value->vtable);
4064 	remset.wbarrier_set_arrayref (arr, slot_ptr, value);
/*
 * Write barrier for copying @count object references into @dest_ptr.
 * Nursery destinations (and empty copies) take the fast memmove path; the
 * remset implementation handles the rest, including the actual copy.
 */
4068 mono_gc_wbarrier_arrayref_copy (gpointer dest_ptr, gpointer src_ptr, int count)
4070 	HEAVY_STAT (++stat_wbarrier_arrayref_copy);
4071 	/*This check can be done without taking a lock since dest_ptr array is pinned*/
4072 	if (ptr_in_nursery (dest_ptr) || count <= 0) {
4073 		mono_gc_memmove_aligned (dest_ptr, src_ptr, count * sizeof (gpointer));
4077 #ifdef SGEN_HEAVY_BINARY_PROTOCOL
4078 	if (binary_protocol_is_heavy_enabled ()) {
4080 		for (i = 0; i < count; ++i) {
4081 			gpointer dest = (gpointer*)dest_ptr + i;
4082 			gpointer obj = *((gpointer*)src_ptr + i);
4084 				binary_protocol_wbarrier (dest, obj, (gpointer)LOAD_VTABLE (obj));
4089 	remset.wbarrier_arrayref_copy (dest_ptr, src_ptr, count);
/* Scratch result slot for find_object_for_ptr (debugger helper). */
4092 static char *found_obj;
/* Iteration callback: record the object whose extent contains user_data. */
4095 find_object_for_ptr_callback (char *obj, size_t size, void *user_data)
4097 	char *ptr = user_data;
4099 	if (ptr >= obj && ptr < obj + size) {
4100 		g_assert (!found_obj);
4105 /* for use in the debugger */
4106 char* find_object_for_ptr (char *ptr);
/*
 * Locate the heap object containing @ptr by scanning the nursery, then LOS,
 * then the whole major heap.  Slow by design — debugger-only helper.
 */
4108 find_object_for_ptr (char *ptr)
4110 	if (ptr >= nursery_section->data && ptr < nursery_section->end_data) {
4112 		sgen_scan_area_with_callback (nursery_section->data, nursery_section->end_data,
4113 				find_object_for_ptr_callback, ptr, TRUE);
4119 	sgen_los_iterate_objects (find_object_for_ptr_callback, ptr);
4124 	 * Very inefficient, but this is debugging code, supposed to
4125 	 * be called from gdb, so we don't care.
4128 		major_collector.iterate_objects (ITERATE_OBJECTS_SWEEP_ALL, find_object_for_ptr_callback, ptr);
/*
 * Generic write barrier for a slot that was already stored to: decide
 * whether @ptr needs a remembered-set entry.  Nursery/stack slots never do;
 * old->old pointers only matter while a concurrent collection is running.
 */
4133 mono_gc_wbarrier_generic_nostore (gpointer ptr)
4137 	HEAVY_STAT (++stat_wbarrier_generic_store);
4139 #ifdef XDOMAIN_CHECKS_IN_WBARRIER
4140 	/* FIXME: ptr_in_heap must be called with the GC lock held */
4141 	if (xdomain_checks && *(MonoObject**)ptr && ptr_in_heap (ptr)) {
4142 		char *start = find_object_for_ptr (ptr);
4143 		MonoObject *value = *(MonoObject**)ptr;
4147 			MonoObject *obj = (MonoObject*)start;
4148 			if (obj->vtable->domain != value->vtable->domain)
4149 				g_assert (is_xdomain_ref_allowed (ptr, start, obj->vtable->domain));
4155 	obj = *(gpointer*)ptr;
4157 		binary_protocol_wbarrier (ptr, obj, (gpointer)LOAD_VTABLE (obj));
4159 	if (ptr_in_nursery (ptr) || ptr_on_stack (ptr)) {
4160 		SGEN_LOG (8, "Skipping remset at %p", ptr);
4165 	 * We need to record old->old pointer locations for the
4166 	 * concurrent collector.
4168 	if (!ptr_in_nursery (obj) && !concurrent_collection_in_progress) {
4169 		SGEN_LOG (8, "Skipping remset at %p", ptr);
4173 	SGEN_LOG (8, "Adding remset at %p", ptr);
4175 	remset.wbarrier_generic_nostore (ptr);
4179 mono_gc_wbarrier_generic_store (gpointer ptr, MonoObject* value)
4181 SGEN_LOG (8, "Wbarrier store at %p to %p (%s)", ptr, value, value ? safe_name (value) : "null");
4182 SGEN_UPDATE_REFERENCE_ALLOW_NULL (ptr, value);
4183 if (ptr_in_nursery (value))
4184 mono_gc_wbarrier_generic_nostore (ptr);
4185 sgen_dummy_use (value);
4188 /* Same as mono_gc_wbarrier_generic_store () but performs the store
4189 * as an atomic operation with release semantics.
4192 mono_gc_wbarrier_generic_store_atomic (gpointer ptr, MonoObject *value)
4194 HEAVY_STAT (++stat_wbarrier_generic_store_atomic);
4196 SGEN_LOG (8, "Wbarrier atomic store at %p to %p (%s)", ptr, value, value ? safe_name (value) : "null");
4198 InterlockedWritePointer (ptr, value);
4200 if (ptr_in_nursery (value))
4201 mono_gc_wbarrier_generic_nostore (ptr);
4203 sgen_dummy_use (value);
4206 void mono_gc_wbarrier_value_copy_bitmap (gpointer _dest, gpointer _src, int size, unsigned bitmap)
4208 mword *dest = _dest;
4213 mono_gc_wbarrier_generic_store (dest, (MonoObject*)*src);
4215 SGEN_UPDATE_REFERENCE_ALLOW_NULL (dest, *src);
4218 size -= SIZEOF_VOID_P;
4223 #ifdef SGEN_HEAVY_BINARY_PROTOCOL
4225 #define HANDLE_PTR(ptr,obj) do { \
4226 gpointer o = *(gpointer*)(ptr); \
4228 gpointer d = ((char*)dest) + ((char*)(ptr) - (char*)(obj)); \
4229 binary_protocol_wbarrier (d, o, (gpointer) LOAD_VTABLE (o)); \
4234 scan_object_for_binary_protocol_copy_wbarrier (gpointer dest, char *start, mword desc)
4236 #define SCAN_OBJECT_NOVTABLE
4237 #include "sgen-scan-object.h"
4242 mono_gc_wbarrier_value_copy (gpointer dest, gpointer src, int count, MonoClass *klass)
4244 HEAVY_STAT (++stat_wbarrier_value_copy);
4245 g_assert (klass->valuetype);
4247 SGEN_LOG (8, "Adding value remset at %p, count %d, descr %p for class %s (%p)", dest, count, klass->gc_descr, klass->name, klass);
4249 if (ptr_in_nursery (dest) || ptr_on_stack (dest) || !SGEN_CLASS_HAS_REFERENCES (klass)) {
4250 size_t element_size = mono_class_value_size (klass, NULL);
4251 size_t size = count * element_size;
4252 mono_gc_memmove_atomic (dest, src, size);
4256 #ifdef SGEN_HEAVY_BINARY_PROTOCOL
4257 if (binary_protocol_is_heavy_enabled ()) {
4258 size_t element_size = mono_class_value_size (klass, NULL);
4260 for (i = 0; i < count; ++i) {
4261 scan_object_for_binary_protocol_copy_wbarrier ((char*)dest + i * element_size,
4262 (char*)src + i * element_size - sizeof (MonoObject),
4263 (mword) klass->gc_descr);
4268 remset.wbarrier_value_copy (dest, src, count, klass);
4272 * mono_gc_wbarrier_object_copy:
4274 * Write barrier to call when obj is the result of a clone or copy of an object.
4277 mono_gc_wbarrier_object_copy (MonoObject* obj, MonoObject *src)
4281 HEAVY_STAT (++stat_wbarrier_object_copy);
4283 if (ptr_in_nursery (obj) || ptr_on_stack (obj)) {
4284 size = mono_object_class (obj)->instance_size;
4285 mono_gc_memmove_aligned ((char*)obj + sizeof (MonoObject), (char*)src + sizeof (MonoObject),
4286 size - sizeof (MonoObject));
4290 #ifdef SGEN_HEAVY_BINARY_PROTOCOL
4291 if (binary_protocol_is_heavy_enabled ())
4292 scan_object_for_binary_protocol_copy_wbarrier (obj, (char*)src, (mword) src->vtable->gc_descr);
4295 remset.wbarrier_object_copy (obj, src);
4300 * ######################################################################
4301 * ######## Other mono public interface functions.
4302 * ######################################################################
4305 #define REFS_SIZE 128
4308 MonoGCReferences callback;
4312 MonoObject *refs [REFS_SIZE];
4313 uintptr_t offsets [REFS_SIZE];
4317 #define HANDLE_PTR(ptr,obj) do { \
4319 if (hwi->count == REFS_SIZE) { \
4320 hwi->callback ((MonoObject*)start, mono_object_class (start), hwi->called? 0: size, hwi->count, hwi->refs, hwi->offsets, hwi->data); \
4324 hwi->offsets [hwi->count] = (char*)(ptr)-(char*)start; \
4325 hwi->refs [hwi->count++] = *(ptr); \
4330 collect_references (HeapWalkInfo *hwi, char *start, size_t size)
4332 mword desc = sgen_obj_get_descriptor (start);
4334 #include "sgen-scan-object.h"
4338 walk_references (char *start, size_t size, void *data)
4340 HeapWalkInfo *hwi = data;
4343 collect_references (hwi, start, size);
4344 if (hwi->count || !hwi->called)
4345 hwi->callback ((MonoObject*)start, mono_object_class (start), hwi->called? 0: size, hwi->count, hwi->refs, hwi->offsets, hwi->data);
4349 * mono_gc_walk_heap:
4350 * @flags: flags for future use
4351 * @callback: a function pointer called for each object in the heap
4352 * @data: a user data pointer that is passed to callback
4354 * This function can be used to iterate over all the live objects in the heap:
4355 * for each object, @callback is invoked, providing info about the object's
4356 * location in memory, its class, its size and the objects it references.
4357 * For each referenced object it's offset from the object address is
4358 * reported in the offsets array.
4359 * The object references may be buffered, so the callback may be invoked
4360 * multiple times for the same object: in all but the first call, the size
4361 * argument will be zero.
4362 * Note that this function can be only called in the #MONO_GC_EVENT_PRE_START_WORLD
4363 * profiler event handler.
4365 * Returns: a non-zero value if the GC doesn't support heap walking
4368 mono_gc_walk_heap (int flags, MonoGCReferences callback, void *data)
4373 hwi.callback = callback;
4376 sgen_clear_nursery_fragments ();
4377 sgen_scan_area_with_callback (nursery_section->data, nursery_section->end_data, walk_references, &hwi, FALSE);
4379 major_collector.iterate_objects (ITERATE_OBJECTS_SWEEP_ALL, walk_references, &hwi);
4380 sgen_los_iterate_objects (walk_references, &hwi);
4386 mono_gc_collect (int generation)
4391 sgen_perform_collection (0, generation, "user request", TRUE);
4396 mono_gc_max_generation (void)
4402 mono_gc_collection_count (int generation)
4404 if (generation == 0)
4405 return gc_stats.minor_gc_count;
4406 return gc_stats.major_gc_count;
4410 mono_gc_get_used_size (void)
4414 tot = los_memory_usage;
4415 tot += nursery_section->next_data - nursery_section->data;
4416 tot += major_collector.get_used_size ();
4417 /* FIXME: account for pinned objects */
4423 mono_gc_get_los_limit (void)
4425 return MAX_SMALL_OBJ_SIZE;
4429 mono_gc_set_string_length (MonoString *str, gint32 new_length)
4431 mono_unichar2 *new_end = str->chars + new_length;
4433 /* zero the discarded string. This null-delimits the string and allows
4434 * the space to be reclaimed by SGen. */
4436 if (nursery_canaries_enabled () && sgen_ptr_in_nursery (str)) {
4437 CHECK_CANARY_FOR_OBJECT (str);
4438 memset (new_end, 0, (str->length - new_length + 1) * sizeof (mono_unichar2) + CANARY_SIZE);
4439 memcpy (new_end + 1 , CANARY_STRING, CANARY_SIZE);
4441 memset (new_end, 0, (str->length - new_length + 1) * sizeof (mono_unichar2));
4444 str->length = new_length;
4448 mono_gc_user_markers_supported (void)
4454 mono_object_is_alive (MonoObject* o)
4460 mono_gc_get_generation (MonoObject *obj)
4462 if (ptr_in_nursery (obj))
4468 mono_gc_enable_events (void)
4473 mono_gc_weak_link_add (void **link_addr, MonoObject *obj, gboolean track)
4475 sgen_register_disappearing_link (obj, link_addr, track, FALSE);
4479 mono_gc_weak_link_remove (void **link_addr, gboolean track)
4481 sgen_register_disappearing_link (NULL, link_addr, track, FALSE);
4485 mono_gc_weak_link_get (void **link_addr)
4487 void * volatile *link_addr_volatile;
4491 link_addr_volatile = link_addr;
4492 ptr = (void*)*link_addr_volatile;
4494 * At this point we have a hidden pointer. If the GC runs
4495 * here, it will not recognize the hidden pointer as a
4496 * reference, and if the object behind it is not referenced
4497 * elsewhere, it will be freed. Once the world is restarted
4498 * we reveal the pointer, giving us a pointer to a freed
4499 * object. To make sure we don't return it, we load the
4500 * hidden pointer again. If it's still the same, we can be
4501 * sure the object reference is valid.
4504 obj = (MonoObject*) REVEAL_POINTER (ptr);
4508 mono_memory_barrier ();
4511 * During the second bridge processing step the world is
4512 * running again. That step processes all weak links once
4513 * more to null those that refer to dead objects. Before that
4514 * is completed, those links must not be followed, so we
4515 * conservatively wait for bridge processing when any weak
4516 * link is dereferenced.
4518 if (G_UNLIKELY (bridge_processing_in_progress))
4519 mono_gc_wait_for_bridge_processing ();
4521 if ((void*)*link_addr_volatile != ptr)
4528 mono_gc_ephemeron_array_add (MonoObject *obj)
4530 EphemeronLinkNode *node;
4534 node = sgen_alloc_internal (INTERNAL_MEM_EPHEMERON_LINK);
4539 node->array = (char*)obj;
4540 node->next = ephemeron_list;
4541 ephemeron_list = node;
4543 SGEN_LOG (5, "Registered ephemeron array %p", obj);
4550 mono_gc_set_allow_synchronous_major (gboolean flag)
4552 if (!major_collector.is_concurrent)
4555 allow_synchronous_major = flag;
4560 mono_gc_invoke_with_gc_lock (MonoGCLockedCallbackFunc func, void *data)
4564 result = func (data);
4565 UNLOCK_INTERRUPTION;
4570 mono_gc_is_gc_thread (void)
4574 result = mono_thread_info_current () != NULL;
4580 is_critical_method (MonoMethod *method)
4582 return mono_runtime_is_critical_method (method) || sgen_is_critical_method (method);
4586 sgen_env_var_error (const char *env_var, const char *fallback, const char *description_format, ...)
4590 va_start (ap, description_format);
4592 fprintf (stderr, "Warning: In environment variable `%s': ", env_var);
4593 vfprintf (stderr, description_format, ap);
4595 fprintf (stderr, " - %s", fallback);
4596 fprintf (stderr, "\n");
4602 parse_double_in_interval (const char *env_var, const char *opt_name, const char *opt, double min, double max, double *result)
4605 double val = strtod (opt, &endptr);
4606 if (endptr == opt) {
4607 sgen_env_var_error (env_var, "Using default value.", "`%s` must be a number.", opt_name);
4610 else if (val < min || val > max) {
4611 sgen_env_var_error (env_var, "Using default value.", "`%s` must be between %.2f - %.2f.", opt_name, min, max);
4619 mono_gc_base_init (void)
4621 MonoThreadInfoCallbacks cb;
4624 char *major_collector_opt = NULL;
4625 char *minor_collector_opt = NULL;
4626 size_t max_heap = 0;
4627 size_t soft_limit = 0;
4630 gboolean debug_print_allowance = FALSE;
4631 double allowance_ratio = 0, save_target = 0;
4632 gboolean have_split_nursery = FALSE;
4633 gboolean cement_enabled = TRUE;
4635 mono_counters_init ();
4638 result = InterlockedCompareExchange (&gc_initialized, -1, 0);
4641 /* already inited */
4644 /* being inited by another thread */
4648 /* we will init it */
4651 g_assert_not_reached ();
4653 } while (result != 0);
4655 SGEN_TV_GETTIME (sgen_init_timestamp);
4657 LOCK_INIT (gc_mutex);
4659 pagesize = mono_pagesize ();
4660 gc_debug_file = stderr;
4662 cb.thread_register = sgen_thread_register;
4663 cb.thread_detach = sgen_thread_detach;
4664 cb.thread_unregister = sgen_thread_unregister;
4665 cb.thread_attach = sgen_thread_attach;
4666 cb.mono_method_is_critical = (gpointer)is_critical_method;
4668 cb.thread_exit = mono_gc_pthread_exit;
4669 cb.mono_gc_pthread_create = (gpointer)mono_gc_pthread_create;
4672 mono_threads_init (&cb, sizeof (SgenThreadInfo));
4674 LOCK_INIT (sgen_interruption_mutex);
4676 if ((env = g_getenv (MONO_GC_PARAMS_NAME))) {
4677 opts = g_strsplit (env, ",", -1);
4678 for (ptr = opts; *ptr; ++ptr) {
4680 if (g_str_has_prefix (opt, "major=")) {
4681 opt = strchr (opt, '=') + 1;
4682 major_collector_opt = g_strdup (opt);
4683 } else if (g_str_has_prefix (opt, "minor=")) {
4684 opt = strchr (opt, '=') + 1;
4685 minor_collector_opt = g_strdup (opt);
4693 sgen_init_internal_allocator ();
4694 sgen_init_nursery_allocator ();
4695 sgen_init_fin_weak_hash ();
4697 sgen_init_hash_table ();
4698 sgen_init_descriptors ();
4699 sgen_init_gray_queues ();
4701 sgen_register_fixed_internal_mem_type (INTERNAL_MEM_SECTION, SGEN_SIZEOF_GC_MEM_SECTION);
4702 sgen_register_fixed_internal_mem_type (INTERNAL_MEM_FINALIZE_READY_ENTRY, sizeof (FinalizeReadyEntry));
4703 sgen_register_fixed_internal_mem_type (INTERNAL_MEM_GRAY_QUEUE, sizeof (GrayQueueSection));
4704 sgen_register_fixed_internal_mem_type (INTERNAL_MEM_EPHEMERON_LINK, sizeof (EphemeronLinkNode));
4706 #ifndef HAVE_KW_THREAD
4707 mono_native_tls_alloc (&thread_info_key, NULL);
4708 #if defined(__APPLE__) || defined (HOST_WIN32)
4710 * CEE_MONO_TLS requires the tls offset, not the key, so the code below only works on darwin,
4711 * where the two are the same.
4713 mono_tls_key_set_offset (TLS_KEY_SGEN_THREAD_INFO, thread_info_key);
4717 int tls_offset = -1;
4718 MONO_THREAD_VAR_OFFSET (sgen_thread_info, tls_offset);
4719 mono_tls_key_set_offset (TLS_KEY_SGEN_THREAD_INFO, tls_offset);
4724 * This needs to happen before any internal allocations because
4725 * it inits the small id which is required for hazard pointer
4730 mono_thread_info_attach (&dummy);
4732 if (!minor_collector_opt) {
4733 sgen_simple_nursery_init (&sgen_minor_collector);
4735 if (!strcmp (minor_collector_opt, "simple")) {
4737 sgen_simple_nursery_init (&sgen_minor_collector);
4738 } else if (!strcmp (minor_collector_opt, "split")) {
4739 sgen_split_nursery_init (&sgen_minor_collector);
4740 have_split_nursery = TRUE;
4742 sgen_env_var_error (MONO_GC_PARAMS_NAME, "Using `simple` instead.", "Unknown minor collector `%s'.", minor_collector_opt);
4743 goto use_simple_nursery;
4747 if (!major_collector_opt || !strcmp (major_collector_opt, "marksweep")) {
4748 use_marksweep_major:
4749 sgen_marksweep_init (&major_collector);
4750 } else if (!major_collector_opt || !strcmp (major_collector_opt, "marksweep-conc")) {
4751 sgen_marksweep_conc_init (&major_collector);
4753 sgen_env_var_error (MONO_GC_PARAMS_NAME, "Using `marksweep` instead.", "Unknown major collector `%s'.", major_collector_opt);
4754 goto use_marksweep_major;
4757 ///* Keep this the default for now */
4758 /* Precise marking is broken on all supported targets. Disable until fixed. */
4759 conservative_stack_mark = TRUE;
4761 sgen_nursery_size = DEFAULT_NURSERY_SIZE;
4764 gboolean usage_printed = FALSE;
4766 for (ptr = opts; *ptr; ++ptr) {
4768 if (!strcmp (opt, ""))
4770 if (g_str_has_prefix (opt, "major="))
4772 if (g_str_has_prefix (opt, "minor="))
4774 if (g_str_has_prefix (opt, "max-heap-size=")) {
4775 size_t max_heap_candidate = 0;
4776 opt = strchr (opt, '=') + 1;
4777 if (*opt && mono_gc_parse_environment_string_extract_number (opt, &max_heap_candidate)) {
4778 max_heap = (max_heap_candidate + mono_pagesize () - 1) & ~(size_t)(mono_pagesize () - 1);
4779 if (max_heap != max_heap_candidate)
4780 sgen_env_var_error (MONO_GC_PARAMS_NAME, "Rounding up.", "`max-heap-size` size must be a multiple of %d.", mono_pagesize ());
4782 sgen_env_var_error (MONO_GC_PARAMS_NAME, NULL, "`max-heap-size` must be an integer.");
4786 if (g_str_has_prefix (opt, "soft-heap-limit=")) {
4787 opt = strchr (opt, '=') + 1;
4788 if (*opt && mono_gc_parse_environment_string_extract_number (opt, &soft_limit)) {
4789 if (soft_limit <= 0) {
4790 sgen_env_var_error (MONO_GC_PARAMS_NAME, NULL, "`soft-heap-limit` must be positive.");
4794 sgen_env_var_error (MONO_GC_PARAMS_NAME, NULL, "`soft-heap-limit` must be an integer.");
4798 if (g_str_has_prefix (opt, "stack-mark=")) {
4799 opt = strchr (opt, '=') + 1;
4800 if (!strcmp (opt, "precise")) {
4801 conservative_stack_mark = FALSE;
4802 } else if (!strcmp (opt, "conservative")) {
4803 conservative_stack_mark = TRUE;
4805 sgen_env_var_error (MONO_GC_PARAMS_NAME, conservative_stack_mark ? "Using `conservative`." : "Using `precise`.",
4806 "Invalid value `%s` for `stack-mark` option, possible values are: `precise`, `conservative`.", opt);
4810 if (g_str_has_prefix (opt, "bridge-implementation=")) {
4811 opt = strchr (opt, '=') + 1;
4812 sgen_set_bridge_implementation (opt);
4815 if (g_str_has_prefix (opt, "toggleref-test")) {
4816 sgen_register_test_toggleref_callback ();
4821 if (g_str_has_prefix (opt, "nursery-size=")) {
4823 opt = strchr (opt, '=') + 1;
4824 if (*opt && mono_gc_parse_environment_string_extract_number (opt, &val)) {
4825 #ifdef SGEN_ALIGN_NURSERY
4826 if ((val & (val - 1))) {
4827 sgen_env_var_error (MONO_GC_PARAMS_NAME, "Using default value.", "`nursery-size` must be a power of two.");
4831 if (val < SGEN_MAX_NURSERY_WASTE) {
4832 sgen_env_var_error (MONO_GC_PARAMS_NAME, "Using default value.",
4833 "`nursery-size` must be at least %d bytes.", SGEN_MAX_NURSERY_WASTE);
4837 sgen_nursery_size = val;
4838 sgen_nursery_bits = 0;
4839 while (ONE_P << (++ sgen_nursery_bits) != sgen_nursery_size)
4842 sgen_nursery_size = val;
4845 sgen_env_var_error (MONO_GC_PARAMS_NAME, "Using default value.", "`nursery-size` must be an integer.");
4851 if (g_str_has_prefix (opt, "save-target-ratio=")) {
4853 opt = strchr (opt, '=') + 1;
4854 if (parse_double_in_interval (MONO_GC_PARAMS_NAME, "save-target-ratio", opt,
4855 SGEN_MIN_SAVE_TARGET_RATIO, SGEN_MAX_SAVE_TARGET_RATIO, &val)) {
4860 if (g_str_has_prefix (opt, "default-allowance-ratio=")) {
4862 opt = strchr (opt, '=') + 1;
4863 if (parse_double_in_interval (MONO_GC_PARAMS_NAME, "default-allowance-ratio", opt,
4864 SGEN_MIN_ALLOWANCE_NURSERY_SIZE_RATIO, SGEN_MIN_ALLOWANCE_NURSERY_SIZE_RATIO, &val)) {
4865 allowance_ratio = val;
4869 if (g_str_has_prefix (opt, "allow-synchronous-major=")) {
4870 if (!major_collector.is_concurrent) {
4871 sgen_env_var_error (MONO_GC_PARAMS_NAME, "Ignoring.", "`allow-synchronous-major` is only valid for the concurrent major collector.");
4875 opt = strchr (opt, '=') + 1;
4877 if (!strcmp (opt, "yes")) {
4878 allow_synchronous_major = TRUE;
4879 } else if (!strcmp (opt, "no")) {
4880 allow_synchronous_major = FALSE;
4882 sgen_env_var_error (MONO_GC_PARAMS_NAME, "Using default value.", "`allow-synchronous-major` must be either `yes' or `no'.");
4887 if (!strcmp (opt, "cementing")) {
4888 cement_enabled = TRUE;
4891 if (!strcmp (opt, "no-cementing")) {
4892 cement_enabled = FALSE;
4896 if (major_collector.handle_gc_param && major_collector.handle_gc_param (opt))
4899 if (sgen_minor_collector.handle_gc_param && sgen_minor_collector.handle_gc_param (opt))
4902 sgen_env_var_error (MONO_GC_PARAMS_NAME, "Ignoring.", "Unknown option `%s`.", opt);
4907 fprintf (stderr, "\n%s must be a comma-delimited list of one or more of the following:\n", MONO_GC_PARAMS_NAME);
4908 fprintf (stderr, " max-heap-size=N (where N is an integer, possibly with a k, m or a g suffix)\n");
4909 fprintf (stderr, " soft-heap-limit=n (where N is an integer, possibly with a k, m or a g suffix)\n");
4910 fprintf (stderr, " nursery-size=N (where N is an integer, possibly with a k, m or a g suffix)\n");
4911 fprintf (stderr, " major=COLLECTOR (where COLLECTOR is `marksweep', `marksweep-conc', `marksweep-par')\n");
4912 fprintf (stderr, " minor=COLLECTOR (where COLLECTOR is `simple' or `split')\n");
4913 fprintf (stderr, " wbarrier=WBARRIER (where WBARRIER is `remset' or `cardtable')\n");
4914 fprintf (stderr, " stack-mark=MARK-METHOD (where MARK-METHOD is 'precise' or 'conservative')\n");
4915 fprintf (stderr, " [no-]cementing\n");
4916 if (major_collector.is_concurrent)
4917 fprintf (stderr, " allow-synchronous-major=FLAG (where FLAG is `yes' or `no')\n");
4918 if (major_collector.print_gc_param_usage)
4919 major_collector.print_gc_param_usage ();
4920 if (sgen_minor_collector.print_gc_param_usage)
4921 sgen_minor_collector.print_gc_param_usage ();
4922 fprintf (stderr, " Experimental options:\n");
4923 fprintf (stderr, " save-target-ratio=R (where R must be between %.2f - %.2f).\n", SGEN_MIN_SAVE_TARGET_RATIO, SGEN_MAX_SAVE_TARGET_RATIO);
4924 fprintf (stderr, " default-allowance-ratio=R (where R must be between %.2f - %.2f).\n", SGEN_MIN_ALLOWANCE_NURSERY_SIZE_RATIO, SGEN_MAX_ALLOWANCE_NURSERY_SIZE_RATIO);
4925 fprintf (stderr, "\n");
4927 usage_printed = TRUE;
4932 if (major_collector.is_concurrent)
4933 sgen_workers_init (1);
4935 if (major_collector_opt)
4936 g_free (major_collector_opt);
4938 if (minor_collector_opt)
4939 g_free (minor_collector_opt);
4943 sgen_cement_init (cement_enabled);
4945 if ((env = g_getenv (MONO_GC_DEBUG_NAME))) {
4946 gboolean usage_printed = FALSE;
4948 opts = g_strsplit (env, ",", -1);
4949 for (ptr = opts; ptr && *ptr; ptr ++) {
4951 if (!strcmp (opt, ""))
4953 if (opt [0] >= '0' && opt [0] <= '9') {
4954 gc_debug_level = atoi (opt);
4959 char *rf = g_strdup_printf ("%s.%d", opt, mono_process_current_pid ());
4960 gc_debug_file = fopen (rf, "wb");
4962 gc_debug_file = stderr;
4965 } else if (!strcmp (opt, "print-allowance")) {
4966 debug_print_allowance = TRUE;
4967 } else if (!strcmp (opt, "print-pinning")) {
4968 do_pin_stats = TRUE;
4969 } else if (!strcmp (opt, "verify-before-allocs")) {
4970 verify_before_allocs = 1;
4971 has_per_allocation_action = TRUE;
4972 } else if (g_str_has_prefix (opt, "verify-before-allocs=")) {
4973 char *arg = strchr (opt, '=') + 1;
4974 verify_before_allocs = atoi (arg);
4975 has_per_allocation_action = TRUE;
4976 } else if (!strcmp (opt, "collect-before-allocs")) {
4977 collect_before_allocs = 1;
4978 has_per_allocation_action = TRUE;
4979 } else if (g_str_has_prefix (opt, "collect-before-allocs=")) {
4980 char *arg = strchr (opt, '=') + 1;
4981 has_per_allocation_action = TRUE;
4982 collect_before_allocs = atoi (arg);
4983 } else if (!strcmp (opt, "verify-before-collections")) {
4984 whole_heap_check_before_collection = TRUE;
4985 } else if (!strcmp (opt, "check-at-minor-collections")) {
4986 consistency_check_at_minor_collection = TRUE;
4987 nursery_clear_policy = CLEAR_AT_GC;
4988 } else if (!strcmp (opt, "mod-union-consistency-check")) {
4989 if (!major_collector.is_concurrent) {
4990 sgen_env_var_error (MONO_GC_DEBUG_NAME, "Ignoring.", "`mod-union-consistency-check` only works with concurrent major collector.");
4993 mod_union_consistency_check = TRUE;
4994 } else if (!strcmp (opt, "check-mark-bits")) {
4995 check_mark_bits_after_major_collection = TRUE;
4996 } else if (!strcmp (opt, "check-nursery-pinned")) {
4997 check_nursery_objects_pinned = TRUE;
4998 } else if (!strcmp (opt, "xdomain-checks")) {
4999 xdomain_checks = TRUE;
5000 } else if (!strcmp (opt, "clear-at-gc")) {
5001 nursery_clear_policy = CLEAR_AT_GC;
5002 } else if (!strcmp (opt, "clear-nursery-at-gc")) {
5003 nursery_clear_policy = CLEAR_AT_GC;
5004 } else if (!strcmp (opt, "clear-at-tlab-creation")) {
5005 nursery_clear_policy = CLEAR_AT_TLAB_CREATION;
5006 } else if (!strcmp (opt, "debug-clear-at-tlab-creation")) {
5007 nursery_clear_policy = CLEAR_AT_TLAB_CREATION_DEBUG;
5008 } else if (!strcmp (opt, "check-scan-starts")) {
5009 do_scan_starts_check = TRUE;
5010 } else if (!strcmp (opt, "verify-nursery-at-minor-gc")) {
5011 do_verify_nursery = TRUE;
5012 } else if (!strcmp (opt, "check-concurrent")) {
5013 if (!major_collector.is_concurrent) {
5014 sgen_env_var_error (MONO_GC_DEBUG_NAME, "Ignoring.", "`check-concurrent` only works with concurrent major collectors.");
5017 do_concurrent_checks = TRUE;
5018 } else if (!strcmp (opt, "dump-nursery-at-minor-gc")) {
5019 do_dump_nursery_content = TRUE;
5020 } else if (!strcmp (opt, "no-managed-allocator")) {
5021 sgen_set_use_managed_allocator (FALSE);
5022 } else if (!strcmp (opt, "disable-minor")) {
5023 disable_minor_collections = TRUE;
5024 } else if (!strcmp (opt, "disable-major")) {
5025 disable_major_collections = TRUE;
5026 } else if (g_str_has_prefix (opt, "heap-dump=")) {
5027 char *filename = strchr (opt, '=') + 1;
5028 nursery_clear_policy = CLEAR_AT_GC;
5029 heap_dump_file = fopen (filename, "w");
5030 if (heap_dump_file) {
5031 fprintf (heap_dump_file, "<sgen-dump>\n");
5032 do_pin_stats = TRUE;
5034 } else if (g_str_has_prefix (opt, "binary-protocol=")) {
5035 char *filename = strchr (opt, '=') + 1;
5036 char *colon = strrchr (filename, ':');
5039 if (!mono_gc_parse_environment_string_extract_number (colon + 1, &limit)) {
5040 sgen_env_var_error (MONO_GC_DEBUG_NAME, "Ignoring limit.", "Binary protocol file size limit must be an integer.");
5045 binary_protocol_init (filename, (long long)limit);
5046 } else if (!strcmp (opt, "nursery-canaries")) {
5047 do_verify_nursery = TRUE;
5048 sgen_set_use_managed_allocator (FALSE);
5049 enable_nursery_canaries = TRUE;
5050 } else if (!sgen_bridge_handle_gc_debug (opt)) {
5051 sgen_env_var_error (MONO_GC_DEBUG_NAME, "Ignoring.", "Unknown option `%s`.", opt);
5056 fprintf (stderr, "\n%s must be of the format [<l>[:<filename>]|<option>]+ where <l> is a debug level 0-9.\n", MONO_GC_DEBUG_NAME);
5057 fprintf (stderr, "Valid <option>s are:\n");
5058 fprintf (stderr, " collect-before-allocs[=<n>]\n");
5059 fprintf (stderr, " verify-before-allocs[=<n>]\n");
5060 fprintf (stderr, " check-at-minor-collections\n");
5061 fprintf (stderr, " check-mark-bits\n");
5062 fprintf (stderr, " check-nursery-pinned\n");
5063 fprintf (stderr, " verify-before-collections\n");
5064 fprintf (stderr, " verify-nursery-at-minor-gc\n");
5065 fprintf (stderr, " dump-nursery-at-minor-gc\n");
5066 fprintf (stderr, " disable-minor\n");
5067 fprintf (stderr, " disable-major\n");
5068 fprintf (stderr, " xdomain-checks\n");
5069 fprintf (stderr, " check-concurrent\n");
5070 fprintf (stderr, " clear-[nursery-]at-gc\n");
5071 fprintf (stderr, " clear-at-tlab-creation\n");
5072 fprintf (stderr, " debug-clear-at-tlab-creation\n");
5073 fprintf (stderr, " check-scan-starts\n");
5074 fprintf (stderr, " no-managed-allocator\n");
5075 fprintf (stderr, " print-allowance\n");
5076 fprintf (stderr, " print-pinning\n");
5077 fprintf (stderr, " heap-dump=<filename>\n");
5078 fprintf (stderr, " binary-protocol=<filename>[:<file-size-limit>]\n");
5079 fprintf (stderr, " nursery-canaries\n");
5080 sgen_bridge_print_gc_debug_usage ();
5081 fprintf (stderr, "\n");
5083 usage_printed = TRUE;
5089 if (check_mark_bits_after_major_collection)
5090 nursery_clear_policy = CLEAR_AT_GC;
5092 if (major_collector.post_param_init)
5093 major_collector.post_param_init (&major_collector);
5095 sgen_memgov_init (max_heap, soft_limit, debug_print_allowance, allowance_ratio, save_target);
5097 memset (&remset, 0, sizeof (remset));
5099 sgen_card_table_init (&remset);
5105 mono_gc_get_gc_name (void)
5110 static MonoMethod *write_barrier_method;
5113 sgen_is_critical_method (MonoMethod *method)
5115 return (method == write_barrier_method || sgen_is_managed_allocator (method));
5119 sgen_has_critical_method (void)
5121 return write_barrier_method || sgen_has_managed_allocator ();
5127 emit_nursery_check (MonoMethodBuilder *mb, int *nursery_check_return_labels)
5129 memset (nursery_check_return_labels, 0, sizeof (int) * 3);
5130 #ifdef SGEN_ALIGN_NURSERY
5131 // if (ptr_in_nursery (ptr)) return;
5133 * Masking out the bits might be faster, but we would have to use 64 bit
5134 * immediates, which might be slower.
5136 mono_mb_emit_ldarg (mb, 0);
5137 mono_mb_emit_icon (mb, DEFAULT_NURSERY_BITS);
5138 mono_mb_emit_byte (mb, CEE_SHR_UN);
5139 mono_mb_emit_ptr (mb, (gpointer)((mword)sgen_get_nursery_start () >> DEFAULT_NURSERY_BITS));
5140 nursery_check_return_labels [0] = mono_mb_emit_branch (mb, CEE_BEQ);
5142 if (!major_collector.is_concurrent) {
5143 // if (!ptr_in_nursery (*ptr)) return;
5144 mono_mb_emit_ldarg (mb, 0);
5145 mono_mb_emit_byte (mb, CEE_LDIND_I);
5146 mono_mb_emit_icon (mb, DEFAULT_NURSERY_BITS);
5147 mono_mb_emit_byte (mb, CEE_SHR_UN);
5148 mono_mb_emit_ptr (mb, (gpointer)((mword)sgen_get_nursery_start () >> DEFAULT_NURSERY_BITS));
5149 nursery_check_return_labels [1] = mono_mb_emit_branch (mb, CEE_BNE_UN);
5152 int label_continue1, label_continue2;
5153 int dereferenced_var;
5155 // if (ptr < (sgen_get_nursery_start ())) goto continue;
5156 mono_mb_emit_ldarg (mb, 0);
5157 mono_mb_emit_ptr (mb, (gpointer) sgen_get_nursery_start ());
5158 label_continue_1 = mono_mb_emit_branch (mb, CEE_BLT);
5160 // if (ptr >= sgen_get_nursery_end ())) goto continue;
5161 mono_mb_emit_ldarg (mb, 0);
5162 mono_mb_emit_ptr (mb, (gpointer) sgen_get_nursery_end ());
5163 label_continue_2 = mono_mb_emit_branch (mb, CEE_BGE);
5166 nursery_check_return_labels [0] = mono_mb_emit_branch (mb, CEE_BR);
5169 mono_mb_patch_branch (mb, label_continue_1);
5170 mono_mb_patch_branch (mb, label_continue_2);
5172 // Dereference and store in local var
5173 dereferenced_var = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
5174 mono_mb_emit_ldarg (mb, 0);
5175 mono_mb_emit_byte (mb, CEE_LDIND_I);
5176 mono_mb_emit_stloc (mb, dereferenced_var);
5178 if (!major_collector.is_concurrent) {
5179 // if (*ptr < sgen_get_nursery_start ()) return;
5180 mono_mb_emit_ldloc (mb, dereferenced_var);
5181 mono_mb_emit_ptr (mb, (gpointer) sgen_get_nursery_start ());
5182 nursery_check_return_labels [1] = mono_mb_emit_branch (mb, CEE_BLT);
5184 // if (*ptr >= sgen_get_nursery_end ()) return;
5185 mono_mb_emit_ldloc (mb, dereferenced_var);
5186 mono_mb_emit_ptr (mb, (gpointer) sgen_get_nursery_end ());
5187 nursery_check_return_labels [2] = mono_mb_emit_branch (mb, CEE_BGE);
5194 mono_gc_get_write_barrier (void)
5197 MonoMethodBuilder *mb;
5198 MonoMethodSignature *sig;
5199 #ifdef MANAGED_WBARRIER
5200 int i, nursery_check_labels [3];
5202 #ifdef HAVE_KW_THREAD
5203 int stack_end_offset = -1;
5205 MONO_THREAD_VAR_OFFSET (stack_end, stack_end_offset);
5206 g_assert (stack_end_offset != -1);
5210 // FIXME: Maybe create a separate version for ctors (the branch would be
5211 // correctly predicted more times)
5212 if (write_barrier_method)
5213 return write_barrier_method;
5215 /* Create the IL version of mono_gc_barrier_generic_store () */
5216 sig = mono_metadata_signature_alloc (mono_defaults.corlib, 1);
5217 sig->ret = &mono_defaults.void_class->byval_arg;
5218 sig->params [0] = &mono_defaults.int_class->byval_arg;
5220 mb = mono_mb_new (mono_defaults.object_class, "wbarrier", MONO_WRAPPER_WRITE_BARRIER);
5223 #ifdef MANAGED_WBARRIER
5224 emit_nursery_check (mb, nursery_check_labels);
5226 addr = sgen_cardtable + ((address >> CARD_BITS) & CARD_MASK)
5230 LDC_PTR sgen_cardtable
5232 address >> CARD_BITS
5236 if (SGEN_HAVE_OVERLAPPING_CARDS) {
5237 LDC_PTR card_table_mask
5244 mono_mb_emit_ptr (mb, sgen_cardtable);
5245 mono_mb_emit_ldarg (mb, 0);
5246 mono_mb_emit_icon (mb, CARD_BITS);
5247 mono_mb_emit_byte (mb, CEE_SHR_UN);
5248 #ifdef SGEN_HAVE_OVERLAPPING_CARDS
5249 mono_mb_emit_ptr (mb, (gpointer)CARD_MASK);
5250 mono_mb_emit_byte (mb, CEE_AND);
5252 mono_mb_emit_byte (mb, CEE_ADD);
5253 mono_mb_emit_icon (mb, 1);
5254 mono_mb_emit_byte (mb, CEE_STIND_I1);
5257 for (i = 0; i < 3; ++i) {
5258 if (nursery_check_labels [i])
5259 mono_mb_patch_branch (mb, nursery_check_labels [i]);
5261 mono_mb_emit_byte (mb, CEE_RET);
5263 mono_mb_emit_ldarg (mb, 0);
5264 mono_mb_emit_icall (mb, mono_gc_wbarrier_generic_nostore);
5265 mono_mb_emit_byte (mb, CEE_RET);
5268 res = mono_mb_create_method (mb, sig, 16);
5272 if (write_barrier_method) {
5273 /* Already created */
5274 mono_free_method (res);
5276 /* double-checked locking */
5277 mono_memory_barrier ();
5278 write_barrier_method = res;
5282 return write_barrier_method;
/* mono_gc_get_description: human-readable collector name.  The string is
 * g_strdup'd, so the caller owns it and frees with g_free ().
 * FRAGMENT: return type and braces sit on elided lines of this extract. */
5286 mono_gc_get_description (void)
5288 return g_strdup ("sgen");
/* mono_gc_set_desktop_mode: body elided in this extract -- presumably a
 * configuration hook (possibly a no-op for sgen); confirm against the
 * full source. */
5292 mono_gc_set_desktop_mode (void)
/* mono_gc_is_moving: body elided in this extract -- presumably reports
 * whether this collector can move objects; confirm against full source. */
5297 mono_gc_is_moving (void)
/* mono_gc_is_disabled: body elided in this extract -- presumably reports
 * whether collections are currently disabled; confirm against full source. */
5303 mono_gc_is_disabled (void)
/* mono_gc_dllmain: Win32 DLL entry hook for the GC (DllMain signature).
 * Body elided in this extract. */
5309 BOOL APIENTRY mono_gc_dllmain (HMODULE module_handle, DWORD reason, LPVOID reserved)
/* Accessor for the configured nursery clear policy (file-scope setting).
 * FRAGMENT: return type and braces sit on elided lines of this extract. */
5316 sgen_get_nursery_clear_policy (void)
5318 return nursery_clear_policy;
/*
 * sgen_get_array_fill_vtable: lazily build and cache a fake byte-array
 * vtable, used to overwrite reclaimed heap space with a dummy array object.
 * FRAGMENT: several lines (e.g. the bmap declaration, additional klass and
 * vtable field setup, closing braces) are elided in this extract.
 */
5322 sgen_get_array_fill_vtable (void)
5324 if (!array_fill_vtable) {
5325 static MonoClass klass;
/* Raw storage oversized by 8 so the vtable pointer itself can be 8-aligned. */
5326 static char _vtable[sizeof(MonoVTable)+8];
5327 MonoVTable* vtable = (MonoVTable*) ALIGN_TO(_vtable, 8);
5330 MonoDomain *domain = mono_get_root_domain ();
/* Describe a one-byte-element array type just well enough for the GC. */
5333 klass.element_class = mono_defaults.byte_class;
5335 klass.instance_size = sizeof (MonoArray);
5336 klass.sizes.element_size = 1;
5337 klass.name = "array_filler_type";
5339 vtable->klass = &klass;
/* bmap is declared on an elided line; descriptor is for a pointer-free
 * (vector) array. */
5341 vtable->gc_descr = mono_gc_make_descr_for_array (TRUE, &bmap, 0, 1);
5344 array_fill_vtable = vtable;
5346 return array_fill_vtable;
/*
 * sgen_gc_unlock: release the global GC lock; if a deferred-free request
 * was latched, attempt hazard-pointer reclamation after unlocking.
 * FRAGMENT: the guard line between 5361 and 5363 (presumably
 * "if (try_free)") is elided in this extract -- confirm against full source.
 */
5356 sgen_gc_unlock (void)
/* Snapshot-and-clear the request while still holding gc_mutex. */
5358 gboolean try_free = sgen_try_free_some_memory;
5359 sgen_try_free_some_memory = FALSE;
5360 mono_mutex_unlock (&gc_mutex);
5361 MONO_GC_UNLOCKED ();
/* Runs outside the lock -- presumably conditional on try_free (elided line). */
5363 mono_thread_hazardous_try_free_some ();
/* Forward to the major collector: enumerate its live block ranges,
 * invoking CALLBACK for each range. */
5367 sgen_major_collector_iterate_live_block_ranges (sgen_cardtable_block_callback callback)
5369 major_collector.iterate_live_block_ranges (callback);
/* Forward to the major collector: scan its card table, pushing found
 * references onto QUEUE.  NOTE(review): the meaning of the FALSE first
 * argument is not visible here -- confirm against the collector's
 * scan_card_table signature in the full source. */
5373 sgen_major_collector_scan_card_table (SgenGrayQueue *queue)
5375 major_collector.scan_card_table (FALSE, queue);
/* Accessor for the file-scope major collector instance (returns its
 * address; callers must not free it). */
5379 sgen_get_major_collector (void)
5381 return &major_collector;
/*
 * mono_gc_set_skip_thread: mark the calling thread as skipped by the GC
 * (sets its gc_disabled flag).  FRAGMENT: surrounding lines (5385,
 * 5387-5388, 5390+ -- presumably lock/unlock and braces) are elided in
 * this extract.
 */
5384 void mono_gc_set_skip_thread (gboolean skip)
5386 SgenThreadInfo *info = mono_thread_info_current ();
5389 info->gc_disabled = skip;
/* sgen_get_remset: body elided in this extract -- presumably returns the
 * active remembered-set descriptor; confirm against full source. */
5394 sgen_get_remset (void)
/*
 * mono_gc_get_vtable_bits: compute the per-class GC bits stored in the
 * vtable: bridge kind (when bridge processing is active) and
 * finalizer-awareness (when the embedder registered finalizer callbacks).
 * FRAGMENT: the declaration of "res", the case "break;"s, closing braces
 * and "return res;" sit on elided lines (5401-5402, 5409, 5412-5414,
 * 5418-5419) -- the visible cases do NOT actually fall through.
 */
5400 mono_gc_get_vtable_bits (MonoClass *class)
5403 /* FIXME move this to the bridge code */
5404 if (sgen_need_bridge_processing ()) {
5405 switch (sgen_bridge_class_kind (class)) {
5406 case GC_BRIDGE_TRANSPARENT_BRIDGE_CLASS:
5407 case GC_BRIDGE_OPAQUE_BRIDGE_CLASS:
5408 res = SGEN_GC_BIT_BRIDGE_OBJECT;
/* presumably "break;" on elided line 5409 */
5410 case GC_BRIDGE_OPAQUE_CLASS:
5411 res = SGEN_GC_BIT_BRIDGE_OPAQUE_OBJECT;
/* Ask the embedder's finalizer extension whether instances of this class
 * need the finalizer-aware bit. */
5415 if (fin_callbacks.is_class_finalization_aware) {
5416 if (fin_callbacks.is_class_finalization_aware (class))
5417 res |= SGEN_GC_BIT_FINALIZER_AWARE;
/* mono_gc_register_altstack: body elided in this extract -- presumably a
 * platform hook / no-op for sgen; confirm against full source. */
5423 mono_gc_register_altstack (gpointer stack, gint32 stack_size, gpointer altstack, gint32 altstack_size)
/*
 * sgen_check_whole_heap_stw: debugging aid.  Stops the world, clears
 * nursery fragments so the nursery is walkable, verifies the whole heap,
 * then restarts the world.
 */
5430 sgen_check_whole_heap_stw (void)
5432 sgen_stop_world (0);
5433 sgen_clear_nursery_fragments ();
5434 sgen_check_whole_heap (FALSE);
5435 sgen_restart_world (0, NULL);
/* Flush the buffered object-move events to the profiler and reset the
 * buffer index; no-op when the buffer is empty. */
5439 sgen_gc_event_moves (void)
5441 if (moved_objects_idx) {
5442 mono_profiler_gc_moves (moved_objects, moved_objects_idx);
5443 moved_objects_idx = 0;
/* sgen_timestamp: time elapsed since sgen_init_timestamp, in the units of
 * the SGEN_TV_* timer macros. */
5448 sgen_timestamp (void)
5450 SGEN_TV_DECLARE (timestamp);
5451 SGEN_TV_GETTIME (timestamp);
5452 return SGEN_TV_ELAPSED (sgen_init_timestamp, timestamp);
/*
 * mono_gc_register_finalizer_callbacks: install the embedder's finalizer
 * extension.  Aborts via g_error on a version mismatch; otherwise copies
 * the callback struct by value into the file-scope fin_callbacks (the
 * caller's struct need not stay alive afterwards).
 */
5456 mono_gc_register_finalizer_callbacks (MonoGCFinalizerCallbacks *callbacks)
5458 if (callbacks->version != MONO_GC_FINALIZER_EXTENSION_VERSION)
5459 g_error ("Invalid finalizer callback version. Expected %d but got %d\n", MONO_GC_FINALIZER_EXTENSION_VERSION, callbacks->version);
5461 fin_callbacks = *callbacks;
5468 #endif /* HAVE_SGEN_GC */