2 * sgen-gc.c: Simple generational GC.
5 * Paolo Molaro (lupus@ximian.com)
6 * Rodrigo Kumpera (kumpera@gmail.com)
8 * Copyright 2005-2011 Novell, Inc (http://www.novell.com)
9 * Copyright 2011 Xamarin Inc (http://www.xamarin.com)
11 * Thread start/stop adapted from Boehm's GC:
12 * Copyright (c) 1994 by Xerox Corporation. All rights reserved.
13 * Copyright (c) 1996 by Silicon Graphics. All rights reserved.
14 * Copyright (c) 1998 by Fergus Henderson. All rights reserved.
15 * Copyright (c) 2000-2004 by Hewlett-Packard Company. All rights reserved.
16 * Copyright 2001-2003 Ximian, Inc
17 * Copyright 2003-2010 Novell, Inc.
18 * Copyright 2011 Xamarin, Inc.
19 * Copyright (C) 2012 Xamarin Inc
21 * This library is free software; you can redistribute it and/or
22 * modify it under the terms of the GNU Library General Public
23 * License 2.0 as published by the Free Software Foundation;
25 * This library is distributed in the hope that it will be useful,
26 * but WITHOUT ANY WARRANTY; without even the implied warranty of
27 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
28 * Library General Public License for more details.
30 * You should have received a copy of the GNU Library General Public
31 * License 2.0 along with this library; if not, write to the Free
32 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
34 * Important: allocation provides always zeroed memory, having to do
35 * a memset after allocation is deadly for performance.
36 * Memory usage at startup is currently as follows:
38 * 64 KB internal space
40 * We should provide a small memory config with half the sizes
42 * We currently try to make as few mono assumptions as possible:
43 * 1) 2-word header with no GC pointers in it (first vtable, second to store the
45 * 2) gc descriptor is the second word in the vtable (first word in the class)
46 * 3) 8 byte alignment is the minimum and enough (not true for special structures (SIMD), FIXME)
47 * 4) there is a function to get an object's size and the number of
48 * elements in an array.
49 * 5) we know the special way bounds are allocated for complex arrays
50 * 6) we know about proxies and how to treat them when domains are unloaded
52 * Always try to keep stack usage to a minimum: no recursive behaviour
53 * and no large stack allocs.
55 * General description.
56 * Objects are initially allocated in a nursery using a fast bump-pointer technique.
57 * When the nursery is full we start a nursery collection: this is performed with a
59 * When the old generation is full we start a copying GC of the old generation as well:
60 * this will be changed to mark&sweep with copying when fragmentation becomes too severe
61 * in the future. Maybe we'll even do both during the same collection like IMMIX.
63 * The things that complicate this description are:
64 * *) pinned objects: we can't move them so we need to keep track of them
65 * *) no precise info of the thread stacks and registers: we need to be able to
66 * quickly find the objects that may be referenced conservatively and pin them
67 * (this makes the first issues more important)
68 * *) large objects are too expensive to be dealt with using copying GC: we handle them
69 * with mark/sweep during major collections
70 * *) some objects need to not move even if they are small (interned strings, Type handles):
71 * we use mark/sweep for them, too: they are not allocated in the nursery, but inside
72 * PinnedChunks regions
78 *) we could have a function pointer in MonoClass to implement
79 customized write barriers for value types
81 *) investigate the stuff needed to advance a thread to a GC-safe
82 point (single-stepping, read from unmapped memory etc) and implement it.
83 This would enable us to inline allocations and write barriers, for example,
84 or at least parts of them, like the write barrier checks.
85 We may need this also for handling precise info on stacks, even simple things
86 as having uninitialized data on the stack and having to wait for the prolog
87 to zero it. Not an issue for the last frame that we scan conservatively.
88 We could always not trust the value in the slots anyway.
90 *) modify the jit to save info about references in stack locations:
91 this can be done just for locals as a start, so that at least
92 part of the stack is handled precisely.
94 *) test/fix endianness issues
96 *) Implement a card table as the write barrier instead of remembered
97 sets? Card tables are not easy to implement with our current
98 memory layout. We have several different kinds of major heap
99 objects: Small objects in regular blocks, small objects in pinned
100 chunks and LOS objects. If we just have a pointer we have no way
101 to tell which kind of object it points into, therefore we cannot
102 know where its card table is. The least we have to do to make
103 this happen is to get rid of write barriers for indirect stores.
106 *) Get rid of write barriers for indirect stores. We can do this by
107 telling the GC to wbarrier-register an object once we do an ldloca
108 or ldelema on it, and to unregister it once it's not used anymore
109 (it can only travel downwards on the stack). The problem with
110 unregistering is that it needs to happen eventually no matter
111 what, even if exceptions are thrown, the thread aborts, etc.
112 Rodrigo suggested that we could do only the registering part and
113 let the collector find out (pessimistically) when it's safe to
114 unregister, namely when the stack pointer of the thread that
115 registered the object is higher than it was when the registering
116 happened. This might make for a good first implementation to get
117 some data on performance.
119 *) Some sort of blacklist support? Blacklists is a concept from the
120 Boehm GC: if during a conservative scan we find pointers to an
121 area which we might use as heap, we mark that area as unusable, so
122 pointer retention by random pinning pointers is reduced.
124 *) experiment with max small object size (very small right now - 2kb,
125 because it's tied to the max freelist size)
127 *) add an option to mmap the whole heap in one chunk: it makes for many
128 simplifications in the checks (put the nursery at the top and just use a single
129 check for inclusion/exclusion): the issue this has is that on 32 bit systems it's
130 not flexible (too much of the address space may be used by default or we can't
131 increase the heap as needed) and we'd need a race-free mechanism to return memory
132 back to the system (mprotect(PROT_NONE) will still keep the memory allocated if it
133 was written to, munmap is needed, but the following mmap may not find the same segment
136 *) memzero the major fragments after restarting the world and optionally a smaller
139 *) investigate having fragment zeroing threads
141 *) separate locks for finalization and other minor stuff to reduce
144 *) try a different copying order to improve memory locality
146 *) a thread abort after a store but before the write barrier will
147 prevent the write barrier from executing
149 *) specialized dynamically generated markers/copiers
151 *) Dynamically adjust TLAB size to the number of threads. If we have
152 too many threads that do allocation, we might need smaller TLABs,
153 and we might get better performance with larger TLABs if we only
154 have a handful of threads. We could sum up the space left in all
155 assigned TLABs and if that's more than some percentage of the
156 nursery size, reduce the TLAB size.
158 *) Explore placing unreachable objects on unused nursery memory.
159 Instead of memset'ing a region to zero, place an int[] covering it.
160 A good place to start is add_nursery_frag. The tricky thing here is
161 placing those objects atomically outside of a collection.
163 *) Allocation should use asymmetric Dekker synchronization:
164 http://blogs.oracle.com/dave/resource/Asymmetric-Dekker-Synchronization.txt
165 This should help weak consistency archs.
172 #define _XOPEN_SOURCE
173 #define _DARWIN_C_SOURCE
179 #ifdef HAVE_PTHREAD_H
182 #ifdef HAVE_PTHREAD_NP_H
183 #include <pthread_np.h>
185 #ifdef HAVE_SEMAPHORE_H
186 #include <semaphore.h>
193 #include "metadata/sgen-gc.h"
194 #include "metadata/metadata-internals.h"
195 #include "metadata/class-internals.h"
196 #include "metadata/gc-internal.h"
197 #include "metadata/object-internals.h"
198 #include "metadata/threads.h"
199 #include "metadata/sgen-cardtable.h"
200 #include "metadata/sgen-protocol.h"
201 #include "metadata/sgen-archdep.h"
202 #include "metadata/sgen-bridge.h"
203 #include "metadata/sgen-memory-governor.h"
204 #include "metadata/sgen-hash-table.h"
205 #include "metadata/mono-gc.h"
206 #include "metadata/method-builder.h"
207 #include "metadata/profiler-private.h"
208 #include "metadata/monitor.h"
209 #include "metadata/mempool-internals.h"
210 #include "metadata/marshal.h"
211 #include "metadata/runtime.h"
212 #include "metadata/sgen-cardtable.h"
213 #include "metadata/sgen-pinning.h"
214 #include "metadata/sgen-workers.h"
215 #include "metadata/sgen-layout-stats.h"
216 #include "utils/mono-mmap.h"
217 #include "utils/mono-time.h"
218 #include "utils/mono-semaphore.h"
219 #include "utils/mono-counters.h"
220 #include "utils/mono-proclib.h"
221 #include "utils/mono-memory-model.h"
222 #include "utils/mono-logger-internal.h"
223 #include "utils/dtrace.h"
225 #include <mono/utils/mono-logger-internal.h>
226 #include <mono/utils/memcheck.h>
228 #if defined(__MACH__)
229 #include "utils/mach-support.h"
232 #define OPDEF(a,b,c,d,e,f,g,h,i,j) \
236 #include "mono/cil/opcode.def"
242 #undef pthread_create
244 #undef pthread_detach
247 * ######################################################################
248 * ######## Types and constants used by the GC.
249 * ######################################################################
252 /* 0 means not initialized, 1 is initialized, -1 means in progress */
253 static int gc_initialized = 0;
254 /* If set, check if we need to do something every X allocations */
255 gboolean has_per_allocation_action;
256 /* If set, do a heap check every X allocation */
257 guint32 verify_before_allocs = 0;
258 /* If set, do a minor collection before every X allocation */
259 guint32 collect_before_allocs = 0;
260 /* If set, do a whole heap check before each collection */
261 static gboolean whole_heap_check_before_collection = FALSE;
262 /* If set, do a heap consistency check before each minor collection */
263 static gboolean consistency_check_at_minor_collection = FALSE;
264 /* If set, do a mod union consistency check before each finishing collection pause */
265 static gboolean mod_union_consistency_check = FALSE;
266 /* If set, check whether mark bits are consistent after major collections */
267 static gboolean check_mark_bits_after_major_collection = FALSE;
268 /* If set, check that all nursery objects are pinned/not pinned, depending on context */
269 static gboolean check_nursery_objects_pinned = FALSE;
270 /* If set, do a few checks when the concurrent collector is used */
271 static gboolean do_concurrent_checks = FALSE;
272 /* If set, check that there are no references to the domain left at domain unload */
273 static gboolean xdomain_checks = FALSE;
274 /* If not null, dump the heap after each collection into this file */
275 static FILE *heap_dump_file = NULL;
276 /* If set, mark stacks conservatively, even if precise marking is possible */
277 static gboolean conservative_stack_mark = FALSE;
278 /* If set, do a plausibility check on the scan_starts before and after
280 static gboolean do_scan_starts_check = FALSE;
283 * If the major collector is concurrent and this is FALSE, we will
284 * never initiate a synchronous major collection, unless requested via
287 static gboolean allow_synchronous_major = TRUE;
288 static gboolean disable_minor_collections = FALSE;
289 static gboolean disable_major_collections = FALSE;
290 gboolean do_pin_stats = FALSE;
291 static gboolean do_verify_nursery = FALSE;
292 static gboolean do_dump_nursery_content = FALSE;
293 static gboolean enable_nursery_canaries = FALSE;
/*
 * Heavy statistics counters. NOTE(review): the matching #endif for
 * HEAVY_STATISTICS is not visible in this chunk — confirm which of the
 * following counters are conditionally compiled.
 */
295 #ifdef HEAVY_STATISTICS
296 guint64 stat_objects_alloced_degraded = 0;
297 guint64 stat_bytes_alloced_degraded = 0;
299 guint64 stat_copy_object_called_nursery = 0;
300 guint64 stat_objects_copied_nursery = 0;
301 guint64 stat_copy_object_called_major = 0;
302 guint64 stat_objects_copied_major = 0;
304 guint64 stat_scan_object_called_nursery = 0;
305 guint64 stat_scan_object_called_major = 0;
307 guint64 stat_slots_allocated_in_vain;
309 guint64 stat_nursery_copy_object_failed_from_space = 0;
310 guint64 stat_nursery_copy_object_failed_forwarded = 0;
311 guint64 stat_nursery_copy_object_failed_pinned = 0;
312 guint64 stat_nursery_copy_object_failed_to_space = 0;
/* Write-barrier invocation counters, bumped via HEAVY_STAT (see sgen_add_to_global_remset) */
314 static int stat_wbarrier_add_to_global_remset = 0;
315 static int stat_wbarrier_set_field = 0;
316 static int stat_wbarrier_set_arrayref = 0;
317 static int stat_wbarrier_arrayref_copy = 0;
318 static int stat_wbarrier_generic_store = 0;
319 static int stat_wbarrier_generic_store_atomic = 0;
320 static int stat_wbarrier_set_root = 0;
321 static int stat_wbarrier_value_copy = 0;
322 static int stat_wbarrier_object_copy = 0;
325 static guint64 stat_pinned_objects = 0;
/* Cumulative time spent in each minor-collection phase */
327 static guint64 time_minor_pre_collection_fragment_clear = 0;
328 static guint64 time_minor_pinning = 0;
329 static guint64 time_minor_scan_remsets = 0;
330 static guint64 time_minor_scan_pinned = 0;
331 static guint64 time_minor_scan_roots = 0;
332 static guint64 time_minor_finish_gray_stack = 0;
333 static guint64 time_minor_fragment_creation = 0;
/* Cumulative time spent in each major-collection phase */
335 static guint64 time_major_pre_collection_fragment_clear = 0;
336 static guint64 time_major_pinning = 0;
337 static guint64 time_major_scan_pinned = 0;
338 static guint64 time_major_scan_roots = 0;
339 static guint64 time_major_scan_mod_union = 0;
340 static guint64 time_major_finish_gray_stack = 0;
341 static guint64 time_major_free_bigobjs = 0;
342 static guint64 time_major_los_sweep = 0;
343 static guint64 time_major_sweep = 0;
344 static guint64 time_major_fragment_creation = 0;
/* NOTE(review): presumably the longest pause observed — confirm at its update site */
346 static guint64 time_max = 0;
/* Timestamps bracketing the concurrent major collection and the last minor collection */
348 static SGEN_TV_DECLARE (time_major_conc_collection_start);
349 static SGEN_TV_DECLARE (time_major_conc_collection_end);
351 static SGEN_TV_DECLARE (last_minor_collection_start_tv);
352 static SGEN_TV_DECLARE (last_minor_collection_end_tv);
/* Verbosity of GC debug logging (higher = chattier; compare the SGEN_LOG levels used below) */
354 int gc_debug_level = 0;
/* Finalizer callbacks registered by the runtime */
357 static MonoGCFinalizerCallbacks fin_callbacks;
/* Flush any buffered GC debug output to gc_debug_file. */
361 mono_gc_flush_info (void)
363 fflush (gc_debug_file);
/* Local shorthands for the sgen timing primitives. */
367 #define TV_DECLARE SGEN_TV_DECLARE
368 #define TV_GETTIME SGEN_TV_GETTIME
369 #define TV_ELAPSED SGEN_TV_ELAPSED
371 SGEN_TV_DECLARE (sgen_init_timestamp);
/* Round VAL up to the next multiple of ALIGN (ALIGN must be a power of two). */
373 #define ALIGN_TO(val,align) ((((guint64)val) + ((align) - 1)) & ~((align) - 1))
/* When nursery memory is zeroed; default is at TLAB creation. */
375 NurseryClearPolicy nursery_clear_policy = CLEAR_AT_TLAB_CREATION;
/* Local aliases for object-header inspection helpers. */
377 #define object_is_forwarded SGEN_OBJECT_IS_FORWARDED
378 #define object_is_pinned SGEN_OBJECT_IS_PINNED
379 #define pin_object SGEN_PIN_OBJECT
381 #define ptr_in_nursery sgen_ptr_in_nursery
383 #define LOAD_VTABLE SGEN_LOAD_VTABLE
/* Return the class name of OBJ read through its vtable; used for debug logging. */
386 safe_name (void* obj)
388 MonoVTable *vt = (MonoVTable*)LOAD_VTABLE (obj);
389 return vt->klass->name;
/* Whether nursery object canaries are active (set via GC options). */
393 nursery_canaries_enabled (void)
395 return enable_nursery_canaries;
398 #define safe_object_get_size sgen_safe_object_get_size
/* Exported wrapper around safe_name () for other sgen compilation units. */
401 sgen_safe_name (void* obj)
403 return safe_name (obj);
407 * ######################################################################
408 * ######## Global data.
409 * ######################################################################
/* The big GC lock serializing collections and heap mutation by the GC. */
411 LOCK_DECLARE (gc_mutex);
412 gboolean sgen_try_free_some_memory;
414 #define SCAN_START_SIZE SGEN_SCAN_START_SIZE
416 static mword pagesize = 4096;
/* Non-zero while allocating degraded (directly in the major heap) */
417 size_t degraded_mode = 0;
419 static mword bytes_pinned_from_failed_allocation = 0;
/* The nursery memory section; scanned by sgen_scan_area_with_callback () below. */
421 GCMemSection *nursery_section = NULL;
/* Bounds of all memory known to the GC, updated as sections are added. */
422 static volatile mword lowest_heap_address = ~(mword)0;
423 static volatile mword highest_heap_address = 0;
425 LOCK_DECLARE (sgen_interruption_mutex);
/* Singly-linked list node for objects whose finalizers are ready to run. */
427 typedef struct _FinalizeReadyEntry FinalizeReadyEntry;
428 struct _FinalizeReadyEntry {
429 FinalizeReadyEntry *next;
/* Singly-linked list node for registered ephemeron arrays. */
433 typedef struct _EphemeronLinkNode EphemeronLinkNode;
435 struct _EphemeronLinkNode {
436 EphemeronLinkNode *next;
/* Generation being collected right now, or -1 outside a collection pause. */
445 int current_collection_generation = -1;
446 volatile gboolean concurrent_collection_in_progress = FALSE;
448 /* objects that are ready to be finalized */
449 static FinalizeReadyEntry *fin_ready_list = NULL;
450 static FinalizeReadyEntry *critical_fin_list = NULL;
452 static EphemeronLinkNode *ephemeron_list;
454 /* registered roots: the key to the hash is the root start address */
456 * Different kinds of roots are kept separate to speed up pin_from_roots () for example.
458 SgenHashTable roots_hash [ROOT_TYPE_NUM] = {
459 SGEN_HASH_TABLE_INIT (INTERNAL_MEM_ROOTS_TABLE, INTERNAL_MEM_ROOT_RECORD, sizeof (RootRecord), mono_aligned_addr_hash, NULL),
460 SGEN_HASH_TABLE_INIT (INTERNAL_MEM_ROOTS_TABLE, INTERNAL_MEM_ROOT_RECORD, sizeof (RootRecord), mono_aligned_addr_hash, NULL),
461 SGEN_HASH_TABLE_INIT (INTERNAL_MEM_ROOTS_TABLE, INTERNAL_MEM_ROOT_RECORD, sizeof (RootRecord), mono_aligned_addr_hash, NULL)
463 static mword roots_size = 0; /* amount of memory in the root set */
/* Batch size for reporting GC roots to the profiler (see add_profile_gc_root). */
465 #define GC_ROOT_NUM 32
467 int count; /* must be the first field */
468 void *objects [GC_ROOT_NUM];
469 int root_types [GC_ROOT_NUM];
470 uintptr_t extra_info [GC_ROOT_NUM];
/* Flush the accumulated root batch in REPORT to the profiler. */
474 notify_gc_roots (GCRootReport *report)
478 mono_profiler_gc_roots (report->count, report->objects, report->root_types, report->extra_info);
/*
 * Append one root (OBJECT, its type, and its MonoClass as extra info) to
 * REPORT, flushing the batch to the profiler first when it is full.
 */
483 add_profile_gc_root (GCRootReport *report, void *object, int rtype, uintptr_t extra_info)
485 if (report->count == GC_ROOT_NUM)
486 notify_gc_roots (report);
487 report->objects [report->count] = object;
488 report->root_types [report->count] = rtype;
489 report->extra_info [report->count++] = (uintptr_t)((MonoVTable*)LOAD_VTABLE (object))->klass;
/* TLS key for the per-thread SgenThreadInfo; __thread variables used when available. */
492 MonoNativeTlsKey thread_info_key;
494 #ifdef HAVE_KW_THREAD
495 __thread SgenThreadInfo *sgen_thread_info;
496 __thread char *stack_end;
499 /* The size of a TLAB */
500 /* The bigger the value, the less often we have to go to the slow path to allocate a new
501 * one, but the more space is wasted by threads not allocating much memory.
503 * FIXME: Make this self-tuning for each thread.
505 guint32 tlab_size = (1024 * 4);
507 #define MAX_SMALL_OBJ_SIZE SGEN_MAX_SMALL_OBJ_SIZE
509 /* Functions supplied by the runtime to be called by the GC */
510 static MonoGCCallbacks gc_callbacks;
512 #define ALLOC_ALIGN SGEN_ALLOC_ALIGN
514 #define ALIGN_UP SGEN_ALIGN_UP
/* Ring buffer of recently moved objects, reported to interested parties in batches. */
516 #define MOVED_OBJECTS_NUM 64
517 static void *moved_objects [MOVED_OBJECTS_NUM];
518 static int moved_objects_idx = 0;
520 /* Vtable of the objects used to fill out nursery fragments before a collection */
521 static MonoVTable *array_fill_vtable;
523 #ifdef SGEN_DEBUG_INTERNAL_ALLOC
524 MonoNativeThreadId main_gc_thread = NULL;
527 /*Object was pinned during the current collection*/
528 static mword objects_pinned;
531 * ######################################################################
532 * ######## Macros and function declarations.
533 * ######################################################################
/* Local alias; the gray queue holds objects marked but not yet scanned. */
536 typedef SgenGrayQueue GrayQueue;
538 /* forward declarations */
539 static void scan_thread_data (void *start_nursery, void *end_nursery, gboolean precise, GrayQueue *queue);
540 static void scan_from_registered_roots (char *addr_start, char *addr_end, int root_type, ScanCopyContext ctx);
541 static void scan_finalizer_entries (FinalizeReadyEntry *list, ScanCopyContext ctx);
542 static void report_finalizer_roots (void);
543 static void report_registered_roots (void);
545 static void pin_from_roots (void *start_nursery, void *end_nursery, GrayQueue *queue);
546 static void finish_gray_stack (int generation, GrayQueue *queue);
548 void mono_gc_scan_for_specific_ref (MonoObject *key, gboolean precise);
551 static void init_stats (void);
553 static int mark_ephemerons_in_range (ScanCopyContext ctx);
554 static void clear_unreachable_ephemerons (ScanCopyContext ctx);
555 static void null_ephemerons_for_domain (MonoDomain *domain);
/* Scan/copy function table in effect for the current collection. */
557 SgenObjectOperations current_object_ops;
/* The configured major and minor collector backends. */
558 SgenMajorCollector major_collector;
559 SgenMinorCollector sgen_minor_collector;
/* The main-thread gray queue. */
560 static GrayQueue gray_queue;
562 static SgenRememberedSet remset;
564 /* The gray queue to use from the main collection thread. */
565 #define WORKERS_DISTRIBUTE_GRAY_QUEUE (&gray_queue)
568 * The gray queue a worker job must use. If we're not parallel or
569 * concurrent, we use the main gray queue.
/* Gray queue a worker job must use: the worker's private queue, or the
 * main-thread queue when WORKER_DATA is NULL (non-parallel case). */
571 static SgenGrayQueue*
572 sgen_workers_get_job_gray_queue (WorkerData *worker_data)
574 return worker_data ? &worker_data->private_gray_queue : WORKERS_DISTRIBUTE_GRAY_QUEUE;
/*
 * Move sections from QUEUE to the section gray queue stored in its
 * alloc_prepare_data, then make sure the concurrent workers are awake to
 * consume them. Only valid while a concurrent collection is in progress.
 */
578 gray_queue_redirect (SgenGrayQueue *queue)
580 gboolean wake = FALSE;
583 GrayQueueSection *section = sgen_gray_object_dequeue_section (queue);
586 sgen_section_gray_queue_enqueue (queue->alloc_prepare_data, section);
591 g_assert (concurrent_collection_in_progress);
592 sgen_workers_ensure_awake ();
/*
 * Install gray_queue_redirect as QUEUE's alloc-prepare hook (targeting the
 * workers' distribute section queue) and redirect whatever is already
 * queued. No-op unless a concurrent collection is in progress.
 */
597 gray_queue_enable_redirect (SgenGrayQueue *queue)
599 if (!concurrent_collection_in_progress)
602 sgen_gray_queue_set_alloc_prepare (queue, gray_queue_redirect, sgen_workers_get_distribute_section_gray_queue ());
603 gray_queue_redirect (queue);
/*
 * Walk objects in [START, END), invoking CALLBACK (obj, size, DATA) on each.
 * NULL words are skipped (unused/zeroed memory between objects); forwarded
 * objects are followed to their new location; array-fill dummy objects
 * (vtable == array_fill_vtable) are measured but not passed to CALLBACK.
 */
607 sgen_scan_area_with_callback (char *start, char *end, IterateObjectCallbackFunc callback, void *data, gboolean allow_flags)
609 while (start < end) {
613 if (!*(void**)start) {
614 start += sizeof (void*); /* should be ALLOC_ALIGN, really */
619 if (!(obj = SGEN_OBJECT_IS_FORWARDED (start)))
625 if ((MonoVTable*)SGEN_LOAD_VTABLE (obj) != array_fill_vtable) {
626 CHECK_CANARY_FOR_OBJECT (obj);
627 size = ALIGN_UP (safe_object_get_size ((MonoObject*)obj));
628 callback (obj, size, data);
629 CANARIFY_SIZE (size);
631 size = ALIGN_UP (safe_object_get_size ((MonoObject*)obj));
/*
 * Whether the object at START belongs to DOMAIN and must therefore be
 * removed during domain unload; logs and records the cleanup when so.
 */
639 need_remove_object_for_domain (char *start, MonoDomain *domain)
641 if (mono_object_domain (start) == domain) {
642 SGEN_LOG (4, "Need to cleanup object %p", start);
643 binary_protocol_cleanup (start, (gpointer)LOAD_VTABLE (start), safe_object_get_size ((MonoObject*)start));
/*
 * Domain-unload preprocessing for one object: internal threads must live in
 * the root domain, and real proxies whose wrapped server lives in DOMAIN
 * (or was already cleared) get their unwrapped_server reference nulled so
 * no dangling cross-domain pointer survives.
 */
650 process_object_for_domain_clearing (char *start, MonoDomain *domain)
652 GCVTable *vt = (GCVTable*)LOAD_VTABLE (start);
653 if (vt->klass == mono_defaults.internal_thread_class)
654 g_assert (mono_object_domain (start) == mono_get_root_domain ());
655 /* The object could be a proxy for an object in the domain
657 #ifndef DISABLE_REMOTING
658 if (mono_defaults.real_proxy_class->supertypes && mono_class_has_parent_fast (vt->klass, mono_defaults.real_proxy_class)) {
659 MonoObject *server = ((MonoRealProxy*)start)->unwrapped_server;
661 /* The server could already have been zeroed out, so
662 we need to check for that, too. */
663 if (server && (!LOAD_VTABLE (server) || mono_object_domain (server) == domain)) {
664 SGEN_LOG (4, "Cleaning up remote pointer in %p to object %p", start, server);
665 ((MonoRealProxy*)start)->unwrapped_server = NULL;
/*
 * Process OBJ for domain unload: clear proxy references, decide whether it
 * must be removed, and if it is removed while it has a synchronisation
 * block, drop the monitor's weak link so it does not dangle.
 */
672 clear_domain_process_object (char *obj, MonoDomain *domain)
676 process_object_for_domain_clearing (obj, domain);
677 remove = need_remove_object_for_domain (obj, domain);
679 if (remove && ((MonoObject*)obj)->synchronisation) {
680 void **dislink = mono_monitor_get_object_monitor_weak_link ((MonoObject*)obj);
682 sgen_register_disappearing_link (NULL, dislink, FALSE, TRUE);
/* Nursery-walk callback: objects belonging to the dying domain are zeroed
 * in place (their canarified extent included). */
689 clear_domain_process_minor_object_callback (char *obj, size_t size, MonoDomain *domain)
691 if (clear_domain_process_object (obj, domain)) {
692 CANARIFY_SIZE (size);
693 memset (obj, 0, size);
/* Major-heap walk callback (first pass): only preprocess; freeing is done
 * by the dedicated free callbacks in a later pass. */
698 clear_domain_process_major_object_callback (char *obj, size_t size, MonoDomain *domain)
700 clear_domain_process_object (obj, domain);
/* Major-heap walk callback: free a non-pinned object that belongs to the
 * dying domain. */
704 clear_domain_free_major_non_pinned_object_callback (char *obj, size_t size, MonoDomain *domain)
706 if (need_remove_object_for_domain (obj, domain))
707 major_collector.free_non_pinned_object (obj, size);
/* Major-heap walk callback: free a pinned object that belongs to the dying
 * domain. */
711 clear_domain_free_major_pinned_object_callback (char *obj, size_t size, MonoDomain *domain)
713 if (need_remove_object_for_domain (obj, domain))
714 major_collector.free_pinned_object (obj, size);
718 * When appdomains are unloaded we can easily remove objects that have finalizers,
719 * but all the others could still be present in random places on the heap.
720 * We need a sweep to get rid of them even though it's going to be costly
722 * The reason we need to remove them is because we access the vtable and class
723 * structures to know the object size and the reference bitmap: once the domain is
724 * unloaded they point to random memory.
/*
 * Remove every object belonging to DOMAIN from the heap at domain unload.
 * Runs with the world stopped (restarted via sgen_restart_world below):
 * finish any concurrent collection and pending sweep, process staged
 * finalizer/dislink entries, null ephemerons/links/finalizers for the
 * domain, zero matching nursery objects, then do the two-pass sweep over
 * major and LOS objects described in the comment above this function.
 */
727 mono_gc_clear_domain (MonoDomain * domain)
729 LOSObject *bigobj, *prev;
734 binary_protocol_domain_unload_begin (domain);
/* A concurrent major collection must not be running while we tear the domain down. */
738 if (concurrent_collection_in_progress)
739 sgen_perform_collection (0, GENERATION_OLD, "clear domain", TRUE);
740 g_assert (!concurrent_collection_in_progress);
742 major_collector.finish_sweeping ();
744 sgen_process_fin_stage_entries ();
745 sgen_process_dislink_stage_entries ();
747 sgen_clear_nursery_fragments ();
/* Optional debug pass: verify nothing outside DOMAIN still references it. */
749 if (xdomain_checks && domain != mono_get_root_domain ()) {
750 sgen_scan_for_registered_roots_in_domain (domain, ROOT_TYPE_NORMAL);
751 sgen_scan_for_registered_roots_in_domain (domain, ROOT_TYPE_WBARRIER);
752 sgen_check_for_xdomain_refs ();
755 /*Ephemerons and dislinks must be processed before LOS since they might end up pointing
756 to memory returned to the OS.*/
757 null_ephemerons_for_domain (domain);
759 for (i = GENERATION_NURSERY; i < GENERATION_MAX; ++i)
760 sgen_null_links_for_domain (domain, i);
762 for (i = GENERATION_NURSERY; i < GENERATION_MAX; ++i)
763 sgen_remove_finalizers_for_domain (domain, i);
765 sgen_scan_area_with_callback (nursery_section->data, nursery_section->end_data,
766 (IterateObjectCallbackFunc)clear_domain_process_minor_object_callback, domain, FALSE);
768 /* We need two passes over major and large objects because
769 freeing such objects might give their memory back to the OS
770 (in the case of large objects) or obliterate its vtable
771 (pinned objects with major-copying or pinned and non-pinned
772 objects with major-mark&sweep), but we might need to
773 dereference a pointer from an object to another object if
774 the first object is a proxy. */
775 major_collector.iterate_objects (ITERATE_OBJECTS_SWEEP_ALL, (IterateObjectCallbackFunc)clear_domain_process_major_object_callback, domain);
776 for (bigobj = los_object_list; bigobj; bigobj = bigobj->next)
777 clear_domain_process_object (bigobj->data, domain);
/* Second pass over LOS: unlink and free large objects owned by DOMAIN. */
780 for (bigobj = los_object_list; bigobj;) {
781 if (need_remove_object_for_domain (bigobj->data, domain)) {
782 LOSObject *to_free = bigobj;
784 prev->next = bigobj->next;
786 los_object_list = bigobj->next;
787 bigobj = bigobj->next;
788 SGEN_LOG (4, "Freeing large object %p", bigobj->data);
789 sgen_los_free_object (to_free);
793 bigobj = bigobj->next;
795 major_collector.iterate_objects (ITERATE_OBJECTS_SWEEP_NON_PINNED, (IterateObjectCallbackFunc)clear_domain_free_major_non_pinned_object_callback, domain);
796 major_collector.iterate_objects (ITERATE_OBJECTS_SWEEP_PINNED, (IterateObjectCallbackFunc)clear_domain_free_major_pinned_object_callback, domain);
/* Unloading the root domain means shutdown: dump accumulated statistics. */
798 if (domain == mono_get_root_domain ()) {
799 if (G_UNLIKELY (do_pin_stats))
800 sgen_pin_stats_print_class_stats ();
801 sgen_object_layout_dump (stdout);
804 sgen_restart_world (0, NULL);
806 binary_protocol_domain_unload_end (domain);
807 binary_protocol_flush_buffers (FALSE);
813 * sgen_add_to_global_remset:
815 * The global remset contains locations which point into newspace after
816 * a minor collection. This can happen if the objects they point to are pinned.
818 * LOCKING: If called from a parallel collector, the global remset
819 * lock must be held. For serial collectors that is not necessary.
/*
 * Record PTR (a location pointing at nursery object OBJ) in the global
 * remembered set. Sanity-asserts the caller's context (inside a collection
 * pause, or from the concurrent collector), may cement OBJ instead of
 * recording, and reports the remset entry to pin stats, the binary
 * protocol, and DTrace when enabled.
 */
822 sgen_add_to_global_remset (gpointer ptr, gpointer obj)
824 SGEN_ASSERT (5, sgen_ptr_in_nursery (obj), "Target pointer of global remset must be in the nursery");
826 HEAVY_STAT (++stat_wbarrier_add_to_global_remset);
828 if (!major_collector.is_concurrent) {
829 SGEN_ASSERT (5, current_collection_generation != -1, "Global remsets can only be added during collections");
831 if (current_collection_generation == -1)
832 SGEN_ASSERT (5, sgen_concurrent_collection_in_progress (), "Global remsets outside of collection pauses can only be added by the concurrent collector");
835 if (!object_is_pinned (obj))
836 SGEN_ASSERT (5, sgen_minor_collector.is_split || sgen_concurrent_collection_in_progress (), "Non-pinned objects can only remain in nursery if it is a split nursery");
837 else if (sgen_cement_lookup_or_register (obj))
840 remset.record_pointer (ptr);
842 if (G_UNLIKELY (do_pin_stats))
843 sgen_pin_stats_register_global_remset (obj);
845 SGEN_LOG (8, "Adding global remset for %p", ptr);
846 binary_protocol_global_remset (ptr, obj, (gpointer)SGEN_LOAD_VTABLE (obj));
/* DTrace probe for global remset additions. */
850 if (G_UNLIKELY (MONO_GC_GLOBAL_REMSET_ADD_ENABLED ())) {
851 MonoVTable *vt = (MonoVTable*)LOAD_VTABLE (obj);
852 MONO_GC_GLOBAL_REMSET_ADD ((mword)ptr, (mword)obj, sgen_safe_object_get_size (obj),
853 vt->klass->name_space, vt->klass->name);
859 * sgen_drain_gray_stack:
861 * Scan objects in the gray stack until the stack is empty. This should be called
862 * frequently after each object is copied, to achieve better locality and cache
865 * max_objs is the maximum number of objects to scan, or -1 to scan until the stack is
/*
 * Pop and scan objects from the gray stack: up to MAX_OBJS objects, or
 * until empty when MAX_OBJS is negative. Major collections delegate to the
 * major collector's own drain function when it provides one.
 */
869 sgen_drain_gray_stack (int max_objs, ScanCopyContext ctx)
871 ScanObjectFunc scan_func = ctx.scan_func;
872 GrayQueue *queue = ctx.queue;
874 if (current_collection_generation == GENERATION_OLD && major_collector.drain_gray_stack)
875 return major_collector.drain_gray_stack (ctx);
879 for (i = 0; i != max_objs; ++i) {
882 GRAY_OBJECT_DEQUEUE (queue, &obj, &desc);
885 SGEN_LOG (9, "Precise gray object scan %p (%s)", obj, safe_name (obj));
886 scan_func (obj, desc, queue);
888 } while (max_objs < 0);
893 * Addresses in the pin queue are already sorted. This function finds
894 * the object header for each address and pins the object. The
895 * addresses must be inside the nursery section. The (start of the)
896 * address array is overwritten with the addresses of the actually
897 * pinned objects. Return the number of pinned objects.
/* Walks the (sorted) nursery pin queue, resolves each interior address to the
 * object that contains it, and pins that object.  Returns the count of objects
 * actually pinned; the front of the pin-queue array is overwritten with them.
 * NOTE(review): several lines are elided in this excerpt (outer loop setup,
 * the `do {` opening the object walk, and closing braces) — verify against the
 * full file before editing control flow. */
900 pin_objects_from_nursery_pin_queue (ScanCopyContext ctx)
902 GCMemSection *section = nursery_section;
903 void **start = sgen_pinning_get_entry (section->pin_queue_first_entry);
904 void **end = sgen_pinning_get_entry (section->pin_queue_last_entry);
905 void *start_nursery = section->data;
906 void *end_nursery = section->next_data;
/* pinning_front: everything below this address has already been examined,
 * so earlier queue entries that fall below it can be skipped outright. */
911 void *pinning_front = start_nursery;
913 void **definitely_pinned = start;
914 ScanObjectFunc scan_func = ctx.scan_func;
915 SgenGrayQueue *queue = ctx.queue;
917 sgen_nursery_allocator_prepare_for_pinning ();
/* One iteration per pin-queue entry; entries must be sorted ascending. */
919 while (start < end) {
920 void *obj_to_pin = NULL;
921 size_t obj_to_pin_size = 0;
926 SGEN_ASSERT (0, addr >= start_nursery && addr < end_nursery, "Potential pinning address out of range");
927 SGEN_ASSERT (0, addr >= last, "Pin queue not sorted");
934 SGEN_LOG (5, "Considering pinning addr %p", addr);
935 /* We've already processed everything up to pinning_front. */
936 if (addr < pinning_front) {
942 * Find the closest scan start <= addr. We might search backward in the
943 * scan_starts array because entries might be NULL. In the worst case we
944 * start at start_nursery.
946 idx = ((char*)addr - (char*)section->data) / SCAN_START_SIZE;
947 SGEN_ASSERT (0, idx < section->num_scan_start, "Scan start index out of range");
948 search_start = (void*)section->scan_starts [idx];
949 if (!search_start || search_start > addr) {
952 search_start = section->scan_starts [idx];
953 if (search_start && search_start <= addr)
956 if (!search_start || search_start > addr)
957 search_start = start_nursery;
961 * If the pinning front is closer than the scan start we found, start
962 * searching at the front.
964 if (search_start < pinning_front)
965 search_start = pinning_front;
968 * Now addr should be in an object a short distance from search_start.
970 * search_start must point to zeroed mem or point to an object.
973 size_t obj_size, canarified_obj_size;
/* A NULL first word means unallocated (zeroed) memory — step one pointer
 * at a time until we hit an object header. */
976 if (!*(void**)search_start) {
977 search_start = (void*)ALIGN_UP ((mword)search_start + sizeof (gpointer));
978 /* The loop condition makes sure we don't overrun addr. */
982 canarified_obj_size = obj_size = ALIGN_UP (safe_object_get_size ((MonoObject*)search_start));
985 * Filler arrays are marked by an invalid sync word. We don't
986 * consider them for pinning. They are not delimited by canaries,
989 if (((MonoObject*)search_start)->synchronisation != GINT_TO_POINTER (-1)) {
990 CHECK_CANARY_FOR_OBJECT (search_start);
991 CANARIFY_SIZE (canarified_obj_size);
993 if (addr >= search_start && (char*)addr < (char*)search_start + obj_size) {
994 /* This is the object we're looking for. */
995 obj_to_pin = search_start;
996 obj_to_pin_size = canarified_obj_size;
1001 /* Skip to the next object */
1002 search_start = (void*)((char*)search_start + canarified_obj_size);
1003 } while (search_start <= addr);
1005 /* We've searched past the address we were looking for. */
1007 pinning_front = search_start;
1008 goto next_pin_queue_entry;
1012 * We've found an object to pin. It might still be a dummy array, but we
1013 * can advance the pinning front in any case.
1015 pinning_front = (char*)obj_to_pin + obj_to_pin_size;
1018 * If this is a dummy array marking the beginning of a nursery
1019 * fragment, we don't pin it.
1021 if (((MonoObject*)obj_to_pin)->synchronisation == GINT_TO_POINTER (-1))
1022 goto next_pin_queue_entry;
1025 * Finally - pin the object!
1027 desc = sgen_obj_get_descriptor_safe (obj_to_pin);
/* Eagerly scan the pinned object's references (presumably guarded by a
 * `scan_func` check on an elided line — confirm in the full source). */
1029 scan_func (obj_to_pin, desc, queue);
1031 SGEN_LOG (4, "Pinned object %p, vtable %p (%s), count %d\n",
1032 obj_to_pin, *(void**)obj_to_pin, safe_name (obj_to_pin), count);
1033 binary_protocol_pin (obj_to_pin,
1034 (gpointer)LOAD_VTABLE (obj_to_pin),
1035 safe_object_get_size (obj_to_pin));
1037 #ifdef ENABLE_DTRACE
1038 if (G_UNLIKELY (MONO_GC_OBJ_PINNED_ENABLED ())) {
1039 int gen = sgen_ptr_in_nursery (obj_to_pin) ? GENERATION_NURSERY : GENERATION_OLD;
1040 MonoVTable *vt = (MonoVTable*)LOAD_VTABLE (obj_to_pin);
1041 MONO_GC_OBJ_PINNED ((mword)obj_to_pin,
1042 sgen_safe_object_get_size (obj_to_pin),
1043 vt->klass->name_space, vt->klass->name, gen);
1047 pin_object (obj_to_pin);
1048 GRAY_OBJECT_ENQUEUE (queue, obj_to_pin, desc);
1049 if (G_UNLIKELY (do_pin_stats))
1050 sgen_pin_stats_register_object (obj_to_pin, obj_to_pin_size);
/* Compact the surviving entries to the front of the queue array. */
1051 definitely_pinned [count] = obj_to_pin;
1055 next_pin_queue_entry:
1059 //printf ("effective pinned: %d (at the end: %d)\n", count, (char*)end_nursery - (char*)last);
1060 if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS) {
1061 GCRootReport report;
1063 for (idx = 0; idx < count; ++idx)
1064 add_profile_gc_root (&report, definitely_pinned [idx], MONO_PROFILE_GC_ROOT_PINNING | MONO_PROFILE_GC_ROOT_MISC, 0);
1065 notify_gc_roots (&report);
1067 stat_pinned_objects += count;
/* Pins every object referenced from the nursery pin queue, then shrinks the
 * queue so it holds only the entries that actually resolved to an object. */
1072 pin_objects_in_nursery (ScanCopyContext ctx)
/* Empty queue — nothing to pin. */
1076 if (nursery_section->pin_queue_first_entry == nursery_section->pin_queue_last_entry)
1079 reduced_to = pin_objects_from_nursery_pin_queue (ctx);
1080 nursery_section->pin_queue_last_entry = nursery_section->pin_queue_first_entry + reduced_to;
1084 * This function is only ever called (via `collector_pin_object()` in `sgen-copy-object.h`)
1085 * when we can't promote an object because we're out of memory.
1088 sgen_pin_object (void *object, GrayQueue *queue)
1091 * All pinned objects are assumed to have been staged, so we need to stage as well.
1092 * Also, the count of staged objects shows that "late pinning" happened.
1094 sgen_pin_stage_ptr (object);
/* Mark the object's header as pinned, log it, and gray-enqueue it so its
 * references still get scanned. */
1096 SGEN_PIN_OBJECT (object);
1097 binary_protocol_pin (object, (gpointer)LOAD_VTABLE (object), safe_object_get_size (object));
1100 if (G_UNLIKELY (do_pin_stats))
1101 sgen_pin_stats_register_object (object, safe_object_get_size (object));
1103 GRAY_OBJECT_ENQUEUE (queue, object, sgen_obj_get_descriptor_safe (object));
/* Optional DTrace probe mirroring the one in the pin-queue path. */
1105 #ifdef ENABLE_DTRACE
1106 if (G_UNLIKELY (MONO_GC_OBJ_PINNED_ENABLED ())) {
1107 int gen = sgen_ptr_in_nursery (object) ? GENERATION_NURSERY : GENERATION_OLD;
1108 MonoVTable *vt = (MonoVTable*)LOAD_VTABLE (object);
1109 MONO_GC_OBJ_PINNED ((mword)object, sgen_safe_object_get_size (object), vt->klass->name_space, vt->klass->name, gen);
1114 /* Sort the addresses in array in increasing order.
1115 * Done using a by-the-book heap sort. Which has decent and stable performance, is pretty cache efficient.
1118 sgen_sort_addresses (void **array, size_t size)
/* Phase 1: build a max-heap by sifting each new element up toward the root.
 * NOTE(review): the `child` initialization and inner-loop header are elided
 * in this excerpt. */
1123 for (i = 1; i < size; ++i) {
1126 size_t parent = (child - 1) / 2;
/* Heap property already holds — stop sifting. */
1128 if (array [parent] >= array [child])
1131 tmp = array [parent];
1132 array [parent] = array [child];
1133 array [child] = tmp;
/* Phase 2: repeatedly swap the max (root) to the end and sift the new root
 * down over the shrinking heap prefix. */
1139 for (i = size - 1; i > 0; --i) {
1142 array [i] = array [0];
1148 while (root * 2 + 1 <= end) {
1149 size_t child = root * 2 + 1;
/* Pick the larger of the two children. */
1151 if (child < end && array [child] < array [child + 1])
1153 if (array [root] >= array [child])
1157 array [root] = array [child];
1158 array [child] = tmp;
1166 * Scan the memory between start and end and queue values which could be pointers
1167 * to the area between start_nursery and end_nursery for later consideration.
1168 * Typically used for thread stacks.
1171 conservatively_pin_objects_from (void **start, void **end, void *start_nursery, void *end_nursery, int pin_type)
/* Tell Valgrind the whole range is readable (stacks may contain
 * uninitialized slots that we read on purpose). */
1175 #ifdef VALGRIND_MAKE_MEM_DEFINED_IF_ADDRESSABLE
1176 VALGRIND_MAKE_MEM_DEFINED_IF_ADDRESSABLE (start, (char*)end - (char*)start);
/* Treat every word in [start, end) as a potential pointer. */
1179 while (start < end) {
1180 if (*start >= start_nursery && *start < end_nursery) {
1182 * *start can point to the middle of an object
1183 * note: should we handle pointing at the end of an object?
1184 * pinning in C# code disallows pointing at the end of an object
1185 * but there is some small chance that an optimizing C compiler
1186 * may keep the only reference to an object by pointing
1187 * at the end of it. We ignore this small chance for now.
1188 * Pointers to the end of an object are indistinguishable
1189 * from pointers to the start of the next object in memory
1190 * so if we allow that we'd need to pin two objects...
1191 * We queue the pointer in an array, the
1192 * array will then be sorted and uniqued. This way
1193 * we can coalesce several pinning pointers and it should
1194 * be faster since we'd do a memory scan with increasing
1195 * addresses. Note: we can align the address to the allocation
1196 * alignment, so the unique process is more effective.
1198 mword addr = (mword)*start;
1199 addr &= ~(ALLOC_ALIGN - 1);
/* Re-check after alignment: aligning down could move addr out of range. */
1200 if (addr >= (mword)start_nursery && addr < (mword)end_nursery) {
1201 SGEN_LOG (6, "Pinning address %p from %p", (void*)addr, start);
1202 sgen_pin_stage_ptr ((void*)addr);
1203 binary_protocol_pin_stage (start, (void*)addr);
1206 if (G_UNLIKELY (do_pin_stats)) {
1207 if (ptr_in_nursery ((void*)addr))
1208 sgen_pin_stats_register_address ((char*)addr, pin_type);
1214 SGEN_LOG (7, "found %d potential pinned heap pointers", count);
1218 * The first thing we do in a collection is to identify pinned objects.
1219 * This function considers all the areas of memory that need to be
1220 * conservatively scanned.
1223 pin_from_roots (void *start_nursery, void *end_nursery, GrayQueue *queue)
1227 SGEN_LOG (2, "Scanning pinned roots (%d bytes, %d/%d entries)", (int)roots_size, roots_hash [ROOT_TYPE_NORMAL].num_entries, roots_hash [ROOT_TYPE_PINNED].num_entries);
1228 /* objects pinned from the API are inside these roots */
1229 SGEN_HASH_TABLE_FOREACH (&roots_hash [ROOT_TYPE_PINNED], start_root, root) {
1230 SGEN_LOG (6, "Pinned roots %p-%p", start_root, root->end_root);
1231 conservatively_pin_objects_from (start_root, (void**)root->end_root, start_nursery, end_nursery, PIN_TYPE_OTHER);
1232 } SGEN_HASH_TABLE_FOREACH_END;
1233 /* now deal with the thread stacks
1234 * in the future we should be able to conservatively scan only:
1235 * *) the cpu registers
1236 * *) the unmanaged stack frames
1237 * *) the _last_ managed stack frame
1238 * *) pointers slots in managed frames
/* Conservative stack/register scan for every registered thread. */
1240 scan_thread_data (start_nursery, end_nursery, FALSE, queue);
/* Drains the gray queue, clearing the pin bit on each dequeued object.
 * Every entry is expected to be pinned (asserted below). */
1244 unpin_objects_from_queue (SgenGrayQueue *queue)
1249 GRAY_OBJECT_DEQUEUE (queue, &addr, &desc);
1252 g_assert (SGEN_OBJECT_IS_PINNED (addr));
1253 SGEN_UNPIN_OBJECT (addr);
/* Closure passed to user-defined root mark functions: bundles the copy/mark
 * callback with its gray queue.  (The `typedef struct {` opener and the
 * `queue` field line are elided in this excerpt.) */
1258 CopyOrMarkObjectFunc func;
1260 } UserCopyOrMarkData;
/* Adapter: MonoGCRootMarkFunc callbacks take (slot, user_data); unpack the
 * UserCopyOrMarkData and forward to the real copy/mark function. */
1263 single_arg_user_copy_or_mark (void **obj, void *gc_data)
1265 UserCopyOrMarkData *data = gc_data;
1267 data->func (obj, data->queue);
1271 * The memory area from start_root to end_root contains pointers to objects.
1272 * Their position is precisely described by @desc (this means that the pointer
1273 * can be either NULL or the pointer to the start of an object).
1274 * This function copies them to to_space and updates them in place.
1276 * This function is not thread-safe!
1279 precisely_scan_objects_from (void** start_root, void** end_root, char* n_start, char *n_end, mword desc, ScanCopyContext ctx)
1281 CopyOrMarkObjectFunc copy_func = ctx.copy_func;
1282 SgenGrayQueue *queue = ctx.queue;
/* Dispatch on the root descriptor encoding. */
1284 switch (desc & ROOT_DESC_TYPE_MASK) {
/* Small bitmap inline in the descriptor: one bit per pointer slot. */
1285 case ROOT_DESC_BITMAP:
1286 desc >>= ROOT_DESC_TYPE_SHIFT;
1288 if ((desc & 1) && *start_root) {
1289 copy_func (start_root, queue);
1290 SGEN_LOG (9, "Overwrote root at %p with %p", start_root, *start_root);
/* Large bitmap stored out of line; first word is the word count + 1. */
1296 case ROOT_DESC_COMPLEX: {
1297 gsize *bitmap_data = sgen_get_complex_descriptor_bitmap (desc);
1298 gsize bwords = (*bitmap_data) - 1;
1299 void **start_run = start_root;
1301 while (bwords-- > 0) {
1302 gsize bmap = *bitmap_data++;
1303 void **objptr = start_run;
1305 if ((bmap & 1) && *objptr) {
1306 copy_func (objptr, queue);
1307 SGEN_LOG (9, "Overwrote root at %p with %p", objptr, *objptr);
1312 start_run += GC_BITS_PER_WORD;
/* User-supplied marker function walks the area itself. */
1316 case ROOT_DESC_USER: {
1317 UserCopyOrMarkData data = { copy_func, queue };
1318 MonoGCRootMarkFunc marker = sgen_get_user_descriptor_func (desc);
1319 marker (start_root, single_arg_user_copy_or_mark, &data);
/* Run-length descriptors are never used for precise root scanning. */
1322 case ROOT_DESC_RUN_LEN:
1323 g_assert_not_reached ();
1325 g_assert_not_reached ();
/* Resets the cached heap address range to "empty" (low = max, high = 0) so the
 * next sgen_update_heap_boundaries() call re-establishes both ends. */
1330 reset_heap_boundaries (void)
1332 lowest_heap_address = ~(mword)0;
1333 highest_heap_address = 0;
/* Atomically widens [lowest_heap_address, highest_heap_address] to include
 * [low, high].  Lock-free: each bound is updated with a CAS retry loop so
 * concurrent allocators can call this safely. */
1337 sgen_update_heap_boundaries (mword low, mword high)
1342 old = lowest_heap_address;
1345 } while (SGEN_CAS_PTR ((gpointer*)&lowest_heap_address, (gpointer)low, (gpointer)old) != (gpointer)old);
1348 old = highest_heap_address;
1351 } while (SGEN_CAS_PTR ((gpointer*)&highest_heap_address, (gpointer)high, (gpointer)old) != (gpointer)old);
1355 * Allocate and setup the data structures needed to be able to allocate objects
1356 * in the nursery. The nursery is stored in nursery_section.
1359 alloc_nursery (void)
1361 GCMemSection *section;
/* Idempotent: bail out if the nursery was already created. */
1366 if (nursery_section)
1368 SGEN_LOG (2, "Allocating nursery size: %zu", (size_t)sgen_nursery_size);
1369 /* later we will alloc a larger area for the nursery but only activate
1370 * what we need. The rest will be used as expansion if we have too many pinned
1371 * objects in the existing nursery.
1373 /* FIXME: handle OOM */
1374 section = sgen_alloc_internal (INTERNAL_MEM_SECTION);
1376 alloc_size = sgen_nursery_size;
1378 /* If there isn't enough space even for the nursery we should simply abort. */
1379 g_assert (sgen_memgov_try_alloc_space (alloc_size, SPACE_NURSERY));
1381 data = major_collector.alloc_heap (alloc_size, alloc_size, DEFAULT_NURSERY_BITS);
1382 sgen_update_heap_boundaries ((mword)data, (mword)(data + sgen_nursery_size));
1383 SGEN_LOG (4, "Expanding nursery size (%p-%p): %lu, total: %lu", data, data + alloc_size, (unsigned long)sgen_nursery_size, (unsigned long)mono_gc_get_heap_size ());
1384 section->data = section->next_data = data;
1385 section->size = alloc_size;
1386 section->end_data = data + sgen_nursery_size;
/* One scan-start slot per SCAN_START_SIZE chunk, rounded up. */
1387 scan_starts = (alloc_size + SCAN_START_SIZE - 1) / SCAN_START_SIZE;
1388 section->scan_starts = sgen_alloc_internal_dynamic (sizeof (char*) * scan_starts, INTERNAL_MEM_SCAN_STARTS, TRUE);
1389 section->num_scan_start = scan_starts;
1391 nursery_section = section;
1393 sgen_nursery_allocator_set_nursery_bounds (data, data + sgen_nursery_size);
/* Public API: reports the nursery start address and fills in its size and
 * the shift used for nursery-pointer checks. */
1397 mono_gc_get_nursery (int *shift_bits, size_t *size)
1399 *size = sgen_nursery_size;
1400 *shift_bits = DEFAULT_NURSERY_BITS;
1401 return sgen_get_nursery_start ();
/* Records the appdomain the current thread has stopped in, for use during STW. */
1405 mono_gc_set_current_thread_appdomain (MonoDomain *domain)
1407 SgenThreadInfo *info = mono_thread_info_current ();
1409 /* Could be called from sgen_thread_unregister () with a NULL info */
1412 info->stopped_domain = domain;
/* TRUE when stacks are scanned precisely rather than conservatively. */
1417 mono_gc_precise_stack_mark_enabled (void)
1419 return !conservative_stack_mark;
/* Accessor for the GC debug log file handle. */
1423 mono_gc_get_logfile (void)
1425 return gc_debug_file;
/* Reports every object on a finalizer-ready list to the profiler as a
 * MONO_PROFILE_GC_ROOT_FINALIZER root. */
1429 report_finalizer_roots_list (FinalizeReadyEntry *list)
1431 GCRootReport report;
1432 FinalizeReadyEntry *fin;
1435 for (fin = list; fin; fin = fin->next) {
1438 add_profile_gc_root (&report, fin->object, MONO_PROFILE_GC_ROOT_FINALIZER, 0);
1440 notify_gc_roots (&report);
/* Reports both the normal and the critical finalizer queues to the profiler. */
1444 report_finalizer_roots (void)
1446 report_finalizer_roots_list (fin_ready_list);
1447 report_finalizer_roots_list (critical_fin_list);
/* Scratch pointer used to smuggle the current report into the
 * single-argument marker callback below (not thread-safe by design —
 * root reporting runs during STW). */
1450 static GCRootReport *root_report;
/* MonoGCRootMarkFunc-shaped adapter that records each root slot into
 * the report held in `root_report`. */
1453 single_arg_report_root (void **obj, void *gc_data)
1456 add_profile_gc_root (root_report, *obj, MONO_PROFILE_GC_ROOT_OTHER, 0);
/* Profiler mirror of precisely_scan_objects_from(): walks a root area
 * according to its descriptor and reports (rather than copies) each
 * non-NULL slot. */
1460 precisely_report_roots_from (GCRootReport *report, void** start_root, void** end_root, mword desc)
1462 switch (desc & ROOT_DESC_TYPE_MASK) {
/* Inline bitmap: one bit per pointer slot. */
1463 case ROOT_DESC_BITMAP:
1464 desc >>= ROOT_DESC_TYPE_SHIFT;
1466 if ((desc & 1) && *start_root) {
1467 add_profile_gc_root (report, *start_root, MONO_PROFILE_GC_ROOT_OTHER, 0);
/* Out-of-line bitmap; first word is word count + 1. */
1473 case ROOT_DESC_COMPLEX: {
1474 gsize *bitmap_data = sgen_get_complex_descriptor_bitmap (desc);
1475 gsize bwords = (*bitmap_data) - 1;
1476 void **start_run = start_root;
1478 while (bwords-- > 0) {
1479 gsize bmap = *bitmap_data++;
1480 void **objptr = start_run;
1482 if ((bmap & 1) && *objptr) {
1483 add_profile_gc_root (report, *objptr, MONO_PROFILE_GC_ROOT_OTHER, 0);
1488 start_run += GC_BITS_PER_WORD;
/* User marker: pass the report via the root_report static. */
1492 case ROOT_DESC_USER: {
1493 MonoGCRootMarkFunc marker = sgen_get_user_descriptor_func (desc);
1494 root_report = report;
1495 marker (start_root, single_arg_report_root, NULL);
1498 case ROOT_DESC_RUN_LEN:
1499 g_assert_not_reached ();
1501 g_assert_not_reached ();
/* Reports all registered roots of one type (normal / wbarrier / pinned)
 * to the profiler. */
1506 report_registered_roots_by_type (int root_type)
1508 GCRootReport report;
1512 SGEN_HASH_TABLE_FOREACH (&roots_hash [root_type], start_root, root) {
1513 SGEN_LOG (6, "Precise root scan %p-%p (desc: %p)", start_root, root->end_root, (void*)root->root_desc);
1514 precisely_report_roots_from (&report, start_root, (void**)root->end_root, root->root_desc);
1515 } SGEN_HASH_TABLE_FOREACH_END;
1516 notify_gc_roots (&report);
/* Reports the two precisely-described root categories to the profiler. */
1520 report_registered_roots (void)
1522 report_registered_roots_by_type (ROOT_TYPE_NORMAL);
1523 report_registered_roots_by_type (ROOT_TYPE_WBARRIER);
/* Copies/marks every object on a finalizer-ready list, updating each entry's
 * object pointer in place (objects may move during the copy). */
1527 scan_finalizer_entries (FinalizeReadyEntry *list, ScanCopyContext ctx)
1529 CopyOrMarkObjectFunc copy_func = ctx.copy_func;
1530 SgenGrayQueue *queue = ctx.queue;
1531 FinalizeReadyEntry *fin;
1533 for (fin = list; fin; fin = fin->next) {
1536 SGEN_LOG (5, "Scan of fin ready object: %p (%s)\n", fin->object, safe_name (fin->object));
1537 copy_func (&fin->object, queue);
/* Human-readable name for a GC generation constant (for logging). */
1542 generation_name (int generation)
1544 switch (generation) {
1545 case GENERATION_NURSERY: return "nursery";
1546 case GENERATION_OLD: return "old";
1547 default: g_assert_not_reached ();
/* Public wrapper around the static generation_name() helper. */
1552 sgen_generation_name (int generation)
1554 return generation_name (generation);
/* Returns a pointer to the object-operations vtable (copy/scan function
 * pointers) in effect for the collection currently in progress. */
1557 SgenObjectOperations *
1558 sgen_get_current_object_ops (void){
/* FIX: the address-of expression had been corrupted to `¤t_object_ops` —
 * `&curr` was mangled into the single character U+00A4 (an HTML-entity
 * `&curren;` round-trip artifact).  Restore `&current_object_ops`. */
1559 return &current_object_ops;
/* Final phase of a collection: drains the gray stack to completion while
 * interleaving ephemeron marking, bridge processing, finalizer queueing,
 * weak-link nulling and toggleref handling in the required order.
 * NOTE(review): loop openers (`do {`), timing (`atv`/`btv`) and some braces
 * are elided in this excerpt — consult the full file before restructuring. */
1564 finish_gray_stack (int generation, GrayQueue *queue)
1568 int done_with_ephemerons, ephemeron_rounds = 0;
1569 CopyOrMarkObjectFunc copy_func = current_object_ops.copy_or_mark_object;
1570 ScanObjectFunc scan_func = current_object_ops.scan_object;
1571 ScanCopyContext ctx = { scan_func, copy_func, queue };
/* For a nursery collection only objects inside the nursery are candidates;
 * for a major collection the whole address space is. */
1572 char *start_addr = generation == GENERATION_NURSERY ? sgen_get_nursery_start () : NULL;
1573 char *end_addr = generation == GENERATION_NURSERY ? sgen_get_nursery_end () : (char*)-1;
1576 * We copied all the reachable objects. Now it's the time to copy
1577 * the objects that were not referenced by the roots, but by the copied objects.
1578 * we built a stack of objects pointed to by gray_start: they are
1579 * additional roots and we may add more items as we go.
1580 * We loop until gray_start == gray_objects which means no more objects have
1581 * been added. Note this is iterative: no recursion is involved.
1582 * We need to walk the LO list as well in search of marked big objects
1583 * (use a flag since this is needed only on major collections). We need to loop
1584 * here as well, so keep a counter of marked LO (increasing it in copy_object).
1585 * To achieve better cache locality and cache usage, we drain the gray stack
1586 * frequently, after each object is copied, and just finish the work here.
1588 sgen_drain_gray_stack (-1, ctx);
1590 SGEN_LOG (2, "%s generation done", generation_name (generation));
1593 Reset bridge data, we might have lingering data from a previous collection if this is a major
1594 collection trigged by minor overflow.
1596 We must reset the gathered bridges since their original block might be evacuated due to major
1597 fragmentation in the meanwhile and the bridge code should not have to deal with that.
1599 if (sgen_need_bridge_processing ())
1600 sgen_bridge_reset_data ();
1603 * Walk the ephemeron tables marking all values with reachable keys. This must be completely done
1604 * before processing finalizable objects and non-tracking weak links to avoid finalizing/clearing
1605 * objects that are in fact reachable.
1607 done_with_ephemerons = 0;
1609 done_with_ephemerons = mark_ephemerons_in_range (ctx);
1610 sgen_drain_gray_stack (-1, ctx);
1612 } while (!done_with_ephemerons);
1614 sgen_mark_togglerefs (start_addr, end_addr, ctx);
1616 if (sgen_need_bridge_processing ()) {
1617 /*Make sure the gray stack is empty before we process bridge objects so we get liveness right*/
1618 sgen_drain_gray_stack (-1, ctx);
1619 sgen_collect_bridge_objects (generation, ctx);
1620 if (generation == GENERATION_OLD)
1621 sgen_collect_bridge_objects (GENERATION_NURSERY, ctx);
1624 Do the first bridge step here, as the collector liveness state will become useless after that.
1626 An important optimization is to only proccess the possibly dead part of the object graph and skip
1627 over all live objects as we transitively know everything they point must be alive too.
1629 The above invariant is completely wrong if we let the gray queue be drained and mark/copy everything.
1631 This has the unfortunate side effect of making overflow collections perform the first step twice, but
1632 given we now have heuristics that perform major GC in anticipation of minor overflows this should not
1635 sgen_bridge_processing_stw_step ();
1639 Make sure we drain the gray stack before processing disappearing links and finalizers.
1640 If we don't make sure it is empty we might wrongly see a live object as dead.
1642 sgen_drain_gray_stack (-1, ctx);
1645 We must clear weak links that don't track resurrection before processing object ready for
1646 finalization so they can be cleared before that.
1648 sgen_null_link_in_range (generation, TRUE, ctx);
1649 if (generation == GENERATION_OLD)
1650 sgen_null_link_in_range (GENERATION_NURSERY, TRUE, ctx);
1653 /* walk the finalization queue and move also the objects that need to be
1654 * finalized: use the finalized objects as new roots so the objects they depend
1655 * on are also not reclaimed. As with the roots above, only objects in the nursery
1656 * are marked/copied.
1658 sgen_finalize_in_range (generation, ctx);
1659 if (generation == GENERATION_OLD)
1660 sgen_finalize_in_range (GENERATION_NURSERY, ctx);
1661 /* drain the new stack that might have been created */
1662 SGEN_LOG (6, "Precise scan of gray area post fin");
1663 sgen_drain_gray_stack (-1, ctx);
1666 * This must be done again after processing finalizable objects since CWL slots are cleared only after the key is finalized.
1668 done_with_ephemerons = 0;
1670 done_with_ephemerons = mark_ephemerons_in_range (ctx);
1671 sgen_drain_gray_stack (-1, ctx);
1673 } while (!done_with_ephemerons);
1676 * Clear ephemeron pairs with unreachable keys.
1677 * We pass the copy func so we can figure out if an array was promoted or not.
1679 clear_unreachable_ephemerons (ctx);
1682 * We clear togglerefs only after all possible chances of revival are done.
1683 * This is semantically more inline with what users expect and it allows for
1684 * user finalizers to correctly interact with TR objects.
1686 sgen_clear_togglerefs (start_addr, end_addr, ctx);
1689 SGEN_LOG (2, "Finalize queue handling scan for %s generation: %d usecs %d ephemeron rounds", generation_name (generation), TV_ELAPSED (atv, btv), ephemeron_rounds);
1692 * handle disappearing links
1693 * Note we do this after checking the finalization queue because if an object
1694 * survives (at least long enough to be finalized) we don't clear the link.
1695 * This also deals with a possible issue with the monitor reclamation: with the Boehm
1696 * GC a finalized object my lose the monitor because it is cleared before the finalizer is
1699 g_assert (sgen_gray_object_queue_is_empty (queue));
/* Nulling resurrection-tracking links can revive objects; loop until the
 * queue stays empty after a drain. */
1701 sgen_null_link_in_range (generation, FALSE, ctx);
1702 if (generation == GENERATION_OLD)
1703 sgen_null_link_in_range (GENERATION_NURSERY, FALSE, ctx);
1704 if (sgen_gray_object_queue_is_empty (queue))
1706 sgen_drain_gray_stack (-1, ctx);
1709 g_assert (sgen_gray_object_queue_is_empty (queue));
/* Return now-unused gray queue sections to the allocator. */
1711 sgen_gray_object_queue_trim_free_list (queue);
/* Debug check: every non-NULL scan-start entry must point at a plausible
 * object (size within [sizeof(MonoObject), MAX_SMALL_OBJ_SIZE]). */
1715 sgen_check_section_scan_starts (GCMemSection *section)
1718 for (i = 0; i < section->num_scan_start; ++i) {
1719 if (section->scan_starts [i]) {
1720 mword size = safe_object_get_size ((MonoObject*) section->scan_starts [i]);
1721 g_assert (size >= sizeof (MonoObject) && size <= MAX_SMALL_OBJ_SIZE);
/* Runs the scan-start sanity check on the nursery and the major heap,
 * gated by the do_scan_starts_check debug flag. */
1727 check_scan_starts (void)
1729 if (!do_scan_starts_check)
1731 sgen_check_section_scan_starts (nursery_section);
1732 major_collector.check_scan_starts ();
/* Precisely scans every registered root of the given type, copying/marking
 * the objects they reference. */
1736 scan_from_registered_roots (char *addr_start, char *addr_end, int root_type, ScanCopyContext ctx)
1740 SGEN_HASH_TABLE_FOREACH (&roots_hash [root_type], start_root, root) {
1741 SGEN_LOG (6, "Precise root scan %p-%p (desc: %p)", start_root, root->end_root, (void*)root->root_desc);
1742 precisely_scan_objects_from (start_root, (void**)root->end_root, addr_start, addr_end, root->root_desc, ctx);
1743 } SGEN_HASH_TABLE_FOREACH_END;
/* Emits one <occupied> XML element for a contiguous run of live objects,
 * with offsets relative to the section start. */
1747 sgen_dump_occupied (char *start, char *end, char *section_start)
1749 fprintf (heap_dump_file, "<occupied offset=\"%td\" size=\"%td\"/>\n", start - section_start, end - start);
/* Walks a heap section object by object and writes it to heap_dump_file as
 * XML: <section> wrapping <occupied>/<object> elements. */
1753 sgen_dump_section (GCMemSection *section, const char *type)
1755 char *start = section->data;
1756 char *end = section->data + section->size;
1757 char *occ_start = NULL;
1759 char *old_start G_GNUC_UNUSED = NULL; /* just for debugging */
1761 fprintf (heap_dump_file, "<section type=\"%s\" size=\"%lu\">\n", type, (unsigned long)section->size);
1763 while (start < end) {
1765 MonoClass *class G_GNUC_UNUSED;
/* NULL first word = unallocated gap: flush any pending occupied run and
 * skip forward one pointer at a time. */
1767 if (!*(void**)start) {
1769 sgen_dump_occupied (occ_start, start, section->data);
1772 start += sizeof (void*); /* should be ALLOC_ALIGN, really */
1775 g_assert (start < section->next_data);
1780 vt = (GCVTable*)LOAD_VTABLE (start);
1783 size = ALIGN_UP (safe_object_get_size ((MonoObject*) start));
/* NOTE(review): "%d" is used for a ptrdiff_t offset and an mword size —
 * likely format-specifier mismatch on LP64; confirm against the full file
 * before changing (this is debug-dump-only code). */
1786 fprintf (heap_dump_file, "<object offset=\"%d\" class=\"%s.%s\" size=\"%d\"/>\n",
1787 start - section->data,
1788 vt->klass->name_space, vt->klass->name,
/* Flush the trailing occupied run, if any, then close the element. */
1796 sgen_dump_occupied (occ_start, start, section->data);
1798 fprintf (heap_dump_file, "</section>\n");
/* Writes one <object> XML element for obj to heap_dump_file, optionally with
 * a location attribute (nursery / major / LOS). */
1802 dump_object (MonoObject *obj, gboolean dump_location)
1804 static char class_name [1024];
1806 MonoClass *class = mono_object_class (obj);
1810 * Python's XML parser is too stupid to parse angle brackets
1811 * in strings, so we just ignore them;
/* Copy the class name, dropping <, > and " so the output stays parseable. */
1814 while (class->name [i] && j < sizeof (class_name) - 1) {
1815 if (!strchr ("<>\"", class->name [i]))
1816 class_name [j++] = class->name [i];
1819 g_assert (j < sizeof (class_name));
1822 fprintf (heap_dump_file, "<object class=\"%s.%s\" size=\"%zd\"",
1823 class->name_space, class_name,
1824 safe_object_get_size (obj));
1825 if (dump_location) {
1826 const char *location;
1827 if (ptr_in_nursery (obj))
1828 location = "nursery";
/* Small objects outside the nursery live in the major heap; larger ones
 * are LOS (the else branch is elided in this excerpt). */
1829 else if (safe_object_get_size (obj) <= MAX_SMALL_OBJ_SIZE)
1833 fprintf (heap_dump_file, " location=\"%s\"", location);
1835 fprintf (heap_dump_file, "/>\n");
/* Writes a full <collection> heap-dump record: memory usage, pin stats,
 * pinned objects, the nursery section, the major heap and the LOS list. */
1839 dump_heap (const char *type, int num, const char *reason)
1844 fprintf (heap_dump_file, "<collection type=\"%s\" num=\"%d\"", type, num);
1846 fprintf (heap_dump_file, " reason=\"%s\"", reason);
1847 fprintf (heap_dump_file, ">\n");
1848 fprintf (heap_dump_file, "<other-mem-usage type=\"mempools\" size=\"%ld\"/>\n", mono_mempool_get_bytes_allocated ());
1849 sgen_dump_internal_mem_usage (heap_dump_file);
1850 fprintf (heap_dump_file, "<pinned type=\"stack\" bytes=\"%zu\"/>\n", sgen_pin_stats_get_pinned_byte_count (PIN_TYPE_STACK));
1851 /* fprintf (heap_dump_file, "<pinned type=\"static-data\" bytes=\"%d\"/>\n", pinned_byte_counts [PIN_TYPE_STATIC_DATA]); */
1852 fprintf (heap_dump_file, "<pinned type=\"other\" bytes=\"%zu\"/>\n", sgen_pin_stats_get_pinned_byte_count (PIN_TYPE_OTHER));
1854 fprintf (heap_dump_file, "<pinned-objects>\n");
1855 for (list = sgen_pin_stats_get_object_list (); list; list = list->next)
1856 dump_object (list->obj, TRUE);
1857 fprintf (heap_dump_file, "</pinned-objects>\n");
1859 sgen_dump_section (nursery_section, "nursery");
1861 major_collector.dump_heap (heap_dump_file);
1863 fprintf (heap_dump_file, "<los>\n");
1864 for (bigobj = los_object_list; bigobj; bigobj = bigobj->next)
1865 dump_object ((MonoObject*)bigobj->data, FALSE);
1866 fprintf (heap_dump_file, "</los>\n");
1868 fprintf (heap_dump_file, "</collection>\n");
/* Records an (old, new) address pair for the profiler's GC-moves event,
 * flushing the batch buffer when it fills up. */
1872 sgen_register_moved_object (void *obj, void *destination)
1874 g_assert (mono_profiler_events & MONO_PROFILE_GC_MOVES);
1876 if (moved_objects_idx == MOVED_OBJECTS_NUM) {
1877 mono_profiler_gc_moves (moved_objects, moved_objects_idx);
1878 moved_objects_idx = 0;
/* Pairs are stored flat: even index = source, odd index = destination. */
1880 moved_objects [moved_objects_idx++] = obj;
1881 moved_objects [moved_objects_idx++] = destination;
/* One-time registration of all GC timing/statistics counters with the
 * mono_counters framework.  (The function header line is elided in this
 * excerpt — presumably `init_stats (void)`; confirm in the full file.) */
1887 static gboolean inited = FALSE;
1892 mono_counters_register ("Collection max time", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME | MONO_COUNTER_MONOTONIC, &time_max);
/* Minor (nursery) collection phase timers. */
1894 mono_counters_register ("Minor fragment clear", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &time_minor_pre_collection_fragment_clear);
1895 mono_counters_register ("Minor pinning", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &time_minor_pinning);
1896 mono_counters_register ("Minor scan remembered set", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &time_minor_scan_remsets);
1897 mono_counters_register ("Minor scan pinned", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &time_minor_scan_pinned);
1898 mono_counters_register ("Minor scan roots", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &time_minor_scan_roots);
1899 mono_counters_register ("Minor fragment creation", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &time_minor_fragment_creation);
/* Major collection phase timers. */
1901 mono_counters_register ("Major fragment clear", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &time_major_pre_collection_fragment_clear);
1902 mono_counters_register ("Major pinning", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &time_major_pinning);
1903 mono_counters_register ("Major scan pinned", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &time_major_scan_pinned);
1904 mono_counters_register ("Major scan roots", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &time_major_scan_roots);
1905 mono_counters_register ("Major scan mod union", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &time_major_scan_mod_union);
1906 mono_counters_register ("Major finish gray stack", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &time_major_finish_gray_stack);
1907 mono_counters_register ("Major free big objects", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &time_major_free_bigobjs);
1908 mono_counters_register ("Major LOS sweep", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &time_major_los_sweep);
1909 mono_counters_register ("Major sweep", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &time_major_sweep);
1910 mono_counters_register ("Major fragment creation", MONO_COUNTER_GC | MONO_COUNTER_ULONG | MONO_COUNTER_TIME, &time_major_fragment_creation);
1912 mono_counters_register ("Number of pinned objects", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_pinned_objects);
/* Detailed write-barrier / copy statistics, compiled in only with
 * HEAVY_STATISTICS. */
1914 #ifdef HEAVY_STATISTICS
1915 mono_counters_register ("WBarrier remember pointer", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_add_to_global_remset);
1916 mono_counters_register ("WBarrier set field", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_set_field);
1917 mono_counters_register ("WBarrier set arrayref", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_set_arrayref);
1918 mono_counters_register ("WBarrier arrayref copy", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_arrayref_copy);
1919 mono_counters_register ("WBarrier generic store called", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_generic_store);
1920 mono_counters_register ("WBarrier generic atomic store called", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_generic_store_atomic);
1921 mono_counters_register ("WBarrier set root", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_set_root);
1922 mono_counters_register ("WBarrier value copy", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_value_copy);
1923 mono_counters_register ("WBarrier object copy", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_object_copy);
1925 mono_counters_register ("# objects allocated degraded", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_objects_alloced_degraded);
1926 mono_counters_register ("bytes allocated degraded", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_bytes_alloced_degraded);
1928 mono_counters_register ("# copy_object() called (nursery)", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_copy_object_called_nursery);
1929 mono_counters_register ("# objects copied (nursery)", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_objects_copied_nursery);
1930 mono_counters_register ("# copy_object() called (major)", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_copy_object_called_major);
1931 mono_counters_register ("# objects copied (major)", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_objects_copied_major);
1933 mono_counters_register ("# scan_object() called (nursery)", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_scan_object_called_nursery);
1934 mono_counters_register ("# scan_object() called (major)", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_scan_object_called_major);
1936 mono_counters_register ("Slots allocated in vain", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_slots_allocated_in_vain);
1938 mono_counters_register ("# nursery copy_object() failed from space", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_nursery_copy_object_failed_from_space);
1939 mono_counters_register ("# nursery copy_object() failed forwarded", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_nursery_copy_object_failed_forwarded);
1940 mono_counters_register ("# nursery copy_object() failed pinned", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_nursery_copy_object_failed_pinned);
1941 mono_counters_register ("# nursery copy_object() failed to space", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_nursery_copy_object_failed_to_space);
1943 sgen_nursery_allocator_init_heavy_stats ();
1944 sgen_alloc_init_heavy_stats ();
/*
 * Reset the per-collection running total of bytes that had to be
 * pinned because an allocation failed during collection.  Called at
 * the start of a collection so the "should we do a major?" decision
 * only sees this cycle's failures.
 */
1952 reset_pinned_from_failed_allocation (void)
1954 bytes_pinned_from_failed_allocation = 0;
/*
 * Record that `objsize` bytes were pinned in place because we ran out
 * of space while trying to evacuate the object (late pinning).  The
 * accumulated total is consulted after the collection.
 */
1958 sgen_set_pinned_from_failed_allocation (mword objsize)
1960 bytes_pinned_from_failed_allocation += objsize;
/*
 * Whether the collection currently in progress runs concurrently with
 * the mutator.  Nursery collections are never concurrent (asserted at
 * the start of collect_nursery()); for old-generation collections it
 * depends on the collector state.  Aborts if no collection is running.
 */
1964 sgen_collection_is_concurrent (void)
1966 switch (current_collection_generation) {
1967 case GENERATION_NURSERY:
/* NOTE(review): the nursery case's return statement is elided in this
 * extract; presumably it returns FALSE — confirm against upstream. */
1969 case GENERATION_OLD:
1970 return concurrent_collection_in_progress;
/* Reaching here means current_collection_generation is neither
 * generation, i.e. we are not inside a collection: fatal. */
1972 g_error ("Invalid current generation %d", current_collection_generation);
/* Accessor: TRUE while a concurrent (old-generation) collection is running. */
1977 sgen_concurrent_collection_in_progress (void)
1979 return concurrent_collection_in_progress;
/*
 * Worker-pool job: scan the remembered set (old->young references)
 * into this worker's gray queue so referenced nursery objects get
 * marked/copied.
 */
1983 job_remembered_set_scan (void *worker_data_untyped, SgenThreadPoolJob *job)
1985 WorkerData *worker_data = worker_data_untyped;
1986 remset.scan_remsets (sgen_workers_get_job_gray_queue (worker_data));
/*
 * Job descriptor for scanning registered roots.  The generic
 * SgenThreadPoolJob header comes first so the struct can be queued on
 * the worker pool.  NOTE(review): the heap_start/heap_end/root_type
 * fields used by job_scan_from_registered_roots() are elided from
 * this extract.
 */
1990 SgenThreadPoolJob job;
1991 CopyOrMarkObjectFunc copy_or_mark_func;
1992 ScanObjectFunc scan_func;
1996 } ScanFromRegisteredRootsJob;
/*
 * Worker-pool job: scan every registered root of job_data->root_type
 * whose address lies in [heap_start, heap_end), using the scan/copy
 * functions captured when the job was enqueued.
 */
1999 job_scan_from_registered_roots (void *worker_data_untyped, SgenThreadPoolJob *job)
2001 WorkerData *worker_data = worker_data_untyped;
2002 ScanFromRegisteredRootsJob *job_data = (ScanFromRegisteredRootsJob*)job;
2003 ScanCopyContext ctx = { job_data->scan_func, job_data->copy_or_mark_func,
2004 sgen_workers_get_job_gray_queue (worker_data) };
2006 scan_from_registered_roots (job_data->heap_start, job_data->heap_end, job_data->root_type, ctx);
/*
 * Job descriptor for scanning mutator thread data (stacks/registers).
 * NOTE(review): the heap_start/heap_end fields referenced by
 * job_scan_thread_data() are elided from this extract.
 */
2010 SgenThreadPoolJob job;
2013 } ScanThreadDataJob;
/*
 * Worker-pool job: scan all mutator thread data (stacks, registers)
 * for pointers into [heap_start, heap_end).  The TRUE argument is
 * passed through to scan_thread_data(); its meaning is not visible in
 * this extract.
 */
2016 job_scan_thread_data (void *worker_data_untyped, SgenThreadPoolJob *job)
2018 WorkerData *worker_data = worker_data_untyped;
2019 ScanThreadDataJob *job_data = (ScanThreadDataJob*)job;
2021 scan_thread_data (job_data->heap_start, job_data->heap_end, TRUE,
2022 sgen_workers_get_job_gray_queue (worker_data));
/* Job descriptor for scanning a list of finalizer-ready entries. */
2026 SgenThreadPoolJob job;
2027 FinalizeReadyEntry *list;
2028 } ScanFinalizerEntriesJob;
/*
 * Worker-pool job: copy/mark every object on the given finalizer list
 * so objects awaiting finalization survive the collection.  The scan
 * function in the context is NULL — only copy_or_mark is needed here.
 */
2031 job_scan_finalizer_entries (void *worker_data_untyped, SgenThreadPoolJob *job)
2033 WorkerData *worker_data = worker_data_untyped;
2034 ScanFinalizerEntriesJob *job_data = (ScanFinalizerEntriesJob*)job;
2035 ScanCopyContext ctx = { NULL, current_object_ops.copy_or_mark_object, sgen_workers_get_job_gray_queue (worker_data) };
2037 scan_finalizer_entries (job_data->list, ctx);
/*
 * Worker-pool job: scan the major heap's mod-union card table.  Only
 * meaningful while a concurrent collection is in progress (asserted),
 * since the mod-union table accumulates cards dirtied during
 * concurrent marking.
 */
2041 job_scan_major_mod_union_cardtable (void *worker_data_untyped, SgenThreadPoolJob *job)
2043 WorkerData *worker_data = worker_data_untyped;
2044 g_assert (concurrent_collection_in_progress);
2045 major_collector.scan_card_table (TRUE, sgen_workers_get_job_gray_queue (worker_data));
/*
 * Worker-pool job: scan the large-object-space mod-union card table.
 * Like the major-heap variant, only valid during a concurrent
 * collection (asserted).
 */
2049 job_scan_los_mod_union_cardtable (void *worker_data_untyped, SgenThreadPoolJob *job)
2051 WorkerData *worker_data = worker_data_untyped;
2052 g_assert (concurrent_collection_in_progress);
2053 sgen_los_scan_card_table (TRUE, sgen_workers_get_job_gray_queue (worker_data));
/*
 * Debug check: no nursery scan-start entry may point strictly inside
 * the object occupying [start, end) — scan starts must land on object
 * boundaries.  Violations are logged, not fatal.
 */
2057 verify_scan_starts (char *start, char *end)
2061 for (i = 0; i < nursery_section->num_scan_start; ++i) {
2062 char *addr = nursery_section->scan_starts [i];
2063 if (addr > start && addr < end)
2064 SGEN_LOG (1, "NFC-BAD SCAN START [%zu] %p for obj [%p %p]", i, addr, start, end);
/*
 * Debug walk over the whole nursery: logs forwarded/pinned objects,
 * validates scan starts for each object, optionally dumps the nursery
 * contents, and — when canaries are enabled — checks each non-filler
 * object's canary.  No-op unless do_verify_nursery is set.
 */
2069 verify_nursery (void)
2071 char *start, *end, *cur, *hole_start;
2073 if (!do_verify_nursery)
2076 if (nursery_canaries_enabled ())
2077 SGEN_LOG (1, "Checking nursery canaries...");
2079 /* This cleans up unused fragments */
2080 sgen_nursery_allocator_prepare_for_pinning ();
2082 hole_start = start = cur = sgen_get_nursery_start ();
2083 end = sgen_get_nursery_end ();
/* A NULL first word means a hole (unallocated gap): skip word by word. */
2088 if (!*(void**)cur) {
2089 cur += sizeof (void*);
2093 if (object_is_forwarded (cur))
2094 SGEN_LOG (1, "FORWARDED OBJ %p", cur);
2095 else if (object_is_pinned (cur))
2096 SGEN_LOG (1, "PINNED OBJ %p", cur);
/* ss = raw object size; size = allocation-aligned size.  (The size is
 * fetched twice; the second call is redundant but harmless.) */
2098 ss = safe_object_get_size ((MonoObject*)cur);
2099 size = ALIGN_UP (safe_object_get_size ((MonoObject*)cur));
2100 verify_scan_starts (cur, cur + size);
2101 if (do_dump_nursery_content) {
2102 if (cur > hole_start)
2103 SGEN_LOG (1, "HOLE [%p %p %d]", hole_start, cur, (int)(cur - hole_start));
2104 SGEN_LOG (1, "OBJ [%p %p %d %d %s %d]", cur, cur + size, (int)size, (int)ss, sgen_safe_name ((MonoObject*)cur), (gpointer)LOAD_VTABLE (cur) == sgen_get_array_fill_vtable ());
/* Array-fill objects are allocator filler and carry no canary. */
2106 if (nursery_canaries_enabled () && (MonoVTable*)SGEN_LOAD_VTABLE (cur) != array_fill_vtable) {
2107 CHECK_CANARY_FOR_OBJECT (cur);
2108 CANARIFY_SIZE (size);
/*
 * Checks that no objects in the nursery are forwarded or pinned.  This
 * is a precondition to restarting the mutator while doing a
 * concurrent collection.  Note that we don't clear fragments because
 * we depend on that having happened earlier.
 */
2122 check_nursery_is_clean (void)
2126 cur = sgen_get_nursery_start ();
2127 end = sgen_get_nursery_end ();
/* Holes (NULL first word) are skipped one pointer-word at a time. */
2132 if (!*(void**)cur) {
2133 cur += sizeof (void*);
/* Any forwarded or pinned object here means the nursery was touched
 * when it must not have been — fatal in debug builds. */
2137 g_assert (!object_is_forwarded (cur));
2138 g_assert (!object_is_pinned (cur));
2140 size = ALIGN_UP (safe_object_get_size ((MonoObject*)cur));
2141 verify_scan_starts (cur, cur + size);
/*
 * Initialize the main gray queue for this collection.  Concurrent
 * collections additionally need the workers' distribute gray queue.
 */
2148 init_gray_queue (void)
2150 if (sgen_collection_is_concurrent ())
2151 sgen_workers_init_distribute_gray_queue ();
2152 sgen_gray_object_queue_init (&gray_queue, NULL);
/*
 * Enqueue all root-scanning jobs on the worker pool for the heap range
 * [heap_start, heap_end): registered roots (normal and write-barrier),
 * thread stacks/registers, and both finalizer lists.  Used by both the
 * minor and major collection paths.
 */
2156 enqueue_scan_from_roots_jobs (char *heap_start, char *heap_end)
2158 ScanFromRegisteredRootsJob *scrrj;
2159 ScanThreadDataJob *stdj;
2160 ScanFinalizerEntriesJob *sfej;
2162 /* registered roots, this includes static fields */
2164 scrrj = (ScanFromRegisteredRootsJob*)sgen_thread_pool_job_alloc ("scan from registered roots normal", job_scan_from_registered_roots, sizeof (ScanFromRegisteredRootsJob));
2165 scrrj->copy_or_mark_func = current_object_ops.copy_or_mark_object;
2166 scrrj->scan_func = current_object_ops.scan_object;
2167 scrrj->heap_start = heap_start;
2168 scrrj->heap_end = heap_end;
2169 scrrj->root_type = ROOT_TYPE_NORMAL;
2170 sgen_workers_enqueue_job (&scrrj->job);
/* Same scan, but for roots registered as write-barrier roots. */
2172 scrrj = (ScanFromRegisteredRootsJob*)sgen_thread_pool_job_alloc ("scan from registered roots wbarrier", job_scan_from_registered_roots, sizeof (ScanFromRegisteredRootsJob));
2173 scrrj->copy_or_mark_func = current_object_ops.copy_or_mark_object;
2174 scrrj->scan_func = current_object_ops.scan_object;
2175 scrrj->heap_start = heap_start;
2176 scrrj->heap_end = heap_end;
2177 scrrj->root_type = ROOT_TYPE_WBARRIER;
2178 sgen_workers_enqueue_job (&scrrj->job);
/* Thread stacks and registers. */
2182 stdj = (ScanThreadDataJob*)sgen_thread_pool_job_alloc ("scan thread data", job_scan_thread_data, sizeof (ScanThreadDataJob));
2183 stdj->heap_start = heap_start;
2184 stdj->heap_end = heap_end;
2185 sgen_workers_enqueue_job (&stdj->job);
2187 /* Scan the list of objects ready for finalization. */
2189 sfej = (ScanFinalizerEntriesJob*)sgen_thread_pool_job_alloc ("scan finalizer entries", job_scan_finalizer_entries, sizeof (ScanFinalizerEntriesJob));
2190 sfej->list = fin_ready_list;
2191 sgen_workers_enqueue_job (&sfej->job);
/* And the critical-finalizer list, as a separate job. */
2193 sfej = (ScanFinalizerEntriesJob*)sgen_thread_pool_job_alloc ("scan critical finalizer entries", job_scan_finalizer_entries, sizeof (ScanFinalizerEntriesJob));
2194 sfej->list = critical_fin_list;
2195 sgen_workers_enqueue_job (&sfej->job);
/*
 * Perform a nursery (minor) collection.
 *
 * Objects surviving the collection that could not be evacuated (via
 * `unpin_queue`) are late-pinned; the return value reports whether
 * that happened, in which case a follow-up major collection is a good
 * idea.  The world must already be stopped when this is called.
 */
2204 collect_nursery (SgenGrayQueue *unpin_queue, gboolean finish_up_concurrent_mark)
2206 gboolean needs_major;
2207 size_t max_garbage_amount;
2209 mword fragment_total;
2210 ScanCopyContext ctx;
2214 if (disable_minor_collections)
2217 TV_GETTIME (last_minor_collection_start_tv);
2218 atv = last_minor_collection_start_tv;
2220 MONO_GC_BEGIN (GENERATION_NURSERY);
2221 binary_protocol_collection_begin (gc_stats.minor_gc_count, GENERATION_NURSERY);
2225 #ifndef DISABLE_PERFCOUNTERS
2226 mono_perfcounters->gc_collections0++;
/* Minor collections always use the serial (non-concurrent) ops. */
2229 current_collection_generation = GENERATION_NURSERY;
2230 current_object_ops = sgen_minor_collector.serial_ops;
2232 SGEN_ASSERT (0, !sgen_collection_is_concurrent (), "Why is the nursery collection concurrent?");
2234 reset_pinned_from_failed_allocation ();
2236 check_scan_starts ();
2238 sgen_nursery_alloc_prepare_for_minor ();
2242 nursery_next = sgen_nursery_alloc_get_upper_alloc_bound ();
2243 /* FIXME: optimize later to use the higher address where an object can be present */
2244 nursery_next = MAX (nursery_next, sgen_get_nursery_end ());
2246 SGEN_LOG (1, "Start nursery collection %d %p-%p, size: %d", gc_stats.minor_gc_count, sgen_get_nursery_start (), nursery_next, (int)(nursery_next - sgen_get_nursery_start ()));
2247 max_garbage_amount = nursery_next - sgen_get_nursery_start ();
2248 g_assert (nursery_section->size >= max_garbage_amount);
2250 /* world must be stopped already */
2252 time_minor_pre_collection_fragment_clear += TV_ELAPSED (atv, btv);
2254 if (xdomain_checks) {
2255 sgen_clear_nursery_fragments ();
2256 sgen_check_for_xdomain_refs ();
2259 nursery_section->next_data = nursery_next;
2261 major_collector.start_nursery_collection ();
2263 sgen_memgov_minor_collection_start ();
2267 gc_stats.minor_gc_count ++;
2269 if (whole_heap_check_before_collection) {
2270 sgen_clear_nursery_fragments ();
2271 sgen_check_whole_heap (finish_up_concurrent_mark);
2273 if (consistency_check_at_minor_collection)
2274 sgen_check_consistency ();
2276 MONO_GC_CHECKPOINT_1 (GENERATION_NURSERY);
/* Flush staged finalizer/dislink registrations before pinning. */
2278 sgen_process_fin_stage_entries ();
2279 sgen_process_dislink_stage_entries ();
2281 MONO_GC_CHECKPOINT_2 (GENERATION_NURSERY);
2283 /* pin from pinned handles */
2284 sgen_init_pinning ();
2285 mono_profiler_gc_event (MONO_GC_EVENT_MARK_START, 0);
2286 pin_from_roots (sgen_get_nursery_start (), nursery_next, WORKERS_DISTRIBUTE_GRAY_QUEUE);
2287 /* pin cemented objects */
2288 sgen_pin_cemented_objects ();
2289 /* identify pinned objects */
2290 sgen_optimize_pin_queue ();
2291 sgen_pinning_setup_section (nursery_section);
2292 ctx.scan_func = NULL;
2293 ctx.copy_func = NULL;
2294 ctx.queue = WORKERS_DISTRIBUTE_GRAY_QUEUE;
2295 pin_objects_in_nursery (ctx);
2296 sgen_pinning_trim_queue_to_section (nursery_section);
2299 time_minor_pinning += TV_ELAPSED (btv, atv);
2300 SGEN_LOG (2, "Finding pinned pointers: %zd in %d usecs", sgen_get_pinned_count (), TV_ELAPSED (btv, atv));
2301 SGEN_LOG (4, "Start scan with %zd pinned objects", sgen_get_pinned_count ());
2303 MONO_GC_CHECKPOINT_3 (GENERATION_NURSERY);
/*
 * FIXME: When we finish a concurrent collection we do a nursery collection first,
 * as part of which we scan the card table. Then, later, we scan the mod union
 * cardtable. We should only have to do one.
 */
2310 sgen_workers_enqueue_job (sgen_thread_pool_job_alloc ("scan remset", job_remembered_set_scan, sizeof (SgenThreadPoolJob)));
2312 /* we don't have complete write barrier yet, so we scan all the old generation sections */
2314 time_minor_scan_remsets += TV_ELAPSED (atv, btv);
2315 SGEN_LOG (2, "Old generation scan: %d usecs", TV_ELAPSED (atv, btv));
2317 MONO_GC_CHECKPOINT_4 (GENERATION_NURSERY);
2319 /* FIXME: why is this here? */
2320 ctx.scan_func = current_object_ops.scan_object;
2321 ctx.copy_func = NULL;
2322 ctx.queue = &gray_queue;
2323 sgen_drain_gray_stack (-1, ctx);
2325 if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS)
2326 report_registered_roots ();
2327 if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS)
2328 report_finalizer_roots ();
2330 time_minor_scan_pinned += TV_ELAPSED (btv, atv);
2332 MONO_GC_CHECKPOINT_5 (GENERATION_NURSERY);
/* Roots: registered roots, thread data, finalizer lists. */
2334 enqueue_scan_from_roots_jobs (sgen_get_nursery_start (), nursery_next);
2337 time_minor_scan_roots += TV_ELAPSED (atv, btv);
2339 MONO_GC_CHECKPOINT_6 (GENERATION_NURSERY);
2340 MONO_GC_CHECKPOINT_7 (GENERATION_NURSERY);
2341 MONO_GC_CHECKPOINT_8 (GENERATION_NURSERY);
/* Transitive closure: drain the gray stack, process finalization. */
2343 finish_gray_stack (GENERATION_NURSERY, &gray_queue);
2345 time_minor_finish_gray_stack += TV_ELAPSED (btv, atv);
2346 mono_profiler_gc_event (MONO_GC_EVENT_MARK_END, 0);
2348 MONO_GC_CHECKPOINT_9 (GENERATION_NURSERY);
/* Late pinning may have added entries; re-sort the pin queue. */
2350 if (objects_pinned) {
2351 sgen_optimize_pin_queue ();
2352 sgen_pinning_setup_section (nursery_section);
2355 /* walk the pin_queue, build up the fragment list of free memory, unmark
2356 * pinned objects as we go, memzero() the empty fragments so they are ready for the
2359 mono_profiler_gc_event (MONO_GC_EVENT_RECLAIM_START, 0);
2360 fragment_total = sgen_build_nursery_fragments (nursery_section, unpin_queue);
2361 if (!fragment_total)
2364 /* Clear TLABs for all threads */
2365 sgen_clear_tlabs ();
2367 mono_profiler_gc_event (MONO_GC_EVENT_RECLAIM_END, 0);
2369 time_minor_fragment_creation += TV_ELAPSED (atv, btv);
2370 SGEN_LOG (2, "Fragment creation: %d usecs, %lu bytes available", TV_ELAPSED (atv, btv), (unsigned long)fragment_total);
2372 if (consistency_check_at_minor_collection)
2373 sgen_check_major_refs ();
2375 major_collector.finish_nursery_collection ();
2377 TV_GETTIME (last_minor_collection_end_tv);
2378 gc_stats.minor_gc_time += TV_ELAPSED (last_minor_collection_start_tv, last_minor_collection_end_tv);
2381 dump_heap ("minor", gc_stats.minor_gc_count - 1, NULL);
2383 /* prepare the pin queue for the next collection */
2384 sgen_finish_pinning ();
2385 if (fin_ready_list || critical_fin_list) {
2386 SGEN_LOG (4, "Finalizer-thread wakeup: ready %d", num_ready_finalizers);
2387 mono_gc_finalize_notify ();
2389 sgen_pin_stats_reset ();
2390 /* clear cemented hash */
2391 sgen_cement_clear_below_threshold ();
2393 g_assert (sgen_gray_object_queue_is_empty (&gray_queue));
2395 remset.finish_minor_collection ();
2397 check_scan_starts ();
2399 binary_protocol_flush_buffers (FALSE);
2401 sgen_memgov_minor_collection_end ();
2403 /* objects are late pinned because of lack of memory, so a major is a good call */
2404 needs_major = objects_pinned > 0;
2405 current_collection_generation = -1;
2408 MONO_GC_END (GENERATION_NURSERY);
2409 binary_protocol_collection_end (gc_stats.minor_gc_count - 1, GENERATION_NURSERY, 0, 0);
2411 if (check_nursery_objects_pinned && !sgen_minor_collector.is_split)
2412 sgen_check_nursery_objects_pinned (unpin_queue != NULL);
/* Per-object callback used by scan_nursery_objects() below. */
2418 scan_nursery_objects_callback (char *obj, size_t size, ScanCopyContext *ctx)
/*
 * This is called on all objects in the nursery, including pinned ones, so we need
 * to use sgen_obj_get_descriptor_safe(), which masks out the vtable tag bits.
 */
2424 ctx->scan_func (obj, sgen_obj_get_descriptor_safe (obj), ctx->queue);
/*
 * Scan every live object in the nursery (pinned or not) with the
 * given context.  Used by the concurrent major finish pause, which
 * must conservatively treat all nursery objects as live.
 */
2428 scan_nursery_objects (ScanCopyContext ctx)
2430 sgen_scan_area_with_callback (nursery_section->data, nursery_section->end_data,
2431 (IterateObjectCallbackFunc)scan_nursery_objects_callback, (void*)&ctx, FALSE);
/*
 * Root-marking phase of a major collection.  Pins from conservative
 * roots, pins/grays large objects, scans (or pins) nursery objects as
 * required by the collection mode, then enqueues the root-scanning
 * jobs.  `old_next_pin_slot`, if non-NULL, receives the pin count so
 * the finish phase can discard late pins.  The *_concurrent_* flags
 * select between the start pause, finish pause, and non-concurrent
 * major paths.
 */
2435 major_copy_or_mark_from_roots (size_t *old_next_pin_slot, gboolean start_concurrent_mark, gboolean finish_up_concurrent_mark, gboolean scan_mod_union, gboolean scan_whole_nursery)
/* FIXME: only use these values for the precise scan
 * note that to_space pointers should be excluded anyway...
 */
2443 char *heap_start = NULL;
2444 char *heap_end = (char*)-1;
2445 gboolean profile_roots = mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS;
2446 GCRootReport root_report = { 0 };
2447 ScanCopyContext ctx;
2449 if (concurrent_collection_in_progress) {
2450 /* This cleans up unused fragments */
2451 sgen_nursery_allocator_prepare_for_pinning ();
2453 if (do_concurrent_checks)
2454 check_nursery_is_clean ();
2456 /* The concurrent collector doesn't touch the nursery. */
2457 sgen_nursery_alloc_prepare_for_major ();
2464 /* Pinning depends on this */
2465 sgen_clear_nursery_fragments ();
2467 if (whole_heap_check_before_collection)
2468 sgen_check_whole_heap (finish_up_concurrent_mark);
2471 time_major_pre_collection_fragment_clear += TV_ELAPSED (atv, btv);
2473 if (!sgen_collection_is_concurrent ())
2474 nursery_section->next_data = sgen_get_nursery_end ();
2475 /* we should also coalesce scanning from sections close to each other
2476 * and deal with pointers outside of the sections later.
2481 if (xdomain_checks) {
2482 sgen_clear_nursery_fragments ();
2483 sgen_check_for_xdomain_refs ();
2486 if (!concurrent_collection_in_progress) {
2487 /* Remsets are not useful for a major collection */
2488 remset.clear_cards ();
2491 sgen_process_fin_stage_entries ();
2492 sgen_process_dislink_stage_entries ();
2495 sgen_init_pinning ();
2496 SGEN_LOG (6, "Collecting pinned addresses");
2497 pin_from_roots ((void*)lowest_heap_address, (void*)highest_heap_address, WORKERS_DISTRIBUTE_GRAY_QUEUE);
2499 if (!concurrent_collection_in_progress || finish_up_concurrent_mark) {
2500 if (major_collector.is_concurrent) {
/*
 * The concurrent major collector cannot evict
 * yet, so we need to pin cemented objects to
 * not break some asserts.
 *
 * FIXME: We could evict now!
 */
2508 sgen_pin_cemented_objects ();
2512 sgen_optimize_pin_queue ();
/*
 * pin_queue now contains all candidate pointers, sorted and
 * uniqued. We must do two passes now to figure out which
 * objects are pinned.
 *
 * The first is to find within the pin_queue the area for each
 * section. This requires that the pin_queue be sorted. We
 * also process the LOS objects and pinned chunks here.
 *
 * The second, destructive, pass is to reduce the section
 * areas to pointers to the actually pinned objects.
 */
2526 SGEN_LOG (6, "Pinning from sections");
2527 /* first pass for the sections */
2528 sgen_find_section_pin_queue_start_end (nursery_section);
2529 /* identify possible pointers to the inside of large objects */
2530 SGEN_LOG (6, "Pinning from large objects");
2531 for (bigobj = los_object_list; bigobj; bigobj = bigobj->next) {
2533 if (sgen_find_optimized_pin_queue_area (bigobj->data, (char*)bigobj->data + sgen_los_object_size (bigobj), &dummy, &dummy)) {
2534 binary_protocol_pin (bigobj->data, (gpointer)LOAD_VTABLE (bigobj->data), safe_object_get_size (((MonoObject*)(bigobj->data))));
2536 #ifdef ENABLE_DTRACE
2537 if (G_UNLIKELY (MONO_GC_OBJ_PINNED_ENABLED ())) {
2538 MonoVTable *vt = (MonoVTable*)LOAD_VTABLE (bigobj->data);
2539 MONO_GC_OBJ_PINNED ((mword)bigobj->data, sgen_safe_object_get_size ((MonoObject*)bigobj->data), vt->klass->name_space, vt->klass->name, GENERATION_OLD);
/* Already pinned: can only happen while finishing a concurrent mark. */
2543 if (sgen_los_object_is_pinned (bigobj->data)) {
2544 g_assert (finish_up_concurrent_mark);
2547 sgen_los_pin_object (bigobj->data);
2548 if (SGEN_OBJECT_HAS_REFERENCES (bigobj->data))
2549 GRAY_OBJECT_ENQUEUE (WORKERS_DISTRIBUTE_GRAY_QUEUE, bigobj->data, sgen_obj_get_descriptor (bigobj->data));
2550 if (G_UNLIKELY (do_pin_stats))
2551 sgen_pin_stats_register_object ((char*) bigobj->data, safe_object_get_size ((MonoObject*) bigobj->data));
2552 SGEN_LOG (6, "Marked large object %p (%s) size: %lu from roots", bigobj->data, safe_name (bigobj->data), (unsigned long)sgen_los_object_size (bigobj));
2555 add_profile_gc_root (&root_report, bigobj->data, MONO_PROFILE_GC_ROOT_PINNING | MONO_PROFILE_GC_ROOT_MISC, 0);
2559 notify_gc_roots (&root_report);
2560 /* second pass for the sections */
2561 ctx.scan_func = concurrent_collection_in_progress ? current_object_ops.scan_object : NULL;
2562 ctx.copy_func = NULL;
2563 ctx.queue = WORKERS_DISTRIBUTE_GRAY_QUEUE;
/*
 * Concurrent mark never follows references into the nursery. In the start and
 * finish pauses we must scan live nursery objects, though.
 *
 * In the finish pause we do this conservatively by scanning all nursery objects.
 * Previously we would only scan pinned objects here. We assumed that all objects
 * that were pinned during the nursery collection immediately preceding this finish
 * mark would be pinned again here. Due to the way we get the stack end for the GC
 * thread, however, that's not necessarily the case: we scan part of the stack used
 * by the GC itself, which changes constantly, so pinning isn't entirely
 * deterministic.
 *
 * The split nursery also complicates things because non-pinned objects can survive
 * in the nursery. That's why we need to do a full scan of the nursery for it, too.
 *
 * In the future we shouldn't do a preceding nursery collection at all and instead
 * do the finish pause with promotion from the nursery.
 *
 * A further complication arises when we have late-pinned objects from the preceding
 * nursery collection. Those are the result of being out of memory when trying to
 * evacuate objects. They won't be found from the roots, so we just scan the whole
 * nursery.
 *
 * Non-concurrent mark evacuates from the nursery, so it's
 * sufficient to just scan pinned nursery objects.
 */
2591 if (scan_whole_nursery || finish_up_concurrent_mark || (concurrent_collection_in_progress && sgen_minor_collector.is_split)) {
2592 scan_nursery_objects (ctx);
2594 pin_objects_in_nursery (ctx);
2595 if (check_nursery_objects_pinned && !sgen_minor_collector.is_split)
2596 sgen_check_nursery_objects_pinned (!concurrent_collection_in_progress || finish_up_concurrent_mark);
2599 major_collector.pin_objects (WORKERS_DISTRIBUTE_GRAY_QUEUE);
2600 if (old_next_pin_slot)
2601 *old_next_pin_slot = sgen_get_pinned_count ();
2604 time_major_pinning += TV_ELAPSED (atv, btv);
2605 SGEN_LOG (2, "Finding pinned pointers: %zd in %d usecs", sgen_get_pinned_count (), TV_ELAPSED (atv, btv));
2606 SGEN_LOG (4, "Start scan with %zd pinned objects", sgen_get_pinned_count ());
2608 major_collector.init_to_space ();
/*
 * The concurrent collector doesn't move objects, neither on
 * the major heap nor in the nursery, so we can mark even
 * before pinning has finished. For the non-concurrent
 * collector we start the workers after pinning.
 */
2616 if (start_concurrent_mark) {
2617 sgen_workers_start_all_workers ();
2618 gray_queue_enable_redirect (WORKERS_DISTRIBUTE_GRAY_QUEUE);
2621 #ifdef SGEN_DEBUG_INTERNAL_ALLOC
2622 main_gc_thread = mono_native_thread_self ();
2625 if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS)
2626 report_registered_roots ();
2628 time_major_scan_pinned += TV_ELAPSED (btv, atv);
2630 if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS)
2631 report_finalizer_roots ();
/* Registered roots, thread data, finalizer lists. */
2633 enqueue_scan_from_roots_jobs (heap_start, heap_end);
2636 time_major_scan_roots += TV_ELAPSED (atv, btv);
/* Mod-union scanning only happens when finishing a concurrent mark. */
2638 if (scan_mod_union) {
2639 g_assert (finish_up_concurrent_mark);
2641 /* Mod union card table */
2642 sgen_workers_enqueue_job (sgen_thread_pool_job_alloc ("scan mod union cardtable", job_scan_major_mod_union_cardtable, sizeof (SgenThreadPoolJob)));
2643 sgen_workers_enqueue_job (sgen_thread_pool_job_alloc ("scan LOS mod union cardtable", job_scan_los_mod_union_cardtable, sizeof (SgenThreadPoolJob)));
2646 time_major_scan_mod_union += TV_ELAPSED (btv, atv);
/*
 * Wrap up the root-marking phase for a concurrent major collection:
 * wait for pinning jobs to finish, then reset pinning state.  No-op
 * for non-concurrent collections.
 */
2651 major_finish_copy_or_mark (void)
2653 if (!concurrent_collection_in_progress)
/*
 * Prepare the pin queue for the next collection. Since pinning runs on the worker
 * threads we must wait for the jobs to finish before we can reset it.
 */
2660 sgen_workers_wait_for_jobs_finished ();
2661 sgen_finish_pinning ();
2663 sgen_pin_stats_reset ();
2665 if (do_concurrent_checks)
2666 check_nursery_is_clean ();
2670 major_start_collection (gboolean concurrent, size_t *old_next_pin_slot)
2672 MONO_GC_BEGIN (GENERATION_OLD);
2673 binary_protocol_collection_begin (gc_stats.major_gc_count, GENERATION_OLD);
2675 current_collection_generation = GENERATION_OLD;
2676 #ifndef DISABLE_PERFCOUNTERS
2677 mono_perfcounters->gc_collections1++;
2680 g_assert (sgen_section_gray_queue_is_empty (sgen_workers_get_distribute_section_gray_queue ()));
2682 sgen_cement_reset ();
2685 g_assert (major_collector.is_concurrent);
2686 concurrent_collection_in_progress = TRUE;
2688 current_object_ops = major_collector.major_concurrent_ops;
2690 current_object_ops = major_collector.major_ops;
2693 reset_pinned_from_failed_allocation ();
2695 sgen_memgov_major_collection_start ();
2697 //count_ref_nonref_objs ();
2698 //consistency_check ();
2700 check_scan_starts ();
2703 SGEN_LOG (1, "Start major collection %d", gc_stats.major_gc_count);
2704 gc_stats.major_gc_count ++;
2706 if (major_collector.start_major_collection)
2707 major_collector.start_major_collection ();
2709 major_copy_or_mark_from_roots (old_next_pin_slot, concurrent, FALSE, FALSE, FALSE);
2710 major_finish_copy_or_mark ();
/*
 * Finish a major collection: complete marking (joining workers if the
 * collection was concurrent), finish the gray stack, rebuild nursery
 * fragments, sweep the LOS and the major heap, and reset collector
 * state.  `old_next_pin_slot` is the pin count recorded at the start
 * of the collection, used to discard late pin entries on OOM.
 */
2714 major_finish_collection (const char *reason, size_t old_next_pin_slot, gboolean forced, gboolean scan_whole_nursery)
2716 ScannedObjectCounts counts;
2717 LOSObject *bigobj, *prevbo;
/* Concurrent path: pause workers, re-mark from roots, then rejoin. */
2723 if (concurrent_collection_in_progress) {
2724 sgen_workers_signal_start_nursery_collection_and_wait ();
2726 current_object_ops = major_collector.major_concurrent_ops;
2728 major_copy_or_mark_from_roots (NULL, FALSE, TRUE, TRUE, scan_whole_nursery);
2730 sgen_workers_signal_finish_nursery_collection ();
2732 major_finish_copy_or_mark ();
2733 gray_queue_enable_redirect (WORKERS_DISTRIBUTE_GRAY_QUEUE);
2735 sgen_workers_join ();
2737 SGEN_ASSERT (0, sgen_gray_object_queue_is_empty (&gray_queue), "Why is the gray queue not empty after workers have finished working?");
2739 #ifdef SGEN_DEBUG_INTERNAL_ALLOC
2740 main_gc_thread = NULL;
2743 if (do_concurrent_checks)
2744 check_nursery_is_clean ();
2746 SGEN_ASSERT (0, !scan_whole_nursery, "scan_whole_nursery only applies to concurrent collections");
2747 current_object_ops = major_collector.major_ops;
/*
 * The workers have stopped so we need to finish gray queue
 * work that might result from finalization in the main GC
 * thread. Redirection must therefore be turned off.
 */
2755 sgen_gray_object_queue_disable_alloc_prepare (&gray_queue);
2756 g_assert (sgen_section_gray_queue_is_empty (sgen_workers_get_distribute_section_gray_queue ()));
2758 /* all the objects in the heap */
2759 finish_gray_stack (GENERATION_OLD, &gray_queue);
2761 time_major_finish_gray_stack += TV_ELAPSED (btv, atv);
2763 SGEN_ASSERT (0, sgen_workers_all_done (), "Can't have workers working after joining");
2765 if (objects_pinned) {
2766 g_assert (!concurrent_collection_in_progress);
/*
 * This is slow, but we just OOM'd.
 *
 * See comment at `sgen_pin_queue_clear_discarded_entries` for how the pin
 * queue is laid out at this point.
 */
2774 sgen_pin_queue_clear_discarded_entries (nursery_section, old_next_pin_slot);
/*
 * We need to reestablish all pinned nursery objects in the pin queue
 * because they're needed for fragment creation. Unpinning happens by
 * walking the whole queue, so it's not necessary to reestablish where major
 * heap block pins are - all we care is that they're still in there
 */
2782 sgen_optimize_pin_queue ();
2783 sgen_find_section_pin_queue_start_end (nursery_section);
2787 reset_heap_boundaries ();
2788 sgen_update_heap_boundaries ((mword)sgen_get_nursery_start (), (mword)sgen_get_nursery_end ());
2790 if (!concurrent_collection_in_progress) {
2791 /* walk the pin_queue, build up the fragment list of free memory, unmark
2792 * pinned objects as we go, memzero() the empty fragments so they are ready for the
2795 if (!sgen_build_nursery_fragments (nursery_section, NULL))
2798 /* prepare the pin queue for the next collection */
2799 sgen_finish_pinning ();
2801 /* Clear TLABs for all threads */
2802 sgen_clear_tlabs ();
2804 sgen_pin_stats_reset ();
2807 sgen_cement_clear_below_threshold ();
2809 if (check_mark_bits_after_major_collection)
2810 sgen_check_heap_marked (concurrent_collection_in_progress);
2813 time_major_fragment_creation += TV_ELAPSED (atv, btv);
2816 MONO_GC_SWEEP_BEGIN (GENERATION_OLD, !major_collector.sweeps_lazily);
2818 /* sweep the big objects list */
2820 for (bigobj = los_object_list; bigobj;) {
2821 g_assert (!object_is_pinned (bigobj->data));
/* Survivor: unpin and keep; it still bounds the heap. */
2822 if (sgen_los_object_is_pinned (bigobj->data)) {
2823 sgen_los_unpin_object (bigobj->data);
2824 sgen_update_heap_boundaries ((mword)bigobj->data, (mword)bigobj->data + sgen_los_object_size (bigobj));
2827 /* not referenced anywhere, so we can free it */
2829 prevbo->next = bigobj->next;
2831 los_object_list = bigobj->next;
2833 bigobj = bigobj->next;
2834 sgen_los_free_object (to_free);
2838 bigobj = bigobj->next;
2842 time_major_free_bigobjs += TV_ELAPSED (btv, atv);
2847 time_major_los_sweep += TV_ELAPSED (atv, btv);
2849 major_collector.sweep ();
2851 MONO_GC_SWEEP_END (GENERATION_OLD, !major_collector.sweeps_lazily);
2854 time_major_sweep += TV_ELAPSED (btv, atv);
2857 dump_heap ("major", gc_stats.major_gc_count - 1, reason);
2859 if (fin_ready_list || critical_fin_list) {
2860 SGEN_LOG (4, "Finalizer-thread wakeup: ready %d", num_ready_finalizers);
2861 mono_gc_finalize_notify ();
2864 g_assert (sgen_gray_object_queue_is_empty (&gray_queue));
2866 sgen_memgov_major_collection_end (forced);
2867 current_collection_generation = -1;
2869 memset (&counts, 0, sizeof (ScannedObjectCounts));
2870 major_collector.finish_major_collection (&counts);
2872 g_assert (sgen_section_gray_queue_is_empty (sgen_workers_get_distribute_section_gray_queue ()));
2874 SGEN_ASSERT (0, sgen_workers_all_done (), "Can't have workers working after major collection has finished");
2875 if (concurrent_collection_in_progress)
2876 concurrent_collection_in_progress = FALSE;
2878 check_scan_starts ();
2880 binary_protocol_flush_buffers (FALSE);
2882 //consistency_check ();
2884 MONO_GC_END (GENERATION_OLD);
2885 binary_protocol_collection_end (gc_stats.major_gc_count - 1, GENERATION_OLD, counts.num_scanned_objects, counts.num_unique_scanned_objects);
/*
 * Run a complete non-concurrent major collection (start + finish) and
 * account the elapsed time.  Returns TRUE when objects had to be
 * pinned because allocation failed, signalling memory pressure.  The
 * world must be stopped already.
 */
2889 major_do_collection (const char *reason, gboolean forced)
2891 TV_DECLARE (time_start);
2892 TV_DECLARE (time_end);
2893 size_t old_next_pin_slot;
2895 if (disable_major_collections)
/* Sanity: no leftover marked-object count from a previous cycle. */
2898 if (major_collector.get_and_reset_num_major_objects_marked) {
2899 long long num_marked = major_collector.get_and_reset_num_major_objects_marked ();
2900 g_assert (!num_marked);
2903 /* world must be stopped already */
2904 TV_GETTIME (time_start);
2906 major_start_collection (FALSE, &old_next_pin_slot);
2907 major_finish_collection (reason, old_next_pin_slot, forced, FALSE);
2909 TV_GETTIME (time_end);
2910 gc_stats.major_gc_time += TV_ELAPSED (time_start, time_end);
2912 /* FIXME: also report this to the user, preferably in gc-end. */
2913 if (major_collector.get_and_reset_num_major_objects_marked)
2914 major_collector.get_and_reset_num_major_objects_marked ();
2916 return bytes_pinned_from_failed_allocation > 0;
/*
 * Kick off a concurrent major collection: run the initial stop-the-
 * world marking pause, hand the gray queue to the workers, and return
 * while marking continues in the background.
 */
2920 major_start_concurrent_collection (const char *reason)
2922 TV_DECLARE (time_start);
2923 TV_DECLARE (time_end);
2924 long long num_objects_marked;
2926 if (disable_major_collections)
2929 TV_GETTIME (time_start);
2930 SGEN_TV_GETTIME (time_major_conc_collection_start);
2932 num_objects_marked = major_collector.get_and_reset_num_major_objects_marked ();
2933 g_assert (num_objects_marked == 0);
2935 MONO_GC_CONCURRENT_START_BEGIN (GENERATION_OLD);
2936 binary_protocol_concurrent_start ();
2938 // FIXME: store reason and pass it when finishing
2939 major_start_collection (TRUE, NULL);
/* Hand remaining gray objects over to the worker threads. */
2941 gray_queue_redirect (&gray_queue);
2943 num_objects_marked = major_collector.get_and_reset_num_major_objects_marked ();
2944 MONO_GC_CONCURRENT_START_END (GENERATION_OLD, num_objects_marked);
2946 TV_GETTIME (time_end);
2947 gc_stats.major_gc_time += TV_ELAPSED (time_start, time_end);
2949 current_collection_generation = -1;
/*
 * Returns whether the concurrent major collection's marking phase has
 * finished, i.e. all worker threads are done.
 */
2956 major_should_finish_concurrent_collection (void)
2958 SGEN_ASSERT (0, sgen_gray_object_queue_is_empty (&gray_queue), "Why is the gray queue not empty before we have started doing anything?");
2959 return sgen_workers_all_done ();
/*
 * Periodic update while a concurrent major collection runs: fold the
 * cards dirtied since the last update into the mod-union tables of
 * the major heap and the LOS so the finish pause sees them.
 */
2963 major_update_concurrent_collection (void)
2965 TV_DECLARE (total_start);
2966 TV_DECLARE (total_end);
2968 TV_GETTIME (total_start);
2970 MONO_GC_CONCURRENT_UPDATE_FINISH_BEGIN (GENERATION_OLD, major_collector.get_and_reset_num_major_objects_marked ());
2971 binary_protocol_concurrent_update ();
2973 major_collector.update_cardtable_mod_union ();
2974 sgen_los_update_cardtable_mod_union ();
2976 MONO_GC_CONCURRENT_UPDATE_END (GENERATION_OLD, major_collector.get_and_reset_num_major_objects_marked ());
2978 TV_GETTIME (total_end);
2979 gc_stats.major_gc_time += TV_ELAPSED (total_start, total_end);
/*
 * Final stop-the-world pause of a concurrent major collection: wait
 * for workers, fold in the last mod-union cards, run a finishing
 * nursery collection (collecting objects to unpin afterwards), then
 * complete the major collection.  The nursery collection's time is
 * subtracted from the major pause time since it is accounted as a
 * minor collection.
 */
2983 major_finish_concurrent_collection (gboolean forced)
2985 TV_DECLARE (total_start);
2986 TV_DECLARE (total_end);
2987 gboolean late_pinned;
2988 SgenGrayQueue unpin_queue;
2989 memset (&unpin_queue, 0, sizeof (unpin_queue));
2991 TV_GETTIME (total_start);
2993 MONO_GC_CONCURRENT_UPDATE_FINISH_BEGIN (GENERATION_OLD, major_collector.get_and_reset_num_major_objects_marked ());
2994 binary_protocol_concurrent_finish ();
/*
 * The major collector can add global remsets which are processed in the finishing
 * nursery collection, below. That implies that the workers must have finished
 * marking before the nursery collection is allowed to run, otherwise we might miss
 * some remsets.
 */
3002 sgen_workers_wait ();
3004 SGEN_TV_GETTIME (time_major_conc_collection_end);
3005 gc_stats.major_gc_time_concurrent += SGEN_TV_ELAPSED (time_major_conc_collection_start, time_major_conc_collection_end);
3007 major_collector.update_cardtable_mod_union ();
3008 sgen_los_update_cardtable_mod_union ();
/* Finishing nursery collection; remember objects to unpin later. */
3010 late_pinned = collect_nursery (&unpin_queue, TRUE);
3012 if (mod_union_consistency_check)
3013 sgen_check_mod_union_consistency ();
3015 current_collection_generation = GENERATION_OLD;
3016 major_finish_collection ("finishing", -1, forced, late_pinned);
3018 if (whole_heap_check_before_collection)
3019 sgen_check_whole_heap (FALSE);
3021 unpin_objects_from_queue (&unpin_queue);
3022 sgen_gray_object_queue_deinit (&unpin_queue);
3024 MONO_GC_CONCURRENT_FINISH_END (GENERATION_OLD, major_collector.get_and_reset_num_major_objects_marked ());
3026 TV_GETTIME (total_end);
/* Don't double-count the embedded minor collection's pause. */
3027 gc_stats.major_gc_time += TV_ELAPSED (total_start, total_end) - TV_ELAPSED (last_minor_collection_start_tv, last_minor_collection_end_tv);
3029 current_collection_generation = -1;
3033 * Ensure an allocation request for @size will succeed by freeing enough memory.
3035 * LOCKING: The GC lock MUST be held.
/*
 * Decides which generation (if any) to collect so that an allocation of
 * @size bytes can succeed, then triggers that collection.
 * NOTE(review): extraction has dropped some branch/brace lines, so the
 * exact branch structure here differs from the original file.
 */
3038 sgen_ensure_free_space (size_t size)
3040 int generation_to_collect = -1;
3041 const char *reason = NULL;
/* Large objects (LOS) can only be freed by a major collection. */
3043 if (size > SGEN_MAX_SMALL_OBJ_SIZE) {
3044 if (sgen_need_major_collection (size)) {
3045 reason = "LOS overflow";
3046 generation_to_collect = GENERATION_OLD;
3049 if (degraded_mode) {
3050 if (sgen_need_major_collection (size)) {
3051 reason = "Degraded mode overflow";
3052 generation_to_collect = GENERATION_OLD;
3054 } else if (sgen_need_major_collection (size)) {
3055 reason = "Minor allowance";
3056 generation_to_collect = GENERATION_OLD;
3058 generation_to_collect = GENERATION_NURSERY;
3059 reason = "Nursery full";
/* If nothing else is pending but a finished concurrent collection is
 * waiting to be wrapped up, do that now. */
3063 if (generation_to_collect == -1) {
3064 if (concurrent_collection_in_progress && sgen_workers_all_done ()) {
3065 generation_to_collect = GENERATION_OLD;
3066 reason = "Finish concurrent collection";
3070 if (generation_to_collect == -1)
3072 sgen_perform_collection (size, generation_to_collect, reason, FALSE);
3076 * LOCKING: Assumes the GC lock is held.
/*
 * Top-level collection driver: stops the world, runs/updates/finishes the
 * requested collection (handling an in-progress concurrent major collection
 * and possible overflow collections), records timing info, and restarts the
 * world.
 * @requested_size: allocation size that triggered the collection (0 if none).
 * @generation_to_collect: GENERATION_NURSERY or GENERATION_OLD.
 * @reason: human-readable trigger description, used in timing/log records.
 * @wait_to_finish: force the major collection to complete synchronously.
 * NOTE(review): line-sparse extraction — braces, `else` lines and some
 * statements from the original are missing here.
 */
3079 sgen_perform_collection (size_t requested_size, int generation_to_collect, const char *reason, gboolean wait_to_finish)
3081 TV_DECLARE (gc_start);
3082 TV_DECLARE (gc_end);
3083 TV_DECLARE (gc_total_start);
3084 TV_DECLARE (gc_total_end);
/* infos[0] = requested collection, infos[1] = overflow collection (if any). */
3085 GGTimingInfo infos [2];
3086 int overflow_generation_to_collect = -1;
3087 int oldest_generation_collected = generation_to_collect;
3088 const char *overflow_reason = NULL;
3090 MONO_GC_REQUESTED (generation_to_collect, requested_size, wait_to_finish ? 1 : 0);
3092 binary_protocol_collection_force (generation_to_collect);
3094 SGEN_ASSERT (0, generation_to_collect == GENERATION_NURSERY || generation_to_collect == GENERATION_OLD, "What generation is this?");
3096 mono_profiler_gc_event (MONO_GC_EVENT_START, generation_to_collect);
3098 TV_GETTIME (gc_start);
3100 sgen_stop_world (generation_to_collect);
3102 TV_GETTIME (gc_total_start);
3104 if (concurrent_collection_in_progress) {
3106 * We update the concurrent collection. If it finished, we're done. If
3107 * not, and we've been asked to do a nursery collection, we do that.
3109 gboolean finish = major_should_finish_concurrent_collection () || (wait_to_finish && generation_to_collect == GENERATION_OLD);
3112 major_finish_concurrent_collection (wait_to_finish);
3113 oldest_generation_collected = GENERATION_OLD;
/* Not finishing: pause workers, fold in card updates, optionally run
 * the requested nursery collection, then resume the workers. */
3115 sgen_workers_signal_start_nursery_collection_and_wait ();
3117 major_update_concurrent_collection ();
3118 if (generation_to_collect == GENERATION_NURSERY)
3119 collect_nursery (NULL, FALSE);
3121 sgen_workers_signal_finish_nursery_collection ();
3128 * If we've been asked to do a major collection, and the major collector wants to
3129 * run synchronously (to evacuate), we set the flag to do that.
3131 if (generation_to_collect == GENERATION_OLD &&
3132 allow_synchronous_major &&
3133 major_collector.want_synchronous_collection &&
3134 *major_collector.want_synchronous_collection) {
3135 wait_to_finish = TRUE;
3138 SGEN_ASSERT (0, !concurrent_collection_in_progress, "Why did this not get handled above?");
3141 * There's no concurrent collection in progress. Collect the generation we're asked
3142 * to collect. If the major collector is concurrent and we're not forced to wait,
3143 * start a concurrent collection.
3145 // FIXME: extract overflow reason
3146 if (generation_to_collect == GENERATION_NURSERY) {
/* A TRUE return from collect_nursery means excessive pinning: escalate. */
3147 if (collect_nursery (NULL, FALSE)) {
3148 overflow_generation_to_collect = GENERATION_OLD;
3149 overflow_reason = "Minor overflow";
3152 if (major_collector.is_concurrent && !wait_to_finish) {
/* Empty the nursery first so concurrent marking starts from a clean state. */
3153 collect_nursery (NULL, FALSE);
3154 major_start_concurrent_collection (reason);
3155 // FIXME: set infos[0] properly
/* Synchronous major; a TRUE return means pinning forced a minor follow-up. */
3159 if (major_do_collection (reason, wait_to_finish)) {
3160 overflow_generation_to_collect = GENERATION_NURSERY;
3161 overflow_reason = "Excessive pinning";
3165 TV_GETTIME (gc_end);
3167 memset (infos, 0, sizeof (infos));
3168 infos [0].generation = generation_to_collect;
3169 infos [0].reason = reason;
3170 infos [0].is_overflow = FALSE;
3171 infos [1].generation = -1;
3172 infos [0].total_time = SGEN_TV_ELAPSED (gc_start, gc_end);
3174 SGEN_ASSERT (0, !concurrent_collection_in_progress, "Why did this not get handled above?");
3176 if (overflow_generation_to_collect != -1) {
3178 * We need to do an overflow collection, either because we ran out of memory
3179 * or the nursery is fully pinned.
3182 mono_profiler_gc_event (MONO_GC_EVENT_START, overflow_generation_to_collect);
3183 infos [1].generation = overflow_generation_to_collect;
3184 infos [1].reason = overflow_reason;
3185 infos [1].is_overflow = TRUE;
/* Stash the start time here; converted to an elapsed value below. */
3186 infos [1].total_time = gc_end;
3188 if (overflow_generation_to_collect == GENERATION_NURSERY)
3189 collect_nursery (NULL, FALSE);
3191 major_do_collection (overflow_reason, wait_to_finish);
3193 TV_GETTIME (gc_end);
3194 infos [1].total_time = SGEN_TV_ELAPSED (infos [1].total_time, gc_end);
3196 /* keep events symmetric */
3197 mono_profiler_gc_event (MONO_GC_EVENT_END, overflow_generation_to_collect);
3199 oldest_generation_collected = MAX (oldest_generation_collected, overflow_generation_to_collect);
3202 SGEN_LOG (2, "Heap size: %lu, LOS size: %lu", (unsigned long)mono_gc_get_heap_size (), (unsigned long)los_memory_usage);
3204 /* this also sets the proper pointers for the next allocation */
3205 if (generation_to_collect == GENERATION_NURSERY && !sgen_can_alloc_size (requested_size)) {
3206 /* TypeBuilder and MonoMethod are killing mcs with fragmentation */
3207 SGEN_LOG (1, "nursery collection didn't find enough room for %zd alloc (%zd pinned)", requested_size, sgen_get_pinned_count ());
3208 sgen_dump_pin_queue ();
3213 g_assert (sgen_gray_object_queue_is_empty (&gray_queue));
3215 TV_GETTIME (gc_total_end);
3216 time_max = MAX (time_max, TV_ELAPSED (gc_total_start, gc_total_end));
3218 sgen_restart_world (oldest_generation_collected, infos);
3220 mono_profiler_gc_event (MONO_GC_EVENT_END, generation_to_collect);
3224 * ######################################################################
3225 * ######## Memory allocation from the OS
3226 * ######################################################################
3227 * This section of code deals with getting memory from the OS and
3228 * allocating memory for GC-internal data structures.
3229 * Internal memory can be handled with a freelist for small objects.
/* Debugging helper: print internal and pinned memory usage to stdout.
 * Marked G_GNUC_UNUSED — intended to be called ad hoc from a debugger. */
3235 G_GNUC_UNUSED static void
3236 report_internal_mem_usage (void)
3238 printf ("Internal memory usage:\n");
3239 sgen_report_internal_mem_usage ();
3240 printf ("Pinned memory usage:\n");
3241 major_collector.report_pinned_memory_usage ();
3245 * ######################################################################
3246 * ######## Finalization support
3247 * ######################################################################
/*
 * Liveness test for an old-generation object: pinned or forwarded objects
 * are alive; LOS objects are alive only if pinned; otherwise ask the
 * major collector.
 */
3250 static inline gboolean
3251 sgen_major_is_object_alive (void *object)
3255 /* Oldgen objects can be pinned and forwarded too */
3256 if (SGEN_OBJECT_IS_PINNED (object) || SGEN_OBJECT_IS_FORWARDED (object))
3260 * FIXME: major_collector.is_object_live() also calculates the
3261 * size. Avoid the double calculation.
3263 objsize = SGEN_ALIGN_UP (sgen_safe_object_get_size ((MonoObject*)object));
/* Objects above the small-object threshold live in the LOS. */
3264 if (objsize > SGEN_MAX_SMALL_OBJ_SIZE)
3265 return sgen_los_object_is_pinned (object);
3267 return major_collector.is_object_live (object);
3271 * If the object has been forwarded it means it's still referenced from a root.
3272 * If it is pinned it's still alive as well.
3273 * A LOS object is only alive if we have pinned it.
3274 * Return TRUE if @obj is ready to be finalized.
/* Generic liveness: dispatch on whether the object is in the nursery. */
3276 static inline gboolean
3277 sgen_is_object_alive (void *object)
3279 if (ptr_in_nursery (object))
3280 return sgen_nursery_is_object_alive (object);
3282 return sgen_major_is_object_alive (object);
3286 * This function returns true if @object is either alive or it belongs to the old gen
3287 * and we're currently doing a minor collection.
/* NOTE(review): the `return TRUE;` for the old-gen-during-minor case was
 * dropped by the extraction; the comment above describes the intent. */
3290 sgen_is_object_alive_for_current_gen (char *object)
3292 if (ptr_in_nursery (object))
3293 return sgen_nursery_is_object_alive (object);
3295 if (current_collection_generation == GENERATION_NURSERY)
3298 return sgen_major_is_object_alive (object);
3302 * This function returns true if @object is either alive and belongs to the
3303 * current collection - major collections are full heap, so old gen objects
3304 * are never alive during a minor collection.
3307 sgen_is_object_alive_and_on_current_collection (char *object)
3309 if (ptr_in_nursery (object))
3310 return sgen_nursery_is_object_alive (object);
3312 if (current_collection_generation == GENERATION_NURSERY)
3315 return sgen_major_is_object_alive (object);
/* An object is ready for finalization exactly when it is no longer alive. */
3320 sgen_gc_is_object_ready_for_finalization (void *object)
3322 return !sgen_is_object_alive (object);
/* Returns whether @obj's class derives from CriticalFinalizerObject, i.e.
 * its finalizer must run after all ordinary finalizers. */
3326 has_critical_finalizer (MonoObject *obj)
3330 if (!mono_defaults.critical_finalizer_object)
3333 class = ((MonoVTable*)LOAD_VTABLE (obj))->klass;
3335 return mono_class_has_parent_fast (class, mono_defaults.critical_finalizer_object);
/* Returns whether @obj's vtable has the finalizer-aware GC bit set. */
3339 is_finalization_aware (MonoObject *obj)
3341 MonoVTable *vt = ((MonoVTable*)LOAD_VTABLE (obj));
3342 return (vt->gc_bits & SGEN_GC_BIT_FINALIZER_AWARE) == SGEN_GC_BIT_FINALIZER_AWARE;
/*
 * Enqueue @obj for finalization, on the critical list if its class requires
 * critical finalization, otherwise on the ordinary ready list. Also fires
 * the finalization-aware callback and the DTrace enqueue probe if enabled.
 * NOTE(review): the if/else around the two list pushes was dropped by the
 * extraction; only one list is pushed to in the original.
 */
3346 sgen_queue_finalization_entry (MonoObject *obj)
3348 FinalizeReadyEntry *entry = sgen_alloc_internal (INTERNAL_MEM_FINALIZE_READY_ENTRY);
3349 gboolean critical = has_critical_finalizer (obj);
3350 entry->object = obj;
3352 entry->next = critical_fin_list;
3353 critical_fin_list = entry;
3355 entry->next = fin_ready_list;
3356 fin_ready_list = entry;
3359 if (fin_callbacks.object_queued_for_finalization && is_finalization_aware (obj))
3360 fin_callbacks.object_queued_for_finalization (obj);
3362 #ifdef ENABLE_DTRACE
3363 if (G_UNLIKELY (MONO_GC_FINALIZE_ENQUEUE_ENABLED ())) {
3364 int gen = sgen_ptr_in_nursery (obj) ? GENERATION_NURSERY : GENERATION_OLD;
3365 MonoVTable *vt = (MonoVTable*)LOAD_VTABLE (obj);
3366 MONO_GC_FINALIZE_ENQUEUE ((mword)obj, sgen_safe_object_get_size (obj),
3367 vt->klass->name_space, vt->klass->name, gen, critical);
/* Public liveness query relative to the current collection. */
3373 sgen_object_is_live (void *obj)
3375 return sgen_is_object_alive_and_on_current_collection (obj);
3378 /* LOCKING: requires that the GC lock is held */
/* Remove from the ephemeron list every entry whose array belongs to
 * @domain (used on domain unload), freeing the unlinked nodes. */
3380 null_ephemerons_for_domain (MonoDomain *domain)
3382 EphemeronLinkNode *current = ephemeron_list, *prev = NULL;
3385 MonoObject *object = (MonoObject*)current->array;
3388 SGEN_ASSERT (0, object->vtable, "Can't have objects without vtables.");
3390 if (object && object->vtable->domain == domain) {
3391 EphemeronLinkNode *tmp = current;
/* Unlink: fix prev->next, or the list head when removing the first node. */
3394 prev->next = current->next;
3396 ephemeron_list = current->next;
3398 current = current->next;
3399 sgen_free_internal (tmp, INTERNAL_MEM_EPHEMERON_LINK);
/* Not matching: keep the node and advance. */
3402 current = current->next;
3407 /* LOCKING: requires that the GC lock is held */
/*
 * Walk all ephemeron arrays; drop list entries whose array object died,
 * and replace keys that did not survive the collection with the domain's
 * tombstone object so their slots can be reused.
 * NOTE(review): loop header and some unlink lines are missing from this
 * extraction.
 */
3409 clear_unreachable_ephemerons (ScanCopyContext ctx)
3411 CopyOrMarkObjectFunc copy_func = ctx.copy_func;
3412 GrayQueue *queue = ctx.queue;
3413 EphemeronLinkNode *current = ephemeron_list, *prev = NULL;
3415 Ephemeron *cur, *array_end;
3419 char *object = current->array;
3421 if (!sgen_is_object_alive_for_current_gen (object)) {
3422 EphemeronLinkNode *tmp = current;
3424 SGEN_LOG (5, "Dead Ephemeron array at %p", object);
3427 prev->next = current->next;
3429 ephemeron_list = current->next;
3431 current = current->next;
3432 sgen_free_internal (tmp, INTERNAL_MEM_EPHEMERON_LINK);
/* The array survived: update our pointer to its (possibly moved) copy. */
3437 copy_func ((void**)&object, queue);
3438 current->array = object;
3440 SGEN_LOG (5, "Clearing unreachable entries for ephemeron array at %p", object);
3442 array = (MonoArray*)object;
3443 cur = mono_array_addr (array, Ephemeron, 0);
3444 array_end = cur + mono_array_length_fast (array);
3445 tombstone = (char*)((MonoVTable*)LOAD_VTABLE (object))->domain->ephemeron_tombstone;
3447 for (; cur < array_end; ++cur) {
3448 char *key = (char*)cur->key;
/* Empty or already-tombstoned slots need no work. */
3450 if (!key || key == tombstone)
3453 SGEN_LOG (5, "[%td] key %p (%s) value %p (%s)", cur - mono_array_addr (array, Ephemeron, 0),
3454 key, sgen_is_object_alive_for_current_gen (key) ? "reachable" : "unreachable",
3455 cur->value, cur->value && sgen_is_object_alive_for_current_gen (cur->value) ? "reachable" : "unreachable");
/* Dead key: tombstone the slot (value clearing is elided in this extraction). */
3457 if (!sgen_is_object_alive_for_current_gen (key)) {
3458 cur->key = tombstone;
3464 current = current->next;
3469 LOCKING: requires that the GC lock is held
3471 Limitations: We scan all ephemerons on every collection since the current design doesn't allow for a simple nursery/mature split.
/*
 * One iteration of ephemeron marking: for every reachable ephemeron array,
 * mark values whose keys are reachable. Returns non-zero ("nothing marked")
 * when a fixed point has been reached; callers iterate until then.
 */
3474 mark_ephemerons_in_range (ScanCopyContext ctx)
3476 CopyOrMarkObjectFunc copy_func = ctx.copy_func;
3477 GrayQueue *queue = ctx.queue;
3478 int nothing_marked = 1;
3479 EphemeronLinkNode *current = ephemeron_list;
3481 Ephemeron *cur, *array_end;
3484 for (current = ephemeron_list; current; current = current->next) {
3485 char *object = current->array;
3486 SGEN_LOG (5, "Ephemeron array at %p", object);
3488 /*It has to be alive*/
3489 if (!sgen_is_object_alive_for_current_gen (object)) {
3490 SGEN_LOG (5, "\tnot reachable");
/* Follow the (possibly moved) array before scanning its slots. */
3494 copy_func ((void**)&object, queue);
3496 array = (MonoArray*)object;
3497 cur = mono_array_addr (array, Ephemeron, 0);
3498 array_end = cur + mono_array_length_fast (array);
3499 tombstone = (char*)((MonoVTable*)LOAD_VTABLE (object))->domain->ephemeron_tombstone;
3501 for (; cur < array_end; ++cur) {
3502 char *key = cur->key;
3504 if (!key || key == tombstone)
3507 SGEN_LOG (5, "[%td] key %p (%s) value %p (%s)", cur - mono_array_addr (array, Ephemeron, 0),
3508 key, sgen_is_object_alive_for_current_gen (key) ? "reachable" : "unreachable",
3509 cur->value, cur->value && sgen_is_object_alive_for_current_gen (cur->value) ? "reachable" : "unreachable");
/* Reachable key: update the key pointer and mark the value if it was
 * not already alive (that marking is what defeats the fixed point). */
3511 if (sgen_is_object_alive_for_current_gen (key)) {
3512 char *value = cur->value;
3514 copy_func ((void**)&cur->key, queue);
3516 if (!sgen_is_object_alive_for_current_gen (value))
3518 copy_func ((void**)&cur->value, queue);
3524 SGEN_LOG (5, "Ephemeron run finished. Is it done %d", nothing_marked);
3525 return nothing_marked;
/*
 * Run pending finalizers one at a time: ordinary entries first, then
 * critical ones. Each iteration unlinks the previously finalized entry
 * (under the GC lock, presumably — TODO confirm; the lock/unlock lines are
 * missing from this extraction), picks the next entry with a non-NULL
 * object, and invokes its finalizer outside the lock.
 */
3529 mono_gc_invoke_finalizers (void)
3531 FinalizeReadyEntry *entry = NULL;
3532 gboolean entry_is_critical = FALSE;
3535 /* FIXME: batch to reduce lock contention */
3536 while (fin_ready_list || critical_fin_list) {
3540 FinalizeReadyEntry **list = entry_is_critical ? &critical_fin_list : &fin_ready_list;
3542 /* We have finalized entry in the last
3543 interation, now we need to remove it from
/* Fast path: the entry is still at the head of its list. */
3546 *list = entry->next;
/* Slow path: walk to find the predecessor and splice the entry out. */
3548 FinalizeReadyEntry *e = *list;
3549 while (e->next != entry)
3551 e->next = entry->next;
3553 sgen_free_internal (entry, INTERNAL_MEM_FINALIZE_READY_ENTRY);
3557 /* Now look for the first non-null entry. */
3558 for (entry = fin_ready_list; entry && !entry->object; entry = entry->next)
/* Ordinary list exhausted: fall back to the critical list. */
3561 entry_is_critical = FALSE;
3563 entry_is_critical = TRUE;
3564 for (entry = critical_fin_list; entry && !entry->object; entry = entry->next)
3569 g_assert (entry->object);
3570 num_ready_finalizers--;
3571 obj = entry->object;
/* Clear the slot so a concurrent requeue of the same object is visible. */
3572 entry->object = NULL;
3573 SGEN_LOG (7, "Finalizing object %p (%s)", obj, safe_name (obj));
3581 g_assert (entry->object == NULL);
3583 /* the object is on the stack so it is pinned */
3584 /*g_print ("Calling finalizer for object: %p (%s)\n", entry->object, safe_name (entry->object));*/
3585 mono_gc_run_finalize (obj, NULL);
/* Returns whether any finalizers are still queued. */
3592 mono_gc_pending_finalizers (void)
3594 return fin_ready_list || critical_fin_list;
3598 * ######################################################################
3599 * ######## registered roots support
3600 * ######################################################################
3604 * We do not coalesce roots.
/*
 * Register the range [start, start+size) as a GC root of @root_type with
 * descriptor @descr. If the range start is already registered under any
 * root type, its size/descriptor are updated in place instead.
 */
3607 mono_gc_register_root_inner (char *start, size_t size, void *descr, int root_type)
3609 RootRecord new_root;
3612 for (i = 0; i < ROOT_TYPE_NUM; ++i) {
3613 RootRecord *root = sgen_hash_table_lookup (&roots_hash [i], start);
3614 /* we allow changing the size and the descriptor (for thread statics etc) */
3616 size_t old_size = root->end_root - start;
3617 root->end_root = start + size;
/* A root must stay either precise (descriptor) or conservative (none). */
3618 g_assert (((root->root_desc != 0) && (descr != NULL)) ||
3619 ((root->root_desc == 0) && (descr == NULL)));
3620 root->root_desc = (mword)descr;
3622 roots_size -= old_size;
3628 new_root.end_root = start + size;
3629 new_root.root_desc = (mword)descr;
3631 sgen_hash_table_replace (&roots_hash [root_type], start, &new_root, NULL);
3634 SGEN_LOG (3, "Added root for range: %p-%p, descr: %p (%d/%d bytes)", start, new_root.end_root, descr, (int)size, (int)roots_size);
/* Public wrapper: a descriptor implies a precisely scanned root, its
 * absence a pinned (conservatively scanned) one. */
3641 mono_gc_register_root (char *start, size_t size, void *descr)
3643 return mono_gc_register_root_inner (start, size, descr, descr ? ROOT_TYPE_NORMAL : ROOT_TYPE_PINNED);
/* Public wrapper: root whose stores go through the write barrier. */
3647 mono_gc_register_root_wbarrier (char *start, size_t size, void *descr)
3649 return mono_gc_register_root_inner (start, size, descr, ROOT_TYPE_WBARRIER);
/* Remove @addr from every root table it appears in, adjusting roots_size. */
3653 mono_gc_deregister_root (char* addr)
3659 for (root_type = 0; root_type < ROOT_TYPE_NUM; ++root_type) {
3660 if (sgen_hash_table_remove (&roots_hash [root_type], addr, &root))
3661 roots_size -= (root.end_root - addr);
3667 * ######################################################################
3668 * ######## Thread handling (stop/start code)
3669 * ######################################################################
/* Global stop-the-world epoch counter. */
3672 unsigned int sgen_global_stop_count = 0;
/* Accessor for the generation currently being collected (-1 when idle). */
3675 sgen_get_current_collection_generation (void)
3677 return current_collection_generation;
/* Install the runtime's GC callbacks (copied by value). */
3681 mono_gc_set_gc_callbacks (MonoGCCallbacks *callbacks)
3683 gc_callbacks = *callbacks;
3687 mono_gc_get_gc_callbacks ()
3689 return &gc_callbacks;
3692 /* Variables holding start/end nursery so it won't have to be passed at every call */
3693 static void *scan_area_arg_start, *scan_area_arg_end;
/* Conservatively pin everything in [start, end) against the nursery range
 * stashed in the scan_area_arg_* globals. */
3696 mono_gc_conservatively_scan_area (void *start, void *end)
3698 conservatively_pin_objects_from (start, end, scan_area_arg_start, scan_area_arg_end, PIN_TYPE_STACK);
/* Precise-scan callback: copy/mark a single object reference. */
3702 mono_gc_scan_object (void *obj, void *gc_data)
3704 UserCopyOrMarkData *data = gc_data;
3705 current_object_ops.copy_or_mark_object (&obj, data->queue);
3710 * Mark from thread stacks and registers.
/*
 * Scan every registered thread's stack (precisely via the runtime callback
 * when available, otherwise conservatively) and, on the conservative path,
 * also its saved register state. Dead, GC-disabled and non-running threads
 * are skipped.
 * NOTE(review): some guard lines (e.g. the skip conditions' braces and
 * `continue`s) are missing from this extraction.
 */
3713 scan_thread_data (void *start_nursery, void *end_nursery, gboolean precise, GrayQueue *queue)
3715 SgenThreadInfo *info;
/* Publish the nursery bounds for mono_gc_conservatively_scan_area. */
3717 scan_area_arg_start = start_nursery;
3718 scan_area_arg_end = end_nursery;
3720 FOREACH_THREAD (info) {
3722 SGEN_LOG (3, "Skipping dead thread %p, range: %p-%p, size: %td", info, info->stack_start, info->stack_end, (char*)info->stack_end - (char*)info->stack_start);
3725 if (info->gc_disabled) {
3726 SGEN_LOG (3, "GC disabled for thread %p, range: %p-%p, size: %td", info, info->stack_start, info->stack_end, (char*)info->stack_end - (char*)info->stack_start);
3729 if (!mono_thread_info_is_live (info)) {
3730 SGEN_LOG (3, "Skipping non-running thread %p, range: %p-%p, size: %td (state %x)", info, info->stack_start, info->stack_end, (char*)info->stack_end - (char*)info->stack_start, info->info.thread_state);
3733 g_assert (info->suspend_done);
3734 SGEN_LOG (3, "Scanning thread %p, range: %p-%p, size: %td, pinned=%zd", info, info->stack_start, info->stack_end, (char*)info->stack_end - (char*)info->stack_start, sgen_get_pinned_count ());
3735 if (gc_callbacks.thread_mark_func && !conservative_stack_mark) {
3736 UserCopyOrMarkData data = { NULL, queue };
3737 gc_callbacks.thread_mark_func (info->runtime_data, info->stack_start, info->stack_end, precise, &data);
3738 } else if (!precise) {
/* No precise marker available: permanently fall back to conservative. */
3739 if (!conservative_stack_mark) {
3740 fprintf (stderr, "Precise stack mark not supported - disabling.\n");
3741 conservative_stack_mark = TRUE;
3743 conservatively_pin_objects_from (info->stack_start, info->stack_end, start_nursery, end_nursery, PIN_TYPE_STACK);
/* Conservatively treat the saved register state as potential pointers. */
3748 conservatively_pin_objects_from ((void**)&info->ctx, (void**)&info->ctx + ARCH_NUM_REGS,
3749 start_nursery, end_nursery, PIN_TYPE_STACK);
3751 conservatively_pin_objects_from ((void**)&info->regs, (void**)&info->regs + ARCH_NUM_REGS,
3752 start_nursery, end_nursery, PIN_TYPE_STACK);
3755 } END_FOREACH_THREAD
/* Returns whether @ptr lies within the current thread's stack, using the
 * address of a local as the live stack top. */
3759 ptr_on_stack (void *ptr)
3761 gpointer stack_start = &stack_start;
3762 SgenThreadInfo *info = mono_thread_info_current ();
3764 if (ptr >= stack_start && ptr < (gpointer)info->stack_end)
/*
 * Per-thread GC registration: initialize the thread's TLAB and scan state,
 * record it in TLS, determine its stack bounds (from the OS when available,
 * else page-aligned from @addr), and invoke the runtime attach callback.
 */
3770 sgen_thread_register (SgenThreadInfo* info, void *addr)
3773 guint8 *staddr = NULL;
3775 #ifndef HAVE_KW_THREAD
3776 info->tlab_start = info->tlab_next = info->tlab_temp_end = info->tlab_real_end = NULL;
3778 g_assert (!mono_native_tls_get_value (thread_info_key));
3779 mono_native_tls_set_value (thread_info_key, info);
3781 sgen_thread_info = info;
3784 #ifdef SGEN_POSIX_STW
3785 info->stop_count = -1;
3789 info->stack_start = NULL;
3790 info->stopped_ip = NULL;
3791 info->stopped_domain = NULL;
3793 memset (&info->ctx, 0, sizeof (MonoContext));
3795 memset (&info->regs, 0, sizeof (info->regs));
3798 sgen_init_tlab_info (info);
3800 binary_protocol_thread_register ((gpointer)mono_thread_info_get_tid (info));
3802 /* On win32, stack_start_limit should be 0, since the stack can grow dynamically */
3803 mono_thread_info_get_stack_bounds (&staddr, &stsize);
3806 info->stack_start_limit = staddr;
3808 info->stack_end = staddr + stsize;
/* OS bounds unavailable: round the caller-supplied address up to a page. */
3810 gsize stack_bottom = (gsize)addr;
3811 stack_bottom += 4095;
3812 stack_bottom &= ~4095;
3813 info->stack_end = (char*)stack_bottom;
3816 #ifdef HAVE_KW_THREAD
3817 stack_end = info->stack_end;
3820 SGEN_LOG (3, "registered thread %p (%p) stack end %p", info, (gpointer)mono_thread_info_get_tid (info), info->stack_end);
3822 if (gc_callbacks.thread_attach_func)
3823 info->runtime_data = gc_callbacks.thread_attach_func ();
/* Detach hook: if the thread still has a domain, run the full managed
 * thread detach (handles threads attached implicitly via native code). */
3828 sgen_thread_detach (SgenThreadInfo *p)
3830 /* If a delegate is passed to native code and invoked on a thread we dont
3831 * know about, the jit will register it with mono_jit_thread_attach, but
3832 * we have no way of knowing when that thread goes away. SGen has a TSD
3833 * so we assume that if the domain is still registered, we can detach
3836 if (mono_domain_get ())
3837 mono_thread_detach_internal (mono_thread_internal_current ());
/* Unregister a thread from the GC: clear TLS, queue runtime threads as
 * joinable, and run the runtime detach callback. */
3841 sgen_thread_unregister (SgenThreadInfo *p)
3843 MonoNativeThreadId tid;
3845 tid = mono_thread_info_get_tid (p);
3846 binary_protocol_thread_unregister ((gpointer)tid);
3847 SGEN_LOG (3, "unregister thread %p (%p)", p, (gpointer)tid);
3849 #ifndef HAVE_KW_THREAD
3850 mono_native_tls_set_value (thread_info_key, NULL);
3852 sgen_thread_info = NULL;
3855 if (p->info.runtime_thread)
3856 mono_threads_add_joinable_thread ((gpointer)tid);
3858 if (gc_callbacks.thread_detach_func) {
3859 gc_callbacks.thread_detach_func (p->runtime_data);
3860 p->runtime_data = NULL;
/* Attach hook: lazily create the per-thread runtime data if needed. */
3866 sgen_thread_attach (SgenThreadInfo *info)
3869 /*this is odd, can we get attached before the gc is inited?*/
3873 if (gc_callbacks.thread_attach_func && !info->runtime_data)
3874 info->runtime_data = gc_callbacks.thread_attach_func ();
/* Attach the calling thread to the runtime; returns non-zero on success. */
3877 mono_gc_register_thread (void *baseptr)
3879 return mono_thread_info_attach (baseptr) != NULL;
3883 * mono_gc_set_stack_end:
3885 * Set the end of the current threads stack to STACK_END. The stack space between
3886 * STACK_END and the real end of the threads stack will not be scanned during collections.
3889 mono_gc_set_stack_end (void *stack_end)
3891 SgenThreadInfo *info;
3894 info = mono_thread_info_current ();
/* Only shrink the scanned range — the new end must be below the old one. */
3896 g_assert (stack_end < info->stack_end);
3897 info->stack_end = stack_end;
3902 #if USE_PTHREAD_INTERCEPT
/* Thin pass-through wrappers kept for the pthread-intercept configuration;
 * only pthread_exit does extra work (detaching the thread info first). */
3906 mono_gc_pthread_create (pthread_t *new_thread, const pthread_attr_t *attr, void *(*start_routine)(void *), void *arg)
3908 return pthread_create (new_thread, attr, start_routine, arg);
3912 mono_gc_pthread_join (pthread_t thread, void **retval)
3914 return pthread_join (thread, retval);
3918 mono_gc_pthread_detach (pthread_t thread)
3920 return pthread_detach (thread);
3924 mono_gc_pthread_exit (void *retval)
/* Detach from the runtime before the thread disappears for good. */
3926 mono_thread_info_detach ();
3927 pthread_exit (retval);
3928 g_assert_not_reached ();
3931 #endif /* USE_PTHREAD_INTERCEPT */
3934 * ######################################################################
3935 * ######## Write barriers
3936 * ######################################################################
3940 * Note: the write barriers first do the needed GC work and then do the actual store:
3941 * this way the value is visible to the conservative GC scan after the write barrier
3942 * itself. If a GC interrupts the barrier in the middle, value will be kept alive by
3943 * the conservative scan, otherwise by the remembered set scan.
/* Write barrier for a single object field store. Stores into the nursery
 * need no remembering (the nursery is always fully scanned). */
3946 mono_gc_wbarrier_set_field (MonoObject *obj, gpointer field_ptr, MonoObject* value)
3948 HEAVY_STAT (++stat_wbarrier_set_field);
3949 if (ptr_in_nursery (field_ptr)) {
3950 *(void**)field_ptr = value;
3953 SGEN_LOG (8, "Adding remset at %p", field_ptr);
3955 binary_protocol_wbarrier (field_ptr, value, value->vtable);
3957 remset.wbarrier_set_field (obj, field_ptr, value);
/* Write barrier for a single array-slot reference store; same nursery
 * fast path as the field barrier. */
3961 mono_gc_wbarrier_set_arrayref (MonoArray *arr, gpointer slot_ptr, MonoObject* value)
3963 HEAVY_STAT (++stat_wbarrier_set_arrayref);
3964 if (ptr_in_nursery (slot_ptr)) {
3965 *(void**)slot_ptr = value;
3968 SGEN_LOG (8, "Adding remset at %p", slot_ptr);
3970 binary_protocol_wbarrier (slot_ptr, value, value->vtable);
3972 remset.wbarrier_set_arrayref (arr, slot_ptr, value);
/* Write barrier for copying @count reference slots between arrays. */
3976 mono_gc_wbarrier_arrayref_copy (gpointer dest_ptr, gpointer src_ptr, int count)
3978 HEAVY_STAT (++stat_wbarrier_arrayref_copy);
3979 /*This check can be done without taking a lock since dest_ptr array is pinned*/
3980 if (ptr_in_nursery (dest_ptr) || count <= 0) {
3981 mono_gc_memmove_aligned (dest_ptr, src_ptr, count * sizeof (gpointer));
3985 #ifdef SGEN_HEAVY_BINARY_PROTOCOL
3986 if (binary_protocol_is_heavy_enabled ()) {
3988 for (i = 0; i < count; ++i) {
3989 gpointer dest = (gpointer*)dest_ptr + i;
3990 gpointer obj = *((gpointer*)src_ptr + i);
3992 binary_protocol_wbarrier (dest, obj, (gpointer)LOAD_VTABLE (obj));
3997 remset.wbarrier_arrayref_copy (dest_ptr, src_ptr, count);
/*
 * Generic write barrier for a location that was already stored to
 * ("nostore" = the caller performed the store). Skips locations in the
 * nursery or on the stack; records old->old stores only while a concurrent
 * collection is in progress.
 */
4001 mono_gc_wbarrier_generic_nostore (gpointer ptr)
4005 HEAVY_STAT (++stat_wbarrier_generic_store);
4007 #ifdef XDOMAIN_CHECKS_IN_WBARRIER
4008 /* FIXME: ptr_in_heap must be called with the GC lock held */
4009 if (xdomain_checks && *(MonoObject**)ptr && ptr_in_heap (ptr)) {
4010 char *start = sgen_find_object_for_ptr (ptr);
4011 MonoObject *value = *(MonoObject**)ptr;
4015 MonoObject *obj = (MonoObject*)start;
4016 if (obj->vtable->domain != value->vtable->domain)
4017 g_assert (is_xdomain_ref_allowed (ptr, start, obj->vtable->domain));
4023 obj = *(gpointer*)ptr;
4025 binary_protocol_wbarrier (ptr, obj, (gpointer)LOAD_VTABLE (obj));
4027 if (ptr_in_nursery (ptr) || ptr_on_stack (ptr)) {
4028 SGEN_LOG (8, "Skipping remset at %p", ptr);
4033 * We need to record old->old pointer locations for the
4034 * concurrent collector.
4036 if (!ptr_in_nursery (obj) && !concurrent_collection_in_progress) {
4037 SGEN_LOG (8, "Skipping remset at %p", ptr);
4041 SGEN_LOG (8, "Adding remset at %p", ptr);
4043 remset.wbarrier_generic_nostore (ptr);
/* Store + barrier convenience wrapper: perform the store, then remember
 * the location only when the stored value lives in the nursery. */
4047 mono_gc_wbarrier_generic_store (gpointer ptr, MonoObject* value)
4049 SGEN_LOG (8, "Wbarrier store at %p to %p (%s)", ptr, value, value ? safe_name (value) : "null");
4050 SGEN_UPDATE_REFERENCE_ALLOW_NULL (ptr, value);
4051 if (ptr_in_nursery (value))
4052 mono_gc_wbarrier_generic_nostore (ptr);
/* Keep the value observable by a conservative scan across the barrier. */
4053 sgen_dummy_use (value);
4056 /* Same as mono_gc_wbarrier_generic_store () but performs the store
4057 * as an atomic operation with release semantics.
4060 mono_gc_wbarrier_generic_store_atomic (gpointer ptr, MonoObject *value)
4062 HEAVY_STAT (++stat_wbarrier_generic_store_atomic);
4064 SGEN_LOG (8, "Wbarrier atomic store at %p to %p (%s)", ptr, value, value ? safe_name (value) : "null");
4066 InterlockedWritePointer (ptr, value);
4068 if (ptr_in_nursery (value))
4069 mono_gc_wbarrier_generic_nostore (ptr);
4071 sgen_dummy_use (value);
/* Copy a value-type blob word by word, applying the write barrier only to
 * the words flagged as references in @bitmap.
 * NOTE(review): the loop header and the bitmap test/shift lines are missing
 * from this extraction. */
4074 void mono_gc_wbarrier_value_copy_bitmap (gpointer _dest, gpointer _src, int size, unsigned bitmap)
4076 mword *dest = _dest;
/* Reference word: store with the full barrier. */
4081 mono_gc_wbarrier_generic_store (dest, (MonoObject*)*src);
/* Plain word: raw store suffices. */
4083 SGEN_UPDATE_REFERENCE_ALLOW_NULL (dest, *src);
4086 size -= SIZEOF_VOID_P;
4091 #ifdef SGEN_HEAVY_BINARY_PROTOCOL
/* HANDLE_PTR is consumed by sgen-scan-object.h below: for every reference
 * slot in the scanned object, log a wbarrier record at the corresponding
 * offset inside @dest. */
4093 #define HANDLE_PTR(ptr,obj) do { \
4094 gpointer o = *(gpointer*)(ptr); \
4096 gpointer d = ((char*)dest) + ((char*)(ptr) - (char*)(obj)); \
4097 binary_protocol_wbarrier (d, o, (gpointer) LOAD_VTABLE (o)); \
/* Instantiate the generic object scanner (without vtable loading) to emit
 * heavy binary-protocol wbarrier records for a copied value. */
4102 scan_object_for_binary_protocol_copy_wbarrier (gpointer dest, char *start, mword desc)
4104 #define SCAN_OBJECT_NOVTABLE
4105 #include "sgen-scan-object.h"
/*
 * Write barrier for copying @count elements of value type @klass from
 * @src to @dest. Reference-free classes and nursery/stack destinations
 * take the plain memmove fast path.
 */
4110 mono_gc_wbarrier_value_copy (gpointer dest, gpointer src, int count, MonoClass *klass)
4112 HEAVY_STAT (++stat_wbarrier_value_copy);
4113 g_assert (klass->valuetype);
4115 SGEN_LOG (8, "Adding value remset at %p, count %d, descr %p for class %s (%p)", dest, count, klass->gc_descr, klass->name, klass);
4117 if (ptr_in_nursery (dest) || ptr_on_stack (dest) || !SGEN_CLASS_HAS_REFERENCES (klass)) {
4118 size_t element_size = mono_class_value_size (klass, NULL);
4119 size_t size = count * element_size;
4120 mono_gc_memmove_atomic (dest, src, size);
4124 #ifdef SGEN_HEAVY_BINARY_PROTOCOL
4125 if (binary_protocol_is_heavy_enabled ()) {
4126 size_t element_size = mono_class_value_size (klass, NULL);
4128 for (i = 0; i < count; ++i) {
4129 scan_object_for_binary_protocol_copy_wbarrier ((char*)dest + i * element_size,
/* The scanner expects an object start, hence the header adjustment. */
4130 (char*)src + i * element_size - sizeof (MonoObject),
4131 (mword) klass->gc_descr);
4136 remset.wbarrier_value_copy (dest, src, count, klass);
4140 * mono_gc_wbarrier_object_copy:
4142 * Write barrier to call when obj is the result of a clone or copy of an object.
/* Copies everything after the MonoObject header; destinations in the
 * nursery or on the stack need no remembered-set entry. */
4145 mono_gc_wbarrier_object_copy (MonoObject* obj, MonoObject *src)
4149 HEAVY_STAT (++stat_wbarrier_object_copy);
4151 if (ptr_in_nursery (obj) || ptr_on_stack (obj)) {
4152 size = mono_object_class (obj)->instance_size;
4153 mono_gc_memmove_aligned ((char*)obj + sizeof (MonoObject), (char*)src + sizeof (MonoObject),
4154 size - sizeof (MonoObject));
4158 #ifdef SGEN_HEAVY_BINARY_PROTOCOL
4159 if (binary_protocol_is_heavy_enabled ())
4160 scan_object_for_binary_protocol_copy_wbarrier (obj, (char*)src, (mword) src->vtable->gc_descr);
4163 remset.wbarrier_object_copy (obj, src);
4168 * ######################################################################
4169 * ######## Other mono public interface functions.
4170 * ######################################################################
/* Heap-walk support: references found while scanning one object are buffered
 * in fixed-size arrays and flushed to the user callback when full. */
4173 #define REFS_SIZE 128
4176 MonoGCReferences callback;
4180 MonoObject *refs [REFS_SIZE];
4181 uintptr_t offsets [REFS_SIZE];
/* HANDLE_PTR: invoked by the generated scanner for each reference slot.
 * Flushes the buffer via the callback when full (size reported only on the
 * first call for an object — `called` tracks that), then records the slot's
 * offset within the object and the referenced object itself. */
4185 #define HANDLE_PTR(ptr,obj) do { \
4187 if (hwi->count == REFS_SIZE) { \
4188 hwi->callback ((MonoObject*)start, mono_object_class (start), hwi->called? 0: size, hwi->count, hwi->refs, hwi->offsets, hwi->data); \
4192 hwi->offsets [hwi->count] = (char*)(ptr)-(char*)start; \
4193 hwi->refs [hwi->count++] = *(ptr); \
/* Gather all references of one object into `hwi` via the HANDLE_PTR macro
 * above; the scan loop is generated by sgen-scan-object.h. */
4198 collect_references (HeapWalkInfo *hwi, char *start, size_t size)
4200 mword desc = sgen_obj_get_descriptor (start);
4202 #include "sgen-scan-object.h"
/* Per-object iteration callback: collect references, then flush any
 * remainder.  Also fires once for reference-free objects that were never
 * reported (`!hwi->called`), so every live object reaches the user. */
4206 walk_references (char *start, size_t size, void *data)
4208 HeapWalkInfo *hwi = data;
4211 collect_references (hwi, start, size);
4212 if (hwi->count || !hwi->called)
4213 hwi->callback ((MonoObject*)start, mono_object_class (start), hwi->called? 0: size, hwi->count, hwi->refs, hwi->offsets, hwi->data);
4217 * mono_gc_walk_heap:
4218 * @flags: flags for future use
4219 * @callback: a function pointer called for each object in the heap
4220 * @data: a user data pointer that is passed to callback
4222 * This function can be used to iterate over all the live objects in the heap:
4223 * for each object, @callback is invoked, providing info about the object's
4224 * location in memory, its class, its size and the objects it references.
4225 * For each referenced object its offset from the object address is
4226 * reported in the offsets array.
4227 * The object references may be buffered, so the callback may be invoked
4228 * multiple times for the same object: in all but the first call, the size
4229 * argument will be zero.
4230 * Note that this function can be only called in the #MONO_GC_EVENT_PRE_START_WORLD
4231 * profiler event handler.
4233 * Returns: a non-zero value if the GC doesn't support heap walking
4236 mono_gc_walk_heap (int flags, MonoGCReferences callback, void *data)
4241 hwi.callback = callback;
/* Walk all three spaces: nursery, major heap, and large-object space.
 * Fragments are cleared first so the nursery scan sees only real objects. */
4244 sgen_clear_nursery_fragments ();
4245 sgen_scan_area_with_callback (nursery_section->data, nursery_section->end_data, walk_references, &hwi, FALSE);
4247 major_collector.iterate_objects (ITERATE_OBJECTS_SWEEP_ALL, walk_references, &hwi);
4248 sgen_los_iterate_objects (walk_references, &hwi);
/* Trigger an explicit, synchronous collection of `generation`. */
4254 mono_gc_collect (int generation)
4259 sgen_perform_collection (0, generation, "user request", TRUE);
/* SGen has two generations: 0 (nursery/minor) and 1 (major). */
4264 mono_gc_max_generation (void)
/* Number of collections performed so far; generation 0 reports minor GCs,
 * anything else reports major GCs. */
4270 mono_gc_collection_count (int generation)
4272 if (generation == 0)
4273 return gc_stats.minor_gc_count;
4274 return gc_stats.major_gc_count;
/* Approximate bytes in use: LOS + nursery fill + major-heap usage. */
4278 mono_gc_get_used_size (void)
4282 tot = los_memory_usage;
4283 tot += nursery_section->next_data - nursery_section->data;
4284 tot += major_collector.get_used_size ();
4285 /* FIXME: account for pinned objects */
/* Objects above this size are allocated in the large-object space. */
4291 mono_gc_get_los_limit (void)
4293 return MAX_SMALL_OBJ_SIZE;
/*
 * mono_gc_set_string_length:
 * Truncate `str` in place to `new_length` characters, zeroing the discarded
 * tail so SGen can reclaim it and the string stays NUL-delimited.
 */
4297 mono_gc_set_string_length (MonoString *str, gint32 new_length)
4299 mono_unichar2 *new_end = str->chars + new_length;
4301 /* zero the discarded string. This null-delimits the string and allows
4302 * the space to be reclaimed by SGen. */
/* With nursery canaries enabled, also zero the old canary bytes and rewrite
 * the canary just past the new terminator. */
4304 if (nursery_canaries_enabled () && sgen_ptr_in_nursery (str)) {
4305 CHECK_CANARY_FOR_OBJECT (str);
4306 memset (new_end, 0, (str->length - new_length + 1) * sizeof (mono_unichar2) + CANARY_SIZE);
4307 memcpy (new_end + 1 , CANARY_STRING, CANARY_SIZE);
4309 memset (new_end, 0, (str->length - new_length + 1) * sizeof (mono_unichar2));
4312 str->length = new_length;
/* SGen does not support user-defined mark routines. */
4316 mono_gc_user_markers_supported (void)
/* Liveness query for profiler use; body elided in this chunk. */
4322 mono_object_is_alive (MonoObject* o)
/* Generation of `obj`: 0 while in the nursery, otherwise the major gen. */
4328 mono_gc_get_generation (MonoObject *obj)
4330 if (ptr_in_nursery (obj))
/* Enable GC profiler events; SGen emits them unconditionally. */
4336 mono_gc_enable_events (void)
/* Register a weak reference (disappearing link) to `obj` at `link_addr`;
 * `track` selects long-weak semantics (tracks resurrection). */
4341 mono_gc_weak_link_add (void **link_addr, MonoObject *obj, gboolean track)
4343 sgen_register_disappearing_link (obj, link_addr, track, FALSE);
/* Unregister the weak reference stored at `link_addr`. */
4347 mono_gc_weak_link_remove (void **link_addr, gboolean track)
4349 sgen_register_disappearing_link (NULL, link_addr, track, FALSE);
/* Dereference a weak link without taking the GC lock.  The slot holds a
 * hidden (encoded) pointer; see the race discussion below. */
4353 mono_gc_weak_link_get (void **link_addr)
/* volatile: force a real re-read of the slot for the race check below. */
4355 void * volatile *link_addr_volatile;
4359 link_addr_volatile = link_addr;
4360 ptr = (void*)*link_addr_volatile;
4362 * At this point we have a hidden pointer. If the GC runs
4363 * here, it will not recognize the hidden pointer as a
4364 * reference, and if the object behind it is not referenced
4365 * elsewhere, it will be freed. Once the world is restarted
4366 * we reveal the pointer, giving us a pointer to a freed
4367 * object. To make sure we don't return it, we load the
4368 * hidden pointer again. If it's still the same, we can be
4369 * sure the object reference is valid.
4372 obj = (MonoObject*) REVEAL_POINTER (ptr);
/* Order the reveal before the bridge check and the re-read. */
4376 mono_memory_barrier ();
4379 * During the second bridge processing step the world is
4380 * running again. That step processes all weak links once
4381 * more to null those that refer to dead objects. Before that
4382 * is completed, those links must not be followed, so we
4383 * conservatively wait for bridge processing when any weak
4384 * link is dereferenced.
4386 if (G_UNLIKELY (bridge_processing_in_progress))
4387 mono_gc_wait_for_bridge_processing ();
/* Retry detection: if the slot changed, a GC intervened (see above). */
4389 if ((void*)*link_addr_volatile != ptr)
/* Register an ephemeron array with the collector by pushing a node onto the
 * global ephemeron list (caller is expected to hold the GC lock —
 * NOTE(review): locking is elided from this chunk; confirm at call sites). */
4396 mono_gc_ephemeron_array_add (MonoObject *obj)
4398 EphemeronLinkNode *node;
4402 node = sgen_alloc_internal (INTERNAL_MEM_EPHEMERON_LINK);
4407 node->array = (char*)obj;
4408 node->next = ephemeron_list;
4409 ephemeron_list = node;
4411 SGEN_LOG (5, "Registered ephemeron array %p", obj);
/* Allow/forbid synchronous (non-concurrent) major collections; only
 * meaningful when the concurrent major collector is active. */
4418 mono_gc_set_allow_synchronous_major (gboolean flag)
4420 if (!major_collector.is_concurrent)
4423 allow_synchronous_major = flag;
/* Run `func (data)` while holding the GC interruption lock and return its
 * result. */
4428 mono_gc_invoke_with_gc_lock (MonoGCLockedCallbackFunc func, void *data)
4432 result = func (data);
4433 UNLOCK_INTERRUPTION;
/* A thread counts as a "GC thread" if it is registered with the thread
 * machinery (has a SgenThreadInfo). */
4438 mono_gc_is_gc_thread (void)
4442 result = mono_thread_info_current () != NULL;
/* Critical methods must not be interrupted by the GC: the runtime's own
 * critical methods plus SGen's write barriers / managed allocators. */
4448 is_critical_method (MonoMethod *method)
4450 return mono_runtime_is_critical_method (method) || sgen_is_critical_method (method);
/*
 * sgen_env_var_error:
 * Print a warning about a bad value in environment variable `env_var` to
 * stderr; `description_format` is a printf-style message and `fallback`
 * (may be NULL) describes what the GC will do instead.
 */
4454 sgen_env_var_error (const char *env_var, const char *fallback, const char *description_format, ...)
4458 va_start (ap, description_format);
4460 fprintf (stderr, "Warning: In environment variable `%s': ", env_var);
4461 vfprintf (stderr, description_format, ap);
4463 fprintf (stderr, " - %s", fallback);
4464 fprintf (stderr, "\n");
/*
 * parse_double_in_interval:
 * Parse `opt` as a double into *result, warning (via sgen_env_var_error)
 * and failing when it is not a number or falls outside [min, max].
 */
4470 parse_double_in_interval (const char *env_var, const char *opt_name, const char *opt, double min, double max, double *result)
4473 double val = strtod (opt, &endptr);
/* endptr == opt means strtod consumed nothing: not a number. */
4474 if (endptr == opt) {
4475 sgen_env_var_error (env_var, "Using default value.", "`%s` must be a number.", opt_name);
4478 else if (val < min || val > max) {
4479 sgen_env_var_error (env_var, "Using default value.", "`%s` must be between %.2f - %.2f.", opt_name, min, max);
/* Callback for the threads machinery: is `info`'s thread inside a GC
 * critical region (must not be suspended mid-allocation)? */
4487 thread_in_critical_region (SgenThreadInfo *info)
4489 return info->in_critical_region;
/*
 * mono_gc_base_init:
 * One-time initialization of SGen: threading callbacks, allocators,
 * collector selection, and parsing of the MONO_GC_PARAMS / MONO_GC_DEBUG
 * environment variables.  Safe to race: the CAS on gc_initialized below
 * elects exactly one initializing thread.
 */
4493 mono_gc_base_init (void)
4495 MonoThreadInfoCallbacks cb;
4498 char *major_collector_opt = NULL;
4499 char *minor_collector_opt = NULL;
4500 size_t max_heap = 0;
4501 size_t soft_limit = 0;
4504 gboolean debug_print_allowance = FALSE;
4505 double allowance_ratio = 0, save_target = 0;
4506 gboolean cement_enabled = TRUE;
4508 mono_counters_init ();
/* gc_initialized: 0 = not started, -1 = in progress, other = done.
 * Only the thread that swings 0 -> -1 proceeds with initialization. */
4511 result = InterlockedCompareExchange (&gc_initialized, -1, 0);
4514 /* already inited */
4517 /* being inited by another thread */
4521 /* we will init it */
4524 g_assert_not_reached ();
4526 } while (result != 0);
4528 SGEN_TV_GETTIME (sgen_init_timestamp);
4530 LOCK_INIT (gc_mutex);
4532 pagesize = mono_pagesize ();
4533 gc_debug_file = stderr;
/* Hook SGen into the runtime's thread lifecycle. */
4535 cb.thread_register = sgen_thread_register;
4536 cb.thread_detach = sgen_thread_detach;
4537 cb.thread_unregister = sgen_thread_unregister;
4538 cb.thread_attach = sgen_thread_attach;
4539 cb.mono_method_is_critical = (gpointer)is_critical_method;
4540 cb.mono_thread_in_critical_region = thread_in_critical_region;
4542 cb.thread_exit = mono_gc_pthread_exit;
4543 cb.mono_gc_pthread_create = (gpointer)mono_gc_pthread_create;
4546 mono_threads_init (&cb, sizeof (SgenThreadInfo));
4548 LOCK_INIT (sgen_interruption_mutex);
/* First pass over MONO_GC_PARAMS: only extract the collector choices,
 * since they must be known before the rest of the options are applied. */
4550 if ((env = g_getenv (MONO_GC_PARAMS_NAME))) {
4551 opts = g_strsplit (env, ",", -1);
4552 for (ptr = opts; *ptr; ++ptr) {
4554 if (g_str_has_prefix (opt, "major=")) {
4555 opt = strchr (opt, '=') + 1;
4556 major_collector_opt = g_strdup (opt);
4557 } else if (g_str_has_prefix (opt, "minor=")) {
4558 opt = strchr (opt, '=') + 1;
4559 minor_collector_opt = g_strdup (opt);
/* Bring up SGen's internal subsystems before any GC-managed allocation. */
4567 sgen_init_internal_allocator ();
4568 sgen_init_nursery_allocator ();
4569 sgen_init_fin_weak_hash ();
4571 sgen_init_hash_table ();
4572 sgen_init_descriptors ();
4573 sgen_init_gray_queues ();
/* Fixed-size internal allocation classes used throughout the collector. */
4575 sgen_register_fixed_internal_mem_type (INTERNAL_MEM_SECTION, SGEN_SIZEOF_GC_MEM_SECTION);
4576 sgen_register_fixed_internal_mem_type (INTERNAL_MEM_FINALIZE_READY_ENTRY, sizeof (FinalizeReadyEntry));
4577 sgen_register_fixed_internal_mem_type (INTERNAL_MEM_GRAY_QUEUE, sizeof (GrayQueueSection));
4578 sgen_register_fixed_internal_mem_type (INTERNAL_MEM_EPHEMERON_LINK, sizeof (EphemeronLinkNode));
/* Per-thread SgenThreadInfo access: native TLS when __thread is missing. */
4580 #ifndef HAVE_KW_THREAD
4581 mono_native_tls_alloc (&thread_info_key, NULL);
4582 #if defined(__APPLE__) || defined (HOST_WIN32)
4584 * CEE_MONO_TLS requires the tls offset, not the key, so the code below only works on darwin,
4585 * where the two are the same.
4587 mono_tls_key_set_offset (TLS_KEY_SGEN_THREAD_INFO, thread_info_key);
4591 int tls_offset = -1;
4592 MONO_THREAD_VAR_OFFSET (sgen_thread_info, tls_offset);
4593 mono_tls_key_set_offset (TLS_KEY_SGEN_THREAD_INFO, tls_offset);
4598 * This needs to happen before any internal allocations because
4599 * it inits the small id which is required for hazard pointer
/* Attach the initializing thread itself to the thread machinery. */
4604 mono_thread_info_attach (&dummy);
/* Minor (nursery) collector selection: `simple` (default) or `split`.
 * Unknown names warn and fall back to `simple`. */
4606 if (!minor_collector_opt) {
4607 sgen_simple_nursery_init (&sgen_minor_collector);
4609 if (!strcmp (minor_collector_opt, "simple")) {
4611 sgen_simple_nursery_init (&sgen_minor_collector);
4612 } else if (!strcmp (minor_collector_opt, "split")) {
4613 sgen_split_nursery_init (&sgen_minor_collector);
4615 sgen_env_var_error (MONO_GC_PARAMS_NAME, "Using `simple` instead.", "Unknown minor collector `%s'.", minor_collector_opt);
4616 goto use_simple_nursery;
/* Major collector selection: `marksweep` (default) or `marksweep-conc`.
 * NOTE(review): the `!major_collector_opt` test on the marksweep-conc
 * branch is dead — the first branch already catches a NULL opt. */
4620 if (!major_collector_opt || !strcmp (major_collector_opt, "marksweep")) {
4621 use_marksweep_major:
4622 sgen_marksweep_init (&major_collector);
4623 } else if (!major_collector_opt || !strcmp (major_collector_opt, "marksweep-conc")) {
4624 sgen_marksweep_conc_init (&major_collector);
4626 sgen_env_var_error (MONO_GC_PARAMS_NAME, "Using `marksweep` instead.", "Unknown major collector `%s'.", major_collector_opt);
4627 goto use_marksweep_major;
4630 ///* Keep this the default for now */
4631 /* Precise marking is broken on all supported targets. Disable until fixed. */
4632 conservative_stack_mark = TRUE;
4634 sgen_nursery_size = DEFAULT_NURSERY_SIZE;
/* Cementing (pinning hot nursery objects) is incompatible with the
 * concurrent major collector, so switch the default off. */
4636 if (major_collector.is_concurrent)
4637 cement_enabled = FALSE;
/* Second pass over MONO_GC_PARAMS: apply the remaining options.
 * major=/minor= were consumed in the first pass and are skipped here. */
4640 gboolean usage_printed = FALSE;
4642 for (ptr = opts; *ptr; ++ptr) {
4644 if (!strcmp (opt, ""))
4646 if (g_str_has_prefix (opt, "major="))
4648 if (g_str_has_prefix (opt, "minor="))
/* max-heap-size=N: rounded up to a page multiple, with a warning. */
4650 if (g_str_has_prefix (opt, "max-heap-size=")) {
4651 size_t max_heap_candidate = 0;
4652 opt = strchr (opt, '=') + 1;
4653 if (*opt && mono_gc_parse_environment_string_extract_number (opt, &max_heap_candidate)) {
4654 max_heap = (max_heap_candidate + mono_pagesize () - 1) & ~(size_t)(mono_pagesize () - 1);
4655 if (max_heap != max_heap_candidate)
4656 sgen_env_var_error (MONO_GC_PARAMS_NAME, "Rounding up.", "`max-heap-size` size must be a multiple of %d.", mono_pagesize ());
4658 sgen_env_var_error (MONO_GC_PARAMS_NAME, NULL, "`max-heap-size` must be an integer.");
/* soft-heap-limit=N.  NOTE(review): soft_limit is size_t, so `<= 0`
 * only catches an explicit 0; negative inputs wrap before this check. */
4662 if (g_str_has_prefix (opt, "soft-heap-limit=")) {
4663 opt = strchr (opt, '=') + 1;
4664 if (*opt && mono_gc_parse_environment_string_extract_number (opt, &soft_limit)) {
4665 if (soft_limit <= 0) {
4666 sgen_env_var_error (MONO_GC_PARAMS_NAME, NULL, "`soft-heap-limit` must be positive.");
4670 sgen_env_var_error (MONO_GC_PARAMS_NAME, NULL, "`soft-heap-limit` must be an integer.");
/* stack-mark=precise|conservative. */
4674 if (g_str_has_prefix (opt, "stack-mark=")) {
4675 opt = strchr (opt, '=') + 1;
4676 if (!strcmp (opt, "precise")) {
4677 conservative_stack_mark = FALSE;
4678 } else if (!strcmp (opt, "conservative")) {
4679 conservative_stack_mark = TRUE;
4681 sgen_env_var_error (MONO_GC_PARAMS_NAME, conservative_stack_mark ? "Using `conservative`." : "Using `precise`.",
4682 "Invalid value `%s` for `stack-mark` option, possible values are: `precise`, `conservative`.", opt);
4686 if (g_str_has_prefix (opt, "bridge-implementation=")) {
4687 opt = strchr (opt, '=') + 1;
4688 sgen_set_bridge_implementation (opt);
4691 if (g_str_has_prefix (opt, "toggleref-test")) {
4692 sgen_register_test_toggleref_callback ();
/* nursery-size=N: must be a power of two and large enough to hold the
 * maximum nursery waste; also derives sgen_nursery_bits = log2(size). */
4697 if (g_str_has_prefix (opt, "nursery-size=")) {
4699 opt = strchr (opt, '=') + 1;
4700 if (*opt && mono_gc_parse_environment_string_extract_number (opt, &val)) {
4701 if ((val & (val - 1))) {
4702 sgen_env_var_error (MONO_GC_PARAMS_NAME, "Using default value.", "`nursery-size` must be a power of two.");
4706 if (val < SGEN_MAX_NURSERY_WASTE) {
4707 sgen_env_var_error (MONO_GC_PARAMS_NAME, "Using default value.",
4708 "`nursery-size` must be at least %d bytes.", SGEN_MAX_NURSERY_WASTE);
4712 sgen_nursery_size = val;
4713 sgen_nursery_bits = 0;
4714 while (ONE_P << (++ sgen_nursery_bits) != sgen_nursery_size)
4717 sgen_env_var_error (MONO_GC_PARAMS_NAME, "Using default value.", "`nursery-size` must be an integer.");
/* save-target-ratio=R: experimental memory-governor tuning knob. */
4723 if (g_str_has_prefix (opt, "save-target-ratio=")) {
4725 opt = strchr (opt, '=') + 1;
4726 if (parse_double_in_interval (MONO_GC_PARAMS_NAME, "save-target-ratio", opt,
4727 SGEN_MIN_SAVE_TARGET_RATIO, SGEN_MAX_SAVE_TARGET_RATIO, &val)) {
/* default-allowance-ratio=R: experimental knob setting the major-collection
 * allowance as a ratio of the nursery size.
 * BUG FIX: the upper bound passed to parse_double_in_interval was
 * SGEN_MIN_ALLOWANCE_NURSERY_SIZE_RATIO — the minimum constant repeated —
 * so every value except the minimum itself was rejected as out of range.
 * The usage text for this option advertises the [MIN, MAX] interval; pass
 * SGEN_MAX_ALLOWANCE_NURSERY_SIZE_RATIO as the upper bound. */
4732 if (g_str_has_prefix (opt, "default-allowance-ratio=")) {
4734 opt = strchr (opt, '=') + 1;
4735 if (parse_double_in_interval (MONO_GC_PARAMS_NAME, "default-allowance-ratio", opt,
4736 SGEN_MIN_ALLOWANCE_NURSERY_SIZE_RATIO, SGEN_MAX_ALLOWANCE_NURSERY_SIZE_RATIO, &val)) {
4737 allowance_ratio = val;
/* allow-synchronous-major=yes|no: only valid with the concurrent major. */
4741 if (g_str_has_prefix (opt, "allow-synchronous-major=")) {
4742 if (!major_collector.is_concurrent) {
4743 sgen_env_var_error (MONO_GC_PARAMS_NAME, "Ignoring.", "`allow-synchronous-major` is only valid for the concurrent major collector.");
4747 opt = strchr (opt, '=') + 1;
4749 if (!strcmp (opt, "yes")) {
4750 allow_synchronous_major = TRUE;
4751 } else if (!strcmp (opt, "no")) {
4752 allow_synchronous_major = FALSE;
4754 sgen_env_var_error (MONO_GC_PARAMS_NAME, "Using default value.", "`allow-synchronous-major` must be either `yes' or `no'.");
4759 if (!strcmp (opt, "cementing")) {
4760 cement_enabled = TRUE;
4763 if (!strcmp (opt, "no-cementing")) {
4764 cement_enabled = FALSE;
/* Give the selected collectors a chance to consume collector-specific
 * options before reporting the option as unknown. */
4768 if (major_collector.handle_gc_param && major_collector.handle_gc_param (opt))
4771 if (sgen_minor_collector.handle_gc_param && sgen_minor_collector.handle_gc_param (opt))
4774 sgen_env_var_error (MONO_GC_PARAMS_NAME, "Ignoring.", "Unknown option `%s`.", opt);
/* Unknown option: print the full usage text once per process. */
4779 fprintf (stderr, "\n%s must be a comma-delimited list of one or more of the following:\n", MONO_GC_PARAMS_NAME);
4780 fprintf (stderr, "  max-heap-size=N (where N is an integer, possibly with a k, m or a g suffix)\n");
4781 fprintf (stderr, "  soft-heap-limit=n (where N is an integer, possibly with a k, m or a g suffix)\n");
4782 fprintf (stderr, "  nursery-size=N (where N is an integer, possibly with a k, m or a g suffix)\n");
4783 fprintf (stderr, "  major=COLLECTOR (where COLLECTOR is `marksweep', `marksweep-conc', `marksweep-par')\n");
4784 fprintf (stderr, "  minor=COLLECTOR (where COLLECTOR is `simple' or `split')\n");
4785 fprintf (stderr, "  wbarrier=WBARRIER (where WBARRIER is `remset' or `cardtable')\n");
4786 fprintf (stderr, "  stack-mark=MARK-METHOD (where MARK-METHOD is 'precise' or 'conservative')\n");
4787 fprintf (stderr, "  [no-]cementing\n");
4788 if (major_collector.is_concurrent)
4789 fprintf (stderr, "  allow-synchronous-major=FLAG (where FLAG is `yes' or `no')\n");
4790 if (major_collector.print_gc_param_usage)
4791 major_collector.print_gc_param_usage ();
4792 if (sgen_minor_collector.print_gc_param_usage)
4793 sgen_minor_collector.print_gc_param_usage ();
4794 fprintf (stderr, " Experimental options:\n");
4795 fprintf (stderr, "  save-target-ratio=R (where R must be between %.2f - %.2f).\n", SGEN_MIN_SAVE_TARGET_RATIO, SGEN_MAX_SAVE_TARGET_RATIO);
4796 fprintf (stderr, "  default-allowance-ratio=R (where R must be between %.2f - %.2f).\n", SGEN_MIN_ALLOWANCE_NURSERY_SIZE_RATIO, SGEN_MAX_ALLOWANCE_NURSERY_SIZE_RATIO);
4797 fprintf (stderr, "\n");
4799 usage_printed = TRUE;
4804 if (major_collector_opt)
4805 g_free (major_collector_opt);
4807 if (minor_collector_opt)
4808 g_free (minor_collector_opt);
/* Enforce the cementing/concurrent incompatibility even if the user asked
 * for cementing explicitly. */
4812 if (major_collector.is_concurrent && cement_enabled) {
4813 sgen_env_var_error (MONO_GC_PARAMS_NAME, "Ignoring.", "`cementing` is not supported on concurrent major collectors.");
4814 cement_enabled = FALSE;
4817 sgen_cement_init (cement_enabled);
/* Parse MONO_GC_DEBUG: a comma-separated list of a debug level (0-9),
 * an optional log-file name, and named debug/verification options. */
4819 if ((env = g_getenv (MONO_GC_DEBUG_NAME))) {
4820 gboolean usage_printed = FALSE;
4822 opts = g_strsplit (env, ",", -1);
4823 for (ptr = opts; ptr && *ptr; ptr ++) {
4825 if (!strcmp (opt, ""))
/* Leading digit: the debug verbosity level. */
4827 if (opt [0] >= '0' && opt [0] <= '9') {
4828 gc_debug_level = atoi (opt);
/* Log file: opened as "<name>.<pid>"; falls back to stderr on failure. */
4833 char *rf = g_strdup_printf ("%s.%d", opt, mono_process_current_pid ());
4834 gc_debug_file = fopen (rf, "wb");
4836 gc_debug_file = stderr;
4839 } else if (!strcmp (opt, "print-allowance")) {
4840 debug_print_allowance = TRUE;
4841 } else if (!strcmp (opt, "print-pinning")) {
4842 do_pin_stats = TRUE;
/* verify/collect-before-allocs[=n]: run the check every n allocations. */
4843 } else if (!strcmp (opt, "verify-before-allocs")) {
4844 verify_before_allocs = 1;
4845 has_per_allocation_action = TRUE;
4846 } else if (g_str_has_prefix (opt, "verify-before-allocs=")) {
4847 char *arg = strchr (opt, '=') + 1;
4848 verify_before_allocs = atoi (arg);
4849 has_per_allocation_action = TRUE;
4850 } else if (!strcmp (opt, "collect-before-allocs")) {
4851 collect_before_allocs = 1;
4852 has_per_allocation_action = TRUE;
4853 } else if (g_str_has_prefix (opt, "collect-before-allocs=")) {
4854 char *arg = strchr (opt, '=') + 1;
4855 has_per_allocation_action = TRUE;
4856 collect_before_allocs = atoi (arg);
/* Heap-consistency and policy toggles; most simply set a global flag.
 * Several checks force CLEAR_AT_GC so stale nursery data cannot be
 * mistaken for live objects during verification. */
4857 } else if (!strcmp (opt, "verify-before-collections")) {
4858 whole_heap_check_before_collection = TRUE;
4859 } else if (!strcmp (opt, "check-at-minor-collections")) {
4860 consistency_check_at_minor_collection = TRUE;
4861 nursery_clear_policy = CLEAR_AT_GC;
4862 } else if (!strcmp (opt, "mod-union-consistency-check")) {
4863 if (!major_collector.is_concurrent) {
4864 sgen_env_var_error (MONO_GC_DEBUG_NAME, "Ignoring.", "`mod-union-consistency-check` only works with concurrent major collector.");
4867 mod_union_consistency_check = TRUE;
4868 } else if (!strcmp (opt, "check-mark-bits")) {
4869 check_mark_bits_after_major_collection = TRUE;
4870 } else if (!strcmp (opt, "check-nursery-pinned")) {
4871 check_nursery_objects_pinned = TRUE;
4872 } else if (!strcmp (opt, "xdomain-checks")) {
4873 xdomain_checks = TRUE;
4874 } else if (!strcmp (opt, "clear-at-gc")) {
4875 nursery_clear_policy = CLEAR_AT_GC;
4876 } else if (!strcmp (opt, "clear-nursery-at-gc")) {
4877 nursery_clear_policy = CLEAR_AT_GC;
4878 } else if (!strcmp (opt, "clear-at-tlab-creation")) {
4879 nursery_clear_policy = CLEAR_AT_TLAB_CREATION;
4880 } else if (!strcmp (opt, "debug-clear-at-tlab-creation")) {
4881 nursery_clear_policy = CLEAR_AT_TLAB_CREATION_DEBUG;
4882 } else if (!strcmp (opt, "check-scan-starts")) {
4883 do_scan_starts_check = TRUE;
4884 } else if (!strcmp (opt, "verify-nursery-at-minor-gc")) {
4885 do_verify_nursery = TRUE;
4886 } else if (!strcmp (opt, "check-concurrent")) {
4887 if (!major_collector.is_concurrent) {
4888 sgen_env_var_error (MONO_GC_DEBUG_NAME, "Ignoring.", "`check-concurrent` only works with concurrent major collectors.");
4891 do_concurrent_checks = TRUE;
4892 } else if (!strcmp (opt, "dump-nursery-at-minor-gc")) {
4893 do_dump_nursery_content = TRUE;
4894 } else if (!strcmp (opt, "no-managed-allocator")) {
4895 sgen_set_use_managed_allocator (FALSE);
4896 } else if (!strcmp (opt, "disable-minor")) {
4897 disable_minor_collections = TRUE;
4898 } else if (!strcmp (opt, "disable-major")) {
4899 disable_major_collections = TRUE;
/* heap-dump=<file>: XML-ish dump of the heap written at collections.
 * NOTE(review): fopen failure falls through silently (dump disabled). */
4900 } else if (g_str_has_prefix (opt, "heap-dump=")) {
4901 char *filename = strchr (opt, '=') + 1;
4902 nursery_clear_policy = CLEAR_AT_GC;
4903 heap_dump_file = fopen (filename, "w");
4904 if (heap_dump_file) {
4905 fprintf (heap_dump_file, "<sgen-dump>\n");
4906 do_pin_stats = TRUE;
/* binary-protocol=<file>[:<limit>]: binary event log with an optional
 * file-size limit after the last ':'. */
4908 } else if (g_str_has_prefix (opt, "binary-protocol=")) {
4909 char *filename = strchr (opt, '=') + 1;
4910 char *colon = strrchr (filename, ':');
4913 if (!mono_gc_parse_environment_string_extract_number (colon + 1, &limit)) {
4914 sgen_env_var_error (MONO_GC_DEBUG_NAME, "Ignoring limit.", "Binary protocol file size limit must be an integer.");
4919 binary_protocol_init (filename, (long long)limit);
/* nursery-canaries implies a verified nursery and no managed allocator. */
4920 } else if (!strcmp (opt, "nursery-canaries")) {
4921 do_verify_nursery = TRUE;
4922 sgen_set_use_managed_allocator (FALSE);
4923 enable_nursery_canaries = TRUE;
4924 } else if (!strcmp (opt, "do-not-finalize")) {
4925 do_not_finalize = TRUE;
4926 } else if (!strcmp (opt, "log-finalizers")) {
4927 log_finalizers = TRUE;
/* Last chance: let the bridge consume the option before warning. */
4928 } else if (!sgen_bridge_handle_gc_debug (opt)) {
4929 sgen_env_var_error (MONO_GC_DEBUG_NAME, "Ignoring.", "Unknown option `%s`.", opt);
/* Unknown MONO_GC_DEBUG option: print the usage text once per process. */
4934 fprintf (stderr, "\n%s must be of the format [<l>[:<filename>]|<option>]+ where <l> is a debug level 0-9.\n", MONO_GC_DEBUG_NAME);
4935 fprintf (stderr, "Valid <option>s are:\n");
4936 fprintf (stderr, "  collect-before-allocs[=<n>]\n");
4937 fprintf (stderr, "  verify-before-allocs[=<n>]\n");
4938 fprintf (stderr, "  check-at-minor-collections\n");
4939 fprintf (stderr, "  check-mark-bits\n");
4940 fprintf (stderr, "  check-nursery-pinned\n");
4941 fprintf (stderr, "  verify-before-collections\n");
4942 fprintf (stderr, "  verify-nursery-at-minor-gc\n");
4943 fprintf (stderr, "  dump-nursery-at-minor-gc\n");
4944 fprintf (stderr, "  disable-minor\n");
4945 fprintf (stderr, "  disable-major\n");
4946 fprintf (stderr, "  xdomain-checks\n");
4947 fprintf (stderr, "  check-concurrent\n");
4948 fprintf (stderr, "  clear-[nursery-]at-gc\n");
4949 fprintf (stderr, "  clear-at-tlab-creation\n");
4950 fprintf (stderr, "  debug-clear-at-tlab-creation\n");
4951 fprintf (stderr, "  check-scan-starts\n");
4952 fprintf (stderr, "  no-managed-allocator\n");
4953 fprintf (stderr, "  print-allowance\n");
4954 fprintf (stderr, "  print-pinning\n");
4955 fprintf (stderr, "  heap-dump=<filename>\n");
4956 fprintf (stderr, "  binary-protocol=<filename>[:<file-size-limit>]\n");
4957 fprintf (stderr, "  nursery-canaries\n");
4958 fprintf (stderr, "  do-not-finalize\n");
4959 fprintf (stderr, "  log-finalizers\n");
4960 sgen_bridge_print_gc_debug_usage ();
4961 fprintf (stderr, "\n");
4963 usage_printed = TRUE;
/* Mark-bit verification needs a cleared nursery to be reliable. */
4969 if (check_mark_bits_after_major_collection)
4970 nursery_clear_policy = CLEAR_AT_GC;
/* Let the major collector react to the fully-parsed parameters. */
4972 if (major_collector.post_param_init)
4973 major_collector.post_param_init (&major_collector);
4975 if (major_collector.needs_thread_pool)
4976 sgen_workers_init (1);
/* Hand the parsed sizing knobs to the memory governor and wire up the
 * card-table remembered set. */
4978 sgen_memgov_init (max_heap, soft_limit, debug_print_allowance, allowance_ratio, save_target);
4980 memset (&remset, 0, sizeof (remset));
4982 sgen_card_table_init (&remset);
/* Short identifier of this GC implementation ("sgen"). */
4988 mono_gc_get_gc_name (void)
/* Cached managed write-barrier wrappers, one per concurrency mode;
 * created lazily by mono_gc_get_specific_write_barrier below. */
4993 static MonoMethod *write_barrier_conc_method;
4994 static MonoMethod *write_barrier_noconc_method;
/* SGen's critical methods: the write-barrier wrappers and the managed
 * allocators must never be interrupted by a GC suspend. */
4997 sgen_is_critical_method (MonoMethod *method)
4999 return (method == write_barrier_conc_method || method == write_barrier_noconc_method || sgen_is_managed_allocator (method));
5003 sgen_has_critical_method (void)
5005 return write_barrier_conc_method || write_barrier_noconc_method || sgen_has_managed_allocator ();
/*
 * emit_nursery_check:
 * Emit the fast-path nursery tests at the top of the managed write barrier.
 * Fills nursery_check_return_labels with up to two forward branches that
 * the caller must patch to the barrier's early-return point.
 * The comparison works on addresses shifted right by DEFAULT_NURSERY_BITS:
 * equal shifted values means "same nursery-sized, nursery-aligned region".
 */
5011 emit_nursery_check (MonoMethodBuilder *mb, int *nursery_check_return_labels, gboolean is_concurrent)
5013 int shifted_nursery_start = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
5015 memset (nursery_check_return_labels, 0, sizeof (int) * 2);
5016 // if (ptr_in_nursery (ptr)) return;
5018 * Masking out the bits might be faster, but we would have to use 64 bit
5019 * immediates, which might be slower.
/* shifted_nursery_start = nursery_start >> DEFAULT_NURSERY_BITS */
5021 mono_mb_emit_byte (mb, MONO_CUSTOM_PREFIX);
5022 mono_mb_emit_byte (mb, CEE_MONO_LDPTR_NURSERY_START);
5023 mono_mb_emit_icon (mb, DEFAULT_NURSERY_BITS);
5024 mono_mb_emit_byte (mb, CEE_SHR_UN);
5025 mono_mb_emit_stloc (mb, shifted_nursery_start);
/* Return if the destination slot itself is in the nursery. */
5027 mono_mb_emit_ldarg (mb, 0);
5028 mono_mb_emit_icon (mb, DEFAULT_NURSERY_BITS);
5029 mono_mb_emit_byte (mb, CEE_SHR_UN);
5030 mono_mb_emit_ldloc (mb, shifted_nursery_start);
5031 nursery_check_return_labels [0] = mono_mb_emit_branch (mb, CEE_BEQ);
5033 if (!is_concurrent) {
5034 // if (!ptr_in_nursery (*ptr)) return;
/* Non-concurrent mode can also skip the card mark when the stored
 * value does not point into the nursery. */
5035 mono_mb_emit_ldarg (mb, 0);
5036 mono_mb_emit_byte (mb, CEE_LDIND_I);
5037 mono_mb_emit_icon (mb, DEFAULT_NURSERY_BITS);
5038 mono_mb_emit_byte (mb, CEE_SHR_UN);
5039 mono_mb_emit_ldloc (mb, shifted_nursery_start);
5040 nursery_check_return_labels [1] = mono_mb_emit_branch (mb, CEE_BNE_UN);
/*
 * mono_gc_get_specific_write_barrier:
 * Build (or return the cached) managed IL write-barrier wrapper for the
 * requested concurrency mode.  The managed fast path marks the card for the
 * written slot; the slow path falls back to mono_gc_wbarrier_generic_nostore.
 * Creation is racy by design: losers of the publish race free their copy
 * (double-checked locking via mono_memory_barrier below).
 */
5046 mono_gc_get_specific_write_barrier (gboolean is_concurrent)
5049 MonoMethodBuilder *mb;
5050 MonoMethodSignature *sig;
5051 MonoMethod **write_barrier_method_addr;
5052 #ifdef MANAGED_WBARRIER
5053 int i, nursery_check_labels [2];
5055 #ifdef HAVE_KW_THREAD
5056 int stack_end_offset = -1;
5058 MONO_THREAD_VAR_OFFSET (stack_end, stack_end_offset);
5059 g_assert (stack_end_offset != -1);
5063 // FIXME: Maybe create a separate version for ctors (the branch would be
5064 // correctly predicted more times)
/* Pick the cache slot for the requested mode. */
5066 write_barrier_method_addr = &write_barrier_conc_method;
5068 write_barrier_method_addr = &write_barrier_noconc_method;
5070 if (*write_barrier_method_addr)
5071 return *write_barrier_method_addr;
5073 /* Create the IL version of mono_gc_barrier_generic_store () */
5074 sig = mono_metadata_signature_alloc (mono_defaults.corlib, 1);
5075 sig->ret = &mono_defaults.void_class->byval_arg;
5076 sig->params [0] = &mono_defaults.int_class->byval_arg;
5079 mb = mono_mb_new (mono_defaults.object_class, "wbarrier_conc", MONO_WRAPPER_WRITE_BARRIER);
5081 mb = mono_mb_new (mono_defaults.object_class, "wbarrier_noconc", MONO_WRAPPER_WRITE_BARRIER);
5084 #ifdef MANAGED_WBARRIER
5085 emit_nursery_check (mb, nursery_check_labels, is_concurrent);
5087 addr = sgen_cardtable + ((address >> CARD_BITS) & CARD_MASK)
5091 LDC_PTR sgen_cardtable
5093 address >> CARD_BITS
5097 if (SGEN_HAVE_OVERLAPPING_CARDS) {
5098 LDC_PTR card_table_mask
/* Emit: *(sgen_cardtable + ((ptr >> CARD_BITS) [& CARD_MASK])) = 1 */
5105 mono_mb_emit_byte (mb, MONO_CUSTOM_PREFIX);
5106 mono_mb_emit_byte (mb, CEE_MONO_LDPTR_CARD_TABLE);
5107 mono_mb_emit_ldarg (mb, 0);
5108 mono_mb_emit_icon (mb, CARD_BITS);
5109 mono_mb_emit_byte (mb, CEE_SHR_UN);
5110 mono_mb_emit_byte (mb, CEE_CONV_I);
5111 #ifdef SGEN_HAVE_OVERLAPPING_CARDS
5112 #if SIZEOF_VOID_P == 8
5113 mono_mb_emit_icon8 (mb, CARD_MASK);
5115 mono_mb_emit_icon (mb, CARD_MASK);
5117 mono_mb_emit_byte (mb, CEE_CONV_I);
5118 mono_mb_emit_byte (mb, CEE_AND);
5120 mono_mb_emit_byte (mb, CEE_ADD);
5121 mono_mb_emit_icon (mb, 1);
5122 mono_mb_emit_byte (mb, CEE_STIND_I1);
/* Patch the early-return branches from the nursery checks. */
5125 for (i = 0; i < 2; ++i) {
5126 if (nursery_check_labels [i])
5127 mono_mb_patch_branch (mb, nursery_check_labels [i]);
5129 mono_mb_emit_byte (mb, CEE_RET);
/* No managed barrier available: call the C slow path. */
5131 mono_mb_emit_ldarg (mb, 0);
5132 mono_mb_emit_icall (mb, mono_gc_wbarrier_generic_nostore);
5133 mono_mb_emit_byte (mb, CEE_RET);
5136 res = mono_mb_create_method (mb, sig, 16);
5140 if (*write_barrier_method_addr) {
5141 /* Already created */
5142 mono_free_method (res);
5144 /* double-checked locking */
5145 mono_memory_barrier ();
5146 *write_barrier_method_addr = res;
5150 return *write_barrier_method_addr;
/* Default write barrier: matches the active major collector's mode. */
5154 mono_gc_get_write_barrier (void)
5156 return mono_gc_get_specific_write_barrier (major_collector.is_concurrent);
/* Human-readable GC description; caller owns the returned string. */
5160 mono_gc_get_description (void)
5162 return g_strdup ("sgen");
/* No-op for SGen (Boehm-era knob kept for API compatibility). */
5166 mono_gc_set_desktop_mode (void)
/* SGen is a moving collector. */
5171 mono_gc_is_moving (void)
5177 mono_gc_is_disabled (void)
/* Windows DLL entry hook; body elided in this chunk. */
5183 BOOL APIENTRY mono_gc_dllmain (HMODULE module_handle, DWORD reason, LPVOID reserved)
/* When the nursery memory is zeroed: at GC time or at TLAB creation. */
5190 sgen_get_nursery_clear_policy (void)
5192 return nursery_clear_policy;
/*
 * sgen_get_array_fill_vtable:
 * Lazily build and cache a fake byte-array vtable used to fill dead regions
 * of the heap so they parse as valid objects during scanning.
 * NOTE(review): the lazy init is unsynchronized here; presumably callers
 * hold the GC lock — confirm at call sites.
 */
5196 sgen_get_array_fill_vtable (void)
5198 if (!array_fill_vtable) {
5199 static MonoClass klass;
/* +8 slack so the vtable can be 8-byte aligned within the buffer. */
5200 static char _vtable[sizeof(MonoVTable)+8];
5201 MonoVTable* vtable = (MonoVTable*) ALIGN_TO(_vtable, 8);
5204 MonoDomain *domain = mono_get_root_domain ();
5207 klass.element_class = mono_defaults.byte_class;
5209 klass.instance_size = sizeof (MonoArray);
5210 klass.sizes.element_size = 1;
5211 klass.name = "array_filler_type";
5213 vtable->klass = &klass;
/* Descriptor for a byte array: no references to scan. */
5215 vtable->gc_descr = mono_gc_make_descr_for_array (TRUE, &bmap, 0, 1);
5218 array_fill_vtable = vtable;
5220 return array_fill_vtable;
/* Release the GC lock; if a deferred hazard-pointer free was requested
 * while the lock was held, perform it after unlocking (the flag is read
 * and cleared before the unlock so it is sampled under the lock). */
5230 sgen_gc_unlock (void)
5232 gboolean try_free = sgen_try_free_some_memory;
5233 sgen_try_free_some_memory = FALSE;
5234 mono_mutex_unlock (&gc_mutex);
5235 MONO_GC_UNLOCKED ();
5237 mono_thread_hazardous_try_free_some ();
/* Forward card-table block iteration to the major collector. */
5241 sgen_major_collector_iterate_live_block_ranges (sgen_cardtable_block_callback callback)
5243 major_collector.iterate_live_block_ranges (callback);
/* Scan dirty cards of the major heap into `queue` (non-mod-union mode). */
5247 sgen_major_collector_scan_card_table (SgenGrayQueue *queue)
5249 major_collector.scan_card_table (FALSE, queue);
5253 sgen_get_major_collector (void)
5255 return &major_collector;
/* Mark the current thread as skipped (or not) by GC suspension. */
5258 void mono_gc_set_skip_thread (gboolean skip)
5260 SgenThreadInfo *info = mono_thread_info_current ();
5263 info->gc_disabled = skip;
/* Accessor for the active remembered-set implementation. */
5268 sgen_get_remset (void)
/* Compute the GC-specific bits stored in a class's vtable: bridge kind
 * (transparent/opaque) and finalizer-awareness. */
5274 mono_gc_get_vtable_bits (MonoClass *class)
5277 /* FIXME move this to the bridge code */
5278 if (sgen_need_bridge_processing ()) {
5279 switch (sgen_bridge_class_kind (class)) {
5280 case GC_BRIDGE_TRANSPARENT_BRIDGE_CLASS:
5281 case GC_BRIDGE_OPAQUE_BRIDGE_CLASS:
5282 res = SGEN_GC_BIT_BRIDGE_OBJECT;
5284 case GC_BRIDGE_OPAQUE_CLASS:
5285 res = SGEN_GC_BIT_BRIDGE_OPAQUE_OBJECT;
5287 case GC_BRIDGE_TRANSPARENT_CLASS:
5291 if (fin_callbacks.is_class_finalization_aware) {
5292 if (fin_callbacks.is_class_finalization_aware (class))
5293 res |= SGEN_GC_BIT_FINALIZER_AWARE;
/* Altstack registration is a no-op for SGen. */
5299 mono_gc_register_altstack (gpointer stack, gint32 stack_size, gpointer altstack, gint32 altstack_size)
/* Debug helper: verify the whole heap with the world stopped. */
5306 sgen_check_whole_heap_stw (void)
5308 sgen_stop_world (0);
5309 sgen_clear_nursery_fragments ();
5310 sgen_check_whole_heap (FALSE);
5311 sgen_restart_world (0, NULL);
/* Flush buffered object-move events to the profiler. */
5315 sgen_gc_event_moves (void)
5317 if (moved_objects_idx) {
5318 mono_profiler_gc_moves (moved_objects, moved_objects_idx);
5319 moved_objects_idx = 0;
/* Elapsed time since GC init, in SGEN_TV units. */
5324 sgen_timestamp (void)
5326 SGEN_TV_DECLARE (timestamp);
5327 SGEN_TV_GETTIME (timestamp);
5328 return SGEN_TV_ELAPSED (sgen_init_timestamp, timestamp);
/* Install embedder finalizer callbacks; aborts on a version mismatch so an
 * ABI drift cannot be silently ignored. */
5332 mono_gc_register_finalizer_callbacks (MonoGCFinalizerCallbacks *callbacks)
5334 if (callbacks->version != MONO_GC_FINALIZER_EXTENSION_VERSION)
5335 g_error ("Invalid finalizer callback version. Expected %d but got %d\n", MONO_GC_FINALIZER_EXTENSION_VERSION, callbacks->version);
5337 fin_callbacks = *callbacks;
5344 #endif /* HAVE_SGEN_GC */