2 * sgen-gc.c: Simple generational GC.
5 * Paolo Molaro (lupus@ximian.com)
6 * Rodrigo Kumpera (kumpera@gmail.com)
8 * Copyright 2005-2011 Novell, Inc (http://www.novell.com)
9 * Copyright 2011 Xamarin Inc (http://www.xamarin.com)
11 * Thread start/stop adapted from Boehm's GC:
12 * Copyright (c) 1994 by Xerox Corporation. All rights reserved.
13 * Copyright (c) 1996 by Silicon Graphics. All rights reserved.
14 * Copyright (c) 1998 by Fergus Henderson. All rights reserved.
15 * Copyright (c) 2000-2004 by Hewlett-Packard Company. All rights reserved.
16 * Copyright 2001-2003 Ximian, Inc
17 * Copyright 2003-2010 Novell, Inc.
18 * Copyright 2011 Xamarin, Inc.
19 * Copyright (C) 2012 Xamarin Inc
21 * This library is free software; you can redistribute it and/or
22 * modify it under the terms of the GNU Library General Public
23 * License 2.0 as published by the Free Software Foundation;
25 * This library is distributed in the hope that it will be useful,
26 * but WITHOUT ANY WARRANTY; without even the implied warranty of
27 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
28 * Library General Public License for more details.
30 * You should have received a copy of the GNU Library General Public
31 * License 2.0 along with this library; if not, write to the Free
32 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
34 * Important: allocation provides always zeroed memory, having to do
35 * a memset after allocation is deadly for performance.
36 * Memory usage at startup is currently as follows:
38 * 64 KB internal space
40 * We should provide a small memory config with half the sizes
42 * We currently try to make as few mono assumptions as possible:
43 * 1) 2-word header with no GC pointers in it (first vtable, second to store the
45 * 2) gc descriptor is the second word in the vtable (first word in the class)
46 * 3) 8 byte alignment is the minimum and enough (not true for special structures (SIMD), FIXME)
47 * 4) there is a function to get an object's size and the number of
48 * elements in an array.
49 * 5) we know the special way bounds are allocated for complex arrays
50 * 6) we know about proxies and how to treat them when domains are unloaded
52 * Always try to keep stack usage to a minimum: no recursive behaviour
53 * and no large stack allocs.
55 * General description.
56 * Objects are initially allocated in a nursery using a fast bump-pointer technique.
57 * When the nursery is full we start a nursery collection: this is performed with a
59 * When the old generation is full we start a copying GC of the old generation as well:
60 * this will be changed to mark&sweep with copying when fragmentation becomes too severe
61 * in the future. Maybe we'll even do both during the same collection like IMMIX.
63 * The things that complicate this description are:
64 * *) pinned objects: we can't move them so we need to keep track of them
65 * *) no precise info of the thread stacks and registers: we need to be able to
66 * quickly find the objects that may be referenced conservatively and pin them
67 * (this makes the first issues more important)
68 * *) large objects are too expensive to be dealt with using copying GC: we handle them
69 * with mark/sweep during major collections
70 * *) some objects need to not move even if they are small (interned strings, Type handles):
71 * we use mark/sweep for them, too: they are not allocated in the nursery, but inside
72 * PinnedChunks regions
78 *) we could have a function pointer in MonoClass to implement
79 customized write barriers for value types
81 *) investigate the stuff needed to advance a thread to a GC-safe
82 point (single-stepping, read from unmapped memory etc) and implement it.
83 This would enable us to inline allocations and write barriers, for example,
84 or at least parts of them, like the write barrier checks.
85 We may need this also for handling precise info on stacks, even simple things
86 as having uninitialized data on the stack and having to wait for the prolog
87 to zero it. Not an issue for the last frame that we scan conservatively.
88 We could always not trust the value in the slots anyway.
90 *) modify the jit to save info about references in stack locations:
91 this can be done just for locals as a start, so that at least
92 part of the stack is handled precisely.
94 *) test/fix endianness issues
96 *) Implement a card table as the write barrier instead of remembered
97 sets? Card tables are not easy to implement with our current
98 memory layout. We have several different kinds of major heap
99 objects: Small objects in regular blocks, small objects in pinned
100 chunks and LOS objects. If we just have a pointer we have no way
101 to tell which kind of object it points into, therefore we cannot
102 know where its card table is. The least we have to do to make
103 this happen is to get rid of write barriers for indirect stores.
106 *) Get rid of write barriers for indirect stores. We can do this by
107 telling the GC to wbarrier-register an object once we do an ldloca
108 or ldelema on it, and to unregister it once it's not used anymore
109 (it can only travel downwards on the stack). The problem with
110 unregistering is that it needs to happen eventually no matter
111 what, even if exceptions are thrown, the thread aborts, etc.
112 Rodrigo suggested that we could do only the registering part and
113 let the collector find out (pessimistically) when it's safe to
114 unregister, namely when the stack pointer of the thread that
115 registered the object is higher than it was when the registering
116 happened. This might make for a good first implementation to get
117 some data on performance.
119 *) Some sort of blacklist support? Blacklists is a concept from the
120 Boehm GC: if during a conservative scan we find pointers to an
121 area which we might use as heap, we mark that area as unusable, so
122 pointer retention by random pinning pointers is reduced.
124 *) experiment with max small object size (very small right now - 2kb,
125 because it's tied to the max freelist size)
127 *) add an option to mmap the whole heap in one chunk: it makes for many
128 simplifications in the checks (put the nursery at the top and just use a single
129 check for inclusion/exclusion): the issue this has is that on 32 bit systems it's
130 not flexible (too much of the address space may be used by default or we can't
131 increase the heap as needed) and we'd need a race-free mechanism to return memory
132 back to the system (mprotect(PROT_NONE) will still keep the memory allocated if it
133 was written to, munmap is needed, but the following mmap may not find the same segment
136 *) memzero the major fragments after restarting the world and optionally a smaller
139 *) investigate having fragment zeroing threads
141 *) separate locks for finalization and other minor stuff to reduce
144 *) try a different copying order to improve memory locality
146 *) a thread abort after a store but before the write barrier will
147 prevent the write barrier from executing
149 *) specialized dynamically generated markers/copiers
151 *) Dynamically adjust TLAB size to the number of threads. If we have
152 too many threads that do allocation, we might need smaller TLABs,
153 and we might get better performance with larger TLABs if we only
154 have a handful of threads. We could sum up the space left in all
155 assigned TLABs and if that's more than some percentage of the
156 nursery size, reduce the TLAB size.
158 *) Explore placing unreachable objects on unused nursery memory.
159 Instead of memset'ing a region to zero, place an int[] covering it.
160 A good place to start is add_nursery_frag. The tricky thing here is
161 placing those objects atomically outside of a collection.
163 *) Allocation should use asymmetric Dekker synchronization:
164 http://blogs.oracle.com/dave/resource/Asymmetric-Dekker-Synchronization.txt
165 This should help weak consistency archs.
172 #define _XOPEN_SOURCE
173 #define _DARWIN_C_SOURCE
179 #ifdef HAVE_PTHREAD_H
182 #ifdef HAVE_PTHREAD_NP_H
183 #include <pthread_np.h>
185 #ifdef HAVE_SEMAPHORE_H
186 #include <semaphore.h>
194 #include "metadata/sgen-gc.h"
195 #include "metadata/metadata-internals.h"
196 #include "metadata/class-internals.h"
197 #include "metadata/gc-internal.h"
198 #include "metadata/object-internals.h"
199 #include "metadata/threads.h"
200 #include "metadata/sgen-cardtable.h"
201 #include "metadata/sgen-protocol.h"
202 #include "metadata/sgen-archdep.h"
203 #include "metadata/sgen-bridge.h"
204 #include "metadata/sgen-memory-governor.h"
205 #include "metadata/sgen-hash-table.h"
206 #include "metadata/mono-gc.h"
207 #include "metadata/method-builder.h"
208 #include "metadata/profiler-private.h"
209 #include "metadata/monitor.h"
210 #include "metadata/mempool-internals.h"
211 #include "metadata/marshal.h"
212 #include "metadata/runtime.h"
213 #include "metadata/sgen-cardtable.h"
214 #include "metadata/sgen-pinning.h"
215 #include "metadata/sgen-workers.h"
216 #include "metadata/sgen-layout-stats.h"
217 #include "utils/mono-mmap.h"
218 #include "utils/mono-time.h"
219 #include "utils/mono-semaphore.h"
220 #include "utils/mono-counters.h"
221 #include "utils/mono-proclib.h"
222 #include "utils/mono-memory-model.h"
223 #include "utils/mono-logger-internal.h"
224 #include "utils/dtrace.h"
226 #include <mono/utils/mono-logger-internal.h>
227 #include <mono/utils/memcheck.h>
229 #if defined(__MACH__)
230 #include "utils/mach-support.h"
233 #define OPDEF(a,b,c,d,e,f,g,h,i,j) \
237 #include "mono/cil/opcode.def"
243 #undef pthread_create
245 #undef pthread_detach
248 * ######################################################################
249 * ######## Types and constants used by the GC.
250 * ######################################################################
253 /* 0 means not initialized, 1 is initialized, -1 means in progress */
254 static int gc_initialized = 0;
255 /* If set, check if we need to do something every X allocations */
256 gboolean has_per_allocation_action;
257 /* If set, do a heap check every X allocation */
258 guint32 verify_before_allocs = 0;
259 /* If set, do a minor collection before every X allocation */
260 guint32 collect_before_allocs = 0;
261 /* If set, do a whole heap check before each collection */
262 static gboolean whole_heap_check_before_collection = FALSE;
263 /* If set, do a heap consistency check before each minor collection */
264 static gboolean consistency_check_at_minor_collection = FALSE;
265 /* If set, do a mod union consistency check before each finishing collection pause */
266 static gboolean mod_union_consistency_check = FALSE;
267 /* If set, check whether mark bits are consistent after major collections */
268 static gboolean check_mark_bits_after_major_collection = FALSE;
269 /* If set, check that all nursery objects are pinned/not pinned, depending on context */
270 static gboolean check_nursery_objects_pinned = FALSE;
271 /* If set, do a few checks when the concurrent collector is used */
272 static gboolean do_concurrent_checks = FALSE;
273 /* If set, check that there are no references to the domain left at domain unload */
274 static gboolean xdomain_checks = FALSE;
275 /* If not null, dump the heap after each collection into this file */
276 static FILE *heap_dump_file = NULL;
277 /* If set, mark stacks conservatively, even if precise marking is possible */
278 static gboolean conservative_stack_mark = FALSE;
279 /* If set, do a plausibility check on the scan_starts before and after
281 static gboolean do_scan_starts_check = FALSE;
283 * If the major collector is concurrent and this is FALSE, we will
284 * never initiate a synchronous major collection, unless requested via
287 static gboolean allow_synchronous_major = TRUE;
288 static gboolean nursery_collection_is_parallel = FALSE;
289 static gboolean disable_minor_collections = FALSE;
290 static gboolean disable_major_collections = FALSE;
291 gboolean do_pin_stats = FALSE;
292 static gboolean do_verify_nursery = FALSE;
293 static gboolean do_dump_nursery_content = FALSE;
295 #ifdef HEAVY_STATISTICS
296 long long stat_objects_alloced_degraded = 0;
297 long long stat_bytes_alloced_degraded = 0;
299 long long stat_copy_object_called_nursery = 0;
300 long long stat_objects_copied_nursery = 0;
301 long long stat_copy_object_called_major = 0;
302 long long stat_objects_copied_major = 0;
304 long long stat_scan_object_called_nursery = 0;
305 long long stat_scan_object_called_major = 0;
307 long long stat_slots_allocated_in_vain;
309 long long stat_nursery_copy_object_failed_from_space = 0;
310 long long stat_nursery_copy_object_failed_forwarded = 0;
311 long long stat_nursery_copy_object_failed_pinned = 0;
312 long long stat_nursery_copy_object_failed_to_space = 0;
314 static int stat_wbarrier_add_to_global_remset = 0;
315 static int stat_wbarrier_set_field = 0;
316 static int stat_wbarrier_set_arrayref = 0;
317 static int stat_wbarrier_arrayref_copy = 0;
318 static int stat_wbarrier_generic_store = 0;
319 static int stat_wbarrier_generic_store_atomic = 0;
320 static int stat_wbarrier_set_root = 0;
321 static int stat_wbarrier_value_copy = 0;
322 static int stat_wbarrier_object_copy = 0;
325 static long long stat_pinned_objects = 0;
327 static long long time_minor_pre_collection_fragment_clear = 0;
328 static long long time_minor_pinning = 0;
329 static long long time_minor_scan_remsets = 0;
330 static long long time_minor_scan_pinned = 0;
331 static long long time_minor_scan_registered_roots = 0;
332 static long long time_minor_scan_thread_data = 0;
333 static long long time_minor_finish_gray_stack = 0;
334 static long long time_minor_fragment_creation = 0;
336 static long long time_major_pre_collection_fragment_clear = 0;
337 static long long time_major_pinning = 0;
338 static long long time_major_scan_pinned = 0;
339 static long long time_major_scan_registered_roots = 0;
340 static long long time_major_scan_thread_data = 0;
341 static long long time_major_scan_alloc_pinned = 0;
342 static long long time_major_scan_finalized = 0;
343 static long long time_major_scan_big_objects = 0;
344 static long long time_major_finish_gray_stack = 0;
345 static long long time_major_free_bigobjs = 0;
346 static long long time_major_los_sweep = 0;
347 static long long time_major_sweep = 0;
348 static long long time_major_fragment_creation = 0;
350 static SGEN_TV_DECLARE (time_major_conc_collection_start);
351 static SGEN_TV_DECLARE (time_major_conc_collection_end);
353 static SGEN_TV_DECLARE (last_minor_collection_start_tv);
354 static SGEN_TV_DECLARE (last_minor_collection_end_tv);
356 int gc_debug_level = 0;
359 static MonoGCFinalizerCallbacks fin_callbacks;
/*
 * Flush the GC debug log stream so buffered diagnostic output is not lost.
 * NOTE(review): extraction gap — the return-type line and the function braces
 * are missing from this view of the file.
 */
363 mono_gc_flush_info (void)
365 	fflush (gc_debug_file);
369 #define TV_DECLARE SGEN_TV_DECLARE
370 #define TV_GETTIME SGEN_TV_GETTIME
371 #define TV_ELAPSED SGEN_TV_ELAPSED
372 #define TV_ELAPSED_MS SGEN_TV_ELAPSED_MS
374 SGEN_TV_DECLARE (sgen_init_timestamp);
376 #define ALIGN_TO(val,align) ((((guint64)val) + ((align) - 1)) & ~((align) - 1))
378 NurseryClearPolicy nursery_clear_policy = CLEAR_AT_TLAB_CREATION;
380 #define object_is_forwarded SGEN_OBJECT_IS_FORWARDED
381 #define object_is_pinned SGEN_OBJECT_IS_PINNED
382 #define pin_object SGEN_PIN_OBJECT
383 #define unpin_object SGEN_UNPIN_OBJECT
385 #define ptr_in_nursery sgen_ptr_in_nursery
387 #define LOAD_VTABLE SGEN_LOAD_VTABLE
/*
 * Return the class name of OBJ, read via its vtable. Used for debug logging;
 * "safe" here means it does not allocate.
 * NOTE(review): extraction gap — the return-type line and braces are missing.
 */
390 safe_name (void* obj)
392 	MonoVTable *vt = (MonoVTable*)LOAD_VTABLE (obj);
393 	return vt->klass->name;
396 #define safe_object_get_size sgen_safe_object_get_size
/*
 * Public wrapper around the file-local safe_name () helper.
 * NOTE(review): extraction gap — the return-type line and braces are missing.
 */
399 sgen_safe_name (void* obj)
401 	return safe_name (obj);
405 * ######################################################################
406 * ######## Global data.
407 * ######################################################################
409 LOCK_DECLARE (gc_mutex);
410 gboolean sgen_try_free_some_memory;
412 #define SCAN_START_SIZE SGEN_SCAN_START_SIZE
414 static mword pagesize = 4096;
415 size_t degraded_mode = 0;
417 static mword bytes_pinned_from_failed_allocation = 0;
419 GCMemSection *nursery_section = NULL;
420 static mword lowest_heap_address = ~(mword)0;
421 static mword highest_heap_address = 0;
423 LOCK_DECLARE (sgen_interruption_mutex);
424 static LOCK_DECLARE (pin_queue_mutex);
426 #define LOCK_PIN_QUEUE mono_mutex_lock (&pin_queue_mutex)
427 #define UNLOCK_PIN_QUEUE mono_mutex_unlock (&pin_queue_mutex)
429 typedef struct _FinalizeReadyEntry FinalizeReadyEntry;
430 struct _FinalizeReadyEntry {
431 FinalizeReadyEntry *next;
435 typedef struct _EphemeronLinkNode EphemeronLinkNode;
437 struct _EphemeronLinkNode {
438 EphemeronLinkNode *next;
447 int current_collection_generation = -1;
448 volatile gboolean concurrent_collection_in_progress = FALSE;
450 /* objects that are ready to be finalized */
451 static FinalizeReadyEntry *fin_ready_list = NULL;
452 static FinalizeReadyEntry *critical_fin_list = NULL;
454 static EphemeronLinkNode *ephemeron_list;
456 /* registered roots: the key to the hash is the root start address */
458 * Different kinds of roots are kept separate to speed up pin_from_roots () for example.
460 SgenHashTable roots_hash [ROOT_TYPE_NUM] = {
461 SGEN_HASH_TABLE_INIT (INTERNAL_MEM_ROOTS_TABLE, INTERNAL_MEM_ROOT_RECORD, sizeof (RootRecord), mono_aligned_addr_hash, NULL),
462 SGEN_HASH_TABLE_INIT (INTERNAL_MEM_ROOTS_TABLE, INTERNAL_MEM_ROOT_RECORD, sizeof (RootRecord), mono_aligned_addr_hash, NULL),
463 SGEN_HASH_TABLE_INIT (INTERNAL_MEM_ROOTS_TABLE, INTERNAL_MEM_ROOT_RECORD, sizeof (RootRecord), mono_aligned_addr_hash, NULL)
465 static mword roots_size = 0; /* amount of memory in the root set */
467 #define GC_ROOT_NUM 32
469 int count; /* must be the first field */
470 void *objects [GC_ROOT_NUM];
471 int root_types [GC_ROOT_NUM];
472 uintptr_t extra_info [GC_ROOT_NUM];
/*
 * Report the roots accumulated in REPORT to the profiler.
 * NOTE(review): extraction gap — lines are missing here; presumably the missing
 * code also resets report->count (otherwise add_profile_gc_root would overflow
 * the fixed-size GC_ROOT_NUM arrays) — confirm against the full source.
 */
476 notify_gc_roots (GCRootReport *report)
480 	mono_profiler_gc_roots (report->count, report->objects, report->root_types, report->extra_info);
/*
 * Append one root entry to REPORT, flushing to the profiler first when the
 * fixed-size buffer (GC_ROOT_NUM entries) is full.
 * NOTE(review): the extra_info parameter is visibly ignored — the object's
 * class pointer is stored in the extra_info slot instead. Confirm this is
 * intentional before relying on the parameter.
 */
485 add_profile_gc_root (GCRootReport *report, void *object, int rtype, uintptr_t extra_info)
487 	if (report->count == GC_ROOT_NUM)
488 		notify_gc_roots (report);
489 	report->objects [report->count] = object;
490 	report->root_types [report->count] = rtype;
/* Stores the klass pointer, not the extra_info argument. */
491 	report->extra_info [report->count++] = (uintptr_t)((MonoVTable*)LOAD_VTABLE (object))->klass;
494 MonoNativeTlsKey thread_info_key;
496 #ifdef HAVE_KW_THREAD
497 __thread SgenThreadInfo *sgen_thread_info;
498 __thread char *stack_end;
501 /* The size of a TLAB */
502 /* The bigger the value, the less often we have to go to the slow path to allocate a new
503 * one, but the more space is wasted by threads not allocating much memory.
505 * FIXME: Make this self-tuning for each thread.
507 guint32 tlab_size = (1024 * 4);
509 #define MAX_SMALL_OBJ_SIZE SGEN_MAX_SMALL_OBJ_SIZE
511 /* Functions supplied by the runtime to be called by the GC */
512 static MonoGCCallbacks gc_callbacks;
514 #define ALLOC_ALIGN SGEN_ALLOC_ALIGN
515 #define ALLOC_ALIGN_BITS SGEN_ALLOC_ALIGN_BITS
517 #define ALIGN_UP SGEN_ALIGN_UP
519 #define MOVED_OBJECTS_NUM 64
520 static void *moved_objects [MOVED_OBJECTS_NUM];
521 static int moved_objects_idx = 0;
523 /* Vtable of the objects used to fill out nursery fragments before a collection */
524 static MonoVTable *array_fill_vtable;
526 #ifdef SGEN_DEBUG_INTERNAL_ALLOC
527 MonoNativeThreadId main_gc_thread = NULL;
530 /*Object was pinned during the current collection*/
531 static mword objects_pinned;
534 * ######################################################################
535 * ######## Macros and function declarations.
536 * ######################################################################
/*
 * Round PTR up to the next pointer-size (sizeof (gpointer)) boundary.
 * NOTE(review): extraction gap — the return-type line, braces and the final
 * `return (void*)p;` are missing from this view.
 */
540 align_pointer (void *ptr)
542 	mword p = (mword)ptr;
/* Add (alignment - 1) then mask: classic round-up-to-power-of-two idiom. */
543 	p += sizeof (gpointer) - 1;
544 	p &= ~ (sizeof (gpointer) - 1);
548 typedef SgenGrayQueue GrayQueue;
550 /* forward declarations */
551 static void scan_thread_data (void *start_nursery, void *end_nursery, gboolean precise, GrayQueue *queue);
552 static void scan_from_registered_roots (char *addr_start, char *addr_end, int root_type, ScanCopyContext ctx);
553 static void scan_finalizer_entries (FinalizeReadyEntry *list, ScanCopyContext ctx);
554 static void report_finalizer_roots (void);
555 static void report_registered_roots (void);
557 static void pin_from_roots (void *start_nursery, void *end_nursery, GrayQueue *queue);
558 static void finish_gray_stack (int generation, GrayQueue *queue);
560 void mono_gc_scan_for_specific_ref (MonoObject *key, gboolean precise);
563 static void init_stats (void);
565 static int mark_ephemerons_in_range (ScanCopyContext ctx);
566 static void clear_unreachable_ephemerons (ScanCopyContext ctx);
567 static void null_ephemerons_for_domain (MonoDomain *domain);
569 static gboolean major_update_or_finish_concurrent_collection (gboolean force_finish);
571 SgenObjectOperations current_object_ops;
572 SgenMajorCollector major_collector;
573 SgenMinorCollector sgen_minor_collector;
574 static GrayQueue gray_queue;
576 static SgenRemeberedSet remset;
578 /* The gray queue to use from the main collection thread. */
579 #define WORKERS_DISTRIBUTE_GRAY_QUEUE (&gray_queue)
582 * The gray queue a worker job must use. If we're not parallel or
583 * concurrent, we use the main gray queue.
/*
 * Return the gray queue a worker job must use: the worker's private queue when
 * running on a worker thread, otherwise the main-thread distribute queue.
 * NOTE(review): extraction gap — the function braces are missing from this view.
 */
585 static SgenGrayQueue*
586 sgen_workers_get_job_gray_queue (WorkerData *worker_data)
588 	return worker_data ? &worker_data->private_gray_queue : WORKERS_DISTRIBUTE_GRAY_QUEUE;
/*
 * Move the sections of QUEUE over to the shared section gray queue so worker
 * threads can pick them up, waking the workers if any were started.
 * Only legal during a concurrent collection or a parallel major collection
 * (see the g_assert below).
 * NOTE(review): extraction gap — several lines (loop header, braces, the code
 * setting/consuming `wake`) are missing from this view; the control flow shown
 * here is incomplete.
 */
592 gray_queue_redirect (SgenGrayQueue *queue)
594 	gboolean wake = FALSE;
598 		GrayQueueSection *section = sgen_gray_object_dequeue_section (queue);
601 		sgen_section_gray_queue_enqueue (queue->alloc_prepare_data, section);
606 	g_assert (concurrent_collection_in_progress ||
607 			(current_collection_generation == GENERATION_OLD && major_collector.is_parallel));
608 	if (sgen_workers_have_started ()) {
609 		sgen_workers_wake_up_all ();
/* During a concurrent collection the redirect happens outside a pause. */
611 		if (concurrent_collection_in_progress)
612 			g_assert (current_collection_generation == -1);
/*
 * Walk every object in [start, end), invoking CALLBACK (obj, size, data) for
 * each real object. Holes (zeroed words) are skipped a pointer-size at a time,
 * and objects whose vtable is the nursery array-fill dummy are not reported.
 * NOTE(review): extraction gap — braces, local declarations (obj, size) and
 * part of the forwarding/allow_flags handling are missing from this view.
 */
618 sgen_scan_area_with_callback (char *start, char *end, IterateObjectCallbackFunc callback, void *data, gboolean allow_flags)
620 	while (start < end) {
/* A zero word means unallocated space, not an object header. */
624 		if (!*(void**)start) {
625 			start += sizeof (void*); /* should be ALLOC_ALIGN, really */
630 			if (!(obj = SGEN_OBJECT_IS_FORWARDED (start)))
636 		size = ALIGN_UP (safe_object_get_size ((MonoObject*)obj));
/* Dummy array-fill objects only mark free fragments; don't report them. */
638 		if ((MonoVTable*)SGEN_LOAD_VTABLE (obj) != array_fill_vtable)
639 			callback (obj, size, data);
/*
 * Decide whether START (an object) must be removed because it belongs to the
 * unloading DOMAIN; logs and records the cleanup in the binary protocol.
 * NOTE(review): extraction gap — the return statements (presumably TRUE inside
 * the branch, FALSE otherwise) and braces are missing from this view.
 */
646 need_remove_object_for_domain (char *start, MonoDomain *domain)
648 	if (mono_object_domain (start) == domain) {
649 		SGEN_LOG (4, "Need to cleanup object %p", start)ç
650 		binary_protocol_cleanup (start, (gpointer)LOAD_VTABLE (start), safe_object_get_size ((MonoObject*)start));
/*
 * Per-object fixups needed before DOMAIN is cleared: asserts internal-thread
 * objects live in the root domain, and nulls out a remoting real-proxy's
 * unwrapped_server when the server object belongs to the dying domain (or was
 * already zeroed).
 * NOTE(review): extraction gap — braces, the #endif for DISABLE_REMOTING and
 * the tail of the comment at original line 662 are missing from this view.
 */
657 process_object_for_domain_clearing (char *start, MonoDomain *domain)
659 	GCVTable *vt = (GCVTable*)LOAD_VTABLE (start);
660 	if (vt->klass == mono_defaults.internal_thread_class)
661 		g_assert (mono_object_domain (start) == mono_get_root_domain ());
662 	/* The object could be a proxy for an object in the domain
664 #ifndef DISABLE_REMOTING
665 	if (mono_defaults.real_proxy_class->supertypes && mono_class_has_parent_fast (vt->klass, mono_defaults.real_proxy_class)) {
666 		MonoObject *server = ((MonoRealProxy*)start)->unwrapped_server;
668 		/* The server could already have been zeroed out, so
669 		   we need to check for that, too. */
670 		if (server && (!LOAD_VTABLE (server) || mono_object_domain (server) == domain)) {
671 			SGEN_LOG (4, "Cleaning up remote pointer in %p to object %p", start, server);
672 			((MonoRealProxy*)start)->unwrapped_server = NULL;
/*
 * Process OBJ for domain clearing and report whether it must be removed.
 * If it is to be removed and has a monitor (synchronisation block), the
 * monitor's weak link is re-registered as a disappearing link so it gets
 * nulled rather than left dangling.
 * NOTE(review): extraction gap — the declaration of `remove`, braces and the
 * final return are missing from this view.
 */
679 clear_domain_process_object (char *obj, MonoDomain *domain)
683 	process_object_for_domain_clearing (obj, domain);
684 	remove = need_remove_object_for_domain (obj, domain);
686 	if (remove && ((MonoObject*)obj)->synchronisation) {
687 		void **dislink = mono_monitor_get_object_monitor_weak_link ((MonoObject*)obj);
689 			sgen_register_disappearing_link (NULL, dislink, FALSE, TRUE);
/*
 * Nursery-object callback for domain clearing: objects of the dying domain
 * are memset to zero in place (the nursery is swept implicitly).
 * NOTE(review): extraction gap — return type and braces missing from this view.
 */
696 clear_domain_process_minor_object_callback (char *obj, size_t size, MonoDomain *domain)
698 	if (clear_domain_process_object (obj, domain))
699 		memset (obj, 0, size);
/*
 * First-pass callback over major-heap objects: only runs the per-object
 * processing; actual freeing happens in the later sweep callbacks.
 * NOTE(review): extraction gap — return type and braces missing from this view.
 */
703 clear_domain_process_major_object_callback (char *obj, size_t size, MonoDomain *domain)
705 	clear_domain_process_object (obj, domain);
/*
 * Second-pass callback: free a non-pinned major-heap object that belongs to
 * the unloading domain.
 * NOTE(review): extraction gap — return type and braces missing from this view.
 */
709 clear_domain_free_major_non_pinned_object_callback (char *obj, size_t size, MonoDomain *domain)
711 	if (need_remove_object_for_domain (obj, domain))
712 		major_collector.free_non_pinned_object (obj, size);
/*
 * Second-pass callback: free a pinned major-heap object that belongs to the
 * unloading domain.
 * NOTE(review): extraction gap — return type and braces missing from this view.
 */
716 clear_domain_free_major_pinned_object_callback (char *obj, size_t size, MonoDomain *domain)
718 	if (need_remove_object_for_domain (obj, domain))
719 		major_collector.free_pinned_object (obj, size);
723 * When appdomains are unloaded we can easily remove objects that have finalizers,
724 * but all the others could still be present in random places on the heap.
725 * We need a sweep to get rid of them even though it's going to be costly
727 * The reason we need to remove them is because we access the vtable and class
728 * structures to know the object size and the reference bitmap: once the domain is
729 * unloaded they point to random memory.
/*
 * Remove every object belonging to DOMAIN from all heaps (nursery, major,
 * LOS) during appdomain unload. Finishes any concurrent collection first,
 * processes finalizer/dislink staging, nulls ephemerons and weak links for
 * the domain, then sweeps nursery, major heap (two passes, see comment at
 * original line 771) and the large-object list.
 * NOTE(review): extraction gap — return type, braces, local `i`, the
 * stop-the-world call and several closing braces are missing from this view.
 */
732 mono_gc_clear_domain (MonoDomain * domain)
734 	LOSObject *bigobj, *prev;
739 	binary_protocol_domain_unload_begin (domain);
/* A concurrent major collection must be finished before we mutate the heap. */
743 	if (concurrent_collection_in_progress)
744 		sgen_perform_collection (0, GENERATION_OLD, "clear domain", TRUE);
745 	g_assert (!concurrent_collection_in_progress);
747 	sgen_process_fin_stage_entries ();
748 	sgen_process_dislink_stage_entries ();
750 	sgen_clear_nursery_fragments ();
/* Optional debug pass: verify no cross-domain references remain. */
752 	if (xdomain_checks && domain != mono_get_root_domain ()) {
753 		sgen_scan_for_registered_roots_in_domain (domain, ROOT_TYPE_NORMAL);
754 		sgen_scan_for_registered_roots_in_domain (domain, ROOT_TYPE_WBARRIER);
755 		sgen_check_for_xdomain_refs ();
758 	/*Ephemerons and dislinks must be processed before LOS since they might end up pointing
759 	to memory returned to the OS.*/
760 	null_ephemerons_for_domain (domain);
762 	for (i = GENERATION_NURSERY; i < GENERATION_MAX; ++i)
763 		sgen_null_links_for_domain (domain, i);
765 	for (i = GENERATION_NURSERY; i < GENERATION_MAX; ++i)
766 		sgen_remove_finalizers_for_domain (domain, i);
768 	sgen_scan_area_with_callback (nursery_section->data, nursery_section->end_data,
769 			(IterateObjectCallbackFunc)clear_domain_process_minor_object_callback, domain, FALSE);
771 	/* We need two passes over major and large objects because
772 	   freeing such objects might give their memory back to the OS
773 	   (in the case of large objects) or obliterate its vtable
774 	   (pinned objects with major-copying or pinned and non-pinned
775 	   objects with major-mark&sweep), but we might need to
776 	   dereference a pointer from an object to another object if
777 	   the first object is a proxy. */
778 	major_collector.iterate_objects (ITERATE_OBJECTS_SWEEP_ALL, (IterateObjectCallbackFunc)clear_domain_process_major_object_callback, domain);
779 	for (bigobj = los_object_list; bigobj; bigobj = bigobj->next)
780 		clear_domain_process_object (bigobj->data, domain);
/* Second pass over LOS: unlink and free the domain's large objects. */
783 	for (bigobj = los_object_list; bigobj;) {
784 		if (need_remove_object_for_domain (bigobj->data, domain)) {
785 			LOSObject *to_free = bigobj;
787 				prev->next = bigobj->next;
789 				los_object_list = bigobj->next;
790 			bigobj = bigobj->next;
/* NOTE(review): bigobj was already advanced above, so this logs the NEXT
 * object's data, not the one being freed (to_free->data) — looks like a
 * latent logging bug; also dereferences NULL if to_free was the last node. */
791 			SGEN_LOG (4, "Freeing large object %p", bigobj->data);
792 			sgen_los_free_object (to_free);
796 		bigobj = bigobj->next;
798 	major_collector.iterate_objects (ITERATE_OBJECTS_SWEEP_NON_PINNED, (IterateObjectCallbackFunc)clear_domain_free_major_non_pinned_object_callback, domain);
799 	major_collector.iterate_objects (ITERATE_OBJECTS_SWEEP_PINNED, (IterateObjectCallbackFunc)clear_domain_free_major_pinned_object_callback, domain);
/* Root-domain unload means runtime shutdown: dump accumulated stats. */
801 	if (domain == mono_get_root_domain ()) {
802 		if (G_UNLIKELY (do_pin_stats))
803 			sgen_pin_stats_print_class_stats ();
804 		sgen_object_layout_dump (stdout);
807 	sgen_restart_world (0, NULL);
809 	binary_protocol_domain_unload_end (domain);
815 * sgen_add_to_global_remset:
817 * The global remset contains locations which point into newspace after
818 * a minor collection. This can happen if the objects they point to are pinned.
820 * LOCKING: If called from a parallel collector, the global remset
821 * lock must be held. For serial collectors that is not necessary.
/*
 * Record PTR (a location pointing at nursery object OBJ) in the global
 * remembered set. Cemented objects need no remset entry (the early-out after
 * sgen_cement_lookup_or_register — its `return` line is missing from this
 * view). See the comment block at original lines 815-821 for locking rules.
 * NOTE(review): extraction gap — return type, braces, the `else` introducing
 * line 833 and the #ifdef ENABLE_DTRACE guard around line 852 are missing.
 */
824 sgen_add_to_global_remset (gpointer ptr, gpointer obj)
826 	SGEN_ASSERT (5, sgen_ptr_in_nursery (obj), "Target pointer of global remset must be in the nursery");
828 	HEAVY_STAT (++stat_wbarrier_add_to_global_remset);
/* Sanity-check when this can legally run, per collector configuration. */
830 	if (!major_collector.is_concurrent) {
831 		SGEN_ASSERT (5, current_collection_generation != -1, "Global remsets can only be added during collections");
833 		if (current_collection_generation == -1)
834 			SGEN_ASSERT (5, sgen_concurrent_collection_in_progress (), "Global remsets outside of collection pauses can only be added by the concurrent collector");
837 	if (!object_is_pinned (obj))
838 		SGEN_ASSERT (5, sgen_minor_collector.is_split || sgen_concurrent_collection_in_progress (), "Non-pinned objects can only remain in nursery if it is a split nursery");
839 	else if (sgen_cement_lookup_or_register (obj))
842 	remset.record_pointer (ptr);
844 	if (G_UNLIKELY (do_pin_stats))
845 		sgen_pin_stats_register_global_remset (obj);
847 	SGEN_LOG (8, "Adding global remset for %p", ptr);
848 	binary_protocol_global_remset (ptr, obj, (gpointer)SGEN_LOAD_VTABLE (obj));
/* DTrace probe for remset additions (guard lines missing from this view). */
852 	if (G_UNLIKELY (MONO_GC_GLOBAL_REMSET_ADD_ENABLED ())) {
853 		MonoVTable *vt = (MonoVTable*)LOAD_VTABLE (obj);
854 		MONO_GC_GLOBAL_REMSET_ADD ((mword)ptr, (mword)obj, sgen_safe_object_get_size (obj),
855 				vt->klass->name_space, vt->klass->name);
861 * sgen_drain_gray_stack:
863 * Scan objects in the gray stack until the stack is empty. This should be called
864 * frequently after each object is copied, to achieve better locality and cache
/*
 * Scan objects popped from the gray queue: all of them when max_objs == -1,
 * otherwise at most max_objs per outer iteration. See the comment block at
 * original lines 861-864 for why this is called frequently.
 * NOTE(review): extraction gap — return type, braces, the declarations of
 * `obj`/`i`, the loop headers and the empty-queue break checks are missing
 * from this view, so the shown `do`/`while` structure is incomplete.
 */
868 sgen_drain_gray_stack (int max_objs, ScanCopyContext ctx)
871 	ScanObjectFunc scan_func = ctx.scan_func;
872 	GrayQueue *queue = ctx.queue;
/* Unbounded drain: keep dequeuing until the queue is empty. */
874 	if (max_objs == -1) {
876 			GRAY_OBJECT_DEQUEUE (queue, &obj);
879 			SGEN_LOG (9, "Precise gray object scan %p (%s)", obj, safe_name (obj));
880 			scan_func (obj, queue);
/* Bounded drain: scan up to max_objs objects per pass. */
886 		for (i = 0; i != max_objs; ++i) {
887 			GRAY_OBJECT_DEQUEUE (queue, &obj);
890 			SGEN_LOG (9, "Precise gray object scan %p (%s)", obj, safe_name (obj));
891 			scan_func (obj, queue);
893 		} while (max_objs < 0);
899 * Addresses in the pin queue are already sorted. This function finds
900 * the object header for each address and pins the object. The
901 * addresses must be inside the nursery section. The (start of the)
902 * address array is overwritten with the addresses of the actually
903 * pinned objects. Return the number of pinned objects.
906 pin_objects_from_nursery_pin_queue (ScanCopyContext ctx)
908 GCMemSection *section = nursery_section;
909 void **start = section->pin_queue_start;
910 void **end = start + section->pin_queue_num_entries;
911 void *start_nursery = section->data;
912 void *end_nursery = section->next_data;
/* Everything below pinning_front has already been walked; addresses before it
 * can be resolved without re-scanning (see the fast path further down). */
917 void *pinning_front = start_nursery;
/* Compaction cursor: surviving (actually pinned) entries are written back
 * into the front of the queue array. */
919 void **definitely_pinned = start;
920 ScanObjectFunc scan_func = ctx.scan_func;
921 SgenGrayQueue *queue = ctx.queue;
923 sgen_nursery_allocator_prepare_for_pinning ();
925 while (start < end) {
926 void *obj_to_pin = NULL;
927 size_t obj_to_pin_size = 0;
/* Pin-queue invariants: every candidate address lies in the nursery and the
 * queue is sorted ascending (sorted by sgen_sort_addresses before this runs). */
931 SGEN_ASSERT (0, addr >= start_nursery && addr < end_nursery, "Potential pinning address out of range");
932 SGEN_ASSERT (0, addr >= last, "Pin queue not sorted");
939 SGEN_LOG (5, "Considering pinning addr %p", addr);
940 /* We've already processed everything up to pinning_front. */
941 if (addr < pinning_front) {
947 * Find the closest scan start <= addr. We might search backward in the
948 * scan_starts array because entries might be NULL. In the worst case we
949 * start at start_nursery.
951 idx = ((char*)addr - (char*)section->data) / SCAN_START_SIZE;
952 SGEN_ASSERT (0, idx < section->num_scan_start, "Scan start index out of range");
953 search_start = (void*)section->scan_starts [idx];
954 if (!search_start || search_start > addr) {
957 search_start = section->scan_starts [idx];
958 if (search_start && search_start <= addr)
961 if (!search_start || search_start > addr)
962 search_start = start_nursery;
966 * If the pinning front is closer than the scan start we found, start
967 * searching at the front.
969 if (search_start < pinning_front)
970 search_start = pinning_front;
973 * Now addr should be in an object a short distance from search_start.
975 * search_start must point to zeroed mem or point to an object.
/* A NULL word means unallocated/zeroed space; skip forward one aligned
 * pointer at a time until we hit an object header. */
981 if (!*(void**)search_start) {
982 search_start = (void*)ALIGN_UP ((mword)search_start + sizeof (gpointer));
983 /* The loop condition makes sure we don't overrun addr. */
/* Walk object-by-object from search_start until the object containing
 * addr is found (interior pointers pin the whole enclosing object). */
987 obj_size = ALIGN_UP (safe_object_get_size ((MonoObject*)search_start));
989 if (addr >= search_start && (char*)addr < (char*)search_start + obj_size) {
990 /* This is the object we're looking for. */
991 obj_to_pin = search_start;
992 obj_to_pin_size = obj_size;
996 /* Skip to the next object */
997 search_start = (void*)((char*)search_start + obj_size);
998 } while (search_start <= addr);
1000 /* We've searched past the address we were looking for. */
1002 pinning_front = search_start;
1003 goto next_pin_queue_entry;
1007 * We've found an object to pin. It might still be a dummy array, but we
1008 * can advance the pinning front in any case.
1010 pinning_front = (char*)obj_to_pin + obj_to_pin_size;
1013 * If this is a dummy array marking the beginning of a nursery
1014 * fragment, we don't pin it.
/* Fragment-marker dummies are recognized by a -1 synchronisation word. */
1016 if (((MonoObject*)obj_to_pin)->synchronisation == GINT_TO_POINTER (-1))
1017 goto next_pin_queue_entry;
1020 * Finally - pin the object!
/* Scan the object's references so they get grayed/copied as well. */
1023 scan_func (obj_to_pin, queue);
1025 SGEN_LOG (4, "Pinned object %p, vtable %p (%s), count %d\n",
1026 obj_to_pin, *(void**)obj_to_pin, safe_name (obj_to_pin), count);
1027 binary_protocol_pin (obj_to_pin,
1028 (gpointer)LOAD_VTABLE (obj_to_pin),
1029 safe_object_get_size (obj_to_pin));
1031 #ifdef ENABLE_DTRACE
1032 if (G_UNLIKELY (MONO_GC_OBJ_PINNED_ENABLED ())) {
1033 int gen = sgen_ptr_in_nursery (obj_to_pin) ? GENERATION_NURSERY : GENERATION_OLD;
1034 MonoVTable *vt = (MonoVTable*)LOAD_VTABLE (obj_to_pin);
1035 MONO_GC_OBJ_PINNED ((mword)obj_to_pin,
1036 sgen_safe_object_get_size (obj_to_pin),
1037 vt->klass->name_space, vt->klass->name, gen);
/* Mark the pin bit, gray the object, and record it in the compacted queue. */
1041 pin_object (obj_to_pin);
1042 GRAY_OBJECT_ENQUEUE (queue, obj_to_pin);
1043 if (G_UNLIKELY (do_pin_stats))
1044 sgen_pin_stats_register_object (obj_to_pin, obj_to_pin_size);
1045 definitely_pinned [count] = obj_to_pin;
1049 next_pin_queue_entry:
1053 //printf ("effective pinned: %d (at the end: %d)\n", count, (char*)end_nursery - (char*)last);
/* Report every pinned object as a GC root to the profiler, if requested. */
1054 if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS) {
1055 GCRootReport report;
1057 for (idx = 0; idx < count; ++idx)
1058 add_profile_gc_root (&report, definitely_pinned [idx], MONO_PROFILE_GC_ROOT_PINNING | MONO_PROFILE_GC_ROOT_MISC, 0);
1059 notify_gc_roots (&report);
1061 stat_pinned_objects += count;
/*
 * Pin the objects referenced by the nursery pin queue and shrink the
 * queue to just the surviving entries; the start pointer is then
 * cleared since the (possibly pinned) fragment it pointed into is no
 * longer needed after this pass.
 */
1066 pin_objects_in_nursery (ScanCopyContext ctx)
1070 if (!nursery_section->pin_queue_num_entries)
/* pin_objects_from_nursery_pin_queue compacts the queue in place and
 * returns the number of objects actually pinned. */
1073 reduced_to = pin_objects_from_nursery_pin_queue (ctx);
1074 nursery_section->pin_queue_num_entries = reduced_to;
1076 nursery_section->pin_queue_start = NULL;
/*
 * Pin a single object and gray it so its references are scanned.
 * In a parallel collection the object arrives already pinned (the pin
 * bit was set by a CAS elsewhere), so only its address is staged; the
 * non-parallel path sets the pin bit here. Not valid during a
 * concurrent collection (asserted below).
 */
1081 sgen_pin_object (void *object, GrayQueue *queue)
1083 g_assert (!concurrent_collection_in_progress);
1085 if (sgen_collection_is_parallel ()) {
1087 /*object arrives pinned*/
1088 sgen_pin_stage_ptr (object);
1092 SGEN_PIN_OBJECT (object);
1093 sgen_pin_stage_ptr (object);
1095 if (G_UNLIKELY (do_pin_stats))
1096 sgen_pin_stats_register_object (object, safe_object_get_size (object));
1098 GRAY_OBJECT_ENQUEUE (queue, object);
1099 binary_protocol_pin (object, (gpointer)LOAD_VTABLE (object), safe_object_get_size (object));
1101 #ifdef ENABLE_DTRACE
1102 if (G_UNLIKELY (MONO_GC_OBJ_PINNED_ENABLED ())) {
1103 int gen = sgen_ptr_in_nursery (object) ? GENERATION_NURSERY : GENERATION_OLD;
1104 MonoVTable *vt = (MonoVTable*)LOAD_VTABLE (object);
1105 MONO_GC_OBJ_PINNED ((mword)object, sgen_safe_object_get_size (object), vt->klass->name_space, vt->klass->name, gen);
/*
 * Parallel-collector helper: try to pin *ptr's target, racing against
 * other worker threads. The nursery path attempts to atomically set
 * the pinned bit in the vtable word via CAS; losing the race means
 * another worker either forwarded the object (then *ptr is updated to
 * the forwarding address) or pinned it already (then nothing to do).
 */
1111 sgen_parallel_pin_or_update (void **ptr, void *obj, MonoVTable *vt, SgenGrayQueue *queue)
1115 gboolean major_pinned = FALSE;
1117 if (sgen_ptr_in_nursery (obj)) {
/* CAS the pinned bit into the vtable word; success means we own the pin. */
1118 if (SGEN_CAS_PTR (obj, (void*)((mword)vt | SGEN_PINNED_BIT), vt) == vt) {
1119 sgen_pin_object (obj, queue);
1123 major_collector.pin_major_object (obj, queue);
1124 major_pinned = TRUE;
/* We lost the CAS: re-read the vtable word to see what happened. */
1127 vtable_word = *(mword*)obj;
1128 /*someone else forwarded it, update the pointer and bail out*/
1129 if (vtable_word & SGEN_FORWARDED_BIT) {
1130 *ptr = (void*)(vtable_word & ~SGEN_VTABLE_BITS_MASK);
1134 /*someone pinned it, nothing to do.*/
1135 if (vtable_word & SGEN_PINNED_BIT || major_pinned)
1140 /* Sort the addresses in array in increasing order.
1141 * Done using a by-the book heap sort. Which has decent and stable performance, is pretty cache efficient.
1144 sgen_sort_addresses (void **array, size_t size)
/* Phase 1: build a max-heap by sifting each element up toward the root. */
1149 for (i = 1; i < size; ++i) {
1152 size_t parent = (child - 1) / 2;
1154 if (array [parent] >= array [child])
1157 tmp = array [parent];
1158 array [parent] = array [child];
1159 array [child] = tmp;
/* Phase 2: repeatedly move the max (root) to the end of the shrinking
 * heap, then sift the new root down to restore the heap property. */
1165 for (i = size - 1; i > 0; --i) {
1168 array [i] = array [0];
1174 while (root * 2 + 1 <= end) {
1175 size_t child = root * 2 + 1;
/* Pick the larger of the two children to compare against. */
1177 if (child < end && array [child] < array [child + 1])
1179 if (array [root] >= array [child])
1183 array [root] = array [child];
1184 array [child] = tmp;
1192 * Scan the memory between start and end and queue values which could be pointers
1193 * to the area between start_nursery and end_nursery for later consideration.
1194 * Typically used for thread stacks.
1197 conservatively_pin_objects_from (void **start, void **end, void *start_nursery, void *end_nursery, int pin_type)
1201 #ifdef VALGRIND_MAKE_MEM_DEFINED_IF_ADDRESSABLE
/* Tell Valgrind the whole range is defined so the conservative scan
 * below doesn't trigger uninitialized-read reports. */
1202 VALGRIND_MAKE_MEM_DEFINED_IF_ADDRESSABLE (start, (char*)end - (char*)start);
1205 while (start < end) {
1206 if (*start >= start_nursery && *start < end_nursery) {
1208 * *start can point to the middle of an object
1209 * note: should we handle pointing at the end of an object?
1210 * pinning in C# code disallows pointing at the end of an object
1211 * but there is some small chance that an optimizing C compiler
1212 * may keep the only reference to an object by pointing
1213 * at the end of it. We ignore this small chance for now.
1214 * Pointers to the end of an object are indistinguishable
1215 * from pointers to the start of the next object in memory
1216 * so if we allow that we'd need to pin two objects...
1217 * We queue the pointer in an array, the
1218 * array will then be sorted and uniqued. This way
1219 * we can coalesce several pinning pointers and it should
1220 * be faster since we'd do a memory scan with increasing
1221 * addresses. Note: we can align the address to the allocation
1222 * alignment, so the unique process is more effective.
1224 mword addr = (mword)*start;
/* Round down to allocation alignment so duplicates coalesce. */
1225 addr &= ~(ALLOC_ALIGN - 1);
1226 if (addr >= (mword)start_nursery && addr < (mword)end_nursery) {
1227 SGEN_LOG (6, "Pinning address %p from %p", (void*)addr, start);
1228 sgen_pin_stage_ptr ((void*)addr);
1231 if (G_UNLIKELY (do_pin_stats)) {
1232 if (ptr_in_nursery ((void*)addr))
1233 sgen_pin_stats_register_address ((char*)addr, pin_type);
1239 SGEN_LOG (7, "found %d potential pinned heap pointers", count);
1243 * The first thing we do in a collection is to identify pinned objects.
1244 * This function considers all the areas of memory that need to be
1245 * conservatively scanned.
1248 pin_from_roots (void *start_nursery, void *end_nursery, GrayQueue *queue)
1252 SGEN_LOG (2, "Scanning pinned roots (%d bytes, %d/%d entries)", (int)roots_size, roots_hash [ROOT_TYPE_NORMAL].num_entries, roots_hash [ROOT_TYPE_PINNED].num_entries);
1253 /* objects pinned from the API are inside these roots */
1254 SGEN_HASH_TABLE_FOREACH (&roots_hash [ROOT_TYPE_PINNED], start_root, root) {
1255 SGEN_LOG (6, "Pinned roots %p-%p", start_root, root->end_root);
1256 conservatively_pin_objects_from (start_root, (void**)root->end_root, start_nursery, end_nursery, PIN_TYPE_OTHER);
1257 } SGEN_HASH_TABLE_FOREACH_END;
1258 /* now deal with the thread stacks
1259 * in the future we should be able to conservatively scan only:
1260 * *) the cpu registers
1261 * *) the unmanaged stack frames
1262 * *) the _last_ managed stack frame
1263 * *) pointers slots in managed frames
/* FALSE = conservative scan of all thread stacks and register state. */
1265 scan_thread_data (start_nursery, end_nursery, FALSE, queue);
/*
 * Drain @queue and clear the pin bit on every object it contains.
 * Each dequeued object is asserted to actually be pinned.
 */
1269 unpin_objects_from_queue (SgenGrayQueue *queue)
1273 GRAY_OBJECT_DEQUEUE (queue, &addr);
1276 g_assert (SGEN_OBJECT_IS_PINNED (addr));
1277 SGEN_UNPIN_OBJECT (addr);
1282 CopyOrMarkObjectFunc func;
1284 } UserCopyOrMarkData;
/*
 * Adapter passed to user root-mark callbacks: unpacks the
 * UserCopyOrMarkData from @gc_data and forwards @obj to the stored
 * copy-or-mark function together with its gray queue.
 */
1287 single_arg_user_copy_or_mark (void **obj, void *gc_data)
1289 UserCopyOrMarkData *data = gc_data;
1291 data->func (obj, data->queue);
1295 * The memory area from start_root to end_root contains pointers to objects.
1296 * Their position is precisely described by @desc (this means that the pointer
1297 * can be either NULL or the pointer to the start of an object).
1298 * This functions copies them to to_space updates them.
1300 * This function is not thread-safe!
1303 precisely_scan_objects_from (void** start_root, void** end_root, char* n_start, char *n_end, mword desc, ScanCopyContext ctx)
1305 CopyOrMarkObjectFunc copy_func = ctx.copy_func;
1306 SgenGrayQueue *queue = ctx.queue;
/* The low bits of @desc select the root-descriptor encoding. */
1308 switch (desc & ROOT_DESC_TYPE_MASK) {
1309 case ROOT_DESC_BITMAP:
/* Inline bitmap: one bit per pointer-sized slot after the type bits. */
1310 desc >>= ROOT_DESC_TYPE_SHIFT;
1312 if ((desc & 1) && *start_root) {
1313 copy_func (start_root, queue);
1314 SGEN_LOG (9, "Overwrote root at %p with %p", start_root, *start_root);
/* Drain eagerly after each root for better cache locality. */
1315 sgen_drain_gray_stack (-1, ctx);
1321 case ROOT_DESC_COMPLEX: {
/* Out-of-line bitmap: first word is the word count (+1), then one
 * bitmap word per GC_BITS_PER_WORD slots. */
1322 gsize *bitmap_data = sgen_get_complex_descriptor_bitmap (desc);
1323 gsize bwords = (*bitmap_data) - 1;
1324 void **start_run = start_root;
1326 while (bwords-- > 0) {
1327 gsize bmap = *bitmap_data++;
1328 void **objptr = start_run;
1330 if ((bmap & 1) && *objptr) {
1331 copy_func (objptr, queue);
1332 SGEN_LOG (9, "Overwrote root at %p with %p", objptr, *objptr);
1333 sgen_drain_gray_stack (-1, ctx);
1338 start_run += GC_BITS_PER_WORD;
1342 case ROOT_DESC_USER: {
/* User-supplied marker callback enumerates the root's pointers. */
1343 UserCopyOrMarkData data = { copy_func, queue };
1344 MonoGCRootMarkFunc marker = sgen_get_user_descriptor_func (desc);
1345 marker (start_root, single_arg_user_copy_or_mark, &data);
1348 case ROOT_DESC_RUN_LEN:
1349 g_assert_not_reached ();
1351 g_assert_not_reached ();
/*
 * Reset the tracked heap address range to "empty" (lowest = max
 * possible address, highest = 0) so subsequent calls to
 * sgen_update_heap_boundaries re-establish the real bounds.
 */
1356 reset_heap_boundaries (void)
1358 lowest_heap_address = ~(mword)0;
1359 highest_heap_address = 0;
/*
 * Atomically widen the global [lowest_heap_address, highest_heap_address]
 * range to include [low, high]. Each bound is updated with a CAS retry
 * loop so concurrent callers cannot lose updates.
 */
1363 sgen_update_heap_boundaries (mword low, mword high)
1368 old = lowest_heap_address;
/* Retry until we either install @low or observe a lower existing bound. */
1371 } while (SGEN_CAS_PTR ((gpointer*)&lowest_heap_address, (gpointer)low, (gpointer)old) != (gpointer)old);
1374 old = highest_heap_address;
1377 } while (SGEN_CAS_PTR ((gpointer*)&highest_heap_address, (gpointer)high, (gpointer)old) != (gpointer)old);
1381 * Allocate and setup the data structures needed to be able to allocate objects
1382 * in the nursery. The nursery is stored in nursery_section.
1385 alloc_nursery (void)
1387 GCMemSection *section;
/* Idempotent: bail out if the nursery was already set up. */
1392 if (nursery_section)
1394 SGEN_LOG (2, "Allocating nursery size: %zu", (size_t)sgen_nursery_size);
1395 /* later we will alloc a larger area for the nursery but only activate
1396 * what we need. The rest will be used as expansion if we have too many pinned
1397 * objects in the existing nursery.
1399 /* FIXME: handle OOM */
1400 section = sgen_alloc_internal (INTERNAL_MEM_SECTION);
1402 alloc_size = sgen_nursery_size;
1404 /* If there isn't enough space even for the nursery we should simply abort. */
1405 g_assert (sgen_memgov_try_alloc_space (alloc_size, SPACE_NURSERY));
1407 #ifdef SGEN_ALIGN_NURSERY
/* Aligned nursery: request alloc_size alignment so nursery membership
 * can be tested with a mask of DEFAULT_NURSERY_BITS. */
1408 data = major_collector.alloc_heap (alloc_size, alloc_size, DEFAULT_NURSERY_BITS);
1410 data = major_collector.alloc_heap (alloc_size, 0, DEFAULT_NURSERY_BITS);
1412 sgen_update_heap_boundaries ((mword)data, (mword)(data + sgen_nursery_size));
1413 SGEN_LOG (4, "Expanding nursery size (%p-%p): %lu, total: %lu", data, data + alloc_size, (unsigned long)sgen_nursery_size, (unsigned long)mono_gc_get_heap_size ());
1414 section->data = section->next_data = data;
1415 section->size = alloc_size;
1416 section->end_data = data + sgen_nursery_size;
/* One scan-start slot per SCAN_START_SIZE bytes, rounded up. */
1417 scan_starts = (alloc_size + SCAN_START_SIZE - 1) / SCAN_START_SIZE;
1418 section->scan_starts = sgen_alloc_internal_dynamic (sizeof (char*) * scan_starts, INTERNAL_MEM_SCAN_STARTS, TRUE);
1419 section->num_scan_start = scan_starts;
1421 nursery_section = section;
1423 sgen_nursery_allocator_set_nursery_bounds (data, data + sgen_nursery_size);
/*
 * Public API: report the nursery start address, its size, and (when
 * the nursery is power-of-two aligned) the shift usable for fast
 * nursery-membership checks; shift_bits is presumably set to a
 * sentinel in the non-aligned branch, which is outside this view.
 */
1427 mono_gc_get_nursery (int *shift_bits, size_t *size)
1429 *size = sgen_nursery_size;
1430 #ifdef SGEN_ALIGN_NURSERY
1431 *shift_bits = DEFAULT_NURSERY_BITS;
1435 return sgen_get_nursery_start ();
/*
 * Record @domain as the appdomain the current thread is stopped in,
 * on its SgenThreadInfo.
 */
1439 mono_gc_set_current_thread_appdomain (MonoDomain *domain)
1441 SgenThreadInfo *info = mono_thread_info_current ();
1443 /* Could be called from sgen_thread_unregister () with a NULL info */
1446 info->stopped_domain = domain;
/* Whether managed stack frames are scanned precisely (i.e. conservative
 * stack marking is disabled). */
1451 mono_gc_precise_stack_mark_enabled (void)
1453 return !conservative_stack_mark;
/* Return the stream GC debug logging is written to. */
1457 mono_gc_get_logfile (void)
1459 return gc_debug_file;
/*
 * Report every object on a finalize-ready list to the profiler as a
 * finalizer GC root.
 */
1463 report_finalizer_roots_list (FinalizeReadyEntry *list)
1465 GCRootReport report;
1466 FinalizeReadyEntry *fin;
1469 for (fin = list; fin; fin = fin->next) {
1472 add_profile_gc_root (&report, fin->object, MONO_PROFILE_GC_ROOT_FINALIZER, 0);
1474 notify_gc_roots (&report);
/* Report both the normal and the critical finalizer queues as roots. */
1478 report_finalizer_roots (void)
1480 report_finalizer_roots_list (fin_ready_list);
1481 report_finalizer_roots_list (critical_fin_list);
1484 static GCRootReport *root_report;
/*
 * Callback for user root-mark functions during profiler reporting:
 * adds *obj to the file-global root_report (set by the caller just
 * before invoking the marker).
 */
1487 single_arg_report_root (void **obj, void *gc_data)
1490 add_profile_gc_root (root_report, *obj, MONO_PROFILE_GC_ROOT_OTHER, 0);
/*
 * Profiler counterpart of precisely_scan_objects_from: walk the root
 * range described by @desc and report each non-NULL pointer slot as a
 * GC root, without copying or marking anything.
 */
1494 precisely_report_roots_from (GCRootReport *report, void** start_root, void** end_root, mword desc)
1496 switch (desc & ROOT_DESC_TYPE_MASK) {
1497 case ROOT_DESC_BITMAP:
/* Inline bitmap descriptor: one bit per pointer slot. */
1498 desc >>= ROOT_DESC_TYPE_SHIFT;
1500 if ((desc & 1) && *start_root) {
1501 add_profile_gc_root (report, *start_root, MONO_PROFILE_GC_ROOT_OTHER, 0);
1507 case ROOT_DESC_COMPLEX: {
/* Out-of-line bitmap: first word holds the word count (+1). */
1508 gsize *bitmap_data = sgen_get_complex_descriptor_bitmap (desc);
1509 gsize bwords = (*bitmap_data) - 1;
1510 void **start_run = start_root;
1512 while (bwords-- > 0) {
1513 gsize bmap = *bitmap_data++;
1514 void **objptr = start_run;
1516 if ((bmap & 1) && *objptr) {
1517 add_profile_gc_root (report, *objptr, MONO_PROFILE_GC_ROOT_OTHER, 0);
1522 start_run += GC_BITS_PER_WORD;
1526 case ROOT_DESC_USER: {
/* Stash the report in the file-global for the single-arg callback. */
1527 MonoGCRootMarkFunc marker = sgen_get_user_descriptor_func (desc);
1528 root_report = report;
1529 marker (start_root, single_arg_report_root, NULL);
1532 case ROOT_DESC_RUN_LEN:
1533 g_assert_not_reached ();
1535 g_assert_not_reached ();
/*
 * Report all registered roots of one root type (normal/wbarrier/...)
 * to the profiler.
 */
1540 report_registered_roots_by_type (int root_type)
1542 GCRootReport report;
1546 SGEN_HASH_TABLE_FOREACH (&roots_hash [root_type], start_root, root) {
1547 SGEN_LOG (6, "Precise root scan %p-%p (desc: %p)", start_root, root->end_root, (void*)root->root_desc);
1548 precisely_report_roots_from (&report, start_root, (void**)root->end_root, root->root_desc);
1549 } SGEN_HASH_TABLE_FOREACH_END;
1550 notify_gc_roots (&report);
/* Report normal and write-barrier roots (pinned roots are reported
 * separately via the conservative pinning path). */
1554 report_registered_roots (void)
1556 report_registered_roots_by_type (ROOT_TYPE_NORMAL);
1557 report_registered_roots_by_type (ROOT_TYPE_WBARRIER);
/*
 * Copy/mark every object on a finalize-ready list so finalizable
 * objects (and transitively what they reference) survive the
 * collection; copy_func updates fin->object in place if the object
 * moved.
 */
1561 scan_finalizer_entries (FinalizeReadyEntry *list, ScanCopyContext ctx)
1563 CopyOrMarkObjectFunc copy_func = ctx.copy_func;
1564 SgenGrayQueue *queue = ctx.queue;
1565 FinalizeReadyEntry *fin;
1567 for (fin = list; fin; fin = fin->next) {
1570 SGEN_LOG (5, "Scan of fin ready object: %p (%s)\n", fin->object, safe_name (fin->object));
1571 copy_func (&fin->object, queue);
/* Human-readable name for a generation constant; asserts on any other
 * value. */
1576 generation_name (int generation)
1578 switch (generation) {
1579 case GENERATION_NURSERY: return "nursery";
1580 case GENERATION_OLD: return "old";
1581 default: g_assert_not_reached ();
/* Exported wrapper around the static generation_name helper. */
1586 sgen_generation_name (int generation)
1588 return generation_name (generation);
1591 SgenObjectOperations *
1592 sgen_get_current_object_ops (void){
1593 return ¤t_object_ops;
/*
 * Finish the marking phase for @generation: drain the gray stack to a
 * fixed point, then run the ordered post-mark phases — ephemerons,
 * togglerefs, bridge processing, non-tracking weak links, finalizers,
 * a second ephemeron pass, and finally tracking weak links. The phase
 * order matters; see the inline comments below.
 */
1598 finish_gray_stack (int generation, GrayQueue *queue)
1602 int done_with_ephemerons, ephemeron_rounds = 0;
1603 CopyOrMarkObjectFunc copy_func = current_object_ops.copy_or_mark_object;
1604 ScanObjectFunc scan_func = current_object_ops.scan_object;
1605 ScanCopyContext ctx = { scan_func, copy_func, queue };
/* For a nursery collection, restrict range-based operations to the
 * nursery; for old-gen, cover the whole address space. */
1606 char *start_addr = generation == GENERATION_NURSERY ? sgen_get_nursery_start () : NULL;
1607 char *end_addr = generation == GENERATION_NURSERY ? sgen_get_nursery_end () : (char*)-1;
1610 * We copied all the reachable objects. Now it's the time to copy
1611 * the objects that were not referenced by the roots, but by the copied objects.
1612 * we built a stack of objects pointed to by gray_start: they are
1613 * additional roots and we may add more items as we go.
1614 * We loop until gray_start == gray_objects which means no more objects have
1615 * been added. Note this is iterative: no recursion is involved.
1616 * We need to walk the LO list as well in search of marked big objects
1617 * (use a flag since this is needed only on major collections). We need to loop
1618 * here as well, so keep a counter of marked LO (increasing it in copy_object).
1619 * To achieve better cache locality and cache usage, we drain the gray stack
1620 * frequently, after each object is copied, and just finish the work here.
1622 sgen_drain_gray_stack (-1, ctx);
1624 SGEN_LOG (2, "%s generation done", generation_name (generation));
1627 Reset bridge data, we might have lingering data from a previous collection if this is a major
1628 collection trigged by minor overflow.
1630 We must reset the gathered bridges since their original block might be evacuated due to major
1631 fragmentation in the meanwhile and the bridge code should not have to deal with that.
1633 if (sgen_need_bridge_processing ())
1634 sgen_bridge_reset_data ();
1637 * Walk the ephemeron tables marking all values with reachable keys. This must be completely done
1638 * before processing finalizable objects and non-tracking weak links to avoid finalizing/clearing
1639 * objects that are in fact reachable.
1641 done_with_ephemerons = 0;
1643 done_with_ephemerons = mark_ephemerons_in_range (ctx);
1644 sgen_drain_gray_stack (-1, ctx);
1646 } while (!done_with_ephemerons);
1648 sgen_mark_togglerefs (start_addr, end_addr, ctx);
1650 if (sgen_need_bridge_processing ()) {
1651 /*Make sure the gray stack is empty before we process bridge objects so we get liveness right*/
1652 sgen_drain_gray_stack (-1, ctx);
1653 sgen_collect_bridge_objects (generation, ctx);
1654 if (generation == GENERATION_OLD)
1655 sgen_collect_bridge_objects (GENERATION_NURSERY, ctx);
1658 Do the first bridge step here, as the collector liveness state will become useless after that.
1660 An important optimization is to only proccess the possibly dead part of the object graph and skip
1661 over all live objects as we transitively know everything they point must be alive too.
1663 The above invariant is completely wrong if we let the gray queue be drained and mark/copy everything.
1665 This has the unfortunate side effect of making overflow collections perform the first step twice, but
1666 given we now have heuristics that perform major GC in anticipation of minor overflows this should not
1669 sgen_bridge_processing_stw_step ();
1673 Make sure we drain the gray stack before processing disappearing links and finalizers.
1674 If we don't make sure it is empty we might wrongly see a live object as dead.
1676 sgen_drain_gray_stack (-1, ctx);
1679 We must clear weak links that don't track resurrection before processing object ready for
1680 finalization so they can be cleared before that.
1682 sgen_null_link_in_range (generation, TRUE, ctx);
1683 if (generation == GENERATION_OLD)
1684 sgen_null_link_in_range (GENERATION_NURSERY, TRUE, ctx);
1687 /* walk the finalization queue and move also the objects that need to be
1688 * finalized: use the finalized objects as new roots so the objects they depend
1689 * on are also not reclaimed. As with the roots above, only objects in the nursery
1690 * are marked/copied.
1692 sgen_finalize_in_range (generation, ctx);
1693 if (generation == GENERATION_OLD)
1694 sgen_finalize_in_range (GENERATION_NURSERY, ctx);
1695 /* drain the new stack that might have been created */
1696 SGEN_LOG (6, "Precise scan of gray area post fin");
1697 sgen_drain_gray_stack (-1, ctx);
1700 * This must be done again after processing finalizable objects since CWL slots are cleared only after the key is finalized.
1702 done_with_ephemerons = 0;
1704 done_with_ephemerons = mark_ephemerons_in_range (ctx);
1705 sgen_drain_gray_stack (-1, ctx);
1707 } while (!done_with_ephemerons);
1710 * Clear ephemeron pairs with unreachable keys.
1711 * We pass the copy func so we can figure out if an array was promoted or not.
1713 clear_unreachable_ephemerons (ctx);
1716 * We clear togglerefs only after all possible chances of revival are done.
1717 * This is semantically more inline with what users expect and it allows for
1718 * user finalizers to correctly interact with TR objects.
1720 sgen_clear_togglerefs (start_addr, end_addr, ctx);
1723 SGEN_LOG (2, "Finalize queue handling scan for %s generation: %d usecs %d ephemeron rounds", generation_name (generation), TV_ELAPSED (atv, btv), ephemeron_rounds);
1726 * handle disappearing links
1727 * Note we do this after checking the finalization queue because if an object
1728 * survives (at least long enough to be finalized) we don't clear the link.
1729 * This also deals with a possible issue with the monitor reclamation: with the Boehm
1730 * GC a finalized object my lose the monitor because it is cleared before the finalizer is
1733 g_assert (sgen_gray_object_queue_is_empty (queue));
1735 sgen_null_link_in_range (generation, FALSE, ctx);
1736 if (generation == GENERATION_OLD)
1737 sgen_null_link_in_range (GENERATION_NURSERY, FALSE, ctx);
1738 if (sgen_gray_object_queue_is_empty (queue))
1740 sgen_drain_gray_stack (-1, ctx);
/* Post-condition: all gray objects processed. */
1743 g_assert (sgen_gray_object_queue_is_empty (queue));
/*
 * Debug check: every non-NULL scan-start entry of @section must point
 * at a plausible object (size between a minimal object header and the
 * small-object limit).
 */
1747 sgen_check_section_scan_starts (GCMemSection *section)
1750 for (i = 0; i < section->num_scan_start; ++i) {
1751 if (section->scan_starts [i]) {
1752 mword size = safe_object_get_size ((MonoObject*) section->scan_starts [i]);
1753 g_assert (size >= sizeof (MonoObject) && size <= MAX_SMALL_OBJ_SIZE);
/* Run the scan-starts consistency check on the nursery and the major
 * heap, if the check is enabled. */
1759 check_scan_starts (void)
1761 if (!do_scan_starts_check)
1763 sgen_check_section_scan_starts (nursery_section);
1764 major_collector.check_scan_starts ();
/*
 * Precisely scan every registered root of @root_type, restricted to
 * pointers into [addr_start, addr_end).
 */
1768 scan_from_registered_roots (char *addr_start, char *addr_end, int root_type, ScanCopyContext ctx)
1772 SGEN_HASH_TABLE_FOREACH (&roots_hash [root_type], start_root, root) {
1773 SGEN_LOG (6, "Precise root scan %p-%p (desc: %p)", start_root, root->end_root, (void*)root->root_desc);
1774 precisely_scan_objects_from (start_root, (void**)root->end_root, addr_start, addr_end, root->root_desc, ctx);
1775 } SGEN_HASH_TABLE_FOREACH_END;
/* Emit one <occupied> element to the heap-dump file for the byte range
 * [start, end), with the offset relative to @section_start. */
1779 sgen_dump_occupied (char *start, char *end, char *section_start)
1781 fprintf (heap_dump_file, "<occupied offset=\"%td\" size=\"%td\"/>\n", start - section_start, end - start);
/*
 * Dump one memory section to the heap-dump XML file: walk it object by
 * object, coalescing contiguous occupied ranges, and emit an <object>
 * element per object. NULL words are skipped as unallocated space.
 */
1785 sgen_dump_section (GCMemSection *section, const char *type)
1787 char *start = section->data;
1788 char *end = section->data + section->size;
1789 char *occ_start = NULL;
1791 char *old_start = NULL; /* just for debugging */
1793 fprintf (heap_dump_file, "<section type=\"%s\" size=\"%lu\">\n", type, (unsigned long)section->size);
1795 while (start < end) {
/* A NULL first word means no object here: flush any pending occupied
 * range and skip ahead one pointer. */
1799 if (!*(void**)start) {
1801 sgen_dump_occupied (occ_start, start, section->data);
1804 start += sizeof (void*); /* should be ALLOC_ALIGN, really */
1807 g_assert (start < section->next_data);
1812 vt = (GCVTable*)LOAD_VTABLE (start);
1815 size = ALIGN_UP (safe_object_get_size ((MonoObject*) start));
/* NOTE(review): "%d" for a pointer difference (start - section->data)
 * is dubious on LP64; surrounding lines are not visible here, so left
 * as-is — should probably be "%td". */
1818 fprintf (heap_dump_file, "<object offset=\"%d\" class=\"%s.%s\" size=\"%d\"/>\n",
1819 start - section->data,
1820 vt->klass->name_space, vt->klass->name,
/* Flush the final occupied range, if any, before closing the section. */
1828 sgen_dump_occupied (occ_start, start, section->data);
1830 fprintf (heap_dump_file, "</section>\n");
/*
 * Write one <object> element for @obj to the heap-dump file. The class
 * name is copied into a static buffer with XML-hostile characters
 * (angle brackets, quotes) stripped; when @dump_location is set, a
 * location attribute (nursery / major / presumably LOS) is added.
 */
1834 dump_object (MonoObject *obj, gboolean dump_location)
1836 static char class_name [1024];
1838 MonoClass *class = mono_object_class (obj);
1842 * Python's XML parser is too stupid to parse angle brackets
1843 * in strings, so we just ignore them;
1846 while (class->name [i] && j < sizeof (class_name) - 1) {
1847 if (!strchr ("<>\"", class->name [i]))
1848 class_name [j++] = class->name [i];
1851 g_assert (j < sizeof (class_name));
1854 fprintf (heap_dump_file, "<object class=\"%s.%s\" size=\"%zd\"",
1855 class->name_space, class_name,
1856 safe_object_get_size (obj));
1857 if (dump_location) {
1858 const char *location;
1859 if (ptr_in_nursery (obj))
1860 location = "nursery";
1861 else if (safe_object_get_size (obj) <= MAX_SMALL_OBJ_SIZE)
1865 fprintf (heap_dump_file, " location=\"%s\"", location);
1867 fprintf (heap_dump_file, "/>\n");
/*
 * Write a full heap dump for one collection: collection header,
 * internal memory usage, pin statistics, the pinned-object list, the
 * nursery section, the major heap, and the large-object space.
 */
1871 dump_heap (const char *type, int num, const char *reason)
1876 fprintf (heap_dump_file, "<collection type=\"%s\" num=\"%d\"", type, num);
1878 fprintf (heap_dump_file, " reason=\"%s\"", reason);
1879 fprintf (heap_dump_file, ">\n");
1880 fprintf (heap_dump_file, "<other-mem-usage type=\"mempools\" size=\"%ld\"/>\n", mono_mempool_get_bytes_allocated ());
1881 sgen_dump_internal_mem_usage (heap_dump_file);
1882 fprintf (heap_dump_file, "<pinned type=\"stack\" bytes=\"%zu\"/>\n", sgen_pin_stats_get_pinned_byte_count (PIN_TYPE_STACK));
1883 /* fprintf (heap_dump_file, "<pinned type=\"static-data\" bytes=\"%d\"/>\n", pinned_byte_counts [PIN_TYPE_STATIC_DATA]); */
1884 fprintf (heap_dump_file, "<pinned type=\"other\" bytes=\"%zu\"/>\n", sgen_pin_stats_get_pinned_byte_count (PIN_TYPE_OTHER));
1886 fprintf (heap_dump_file, "<pinned-objects>\n");
1887 for (list = sgen_pin_stats_get_object_list (); list; list = list->next)
1888 dump_object (list->obj, TRUE);
1889 fprintf (heap_dump_file, "</pinned-objects>\n");
1891 sgen_dump_section (nursery_section, "nursery");
1893 major_collector.dump_heap (heap_dump_file);
1895 fprintf (heap_dump_file, "<los>\n");
1896 for (bigobj = los_object_list; bigobj; bigobj = bigobj->next)
1897 dump_object ((MonoObject*)bigobj->data, FALSE);
1898 fprintf (heap_dump_file, "</los>\n");
1900 fprintf (heap_dump_file, "</collection>\n");
/*
 * Record an (old address, new address) pair for the profiler's GC-move
 * events. Pairs are buffered in moved_objects and flushed to
 * mono_profiler_gc_moves when the buffer fills up. Not supported under
 * the parallel collector (asserted).
 */
1904 sgen_register_moved_object (void *obj, void *destination)
1906 g_assert (mono_profiler_events & MONO_PROFILE_GC_MOVES);
1908 /* FIXME: handle this for parallel collector */
1909 g_assert (!sgen_collection_is_parallel ());
1911 if (moved_objects_idx == MOVED_OBJECTS_NUM) {
1912 mono_profiler_gc_moves (moved_objects, moved_objects_idx);
1913 moved_objects_idx = 0;
/* Entries are stored pairwise: source address then destination. */
1915 moved_objects [moved_objects_idx++] = obj;
1916 moved_objects [moved_objects_idx++] = destination;
/* NOTE(review): this is the body of the statistics-initialization
 * function (its header line is above this view — presumably
 * init_stats; confirm against the full file). Registers all GC timing
 * and, under HEAVY_STATISTICS, write-barrier/copy/scan counters with
 * the mono counters subsystem; runs at most once via 'inited'. */
1922 static gboolean inited = FALSE;
/* Minor (nursery) collection phase timers. */
1927 mono_counters_register ("Minor fragment clear", MONO_COUNTER_GC | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &time_minor_pre_collection_fragment_clear);
1928 mono_counters_register ("Minor pinning", MONO_COUNTER_GC | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &time_minor_pinning);
1929 mono_counters_register ("Minor scan remembered set", MONO_COUNTER_GC | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &time_minor_scan_remsets);
1930 mono_counters_register ("Minor scan pinned", MONO_COUNTER_GC | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &time_minor_scan_pinned);
1931 mono_counters_register ("Minor scan registered roots", MONO_COUNTER_GC | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &time_minor_scan_registered_roots);
1932 mono_counters_register ("Minor scan thread data", MONO_COUNTER_GC | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &time_minor_scan_thread_data);
1933 mono_counters_register ("Minor finish gray stack", MONO_COUNTER_GC | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &time_minor_finish_gray_stack);
1934 mono_counters_register ("Minor fragment creation", MONO_COUNTER_GC | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &time_minor_fragment_creation);
/* Major collection phase timers. */
1936 mono_counters_register ("Major fragment clear", MONO_COUNTER_GC | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &time_major_pre_collection_fragment_clear);
1937 mono_counters_register ("Major pinning", MONO_COUNTER_GC | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &time_major_pinning);
1938 mono_counters_register ("Major scan pinned", MONO_COUNTER_GC | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &time_major_scan_pinned);
1939 mono_counters_register ("Major scan registered roots", MONO_COUNTER_GC | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &time_major_scan_registered_roots);
1940 mono_counters_register ("Major scan thread data", MONO_COUNTER_GC | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &time_major_scan_thread_data);
1941 mono_counters_register ("Major scan alloc_pinned", MONO_COUNTER_GC | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &time_major_scan_alloc_pinned);
1942 mono_counters_register ("Major scan finalized", MONO_COUNTER_GC | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &time_major_scan_finalized);
1943 mono_counters_register ("Major scan big objects", MONO_COUNTER_GC | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &time_major_scan_big_objects);
1944 mono_counters_register ("Major finish gray stack", MONO_COUNTER_GC | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &time_major_finish_gray_stack);
1945 mono_counters_register ("Major free big objects", MONO_COUNTER_GC | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &time_major_free_bigobjs);
1946 mono_counters_register ("Major LOS sweep", MONO_COUNTER_GC | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &time_major_los_sweep);
1947 mono_counters_register ("Major sweep", MONO_COUNTER_GC | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &time_major_sweep);
1948 mono_counters_register ("Major fragment creation", MONO_COUNTER_GC | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &time_major_fragment_creation);
1950 mono_counters_register ("Number of pinned objects", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_pinned_objects);
/* Extra counters only compiled in for heavy-statistics builds. */
1952 #ifdef HEAVY_STATISTICS
1953 mono_counters_register ("WBarrier remember pointer", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_add_to_global_remset);
1954 mono_counters_register ("WBarrier set field", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_set_field);
1955 mono_counters_register ("WBarrier set arrayref", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_set_arrayref);
1956 mono_counters_register ("WBarrier arrayref copy", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_arrayref_copy);
1957 mono_counters_register ("WBarrier generic store called", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_generic_store);
1958 mono_counters_register ("WBarrier generic atomic store called", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_generic_store_atomic);
1959 mono_counters_register ("WBarrier set root", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_set_root);
1960 mono_counters_register ("WBarrier value copy", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_value_copy);
1961 mono_counters_register ("WBarrier object copy", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_object_copy);
1963 mono_counters_register ("# objects allocated degraded", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_objects_alloced_degraded);
1964 mono_counters_register ("bytes allocated degraded", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_bytes_alloced_degraded);
1966 mono_counters_register ("# copy_object() called (nursery)", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_copy_object_called_nursery);
1967 mono_counters_register ("# objects copied (nursery)", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_objects_copied_nursery);
1968 mono_counters_register ("# copy_object() called (major)", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_copy_object_called_major);
1969 mono_counters_register ("# objects copied (major)", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_objects_copied_major);
1971 mono_counters_register ("# scan_object() called (nursery)", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_scan_object_called_nursery);
1972 mono_counters_register ("# scan_object() called (major)", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_scan_object_called_major);
1974 mono_counters_register ("Slots allocated in vain", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_slots_allocated_in_vain);
1976 mono_counters_register ("# nursery copy_object() failed from space", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_nursery_copy_object_failed_from_space);
1977 mono_counters_register ("# nursery copy_object() failed forwarded", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_nursery_copy_object_failed_forwarded);
1978 mono_counters_register ("# nursery copy_object() failed pinned", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_nursery_copy_object_failed_pinned);
1979 mono_counters_register ("# nursery copy_object() failed to space", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_nursery_copy_object_failed_to_space);
1981 sgen_nursery_allocator_init_heavy_stats ();
1982 sgen_alloc_init_heavy_stats ();
/* Reset the running byte count of objects pinned because an allocation
 * failed; done at the start of each collection so the count reflects
 * only the current cycle. */
1990 reset_pinned_from_failed_allocation (void)
1992 bytes_pinned_from_failed_allocation = 0;
/* Record that `objsize` bytes had to stay pinned because an allocation
 * failed.  The accumulated total is consulted after the collection to
 * decide whether a follow-up major collection is warranted. */
1996 sgen_set_pinned_from_failed_allocation (mword objsize)
1998 bytes_pinned_from_failed_allocation += objsize;
/* Return whether the collection currently in progress runs in parallel
 * mode.  The answer depends on which generation is being collected:
 * the minor and major collectors carry independent parallel flags. */
2002 sgen_collection_is_parallel (void)
2004 switch (current_collection_generation) {
2005 case GENERATION_NURSERY:
2006 return nursery_collection_is_parallel;
2007 case GENERATION_OLD:
2008 return major_collector.is_parallel;
/* Any other value (e.g. -1 when no collection is active) is a caller
 * bug and aborts via g_error. */
2010 g_error ("Invalid current generation %d", current_collection_generation);
/* Return whether the collection currently in progress is the concurrent
 * major collection.  Nursery collections are never concurrent; for the
 * old generation this mirrors the global in-progress flag. */
2015 sgen_collection_is_concurrent (void)
2017 switch (current_collection_generation) {
2018 case GENERATION_NURSERY:
2020 case GENERATION_OLD:
2021 return concurrent_collection_in_progress;
/* Reached only with an invalid generation value -- hard error. */
2023 g_error ("Invalid current generation %d", current_collection_generation);
/* Accessor for the global flag that is set while a concurrent major
 * collection is running (see major_start_collection/major_finish_collection). */
2028 sgen_concurrent_collection_in_progress (void)
2030 return concurrent_collection_in_progress;
/* Worker-job payload for job_finish_remembered_set_scan: carries the
 * heap range (heap_start/heap_end fields declared above) to scan. */
2037 } FinishRememberedSetScanJobData;
/* Worker-thread job: finish scanning the remembered sets for the given
 * heap range, pushing found references onto this worker's gray queue.
 * Frees its job-data struct when done (ownership transferred by the
 * enqueuer). */
2040 job_finish_remembered_set_scan (WorkerData *worker_data, void *job_data_untyped)
2042 FinishRememberedSetScanJobData *job_data = job_data_untyped;
2044 remset.finish_scan_remsets (job_data->heap_start, job_data->heap_end, sgen_workers_get_job_gray_queue (worker_data));
2045 sgen_free_internal_dynamic (job_data, sizeof (FinishRememberedSetScanJobData), INTERNAL_MEM_WORKER_JOB_DATA);
/* Worker-job payload for job_scan_from_registered_roots: the copy/mark
 * and scan callbacks plus the heap range and root type to process. */
2050 CopyOrMarkObjectFunc copy_or_mark_func;
2051 ScanObjectFunc scan_func;
2055 } ScanFromRegisteredRootsJobData;
/* Worker-thread job: scan registered GC roots of one root type within
 * the given heap range, using the scan/copy functions packed into the
 * job data.  Frees the job data afterwards. */
2058 job_scan_from_registered_roots (WorkerData *worker_data, void *job_data_untyped)
2060 ScanFromRegisteredRootsJobData *job_data = job_data_untyped;
2061 ScanCopyContext ctx = { job_data->scan_func, job_data->copy_or_mark_func,
2062 sgen_workers_get_job_gray_queue (worker_data) };
2064 scan_from_registered_roots (job_data->heap_start, job_data->heap_end, job_data->root_type, ctx);
2065 sgen_free_internal_dynamic (job_data, sizeof (ScanFromRegisteredRootsJobData), INTERNAL_MEM_WORKER_JOB_DATA);
/* Worker-job payload for job_scan_thread_data: heap range within which
 * thread stacks/registers are scanned for references. */
2072 } ScanThreadDataJobData;
/* Worker-thread job: conservatively scan all managed threads' data
 * (stacks, registers) for pointers into the given heap range.  Frees
 * the job data afterwards. */
2075 job_scan_thread_data (WorkerData *worker_data, void *job_data_untyped)
2077 ScanThreadDataJobData *job_data = job_data_untyped;
2079 scan_thread_data (job_data->heap_start, job_data->heap_end, TRUE,
2080 sgen_workers_get_job_gray_queue (worker_data));
2081 sgen_free_internal_dynamic (job_data, sizeof (ScanThreadDataJobData), INTERNAL_MEM_WORKER_JOB_DATA);
/* Worker-job payload for job_scan_finalizer_entries: the finalizer
 * entry list (ready or critical) whose referents must be kept alive. */
2086 FinalizeReadyEntry *list;
2087 } ScanFinalizerEntriesJobData;
/* Worker-thread job: mark/copy the objects referenced by a finalizer
 * entry list so they survive until their finalizers run.  Uses the
 * current copy_or_mark_object op; no scan function is needed because
 * scan_finalizer_entries only touches the entries' referents.  Frees
 * the job data afterwards. */
2090 job_scan_finalizer_entries (WorkerData *worker_data, void *job_data_untyped)
2092 ScanFinalizerEntriesJobData *job_data = job_data_untyped;
2093 ScanCopyContext ctx = { NULL, current_object_ops.copy_or_mark_object, sgen_workers_get_job_gray_queue (worker_data) };
2095 scan_finalizer_entries (job_data->list, ctx);
2096 sgen_free_internal_dynamic (job_data, sizeof (ScanFinalizerEntriesJobData), INTERNAL_MEM_WORKER_JOB_DATA);
/* Worker-thread job: scan the major heap's mod-union card table.  Only
 * meaningful while finishing a concurrent collection (asserted), since
 * the mod union accumulates mutator writes made during concurrent
 * marking. */
2100 job_scan_major_mod_union_cardtable (WorkerData *worker_data, void *job_data_untyped)
2102 g_assert (concurrent_collection_in_progress);
2103 major_collector.scan_card_table (TRUE, sgen_workers_get_job_gray_queue (worker_data));
/* Worker-thread job: scan the large-object-space mod-union card table;
 * the LOS counterpart of job_scan_major_mod_union_cardtable. */
2107 job_scan_los_mod_union_cardtable (WorkerData *worker_data, void *job_data_untyped)
2109 g_assert (concurrent_collection_in_progress);
2110 sgen_los_scan_card_table (TRUE, sgen_workers_get_job_gray_queue (worker_data));
/* Debug check: log any nursery scan-start entry that points strictly
 * inside the object span [start, end) -- scan starts must never land
 * in the middle of an object. */
2114 verify_scan_starts (char *start, char *end)
2118 for (i = 0; i < nursery_section->num_scan_start; ++i) {
2119 char *addr = nursery_section->scan_starts [i];
2120 if (addr > start && addr < end)
2121 SGEN_LOG (1, "NFC-BAD SCAN START [%zu] %p for obj [%p %p]", i, addr, start, end);
/* Debug walk over the whole nursery (gated by do_verify_nursery):
 * steps object by object, logging forwarded/pinned objects, validating
 * scan starts, and -- when do_dump_nursery_content is set -- dumping
 * every hole and object encountered. */
2126 verify_nursery (void)
2128 char *start, *end, *cur, *hole_start;
2130 if (!do_verify_nursery)
2133 /*This cleans up unused fragments */
2134 sgen_nursery_allocator_prepare_for_pinning ();
2136 hole_start = start = cur = sgen_get_nursery_start ();
2137 end = sgen_get_nursery_end ();
/* A NULL first word means unallocated space: skip a word at a time. */
2142 if (!*(void**)cur) {
2143 cur += sizeof (void*);
2147 if (object_is_forwarded (cur))
2148 SGEN_LOG (1, "FORWARDED OBJ %p", cur);
2149 else if (object_is_pinned (cur))
2150 SGEN_LOG (1, "PINNED OBJ %p", cur);
/* ss is the raw object size, size the allocation-aligned size used to
 * advance the cursor. */
2152 ss = safe_object_get_size ((MonoObject*)cur);
2153 size = ALIGN_UP (safe_object_get_size ((MonoObject*)cur));
2154 verify_scan_starts (cur, cur + size);
2155 if (do_dump_nursery_content) {
2156 if (cur > hole_start)
2157 SGEN_LOG (1, "HOLE [%p %p %d]", hole_start, cur, (int)(cur - hole_start));
2158 SGEN_LOG (1, "OBJ [%p %p %d %d %s %d]", cur, cur + size, (int)size, (int)ss, sgen_safe_name ((MonoObject*)cur), (gpointer)LOAD_VTABLE (cur) == sgen_get_array_fill_vtable ());
2166 * Checks that no objects in the nursery are forwarded or pinned. This
2167 * is a precondition to restarting the mutator while doing a
2168 * concurrent collection. Note that we don't clear fragments because
2169 * we depend on that having happened earlier.
/* Debug assertion walk: every object in the nursery must be neither
 * forwarded nor pinned (precondition for restarting the mutator during
 * a concurrent collection -- see the comment above). */
2172 check_nursery_is_clean (void)
2174 char *start, *end, *cur;
2176 start = cur = sgen_get_nursery_start ();
2177 end = sgen_get_nursery_end ();
/* NULL first word == unallocated space; advance a word at a time. */
2182 if (!*(void**)cur) {
2183 cur += sizeof (void*);
2187 g_assert (!object_is_forwarded (cur));
2188 g_assert (!object_is_pinned (cur));
2190 ss = safe_object_get_size ((MonoObject*)cur);
2191 size = ALIGN_UP (safe_object_get_size ((MonoObject*)cur));
2192 verify_scan_starts (cur, cur + size);
/* Initialize the main gray queue for this collection.  Parallel or
 * concurrent collections get an alloc-prepare hook that redirects
 * overflow sections to the workers' distribute queue; otherwise a
 * plain serial queue suffices. */
2199 init_gray_queue (void)
2201 if (sgen_collection_is_parallel () || sgen_collection_is_concurrent ()) {
2202 sgen_workers_init_distribute_gray_queue ();
2203 sgen_gray_object_queue_init_with_alloc_prepare (&gray_queue, NULL,
2204 gray_queue_redirect, sgen_workers_get_distribute_section_gray_queue ());
2206 sgen_gray_object_queue_init (&gray_queue, NULL);
/* Iteration callback (e.g. for sgen_cement_iterate): stage `obj` for
 * pinning; `size` and `data` are unused. */
2211 pin_stage_object_callback (char *obj, size_t size, void *data)
2213 sgen_pin_stage_ptr (obj);
2214 /* FIXME: do pin stats if enabled */
2218 * Collect objects in the nursery. Returns whether to trigger a major
/* Run a minor (nursery) collection with the world already stopped.
 * Phases: pin from roots and cemented objects, scan remembered sets,
 * registered roots, thread data and finalizer entries (as worker
 * jobs), finish the gray stack, then rebuild nursery fragments and
 * clear TLABs.  `unpin_queue` collects objects to unpin afterwards
 * (may be NULL); `finish_up_concurrent_mark` is forwarded to heap
 * checks.  Returns whether a major collection should follow. */
2222 collect_nursery (SgenGrayQueue *unpin_queue, gboolean finish_up_concurrent_mark)
2224 gboolean needs_major;
2225 size_t max_garbage_amount;
2227 FinishRememberedSetScanJobData *frssjd;
2228 ScanFromRegisteredRootsJobData *scrrjd_normal, *scrrjd_wbarrier;
2229 ScanFinalizerEntriesJobData *sfejd_fin_ready, *sfejd_critical_fin;
2230 ScanThreadDataJobData *stdjd;
2231 mword fragment_total;
2232 ScanCopyContext ctx;
2236 if (disable_minor_collections)
2239 TV_GETTIME (last_minor_collection_start_tv);
2240 atv = last_minor_collection_start_tv;
2242 MONO_GC_BEGIN (GENERATION_NURSERY);
2243 binary_protocol_collection_begin (gc_stats.minor_gc_count, GENERATION_NURSERY);
2247 #ifndef DISABLE_PERFCOUNTERS
2248 mono_perfcounters->gc_collections0++;
/* Select serial vs parallel minor object ops for this cycle. */
2251 current_collection_generation = GENERATION_NURSERY;
2252 if (sgen_collection_is_parallel ())
2253 current_object_ops = sgen_minor_collector.parallel_ops;
2255 current_object_ops = sgen_minor_collector.serial_ops;
2257 reset_pinned_from_failed_allocation ();
2259 check_scan_starts ();
2261 sgen_nursery_alloc_prepare_for_minor ();
2265 nursery_next = sgen_nursery_alloc_get_upper_alloc_bound ();
2266 /* FIXME: optimize later to use the higher address where an object can be present */
2267 nursery_next = MAX (nursery_next, sgen_get_nursery_end ());
2269 SGEN_LOG (1, "Start nursery collection %d %p-%p, size: %d", gc_stats.minor_gc_count, sgen_get_nursery_start (), nursery_next, (int)(nursery_next - sgen_get_nursery_start ()));
2270 max_garbage_amount = nursery_next - sgen_get_nursery_start ();
2271 g_assert (nursery_section->size >= max_garbage_amount);
2273 /* world must be stopped already */
2275 time_minor_pre_collection_fragment_clear += TV_ELAPSED (atv, btv);
2277 if (xdomain_checks) {
2278 sgen_clear_nursery_fragments ();
2279 sgen_check_for_xdomain_refs ();
2282 nursery_section->next_data = nursery_next;
2284 major_collector.start_nursery_collection ();
2286 sgen_memgov_minor_collection_start ();
2290 gc_stats.minor_gc_count ++;
2292 MONO_GC_CHECKPOINT_1 (GENERATION_NURSERY);
2294 sgen_process_fin_stage_entries ();
2295 sgen_process_dislink_stage_entries ();
2297 MONO_GC_CHECKPOINT_2 (GENERATION_NURSERY);
2299 /* pin from pinned handles */
2300 sgen_init_pinning ();
2301 mono_profiler_gc_event (MONO_GC_EVENT_MARK_START, 0);
2302 pin_from_roots (sgen_get_nursery_start (), nursery_next, WORKERS_DISTRIBUTE_GRAY_QUEUE);
2303 /* pin cemented objects */
2304 sgen_cement_iterate (pin_stage_object_callback, NULL);
2305 /* identify pinned objects */
2306 sgen_optimize_pin_queue ();
2307 sgen_pinning_setup_section (nursery_section);
/* Pin-only pass: no scan/copy functions, just mark pins. */
2308 ctx.scan_func = NULL;
2309 ctx.copy_func = NULL;
2310 ctx.queue = WORKERS_DISTRIBUTE_GRAY_QUEUE;
2311 pin_objects_in_nursery (ctx);
2312 sgen_pinning_trim_queue_to_section (nursery_section);
2315 time_minor_pinning += TV_ELAPSED (btv, atv);
2316 SGEN_LOG (2, "Finding pinned pointers: %zd in %d usecs", sgen_get_pinned_count (), TV_ELAPSED (btv, atv));
2317 SGEN_LOG (4, "Start scan with %zd pinned objects", sgen_get_pinned_count ());
2319 MONO_GC_CHECKPOINT_3 (GENERATION_NURSERY);
2321 if (whole_heap_check_before_collection) {
2322 sgen_clear_nursery_fragments ();
2323 sgen_check_whole_heap (finish_up_concurrent_mark);
2325 if (consistency_check_at_minor_collection)
2326 sgen_check_consistency ();
2328 sgen_workers_start_all_workers ();
2329 sgen_workers_start_marking ();
/* Enqueue the remembered-set scan over the live nursery range. */
2331 frssjd = sgen_alloc_internal_dynamic (sizeof (FinishRememberedSetScanJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
2332 frssjd->heap_start = sgen_get_nursery_start ();
2333 frssjd->heap_end = nursery_next;
2334 sgen_workers_enqueue_job (job_finish_remembered_set_scan, frssjd);
2336 /* we don't have complete write barrier yet, so we scan all the old generation sections */
2338 time_minor_scan_remsets += TV_ELAPSED (atv, btv);
2339 SGEN_LOG (2, "Old generation scan: %d usecs", TV_ELAPSED (atv, btv));
2341 MONO_GC_CHECKPOINT_4 (GENERATION_NURSERY);
/* Serial mode: drain the gray stack on this thread right away. */
2343 if (!sgen_collection_is_parallel ()) {
2344 ctx.scan_func = current_object_ops.scan_object;
2345 ctx.copy_func = NULL;
2346 ctx.queue = &gray_queue;
2347 sgen_drain_gray_stack (-1, ctx);
2350 if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS)
2351 report_registered_roots ();
2352 if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS)
2353 report_finalizer_roots ();
2355 time_minor_scan_pinned += TV_ELAPSED (btv, atv);
2357 MONO_GC_CHECKPOINT_5 (GENERATION_NURSERY);
2359 /* registered roots, this includes static fields */
2360 scrrjd_normal = sgen_alloc_internal_dynamic (sizeof (ScanFromRegisteredRootsJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
2361 scrrjd_normal->copy_or_mark_func = current_object_ops.copy_or_mark_object;
2362 scrrjd_normal->scan_func = current_object_ops.scan_object;
2363 scrrjd_normal->heap_start = sgen_get_nursery_start ();
2364 scrrjd_normal->heap_end = nursery_next;
2365 scrrjd_normal->root_type = ROOT_TYPE_NORMAL;
2366 sgen_workers_enqueue_job (job_scan_from_registered_roots, scrrjd_normal);
/* Same scan again for write-barrier roots. */
2368 scrrjd_wbarrier = sgen_alloc_internal_dynamic (sizeof (ScanFromRegisteredRootsJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
2369 scrrjd_wbarrier->copy_or_mark_func = current_object_ops.copy_or_mark_object;
2370 scrrjd_wbarrier->scan_func = current_object_ops.scan_object;
2371 scrrjd_wbarrier->heap_start = sgen_get_nursery_start ();
2372 scrrjd_wbarrier->heap_end = nursery_next;
2373 scrrjd_wbarrier->root_type = ROOT_TYPE_WBARRIER;
2374 sgen_workers_enqueue_job (job_scan_from_registered_roots, scrrjd_wbarrier);
2377 time_minor_scan_registered_roots += TV_ELAPSED (atv, btv);
2379 MONO_GC_CHECKPOINT_6 (GENERATION_NURSERY);
/* Thread stacks/registers as roots. */
2382 stdjd = sgen_alloc_internal_dynamic (sizeof (ScanThreadDataJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
2383 stdjd->heap_start = sgen_get_nursery_start ();
2384 stdjd->heap_end = nursery_next;
2385 sgen_workers_enqueue_job (job_scan_thread_data, stdjd);
2388 time_minor_scan_thread_data += TV_ELAPSED (btv, atv);
2391 MONO_GC_CHECKPOINT_7 (GENERATION_NURSERY);
/* NOTE(review): the unconditional assert below makes the following
 * parallel/concurrent branch dead at this point -- looks intentional
 * (parallel minor disabled here), but confirm against history. */
2393 g_assert (!sgen_collection_is_parallel () && !sgen_collection_is_concurrent ());
2395 if (sgen_collection_is_parallel () || sgen_collection_is_concurrent ())
2396 g_assert (sgen_gray_object_queue_is_empty (&gray_queue));
2398 /* Scan the list of objects ready for finalization. If */
2399 sfejd_fin_ready = sgen_alloc_internal_dynamic (sizeof (ScanFinalizerEntriesJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
2400 sfejd_fin_ready->list = fin_ready_list;
2401 sgen_workers_enqueue_job (job_scan_finalizer_entries, sfejd_fin_ready);
2403 sfejd_critical_fin = sgen_alloc_internal_dynamic (sizeof (ScanFinalizerEntriesJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
2404 sfejd_critical_fin->list = critical_fin_list;
2405 sgen_workers_enqueue_job (job_scan_finalizer_entries, sfejd_critical_fin);
2407 MONO_GC_CHECKPOINT_8 (GENERATION_NURSERY);
2409 finish_gray_stack (GENERATION_NURSERY, &gray_queue);
2411 time_minor_finish_gray_stack += TV_ELAPSED (btv, atv);
2412 mono_profiler_gc_event (MONO_GC_EVENT_MARK_END, 0);
2414 MONO_GC_CHECKPOINT_9 (GENERATION_NURSERY);
2417 * The (single-threaded) finalization code might have done
2418 * some copying/marking so we can only reset the GC thread's
2419 * worker data here instead of earlier when we joined the
2422 sgen_workers_reset_data ();
/* Objects pinned late (failed allocation) require re-sorting pins. */
2424 if (objects_pinned) {
2425 sgen_optimize_pin_queue ();
2426 sgen_pinning_setup_section (nursery_section);
2429 /* walk the pin_queue, build up the fragment list of free memory, unmark
2430 * pinned objects as we go, memzero() the empty fragments so they are ready for the
2433 mono_profiler_gc_event (MONO_GC_EVENT_RECLAIM_START, 0);
2434 fragment_total = sgen_build_nursery_fragments (nursery_section,
2435 nursery_section->pin_queue_start, nursery_section->pin_queue_num_entries,
2437 if (!fragment_total)
2440 /* Clear TLABs for all threads */
2441 sgen_clear_tlabs ();
2443 mono_profiler_gc_event (MONO_GC_EVENT_RECLAIM_END, 0);
2445 time_minor_fragment_creation += TV_ELAPSED (atv, btv);
2446 SGEN_LOG (2, "Fragment creation: %d usecs, %lu bytes available", TV_ELAPSED (atv, btv), (unsigned long)fragment_total);
2448 if (consistency_check_at_minor_collection)
2449 sgen_check_major_refs ();
2451 major_collector.finish_nursery_collection ();
2453 TV_GETTIME (last_minor_collection_end_tv);
2454 gc_stats.minor_gc_time += TV_ELAPSED (last_minor_collection_start_tv, last_minor_collection_end_tv);
2457 dump_heap ("minor", gc_stats.minor_gc_count - 1, NULL);
2459 /* prepare the pin queue for the next collection */
2460 sgen_finish_pinning ();
2461 if (fin_ready_list || critical_fin_list) {
2462 SGEN_LOG (4, "Finalizer-thread wakeup: ready %d", num_ready_finalizers);
2463 mono_gc_finalize_notify ();
2465 sgen_pin_stats_reset ();
2466 /* clear cemented hash */
2467 sgen_cement_clear_below_threshold ();
2469 g_assert (sgen_gray_object_queue_is_empty (&gray_queue));
2471 remset.finish_minor_collection ();
2473 check_scan_starts ();
2475 binary_protocol_flush_buffers (FALSE);
2477 sgen_memgov_minor_collection_end ();
2479 /*objects are late pinned because of lack of memory, so a major is a good call*/
2480 needs_major = objects_pinned > 0;
2481 current_collection_generation = -1;
2484 MONO_GC_END (GENERATION_NURSERY);
2485 binary_protocol_collection_end (gc_stats.minor_gc_count - 1, GENERATION_NURSERY);
2487 if (check_nursery_objects_pinned && !sgen_minor_collector.is_split)
2488 sgen_check_nursery_objects_pinned (unpin_queue != NULL);
/* Area-walk callback: scan one nursery object with the context's scan
 * function, pushing references onto the context's gray queue.  `size`
 * is unused. */
2494 scan_nursery_objects_callback (char *obj, size_t size, ScanCopyContext *ctx)
2496 ctx->scan_func (obj, ctx->queue);
/* Scan every object in the nursery section front to back (used by the
 * concurrent collector with the split nursery, where non-pinned
 * objects can survive in place). */
2500 scan_nursery_objects (ScanCopyContext ctx)
2502 sgen_scan_area_with_callback (nursery_section->data, nursery_section->end_data,
2503 (IterateObjectCallbackFunc)scan_nursery_objects_callback, (void*)&ctx, FALSE);
/* Root-marking phase of a major collection (and of both pauses of a
 * concurrent one).  Pins from roots over the whole heap, pins LOS
 * objects, scans nursery objects as roots, then enqueues worker jobs
 * for registered roots, thread data, finalizer entries and -- when
 * `scan_mod_union` is set -- the mod-union card tables.  On exit
 * `*old_next_pin_slot` (if non-NULL) holds the pin count snapshot. */
2507 major_copy_or_mark_from_roots (size_t *old_next_pin_slot, gboolean finish_up_concurrent_mark, gboolean scan_mod_union)
2512 /* FIXME: only use these values for the precise scan
2513 * note that to_space pointers should be excluded anyway...
2515 char *heap_start = NULL;
2516 char *heap_end = (char*)-1;
2517 gboolean profile_roots = mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS;
2518 GCRootReport root_report = { 0 };
2519 ScanFromRegisteredRootsJobData *scrrjd_normal, *scrrjd_wbarrier;
2520 ScanThreadDataJobData *stdjd;
2521 ScanFinalizerEntriesJobData *sfejd_fin_ready, *sfejd_critical_fin;
2522 ScanCopyContext ctx;
2524 if (concurrent_collection_in_progress) {
2525 /*This cleans up unused fragments */
2526 sgen_nursery_allocator_prepare_for_pinning ();
2528 if (do_concurrent_checks)
2529 check_nursery_is_clean ();
2531 /* The concurrent collector doesn't touch the nursery. */
2532 sgen_nursery_alloc_prepare_for_major ();
2539 /* Pinning depends on this */
2540 sgen_clear_nursery_fragments ();
2542 if (whole_heap_check_before_collection)
2543 sgen_check_whole_heap (finish_up_concurrent_mark);
2546 time_major_pre_collection_fragment_clear += TV_ELAPSED (atv, btv);
2548 if (!sgen_collection_is_concurrent ())
2549 nursery_section->next_data = sgen_get_nursery_end ();
2550 /* we should also coalesce scanning from sections close to each other
2551 * and deal with pointers outside of the sections later.
2555 *major_collector.have_swept = FALSE;
2557 if (xdomain_checks) {
2558 sgen_clear_nursery_fragments ();
2559 sgen_check_for_xdomain_refs ();
2562 if (!concurrent_collection_in_progress) {
2563 /* Remsets are not useful for a major collection */
2564 remset.prepare_for_major_collection ();
2567 sgen_process_fin_stage_entries ();
2568 sgen_process_dislink_stage_entries ();
/* Conservative pinning over the entire heap address range. */
2571 sgen_init_pinning ();
2572 SGEN_LOG (6, "Collecting pinned addresses");
2573 pin_from_roots ((void*)lowest_heap_address, (void*)highest_heap_address, WORKERS_DISTRIBUTE_GRAY_QUEUE);
2575 if (!concurrent_collection_in_progress || finish_up_concurrent_mark) {
2576 if (major_collector.is_concurrent) {
2578 * The concurrent major collector cannot evict
2579 * yet, so we need to pin cemented objects to
2580 * not break some asserts.
2582 * FIXME: We could evict now!
2584 sgen_cement_iterate (pin_stage_object_callback, NULL);
2587 if (!concurrent_collection_in_progress)
2588 sgen_cement_reset ();
2591 sgen_optimize_pin_queue ();
2594 * The concurrent collector doesn't move objects, neither on
2595 * the major heap nor in the nursery, so we can mark even
2596 * before pinning has finished. For the non-concurrent
2597 * collector we start the workers after pinning.
2599 if (concurrent_collection_in_progress) {
2600 sgen_workers_start_all_workers ();
2601 sgen_workers_start_marking ();
2605 * pin_queue now contains all candidate pointers, sorted and
2606 * uniqued. We must do two passes now to figure out which
2607 * objects are pinned.
2609 * The first is to find within the pin_queue the area for each
2610 * section. This requires that the pin_queue be sorted. We
2611 * also process the LOS objects and pinned chunks here.
2613 * The second, destructive, pass is to reduce the section
2614 * areas to pointers to the actually pinned objects.
2616 SGEN_LOG (6, "Pinning from sections");
2617 /* first pass for the sections */
2618 sgen_find_section_pin_queue_start_end (nursery_section);
2619 major_collector.find_pin_queue_start_ends (WORKERS_DISTRIBUTE_GRAY_QUEUE);
2620 /* identify possible pointers to the inside of large objects */
2621 SGEN_LOG (6, "Pinning from large objects");
2622 for (bigobj = los_object_list; bigobj; bigobj = bigobj->next) {
2624 if (sgen_find_optimized_pin_queue_area (bigobj->data, (char*)bigobj->data + sgen_los_object_size (bigobj), &dummy)) {
2625 binary_protocol_pin (bigobj->data, (gpointer)LOAD_VTABLE (bigobj->data), safe_object_get_size (((MonoObject*)(bigobj->data))));
2627 #ifdef ENABLE_DTRACE
2628 if (G_UNLIKELY (MONO_GC_OBJ_PINNED_ENABLED ())) {
2629 MonoVTable *vt = (MonoVTable*)LOAD_VTABLE (bigobj->data);
2630 MONO_GC_OBJ_PINNED ((mword)bigobj->data, sgen_safe_object_get_size ((MonoObject*)bigobj->data), vt->klass->name_space, vt->klass->name, GENERATION_OLD);
/* An already-pinned LOS object is only legal when finishing
 * concurrent mark. */
2634 if (sgen_los_object_is_pinned (bigobj->data)) {
2635 g_assert (finish_up_concurrent_mark);
2638 sgen_los_pin_object (bigobj->data);
2639 if (SGEN_OBJECT_HAS_REFERENCES (bigobj->data))
2640 GRAY_OBJECT_ENQUEUE (WORKERS_DISTRIBUTE_GRAY_QUEUE, bigobj->data);
2641 if (G_UNLIKELY (do_pin_stats))
2642 sgen_pin_stats_register_object ((char*) bigobj->data, safe_object_get_size ((MonoObject*) bigobj->data));
2643 SGEN_LOG (6, "Marked large object %p (%s) size: %lu from roots", bigobj->data, safe_name (bigobj->data), (unsigned long)sgen_los_object_size (bigobj));
2646 add_profile_gc_root (&root_report, bigobj->data, MONO_PROFILE_GC_ROOT_PINNING | MONO_PROFILE_GC_ROOT_MISC, 0);
2650 notify_gc_roots (&root_report);
2651 /* second pass for the sections */
2652 ctx.scan_func = concurrent_collection_in_progress ? current_object_ops.scan_object : NULL;
2653 ctx.copy_func = NULL;
2654 ctx.queue = WORKERS_DISTRIBUTE_GRAY_QUEUE;
2657 * Concurrent mark never follows references into the nursery.
2658 * In the start and finish pauses we must scan live nursery
2659 * objects, though. We could simply scan all nursery objects,
2660 * but that would be conservative. The easiest way is to do a
2661 * nursery collection, which copies all live nursery objects
2662 * (except pinned ones, with the simple nursery) to the major
2663 * heap. Scanning the mod union table later will then scan
2664 * those promoted objects, provided they're reachable. Pinned
2665 * objects in the nursery - which we can trivially find in the
2666 * pinning queue - are treated as roots in the mark pauses.
2668 * The split nursery complicates the latter part because
2669 * non-pinned objects can survive in the nursery. That's why
2670 * we need to do a full front-to-back scan of the nursery,
2671 * marking all objects.
2673 * Non-concurrent mark evacuates from the nursery, so it's
2674 * sufficient to just scan pinned nursery objects.
2676 if (concurrent_collection_in_progress && sgen_minor_collector.is_split) {
2677 scan_nursery_objects (ctx);
2679 pin_objects_in_nursery (ctx);
2680 if (check_nursery_objects_pinned && !sgen_minor_collector.is_split)
2681 sgen_check_nursery_objects_pinned (!concurrent_collection_in_progress || finish_up_concurrent_mark);
2684 major_collector.pin_objects (WORKERS_DISTRIBUTE_GRAY_QUEUE);
2685 if (old_next_pin_slot)
2686 *old_next_pin_slot = sgen_get_pinned_count ();
2689 time_major_pinning += TV_ELAPSED (atv, btv);
2690 SGEN_LOG (2, "Finding pinned pointers: %zd in %d usecs", sgen_get_pinned_count (), TV_ELAPSED (atv, btv));
2691 SGEN_LOG (4, "Start scan with %zd pinned objects", sgen_get_pinned_count ());
2693 major_collector.init_to_space ();
2695 #ifdef SGEN_DEBUG_INTERNAL_ALLOC
2696 main_gc_thread = mono_native_thread_self ();
/* Non-concurrent parallel major: workers start only after pinning. */
2699 if (!concurrent_collection_in_progress && major_collector.is_parallel) {
2700 sgen_workers_start_all_workers ();
2701 sgen_workers_start_marking ();
2704 if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS)
2705 report_registered_roots ();
2707 time_major_scan_pinned += TV_ELAPSED (btv, atv);
2709 /* registered roots, this includes static fields */
2710 scrrjd_normal = sgen_alloc_internal_dynamic (sizeof (ScanFromRegisteredRootsJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
2711 scrrjd_normal->copy_or_mark_func = current_object_ops.copy_or_mark_object;
2712 scrrjd_normal->scan_func = current_object_ops.scan_object;
2713 scrrjd_normal->heap_start = heap_start;
2714 scrrjd_normal->heap_end = heap_end;
2715 scrrjd_normal->root_type = ROOT_TYPE_NORMAL;
2716 sgen_workers_enqueue_job (job_scan_from_registered_roots, scrrjd_normal);
2718 scrrjd_wbarrier = sgen_alloc_internal_dynamic (sizeof (ScanFromRegisteredRootsJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
2719 scrrjd_wbarrier->copy_or_mark_func = current_object_ops.copy_or_mark_object;
2720 scrrjd_wbarrier->scan_func = current_object_ops.scan_object;
2721 scrrjd_wbarrier->heap_start = heap_start;
2722 scrrjd_wbarrier->heap_end = heap_end;
2723 scrrjd_wbarrier->root_type = ROOT_TYPE_WBARRIER;
2724 sgen_workers_enqueue_job (job_scan_from_registered_roots, scrrjd_wbarrier);
2727 time_major_scan_registered_roots += TV_ELAPSED (atv, btv);
2730 stdjd = sgen_alloc_internal_dynamic (sizeof (ScanThreadDataJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
2731 stdjd->heap_start = heap_start;
2732 stdjd->heap_end = heap_end;
2733 sgen_workers_enqueue_job (job_scan_thread_data, stdjd);
2736 time_major_scan_thread_data += TV_ELAPSED (btv, atv);
2739 time_major_scan_alloc_pinned += TV_ELAPSED (atv, btv);
2741 if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS)
2742 report_finalizer_roots ();
2744 /* scan the list of objects ready for finalization */
2745 sfejd_fin_ready = sgen_alloc_internal_dynamic (sizeof (ScanFinalizerEntriesJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
2746 sfejd_fin_ready->list = fin_ready_list;
2747 sgen_workers_enqueue_job (job_scan_finalizer_entries, sfejd_fin_ready);
2749 sfejd_critical_fin = sgen_alloc_internal_dynamic (sizeof (ScanFinalizerEntriesJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
2750 sfejd_critical_fin->list = critical_fin_list;
2751 sgen_workers_enqueue_job (job_scan_finalizer_entries, sfejd_critical_fin);
/* Mod-union scans happen only in the finishing pause of concurrent mark. */
2753 if (scan_mod_union) {
2754 g_assert (finish_up_concurrent_mark);
2756 /* Mod union card table */
2757 sgen_workers_enqueue_job (job_scan_major_mod_union_cardtable, NULL);
2758 sgen_workers_enqueue_job (job_scan_los_mod_union_cardtable, NULL);
2762 time_major_scan_finalized += TV_ELAPSED (btv, atv);
2763 SGEN_LOG (2, "Root scan: %d usecs", TV_ELAPSED (btv, atv));
2766 time_major_scan_big_objects += TV_ELAPSED (atv, btv);
2768 if (concurrent_collection_in_progress) {
2769 /* prepare the pin queue for the next collection */
2770 sgen_finish_pinning ();
2772 sgen_pin_stats_reset ();
2774 if (do_concurrent_checks)
2775 check_nursery_is_clean ();
/* Begin a major collection.  Selects concurrent vs. plain major object
 * ops, resets per-cycle state, notifies the memory governor and the
 * collector backend, and runs the root marking phase.  When starting a
 * concurrent collection (`concurrent` TRUE) the global in-progress
 * flag is raised and cementing is put into concurrent mode. */
2780 major_start_collection (gboolean concurrent, size_t *old_next_pin_slot)
2782 MONO_GC_BEGIN (GENERATION_OLD);
2783 binary_protocol_collection_begin (gc_stats.major_gc_count, GENERATION_OLD);
2785 current_collection_generation = GENERATION_OLD;
2786 #ifndef DISABLE_PERFCOUNTERS
2787 mono_perfcounters->gc_collections1++;
2790 g_assert (sgen_section_gray_queue_is_empty (sgen_workers_get_distribute_section_gray_queue ()));
/* Concurrent start requires a backend that supports it. */
2793 g_assert (major_collector.is_concurrent);
2794 concurrent_collection_in_progress = TRUE;
2796 sgen_cement_concurrent_start ();
2798 current_object_ops = major_collector.major_concurrent_ops;
2800 current_object_ops = major_collector.major_ops;
2803 reset_pinned_from_failed_allocation ();
2805 sgen_memgov_major_collection_start ();
2807 //count_ref_nonref_objs ();
2808 //consistency_check ();
2810 check_scan_starts ();
2813 SGEN_LOG (1, "Start major collection %d", gc_stats.major_gc_count);
2814 gc_stats.major_gc_count ++;
2816 if (major_collector.start_major_collection)
2817 major_collector.start_major_collection ();
/* Not finishing concurrent mark, no mod-union scan at start. */
2819 major_copy_or_mark_from_roots (old_next_pin_slot, FALSE, FALSE);
/* Busy-wait (with sleeps -- body partially elided here) until every GC
 * worker thread reports its queued work as done. */
2823 wait_for_workers_to_finish (void)
2825 while (!sgen_workers_all_done ())
/* NOTE(review): signature line not visible here -- this is the body
 * that joins the worker threads.  Redirect the main gray queue to the
 * workers before joining so no gray objects are lost, then assert the
 * queue really drained. */
2832 if (concurrent_collection_in_progress || major_collector.is_parallel) {
2833 gray_queue_redirect (&gray_queue);
2834 sgen_workers_join ();
2837 g_assert (sgen_gray_object_queue_is_empty (&gray_queue));
2839 #ifdef SGEN_DEBUG_INTERNAL_ALLOC
2840 main_gc_thread = NULL;
/* Finish a major collection: for a concurrent cycle, re-run the root
 * marking (with mod-union scanning if `scan_mod_union`), then finish
 * the gray stack, sweep the LOS list and the major heap, rebuild
 * nursery fragments (non-concurrent only), and clear all per-cycle
 * state.  `old_next_pin_slot` is the pin-count snapshot taken at
 * collection start, used to discard late pins on OOM. */
2845 major_finish_collection (const char *reason, size_t old_next_pin_slot, gboolean scan_mod_union)
2847 LOSObject *bigobj, *prevbo;
2853 if (concurrent_collection_in_progress || major_collector.is_parallel)
2856 if (concurrent_collection_in_progress) {
2857 current_object_ops = major_collector.major_concurrent_ops;
/* Second (finishing) root pass of the concurrent cycle. */
2859 major_copy_or_mark_from_roots (NULL, TRUE, scan_mod_union);
2862 g_assert (sgen_gray_object_queue_is_empty (&gray_queue));
2864 if (do_concurrent_checks)
2865 check_nursery_is_clean ();
2867 current_object_ops = major_collector.major_ops;
2871 * The workers have stopped so we need to finish gray queue
2872 * work that might result from finalization in the main GC
2873 * thread. Redirection must therefore be turned off.
2875 sgen_gray_object_queue_disable_alloc_prepare (&gray_queue);
2876 g_assert (sgen_section_gray_queue_is_empty (sgen_workers_get_distribute_section_gray_queue ()));
2878 /* all the objects in the heap */
2879 finish_gray_stack (GENERATION_OLD, &gray_queue);
2881 time_major_finish_gray_stack += TV_ELAPSED (btv, atv);
2884 * The (single-threaded) finalization code might have done
2885 * some copying/marking so we can only reset the GC thread's
2886 * worker data here instead of earlier when we joined the
2889 sgen_workers_reset_data ();
/* Late pins from failed allocations: re-sort pin queue before
 * fragment building. */
2891 if (objects_pinned) {
2892 g_assert (!concurrent_collection_in_progress);
2894 /*This is slow, but we just OOM'd*/
2895 sgen_pin_queue_clear_discarded_entries (nursery_section, old_next_pin_slot);
2896 sgen_optimize_pin_queue ();
2897 sgen_find_section_pin_queue_start_end (nursery_section);
2901 reset_heap_boundaries ();
2902 sgen_update_heap_boundaries ((mword)sgen_get_nursery_start (), (mword)sgen_get_nursery_end ());
2904 if (check_mark_bits_after_major_collection)
2905 sgen_check_major_heap_marked ();
2907 MONO_GC_SWEEP_BEGIN (GENERATION_OLD, !major_collector.sweeps_lazily);
2909 /* sweep the big objects list */
2911 for (bigobj = los_object_list; bigobj;) {
2912 g_assert (!object_is_pinned (bigobj->data));
/* Survivor: unpin for the next cycle and keep heap bounds current. */
2913 if (sgen_los_object_is_pinned (bigobj->data)) {
2914 sgen_los_unpin_object (bigobj->data);
2915 sgen_update_heap_boundaries ((mword)bigobj->data, (mword)bigobj->data + sgen_los_object_size (bigobj));
2918 /* not referenced anywhere, so we can free it */
2920 prevbo->next = bigobj->next;
2922 los_object_list = bigobj->next;
2924 bigobj = bigobj->next;
2925 sgen_los_free_object (to_free);
2929 bigobj = bigobj->next;
2933 time_major_free_bigobjs += TV_ELAPSED (atv, btv);
2938 time_major_los_sweep += TV_ELAPSED (btv, atv);
2940 major_collector.sweep ();
2942 MONO_GC_SWEEP_END (GENERATION_OLD, !major_collector.sweeps_lazily);
2945 time_major_sweep += TV_ELAPSED (atv, btv);
2947 if (!concurrent_collection_in_progress) {
2948 /* walk the pin_queue, build up the fragment list of free memory, unmark
2949 * pinned objects as we go, memzero() the empty fragments so they are ready for the
2952 if (!sgen_build_nursery_fragments (nursery_section, nursery_section->pin_queue_start, nursery_section->pin_queue_num_entries, NULL))
2955 /* prepare the pin queue for the next collection */
2956 sgen_finish_pinning ();
2958 /* Clear TLABs for all threads */
2959 sgen_clear_tlabs ();
2961 sgen_pin_stats_reset ();
2964 if (concurrent_collection_in_progress)
2965 sgen_cement_concurrent_finish ();
2966 sgen_cement_clear_below_threshold ();
2969 time_major_fragment_creation += TV_ELAPSED (btv, atv);
2972 dump_heap ("major", gc_stats.major_gc_count - 1, reason);
2974 if (fin_ready_list || critical_fin_list) {
2975 SGEN_LOG (4, "Finalizer-thread wakeup: ready %d", num_ready_finalizers);
2976 mono_gc_finalize_notify ();
2979 g_assert (sgen_gray_object_queue_is_empty (&gray_queue));
2981 sgen_memgov_major_collection_end ();
2982 current_collection_generation = -1;
2984 major_collector.finish_major_collection ();
2986 g_assert (sgen_section_gray_queue_is_empty (sgen_workers_get_distribute_section_gray_queue ()));
2988 if (concurrent_collection_in_progress)
2989 concurrent_collection_in_progress = FALSE;
2991 check_scan_starts ();
2993 binary_protocol_flush_buffers (FALSE);
2995 //consistency_check ();
2997 MONO_GC_END (GENERATION_OLD);
2998 binary_protocol_collection_end (gc_stats.major_gc_count - 1, GENERATION_OLD);
/*
 * major_do_collection:
 *
 * Runs a full, synchronous (stop-the-world) major collection.
 * The world must already be stopped by the caller.
 * Returns TRUE when pinning caused allocation failures during the
 * collection (bytes_pinned_from_failed_allocation > 0), which the
 * caller uses to decide on an overflow collection.
 */
3002 major_do_collection (const char *reason)
3004 TV_DECLARE (time_start);
3005 TV_DECLARE (time_end);
3006 size_t old_next_pin_slot;
3008 if (disable_major_collections)
/* Sanity check: no major objects may be counted as marked before we start. */
3011 if (major_collector.get_and_reset_num_major_objects_marked) {
3012 long long num_marked = major_collector.get_and_reset_num_major_objects_marked ();
3013 g_assert (!num_marked);
3016 /* world must be stopped already */
3017 TV_GETTIME (time_start);
3019 major_start_collection (FALSE, &old_next_pin_slot);
3020 major_finish_collection (reason, old_next_pin_slot, FALSE);
3022 TV_GETTIME (time_end);
3023 gc_stats.major_gc_time += TV_ELAPSED (time_start, time_end);
3025 /* FIXME: also report this to the user, preferably in gc-end. */
3026 if (major_collector.get_and_reset_num_major_objects_marked)
3027 major_collector.get_and_reset_num_major_objects_marked ();
3029 return bytes_pinned_from_failed_allocation > 0;
/*
 * major_start_concurrent_collection:
 *
 * Runs the initial stop-the-world phase of a concurrent major
 * collection and hands the marking work off to the worker threads.
 * No-op when major collections are disabled.
 */
3033 major_start_concurrent_collection (const char *reason)
3035 TV_DECLARE (time_start);
3036 TV_DECLARE (time_end);
3037 long long num_objects_marked;
3039 if (disable_major_collections)
3042 TV_GETTIME (time_start);
3043 SGEN_TV_GETTIME (time_major_conc_collection_start);
/* Nothing must have been marked before this collection starts. */
3045 num_objects_marked = major_collector.get_and_reset_num_major_objects_marked ();
3046 g_assert (num_objects_marked == 0);
3048 MONO_GC_CONCURRENT_START_BEGIN (GENERATION_OLD);
3049 binary_protocol_concurrent_start ();
3051 // FIXME: store reason and pass it when finishing
3052 major_start_collection (TRUE, NULL);
/* Hand the gray queue over to the concurrent workers. */
3054 gray_queue_redirect (&gray_queue);
3055 sgen_workers_wait_for_jobs ();
3057 num_objects_marked = major_collector.get_and_reset_num_major_objects_marked ();
3058 MONO_GC_CONCURRENT_START_END (GENERATION_OLD, num_objects_marked);
3060 TV_GETTIME (time_end);
3061 gc_stats.major_gc_time += TV_ELAPSED (time_start, time_end);
3063 current_collection_generation = -1;
/*
 * major_update_or_finish_concurrent_collection:
 *
 * Either updates the card table mod-union data for a still-running
 * concurrent major collection, or — when @force_finish is set or all
 * workers are done — finishes it: a finishing nursery collection
 * followed by major_finish_collection ().  The return value (not fully
 * visible in this excerpt) presumably indicates whether the collection
 * was finished — TODO confirm against the full source.
 */
3067 major_update_or_finish_concurrent_collection (gboolean force_finish)
3069 TV_DECLARE (total_start);
3070 TV_DECLARE (total_end);
3071 SgenGrayQueue unpin_queue;
3072 memset (&unpin_queue, 0, sizeof (unpin_queue));
3074 TV_GETTIME (total_start);
3076 MONO_GC_CONCURRENT_UPDATE_FINISH_BEGIN (GENERATION_OLD, major_collector.get_and_reset_num_major_objects_marked ());
3077 binary_protocol_concurrent_update_finish ();
3079 g_assert (sgen_gray_object_queue_is_empty (&gray_queue));
/* Workers still running and not forced to finish: just refresh mod-union data. */
3081 if (!force_finish && !sgen_workers_all_done ()) {
3082 major_collector.update_cardtable_mod_union ();
3083 sgen_los_update_cardtable_mod_union ();
3085 MONO_GC_CONCURRENT_UPDATE_END (GENERATION_OLD, major_collector.get_and_reset_num_major_objects_marked ());
3087 TV_GETTIME (total_end);
3088 gc_stats.major_gc_time += TV_ELAPSED (total_start, total_end);
3094 * The major collector can add global remsets which are processed in the finishing
3095 * nursery collection, below. That implies that the workers must have finished
3096 * marking before the nursery collection is allowed to run, otherwise we might miss
3099 wait_for_workers_to_finish ();
3101 SGEN_TV_GETTIME (time_major_conc_collection_end);
3102 gc_stats.major_gc_time_concurrent += SGEN_TV_ELAPSED (time_major_conc_collection_start, time_major_conc_collection_end);
3104 major_collector.update_cardtable_mod_union ();
3105 sgen_los_update_cardtable_mod_union ();
/* Finishing nursery collection; objects it pins are recorded so we can unpin them below. */
3107 collect_nursery (&unpin_queue, TRUE);
3109 if (mod_union_consistency_check)
3110 sgen_check_mod_union_consistency ();
3112 current_collection_generation = GENERATION_OLD;
3113 major_finish_collection ("finishing", -1, TRUE);
3115 if (whole_heap_check_before_collection)
3116 sgen_check_whole_heap (FALSE);
3118 unpin_objects_from_queue (&unpin_queue);
3119 sgen_gray_object_queue_deinit (&unpin_queue);
3121 MONO_GC_CONCURRENT_FINISH_END (GENERATION_OLD, major_collector.get_and_reset_num_major_objects_marked ());
3123 TV_GETTIME (total_end);
/* Subtract the nursery pause already accounted for by collect_nursery () above. */
3124 gc_stats.major_gc_time += TV_ELAPSED (total_start, total_end) - TV_ELAPSED (last_minor_collection_start_tv, last_minor_collection_end_tv);
3126 current_collection_generation = -1;
3132 * Ensure an allocation request for @size will succeed by freeing enough memory.
3134 * LOCKING: The GC lock MUST be held.
3137 sgen_ensure_free_space (size_t size)
3139 int generation_to_collect = -1;
3140 const char *reason = NULL;
/* Large objects live in LOS; only a major collection can free LOS space. */
3143 if (size > SGEN_MAX_SMALL_OBJ_SIZE) {
3144 if (sgen_need_major_collection (size)) {
3145 reason = "LOS overflow";
3146 generation_to_collect = GENERATION_OLD;
3149 if (degraded_mode) {
3150 if (sgen_need_major_collection (size)) {
3151 reason = "Degraded mode overflow";
3152 generation_to_collect = GENERATION_OLD;
3154 } else if (sgen_need_major_collection (size)) {
3155 reason = "Minor allowance";
3156 generation_to_collect = GENERATION_OLD;
3158 generation_to_collect = GENERATION_NURSERY;
3159 reason = "Nursery full";
/* Nothing else triggered: use the pause to finish a concurrent collection whose workers are done. */
3163 if (generation_to_collect == -1) {
3164 if (concurrent_collection_in_progress && sgen_workers_all_done ()) {
3165 generation_to_collect = GENERATION_OLD;
3166 reason = "Finish concurrent collection";
3170 if (generation_to_collect == -1)
3172 sgen_perform_collection (size, generation_to_collect, reason, FALSE);
3176 * LOCKING: Assumes the GC lock is held.
/*
 * sgen_perform_collection:
 *
 * Stops the world, runs the requested collection (updating/finishing a
 * concurrent major collection first, if one is in progress), optionally
 * runs an overflow collection of the other generation, then restarts
 * the world and reports timing via @infos.
 */
3179 sgen_perform_collection (size_t requested_size, int generation_to_collect, const char *reason, gboolean wait_to_finish)
3181 TV_DECLARE (gc_end);
3182 GGTimingInfo infos [2];
3183 int overflow_generation_to_collect = -1;
3184 int oldest_generation_collected = generation_to_collect;
3185 const char *overflow_reason = NULL;
3187 MONO_GC_REQUESTED (generation_to_collect, requested_size, wait_to_finish ? 1 : 0);
3189 binary_protocol_collection_force (generation_to_collect);
3191 g_assert (generation_to_collect == GENERATION_NURSERY || generation_to_collect == GENERATION_OLD);
3193 memset (infos, 0, sizeof (infos));
3194 mono_profiler_gc_event (MONO_GC_EVENT_START, generation_to_collect);
3196 infos [0].generation = generation_to_collect;
3197 infos [0].reason = reason;
3198 infos [0].is_overflow = FALSE;
3199 TV_GETTIME (infos [0].total_time);
3200 infos [1].generation = -1;
3202 sgen_stop_world (generation_to_collect);
/* A concurrent collection in progress is updated/finished before anything else runs. */
3204 if (concurrent_collection_in_progress) {
3205 if (major_update_or_finish_concurrent_collection (wait_to_finish && generation_to_collect == GENERATION_OLD)) {
3206 oldest_generation_collected = GENERATION_OLD;
3209 if (generation_to_collect == GENERATION_OLD)
/* An explicitly requested synchronous major overrides concurrent mode. */
3212 if (generation_to_collect == GENERATION_OLD &&
3213 allow_synchronous_major &&
3214 major_collector.want_synchronous_collection &&
3215 *major_collector.want_synchronous_collection) {
3216 wait_to_finish = TRUE;
3220 //FIXME extract overflow reason
3221 if (generation_to_collect == GENERATION_NURSERY) {
3222 if (collect_nursery (NULL, FALSE)) {
3223 overflow_generation_to_collect = GENERATION_OLD;
3224 overflow_reason = "Minor overflow";
/* A concurrent major starts from an empty nursery, so collect it first. */
3227 if (major_collector.is_concurrent) {
3228 g_assert (!concurrent_collection_in_progress);
3229 if (!wait_to_finish)
3230 collect_nursery (NULL, FALSE);
3233 if (major_collector.is_concurrent && !wait_to_finish) {
3234 major_start_concurrent_collection (reason);
3235 // FIXME: set infos[0] properly
3238 if (major_do_collection (reason)) {
3239 overflow_generation_to_collect = GENERATION_NURSERY;
3240 overflow_reason = "Excessive pinning";
3245 TV_GETTIME (gc_end);
3246 infos [0].total_time = SGEN_TV_ELAPSED (infos [0].total_time, gc_end);
/* Overflow collections are only done for the synchronous collectors. */
3249 if (!major_collector.is_concurrent && overflow_generation_to_collect != -1) {
3250 mono_profiler_gc_event (MONO_GC_EVENT_START, overflow_generation_to_collect);
3251 infos [1].generation = overflow_generation_to_collect;
3252 infos [1].reason = overflow_reason;
3253 infos [1].is_overflow = TRUE;
3254 infos [1].total_time = gc_end;
3256 if (overflow_generation_to_collect == GENERATION_NURSERY)
3257 collect_nursery (NULL, FALSE);
3259 major_do_collection (overflow_reason);
3261 TV_GETTIME (gc_end);
3262 infos [1].total_time = SGEN_TV_ELAPSED (infos [1].total_time, gc_end);
3264 /* keep events symmetric */
3265 mono_profiler_gc_event (MONO_GC_EVENT_END, overflow_generation_to_collect);
3267 oldest_generation_collected = MAX (oldest_generation_collected, overflow_generation_to_collect);
3270 SGEN_LOG (2, "Heap size: %lu, LOS size: %lu", (unsigned long)mono_gc_get_heap_size (), (unsigned long)los_memory_usage);
3272 /* this also sets the proper pointers for the next allocation */
3273 if (generation_to_collect == GENERATION_NURSERY && !sgen_can_alloc_size (requested_size)) {
3274 /* TypeBuilder and MonoMethod are killing mcs with fragmentation */
3275 SGEN_LOG (1, "nursery collection didn't find enough room for %zd alloc (%zd pinned)", requested_size, sgen_get_pinned_count ());
3276 sgen_dump_pin_queue ();
3281 g_assert (sgen_gray_object_queue_is_empty (&gray_queue));
3283 sgen_restart_world (oldest_generation_collected, infos);
3285 mono_profiler_gc_event (MONO_GC_EVENT_END, generation_to_collect);
3289 * ######################################################################
3290 * ######## Memory allocation from the OS
3291 * ######################################################################
3292 * This section of code deals with getting memory from the OS and
3293 * allocating memory for GC-internal data structures.
3294 * Internal memory can be handled with a freelist for small objects.
/* Debug helper: prints internal and pinned memory usage to stdout. */
3300 G_GNUC_UNUSED static void
3301 report_internal_mem_usage (void)
3303 printf ("Internal memory usage:\n");
3304 sgen_report_internal_mem_usage ();
3305 printf ("Pinned memory usage:\n");
3306 major_collector.report_pinned_memory_usage ();
3310 * ######################################################################
3311 * ######## Finalization support
3312 * ######################################################################
/*
 * sgen_major_is_object_alive:
 *
 * Liveness check for non-nursery objects: pinned or forwarded objects
 * are alive; LOS objects (larger than SGEN_MAX_SMALL_OBJ_SIZE) are
 * alive only if pinned; otherwise defer to the major collector.
 */
3315 static inline gboolean
3316 sgen_major_is_object_alive (void *object)
3320 /* Oldgen objects can be pinned and forwarded too */
3321 if (SGEN_OBJECT_IS_PINNED (object) || SGEN_OBJECT_IS_FORWARDED (object))
3325 * FIXME: major_collector.is_object_live() also calculates the
3326 * size. Avoid the double calculation.
3328 objsize = SGEN_ALIGN_UP (sgen_safe_object_get_size ((MonoObject*)object));
3329 if (objsize > SGEN_MAX_SMALL_OBJ_SIZE)
3330 return sgen_los_object_is_pinned (object);
3332 return major_collector.is_object_live (object);
3336 * If the object has been forwarded it means it's still referenced from a root.
3337 * If it is pinned it's still alive as well.
3338 * A LOS object is only alive if we have pinned it.
3339 * Return TRUE if @obj is ready to be finalized.
/* NOTE(review): this function returns object *liveness*; "ready to be
 * finalized" is the negation — see sgen_gc_is_object_ready_for_finalization.
 * The last line of the comment above appears to belong there. */
3341 static inline gboolean
3342 sgen_is_object_alive (void *object)
3344 if (ptr_in_nursery (object))
3345 return sgen_nursery_is_object_alive (object);
3347 return sgen_major_is_object_alive (object);
3351 * This function returns true if @object is either alive or it belongs to the old gen
3352 * and we're currently doing a minor collection.
3355 sgen_is_object_alive_for_current_gen (char *object)
3357 if (ptr_in_nursery (object))
3358 return sgen_nursery_is_object_alive (object);
/* During a minor collection old-gen objects are never collected, so they count as alive. */
3360 if (current_collection_generation == GENERATION_NURSERY)
3363 return sgen_major_is_object_alive (object);
3367 * This function returns true if @object is alive and belongs to the
3368 * current collection - major collections are full heap, so old gen objects
3369 * are never alive during a minor collection.
3372 sgen_is_object_alive_and_on_current_collection (char *object)
3374 if (ptr_in_nursery (object))
3375 return sgen_nursery_is_object_alive (object);
3377 if (current_collection_generation == GENERATION_NURSERY)
3380 return sgen_major_is_object_alive (object);
/* An object is ready for finalization once it is no longer alive. */
3385 sgen_gc_is_object_ready_for_finalization (void *object)
3387 return !sgen_is_object_alive (object);
/* Does @obj's class derive from mono_defaults.critical_finalizer_object? */
3391 has_critical_finalizer (MonoObject *obj)
3395 if (!mono_defaults.critical_finalizer_object)
3398 class = ((MonoVTable*)LOAD_VTABLE (obj))->klass;
3400 return mono_class_has_parent_fast (class, mono_defaults.critical_finalizer_object);
/* Does the object's vtable carry the finalizer-aware GC bit? */
3404 is_finalization_aware (MonoObject *obj)
3406 MonoVTable *vt = ((MonoVTable*)LOAD_VTABLE (obj));
3407 return (vt->gc_bits & SGEN_GC_BIT_FINALIZER_AWARE) == SGEN_GC_BIT_FINALIZER_AWARE;
/*
 * sgen_queue_finalization_entry:
 *
 * Puts @obj on the ready-to-finalize list (the critical list when its
 * class has a critical finalizer), invokes the finalization-aware
 * callback when registered, and fires the dtrace enqueue probe.
 */
3411 sgen_queue_finalization_entry (MonoObject *obj)
3413 FinalizeReadyEntry *entry = sgen_alloc_internal (INTERNAL_MEM_FINALIZE_READY_ENTRY);
3414 gboolean critical = has_critical_finalizer (obj);
3415 entry->object = obj;
3417 entry->next = critical_fin_list;
3418 critical_fin_list = entry;
3420 entry->next = fin_ready_list;
3421 fin_ready_list = entry;
3424 if (fin_callbacks.object_queued_for_finalization && is_finalization_aware (obj))
3425 fin_callbacks.object_queued_for_finalization (obj);
3427 #ifdef ENABLE_DTRACE
3428 if (G_UNLIKELY (MONO_GC_FINALIZE_ENQUEUE_ENABLED ())) {
3429 int gen = sgen_ptr_in_nursery (obj) ? GENERATION_NURSERY : GENERATION_OLD;
3430 MonoVTable *vt = (MonoVTable*)LOAD_VTABLE (obj);
3431 MONO_GC_FINALIZE_ENQUEUE ((mword)obj, sgen_safe_object_get_size (obj),
3432 vt->klass->name_space, vt->klass->name, gen, critical);
/* Public liveness query: alive with respect to the collection currently in progress. */
3438 sgen_object_is_live (void *obj)
3440 return sgen_is_object_alive_and_on_current_collection (obj);
3443 /* LOCKING: requires that the GC lock is held */
/*
 * null_ephemerons_for_domain:
 *
 * Unlinks and frees ephemeron list nodes whose array belongs to an
 * unloaded domain (recognizable by a cleared vtable).
 */
3445 null_ephemerons_for_domain (MonoDomain *domain)
3447 EphemeronLinkNode *current = ephemeron_list, *prev = NULL;
3450 MonoObject *object = (MonoObject*)current->array;
/* An object whose vtable has been nulled was cleared by domain unload. */
3452 if (object && !object->vtable) {
3453 EphemeronLinkNode *tmp = current;
3456 prev->next = current->next;
3458 ephemeron_list = current->next;
3460 current = current->next;
3461 sgen_free_internal (tmp, INTERNAL_MEM_EPHEMERON_LINK);
3464 current = current->next;
3469 /* LOCKING: requires that the GC lock is held */
/*
 * clear_unreachable_ephemerons:
 *
 * Walks all registered ephemeron arrays, dropping list entries whose
 * array is dead and replacing keys that are no longer reachable with
 * the domain's tombstone object.
 */
3471 clear_unreachable_ephemerons (ScanCopyContext ctx)
3473 CopyOrMarkObjectFunc copy_func = ctx.copy_func;
3474 GrayQueue *queue = ctx.queue;
3475 EphemeronLinkNode *current = ephemeron_list, *prev = NULL;
3477 Ephemeron *cur, *array_end;
3481 char *object = current->array;
/* Dead ephemeron array: unlink the node and free it. */
3483 if (!sgen_is_object_alive_for_current_gen (object)) {
3484 EphemeronLinkNode *tmp = current;
3486 SGEN_LOG (5, "Dead Ephemeron array at %p", object);
3489 prev->next = current->next;
3491 ephemeron_list = current->next;
3493 current = current->next;
3494 sgen_free_internal (tmp, INTERNAL_MEM_EPHEMERON_LINK);
/* The array survives: refresh its (possibly moved) address in the node. */
3499 copy_func ((void**)&object, queue);
3500 current->array = object;
3502 SGEN_LOG (5, "Clearing unreachable entries for ephemeron array at %p", object);
3504 array = (MonoArray*)object;
3505 cur = mono_array_addr (array, Ephemeron, 0);
3506 array_end = cur + mono_array_length_fast (array);
3507 tombstone = (char*)((MonoVTable*)LOAD_VTABLE (object))->domain->ephemeron_tombstone;
3509 for (; cur < array_end; ++cur) {
3510 char *key = (char*)cur->key;
3512 if (!key || key == tombstone)
3515 SGEN_LOG (5, "[%td] key %p (%s) value %p (%s)", cur - mono_array_addr (array, Ephemeron, 0),
3516 key, sgen_is_object_alive_for_current_gen (key) ? "reachable" : "unreachable",
3517 cur->value, cur->value && sgen_is_object_alive_for_current_gen (cur->value) ? "reachable" : "unreachable");
/* Dead key: tombstone the slot. */
3519 if (!sgen_is_object_alive_for_current_gen (key)) {
3520 cur->key = tombstone;
3526 current = current->next;
3531 LOCKING: requires that the GC lock is held
3533 Limitations: We scan all ephemerons on every collection since the current design doesn't allow for a simple nursery/mature split.
/*
 * mark_ephemerons_in_range:
 *
 * For every live ephemeron array, marks the value of each entry whose
 * key is reachable.  Returns non-zero when a whole pass marked nothing
 * new, i.e. a fixpoint has been reached.
 */
3536 mark_ephemerons_in_range (ScanCopyContext ctx)
3538 CopyOrMarkObjectFunc copy_func = ctx.copy_func;
3539 GrayQueue *queue = ctx.queue;
3540 int nothing_marked = 1;
3541 EphemeronLinkNode *current = ephemeron_list;
3543 Ephemeron *cur, *array_end;
3546 for (current = ephemeron_list; current; current = current->next) {
3547 char *object = current->array;
3548 SGEN_LOG (5, "Ephemeron array at %p", object);
3550 /*It has to be alive*/
3551 if (!sgen_is_object_alive_for_current_gen (object)) {
3552 SGEN_LOG (5, "\tnot reachable");
3556 copy_func ((void**)&object, queue);
3558 array = (MonoArray*)object;
3559 cur = mono_array_addr (array, Ephemeron, 0);
3560 array_end = cur + mono_array_length_fast (array);
3561 tombstone = (char*)((MonoVTable*)LOAD_VTABLE (object))->domain->ephemeron_tombstone;
3563 for (; cur < array_end; ++cur) {
3564 char *key = cur->key;
3566 if (!key || key == tombstone)
3569 SGEN_LOG (5, "[%td] key %p (%s) value %p (%s)", cur - mono_array_addr (array, Ephemeron, 0),
3570 key, sgen_is_object_alive_for_current_gen (key) ? "reachable" : "unreachable",
3571 cur->value, cur->value && sgen_is_object_alive_for_current_gen (cur->value) ? "reachable" : "unreachable");
/* Reachable key: keep it, and mark its value if that isn't alive yet. */
3573 if (sgen_is_object_alive_for_current_gen (key)) {
3574 char *value = cur->value;
3576 copy_func ((void**)&cur->key, queue);
3578 if (!sgen_is_object_alive_for_current_gen (value))
3580 copy_func ((void**)&cur->value, queue);
3586 SGEN_LOG (5, "Ephemeron run finished. Is it done %d", nothing_marked);
3587 return nothing_marked;
/*
 * mono_gc_invoke_finalizers:
 *
 * Runs pending finalizers, draining the critical list after the normal
 * ready list.  The return value (the count of finalizers invoked,
 * presumably — the counting lines are not visible in this excerpt)
 * should be confirmed against the full source.
 */
3591 mono_gc_invoke_finalizers (void)
3593 FinalizeReadyEntry *entry = NULL;
3594 gboolean entry_is_critical = FALSE;
3597 /* FIXME: batch to reduce lock contention */
3598 while (fin_ready_list || critical_fin_list) {
/* Unlink the entry finalized on the previous iteration from its list. */
3602 FinalizeReadyEntry **list = entry_is_critical ? &critical_fin_list : &fin_ready_list;
3604 /* We have finalized entry in the last
3605 interation, now we need to remove it from
3608 *list = entry->next;
3610 FinalizeReadyEntry *e = *list;
3611 while (e->next != entry)
3613 e->next = entry->next;
3615 sgen_free_internal (entry, INTERNAL_MEM_FINALIZE_READY_ENTRY);
3619 /* Now look for the first non-null entry. */
3620 for (entry = fin_ready_list; entry && !entry->object; entry = entry->next)
3623 entry_is_critical = FALSE;
3625 entry_is_critical = TRUE;
3626 for (entry = critical_fin_list; entry && !entry->object; entry = entry->next)
3631 g_assert (entry->object);
3632 num_ready_finalizers--;
3633 obj = entry->object;
/* Clear the slot; the local `obj` keeps the object alive (stack-pinned, see below). */
3634 entry->object = NULL;
3635 SGEN_LOG (7, "Finalizing object %p (%s)", obj, safe_name (obj));
3643 g_assert (entry->object == NULL);
3645 /* the object is on the stack so it is pinned */
3646 /*g_print ("Calling finalizer for object: %p (%s)\n", entry->object, safe_name (entry->object));*/
3647 mono_gc_run_finalize (obj, NULL);
/* TRUE while finalizers remain queued on either list. */
3654 mono_gc_pending_finalizers (void)
3656 return fin_ready_list || critical_fin_list;
3660 * ######################################################################
3661 * ######## registered roots support
3662 * ######################################################################
3666 * We do not coalesce roots.
/*
 * mono_gc_register_root_inner:
 *
 * Registers [start, start+size) as a GC root of @root_type with GC
 * descriptor @descr.  Re-registering an existing root updates its size
 * and descriptor in place.
 */
3669 mono_gc_register_root_inner (char *start, size_t size, void *descr, int root_type)
3671 RootRecord new_root;
3674 for (i = 0; i < ROOT_TYPE_NUM; ++i) {
3675 RootRecord *root = sgen_hash_table_lookup (&roots_hash [i], start);
3676 /* we allow changing the size and the descriptor (for thread statics etc) */
3678 size_t old_size = root->end_root - start;
3679 root->end_root = start + size;
/* A root must stay precise (descriptored) or conservative — it cannot switch kind. */
3680 g_assert (((root->root_desc != 0) && (descr != NULL)) ||
3681 ((root->root_desc == 0) && (descr == NULL)));
3682 root->root_desc = (mword)descr;
3684 roots_size -= old_size;
3690 new_root.end_root = start + size;
3691 new_root.root_desc = (mword)descr;
3693 sgen_hash_table_replace (&roots_hash [root_type], start, &new_root, NULL);
3696 SGEN_LOG (3, "Added root for range: %p-%p, descr: %p (%d/%d bytes)", start, new_root.end_root, descr, (int)size, (int)roots_size);
/* Precise root when a descriptor is given, otherwise a pinned (conservative) one. */
3703 mono_gc_register_root (char *start, size_t size, void *descr)
3705 return mono_gc_register_root_inner (start, size, descr, descr ? ROOT_TYPE_NORMAL : ROOT_TYPE_PINNED);
/* Root whose stores go through the write barrier. */
3709 mono_gc_register_root_wbarrier (char *start, size_t size, void *descr)
3711 return mono_gc_register_root_inner (start, size, descr, ROOT_TYPE_WBARRIER);
/* Removes @addr from every root table it appears in and adjusts roots_size. */
3715 mono_gc_deregister_root (char* addr)
3721 for (root_type = 0; root_type < ROOT_TYPE_NUM; ++root_type) {
3722 if (sgen_hash_table_remove (&roots_hash [root_type], addr, &root))
3723 roots_size -= (root.end_root - addr);
3729 * ######################################################################
3730 * ######## Thread handling (stop/start code)
3731 * ######################################################################
3734 unsigned int sgen_global_stop_count = 0;
/* Generation being collected right now, or -1 outside a collection. */
3737 sgen_get_current_collection_generation (void)
3739 return current_collection_generation;
/* Copies the runtime's GC callback table into the collector. */
3743 mono_gc_set_gc_callbacks (MonoGCCallbacks *callbacks)
3745 gc_callbacks = *callbacks;
3749 mono_gc_get_gc_callbacks ()
3751 return &gc_callbacks;
3754 /* Variables holding start/end nursery so it won't have to be passed at every call */
3755 static void *scan_area_arg_start, *scan_area_arg_end;
/* Conservatively pins anything in [start, end) that looks like a nursery pointer. */
3758 mono_gc_conservatively_scan_area (void *start, void *end)
3760 conservatively_pin_objects_from (start, end, scan_area_arg_start, scan_area_arg_end, PIN_TYPE_STACK);
/* Precise-scan callback: copy/mark a single object reference. */
3764 mono_gc_scan_object (void *obj, void *gc_data)
3766 UserCopyOrMarkData *data = gc_data;
3767 current_object_ops.copy_or_mark_object (&obj, data->queue);
3772 * Mark from thread stacks and registers.
/*
 * scan_thread_data:
 *
 * Scans every registered thread's stack — precisely via the runtime's
 * thread_mark_func when available, conservatively otherwise — and its
 * saved register state, pinning nursery references.
 */
3775 scan_thread_data (void *start_nursery, void *end_nursery, gboolean precise, GrayQueue *queue)
3777 SgenThreadInfo *info;
3779 scan_area_arg_start = start_nursery;
3780 scan_area_arg_end = end_nursery;
3782 FOREACH_THREAD (info) {
3784 SGEN_LOG (3, "Skipping dead thread %p, range: %p-%p, size: %td", info, info->stack_start, info->stack_end, (char*)info->stack_end - (char*)info->stack_start);
3787 if (info->gc_disabled) {
3788 SGEN_LOG (3, "GC disabled for thread %p, range: %p-%p, size: %td", info, info->stack_start, info->stack_end, (char*)info->stack_end - (char*)info->stack_start);
3791 if (mono_thread_info_run_state (info) != STATE_RUNNING) {
3792 SGEN_LOG (3, "Skipping non-running thread %p, range: %p-%p, size: %td (state %d)", info, info->stack_start, info->stack_end, (char*)info->stack_end - (char*)info->stack_start, mono_thread_info_run_state (info));
3795 SGEN_LOG (3, "Scanning thread %p, range: %p-%p, size: %td, pinned=%zd", info, info->stack_start, info->stack_end, (char*)info->stack_end - (char*)info->stack_start, sgen_get_pinned_count ());
3796 if (gc_callbacks.thread_mark_func && !conservative_stack_mark) {
3797 UserCopyOrMarkData data = { NULL, queue };
3798 gc_callbacks.thread_mark_func (info->runtime_data, info->stack_start, info->stack_end, precise, &data);
3799 } else if (!precise) {
/* Precise marking unsupported: fall back to conservative marking permanently. */
3800 if (!conservative_stack_mark) {
3801 fprintf (stderr, "Precise stack mark not supported - disabling.\n");
3802 conservative_stack_mark = TRUE;
3804 conservatively_pin_objects_from (info->stack_start, info->stack_end, start_nursery, end_nursery, PIN_TYPE_STACK);
/* Also scan the saved register state (ctx or raw regs — platform dependent). */
3809 conservatively_pin_objects_from ((void**)&info->ctx, (void**)&info->ctx + ARCH_NUM_REGS,
3810 start_nursery, end_nursery, PIN_TYPE_STACK);
3812 conservatively_pin_objects_from ((void**)&info->regs, (void**)&info->regs + ARCH_NUM_REGS,
3813 start_nursery, end_nursery, PIN_TYPE_STACK);
3816 } END_FOREACH_THREAD
/* Is @ptr within the current thread's stack?  Uses the address of a
 * local variable as an approximation of the live stack top. */
3820 ptr_on_stack (void *ptr)
3822 gpointer stack_start = &stack_start;
3823 SgenThreadInfo *info = mono_thread_info_current ();
3825 if (ptr >= stack_start && ptr < (gpointer)info->stack_end)
/*
 * sgen_thread_register:
 *
 * Per-thread GC initialization: publishes @info in TLS, resets TLAB and
 * stop/stack bookkeeping, determines the stack bounds (falling back to
 * page-aligning @addr when the platform can't report them) and invokes
 * the runtime's thread-attach callback.
 */
3831 sgen_thread_register (SgenThreadInfo* info, void *addr)
3834 guint8 *staddr = NULL;
3836 #ifndef HAVE_KW_THREAD
3837 info->tlab_start = info->tlab_next = info->tlab_temp_end = info->tlab_real_end = NULL;
3839 g_assert (!mono_native_tls_get_value (thread_info_key));
3840 mono_native_tls_set_value (thread_info_key, info);
3842 sgen_thread_info = info;
3845 #ifdef SGEN_POSIX_STW
3846 info->stop_count = -1;
3850 info->stack_start = NULL;
3851 info->stopped_ip = NULL;
3852 info->stopped_domain = NULL;
3854 memset (&info->ctx, 0, sizeof (MonoContext));
3856 memset (&info->regs, 0, sizeof (info->regs));
3859 sgen_init_tlab_info (info);
3861 binary_protocol_thread_register ((gpointer)mono_thread_info_get_tid (info));
3863 /* On win32, stack_start_limit should be 0, since the stack can grow dynamically */
3864 mono_thread_info_get_stack_bounds (&staddr, &stsize);
3867 info->stack_start_limit = staddr;
3869 info->stack_end = staddr + stsize;
/* Fallback: round the caller-provided address up to a 4K page boundary. */
3871 gsize stack_bottom = (gsize)addr;
3872 stack_bottom += 4095;
3873 stack_bottom &= ~4095;
3874 info->stack_end = (char*)stack_bottom;
3877 #ifdef HAVE_KW_THREAD
3878 stack_end = info->stack_end;
3881 SGEN_LOG (3, "registered thread %p (%p) stack end %p", info, (gpointer)mono_thread_info_get_tid (info), info->stack_end);
3883 if (gc_callbacks.thread_attach_func)
3884 info->runtime_data = gc_callbacks.thread_attach_func ();
/* Detaches the current thread from the runtime while its domain is
 * still registered (see the rationale in the comment below). */
3889 sgen_thread_detach (SgenThreadInfo *p)
3891 /* If a delegate is passed to native code and invoked on a thread we dont
3892 * know about, the jit will register it with mono_jit_thread_attach, but
3893 * we have no way of knowing when that thread goes away. SGen has a TSD
3894 * so we assume that if the domain is still registered, we can detach
3897 if (mono_domain_get ())
3898 mono_thread_detach_internal (mono_thread_internal_current ());
/*
 * sgen_thread_unregister:
 *
 * Tears down per-thread GC state: clears TLS, hands runtime threads to
 * the joinable-thread machinery and runs the thread-detach callback.
 */
3902 sgen_thread_unregister (SgenThreadInfo *p)
3904 MonoNativeThreadId tid;
3906 tid = mono_thread_info_get_tid (p);
3907 binary_protocol_thread_unregister ((gpointer)tid);
3908 SGEN_LOG (3, "unregister thread %p (%p)", p, (gpointer)tid);
3910 #ifndef HAVE_KW_THREAD
3911 mono_native_tls_set_value (thread_info_key, NULL);
3913 sgen_thread_info = NULL;
3916 if (p->info.runtime_thread)
3917 mono_threads_add_joinable_thread ((gpointer)tid);
3919 if (gc_callbacks.thread_detach_func) {
3920 gc_callbacks.thread_detach_func (p->runtime_data);
3921 p->runtime_data = NULL;
/* (Re)runs the runtime attach callback for @info if it hasn't run yet. */
3927 sgen_thread_attach (SgenThreadInfo *info)
3930 /*this is odd, can we get attached before the gc is inited?*/
3934 if (gc_callbacks.thread_attach_func && !info->runtime_data)
3935 info->runtime_data = gc_callbacks.thread_attach_func ();
/* Attaches the calling thread to the thread-info machinery; TRUE on success. */
3938 mono_gc_register_thread (void *baseptr)
3940 return mono_thread_info_attach (baseptr) != NULL;
3944 * mono_gc_set_stack_end:
3946 * Set the end of the current threads stack to STACK_END. The stack space between
3947 * STACK_END and the real end of the threads stack will not be scanned during collections.
3950 mono_gc_set_stack_end (void *stack_end)
3952 SgenThreadInfo *info;
3955 info = mono_thread_info_current ();
/* The new end may only shrink the scanned region, never grow it. */
3957 g_assert (stack_end < info->stack_end);
3958 info->stack_end = stack_end;
3963 #if USE_PTHREAD_INTERCEPT
/* Thin pass-through wrappers kept for the pthread-intercepting build. */
3967 mono_gc_pthread_create (pthread_t *new_thread, const pthread_attr_t *attr, void *(*start_routine)(void *), void *arg)
3969 return pthread_create (new_thread, attr, start_routine, arg);
3973 mono_gc_pthread_join (pthread_t thread, void **retval)
3975 return pthread_join (thread, retval);
3979 mono_gc_pthread_detach (pthread_t thread)
3981 return pthread_detach (thread);
/* Detaches from the thread-info machinery before exiting the thread. */
3985 mono_gc_pthread_exit (void *retval)
3987 mono_thread_info_detach ();
3988 pthread_exit (retval);
3989 g_assert_not_reached ();
3992 #endif /* USE_PTHREAD_INTERCEPT */
3995 * ######################################################################
3996 * ######## Write barriers
3997 * ######################################################################
4001 * Note: the write barriers first do the needed GC work and then do the actual store:
4002 * this way the value is visible to the conservative GC scan after the write barrier
4003 * itself. If a GC interrupts the barrier in the middle, value will be kept alive by
4004 * the conservative scan, otherwise by the remembered set scan.
/* Stores @value into an object field, recording a remset unless the field itself is in the nursery. */
4007 mono_gc_wbarrier_set_field (MonoObject *obj, gpointer field_ptr, MonoObject* value)
4009 HEAVY_STAT (++stat_wbarrier_set_field);
4010 if (ptr_in_nursery (field_ptr)) {
4011 *(void**)field_ptr = value;
4014 SGEN_LOG (8, "Adding remset at %p", field_ptr);
4016 binary_protocol_wbarrier (field_ptr, value, value->vtable);
4018 remset.wbarrier_set_field (obj, field_ptr, value);
/* Same as set_field above, but for an array slot. */
4022 mono_gc_wbarrier_set_arrayref (MonoArray *arr, gpointer slot_ptr, MonoObject* value)
4024 HEAVY_STAT (++stat_wbarrier_set_arrayref);
4025 if (ptr_in_nursery (slot_ptr)) {
4026 *(void**)slot_ptr = value;
4029 SGEN_LOG (8, "Adding remset at %p", slot_ptr);
4031 binary_protocol_wbarrier (slot_ptr, value, value->vtable);
4033 remset.wbarrier_set_arrayref (arr, slot_ptr, value);
/* Barrier for copying @count object references between arrays. */
4037 mono_gc_wbarrier_arrayref_copy (gpointer dest_ptr, gpointer src_ptr, int count)
4039 HEAVY_STAT (++stat_wbarrier_arrayref_copy);
4040 /*This check can be done without taking a lock since dest_ptr array is pinned*/
4041 if (ptr_in_nursery (dest_ptr) || count <= 0) {
4042 mono_gc_memmove_aligned (dest_ptr, src_ptr, count * sizeof (gpointer));
4046 #ifdef SGEN_HEAVY_BINARY_PROTOCOL
4047 if (binary_protocol_is_heavy_enabled ()) {
4049 for (i = 0; i < count; ++i) {
4050 gpointer dest = (gpointer*)dest_ptr + i;
4051 gpointer obj = *((gpointer*)src_ptr + i);
4053 binary_protocol_wbarrier (dest, obj, (gpointer)LOAD_VTABLE (obj));
4058 remset.wbarrier_arrayref_copy (dest_ptr, src_ptr, count);
4061 static char *found_obj;
/* Scan callback: records in found_obj the object whose extent contains the sought pointer. */
4064 find_object_for_ptr_callback (char *obj, size_t size, void *user_data)
4066 char *ptr = user_data;
4068 if (ptr >= obj && ptr < obj + size) {
4069 g_assert (!found_obj);
4074 /* for use in the debugger */
4075 char* find_object_for_ptr (char *ptr);
/* Finds the object containing @ptr by scanning nursery, then LOS, then the major heap. */
4077 find_object_for_ptr (char *ptr)
4079 if (ptr >= nursery_section->data && ptr < nursery_section->end_data) {
4081 sgen_scan_area_with_callback (nursery_section->data, nursery_section->end_data,
4082 find_object_for_ptr_callback, ptr, TRUE);
4088 sgen_los_iterate_objects (find_object_for_ptr_callback, ptr);
4093 * Very inefficient, but this is debugging code, supposed to
4094 * be called from gdb, so we don't care.
4097 major_collector.iterate_objects (ITERATE_OBJECTS_SWEEP_ALL, find_object_for_ptr_callback, ptr);
/*
 * mono_gc_wbarrier_generic_nostore:
 *
 * Records a remset for the pointer slot @ptr after the caller has
 * already performed the store.  Skips slots in the nursery or on the
 * stack, and old->old stores unless a concurrent collection is in
 * progress (which needs them — see comment below).
 */
4102 mono_gc_wbarrier_generic_nostore (gpointer ptr)
4106 HEAVY_STAT (++stat_wbarrier_generic_store);
4108 #ifdef XDOMAIN_CHECKS_IN_WBARRIER
4109 /* FIXME: ptr_in_heap must be called with the GC lock held */
4110 if (xdomain_checks && *(MonoObject**)ptr && ptr_in_heap (ptr)) {
4111 char *start = find_object_for_ptr (ptr);
4112 MonoObject *value = *(MonoObject**)ptr;
4116 MonoObject *obj = (MonoObject*)start;
4117 if (obj->vtable->domain != value->vtable->domain)
4118 g_assert (is_xdomain_ref_allowed (ptr, start, obj->vtable->domain));
4124 obj = *(gpointer*)ptr;
4126 binary_protocol_wbarrier (ptr, obj, (gpointer)LOAD_VTABLE (obj));
4128 if (ptr_in_nursery (ptr) || ptr_on_stack (ptr)) {
4129 SGEN_LOG (8, "Skipping remset at %p", ptr);
4134 * We need to record old->old pointer locations for the
4135 * concurrent collector.
4137 if (!ptr_in_nursery (obj) && !concurrent_collection_in_progress) {
4138 SGEN_LOG (8, "Skipping remset at %p", ptr);
4142 SGEN_LOG (8, "Adding remset at %p", ptr);
4144 remset.wbarrier_generic_nostore (ptr);
/* Store plus barrier; the barrier is only taken for nursery values. */
4148 mono_gc_wbarrier_generic_store (gpointer ptr, MonoObject* value)
4150 SGEN_LOG (8, "Wbarrier store at %p to %p (%s)", ptr, value, value ? safe_name (value) : "null");
4151 *(void**)ptr = value;
4152 if (ptr_in_nursery (value))
4153 mono_gc_wbarrier_generic_nostore (ptr);
4154 sgen_dummy_use (value);
4157 /* Same as mono_gc_wbarrier_generic_store () but performs the store
4158 * as an atomic operation with release semantics.
4161 mono_gc_wbarrier_generic_store_atomic (gpointer ptr, MonoObject *value)
4163 HEAVY_STAT (++stat_wbarrier_generic_store_atomic);
4165 SGEN_LOG (8, "Wbarrier atomic store at %p to %p (%s)", ptr, value, value ? safe_name (value) : "null");
4167 InterlockedWritePointer (ptr, value);
4169 if (ptr_in_nursery (value))
4170 mono_gc_wbarrier_generic_nostore (ptr);
4172 sgen_dummy_use (value);
/* Barrier-stores the word-sized fields of a value whose reference slots are flagged in @bitmap. */
4175 void mono_gc_wbarrier_value_copy_bitmap (gpointer _dest, gpointer _src, int size, unsigned bitmap)
4177 mword *dest = _dest;
4182 mono_gc_wbarrier_generic_store (dest, (MonoObject*)*src);
4187 size -= SIZEOF_VOID_P;
4192 #ifdef SGEN_HEAVY_BINARY_PROTOCOL
/* HANDLE_PTR logs each reference slot of the copied object to the binary
 * protocol; it parameterizes the generic scanner included below to build
 * scan_object_for_binary_protocol_copy_wbarrier. */
4194 #define HANDLE_PTR(ptr,obj) do { \
4195 gpointer o = *(gpointer*)(ptr); \
4197 gpointer d = ((char*)dest) + ((char*)(ptr) - (char*)(obj)); \
4198 binary_protocol_wbarrier (d, o, (gpointer) LOAD_VTABLE (o)); \
4203 scan_object_for_binary_protocol_copy_wbarrier (gpointer dest, char *start, mword desc)
4205 #define SCAN_OBJECT_NOVTABLE
4206 #include "sgen-scan-object.h"
/*
 * Write barrier for copying `count` boxed/unboxed value-type instances of
 * `klass` from `src` to `dest`.  Fast path: if the destination is in the
 * nursery / on the stack, or the class holds no references, a raw memmove
 * suffices and no remset work is needed; otherwise the copy is delegated
 * to the active remembered-set implementation.
 */
4211 mono_gc_wbarrier_value_copy (gpointer dest, gpointer src, int count, MonoClass *klass)
4213 	HEAVY_STAT (++stat_wbarrier_value_copy);
/* Only value types are legal here. */
4214 	g_assert (klass->valuetype);
4216 	SGEN_LOG (8, "Adding value remset at %p, count %d, descr %p for class %s (%p)", dest, count, klass->gc_descr, klass->name, klass);
4218 	if (ptr_in_nursery (dest) || ptr_on_stack (dest) || !SGEN_CLASS_HAS_REFERENCES (klass)) {
4219 		size_t element_size = mono_class_value_size (klass, NULL);
4220 		size_t size = count * element_size;
/* No barrier needed — plain atomic-granularity copy.  (The early return
 * after this line is elided from the excerpt.) */
4221 		mono_gc_memmove_atomic (dest, src, size);
4225 #ifdef SGEN_HEAVY_BINARY_PROTOCOL
4226 	if (binary_protocol_is_heavy_enabled ()) {
4227 		size_t element_size = mono_class_value_size (klass, NULL);
4229 		for (i = 0; i < count; ++i) {
/* `- sizeof (MonoObject)` compensates for the scan code expecting an
 * object header before the value data. */
4230 			scan_object_for_binary_protocol_copy_wbarrier ((char*)dest + i * element_size,
4231 					(char*)src + i * element_size - sizeof (MonoObject),
4232 					(mword) klass->gc_descr);
/* Slow path: let the remembered-set implementation do the barriered copy. */
4237 	remset.wbarrier_value_copy (dest, src, count, klass);
4241  * mono_gc_wbarrier_object_copy:
4243  * Write barrier to call when obj is the result of a clone or copy of an object.
/* Copies the payload (everything after the MonoObject header) of `src`
 * into `obj`.  Nursery/stack destinations can be copied raw; others go
 * through the remembered set. */
4246 mono_gc_wbarrier_object_copy (MonoObject* obj, MonoObject *src)
4250 	HEAVY_STAT (++stat_wbarrier_object_copy);
4252 	if (ptr_in_nursery (obj) || ptr_on_stack (obj)) {
4253 		size = mono_object_class (obj)->instance_size;
/* Header excluded on both sides; copy only instance data.  (The early
 * return after the copy is elided from this excerpt.) */
4254 		mono_gc_memmove_aligned ((char*)obj + sizeof (MonoObject), (char*)src + sizeof (MonoObject),
4255 				size - sizeof (MonoObject));
4259 #ifdef SGEN_HEAVY_BINARY_PROTOCOL
4260 	if (binary_protocol_is_heavy_enabled ())
4261 		scan_object_for_binary_protocol_copy_wbarrier (obj, (char*)src, (mword) src->vtable->gc_descr);
4264 	remset.wbarrier_object_copy (obj, src);
4269  * ######################################################################
4270  * ######## Other mono public interface functions.
4271  * ######################################################################
/* Max references buffered per callback invocation during a heap walk. */
4274 #define REFS_SIZE 128
/* Fields of the HeapWalkInfo accumulator (struct header elided from this
 * excerpt): user callback plus fixed-size refs/offsets buffers. */
4277 	MonoGCReferences callback;
4281 	MonoObject *refs [REFS_SIZE];
4282 	uintptr_t offsets [REFS_SIZE];
/*
 * HANDLE_PTR hook for collect_references below: when the buffer is full,
 * flush it by invoking the callback (count reset is elided from this
 * excerpt), then record the slot's offset and referenced object.
 */
4286 #define HANDLE_PTR(ptr,obj)	do {	\
4288 		if (hwi->count == REFS_SIZE) {	\
4289 			hwi->callback ((MonoObject*)start, mono_object_class (start), hwi->called? 0: size, hwi->count, hwi->refs, hwi->offsets, hwi->data);	\
4293 		hwi->offsets [hwi->count] = (char*)(ptr)-(char*)start;	\
4294 		hwi->refs [hwi->count++] = *(ptr);	\
/* Scans one object with sgen-scan-object.h, buffering its references. */
4299 collect_references (HeapWalkInfo *hwi, char *start, size_t size)
4301 #include "sgen-scan-object.h"
/* Per-object iterator passed to the area/LOS iteration helpers: collect
 * the object's references, then flush any buffered (or empty-but-first)
 * callback invocation. */
4305 walk_references (char *start, size_t size, void *data)
4307 	HeapWalkInfo *hwi = data;
4310 	collect_references (hwi, start, size);
4311 	if (hwi->count || !hwi->called)
4312 		hwi->callback ((MonoObject*)start, mono_object_class (start), hwi->called? 0: size, hwi->count, hwi->refs, hwi->offsets, hwi->data);
4316  * mono_gc_walk_heap:
4317  * @flags: flags for future use
4318  * @callback: a function pointer called for each object in the heap
4319  * @data: a user data pointer that is passed to callback
4321  * This function can be used to iterate over all the live objects in the heap:
4322  * for each object, @callback is invoked, providing info about the object's
4323  * location in memory, its class, its size and the objects it references.
4324  * For each referenced object it's offset from the object address is
4325  * reported in the offsets array.
4326  * The object references may be buffered, so the callback may be invoked
4327  * multiple times for the same object: in all but the first call, the size
4328  * argument will be zero.
4329  * Note that this function can be only called in the #MONO_GC_EVENT_PRE_START_WORLD
4330  * profiler event handler.
4332  * Returns: a non-zero value if the GC doesn't support heap walking
4335 mono_gc_walk_heap (int flags, MonoGCReferences callback, void *data)
4340 	hwi.callback = callback;
/* Walk order: nursery first, then major heap, then large-object space. */
4343 	sgen_clear_nursery_fragments ();
4344 	sgen_scan_area_with_callback (nursery_section->data, nursery_section->end_data, walk_references, &hwi, FALSE);
4346 	major_collector.iterate_objects (ITERATE_OBJECTS_SWEEP_ALL, walk_references, &hwi);
4347 	sgen_los_iterate_objects (walk_references, &hwi);
/* Public entry point: trigger a collection of the given generation. */
4353 mono_gc_collect (int generation)
4358 	sgen_perform_collection (0, generation, "user request", TRUE);
/* Highest generation index supported (return value elided in excerpt). */
4363 mono_gc_max_generation (void)
/* Collections performed so far: generation 0 -> minor count, else major. */
4369 mono_gc_collection_count (int generation)
4371 	if (generation == 0)
4372 		return gc_stats.minor_gc_count;
4373 	return gc_stats.major_gc_count;
/* Total bytes in use: LOS + used nursery + major-heap accounting.
 * The FIXME below notes pinned objects are not yet included. */
4377 mono_gc_get_used_size (void)
4381 	tot = los_memory_usage;
4382 	tot += nursery_section->next_data - nursery_section->data;
4383 	tot += major_collector.get_used_size ();
4384 	/* FIXME: account for pinned objects */
/* Threshold above which allocations go to the large-object space. */
4390 mono_gc_get_los_limit (void)
4392 	return MAX_SMALL_OBJ_SIZE;
/* Capability / liveness queries (bodies largely elided in this excerpt). */
4396 mono_gc_user_markers_supported (void)
4402 mono_object_is_alive (MonoObject* o)
/* Generation of an object: 0 if it is in the nursery (else branch elided). */
4408 mono_gc_get_generation (MonoObject *obj)
4410 	if (ptr_in_nursery (obj))
4416 mono_gc_enable_events (void)
/* Register a weak reference (disappearing link) to `obj` at `link_addr`;
 * `track` selects resurrection-tracking semantics. */
4421 mono_gc_weak_link_add (void **link_addr, MonoObject *obj, gboolean track)
4423 	sgen_register_disappearing_link (obj, link_addr, track, FALSE);
/* Unregister: passing a NULL object clears the link registration. */
4427 mono_gc_weak_link_remove (void **link_addr, gboolean track)
4429 	sgen_register_disappearing_link (NULL, link_addr, track, FALSE);
/*
 * Dereference a weak link without taking the GC lock.  The hidden-pointer
 * double-read protocol below is order-sensitive; do not restructure.
 */
4433 mono_gc_weak_link_get (void **link_addr)
/* volatile forces a real re-read of the slot for the second check. */
4435 	void * volatile *link_addr_volatile;
4439 	link_addr_volatile = link_addr;
4440 	ptr = (void*)*link_addr_volatile;
4442 	 * At this point we have a hidden pointer. If the GC runs
4443 	 * here, it will not recognize the hidden pointer as a
4444 	 * reference, and if the object behind it is not referenced
4445 	 * elsewhere, it will be freed. Once the world is restarted
4446 	 * we reveal the pointer, giving us a pointer to a freed
4447 	 * object. To make sure we don't return it, we load the
4448 	 * hidden pointer again. If it's still the same, we can be
4449 	 * sure the object reference is valid.
4452 	obj = (MonoObject*) REVEAL_POINTER (ptr);
/* Order the reveal before the bridge check and the re-read below. */
4456 	mono_memory_barrier ();
4459 	 * During the second bridge processing step the world is
4460 	 * running again. That step processes all weak links once
4461 	 * more to null those that refer to dead objects. Before that
4462 	 * is completed, those links must not be followed, so we
4463 	 * conservatively wait for bridge processing when any weak
4464 	 * link is dereferenced.
4466 	if (G_UNLIKELY (bridge_processing_in_progress))
4467 		mono_gc_wait_for_bridge_processing ();
/* Second read: if the slot changed, retry/fail path (elided) is taken. */
4469 	if ((void*)*link_addr_volatile != ptr)
/* Register an ephemeron array: prepend a node to the global list so the
 * collector processes its entries with ephemeron semantics.
 * NOTE(review): locking around the list update is elided in this excerpt. */
4476 mono_gc_ephemeron_array_add (MonoObject *obj)
4478 	EphemeronLinkNode *node;
4482 	node = sgen_alloc_internal (INTERNAL_MEM_EPHEMERON_LINK);
4487 	node->array = (char*)obj;
4488 	node->next = ephemeron_list;
4489 	ephemeron_list = node;
4491 	SGEN_LOG (5, "Registered ephemeron array %p", obj);
/* Toggle synchronous major collections; only meaningful (and only
 * honored) when the concurrent major collector is active. */
4498 mono_gc_set_allow_synchronous_major (gboolean flag)
4500 	if (!major_collector.is_concurrent)
4503 	allow_synchronous_major = flag;
/* Run `func(data)` while holding the GC interruption lock (the matching
 * LOCK_INTERRUPTION before the call is elided in this excerpt). */
4508 mono_gc_invoke_with_gc_lock (MonoGCLockedCallbackFunc func, void *data)
4512 	result = func (data);
4513 	UNLOCK_INTERRUPTION;
/* A thread counts as a "GC thread" if it is registered with the thread
 * info machinery. */
4518 mono_gc_is_gc_thread (void)
4522 	result = mono_thread_info_current () != NULL;
/* True when `method` must not be interrupted by the GC (runtime-critical
 * or an sgen-internal critical method such as managed allocators). */
4528 is_critical_method (MonoMethod *method)
4530 	return mono_runtime_is_critical_method (method) || sgen_is_critical_method (method);
/*
 * Report a malformed environment-variable option to stderr.
 * @fallback: human-readable description of the behavior used instead
 *            (may be NULL — the conditional around line 4543 is elided).
 * @description_format: printf-style explanation of the problem.
 */
4534 sgen_env_var_error (const char *env_var, const char *fallback, const char *description_format, ...)
4538 	va_start (ap, description_format);
4540 	fprintf (stderr, "Warning: In environment variable `%s': ", env_var);
4541 	vfprintf (stderr, description_format, ap);
4543 		fprintf (stderr, " - %s", fallback);
4544 	fprintf (stderr, "\n");
/*
 * Parse `opt` as a double and validate it lies within [min, max],
 * reporting errors via sgen_env_var_error.  Success/failure returns and
 * the final `*result = val` assignment are elided in this excerpt.
 */
4550 parse_double_in_interval (const char *env_var, const char *opt_name, const char *opt, double min, double max, double *result)
4553 	double val = strtod (opt, &endptr);
/* strtod signals "no digits consumed" by endptr == opt. */
4554 	if (endptr == opt) {
4555 		sgen_env_var_error (env_var, "Using default value.", "`%s` must be a number.", opt_name);
4558 	else if (val < min || val > max) {
4559 		sgen_env_var_error (env_var, "Using default value.", "`%s` must be between %.2f - %.2f.", opt_name, min, max);
/*
 * One-time initialization of the SGen GC: guards against concurrent init,
 * sets up locks/threads/allocators, parses MONO_GC_PARAMS and
 * MONO_GC_DEBUG, selects minor/major collectors, and initializes the
 * memory governor, cementing and the card-table remembered set.
 * NOTE(review): this excerpt elides many original lines (braces, gotos,
 * `continue`s, some declarations); comments describe visible code only.
 */
4567 mono_gc_base_init (void)
4569 	MonoThreadInfoCallbacks cb;
4572 	char *major_collector_opt = NULL;
4573 	char *minor_collector_opt = NULL;
4574 	size_t max_heap = 0;
4575 	size_t soft_limit = 0;
4579 	gboolean debug_print_allowance = FALSE;
4580 	double allowance_ratio = 0, save_target = 0;
4581 	gboolean have_split_nursery = FALSE;
4582 	gboolean cement_enabled = TRUE;
/* CAS-based one-time-init protocol: -1 = initializing, 0 = not yet,
 * other = done.  Loop body branches are partially elided. */
4585 		result = InterlockedCompareExchange (&gc_initialized, -1, 0);
4588 			/* already inited */
4591 			/* being inited by another thread */
4595 			/* we will init it */
4598 			g_assert_not_reached ();
4600 	} while (result != 0);
4602 	SGEN_TV_GETTIME (sgen_init_timestamp);
4604 	LOCK_INIT (gc_mutex);
4606 	pagesize = mono_pagesize ();
4607 	gc_debug_file = stderr;
/* Hook sgen's thread lifecycle callbacks into the threading subsystem. */
4609 	cb.thread_register = sgen_thread_register;
4610 	cb.thread_detach = sgen_thread_detach;
4611 	cb.thread_unregister = sgen_thread_unregister;
4612 	cb.thread_attach = sgen_thread_attach;
4613 	cb.mono_method_is_critical = (gpointer)is_critical_method;
4615 	cb.thread_exit = mono_gc_pthread_exit;
4616 	cb.mono_gc_pthread_create = (gpointer)mono_gc_pthread_create;
4619 	mono_threads_init (&cb, sizeof (SgenThreadInfo));
4621 	LOCK_INIT (sgen_interruption_mutex);
4622 	LOCK_INIT (pin_queue_mutex);
/* First pass over MONO_GC_PARAMS: only extract collector choices, since
 * later option handling depends on which collectors are active. */
4624 	if ((env = g_getenv (MONO_GC_PARAMS_NAME))) {
4625 		opts = g_strsplit (env, ",", -1);
4626 		for (ptr = opts; *ptr; ++ptr) {
4628 			if (g_str_has_prefix (opt, "major=")) {
4629 				opt = strchr (opt, '=') + 1;
4630 				major_collector_opt = g_strdup (opt);
4631 			} else if (g_str_has_prefix (opt, "minor=")) {
4632 				opt = strchr (opt, '=') + 1;
4633 				minor_collector_opt = g_strdup (opt);
4641 	sgen_init_internal_allocator ();
4642 	sgen_init_nursery_allocator ();
4643 	sgen_init_fin_weak_hash ();
4645 	sgen_init_hash_table ();
4647 	sgen_register_fixed_internal_mem_type (INTERNAL_MEM_SECTION, SGEN_SIZEOF_GC_MEM_SECTION);
4648 	sgen_register_fixed_internal_mem_type (INTERNAL_MEM_FINALIZE_READY_ENTRY, sizeof (FinalizeReadyEntry));
4649 	sgen_register_fixed_internal_mem_type (INTERNAL_MEM_GRAY_QUEUE, sizeof (GrayQueueSection));
4650 	sgen_register_fixed_internal_mem_type (INTERNAL_MEM_EPHEMERON_LINK, sizeof (EphemeronLinkNode));
4652 #ifndef HAVE_KW_THREAD
4653 	mono_native_tls_alloc (&thread_info_key, NULL);
4654 #if defined(__APPLE__) || defined (HOST_WIN32)
4656 	 * CEE_MONO_TLS requires the tls offset, not the key, so the code below only works on darwin,
4657 	 * where the two are the same.
4659 	mono_tls_key_set_offset (TLS_KEY_SGEN_THREAD_INFO, thread_info_key);
4663 	int tls_offset = -1;
4664 	MONO_THREAD_VAR_OFFSET (sgen_thread_info, tls_offset);
4665 	mono_tls_key_set_offset (TLS_KEY_SGEN_THREAD_INFO, tls_offset);
4670 	 * This needs to happen before any internal allocations because
4671 	 * it inits the small id which is required for hazard pointer
4676 	mono_thread_info_attach (&dummy);
/* Minor collector selection (label `use_simple_nursery:` elided). */
4678 	if (!minor_collector_opt) {
4679 		sgen_simple_nursery_init (&sgen_minor_collector);
4681 		if (!strcmp (minor_collector_opt, "simple")) {
4683 			sgen_simple_nursery_init (&sgen_minor_collector);
4684 		} else if (!strcmp (minor_collector_opt, "split")) {
4685 			sgen_split_nursery_init (&sgen_minor_collector);
4686 			have_split_nursery = TRUE;
4688 			sgen_env_var_error (MONO_GC_PARAMS_NAME, "Using `simple` instead.", "Unknown minor collector `%s'.", minor_collector_opt);
4689 			goto use_simple_nursery;
/* Major collector selection.
 * NOTE(review): the `!major_collector_opt` tests in the two else-if
 * branches below are dead — a NULL opt is already consumed by the first
 * branch.  Harmless but should be removed for clarity. */
4693 	if (!major_collector_opt || !strcmp (major_collector_opt, "marksweep")) {
4694 	use_marksweep_major:
4695 		sgen_marksweep_init (&major_collector);
4696 	} else if (!major_collector_opt || !strcmp (major_collector_opt, "marksweep-par")) {
4697 		sgen_marksweep_par_init (&major_collector);
4698 	} else if (!major_collector_opt || !strcmp (major_collector_opt, "marksweep-conc")) {
4699 		sgen_marksweep_conc_init (&major_collector);
4701 		sgen_env_var_error (MONO_GC_PARAMS_NAME, "Using `marksweep` instead.", "Unknown major collector `%s'.", major_collector_opt);
4702 		goto use_marksweep_major;
4705 	if (have_split_nursery && major_collector.is_parallel) {
4706 		sgen_env_var_error (MONO_GC_PARAMS_NAME, "Disabling split minor collector.", "`minor=split` is not supported with the parallel collector yet.");
4707 		have_split_nursery = FALSE;
/* Worker-thread count defaults to CPU count, clamped to 16 (clamp
 * assignment elided). */
4710 	num_workers = mono_cpu_count ();
4711 	g_assert (num_workers > 0);
4712 	if (num_workers > 16)
4715 	///* Keep this the default for now */
4716 	/* Precise marking is broken on all supported targets. Disable until fixed. */
4717 	conservative_stack_mark = TRUE;
4719 	sgen_nursery_size = DEFAULT_NURSERY_SIZE;
/* Second pass over MONO_GC_PARAMS: all remaining options. */
4722 		gboolean usage_printed = FALSE;
4724 		for (ptr = opts; *ptr; ++ptr) {
4726 			if (!strcmp (opt, ""))
4728 			if (g_str_has_prefix (opt, "major="))
4730 			if (g_str_has_prefix (opt, "minor="))
4732 			if (g_str_has_prefix (opt, "max-heap-size=")) {
4733 				size_t max_heap_candidate = 0;
4734 				opt = strchr (opt, '=') + 1;
4735 				if (*opt && mono_gc_parse_environment_string_extract_number (opt, &max_heap_candidate)) {
/* Round the heap cap up to a whole number of pages. */
4736 					max_heap = (max_heap_candidate + mono_pagesize () - 1) & ~(size_t)(mono_pagesize () - 1);
4737 					if (max_heap != max_heap_candidate)
4738 						sgen_env_var_error (MONO_GC_PARAMS_NAME, "Rounding up.", "`max-heap-size` size must be a multiple of %d.", mono_pagesize ());
4740 					sgen_env_var_error (MONO_GC_PARAMS_NAME, NULL, "`max-heap-size` must be an integer.");
4744 			if (g_str_has_prefix (opt, "soft-heap-limit=")) {
4745 				opt = strchr (opt, '=') + 1;
4746 				if (*opt && mono_gc_parse_environment_string_extract_number (opt, &soft_limit)) {
4747 					if (soft_limit <= 0) {
4748 						sgen_env_var_error (MONO_GC_PARAMS_NAME, NULL, "`soft-heap-limit` must be positive.");
4752 					sgen_env_var_error (MONO_GC_PARAMS_NAME, NULL, "`soft-heap-limit` must be an integer.");
4756 			if (g_str_has_prefix (opt, "workers=")) {
4759 				if (!major_collector.is_parallel) {
4760 					sgen_env_var_error (MONO_GC_PARAMS_NAME, "Ignoring.", "The `workers` option can only be used for parallel collectors.");
4763 				opt = strchr (opt, '=') + 1;
4764 				val = strtol (opt, &endptr, 10);
4765 				if (!*opt || *endptr) {
4766 					sgen_env_var_error (MONO_GC_PARAMS_NAME, "Ignoring.", "Cannot parse the `workers` option value.");
4769 				if (val <= 0 || val > 16) {
4770 					sgen_env_var_error (MONO_GC_PARAMS_NAME, "Using default value.", "The number of `workers` must be in the range 1 to 16.");
4773 				num_workers = (int)val;
4776 			if (g_str_has_prefix (opt, "stack-mark=")) {
4777 				opt = strchr (opt, '=') + 1;
4778 				if (!strcmp (opt, "precise")) {
4779 					conservative_stack_mark = FALSE;
4780 				} else if (!strcmp (opt, "conservative")) {
4781 					conservative_stack_mark = TRUE;
4783 					sgen_env_var_error (MONO_GC_PARAMS_NAME, conservative_stack_mark ? "Using `conservative`." : "Using `precise`.",
4784 							"Invalid value `%s` for `stack-mark` option, possible values are: `precise`, `conservative`.", opt);
4788 			if (g_str_has_prefix (opt, "bridge-implementation=")) {
4789 				opt = strchr (opt, '=') + 1;
4790 				sgen_set_bridge_implementation (opt);
4793 			if (g_str_has_prefix (opt, "toggleref-test")) {
4794 				sgen_register_test_toggleref_callback ();
4799 			if (g_str_has_prefix (opt, "nursery-size=")) {
4801 				opt = strchr (opt, '=') + 1;
4802 				if (*opt && mono_gc_parse_environment_string_extract_number (opt, &val)) {
4803 #ifdef SGEN_ALIGN_NURSERY
/* Aligned nurseries require a power-of-two size... */
4804 					if ((val & (val - 1))) {
4805 						sgen_env_var_error (MONO_GC_PARAMS_NAME, "Using default value.", "`nursery-size` must be a power of two.");
/* ...and at least SGEN_MAX_NURSERY_WASTE bytes. */
4809 					if (val < SGEN_MAX_NURSERY_WASTE) {
4810 						sgen_env_var_error (MONO_GC_PARAMS_NAME, "Using default value.",
4811 								"`nursery-size` must be at least %d bytes.", SGEN_MAX_NURSERY_WASTE);
4815 					sgen_nursery_size = val;
/* Derive sgen_nursery_bits = log2(size). */
4816 					sgen_nursery_bits = 0;
4817 					while (ONE_P << (++ sgen_nursery_bits) != sgen_nursery_size)
4820 					sgen_nursery_size = val;
4823 					sgen_env_var_error (MONO_GC_PARAMS_NAME, "Using default value.", "`nursery-size` must be an integer.");
4829 			if (g_str_has_prefix (opt, "save-target-ratio=")) {
4831 				opt = strchr (opt, '=') + 1;
4832 				if (parse_double_in_interval (MONO_GC_PARAMS_NAME, "save-target-ratio", opt,
4833 						SGEN_MIN_SAVE_TARGET_RATIO, SGEN_MAX_SAVE_TARGET_RATIO, &val)) {
4838 			if (g_str_has_prefix (opt, "default-allowance-ratio=")) {
4840 				opt = strchr (opt, '=') + 1;
/* NOTE(review): BUG — both interval bounds below are
 * SGEN_MIN_ALLOWANCE_NURSERY_SIZE_RATIO, so only the exact minimum
 * value is accepted.  The usage text (line 4906) advertises a
 * MIN..MAX range; the second argument should almost certainly be
 * SGEN_MAX_ALLOWANCE_NURSERY_SIZE_RATIO. */
4841 				if (parse_double_in_interval (MONO_GC_PARAMS_NAME, "default-allowance-ratio", opt,
4842 						SGEN_MIN_ALLOWANCE_NURSERY_SIZE_RATIO, SGEN_MIN_ALLOWANCE_NURSERY_SIZE_RATIO, &val)) {
4843 					allowance_ratio = val;
4847 			if (g_str_has_prefix (opt, "allow-synchronous-major=")) {
4848 				if (!major_collector.is_concurrent) {
4849 					sgen_env_var_error (MONO_GC_PARAMS_NAME, "Ignoring.", "`allow-synchronous-major` is only valid for the concurrent major collector.");
4853 				opt = strchr (opt, '=') + 1;
4855 				if (!strcmp (opt, "yes")) {
4856 					allow_synchronous_major = TRUE;
4857 				} else if (!strcmp (opt, "no")) {
4858 					allow_synchronous_major = FALSE;
4860 					sgen_env_var_error (MONO_GC_PARAMS_NAME, "Using default value.", "`allow-synchronous-major` must be either `yes' or `no'.");
4865 			if (!strcmp (opt, "cementing")) {
4866 				if (major_collector.is_parallel) {
4867 					sgen_env_var_error (MONO_GC_PARAMS_NAME, "Ignoring.", "`cementing` is not supported for the parallel major collector.");
4870 				cement_enabled = TRUE;
4873 			if (!strcmp (opt, "no-cementing")) {
4874 				cement_enabled = FALSE;
/* Finally give the active collectors a chance to consume the option. */
4878 			if (major_collector.handle_gc_param && major_collector.handle_gc_param (opt))
4881 			if (sgen_minor_collector.handle_gc_param && sgen_minor_collector.handle_gc_param (opt))
4884 			sgen_env_var_error (MONO_GC_PARAMS_NAME, "Ignoring.", "Unknown option `%s`.", opt);
/* Usage dump, printed at most once per run (guarded by usage_printed). */
4889 			fprintf (stderr, "\n%s must be a comma-delimited list of one or more of the following:\n", MONO_GC_PARAMS_NAME);
4890 			fprintf (stderr, "  max-heap-size=N (where N is an integer, possibly with a k, m or a g suffix)\n");
4891 			fprintf (stderr, "  soft-heap-limit=n (where N is an integer, possibly with a k, m or a g suffix)\n");
4892 			fprintf (stderr, "  nursery-size=N (where N is an integer, possibly with a k, m or a g suffix)\n");
4893 			fprintf (stderr, "  major=COLLECTOR (where COLLECTOR is `marksweep', `marksweep-conc', `marksweep-par')\n");
4894 			fprintf (stderr, "  minor=COLLECTOR (where COLLECTOR is `simple' or `split')\n");
4895 			fprintf (stderr, "  wbarrier=WBARRIER (where WBARRIER is `remset' or `cardtable')\n");
4896 			fprintf (stderr, "  stack-mark=MARK-METHOD (where MARK-METHOD is 'precise' or 'conservative')\n");
4897 			fprintf (stderr, "  [no-]cementing\n");
4898 			if (major_collector.is_concurrent)
4899 				fprintf (stderr, "  allow-synchronous-major=FLAG (where FLAG is `yes' or `no')\n");
4900 			if (major_collector.print_gc_param_usage)
4901 				major_collector.print_gc_param_usage ();
4902 			if (sgen_minor_collector.print_gc_param_usage)
4903 				sgen_minor_collector.print_gc_param_usage ();
4904 			fprintf (stderr, " Experimental options:\n");
4905 			fprintf (stderr, "  save-target-ratio=R (where R must be between %.2f - %.2f).\n", SGEN_MIN_SAVE_TARGET_RATIO, SGEN_MAX_SAVE_TARGET_RATIO);
4906 			fprintf (stderr, "  default-allowance-ratio=R (where R must be between %.2f - %.2f).\n", SGEN_MIN_ALLOWANCE_NURSERY_SIZE_RATIO, SGEN_MAX_ALLOWANCE_NURSERY_SIZE_RATIO);
4907 			fprintf (stderr, "\n");
4909 			usage_printed = TRUE;
/* Parallel major: cementing unsupported, spin up the worker pool.
 * Concurrent major: a single worker thread. */
4914 	if (major_collector.is_parallel) {
4915 		cement_enabled = FALSE;
4916 		sgen_workers_init (num_workers);
4917 	} else if (major_collector.is_concurrent) {
4918 		sgen_workers_init (1);
4921 	if (major_collector_opt)
4922 		g_free (major_collector_opt);
4924 	if (minor_collector_opt)
4925 		g_free (minor_collector_opt);
4929 	sgen_cement_init (cement_enabled);
/* Parse MONO_GC_DEBUG: a leading digit sets the debug level, the rest
 * are named debug/diagnostic switches. */
4931 	if ((env = g_getenv (MONO_GC_DEBUG_NAME))) {
4932 		gboolean usage_printed = FALSE;
4934 		opts = g_strsplit (env, ",", -1);
4935 		for (ptr = opts; ptr && *ptr; ptr ++) {
4937 			if (!strcmp (opt, ""))
4939 			if (opt [0] >= '0' && opt [0] <= '9') {
4940 				gc_debug_level = atoi (opt);
/* Optional `level:filename` form — per-process debug log file. */
4946 				char *rf = g_strdup_printf ("%s.%d", opt, GetCurrentProcessId ());
4948 				char *rf = g_strdup_printf ("%s.%d", opt, getpid ());
4950 				gc_debug_file = fopen (rf, "wb");
4952 					gc_debug_file = stderr;
4955 			} else if (!strcmp (opt, "print-allowance")) {
4956 				debug_print_allowance = TRUE;
4957 			} else if (!strcmp (opt, "print-pinning")) {
4958 				do_pin_stats = TRUE;
4959 			} else if (!strcmp (opt, "verify-before-allocs")) {
4960 				verify_before_allocs = 1;
4961 				has_per_allocation_action = TRUE;
4962 			} else if (g_str_has_prefix (opt, "verify-before-allocs=")) {
4963 				char *arg = strchr (opt, '=') + 1;
4964 				verify_before_allocs = atoi (arg);
4965 				has_per_allocation_action = TRUE;
4966 			} else if (!strcmp (opt, "collect-before-allocs")) {
4967 				collect_before_allocs = 1;
4968 				has_per_allocation_action = TRUE;
4969 			} else if (g_str_has_prefix (opt, "collect-before-allocs=")) {
4970 				char *arg = strchr (opt, '=') + 1;
4971 				has_per_allocation_action = TRUE;
4972 				collect_before_allocs = atoi (arg);
4973 			} else if (!strcmp (opt, "verify-before-collections")) {
4974 				whole_heap_check_before_collection = TRUE;
4975 			} else if (!strcmp (opt, "check-at-minor-collections")) {
4976 				consistency_check_at_minor_collection = TRUE;
4977 				nursery_clear_policy = CLEAR_AT_GC;
4978 			} else if (!strcmp (opt, "mod-union-consistency-check")) {
4979 				if (!major_collector.is_concurrent) {
4980 					sgen_env_var_error (MONO_GC_DEBUG_NAME, "Ignoring.", "`mod-union-consistency-check` only works with concurrent major collector.");
4983 				mod_union_consistency_check = TRUE;
4984 			} else if (!strcmp (opt, "check-mark-bits")) {
4985 				check_mark_bits_after_major_collection = TRUE;
4986 			} else if (!strcmp (opt, "check-nursery-pinned")) {
4987 				check_nursery_objects_pinned = TRUE;
4988 			} else if (!strcmp (opt, "xdomain-checks")) {
4989 				xdomain_checks = TRUE;
4990 			} else if (!strcmp (opt, "clear-at-gc")) {
4991 				nursery_clear_policy = CLEAR_AT_GC;
4992 			} else if (!strcmp (opt, "clear-nursery-at-gc")) {
4993 				nursery_clear_policy = CLEAR_AT_GC;
4994 			} else if (!strcmp (opt, "clear-at-tlab-creation")) {
4995 				nursery_clear_policy = CLEAR_AT_TLAB_CREATION;
4996 			} else if (!strcmp (opt, "debug-clear-at-tlab-creation")) {
4997 				nursery_clear_policy = CLEAR_AT_TLAB_CREATION_DEBUG;
4998 			} else if (!strcmp (opt, "check-scan-starts")) {
4999 				do_scan_starts_check = TRUE;
5000 			} else if (!strcmp (opt, "verify-nursery-at-minor-gc")) {
5001 				do_verify_nursery = TRUE;
5002 			} else if (!strcmp (opt, "check-concurrent")) {
5003 				if (!major_collector.is_concurrent) {
5004 					sgen_env_var_error (MONO_GC_DEBUG_NAME, "Ignoring.", "`check-concurrent` only works with concurrent major collectors.");
5007 				do_concurrent_checks = TRUE;
5008 			} else if (!strcmp (opt, "dump-nursery-at-minor-gc")) {
5009 				do_dump_nursery_content = TRUE;
5010 			} else if (!strcmp (opt, "no-managed-allocator")) {
5011 				sgen_set_use_managed_allocator (FALSE);
5012 			} else if (!strcmp (opt, "disable-minor")) {
5013 				disable_minor_collections = TRUE;
5014 			} else if (!strcmp (opt, "disable-major")) {
5015 				disable_major_collections = TRUE;
5016 			} else if (g_str_has_prefix (opt, "heap-dump=")) {
5017 				char *filename = strchr (opt, '=') + 1;
5018 				nursery_clear_policy = CLEAR_AT_GC;
5019 				heap_dump_file = fopen (filename, "w");
5020 				if (heap_dump_file) {
5021 					fprintf (heap_dump_file, "<sgen-dump>\n");
5022 					do_pin_stats = TRUE;
5024 			} else if (g_str_has_prefix (opt, "binary-protocol=")) {
5025 				char *filename = strchr (opt, '=') + 1;
5026 				char *colon = strrchr (filename, ':');
5029 					if (!mono_gc_parse_environment_string_extract_number (colon + 1, &limit)) {
5030 						sgen_env_var_error (MONO_GC_DEBUG_NAME, "Ignoring limit.", "Binary protocol file size limit must be an integer.");
5035 				binary_protocol_init (filename, (long long)limit);
5036 			} else if (!sgen_bridge_handle_gc_debug (opt)) {
5037 				sgen_env_var_error (MONO_GC_DEBUG_NAME, "Ignoring.", "Unknown option `%s`.", opt);
/* MONO_GC_DEBUG usage dump, printed at most once per run. */
5042 				fprintf (stderr, "\n%s must be of the format [<l>[:<filename>]|<option>]+ where <l> is a debug level 0-9.\n", MONO_GC_DEBUG_NAME);
5043 				fprintf (stderr, "Valid <option>s are:\n");
5044 				fprintf (stderr, "  collect-before-allocs[=<n>]\n");
5045 				fprintf (stderr, "  verify-before-allocs[=<n>]\n");
5046 				fprintf (stderr, "  check-at-minor-collections\n");
5047 				fprintf (stderr, "  check-mark-bits\n");
5048 				fprintf (stderr, "  check-nursery-pinned\n");
5049 				fprintf (stderr, "  verify-before-collections\n");
5050 				fprintf (stderr, "  verify-nursery-at-minor-gc\n");
5051 				fprintf (stderr, "  dump-nursery-at-minor-gc\n");
5052 				fprintf (stderr, "  disable-minor\n");
5053 				fprintf (stderr, "  disable-major\n");
5054 				fprintf (stderr, "  xdomain-checks\n");
5055 				fprintf (stderr, "  check-concurrent\n");
5056 				fprintf (stderr, "  clear-[nursery-]at-gc\n");
5057 				fprintf (stderr, "  clear-at-tlab-creation\n");
5058 				fprintf (stderr, "  debug-clear-at-tlab-creation\n");
5059 				fprintf (stderr, "  check-scan-starts\n");
5060 				fprintf (stderr, "  no-managed-allocator\n");
5061 				fprintf (stderr, "  print-allowance\n");
5062 				fprintf (stderr, "  print-pinning\n");
5063 				fprintf (stderr, "  heap-dump=<filename>\n");
5064 				fprintf (stderr, "  binary-protocol=<filename>[:<file-size-limit>]\n");
5065 				sgen_bridge_print_gc_debug_usage ();
5066 				fprintf (stderr, "\n");
5068 				usage_printed = TRUE;
/* Debug features incompatible with the parallel collector get disabled. */
5074 	if (major_collector.is_parallel) {
5075 		if (heap_dump_file) {
5076 			sgen_env_var_error (MONO_GC_DEBUG_NAME, "Disabling.", "Cannot do `heap-dump` with the parallel collector.");
5077 			fclose (heap_dump_file);
5078 			heap_dump_file = NULL;
5081 			sgen_env_var_error (MONO_GC_DEBUG_NAME, "Disabling.", "`print-pinning` is not supported with the parallel collector.");
5082 			do_pin_stats = FALSE;
5086 	if (major_collector.post_param_init)
5087 		major_collector.post_param_init (&major_collector);
5089 	sgen_memgov_init (max_heap, soft_limit, debug_print_allowance, allowance_ratio, save_target);
5091 	memset (&remset, 0, sizeof (remset));
5093 	sgen_card_table_init (&remset);
/* Short identifier of this GC implementation (return value elided). */
5099 mono_gc_get_gc_name (void)
/* Lazily-created managed write-barrier wrapper; see mono_gc_get_write_barrier. */
5104 static MonoMethod *write_barrier_method;
/* Critical methods must never be interrupted mid-execution by the GC:
 * the write barrier wrapper and the managed allocators. */
5107 sgen_is_critical_method (MonoMethod *method)
5109 	return (method == write_barrier_method || sgen_is_managed_allocator (method));
/* Whether any critical method has been created yet. */
5113 sgen_has_critical_method (void)
5115 	return write_barrier_method || sgen_has_managed_allocator ();
/*
 * Emits the IL fast-path checks of the managed write barrier: branches
 * that return early when no card needs to be marked.  Fills
 * `nursery_check_return_labels` (up to 3 slots) with branch labels the
 * caller must patch to the return point.
 */
5121 emit_nursery_check (MonoMethodBuilder *mb, int *nursery_check_return_labels)
5123 	memset (nursery_check_return_labels, 0, sizeof (int) * 3);
5124 #ifdef SGEN_ALIGN_NURSERY
5125 	// if (ptr_in_nursery (ptr)) return;
5127 	 * Masking out the bits might be faster, but we would have to use 64 bit
5128 	 * immediates, which might be slower.
/* Aligned nursery: compare (ptr >> bits) against (nursery_start >> bits). */
5130 	mono_mb_emit_ldarg (mb, 0);
5131 	mono_mb_emit_icon (mb, DEFAULT_NURSERY_BITS);
5132 	mono_mb_emit_byte (mb, CEE_SHR_UN);
5133 	mono_mb_emit_ptr (mb, (gpointer)((mword)sgen_get_nursery_start () >> DEFAULT_NURSERY_BITS));
5134 	nursery_check_return_labels [0] = mono_mb_emit_branch (mb, CEE_BEQ);
/* For non-concurrent majors, also return early when the *stored value*
 * is not in the nursery (no old->young pointer created). */
5136 	if (!major_collector.is_concurrent) {
5137 		// if (!ptr_in_nursery (*ptr)) return;
5138 		mono_mb_emit_ldarg (mb, 0);
5139 		mono_mb_emit_byte (mb, CEE_LDIND_I);
5140 		mono_mb_emit_icon (mb, DEFAULT_NURSERY_BITS);
5141 		mono_mb_emit_byte (mb, CEE_SHR_UN);
5142 		mono_mb_emit_ptr (mb, (gpointer)((mword)sgen_get_nursery_start () >> DEFAULT_NURSERY_BITS));
5143 		nursery_check_return_labels [1] = mono_mb_emit_branch (mb, CEE_BNE_UN);
/* Unaligned nursery: explicit start/end range comparisons instead.
 * NOTE(review): declared names `label_continue1/2` below do not match the
 * uses `label_continue_1/2` further down — as shown this would not
 * compile; possibly damage from the excerpting, but verify upstream. */
5146 		int label_continue1, label_continue2;
5147 		int dereferenced_var;
5149 		// if (ptr < (sgen_get_nursery_start ())) goto continue;
5150 		mono_mb_emit_ldarg (mb, 0);
5151 		mono_mb_emit_ptr (mb, (gpointer) sgen_get_nursery_start ());
5152 		label_continue_1 = mono_mb_emit_branch (mb, CEE_BLT);
5154 		// if (ptr >= sgen_get_nursery_end ())) goto continue;
5155 		mono_mb_emit_ldarg (mb, 0);
5156 		mono_mb_emit_ptr (mb, (gpointer) sgen_get_nursery_end ());
5157 		label_continue_2 = mono_mb_emit_branch (mb, CEE_BGE);
/* ptr is inside the nursery -> unconditional early return. */
5160 		nursery_check_return_labels [0] = mono_mb_emit_branch (mb, CEE_BR);
5163 		mono_mb_patch_branch (mb, label_continue_1);
5164 		mono_mb_patch_branch (mb, label_continue_2);
5166 		// Dereference and store in local var
5167 		dereferenced_var = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
5168 		mono_mb_emit_ldarg (mb, 0);
5169 		mono_mb_emit_byte (mb, CEE_LDIND_I);
5170 		mono_mb_emit_stloc (mb, dereferenced_var);
5172 		if (!major_collector.is_concurrent) {
5173 			// if (*ptr < sgen_get_nursery_start ()) return;
5174 			mono_mb_emit_ldloc (mb, dereferenced_var);
5175 			mono_mb_emit_ptr (mb, (gpointer) sgen_get_nursery_start ());
5176 			nursery_check_return_labels [1] = mono_mb_emit_branch (mb, CEE_BLT);
5178 			// if (*ptr >= sgen_get_nursery_end ()) return;
5179 			mono_mb_emit_ldloc (mb, dereferenced_var);
5180 			mono_mb_emit_ptr (mb, (gpointer) sgen_get_nursery_end ());
5181 			nursery_check_return_labels [2] = mono_mb_emit_branch (mb, CEE_BGE);
/*
 * Builds (once, lazily) the managed IL wrapper implementing the generic
 * write barrier: nursery fast-path checks, then card-table marking, or a
 * fallback icall to mono_gc_wbarrier_generic_nostore on platforms without
 * MANAGED_WBARRIER.  Publication uses check/create/barrier/store
 * double-checked locking on `write_barrier_method`.
 */
5188 mono_gc_get_write_barrier (void)
5191 	MonoMethodBuilder *mb;
5192 	MonoMethodSignature *sig;
5193 #ifdef MANAGED_WBARRIER
5194 	int i, nursery_check_labels [3];
5196 #ifdef HAVE_KW_THREAD
5197 	int stack_end_offset = -1;
5199 	MONO_THREAD_VAR_OFFSET (stack_end, stack_end_offset);
5200 	g_assert (stack_end_offset != -1);
5204 	// FIXME: Maybe create a separate version for ctors (the branch would be
5205 	// correctly predicted more times)
/* Fast exit if another thread already published the wrapper. */
5206 	if (write_barrier_method)
5207 		return write_barrier_method;
5209 	/* Create the IL version of mono_gc_barrier_generic_store () */
5210 	sig = mono_metadata_signature_alloc (mono_defaults.corlib, 1);
5211 	sig->ret = &mono_defaults.void_class->byval_arg;
5212 	sig->params [0] = &mono_defaults.int_class->byval_arg;
5214 	mb = mono_mb_new (mono_defaults.object_class, "wbarrier", MONO_WRAPPER_WRITE_BARRIER);
5217 #ifdef MANAGED_WBARRIER
5218 	emit_nursery_check (mb, nursery_check_labels);
/* Pseudocode of the card-marking sequence emitted below: */
5220 	addr = sgen_cardtable + ((address >> CARD_BITS) & CARD_MASK)
5224 	LDC_PTR sgen_cardtable
5226 	address >> CARD_BITS
5230 	if (SGEN_HAVE_OVERLAPPING_CARDS) {
5231 		LDC_PTR card_table_mask
/* Emit: *(sgen_cardtable + ((ptr >> CARD_BITS) [& CARD_MASK])) = 1 */
5238 	mono_mb_emit_ptr (mb, sgen_cardtable);
5239 	mono_mb_emit_ldarg (mb, 0);
5240 	mono_mb_emit_icon (mb, CARD_BITS);
5241 	mono_mb_emit_byte (mb, CEE_SHR_UN);
5242 #ifdef SGEN_HAVE_OVERLAPPING_CARDS
5243 	mono_mb_emit_ptr (mb, (gpointer)CARD_MASK);
5244 	mono_mb_emit_byte (mb, CEE_AND);
5246 	mono_mb_emit_byte (mb, CEE_ADD);
5247 	mono_mb_emit_icon (mb, 1);
5248 	mono_mb_emit_byte (mb, CEE_STIND_I1);
/* Patch the early-return branches from the nursery checks to here. */
5251 	for (i = 0; i < 3; ++i) {
5252 		if (nursery_check_labels [i])
5253 			mono_mb_patch_branch (mb, nursery_check_labels [i]);
5255 	mono_mb_emit_byte (mb, CEE_RET);
/* Fallback path (no managed barrier): delegate to the C icall. */
5257 	mono_mb_emit_ldarg (mb, 0);
5258 	mono_mb_emit_icall (mb, mono_gc_wbarrier_generic_nostore);
5259 	mono_mb_emit_byte (mb, CEE_RET);
5262 	res = mono_mb_create_method (mb, sig, 16);
/* Lost the publication race: discard our copy, keep the winner's. */
5266 	if (write_barrier_method) {
5267 		/* Already created */
5268 		mono_free_method (res);
5270 		/* double-checked locking */
5271 		mono_memory_barrier ();
5272 		write_barrier_method = res;
5276 	return write_barrier_method;
/* Human-readable GC description for diagnostics. */
5280 mono_gc_get_description (void)
5282 	return g_strdup ("sgen");
/* No-op / trivial capability hooks (bodies elided in this excerpt). */
5286 mono_gc_set_desktop_mode (void)
5291 mono_gc_is_moving (void)
5297 mono_gc_is_disabled (void)
5303 BOOL APIENTRY mono_gc_dllmain (HMODULE module_handle, DWORD reason, LPVOID reserved)
/* When the nursery memory is cleared (at GC vs. at TLAB creation). */
5310 sgen_get_nursery_clear_policy (void)
5312 	return nursery_clear_policy;
/*
 * Lazily builds a fake byte-array vtable used to overwrite dead/pinned
 * space so the heap always parses as a sequence of valid objects.
 * Backed by function-local statics; built once (synchronization, if any,
 * is elided from this excerpt).
 */
5316 sgen_get_array_fill_vtable (void)
5318 	if (!array_fill_vtable) {
5319 		static MonoClass klass;
5320 		static MonoVTable vtable;
5323 		MonoDomain *domain = mono_get_root_domain ();
5326 		klass.element_class = mono_defaults.byte_class;
5328 		klass.instance_size = sizeof (MonoArray);
5329 		klass.sizes.element_size = 1;
5330 		klass.name = "array_filler_type";
5332 		vtable.klass = &klass;
/* GC descriptor of a byte array: no reference slots to scan. */
5334 		vtable.gc_descr = mono_gc_make_descr_for_array (TRUE, &bmap, 0, 1);
5337 		array_fill_vtable = &vtable;
5339 	return array_fill_vtable;
/*
 * Release the global GC lock.  The deferred-free flag is sampled and
 * cleared *before* unlocking so the hazard-pointer reclamation below runs
 * outside the lock.
 */
5349 sgen_gc_unlock (void)
5351 	gboolean try_free = sgen_try_free_some_memory;
5352 	sgen_try_free_some_memory = FALSE;
5353 	mono_mutex_unlock (&gc_mutex);
5354 	MONO_GC_UNLOCKED ();
5356 		mono_thread_hazardous_try_free_some ();
/* Thin forwarding wrappers into the active major collector. */
5360 sgen_major_collector_iterate_live_block_ranges (sgen_cardtable_block_callback callback)
5362 	major_collector.iterate_live_block_ranges (callback);
5366 sgen_major_collector_scan_card_table (SgenGrayQueue *queue)
5368 	major_collector.scan_card_table (FALSE, queue);
5372 sgen_get_major_collector (void)
5374 	return &major_collector;
/* Mark/unmark the current thread as skipped (not suspended) by the GC. */
5377 void mono_gc_set_skip_thread (gboolean skip)
5379 	SgenThreadInfo *info = mono_thread_info_current ();
5382 	info->gc_disabled = skip;
/* Accessor for the active remembered-set vtable (return elided). */
5387 sgen_get_remset (void)
/*
 * Computes the GC-specific flag bits stored in a class's vtable:
 * bridge-object kind (when bridge processing is active) and
 * finalization-awareness.
 */
5393 mono_gc_get_vtable_bits (MonoClass *class)
5396 	/* FIXME move this to the bridge code */
5397 	if (sgen_need_bridge_processing ()) {
5398 		switch (sgen_bridge_class_kind (class)) {
5399 		case GC_BRIDGE_TRANSPARENT_BRIDGE_CLASS:
5400 		case GC_BRIDGE_OPAQUE_BRIDGE_CLASS:
5401 			res = SGEN_GC_BIT_BRIDGE_OBJECT;
5403 		case GC_BRIDGE_OPAQUE_CLASS:
5404 			res = SGEN_GC_BIT_BRIDGE_OPAQUE_OBJECT;
5408 	if (fin_callbacks.is_class_finalization_aware) {
5409 		if (fin_callbacks.is_class_finalization_aware (class))
5410 			res |= SGEN_GC_BIT_FINALIZER_AWARE;
/* No-op here: sgen scans stacks conservatively, so altstack ranges need
 * no special registration (body elided). */
5416 mono_gc_register_altstack (gpointer stack, gint32 stack_size, gpointer altstack, gint32 altstack_size)
/* Debug helper: stop the world, run a whole-heap consistency check,
 * restart the world. */
5423 sgen_check_whole_heap_stw (void)
5425 	sgen_stop_world (0);
5426 	sgen_clear_nursery_fragments ();
5427 	sgen_check_whole_heap (FALSE);
5428 	sgen_restart_world (0, NULL);
/* Flush buffered object-move events to the profiler, then reset the
 * buffer index. */
5432 sgen_gc_event_moves (void)
5434 	if (moved_objects_idx) {
5435 		mono_profiler_gc_moves (moved_objects, moved_objects_idx);
5436 		moved_objects_idx = 0;
/* Monotonic time elapsed since GC initialization (units per SGEN_TV_*). */
5441 sgen_timestamp (void)
5443 	SGEN_TV_DECLARE (timestamp);
5444 	SGEN_TV_GETTIME (timestamp);
5445 	return SGEN_TV_ELAPSED (sgen_init_timestamp, timestamp);
/* Install user finalizer-extension callbacks; hard-fails on a version
 * mismatch to avoid silently ignoring ABI drift. */
5449 mono_gc_register_finalizer_callbacks (MonoGCFinalizerCallbacks *callbacks)
5451 	if (callbacks->version != MONO_GC_FINALIZER_EXTENSION_VERSION)
5452 		g_error ("Invalid finalizer callback version. Expected %d but got %d\n", MONO_GC_FINALIZER_EXTENSION_VERSION, callbacks->version);
5454 	fin_callbacks = *callbacks;
5457 #endif /* HAVE_SGEN_GC */