2 * sgen-gc.c: Simple generational GC.
5 * Paolo Molaro (lupus@ximian.com)
6 * Rodrigo Kumpera (kumpera@gmail.com)
8 * Copyright 2005-2011 Novell, Inc (http://www.novell.com)
9 * Copyright 2011 Xamarin Inc (http://www.xamarin.com)
11 * Thread start/stop adapted from Boehm's GC:
12 * Copyright (c) 1994 by Xerox Corporation. All rights reserved.
13 * Copyright (c) 1996 by Silicon Graphics. All rights reserved.
14 * Copyright (c) 1998 by Fergus Henderson. All rights reserved.
15 * Copyright (c) 2000-2004 by Hewlett-Packard Company. All rights reserved.
16 * Copyright 2001-2003 Ximian, Inc
17 * Copyright 2003-2010 Novell, Inc.
18 * Copyright 2011 Xamarin, Inc.
19 * Copyright (C) 2012 Xamarin Inc
21 * This library is free software; you can redistribute it and/or
22 * modify it under the terms of the GNU Library General Public
23 * License 2.0 as published by the Free Software Foundation;
25 * This library is distributed in the hope that it will be useful,
26 * but WITHOUT ANY WARRANTY; without even the implied warranty of
27 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
28 * Library General Public License for more details.
30 * You should have received a copy of the GNU Library General Public
31 * License 2.0 along with this library; if not, write to the Free
32 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
34 * Important: allocation always provides zeroed memory; having to do
35 * a memset after allocation is deadly for performance.
36 * Memory usage at startup is currently as follows:
38 * 64 KB internal space
40 * We should provide a small memory config with half the sizes
42 * We currently try to make as few mono assumptions as possible:
43 * 1) 2-word header with no GC pointers in it (first vtable, second to store the
45 * 2) gc descriptor is the second word in the vtable (first word in the class)
46 * 3) 8 byte alignment is the minimum and enough (not true for special structures (SIMD), FIXME)
47 * 4) there is a function to get an object's size and the number of
48 * elements in an array.
49 * 5) we know the special way bounds are allocated for complex arrays
50 * 6) we know about proxies and how to treat them when domains are unloaded
52 * Always try to keep stack usage to a minimum: no recursive behaviour
53 * and no large stack allocs.
55 * General description.
56 * Objects are initially allocated in a nursery using a fast bump-pointer technique.
57 * When the nursery is full we start a nursery collection: this is performed with a
59 * When the old generation is full we start a copying GC of the old generation as well:
60 * this will be changed to mark&sweep with copying when fragmentation becomes too severe
61 * in the future. Maybe we'll even do both during the same collection like IMMIX.
63 * The things that complicate this description are:
64 * *) pinned objects: we can't move them so we need to keep track of them
65 * *) no precise info of the thread stacks and registers: we need to be able to
66 * quickly find the objects that may be referenced conservatively and pin them
67 * (this makes the first issues more important)
68 * *) large objects are too expensive to be dealt with using copying GC: we handle them
69 * with mark/sweep during major collections
70 * *) some objects need to not move even if they are small (interned strings, Type handles):
71 * we use mark/sweep for them, too: they are not allocated in the nursery, but inside
72 * PinnedChunks regions
78 *) we could have a function pointer in MonoClass to implement
79 customized write barriers for value types
81 *) investigate the stuff needed to advance a thread to a GC-safe
82 point (single-stepping, read from unmapped memory etc) and implement it.
83 This would enable us to inline allocations and write barriers, for example,
84 or at least parts of them, like the write barrier checks.
85 We may need this also for handling precise info on stacks, even simple things
86 as having uninitialized data on the stack and having to wait for the prolog
87 to zero it. Not an issue for the last frame that we scan conservatively.
88 We could always not trust the value in the slots anyway.
90 *) modify the jit to save info about references in stack locations:
91 this can be done just for locals as a start, so that at least
92 part of the stack is handled precisely.
94 *) test/fix endianness issues
96 *) Implement a card table as the write barrier instead of remembered
97 sets? Card tables are not easy to implement with our current
98 memory layout. We have several different kinds of major heap
99 objects: Small objects in regular blocks, small objects in pinned
100 chunks and LOS objects. If we just have a pointer we have no way
101 to tell which kind of object it points into, therefore we cannot
102 know where its card table is. The least we have to do to make
103 this happen is to get rid of write barriers for indirect stores.
106 *) Get rid of write barriers for indirect stores. We can do this by
107 telling the GC to wbarrier-register an object once we do an ldloca
108 or ldelema on it, and to unregister it once it's not used anymore
109 (it can only travel downwards on the stack). The problem with
110 unregistering is that it needs to happen eventually no matter
111 what, even if exceptions are thrown, the thread aborts, etc.
112 Rodrigo suggested that we could do only the registering part and
113 let the collector find out (pessimistically) when it's safe to
114 unregister, namely when the stack pointer of the thread that
115 registered the object is higher than it was when the registering
116 happened. This might make for a good first implementation to get
117 some data on performance.
119 *) Some sort of blacklist support? Blacklists is a concept from the
120 Boehm GC: if during a conservative scan we find pointers to an
121 area which we might use as heap, we mark that area as unusable, so
122 pointer retention by random pinning pointers is reduced.
124 *) experiment with max small object size (very small right now - 2kb,
125 because it's tied to the max freelist size)
127 *) add an option to mmap the whole heap in one chunk: it makes for many
128 simplifications in the checks (put the nursery at the top and just use a single
129 check for inclusion/exclusion): the issue this has is that on 32 bit systems it's
130 not flexible (too much of the address space may be used by default or we can't
131 increase the heap as needed) and we'd need a race-free mechanism to return memory
132 back to the system (mprotect(PROT_NONE) will still keep the memory allocated if it
133 was written to, munmap is needed, but the following mmap may not find the same segment
136 *) memzero the major fragments after restarting the world and optionally a smaller
139 *) investigate having fragment zeroing threads
141 *) separate locks for finalization and other minor stuff to reduce
144 *) try a different copying order to improve memory locality
146 *) a thread abort after a store but before the write barrier will
147 prevent the write barrier from executing
149 *) specialized dynamically generated markers/copiers
151 *) Dynamically adjust TLAB size to the number of threads. If we have
152 too many threads that do allocation, we might need smaller TLABs,
153 and we might get better performance with larger TLABs if we only
154 have a handful of threads. We could sum up the space left in all
155 assigned TLABs and if that's more than some percentage of the
156 nursery size, reduce the TLAB size.
158 *) Explore placing unreachable objects on unused nursery memory.
159 Instead of memset'ng a region to zero, place an int[] covering it.
160 A good place to start is add_nursery_frag. The tricky thing here is
161 placing those objects atomically outside of a collection.
163 *) Allocation should use asymmetric Dekker synchronization:
164 http://blogs.oracle.com/dave/resource/Asymmetric-Dekker-Synchronization.txt
165 This should help weak consistency archs.
172 #define _XOPEN_SOURCE
173 #define _DARWIN_C_SOURCE
179 #ifdef HAVE_PTHREAD_H
182 #ifdef HAVE_PTHREAD_NP_H
183 #include <pthread_np.h>
185 #ifdef HAVE_SEMAPHORE_H
186 #include <semaphore.h>
194 #include "metadata/sgen-gc.h"
195 #include "metadata/metadata-internals.h"
196 #include "metadata/class-internals.h"
197 #include "metadata/gc-internal.h"
198 #include "metadata/object-internals.h"
199 #include "metadata/threads.h"
200 #include "metadata/sgen-cardtable.h"
201 #include "metadata/sgen-protocol.h"
202 #include "metadata/sgen-archdep.h"
203 #include "metadata/sgen-bridge.h"
204 #include "metadata/sgen-memory-governor.h"
205 #include "metadata/sgen-hash-table.h"
206 #include "metadata/mono-gc.h"
207 #include "metadata/method-builder.h"
208 #include "metadata/profiler-private.h"
209 #include "metadata/monitor.h"
210 #include "metadata/mempool-internals.h"
211 #include "metadata/marshal.h"
212 #include "metadata/runtime.h"
213 #include "metadata/sgen-cardtable.h"
214 #include "metadata/sgen-pinning.h"
215 #include "metadata/sgen-workers.h"
216 #include "metadata/sgen-layout-stats.h"
217 #include "utils/mono-mmap.h"
218 #include "utils/mono-time.h"
219 #include "utils/mono-semaphore.h"
220 #include "utils/mono-counters.h"
221 #include "utils/mono-proclib.h"
222 #include "utils/mono-memory-model.h"
223 #include "utils/mono-logger-internal.h"
224 #include "utils/dtrace.h"
226 #include <mono/utils/mono-logger-internal.h>
227 #include <mono/utils/memcheck.h>
229 #if defined(__MACH__)
230 #include "utils/mach-support.h"
233 #define OPDEF(a,b,c,d,e,f,g,h,i,j) \
237 #include "mono/cil/opcode.def"
243 #undef pthread_create
245 #undef pthread_detach
248 * ######################################################################
249 * ######## Types and constants used by the GC.
250 * ######################################################################
253 /* 0 means not initialized, 1 is initialized, -1 means in progress */
254 static int gc_initialized = 0;
255 /* If set, check if we need to do something every X allocations */
256 gboolean has_per_allocation_action;
257 /* If set, do a heap check every X allocation */
258 guint32 verify_before_allocs = 0;
259 /* If set, do a minor collection before every X allocation */
260 guint32 collect_before_allocs = 0;
261 /* If set, do a whole heap check before each collection */
262 static gboolean whole_heap_check_before_collection = FALSE;
263 /* If set, do a heap consistency check before each minor collection */
264 static gboolean consistency_check_at_minor_collection = FALSE;
265 /* If set, do a mod union consistency check before each finishing collection pause */
266 static gboolean mod_union_consistency_check = FALSE;
267 /* If set, check whether mark bits are consistent after major collections */
268 static gboolean check_mark_bits_after_major_collection = FALSE;
269 /* If set, check that all nursery objects are pinned/not pinned, depending on context */
270 static gboolean check_nursery_objects_pinned = FALSE;
271 /* If set, do a few checks when the concurrent collector is used */
272 static gboolean do_concurrent_checks = FALSE;
273 /* If set, check that there are no references to the domain left at domain unload */
274 static gboolean xdomain_checks = FALSE;
275 /* If not null, dump the heap after each collection into this file */
276 static FILE *heap_dump_file = NULL;
277 /* If set, mark stacks conservatively, even if precise marking is possible */
278 static gboolean conservative_stack_mark = FALSE;
279 /* If set, do a plausibility check on the scan_starts before and after
281 static gboolean do_scan_starts_check = FALSE;
283 * If the major collector is concurrent and this is FALSE, we will
284 * never initiate a synchronous major collection, unless requested via
287 static gboolean allow_synchronous_major = TRUE;
288 static gboolean nursery_collection_is_parallel = FALSE;
289 static gboolean disable_minor_collections = FALSE;
290 static gboolean disable_major_collections = FALSE;
291 gboolean do_pin_stats = FALSE;
292 static gboolean do_verify_nursery = FALSE;
293 static gboolean do_dump_nursery_content = FALSE;
295 #ifdef HEAVY_STATISTICS
296 long long stat_objects_alloced_degraded = 0;
297 long long stat_bytes_alloced_degraded = 0;
299 long long stat_copy_object_called_nursery = 0;
300 long long stat_objects_copied_nursery = 0;
301 long long stat_copy_object_called_major = 0;
302 long long stat_objects_copied_major = 0;
304 long long stat_scan_object_called_nursery = 0;
305 long long stat_scan_object_called_major = 0;
307 long long stat_slots_allocated_in_vain;
309 long long stat_nursery_copy_object_failed_from_space = 0;
310 long long stat_nursery_copy_object_failed_forwarded = 0;
311 long long stat_nursery_copy_object_failed_pinned = 0;
312 long long stat_nursery_copy_object_failed_to_space = 0;
314 static int stat_wbarrier_add_to_global_remset = 0;
315 static int stat_wbarrier_set_field = 0;
316 static int stat_wbarrier_set_arrayref = 0;
317 static int stat_wbarrier_arrayref_copy = 0;
318 static int stat_wbarrier_generic_store = 0;
319 static int stat_wbarrier_generic_store_atomic = 0;
320 static int stat_wbarrier_set_root = 0;
321 static int stat_wbarrier_value_copy = 0;
322 static int stat_wbarrier_object_copy = 0;
325 int stat_minor_gcs = 0;
326 int stat_major_gcs = 0;
328 static long long stat_pinned_objects = 0;
330 static long long time_minor_pre_collection_fragment_clear = 0;
331 static long long time_minor_pinning = 0;
332 static long long time_minor_scan_remsets = 0;
333 static long long time_minor_scan_pinned = 0;
334 static long long time_minor_scan_registered_roots = 0;
335 static long long time_minor_scan_thread_data = 0;
336 static long long time_minor_finish_gray_stack = 0;
337 static long long time_minor_fragment_creation = 0;
339 static long long time_major_pre_collection_fragment_clear = 0;
340 static long long time_major_pinning = 0;
341 static long long time_major_scan_pinned = 0;
342 static long long time_major_scan_registered_roots = 0;
343 static long long time_major_scan_thread_data = 0;
344 static long long time_major_scan_alloc_pinned = 0;
345 static long long time_major_scan_finalized = 0;
346 static long long time_major_scan_big_objects = 0;
347 static long long time_major_finish_gray_stack = 0;
348 static long long time_major_free_bigobjs = 0;
349 static long long time_major_los_sweep = 0;
350 static long long time_major_sweep = 0;
351 static long long time_major_fragment_creation = 0;
353 int gc_debug_level = 0;
/* Flush any buffered GC debug output to the debug log stream. */
358 mono_gc_flush_info (void)
360 fflush (gc_debug_file);
364 #define TV_DECLARE SGEN_TV_DECLARE
365 #define TV_GETTIME SGEN_TV_GETTIME
366 #define TV_ELAPSED SGEN_TV_ELAPSED
367 #define TV_ELAPSED_MS SGEN_TV_ELAPSED_MS
369 SGEN_TV_DECLARE (sgen_init_timestamp);
371 #define ALIGN_TO(val,align) ((((guint64)val) + ((align) - 1)) & ~((align) - 1))
373 NurseryClearPolicy nursery_clear_policy = CLEAR_AT_TLAB_CREATION;
375 #define object_is_forwarded SGEN_OBJECT_IS_FORWARDED
376 #define object_is_pinned SGEN_OBJECT_IS_PINNED
377 #define pin_object SGEN_PIN_OBJECT
378 #define unpin_object SGEN_UNPIN_OBJECT
380 #define ptr_in_nursery sgen_ptr_in_nursery
382 #define LOAD_VTABLE SGEN_LOAD_VTABLE
/*
 * Return the class name of OBJ, read through its vtable.
 * NOTE(review): no NULL check on obj — callers presumably guarantee a
 * valid object pointer; confirm before using on arbitrary addresses.
 */
385 safe_name (void* obj)
387 MonoVTable *vt = (MonoVTable*)LOAD_VTABLE (obj);
388 return vt->klass->name;
391 #define safe_object_get_size sgen_safe_object_get_size
/* Public wrapper around safe_name () for use by other sgen modules. */
394 sgen_safe_name (void* obj)
396 return safe_name (obj);
400 * ######################################################################
401 * ######## Global data.
402 * ######################################################################
404 LOCK_DECLARE (gc_mutex);
405 gboolean sgen_try_free_some_memory;
407 #define SCAN_START_SIZE SGEN_SCAN_START_SIZE
409 static mword pagesize = 4096;
410 int degraded_mode = 0;
412 static mword bytes_pinned_from_failed_allocation = 0;
414 GCMemSection *nursery_section = NULL;
415 static mword lowest_heap_address = ~(mword)0;
416 static mword highest_heap_address = 0;
418 LOCK_DECLARE (sgen_interruption_mutex);
419 static LOCK_DECLARE (pin_queue_mutex);
421 #define LOCK_PIN_QUEUE mono_mutex_lock (&pin_queue_mutex)
422 #define UNLOCK_PIN_QUEUE mono_mutex_unlock (&pin_queue_mutex)
424 typedef struct _FinalizeReadyEntry FinalizeReadyEntry;
425 struct _FinalizeReadyEntry {
426 FinalizeReadyEntry *next;
430 typedef struct _EphemeronLinkNode EphemeronLinkNode;
432 struct _EphemeronLinkNode {
433 EphemeronLinkNode *next;
442 int current_collection_generation = -1;
443 volatile gboolean concurrent_collection_in_progress = FALSE;
445 /* objects that are ready to be finalized */
446 static FinalizeReadyEntry *fin_ready_list = NULL;
447 static FinalizeReadyEntry *critical_fin_list = NULL;
449 static EphemeronLinkNode *ephemeron_list;
451 /* registered roots: the key to the hash is the root start address */
453 * Different kinds of roots are kept separate to speed up pin_from_roots () for example.
455 SgenHashTable roots_hash [ROOT_TYPE_NUM] = {
456 SGEN_HASH_TABLE_INIT (INTERNAL_MEM_ROOTS_TABLE, INTERNAL_MEM_ROOT_RECORD, sizeof (RootRecord), mono_aligned_addr_hash, NULL),
457 SGEN_HASH_TABLE_INIT (INTERNAL_MEM_ROOTS_TABLE, INTERNAL_MEM_ROOT_RECORD, sizeof (RootRecord), mono_aligned_addr_hash, NULL),
458 SGEN_HASH_TABLE_INIT (INTERNAL_MEM_ROOTS_TABLE, INTERNAL_MEM_ROOT_RECORD, sizeof (RootRecord), mono_aligned_addr_hash, NULL)
460 static mword roots_size = 0; /* amount of memory in the root set */
462 #define GC_ROOT_NUM 32
464 int count; /* must be the first field */
465 void *objects [GC_ROOT_NUM];
466 int root_types [GC_ROOT_NUM];
467 uintptr_t extra_info [GC_ROOT_NUM];
/* Report the accumulated GC roots in REPORT to the profiler. */
471 notify_gc_roots (GCRootReport *report)
475 mono_profiler_gc_roots (report->count, report->objects, report->root_types, report->extra_info);
/*
 * Record one GC root in REPORT for later delivery to the profiler.
 * When the fixed-size report buffer (GC_ROOT_NUM entries) is full it is
 * flushed via notify_gc_roots () before the new entry is stored.  The
 * extra_info slot is overwritten with the object's class pointer.
 */
480 add_profile_gc_root (GCRootReport *report, void *object, int rtype, uintptr_t extra_info)
482 if (report->count == GC_ROOT_NUM)
483 notify_gc_roots (report);
484 report->objects [report->count] = object;
485 report->root_types [report->count] = rtype;
486 report->extra_info [report->count++] = (uintptr_t)((MonoVTable*)LOAD_VTABLE (object))->klass;
489 MonoNativeTlsKey thread_info_key;
491 #ifdef HAVE_KW_THREAD
492 __thread SgenThreadInfo *sgen_thread_info;
493 __thread char *stack_end;
496 /* The size of a TLAB */
497 /* The bigger the value, the less often we have to go to the slow path to allocate a new
498 * one, but the more space is wasted by threads not allocating much memory.
500 * FIXME: Make this self-tuning for each thread.
502 guint32 tlab_size = (1024 * 4);
504 #define MAX_SMALL_OBJ_SIZE SGEN_MAX_SMALL_OBJ_SIZE
506 /* Functions supplied by the runtime to be called by the GC */
507 static MonoGCCallbacks gc_callbacks;
509 #define ALLOC_ALIGN SGEN_ALLOC_ALIGN
510 #define ALLOC_ALIGN_BITS SGEN_ALLOC_ALIGN_BITS
512 #define ALIGN_UP SGEN_ALIGN_UP
514 #define MOVED_OBJECTS_NUM 64
515 static void *moved_objects [MOVED_OBJECTS_NUM];
516 static int moved_objects_idx = 0;
518 /* Vtable of the objects used to fill out nursery fragments before a collection */
519 static MonoVTable *array_fill_vtable;
521 #ifdef SGEN_DEBUG_INTERNAL_ALLOC
522 MonoNativeThreadId main_gc_thread = NULL;
525 /*Object was pinned during the current collection*/
526 static mword objects_pinned;
529 * ######################################################################
530 * ######## Macros and function declarations.
531 * ######################################################################
/* Round PTR up to the next pointer-size boundary. */
535 align_pointer (void *ptr)
537 mword p = (mword)ptr;
/* Add (alignment - 1) then mask off the low bits: classic align-up idiom. */
538 p += sizeof (gpointer) - 1;
539 p &= ~ (sizeof (gpointer) - 1);
543 typedef SgenGrayQueue GrayQueue;
545 /* forward declarations */
546 static void scan_thread_data (void *start_nursery, void *end_nursery, gboolean precise, GrayQueue *queue);
547 static void scan_from_registered_roots (char *addr_start, char *addr_end, int root_type, ScanCopyContext ctx);
548 static void scan_finalizer_entries (FinalizeReadyEntry *list, ScanCopyContext ctx);
549 static void report_finalizer_roots (void);
550 static void report_registered_roots (void);
552 static void pin_from_roots (void *start_nursery, void *end_nursery, GrayQueue *queue);
553 static int pin_objects_from_addresses (GCMemSection *section, void **start, void **end, void *start_nursery, void *end_nursery, ScanCopyContext ctx);
554 static void finish_gray_stack (int generation, GrayQueue *queue);
556 void mono_gc_scan_for_specific_ref (MonoObject *key, gboolean precise);
559 static void init_stats (void);
561 static int mark_ephemerons_in_range (ScanCopyContext ctx);
562 static void clear_unreachable_ephemerons (ScanCopyContext ctx);
563 static void null_ephemerons_for_domain (MonoDomain *domain);
565 static gboolean major_update_or_finish_concurrent_collection (gboolean force_finish);
567 SgenObjectOperations current_object_ops;
568 SgenMajorCollector major_collector;
569 SgenMinorCollector sgen_minor_collector;
570 static GrayQueue gray_queue;
572 static SgenRemeberedSet remset;
574 /* The gray queue to use from the main collection thread. */
575 #define WORKERS_DISTRIBUTE_GRAY_QUEUE (&gray_queue)
578 * The gray queue a worker job must use. If we're not parallel or
579 * concurrent, we use the main gray queue.
/*
 * Pick the gray queue a worker job must use: the worker's private queue
 * when running on a worker thread, otherwise the main-thread distribute
 * queue (non-parallel / non-concurrent case).
 */
581 static SgenGrayQueue*
582 sgen_workers_get_job_gray_queue (WorkerData *worker_data)
584 return worker_data ? &worker_data->private_gray_queue : WORKERS_DISTRIBUTE_GRAY_QUEUE;
/*
 * Move all sections of QUEUE to the section gray queue shared with the
 * worker threads, then wake the workers if any section was transferred.
 * Only legal while a concurrent collection is in progress or during a
 * parallel major collection (asserted below).
 */
588 gray_queue_redirect (SgenGrayQueue *queue)
590 gboolean wake = FALSE;
594 GrayQueueSection *section = sgen_gray_object_dequeue_section (queue);
/* Hand the dequeued section over to the shared section queue. */
597 sgen_section_gray_queue_enqueue (queue->alloc_prepare_data, section);
602 g_assert (concurrent_collection_in_progress ||
603 (current_collection_generation == GENERATION_OLD && major_collector.is_parallel));
604 if (sgen_workers_have_started ()) {
605 sgen_workers_wake_up_all ();
/* During concurrent collection no generation is being collected
 * synchronously, so the current generation must be unset. */
607 if (concurrent_collection_in_progress)
608 g_assert (current_collection_generation == -1);
/*
 * Walk the objects in [start, end) and invoke CALLBACK (obj, size, data)
 * on each real object.  Zeroed words are skipped a pointer at a time;
 * array-fill filler objects (identified by array_fill_vtable) are not
 * reported.  With allow_flags set, forwarding bits are unmasked via
 * SGEN_OBJECT_IS_FORWARDED before the vtable is read.
 */
614 sgen_scan_area_with_callback (char *start, char *end, IterateObjectCallbackFunc callback, void *data, gboolean allow_flags)
616 while (start < end) {
/* A NULL word means unallocated/zeroed space: skip one pointer. */
620 if (!*(void**)start) {
621 start += sizeof (void*); /* should be ALLOC_ALIGN, really */
626 if (!(obj = SGEN_OBJECT_IS_FORWARDED (start)))
632 size = ALIGN_UP (safe_object_get_size ((MonoObject*)obj));
/* Don't report nursery-fragment filler arrays to the callback. */
634 if ((MonoVTable*)SGEN_LOAD_VTABLE (obj) != array_fill_vtable)
635 callback (obj, size, data);
/*
 * Return whether START belongs to DOMAIN and must therefore be removed
 * during domain unload; logs and records the cleanup in the binary
 * protocol when it does.
 */
642 need_remove_object_for_domain (char *start, MonoDomain *domain)
644 if (mono_object_domain (start) == domain) {
645 SGEN_LOG (4, "Need to cleanup object %p", start);
646 binary_protocol_cleanup (start, (gpointer)LOAD_VTABLE (start), safe_object_get_size ((MonoObject*)start));
/*
 * Pre-process START for domain unload: threads must live in the root
 * domain (asserted), and real-proxy objects whose wrapped server lives
 * in the dying DOMAIN get their unwrapped_server pointer nulled so no
 * dangling cross-domain reference survives.
 */
653 process_object_for_domain_clearing (char *start, MonoDomain *domain)
655 GCVTable *vt = (GCVTable*)LOAD_VTABLE (start);
/* InternalThread objects are only ever allocated in the root domain. */
656 if (vt->klass == mono_defaults.internal_thread_class)
657 g_assert (mono_object_domain (start) == mono_get_root_domain ());
658 /* The object could be a proxy for an object in the domain
660 #ifndef DISABLE_REMOTING
661 if (mono_defaults.real_proxy_class->supertypes && mono_class_has_parent_fast (vt->klass, mono_defaults.real_proxy_class)) {
662 MonoObject *server = ((MonoRealProxy*)start)->unwrapped_server;
664 /* The server could already have been zeroed out, so
665 we need to check for that, too. */
666 if (server && (!LOAD_VTABLE (server) || mono_object_domain (server) == domain)) {
667 SGEN_LOG (4, "Cleaning up remote pointer in %p to object %p", start, server);
668 ((MonoRealProxy*)start)->unwrapped_server = NULL;
/*
 * Run the full per-object domain-unload pipeline on OBJ: clear proxy
 * pointers, decide whether OBJ must be removed, and if so unregister
 * its monitor weak link so the sync block doesn't dangle.
 */
675 clear_domain_process_object (char *obj, MonoDomain *domain)
679 process_object_for_domain_clearing (obj, domain);
680 remove = need_remove_object_for_domain (obj, domain);
/* Objects with a synchronisation block keep a weak link in the monitor
 * code; drop it before the object memory is reclaimed. */
682 if (remove && ((MonoObject*)obj)->synchronisation) {
683 void **dislink = mono_monitor_get_object_monitor_weak_link ((MonoObject*)obj);
685 sgen_register_disappearing_link (NULL, dislink, FALSE, TRUE);
/* Nursery iteration callback: zero out objects belonging to the dying
 * domain (nursery memory is reused, so zeroing suffices). */
692 clear_domain_process_minor_object_callback (char *obj, size_t size, MonoDomain *domain)
694 if (clear_domain_process_object (obj, domain))
695 memset (obj, 0, size);
/* Major-heap first-pass callback: process the object for domain
 * clearing only; actual freeing happens in the second pass. */
699 clear_domain_process_major_object_callback (char *obj, size_t size, MonoDomain *domain)
701 clear_domain_process_object (obj, domain);
/* Major-heap second-pass callback: free non-pinned objects that belong
 * to the unloading domain. */
705 clear_domain_free_major_non_pinned_object_callback (char *obj, size_t size, MonoDomain *domain)
707 if (need_remove_object_for_domain (obj, domain))
708 major_collector.free_non_pinned_object (obj, size);
/* Major-heap second-pass callback: free pinned objects that belong to
 * the unloading domain. */
712 clear_domain_free_major_pinned_object_callback (char *obj, size_t size, MonoDomain *domain)
714 if (need_remove_object_for_domain (obj, domain))
715 major_collector.free_pinned_object (obj, size);
719 * When appdomains are unloaded we can easily remove objects that have finalizers,
720 * but all the others could still be present in random places on the heap.
721 * We need a sweep to get rid of them even though it's going to be costly
723 * The reason we need to remove them is because we access the vtable and class
724 * structures to know the object size and the reference bitmap: once the domain is
725 * unloaded they point to random memory.
/*
 * Remove every object belonging to DOMAIN from the heap during appdomain
 * unload.  Sweeps the nursery, the major heap (two passes) and the LOS
 * list; also nulls ephemerons, weak links and finalizer entries for the
 * domain.  Must run stopped-world (restarts the world at the end).
 */
728 mono_gc_clear_domain (MonoDomain * domain)
730 LOSObject *bigobj, *prev;
735 binary_protocol_domain_unload_begin (domain);
/* A concurrent collection would race with freeing domain objects:
 * force it to completion first. */
739 if (concurrent_collection_in_progress)
740 sgen_perform_collection (0, GENERATION_OLD, "clear domain", TRUE);
741 g_assert (!concurrent_collection_in_progress);
743 sgen_process_fin_stage_entries ();
744 sgen_process_dislink_stage_entries ();
746 sgen_clear_nursery_fragments ();
/* Optional debug pass: verify no other domain still references objects
 * of the domain being unloaded. */
748 if (xdomain_checks && domain != mono_get_root_domain ()) {
749 sgen_scan_for_registered_roots_in_domain (domain, ROOT_TYPE_NORMAL);
750 sgen_scan_for_registered_roots_in_domain (domain, ROOT_TYPE_WBARRIER);
751 sgen_check_for_xdomain_refs ();
754 /*Ephemerons and dislinks must be processed before LOS since they might end up pointing
755 to memory returned to the OS.*/
756 null_ephemerons_for_domain (domain);
758 for (i = GENERATION_NURSERY; i < GENERATION_MAX; ++i)
759 sgen_null_links_for_domain (domain, i);
761 for (i = GENERATION_NURSERY; i < GENERATION_MAX; ++i)
762 sgen_remove_finalizers_for_domain (domain, i);
764 sgen_scan_area_with_callback (nursery_section->data, nursery_section->end_data,
765 (IterateObjectCallbackFunc)clear_domain_process_minor_object_callback, domain, FALSE);
767 /* We need two passes over major and large objects because
768 freeing such objects might give their memory back to the OS
769 (in the case of large objects) or obliterate its vtable
770 (pinned objects with major-copying or pinned and non-pinned
771 objects with major-mark&sweep), but we might need to
772 dereference a pointer from an object to another object if
773 the first object is a proxy. */
774 major_collector.iterate_objects (ITERATE_OBJECTS_SWEEP_ALL, (IterateObjectCallbackFunc)clear_domain_process_major_object_callback, domain);
775 for (bigobj = los_object_list; bigobj; bigobj = bigobj->next)
776 clear_domain_process_object (bigobj->data, domain);
/* Second LOS pass: unlink and free large objects of the domain,
 * keeping `prev` consistent across removals. */
779 for (bigobj = los_object_list; bigobj;) {
780 if (need_remove_object_for_domain (bigobj->data, domain)) {
781 LOSObject *to_free = bigobj;
783 prev->next = bigobj->next;
785 los_object_list = bigobj->next;
786 bigobj = bigobj->next;
787 SGEN_LOG (4, "Freeing large object %p", bigobj->data);
788 sgen_los_free_object (to_free);
792 bigobj = bigobj->next;
794 major_collector.iterate_objects (ITERATE_OBJECTS_SWEEP_NON_PINNED, (IterateObjectCallbackFunc)clear_domain_free_major_non_pinned_object_callback, domain);
795 major_collector.iterate_objects (ITERATE_OBJECTS_SWEEP_PINNED, (IterateObjectCallbackFunc)clear_domain_free_major_pinned_object_callback, domain);
/* Unloading the root domain means shutdown: dump accumulated stats. */
797 if (domain == mono_get_root_domain ()) {
798 if (G_UNLIKELY (do_pin_stats))
799 sgen_pin_stats_print_class_stats ();
800 sgen_object_layout_dump (stdout);
803 sgen_restart_world (0, NULL);
805 binary_protocol_domain_unload_end (domain);
811 * sgen_add_to_global_remset:
813 * The global remset contains locations which point into newspace after
814 * a minor collection. This can happen if the objects they point to are pinned.
816 * LOCKING: If called from a parallel collector, the global remset
817 * lock must be held. For serial collectors that is not necessary.
/*
 * Record PTR (a location pointing at nursery object OBJ) in the global
 * remembered set.  Cemented objects are skipped (they are treated as
 * roots already).  See the comment block above for locking rules.
 */
820 sgen_add_to_global_remset (gpointer ptr, gpointer obj)
822 SGEN_ASSERT (5, sgen_ptr_in_nursery (obj), "Target pointer of global remset must be in the nursery");
824 HEAVY_STAT (++stat_wbarrier_add_to_global_remset);
/* Sanity checks: non-concurrent collectors may only add remsets inside
 * a collection; outside a pause only the concurrent collector may. */
826 if (!major_collector.is_concurrent) {
827 SGEN_ASSERT (5, current_collection_generation != -1, "Global remsets can only be added during collections");
829 if (current_collection_generation == -1)
830 SGEN_ASSERT (5, sgen_concurrent_collection_in_progress (), "Global remsets outside of collection pauses can only be added by the concurrent collector");
833 if (!object_is_pinned (obj))
834 SGEN_ASSERT (5, sgen_minor_collector.is_split || sgen_concurrent_collection_in_progress (), "Non-pinned objects can only remain in nursery if it is a split nursery");
/* Already-cemented objects don't need a remset entry. */
835 else if (sgen_cement_lookup_or_register (obj))
838 remset.record_pointer (ptr);
840 if (G_UNLIKELY (do_pin_stats))
841 sgen_pin_stats_register_global_remset (obj);
843 SGEN_LOG (8, "Adding global remset for %p", ptr);
844 binary_protocol_global_remset (ptr, obj, (gpointer)SGEN_LOAD_VTABLE (obj));
/* DTrace probe for remset additions, guarded so the vtable lookup is
 * only paid when the probe is enabled. */
848 if (G_UNLIKELY (MONO_GC_GLOBAL_REMSET_ADD_ENABLED ())) {
849 MonoVTable *vt = (MonoVTable*)LOAD_VTABLE (obj);
850 MONO_GC_GLOBAL_REMSET_ADD ((mword)ptr, (mword)obj, sgen_safe_object_get_size (obj),
851 vt->klass->name_space, vt->klass->name);
857 * sgen_drain_gray_stack:
859 * Scan objects in the gray stack until the stack is empty. This should be called
860 * frequently after each object is copied, to achieve better locality and cache
/*
 * Pop objects off the gray queue and scan them with ctx.scan_func.
 * max_objs == -1 drains until the queue is empty; otherwise objects are
 * scanned in batches of max_objs (see the outer do/while condition).
 */
864 sgen_drain_gray_stack (int max_objs, ScanCopyContext ctx)
867 ScanObjectFunc scan_func = ctx.scan_func;
868 GrayQueue *queue = ctx.queue;
/* Unbounded drain: loop until the dequeue yields nothing. */
870 if (max_objs == -1) {
872 GRAY_OBJECT_DEQUEUE (queue, obj);
875 SGEN_LOG (9, "Precise gray object scan %p (%s)", obj, safe_name (obj));
876 scan_func (obj, queue);
/* Bounded drain: scan up to max_objs objects per batch. */
882 for (i = 0; i != max_objs; ++i) {
883 GRAY_OBJECT_DEQUEUE (queue, obj);
886 SGEN_LOG (9, "Precise gray object scan %p (%s)", obj, safe_name (obj));
887 scan_func (obj, queue);
889 } while (max_objs < 0);
895 * Addresses from start to end are already sorted. This function finds
896 * the object header for each address and pins the object. The
897 * addresses must be inside the passed section. The (start of the)
898 * address array is overwritten with the addresses of the actually
899 * pinned objects. Return the number of pinned objects.
/*
 * NOTE(review): this extraction has elided lines (the embedded original line
 * numbering jumps, e.g. 902->907, 989->996), so the body below is not
 * contiguous; comments added only where the visible code grounds them.
 */
902 pin_objects_from_addresses (GCMemSection *section, void **start, void **end, void *start_nursery, void *end_nursery, ScanCopyContext ctx)
/* Last object whose extent we identified; used to coalesce multiple
 * interior pointers into the same object into a single pin. */
907 void *last_obj = NULL;
908 size_t last_obj_size = 0;
/* Pinned addresses are compacted in place into the start of the array. */
911 void **definitely_pinned = start;
912 ScanObjectFunc scan_func = ctx.scan_func;
913 SgenGrayQueue *queue = ctx.queue;
915 sgen_nursery_allocator_prepare_for_pinning ();
917 while (start < end) {
/* The range check should be redundant: the caller guarantees the
 * addresses lie inside the section. */
919 /* the range check should be reduntant */
920 if (addr != last && addr >= start_nursery && addr < end_nursery) {
921 SGEN_LOG (5, "Considering pinning addr %p", addr);
922 /* multiple pointers to the same object */
923 if (addr >= last_obj && (char*)addr < (char*)last_obj + last_obj_size) {
/* Locate the nearest scan-start slot at or before addr to bound the
 * linear object walk below. */
927 idx = ((char*)addr - (char*)section->data) / SCAN_START_SIZE;
928 g_assert (idx < section->num_scan_start);
929 search_start = (void*)section->scan_starts [idx];
930 if (!search_start || search_start > addr) {
933 search_start = section->scan_starts [idx];
934 if (search_start && search_start <= addr)
937 if (!search_start || search_start > addr)
938 search_start = start_nursery;
940 if (search_start < last_obj)
941 search_start = (char*)last_obj + last_obj_size;
942 /* now addr should be in an object a short distance from search_start
943 * Note that search_start must point to zeroed mem or point to an object.
/* A NULL first word means unallocated (zeroed) memory: verify it is not
 * inside a live nursery fragment, then skip one aligned pointer slot. */
947 if (!*(void**)search_start) {
948 /* Consistency check */
950 for (frag = nursery_fragments; frag; frag = frag->next) {
951 if (search_start >= frag->fragment_start && search_start < frag->fragment_end)
952 g_assert_not_reached ();
956 search_start = (void*)ALIGN_UP ((mword)search_start + sizeof (gpointer));
959 last_obj = search_start;
960 last_obj_size = ALIGN_UP (safe_object_get_size ((MonoObject*)search_start));
/* A synchronisation word of -1 marks a nursery-fragment sentinel, not a
 * real object, so it is skipped. */
962 if (((MonoObject*)last_obj)->synchronisation == GINT_TO_POINTER (-1)) {
963 /* Marks the beginning of a nursery fragment, skip */
965 SGEN_LOG (8, "Pinned try match %p (%s), size %zd", last_obj, safe_name (last_obj), last_obj_size);
966 if (addr >= search_start && (char*)addr < (char*)last_obj + last_obj_size) {
/* Found the containing object: scan, pin, gray-enqueue and record it. */
968 scan_func (search_start, queue);
970 SGEN_LOG (4, "Pinned object %p, vtable %p (%s), count %d\n",
971 search_start, *(void**)search_start, safe_name (search_start), count);
972 binary_protocol_pin (search_start,
973 (gpointer)LOAD_VTABLE (search_start),
974 safe_object_get_size (search_start));
/* DTrace probe for object pinning (compiled in only with dtrace support). */
977 if (G_UNLIKELY (MONO_GC_OBJ_PINNED_ENABLED ())) {
978 int gen = sgen_ptr_in_nursery (search_start) ? GENERATION_NURSERY : GENERATION_OLD;
979 MonoVTable *vt = (MonoVTable*)LOAD_VTABLE (search_start);
980 MONO_GC_OBJ_PINNED ((mword)search_start,
981 sgen_safe_object_get_size (search_start),
982 vt->klass->name_space, vt->klass->name, gen);
986 pin_object (search_start);
987 GRAY_OBJECT_ENQUEUE (queue, search_start);
988 if (G_UNLIKELY (do_pin_stats))
989 sgen_pin_stats_register_object (search_start, last_obj_size);
990 definitely_pinned [count] = search_start;
996 /* skip to the next object */
997 search_start = (void*)((char*)search_start + last_obj_size);
998 } while (search_start <= addr);
999 /* we either pinned the correct object or we ignored the addr because
1000 * it points to unused zeroed memory.
1006 //printf ("effective pinned: %d (at the end: %d)\n", count, (char*)end_nursery - (char*)last);
/* Report each pinned object as a GC root to the profiler, if requested. */
1007 if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS) {
1008 GCRootReport report;
1010 for (idx = 0; idx < count; ++idx)
1011 add_profile_gc_root (&report, definitely_pinned [idx], MONO_PROFILE_GC_ROOT_PINNING | MONO_PROFILE_GC_ROOT_MISC, 0);
1012 notify_gc_roots (&report);
1014 stat_pinned_objects += count;
/*
 * Pin the addresses queued against SECTION.  The pin queue is compacted in
 * place by pin_objects_from_addresses (); its entry count is shrunk to the
 * number of objects actually pinned and the start pointer is cleared.
 * NOTE(review): some lines are elided in this extraction.
 */
1019 sgen_pin_objects_in_section (GCMemSection *section, ScanCopyContext ctx)
1021 int num_entries = section->pin_queue_num_entries;
1023 void **start = section->pin_queue_start;
1025 reduced_to = pin_objects_from_addresses (section, start, start + num_entries,
1026 section->data, section->next_data, ctx);
1027 section->pin_queue_num_entries = reduced_to;
1029 section->pin_queue_start = NULL;
/*
 * Pin a single object and enqueue it for scanning.  Must not be called
 * while a concurrent collection is in progress (asserted below).  In the
 * parallel case the object arrives already pinned, so only the pin stage
 * pointer is recorded; otherwise the pin bit is set here as well.
 */
1035 sgen_pin_object (void *object, GrayQueue *queue)
1037 g_assert (!concurrent_collection_in_progress);
1039 if (sgen_collection_is_parallel ()) {
1041 /*object arrives pinned*/
1042 sgen_pin_stage_ptr (object);
1046 SGEN_PIN_OBJECT (object);
1047 sgen_pin_stage_ptr (object);
1049 if (G_UNLIKELY (do_pin_stats))
1050 sgen_pin_stats_register_object (object, safe_object_get_size (object));
1052 GRAY_OBJECT_ENQUEUE (queue, object);
1053 binary_protocol_pin (object, (gpointer)LOAD_VTABLE (object), safe_object_get_size (object));
1055 #ifdef ENABLE_DTRACE
/* DTrace probe: report pinned object with its generation and class name. */
1056 if (G_UNLIKELY (MONO_GC_OBJ_PINNED_ENABLED ())) {
1057 int gen = sgen_ptr_in_nursery (object) ? GENERATION_NURSERY : GENERATION_OLD;
1058 MonoVTable *vt = (MonoVTable*)LOAD_VTABLE (object);
1059 MONO_GC_OBJ_PINNED ((mword)object, sgen_safe_object_get_size (object), vt->klass->name_space, vt->klass->name, gen);
/*
 * Parallel-collector helper: try to pin *ptr's target, or update *ptr if
 * another worker already forwarded the object.  For nursery objects the
 * pin bit is set atomically on the vtable word via CAS; on CAS failure the
 * vtable word is re-read to see whether the object was forwarded (update
 * the slot) or pinned by someone else (nothing to do).
 * NOTE(review): several lines (including the retry structure) are elided
 * in this extraction.
 */
1065 sgen_parallel_pin_or_update (void **ptr, void *obj, MonoVTable *vt, SgenGrayQueue *queue)
1069 gboolean major_pinned = FALSE;
1071 if (sgen_ptr_in_nursery (obj)) {
1072 if (SGEN_CAS_PTR (obj, (void*)((mword)vt | SGEN_PINNED_BIT), vt) == vt) {
1073 sgen_pin_object (obj, queue);
1077 major_collector.pin_major_object (obj, queue);
1078 major_pinned = TRUE;
1081 vtable_word = *(mword*)obj;
1082 /*someone else forwarded it, update the pointer and bail out*/
1083 if (vtable_word & SGEN_FORWARDED_BIT) {
1084 *ptr = (void*)(vtable_word & ~SGEN_VTABLE_BITS_MASK);
1088 /*someone pinned it, nothing to do.*/
1089 if (vtable_word & SGEN_PINNED_BIT || major_pinned)
/* Sort the addresses in array in increasing order.
 * Done using a by-the-book heap sort, which has decent and stable performance and is pretty cache efficient.
/*
 * NOTE(review): lines are elided in this extraction; what remains shows the
 * two classic heap-sort phases: sift-up heap construction, then repeated
 * extraction of the maximum with sift-down.
 */
1098 sgen_sort_addresses (void **array, int size)
/* Phase 1: build a max-heap by sifting each element up toward the root. */
1103 for (i = 1; i < size; ++i) {
1106 int parent = (child - 1) / 2;
1108 if (array [parent] >= array [child])
1111 tmp = array [parent];
1112 array [parent] = array [child];
1113 array [child] = tmp;
/* Phase 2: repeatedly swap the max (root) to the end and sift down. */
1119 for (i = size - 1; i > 0; --i) {
1122 array [i] = array [0];
1128 while (root * 2 + 1 <= end) {
1129 int child = root * 2 + 1;
/* Pick the larger of the two children. */
1131 if (child < end && array [child] < array [child + 1])
1133 if (array [root] >= array [child])
1137 array [root] = array [child];
1138 array [child] = tmp;
1146 * Scan the memory between start and end and queue values which could be pointers
1147 * to the area between start_nursery and end_nursery for later consideration.
1148 * Typically used for thread stacks.
1151 conservatively_pin_objects_from (void **start, void **end, void *start_nursery, void *end_nursery, int pin_type)
1155 #ifdef VALGRIND_MAKE_MEM_DEFINED_IF_ADDRESSABLE
/* Tell valgrind the (possibly uninitialized) stack range is OK to read. */
1156 VALGRIND_MAKE_MEM_DEFINED_IF_ADDRESSABLE (start, (char*)end - (char*)start);
1159 while (start < end) {
1160 if (*start >= start_nursery && *start < end_nursery) {
1162 * *start can point to the middle of an object
1163 * note: should we handle pointing at the end of an object?
1164 * pinning in C# code disallows pointing at the end of an object
1165 * but there is some small chance that an optimizing C compiler
1166 * may keep the only reference to an object by pointing
1167 * at the end of it. We ignore this small chance for now.
1168 * Pointers to the end of an object are indistinguishable
1169 * from pointers to the start of the next object in memory
1170 * so if we allow that we'd need to pin two objects...
1171 * We queue the pointer in an array, the
1172 * array will then be sorted and uniqued. This way
1173 * we can coalesce several pinning pointers and it should
1174 * be faster since we'd do a memory scan with increasing
1175 * addresses. Note: we can align the address to the allocation
1176 * alignment, so the unique process is more effective.
/* Round down to allocation alignment so duplicates coalesce better. */
1178 mword addr = (mword)*start;
1179 addr &= ~(ALLOC_ALIGN - 1);
1180 if (addr >= (mword)start_nursery && addr < (mword)end_nursery) {
1181 SGEN_LOG (6, "Pinning address %p from %p", (void*)addr, start);
1182 sgen_pin_stage_ptr ((void*)addr);
1185 if (G_UNLIKELY (do_pin_stats)) {
1186 if (ptr_in_nursery ((void*)addr))
1187 sgen_pin_stats_register_address ((char*)addr, pin_type);
1193 SGEN_LOG (7, "found %d potential pinned heap pointers", count);
1197 * The first thing we do in a collection is to identify pinned objects.
1198 * This function considers all the areas of memory that need to be
1199 * conservatively scanned.
1202 pin_from_roots (void *start_nursery, void *end_nursery, GrayQueue *queue)
1206 SGEN_LOG (2, "Scanning pinned roots (%d bytes, %d/%d entries)", (int)roots_size, roots_hash [ROOT_TYPE_NORMAL].num_entries, roots_hash [ROOT_TYPE_PINNED].num_entries);
1207 /* objects pinned from the API are inside these roots */
1208 SGEN_HASH_TABLE_FOREACH (&roots_hash [ROOT_TYPE_PINNED], start_root, root) {
1209 SGEN_LOG (6, "Pinned roots %p-%p", start_root, root->end_root);
1210 conservatively_pin_objects_from (start_root, (void**)root->end_root, start_nursery, end_nursery, PIN_TYPE_OTHER);
1211 } SGEN_HASH_TABLE_FOREACH_END;
1212 /* now deal with the thread stacks
1213 * in the future we should be able to conservatively scan only:
1214 * *) the cpu registers
1215 * *) the unmanaged stack frames
1216 * *) the _last_ managed stack frame
1217 * *) pointers slots in managed frames
/* Conservatively scan all thread stacks/registers into the pin stage. */
1219 scan_thread_data (start_nursery, end_nursery, FALSE, queue);
/*
 * Drain QUEUE and clear the pin bit on each dequeued object.  Each entry is
 * asserted to be pinned before being unpinned.
 * NOTE(review): the surrounding loop structure is elided in this extraction.
 */
1223 unpin_objects_from_queue (SgenGrayQueue *queue)
1227 GRAY_OBJECT_DEQUEUE (queue, addr);
1230 g_assert (SGEN_OBJECT_IS_PINNED (addr));
1231 SGEN_UNPIN_OBJECT (addr);
/* NOTE(review): the `typedef struct {` opener (and, presumably, a queue
 * field) is elided in this extraction — confirm against the full file. */
1236 CopyOrMarkObjectFunc func;
1238 } UserCopyOrMarkData;
/* TLS slot holding the per-thread UserCopyOrMarkData while a user-descriptor
 * root marker runs (see single_arg_user_copy_or_mark). */
1240 static MonoNativeTlsKey user_copy_or_mark_key;
/* Allocate the TLS key used to pass copy/mark context to user root markers.
 * No destructor is registered (NULL). */
1243 init_user_copy_or_mark_key (void)
1245 mono_native_tls_alloc (&user_copy_or_mark_key, NULL);
/* Install (or clear, with NULL) the current thread's copy/mark context. */
1249 set_user_copy_or_mark_data (UserCopyOrMarkData *data)
1251 mono_native_tls_set_value (user_copy_or_mark_key, data);
/* Trampoline with the single-argument signature user root markers expect:
 * fetches the copy/mark context from TLS and forwards the slot to it. */
1255 single_arg_user_copy_or_mark (void **obj)
1257 UserCopyOrMarkData *data = mono_native_tls_get_value (user_copy_or_mark_key);
1259 data->func (obj, data->queue);
1263 * The memory area from start_root to end_root contains pointers to objects.
1264 * Their position is precisely described by @desc (this means that the pointer
1265 * can be either NULL or the pointer to the start of an object).
1266 * This function copies them to to_space and updates them.
1268 * This function is not thread-safe!
1271 precisely_scan_objects_from (void** start_root, void** end_root, char* n_start, char *n_end, mword desc, ScanCopyContext ctx)
1273 CopyOrMarkObjectFunc copy_func = ctx.copy_func;
1274 SgenGrayQueue *queue = ctx.queue;
/* Dispatch on the root descriptor encoding.  NOTE(review): lines are
 * elided in this extraction (e.g. the bitmap-walk loop headers). */
1276 switch (desc & ROOT_DESC_TYPE_MASK) {
1277 case ROOT_DESC_BITMAP:
/* Simple bitmap: one bit per word; set bit + non-NULL slot => scan it. */
1278 desc >>= ROOT_DESC_TYPE_SHIFT;
1280 if ((desc & 1) && *start_root) {
1281 copy_func (start_root, queue);
1282 SGEN_LOG (9, "Overwrote root at %p with %p", start_root, *start_root);
1283 sgen_drain_gray_stack (-1, ctx);
1289 case ROOT_DESC_COMPLEX: {
/* Multi-word bitmap: first word is the word count (+1), followed by the
 * bitmap words; each word covers GC_BITS_PER_WORD slots. */
1290 gsize *bitmap_data = sgen_get_complex_descriptor_bitmap (desc);
1291 int bwords = (*bitmap_data) - 1;
1292 void **start_run = start_root;
1294 while (bwords-- > 0) {
1295 gsize bmap = *bitmap_data++;
1296 void **objptr = start_run;
1298 if ((bmap & 1) && *objptr) {
1299 copy_func (objptr, queue);
1300 SGEN_LOG (9, "Overwrote root at %p with %p", objptr, *objptr);
1301 sgen_drain_gray_stack (-1, ctx);
1306 start_run += GC_BITS_PER_WORD;
1310 case ROOT_DESC_USER: {
/* User-supplied marker: pass context via TLS around the callback. */
1311 UserCopyOrMarkData data = { copy_func, queue };
1312 MonoGCRootMarkFunc marker = sgen_get_user_descriptor_func (desc);
1313 set_user_copy_or_mark_data (&data);
1314 marker (start_root, single_arg_user_copy_or_mark);
1315 set_user_copy_or_mark_data (NULL);
1318 case ROOT_DESC_RUN_LEN:
1319 g_assert_not_reached ();
1321 g_assert_not_reached ();
/* Reset the tracked heap address range to "empty" (low = max, high = 0)
 * so subsequent sgen_update_heap_boundaries () calls re-establish it. */
1326 reset_heap_boundaries (void)
1328 lowest_heap_address = ~(mword)0;
1329 highest_heap_address = 0;
/*
 * Lock-free widening of the global [lowest, highest] heap address range via
 * CAS retry loops.  NOTE(review): the early-exit comparisons inside the
 * loops are elided in this extraction.
 */
1333 sgen_update_heap_boundaries (mword low, mword high)
1338 old = lowest_heap_address;
1341 } while (SGEN_CAS_PTR ((gpointer*)&lowest_heap_address, (gpointer)low, (gpointer)old) != (gpointer)old);
1344 old = highest_heap_address;
1347 } while (SGEN_CAS_PTR ((gpointer*)&highest_heap_address, (gpointer)high, (gpointer)old) != (gpointer)old);
1351 * Allocate and setup the data structures needed to be able to allocate objects
1352 * in the nursery. The nursery is stored in nursery_section.
1355 alloc_nursery (void)
1357 GCMemSection *section;
/* Idempotent: bail out if the nursery section already exists. */
1362 if (nursery_section)
1364 SGEN_LOG (2, "Allocating nursery size: %lu", (size_t)sgen_nursery_size);
1365 /* later we will alloc a larger area for the nursery but only activate
1366 * what we need. The rest will be used as expansion if we have too many pinned
1367 * objects in the existing nursery.
1369 /* FIXME: handle OOM */
1370 section = sgen_alloc_internal (INTERNAL_MEM_SECTION);
1372 alloc_size = sgen_nursery_size;
1374 /* If there isn't enough space even for the nursery we should simply abort. */
1375 g_assert (sgen_memgov_try_alloc_space (alloc_size, SPACE_NURSERY));
1377 #ifdef SGEN_ALIGN_NURSERY
/* Aligned nursery: alignment == size so ptr_in_nursery can use bit masks. */
1378 data = major_collector.alloc_heap (alloc_size, alloc_size, DEFAULT_NURSERY_BITS);
1380 data = major_collector.alloc_heap (alloc_size, 0, DEFAULT_NURSERY_BITS);
1382 sgen_update_heap_boundaries ((mword)data, (mword)(data + sgen_nursery_size));
1383 SGEN_LOG (4, "Expanding nursery size (%p-%p): %lu, total: %lu", data, data + alloc_size, (unsigned long)sgen_nursery_size, (unsigned long)mono_gc_get_heap_size ());
1384 section->data = section->next_data = data;
1385 section->size = alloc_size;
1386 section->end_data = data + sgen_nursery_size;
/* One scan-start slot per SCAN_START_SIZE bytes, rounded up. */
1387 scan_starts = (alloc_size + SCAN_START_SIZE - 1) / SCAN_START_SIZE;
1388 section->scan_starts = sgen_alloc_internal_dynamic (sizeof (char*) * scan_starts, INTERNAL_MEM_SCAN_STARTS, TRUE);
1389 section->num_scan_start = scan_starts;
1391 nursery_section = section;
1393 sgen_nursery_allocator_set_nursery_bounds (data, data + sgen_nursery_size);
/*
 * Public API: return the nursery start address, its size via *size, and —
 * when the nursery is alignment-based — the shift bits via *shift_bits
 * (the non-aligned branch is elided in this extraction; presumably it
 * stores -1 — confirm against the full file).
 */
1397 mono_gc_get_nursery (int *shift_bits, size_t *size)
1399 *size = sgen_nursery_size;
1400 #ifdef SGEN_ALIGN_NURSERY
1401 *shift_bits = DEFAULT_NURSERY_BITS;
1405 return sgen_get_nursery_start ();
/* Record the appdomain the current thread is stopped in, for the GC's
 * per-thread bookkeeping.  Tolerates a NULL thread info (see comment). */
1409 mono_gc_set_current_thread_appdomain (MonoDomain *domain)
1411 SgenThreadInfo *info = mono_thread_info_current ();
1413 /* Could be called from sgen_thread_unregister () with a NULL info */
1416 info->stopped_domain = domain;
/* TRUE when stacks are scanned precisely (i.e. conservative stack marking
 * is disabled). */
1421 mono_gc_precise_stack_mark_enabled (void)
1423 return !conservative_stack_mark;
/* Accessor for the GC debug log file handle. */
1427 mono_gc_get_logfile (void)
1429 return gc_debug_file;
/* Report every object on a finalizer-ready list to the profiler as a
 * MONO_PROFILE_GC_ROOT_FINALIZER root, then flush the report. */
1433 report_finalizer_roots_list (FinalizeReadyEntry *list)
1435 GCRootReport report;
1436 FinalizeReadyEntry *fin;
1439 for (fin = list; fin; fin = fin->next) {
1442 add_profile_gc_root (&report, fin->object, MONO_PROFILE_GC_ROOT_FINALIZER, 0);
1444 notify_gc_roots (&report);
/* Report both the normal and critical finalizer queues to the profiler. */
1448 report_finalizer_roots (void)
1450 report_finalizer_roots_list (fin_ready_list);
1451 report_finalizer_roots_list (critical_fin_list);
/* File-scope report used by single_arg_report_root below; set by
 * precisely_report_roots_from () before invoking a user marker. */
1454 static GCRootReport *root_report;
/* Single-argument trampoline for user root markers during profiler root
 * reporting; records the slot's object into root_report. */
1457 single_arg_report_root (void **obj)
1460 add_profile_gc_root (root_report, *obj, MONO_PROFILE_GC_ROOT_OTHER, 0);
/*
 * Profiler counterpart of precisely_scan_objects_from (): walk a precisely
 * described root area and report each non-NULL slot as a GC root instead of
 * scanning it.  NOTE(review): loop headers are elided in this extraction.
 */
1464 precisely_report_roots_from (GCRootReport *report, void** start_root, void** end_root, mword desc)
1466 switch (desc & ROOT_DESC_TYPE_MASK) {
1467 case ROOT_DESC_BITMAP:
1468 desc >>= ROOT_DESC_TYPE_SHIFT;
1470 if ((desc & 1) && *start_root) {
1471 add_profile_gc_root (report, *start_root, MONO_PROFILE_GC_ROOT_OTHER, 0);
1477 case ROOT_DESC_COMPLEX: {
1478 gsize *bitmap_data = sgen_get_complex_descriptor_bitmap (desc);
1479 int bwords = (*bitmap_data) - 1;
1480 void **start_run = start_root;
1482 while (bwords-- > 0) {
1483 gsize bmap = *bitmap_data++;
1484 void **objptr = start_run;
1486 if ((bmap & 1) && *objptr) {
1487 add_profile_gc_root (report, *objptr, MONO_PROFILE_GC_ROOT_OTHER, 0);
1492 start_run += GC_BITS_PER_WORD;
1496 case ROOT_DESC_USER: {
/* Pass the report through the file-scope root_report (the user marker
 * callback takes only the slot pointer). */
1497 MonoGCRootMarkFunc marker = sgen_get_user_descriptor_func (desc);
1498 root_report = report;
1499 marker (start_root, single_arg_report_root);
1502 case ROOT_DESC_RUN_LEN:
1503 g_assert_not_reached ();
1505 g_assert_not_reached ();
/* Report all registered roots of one root type to the profiler. */
1510 report_registered_roots_by_type (int root_type)
1512 GCRootReport report;
1516 SGEN_HASH_TABLE_FOREACH (&roots_hash [root_type], start_root, root) {
1517 SGEN_LOG (6, "Precise root scan %p-%p (desc: %p)", start_root, root->end_root, (void*)root->root_desc);
1518 precisely_report_roots_from (&report, start_root, (void**)root->end_root, root->root_desc);
1519 } SGEN_HASH_TABLE_FOREACH_END;
1520 notify_gc_roots (&report);
/* Report NORMAL and WBARRIER registered roots (PINNED roots are reported
 * separately, during pinning). */
1524 report_registered_roots (void)
1526 report_registered_roots_by_type (ROOT_TYPE_NORMAL);
1527 report_registered_roots_by_type (ROOT_TYPE_WBARRIER);
/* Copy/mark every object on a finalizer-ready list so finalizable objects
 * (and whatever they reference) survive the collection.  The copy function
 * updates fin->object in place. */
1531 scan_finalizer_entries (FinalizeReadyEntry *list, ScanCopyContext ctx)
1533 CopyOrMarkObjectFunc copy_func = ctx.copy_func;
1534 SgenGrayQueue *queue = ctx.queue;
1535 FinalizeReadyEntry *fin;
1537 for (fin = list; fin; fin = fin->next) {
1540 SGEN_LOG (5, "Scan of fin ready object: %p (%s)\n", fin->object, safe_name (fin->object));
1541 copy_func (&fin->object, queue);
/* Human-readable name for a generation constant; asserts on anything other
 * than GENERATION_NURSERY / GENERATION_OLD. */
1546 generation_name (int generation)
1548 switch (generation) {
1549 case GENERATION_NURSERY: return "nursery";
1550 case GENERATION_OLD: return "old";
1551 default: g_assert_not_reached ();
/* Public wrapper around the static generation_name () helper. */
1556 sgen_generation_name (int generation)
1558 return generation_name (generation);
1561 SgenObjectOperations *
1562 sgen_get_current_object_ops (void){
1563 return ¤t_object_ops;
/*
 * Final phase of a collection: drain the gray stack to transitive closure,
 * then interleave ephemeron marking, bridge processing, weak-link nulling,
 * finalizer queueing and toggleref handling — draining the gray stack after
 * each step that can revive objects.  The ordering of these steps is
 * semantically significant (see the inline comments below).
 * NOTE(review): lines are elided in this extraction (the embedded original
 * numbering jumps), so some control structure is not visible here.
 */
1568 finish_gray_stack (int generation, GrayQueue *queue)
1572 int done_with_ephemerons, ephemeron_rounds = 0;
1573 CopyOrMarkObjectFunc copy_func = current_object_ops.copy_or_mark_object;
1574 ScanObjectFunc scan_func = current_object_ops.scan_object;
1575 ScanCopyContext ctx = { scan_func, copy_func, queue };
/* For a nursery collection only the nursery range is relevant; for major,
 * the whole address space ((char*)-1 as the end sentinel). */
1576 char *start_addr = generation == GENERATION_NURSERY ? sgen_get_nursery_start () : NULL;
1577 char *end_addr = generation == GENERATION_NURSERY ? sgen_get_nursery_end () : (char*)-1;
1580 * We copied all the reachable objects. Now it's the time to copy
1581 * the objects that were not referenced by the roots, but by the copied objects.
1582 * we built a stack of objects pointed to by gray_start: they are
1583 * additional roots and we may add more items as we go.
1584 * We loop until gray_start == gray_objects which means no more objects have
1585 * been added. Note this is iterative: no recursion is involved.
1586 * We need to walk the LO list as well in search of marked big objects
1587 * (use a flag since this is needed only on major collections). We need to loop
1588 * here as well, so keep a counter of marked LO (increasing it in copy_object).
1589 * To achieve better cache locality and cache usage, we drain the gray stack
1590 * frequently, after each object is copied, and just finish the work here.
1592 sgen_drain_gray_stack (-1, ctx);
1594 SGEN_LOG (2, "%s generation done", generation_name (generation));
1597 Reset bridge data, we might have lingering data from a previous collection if this is a major
1598 collection trigged by minor overflow.
1600 We must reset the gathered bridges since their original block might be evacuated due to major
1601 fragmentation in the meanwhile and the bridge code should not have to deal with that.
1603 if (sgen_need_bridge_processing ())
1604 sgen_bridge_reset_data ();
1607 * Walk the ephemeron tables marking all values with reachable keys. This must be completely done
1608 * before processing finalizable objects and non-tracking weak links to avoid finalizing/clearing
1609 * objects that are in fact reachable.
1611 done_with_ephemerons = 0;
1613 done_with_ephemerons = mark_ephemerons_in_range (ctx);
1614 sgen_drain_gray_stack (-1, ctx);
1616 } while (!done_with_ephemerons);
1618 sgen_mark_togglerefs (start_addr, end_addr, ctx);
1620 if (sgen_need_bridge_processing ()) {
1621 /*Make sure the gray stack is empty before we process bridge objects so we get liveness right*/
1622 sgen_drain_gray_stack (-1, ctx);
1623 sgen_collect_bridge_objects (generation, ctx);
/* A major collection must also process the nursery's bridge objects. */
1624 if (generation == GENERATION_OLD)
1625 sgen_collect_bridge_objects (GENERATION_NURSERY, ctx);
1628 Do the first bridge step here, as the collector liveness state will become useless after that.
1630 An important optimization is to only proccess the possibly dead part of the object graph and skip
1631 over all live objects as we transitively know everything they point must be alive too.
1633 The above invariant is completely wrong if we let the gray queue be drained and mark/copy everything.
1635 This has the unfortunate side effect of making overflow collections perform the first step twice, but
1636 given we now have heuristics that perform major GC in anticipation of minor overflows this should not
1639 sgen_bridge_processing_stw_step ();
1643 Make sure we drain the gray stack before processing disappearing links and finalizers.
1644 If we don't make sure it is empty we might wrongly see a live object as dead.
1646 sgen_drain_gray_stack (-1, ctx);
1649 We must clear weak links that don't track resurrection before processing object ready for
1650 finalization so they can be cleared before that.
1652 sgen_null_link_in_range (generation, TRUE, ctx);
1653 if (generation == GENERATION_OLD)
1654 sgen_null_link_in_range (GENERATION_NURSERY, TRUE, ctx);
1657 /* walk the finalization queue and move also the objects that need to be
1658 * finalized: use the finalized objects as new roots so the objects they depend
1659 * on are also not reclaimed. As with the roots above, only objects in the nursery
1660 * are marked/copied.
1662 sgen_finalize_in_range (generation, ctx);
1663 if (generation == GENERATION_OLD)
1664 sgen_finalize_in_range (GENERATION_NURSERY, ctx);
1665 /* drain the new stack that might have been created */
1666 SGEN_LOG (6, "Precise scan of gray area post fin");
1667 sgen_drain_gray_stack (-1, ctx);
1670 * This must be done again after processing finalizable objects since CWL slots are cleared only after the key is finalized.
1672 done_with_ephemerons = 0;
1674 done_with_ephemerons = mark_ephemerons_in_range (ctx);
1675 sgen_drain_gray_stack (-1, ctx);
1677 } while (!done_with_ephemerons);
1680 * Clear ephemeron pairs with unreachable keys.
1681 * We pass the copy func so we can figure out if an array was promoted or not.
1683 clear_unreachable_ephemerons (ctx);
1686 * We clear togglerefs only after all possible chances of revival are done.
1687 * This is semantically more inline with what users expect and it allows for
1688 * user finalizers to correctly interact with TR objects.
1690 sgen_clear_togglerefs (start_addr, end_addr, ctx);
1693 SGEN_LOG (2, "Finalize queue handling scan for %s generation: %d usecs %d ephemeron rounds", generation_name (generation), TV_ELAPSED (atv, btv), ephemeron_rounds);
1696 * handle disappearing links
1697 * Note we do this after checking the finalization queue because if an object
1698 * survives (at least long enough to be finalized) we don't clear the link.
1699 * This also deals with a possible issue with the monitor reclamation: with the Boehm
1700 * GC a finalized object my lose the monitor because it is cleared before the finalizer is
1703 g_assert (sgen_gray_object_queue_is_empty (queue));
1705 sgen_null_link_in_range (generation, FALSE, ctx);
1706 if (generation == GENERATION_OLD)
1707 sgen_null_link_in_range (GENERATION_NURSERY, FALSE, ctx);
1708 if (sgen_gray_object_queue_is_empty (queue))
1710 sgen_drain_gray_stack (-1, ctx);
/* Postcondition: the gray queue must be fully drained. */
1713 g_assert (sgen_gray_object_queue_is_empty (queue));
/* Debug check: every non-NULL scan-start slot must point at a plausible
 * object (size between sizeof (MonoObject) and MAX_SMALL_OBJ_SIZE). */
1717 sgen_check_section_scan_starts (GCMemSection *section)
1720 for (i = 0; i < section->num_scan_start; ++i) {
1721 if (section->scan_starts [i]) {
1722 guint size = safe_object_get_size ((MonoObject*) section->scan_starts [i]);
1723 g_assert (size >= sizeof (MonoObject) && size <= MAX_SMALL_OBJ_SIZE);
/* Run the scan-start consistency checks (nursery + major heap) when the
 * do_scan_starts_check debug option is enabled; no-op otherwise. */
1729 check_scan_starts (void)
1731 if (!do_scan_starts_check)
1733 sgen_check_section_scan_starts (nursery_section);
1734 major_collector.check_scan_starts ();
/* Precisely scan every registered root of ROOT_TYPE within the given
 * address range, using each root's descriptor. */
1738 scan_from_registered_roots (char *addr_start, char *addr_end, int root_type, ScanCopyContext ctx)
1742 SGEN_HASH_TABLE_FOREACH (&roots_hash [root_type], start_root, root) {
1743 SGEN_LOG (6, "Precise root scan %p-%p (desc: %p)", start_root, root->end_root, (void*)root->root_desc);
1744 precisely_scan_objects_from (start_root, (void**)root->end_root, addr_start, addr_end, root->root_desc, ctx);
1745 } SGEN_HASH_TABLE_FOREACH_END;
/* Emit an <occupied> XML element for a used range, offsets relative to the
 * section start. */
1749 sgen_dump_occupied (char *start, char *end, char *section_start)
1751 fprintf (heap_dump_file, "<occupied offset=\"%td\" size=\"%td\"/>\n", start - section_start, end - start);
/*
 * Walk a heap section object-by-object and write an XML description of it
 * (occupied ranges and per-object entries) to heap_dump_file.
 * NOTE(review): lines are elided in this extraction, including where
 * occ_start is set and where start is advanced past each object.
 */
1755 sgen_dump_section (GCMemSection *section, const char *type)
1757 char *start = section->data;
1758 char *end = section->data + section->size;
1759 char *occ_start = NULL;
1761 char *old_start = NULL; /* just for debugging */
1763 fprintf (heap_dump_file, "<section type=\"%s\" size=\"%lu\">\n", type, (unsigned long)section->size);
1765 while (start < end) {
/* NULL first word => unallocated slot: flush any pending occupied run and
 * skip one pointer-sized slot. */
1769 if (!*(void**)start) {
1771 sgen_dump_occupied (occ_start, start, section->data);
1774 start += sizeof (void*); /* should be ALLOC_ALIGN, really */
1777 g_assert (start < section->next_data);
1782 vt = (GCVTable*)LOAD_VTABLE (start);
1785 size = ALIGN_UP (safe_object_get_size ((MonoObject*) start));
1788 fprintf (heap_dump_file, "<object offset=\"%d\" class=\"%s.%s\" size=\"%d\"/>\n",
1789 start - section->data,
1790 vt->klass->name_space, vt->klass->name,
/* Flush a trailing occupied run, if any, before closing the element. */
1798 sgen_dump_occupied (occ_start, start, section->data);
1800 fprintf (heap_dump_file, "</section>\n");
/*
 * Write one <object> XML element for OBJ to heap_dump_file, optionally with
 * a location attribute (nursery / small-object heap / implied LOS).
 */
1804 dump_object (MonoObject *obj, gboolean dump_location)
1806 static char class_name [1024];
1808 MonoClass *class = mono_object_class (obj);
1812 * Python's XML parser is too stupid to parse angle brackets
1813 * in strings, so we just ignore them;
/* Copy the class name, dropping <, > and " characters (see note above). */
1816 while (class->name [i] && j < sizeof (class_name) - 1) {
1817 if (!strchr ("<>\"", class->name [i]))
1818 class_name [j++] = class->name [i];
1821 g_assert (j < sizeof (class_name));
1824 fprintf (heap_dump_file, "<object class=\"%s.%s\" size=\"%d\"",
1825 class->name_space, class_name,
1826 safe_object_get_size (obj));
1827 if (dump_location) {
1828 const char *location;
1829 if (ptr_in_nursery (obj))
1830 location = "nursery";
1831 else if (safe_object_get_size (obj) <= MAX_SMALL_OBJ_SIZE)
1835 fprintf (heap_dump_file, " location=\"%s\"", location);
1837 fprintf (heap_dump_file, "/>\n");
/*
 * Emit a full XML heap dump for one collection: header, internal memory
 * usage, pin statistics, pinned objects, nursery section, major heap and
 * large-object space.  NOTE(review): some lines are elided here.
 */
1841 dump_heap (const char *type, int num, const char *reason)
1846 fprintf (heap_dump_file, "<collection type=\"%s\" num=\"%d\"", type, num);
1848 fprintf (heap_dump_file, " reason=\"%s\"", reason);
1849 fprintf (heap_dump_file, ">\n");
1850 fprintf (heap_dump_file, "<other-mem-usage type=\"mempools\" size=\"%ld\"/>\n", mono_mempool_get_bytes_allocated ());
1851 sgen_dump_internal_mem_usage (heap_dump_file);
1852 fprintf (heap_dump_file, "<pinned type=\"stack\" bytes=\"%zu\"/>\n", sgen_pin_stats_get_pinned_byte_count (PIN_TYPE_STACK));
1853 /* fprintf (heap_dump_file, "<pinned type=\"static-data\" bytes=\"%d\"/>\n", pinned_byte_counts [PIN_TYPE_STATIC_DATA]); */
1854 fprintf (heap_dump_file, "<pinned type=\"other\" bytes=\"%zu\"/>\n", sgen_pin_stats_get_pinned_byte_count (PIN_TYPE_OTHER));
1856 fprintf (heap_dump_file, "<pinned-objects>\n");
1857 for (list = sgen_pin_stats_get_object_list (); list; list = list->next)
1858 dump_object (list->obj, TRUE);
1859 fprintf (heap_dump_file, "</pinned-objects>\n");
1861 sgen_dump_section (nursery_section, "nursery");
1863 major_collector.dump_heap (heap_dump_file);
1865 fprintf (heap_dump_file, "<los>\n");
1866 for (bigobj = los_object_list; bigobj; bigobj = bigobj->next)
1867 dump_object ((MonoObject*)bigobj->data, FALSE);
1868 fprintf (heap_dump_file, "</los>\n");
1870 fprintf (heap_dump_file, "</collection>\n");
/*
 * Record an (obj, destination) move pair for the profiler.  Pairs are
 * buffered in moved_objects and flushed to mono_profiler_gc_moves () when
 * the buffer reaches MOVED_OBJECTS_NUM entries.  Not safe for the parallel
 * collector (asserted).
 */
1874 sgen_register_moved_object (void *obj, void *destination)
1876 g_assert (mono_profiler_events & MONO_PROFILE_GC_MOVES);
1878 /* FIXME: handle this for parallel collector */
1879 g_assert (!sgen_collection_is_parallel ());
1881 if (moved_objects_idx == MOVED_OBJECTS_NUM) {
1882 mono_profiler_gc_moves (moved_objects, moved_objects_idx);
1883 moved_objects_idx = 0;
1885 moved_objects [moved_objects_idx++] = obj;
1886 moved_objects [moved_objects_idx++] = destination;
/*
 * NOTE(review): the enclosing function's signature (presumably init_stats)
 * is elided in this extraction — confirm against the full file.  The body
 * registers GC timing/statistics counters once (guarded by `inited`):
 * minor and major phase timers, the pinned-object count, and — under
 * HEAVY_STATISTICS — write-barrier and copy/scan counters.
 */
1892 static gboolean inited = FALSE;
/* Per-phase timers for minor (nursery) collections. */
1897 mono_counters_register ("Minor fragment clear", MONO_COUNTER_GC | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &time_minor_pre_collection_fragment_clear);
1898 mono_counters_register ("Minor pinning", MONO_COUNTER_GC | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &time_minor_pinning);
1899 mono_counters_register ("Minor scan remembered set", MONO_COUNTER_GC | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &time_minor_scan_remsets);
1900 mono_counters_register ("Minor scan pinned", MONO_COUNTER_GC | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &time_minor_scan_pinned);
1901 mono_counters_register ("Minor scan registered roots", MONO_COUNTER_GC | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &time_minor_scan_registered_roots);
1902 mono_counters_register ("Minor scan thread data", MONO_COUNTER_GC | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &time_minor_scan_thread_data);
1903 mono_counters_register ("Minor finish gray stack", MONO_COUNTER_GC | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &time_minor_finish_gray_stack);
1904 mono_counters_register ("Minor fragment creation", MONO_COUNTER_GC | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &time_minor_fragment_creation);
/* Per-phase timers for major collections. */
1906 mono_counters_register ("Major fragment clear", MONO_COUNTER_GC | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &time_major_pre_collection_fragment_clear);
1907 mono_counters_register ("Major pinning", MONO_COUNTER_GC | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &time_major_pinning);
1908 mono_counters_register ("Major scan pinned", MONO_COUNTER_GC | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &time_major_scan_pinned);
1909 mono_counters_register ("Major scan registered roots", MONO_COUNTER_GC | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &time_major_scan_registered_roots);
1910 mono_counters_register ("Major scan thread data", MONO_COUNTER_GC | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &time_major_scan_thread_data);
1911 mono_counters_register ("Major scan alloc_pinned", MONO_COUNTER_GC | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &time_major_scan_alloc_pinned);
1912 mono_counters_register ("Major scan finalized", MONO_COUNTER_GC | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &time_major_scan_finalized);
1913 mono_counters_register ("Major scan big objects", MONO_COUNTER_GC | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &time_major_scan_big_objects);
1914 mono_counters_register ("Major finish gray stack", MONO_COUNTER_GC | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &time_major_finish_gray_stack);
1915 mono_counters_register ("Major free big objects", MONO_COUNTER_GC | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &time_major_free_bigobjs);
1916 mono_counters_register ("Major LOS sweep", MONO_COUNTER_GC | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &time_major_los_sweep);
1917 mono_counters_register ("Major sweep", MONO_COUNTER_GC | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &time_major_sweep);
1918 mono_counters_register ("Major fragment creation", MONO_COUNTER_GC | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &time_major_fragment_creation);
1920 mono_counters_register ("Number of pinned objects", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_pinned_objects);
1922 #ifdef HEAVY_STATISTICS
1923 mono_counters_register ("WBarrier remember pointer", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_add_to_global_remset);
1924 mono_counters_register ("WBarrier set field", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_set_field);
1925 mono_counters_register ("WBarrier set arrayref", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_set_arrayref);
1926 mono_counters_register ("WBarrier arrayref copy", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_arrayref_copy);
1927 mono_counters_register ("WBarrier generic store called", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_generic_store);
1928 mono_counters_register ("WBarrier generic atomic store called", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_generic_store_atomic);
1929 mono_counters_register ("WBarrier set root", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_set_root);
1930 mono_counters_register ("WBarrier value copy", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_value_copy);
1931 mono_counters_register ("WBarrier object copy", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_object_copy);
1933 mono_counters_register ("# objects allocated degraded", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_objects_alloced_degraded);
1934 mono_counters_register ("bytes allocated degraded", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_bytes_alloced_degraded);
1936 mono_counters_register ("# copy_object() called (nursery)", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_copy_object_called_nursery);
1937 mono_counters_register ("# objects copied (nursery)", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_objects_copied_nursery);
1938 mono_counters_register ("# copy_object() called (major)", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_copy_object_called_major);
1939 mono_counters_register ("# objects copied (major)", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_objects_copied_major);
1941 mono_counters_register ("# scan_object() called (nursery)", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_scan_object_called_nursery);
1942 mono_counters_register ("# scan_object() called (major)", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_scan_object_called_major);
1944 mono_counters_register ("Slots allocated in vain", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_slots_allocated_in_vain);
1946 mono_counters_register ("# nursery copy_object() failed from space", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_nursery_copy_object_failed_from_space);
1947 mono_counters_register ("# nursery copy_object() failed forwarded", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_nursery_copy_object_failed_forwarded);
1948 mono_counters_register ("# nursery copy_object() failed pinned", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_nursery_copy_object_failed_pinned);
1949 mono_counters_register ("# nursery copy_object() failed to space", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_nursery_copy_object_failed_to_space);
1951 sgen_nursery_allocator_init_heavy_stats ();
1952 sgen_alloc_init_heavy_stats ();
/* Reset the per-cycle tally of bytes pinned because an allocation failed.
 * Called at the start of a collection; the tally is accumulated via
 * sgen_set_pinned_from_failed_allocation () during the cycle. */
1960 reset_pinned_from_failed_allocation (void)
1962 	bytes_pinned_from_failed_allocation = 0;
/* Record that `objsize` bytes were pinned because a copy/evacuation
 * allocation failed.  major_do_collection () later uses the accumulated
 * total to decide whether another major collection is warranted. */
1966 sgen_set_pinned_from_failed_allocation (mword objsize)
1968 	bytes_pinned_from_failed_allocation += objsize;
/* Return whether the collection currently in progress runs with parallel
 * workers.  For the nursery this is a per-minor-collector flag; for the
 * old generation it is a property of the chosen major collector.
 * Aborts if no collection generation is currently set. */
1972 sgen_collection_is_parallel (void)
1974 	switch (current_collection_generation) {
1975 	case GENERATION_NURSERY:
1976 		return nursery_collection_is_parallel;
1977 	case GENERATION_OLD:
1978 		return major_collector.is_parallel;
/* NOTE(review): default/error path — reached when current_collection_generation
 * is neither nursery nor old (e.g. -1, i.e. no collection in progress). */
1980 		g_error ("Invalid current generation %d", current_collection_generation);
/* Return whether the collection currently in progress is the concurrent
 * major collection.  A nursery collection is never concurrent (that case
 * is visible at GENERATION_NURSERY below; the elided line presumably
 * returns FALSE — confirm against the full source).  Aborts on an invalid
 * generation value. */
1985 sgen_collection_is_concurrent (void)
1987 	switch (current_collection_generation) {
1988 	case GENERATION_NURSERY:
1990 	case GENERATION_OLD:
1991 		return concurrent_collection_in_progress;
1993 	g_error ("Invalid current generation %d", current_collection_generation);
/* Accessor: TRUE while a concurrent major collection is running,
 * regardless of which generation (if any) is being collected right now. */
1998 sgen_concurrent_collection_in_progress (void)
2000 	return concurrent_collection_in_progress;
/* Job payload for job_finish_remembered_set_scan (): carries the heap
 * range to scan (fields elided from this listing — see full source). */
2007 } FinishRememberedSetScanJobData;
/* Worker-thread job: finish scanning the remembered sets over the given
 * heap range, graying reachable objects into this worker's job gray queue,
 * then free the job data (ownership of job_data transfers to this job). */
2010 job_finish_remembered_set_scan (WorkerData *worker_data, void *job_data_untyped)
2012 	FinishRememberedSetScanJobData *job_data = job_data_untyped;
2014 	remset.finish_scan_remsets (job_data->heap_start, job_data->heap_end, sgen_workers_get_job_gray_queue (worker_data));
2015 	sgen_free_internal_dynamic (job_data, sizeof (FinishRememberedSetScanJobData), INTERNAL_MEM_WORKER_JOB_DATA);
/* Job payload for job_scan_from_registered_roots (): the copy/mark and
 * scan functions to apply, plus (elided here) the heap range and the
 * root type to scan. */
2020 	CopyOrMarkObjectFunc copy_or_mark_func;
2021 	ScanObjectFunc scan_func;
2025 } ScanFromRegisteredRootsJobData;
/* Worker-thread job: scan all registered roots of job_data->root_type in
 * [heap_start, heap_end) with the supplied scan/copy functions, using this
 * worker's job gray queue, then free the job data. */
2028 job_scan_from_registered_roots (WorkerData *worker_data, void *job_data_untyped)
2030 	ScanFromRegisteredRootsJobData *job_data = job_data_untyped;
2031 	ScanCopyContext ctx = { job_data->scan_func, job_data->copy_or_mark_func,
2032 		sgen_workers_get_job_gray_queue (worker_data) };
2034 	scan_from_registered_roots (job_data->heap_start, job_data->heap_end, job_data->root_type, ctx);
2035 	sgen_free_internal_dynamic (job_data, sizeof (ScanFromRegisteredRootsJobData), INTERNAL_MEM_WORKER_JOB_DATA);
/* Job payload for job_scan_thread_data (): heap range to consider when
 * scanning thread stacks/registers (fields elided from this listing). */
2042 } ScanThreadDataJobData;
/* Worker-thread job: scan the stacks and saved registers of all managed
 * threads for pointers into [heap_start, heap_end), graying into this
 * worker's job gray queue, then free the job data. */
2045 job_scan_thread_data (WorkerData *worker_data, void *job_data_untyped)
2047 	ScanThreadDataJobData *job_data = job_data_untyped;
2049 	scan_thread_data (job_data->heap_start, job_data->heap_end, TRUE,
2050 			sgen_workers_get_job_gray_queue (worker_data));
2051 	sgen_free_internal_dynamic (job_data, sizeof (ScanThreadDataJobData), INTERNAL_MEM_WORKER_JOB_DATA);
/* Job payload for job_scan_finalizer_entries (): the finalizer entry list
 * (fin_ready_list or critical_fin_list) to keep alive. */
2056 	FinalizeReadyEntry *list;
2057 } ScanFinalizerEntriesJobData;
/* Worker-thread job: mark the objects held in a finalizer entry list as
 * live (objects ready for finalization must survive until finalized).
 * Uses the current copy/mark function with no scan function, then frees
 * the job data. */
2060 job_scan_finalizer_entries (WorkerData *worker_data, void *job_data_untyped)
2062 	ScanFinalizerEntriesJobData *job_data = job_data_untyped;
2063 	ScanCopyContext ctx = { NULL, current_object_ops.copy_or_mark_object, sgen_workers_get_job_gray_queue (worker_data) };
2065 	scan_finalizer_entries (job_data->list, ctx);
2066 	sgen_free_internal_dynamic (job_data, sizeof (ScanFinalizerEntriesJobData), INTERNAL_MEM_WORKER_JOB_DATA);
/* Worker-thread job: scan the major heap's mod-union card table (the
 * cards dirtied while the concurrent mark ran).  Only valid while a
 * concurrent collection is in progress — asserted below. */
2070 job_scan_major_mod_union_cardtable (WorkerData *worker_data, void *job_data_untyped)
2072 	g_assert (concurrent_collection_in_progress);
2073 	major_collector.scan_card_table (TRUE, sgen_workers_get_job_gray_queue (worker_data));
/* Worker-thread job: same as job_scan_major_mod_union_cardtable but for
 * the large-object space.  Only valid during a concurrent collection. */
2077 job_scan_los_mod_union_cardtable (WorkerData *worker_data, void *job_data_untyped)
2079 	g_assert (concurrent_collection_in_progress);
2080 	sgen_los_scan_card_table (TRUE, sgen_workers_get_job_gray_queue (worker_data));
/* Debug check: log any nursery scan-start entry that points strictly
 * inside the object span [start, end) — a scan start must never land in
 * the middle of an object. */
2084 verify_scan_starts (char *start, char *end)
2088 	for (i = 0; i < nursery_section->num_scan_start; ++i) {
2089 		char *addr = nursery_section->scan_starts [i];
2090 		if (addr > start && addr < end)
2091 			SGEN_LOG (1, "NFC-BAD SCAN START [%ld] %p for obj [%p %p]", i, addr, start, end);
/* Debug walk of the whole nursery (gated by do_verify_nursery): logs
 * forwarded and pinned objects, validates scan starts for each object,
 * and — with do_dump_nursery_content — dumps every object and the holes
 * between them.  Runs of NULL words are skipped as free space. */
2096 verify_nursery (void)
2098 	char *start, *end, *cur, *hole_start;
2100 	if (!do_verify_nursery)
2103 	/*This cleans up unused fragments */
2104 	sgen_nursery_allocator_prepare_for_pinning ();
2106 	hole_start = start = cur = sgen_get_nursery_start ();
2107 	end = sgen_get_nursery_end ();
/* NOTE(review): the loop header walking cur toward end is elided in this
 * listing; the body below is the per-word/per-object step. */
2112 		if (!*(void**)cur) {
2113 			cur += sizeof (void*);
2117 		if (object_is_forwarded (cur))
2118 			SGEN_LOG (1, "FORWARDED OBJ %p", cur);
2119 		else if (object_is_pinned (cur))
2120 			SGEN_LOG (1, "PINNED OBJ %p", cur);
2122 		ss = safe_object_get_size ((MonoObject*)cur);
2123 		size = ALIGN_UP (safe_object_get_size ((MonoObject*)cur));
2124 		verify_scan_starts (cur, cur + size);
2125 		if (do_dump_nursery_content) {
2126 			if (cur > hole_start)
2127 				SGEN_LOG (1, "HOLE [%p %p %d]", hole_start, cur, (int)(cur - hole_start));
2128 			SGEN_LOG (1, "OBJ [%p %p %d %d %s %d]", cur, cur + size, (int)size, (int)ss, sgen_safe_name ((MonoObject*)cur), (gpointer)LOAD_VTABLE (cur) == sgen_get_array_fill_vtable ());
2136  * Checks that no objects in the nursery are forwarded or pinned.  This
2137  * is a precondition to restarting the mutator while doing a
2138  * concurrent collection.  Note that we don't clear fragments because
2139  * we depend on that having happened earlier.
2142 check_nursery_is_clean (void)
2144 	char *start, *end, *cur;
2146 	start = cur = sgen_get_nursery_start ();
2147 	end = sgen_get_nursery_end ();
/* NOTE(review): loop header over the nursery is elided; as in
 * verify_nursery, NULL words are skipped as free space. */
2152 		if (!*(void**)cur) {
2153 			cur += sizeof (void*);
/* Hard-assert the invariant rather than just logging it. */
2157 		g_assert (!object_is_forwarded (cur));
2158 		g_assert (!object_is_pinned (cur));
2160 		ss = safe_object_get_size ((MonoObject*)cur);
2161 		size = ALIGN_UP (safe_object_get_size ((MonoObject*)cur));
2162 		verify_scan_starts (cur, cur + size);
/* Initialize the main-thread gray queue for the upcoming collection.
 * With parallel or concurrent collection the queue gets an alloc-prepare
 * hook (gray_queue_redirect) so full sections are handed off to the
 * workers' distribute queue; otherwise a plain queue suffices. */
2169 init_gray_queue (void)
2171 	if (sgen_collection_is_parallel () || sgen_collection_is_concurrent ()) {
2172 		sgen_workers_init_distribute_gray_queue ();
2173 		sgen_gray_object_queue_init_with_alloc_prepare (&gray_queue, NULL,
2174 				gray_queue_redirect, sgen_workers_get_distribute_section_gray_queue ());
/* Serial case (the `else` keyword is elided in this listing). */
2176 		sgen_gray_object_queue_init (&gray_queue, NULL);
/* Iteration callback (e.g. for sgen_cement_iterate): stage `obj` in the
 * pin queue.  `size` and `data` are unused. */
2181 pin_stage_object_callback (char *obj, size_t size, void *data)
2183 	sgen_pin_stage_ptr (obj);
2184 	/* FIXME: do pin stats if enabled */
2188  * Collect objects in the nursery.  Returns whether to trigger a major
 * collection afterwards (TRUE when objects had to be pinned late because
 * memory ran out).  The world must already be stopped.  When finishing a
 * concurrent mark, finish_up_concurrent_mark is TRUE and unpin_queue
 * carries objects to unpin — NOTE(review): their exact use is in lines
 * elided from this listing; confirm against the full source.
2192 collect_nursery (SgenGrayQueue *unpin_queue, gboolean finish_up_concurrent_mark)
2194 	gboolean needs_major;
2195 	size_t max_garbage_amount;
2197 	FinishRememberedSetScanJobData *frssjd;
2198 	ScanFromRegisteredRootsJobData *scrrjd_normal, *scrrjd_wbarrier;
2199 	ScanFinalizerEntriesJobData *sfejd_fin_ready, *sfejd_critical_fin;
2200 	ScanThreadDataJobData *stdjd;
2201 	mword fragment_total;
2202 	ScanCopyContext ctx;
2203 	TV_DECLARE (all_atv);
2204 	TV_DECLARE (all_btv);
/* Early out when minor collections are disabled (return elided). */
2208 	if (disable_minor_collections)
2211 	MONO_GC_BEGIN (GENERATION_NURSERY);
2212 	binary_protocol_collection_begin (stat_minor_gcs, GENERATION_NURSERY);
2216 #ifndef DISABLE_PERFCOUNTERS
2217 	mono_perfcounters->gc_collections0++;
/* Select serial or parallel minor-collector object ops for this cycle. */
2220 	current_collection_generation = GENERATION_NURSERY;
2221 	if (sgen_collection_is_parallel ())
2222 		current_object_ops = sgen_minor_collector.parallel_ops;
2224 		current_object_ops = sgen_minor_collector.serial_ops;
2226 	reset_pinned_from_failed_allocation ();
2228 	check_scan_starts ();
2230 	sgen_nursery_alloc_prepare_for_minor ();
2234 	nursery_next = sgen_nursery_alloc_get_upper_alloc_bound ();
2235 	/* FIXME: optimize later to use the higher address where an object can be present */
2236 	nursery_next = MAX (nursery_next, sgen_get_nursery_end ());
2238 	SGEN_LOG (1, "Start nursery collection %d %p-%p, size: %d", stat_minor_gcs, sgen_get_nursery_start (), nursery_next, (int)(nursery_next - sgen_get_nursery_start ()));
2239 	max_garbage_amount = nursery_next - sgen_get_nursery_start ();
2240 	g_assert (nursery_section->size >= max_garbage_amount);
2242 	/* world must be stopped already */
2243 	TV_GETTIME (all_atv);
2247 	time_minor_pre_collection_fragment_clear += TV_ELAPSED (atv, btv);
2249 	if (xdomain_checks) {
2250 		sgen_clear_nursery_fragments ();
2251 		sgen_check_for_xdomain_refs ();
2254 	nursery_section->next_data = nursery_next;
2256 	major_collector.start_nursery_collection ();
2258 	sgen_memgov_minor_collection_start ();
2263 	gc_stats.minor_gc_count ++;
2265 	MONO_GC_CHECKPOINT_1 (GENERATION_NURSERY);
/* Drain staged finalizer/dislink registrations before pinning. */
2267 	sgen_process_fin_stage_entries ();
2268 	sgen_process_dislink_stage_entries ();
2270 	MONO_GC_CHECKPOINT_2 (GENERATION_NURSERY);
2272 	/* pin from pinned handles */
2273 	sgen_init_pinning ();
2274 	mono_profiler_gc_event (MONO_GC_EVENT_MARK_START, 0);
2275 	pin_from_roots (sgen_get_nursery_start (), nursery_next, WORKERS_DISTRIBUTE_GRAY_QUEUE);
2276 	/* pin cemented objects */
2277 	sgen_cement_iterate (pin_stage_object_callback, NULL);
2278 	/* identify pinned objects */
2279 	sgen_optimize_pin_queue (0);
2280 	sgen_pinning_setup_section (nursery_section);
2281 	ctx.scan_func = NULL;
2282 	ctx.copy_func = NULL;
2283 	ctx.queue = WORKERS_DISTRIBUTE_GRAY_QUEUE;
2284 	sgen_pin_objects_in_section (nursery_section, ctx);
2285 	sgen_pinning_trim_queue_to_section (nursery_section);
2288 	time_minor_pinning += TV_ELAPSED (btv, atv);
2289 	SGEN_LOG (2, "Finding pinned pointers: %d in %d usecs", sgen_get_pinned_count (), TV_ELAPSED (btv, atv));
2290 	SGEN_LOG (4, "Start scan with %d pinned objects", sgen_get_pinned_count ());
2292 	MONO_GC_CHECKPOINT_3 (GENERATION_NURSERY);
/* Optional heavyweight debug checks before marking starts. */
2294 	if (whole_heap_check_before_collection) {
2295 		sgen_clear_nursery_fragments ();
2296 		sgen_check_whole_heap (finish_up_concurrent_mark);
2298 	if (consistency_check_at_minor_collection)
2299 		sgen_check_consistency ();
2301 	sgen_workers_start_all_workers ();
2302 	sgen_workers_start_marking ();
/* Enqueue the remembered-set scan job (old-gen -> nursery references). */
2304 	frssjd = sgen_alloc_internal_dynamic (sizeof (FinishRememberedSetScanJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
2305 	frssjd->heap_start = sgen_get_nursery_start ();
2306 	frssjd->heap_end = nursery_next;
2307 	sgen_workers_enqueue_job (job_finish_remembered_set_scan, frssjd);
2309 	/* we don't have complete write barrier yet, so we scan all the old generation sections */
2311 	time_minor_scan_remsets += TV_ELAPSED (atv, btv);
2312 	SGEN_LOG (2, "Old generation scan: %d usecs", TV_ELAPSED (atv, btv));
2314 	MONO_GC_CHECKPOINT_4 (GENERATION_NURSERY);
/* In the serial case the main thread drains the gray stack itself. */
2316 	if (!sgen_collection_is_parallel ()) {
2317 		ctx.scan_func = current_object_ops.scan_object;
2318 		ctx.copy_func = NULL;
2319 		ctx.queue = &gray_queue;
2320 		sgen_drain_gray_stack (-1, ctx);
2323 	if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS)
2324 		report_registered_roots ();
2325 	if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS)
2326 		report_finalizer_roots ();
2328 	time_minor_scan_pinned += TV_ELAPSED (btv, atv);
2330 	MONO_GC_CHECKPOINT_5 (GENERATION_NURSERY);
2332 	/* registered roots, this includes static fields */
2333 	scrrjd_normal = sgen_alloc_internal_dynamic (sizeof (ScanFromRegisteredRootsJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
2334 	scrrjd_normal->copy_or_mark_func = current_object_ops.copy_or_mark_object;
2335 	scrrjd_normal->scan_func = current_object_ops.scan_object;
2336 	scrrjd_normal->heap_start = sgen_get_nursery_start ();
2337 	scrrjd_normal->heap_end = nursery_next;
2338 	scrrjd_normal->root_type = ROOT_TYPE_NORMAL;
2339 	sgen_workers_enqueue_job (job_scan_from_registered_roots, scrrjd_normal);
2341 	scrrjd_wbarrier = sgen_alloc_internal_dynamic (sizeof (ScanFromRegisteredRootsJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
2342 	scrrjd_wbarrier->copy_or_mark_func = current_object_ops.copy_or_mark_object;
2343 	scrrjd_wbarrier->scan_func = current_object_ops.scan_object;
2344 	scrrjd_wbarrier->heap_start = sgen_get_nursery_start ();
2345 	scrrjd_wbarrier->heap_end = nursery_next;
2346 	scrrjd_wbarrier->root_type = ROOT_TYPE_WBARRIER;
2347 	sgen_workers_enqueue_job (job_scan_from_registered_roots, scrrjd_wbarrier);
2350 	time_minor_scan_registered_roots += TV_ELAPSED (atv, btv);
2352 	MONO_GC_CHECKPOINT_6 (GENERATION_NURSERY);
/* Thread stacks and registers. */
2355 	stdjd = sgen_alloc_internal_dynamic (sizeof (ScanThreadDataJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
2356 	stdjd->heap_start = sgen_get_nursery_start ();
2357 	stdjd->heap_end = nursery_next;
2358 	sgen_workers_enqueue_job (job_scan_thread_data, stdjd);
2361 	time_minor_scan_thread_data += TV_ELAPSED (btv, atv);
2364 	MONO_GC_CHECKPOINT_7 (GENERATION_NURSERY);
/* NOTE(review): the surrounding conditional for these two asserts is
 * elided; as written they look mutually exclusive — confirm against the
 * full source. */
2366 	g_assert (!sgen_collection_is_parallel () && !sgen_collection_is_concurrent ());
2368 	if (sgen_collection_is_parallel () || sgen_collection_is_concurrent ())
2369 		g_assert (sgen_gray_object_queue_is_empty (&gray_queue));
2371 	/* Scan the list of objects ready for finalization. If */
2372 	sfejd_fin_ready = sgen_alloc_internal_dynamic (sizeof (ScanFinalizerEntriesJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
2373 	sfejd_fin_ready->list = fin_ready_list;
2374 	sgen_workers_enqueue_job (job_scan_finalizer_entries, sfejd_fin_ready);
2376 	sfejd_critical_fin = sgen_alloc_internal_dynamic (sizeof (ScanFinalizerEntriesJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
2377 	sfejd_critical_fin->list = critical_fin_list;
2378 	sgen_workers_enqueue_job (job_scan_finalizer_entries, sfejd_critical_fin);
2380 	MONO_GC_CHECKPOINT_8 (GENERATION_NURSERY);
/* Finish marking: drain remaining gray objects, run finalization logic. */
2382 	finish_gray_stack (GENERATION_NURSERY, &gray_queue);
2384 	time_minor_finish_gray_stack += TV_ELAPSED (btv, atv);
2385 	mono_profiler_gc_event (MONO_GC_EVENT_MARK_END, 0);
2387 	MONO_GC_CHECKPOINT_9 (GENERATION_NURSERY);
2390 	 * The (single-threaded) finalization code might have done
2391 	 * some copying/marking so we can only reset the GC thread's
2392 	 * worker data here instead of earlier when we joined the
2395 	sgen_workers_reset_data ();
/* Late pinning due to allocation failure: redo pin-queue setup. */
2397 	if (objects_pinned) {
2398 		sgen_optimize_pin_queue (0);
2399 		sgen_pinning_setup_section (nursery_section);
2402 	/* walk the pin_queue, build up the fragment list of free memory, unmark
2403 	 * pinned objects as we go, memzero() the empty fragments so they are ready for the
2406 	mono_profiler_gc_event (MONO_GC_EVENT_RECLAIM_START, 0);
2407 	fragment_total = sgen_build_nursery_fragments (nursery_section,
2408 			nursery_section->pin_queue_start, nursery_section->pin_queue_num_entries,
2410 	if (!fragment_total)
2413 	/* Clear TLABs for all threads */
2414 	sgen_clear_tlabs ();
2416 	mono_profiler_gc_event (MONO_GC_EVENT_RECLAIM_END, 0);
2418 	time_minor_fragment_creation += TV_ELAPSED (atv, btv);
2419 	SGEN_LOG (2, "Fragment creation: %d usecs, %lu bytes available", TV_ELAPSED (atv, btv), (unsigned long)fragment_total);
2421 	if (consistency_check_at_minor_collection)
2422 		sgen_check_major_refs ();
2424 	major_collector.finish_nursery_collection ();
2426 	TV_GETTIME (all_btv);
2427 	gc_stats.minor_gc_time_usecs += TV_ELAPSED (all_atv, all_btv);
2430 	dump_heap ("minor", stat_minor_gcs - 1, NULL);
2432 	/* prepare the pin queue for the next collection */
2433 	sgen_finish_pinning ();
2434 	if (fin_ready_list || critical_fin_list) {
2435 		SGEN_LOG (4, "Finalizer-thread wakeup: ready %d", num_ready_finalizers);
2436 		mono_gc_finalize_notify ();
2438 	sgen_pin_stats_reset ();
2439 	/* clear cemented hash */
2440 	sgen_cement_clear_below_threshold ();
2442 	g_assert (sgen_gray_object_queue_is_empty (&gray_queue));
2444 	remset.finish_minor_collection ();
2446 	check_scan_starts ();
2448 	binary_protocol_flush_buffers (FALSE);
2450 	sgen_memgov_minor_collection_end ();
2452 	/*objects are late pinned because of lack of memory, so a major is a good call*/
2453 	needs_major = objects_pinned > 0;
2454 	current_collection_generation = -1;
2457 	MONO_GC_END (GENERATION_NURSERY);
2458 	binary_protocol_collection_end (stat_minor_gcs - 1, GENERATION_NURSERY);
2460 	if (check_nursery_objects_pinned && !sgen_minor_collector.is_split)
2461 		sgen_check_nursery_objects_pinned (unpin_queue != NULL);
/* Per-object callback used by scan_nursery_objects (): scan one nursery
 * object with the context's scan function.  `size` is unused. */
2467 scan_nursery_objects_callback (char *obj, size_t size, ScanCopyContext *ctx)
2469 	ctx->scan_func (obj, ctx->queue);
/* Scan every object in the nursery section front-to-back (used by the
 * concurrent collector with the split nursery, where non-pinned objects
 * can survive in place). */
2473 scan_nursery_objects (ScanCopyContext ctx)
2475 	sgen_scan_area_with_callback (nursery_section->data, nursery_section->end_data,
2476 			(IterateObjectCallbackFunc)scan_nursery_objects_callback, (void*)&ctx, FALSE);
/* Root phase of a major collection: pin conservatively-reachable objects
 * (sections and LOS), then enqueue worker jobs to scan registered roots,
 * thread data and finalizer lists.  Used both to start a major collection
 * (finish_up_concurrent_mark FALSE) and to finish a concurrent one
 * (finish_up_concurrent_mark TRUE, optionally scanning the mod-union card
 * tables when scan_mod_union is set).  On exit *old_next_pin_slot (if
 * non-NULL) holds the pin count to restore pin-queue state later. */
2480 major_copy_or_mark_from_roots (int *old_next_pin_slot, gboolean finish_up_concurrent_mark, gboolean scan_mod_union)
2485 	/* FIXME: only use these values for the precise scan
2486 	 * note that to_space pointers should be excluded anyway...
2488 	char *heap_start = NULL;
2489 	char *heap_end = (char*)-1;
2490 	gboolean profile_roots = mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS;
2491 	GCRootReport root_report = { 0 };
2492 	ScanFromRegisteredRootsJobData *scrrjd_normal, *scrrjd_wbarrier;
2493 	ScanThreadDataJobData *stdjd;
2494 	ScanFinalizerEntriesJobData *sfejd_fin_ready, *sfejd_critical_fin;
2495 	ScanCopyContext ctx;
2497 	if (concurrent_collection_in_progress) {
2498 		/*This cleans up unused fragments */
2499 		sgen_nursery_allocator_prepare_for_pinning ();
2501 		if (do_concurrent_checks)
2502 			check_nursery_is_clean ();
2504 		/* The concurrent collector doesn't touch the nursery. */
2505 		sgen_nursery_alloc_prepare_for_major ();
2512 	/* Pinning depends on this */
2513 	sgen_clear_nursery_fragments ();
2515 	if (whole_heap_check_before_collection)
2516 		sgen_check_whole_heap (finish_up_concurrent_mark);
2519 	time_major_pre_collection_fragment_clear += TV_ELAPSED (atv, btv);
2521 	if (!sgen_collection_is_concurrent ())
2522 		nursery_section->next_data = sgen_get_nursery_end ();
2523 	/* we should also coalesce scanning from sections close to each other
2524 	 * and deal with pointers outside of the sections later.
2528 	*major_collector.have_swept = FALSE;
2530 	if (xdomain_checks) {
2531 		sgen_clear_nursery_fragments ();
2532 		sgen_check_for_xdomain_refs ();
2535 	if (!concurrent_collection_in_progress) {
2536 		/* Remsets are not useful for a major collection */
2537 		remset.prepare_for_major_collection ();
2540 	sgen_process_fin_stage_entries ();
2541 	sgen_process_dislink_stage_entries ();
/* Conservative pinning pass over the whole heap address range. */
2544 	sgen_init_pinning ();
2545 	SGEN_LOG (6, "Collecting pinned addresses");
2546 	pin_from_roots ((void*)lowest_heap_address, (void*)highest_heap_address, WORKERS_DISTRIBUTE_GRAY_QUEUE);
2548 	if (!concurrent_collection_in_progress || finish_up_concurrent_mark) {
2549 		if (major_collector.is_concurrent) {
2551 			 * The concurrent major collector cannot evict
2552 			 * yet, so we need to pin cemented objects to
2553 			 * not break some asserts.
2555 			 * FIXME: We could evict now!
2557 			sgen_cement_iterate (pin_stage_object_callback, NULL);
2560 		if (!concurrent_collection_in_progress)
2561 			sgen_cement_reset ();
2564 	sgen_optimize_pin_queue (0);
2567 	 * The concurrent collector doesn't move objects, neither on
2568 	 * the major heap nor in the nursery, so we can mark even
2569 	 * before pinning has finished. For the non-concurrent
2570 	 * collector we start the workers after pinning.
2572 	if (concurrent_collection_in_progress) {
2573 		sgen_workers_start_all_workers ();
2574 		sgen_workers_start_marking ();
2578 	 * pin_queue now contains all candidate pointers, sorted and
2579 	 * uniqued. We must do two passes now to figure out which
2580 	 * objects are pinned.
2582 	 * The first is to find within the pin_queue the area for each
2583 	 * section. This requires that the pin_queue be sorted. We
2584 	 * also process the LOS objects and pinned chunks here.
2586 	 * The second, destructive, pass is to reduce the section
2587 	 * areas to pointers to the actually pinned objects.
2589 	SGEN_LOG (6, "Pinning from sections");
2590 	/* first pass for the sections */
2591 	sgen_find_section_pin_queue_start_end (nursery_section);
2592 	major_collector.find_pin_queue_start_ends (WORKERS_DISTRIBUTE_GRAY_QUEUE);
2593 	/* identify possible pointers to the inside of large objects */
2594 	SGEN_LOG (6, "Pinning from large objects");
2595 	for (bigobj = los_object_list; bigobj; bigobj = bigobj->next) {
2597 		if (sgen_find_optimized_pin_queue_area (bigobj->data, (char*)bigobj->data + sgen_los_object_size (bigobj), &dummy)) {
2598 			binary_protocol_pin (bigobj->data, (gpointer)LOAD_VTABLE (bigobj->data), safe_object_get_size (((MonoObject*)(bigobj->data))));
2600 #ifdef ENABLE_DTRACE
2601 			if (G_UNLIKELY (MONO_GC_OBJ_PINNED_ENABLED ())) {
2602 				MonoVTable *vt = (MonoVTable*)LOAD_VTABLE (bigobj->data);
2603 				MONO_GC_OBJ_PINNED ((mword)bigobj->data, sgen_safe_object_get_size ((MonoObject*)bigobj->data), vt->klass->name_space, vt->klass->name, GENERATION_OLD);
/* Already pinned: only legal when finishing a concurrent mark. */
2607 			if (sgen_los_object_is_pinned (bigobj->data)) {
2608 				g_assert (finish_up_concurrent_mark);
2611 			sgen_los_pin_object (bigobj->data);
2612 			if (SGEN_OBJECT_HAS_REFERENCES (bigobj->data))
2613 				GRAY_OBJECT_ENQUEUE (WORKERS_DISTRIBUTE_GRAY_QUEUE, bigobj->data);
2614 			if (G_UNLIKELY (do_pin_stats))
2615 				sgen_pin_stats_register_object ((char*) bigobj->data, safe_object_get_size ((MonoObject*) bigobj->data));
2616 			SGEN_LOG (6, "Marked large object %p (%s) size: %lu from roots", bigobj->data, safe_name (bigobj->data), (unsigned long)sgen_los_object_size (bigobj));
2619 				add_profile_gc_root (&root_report, bigobj->data, MONO_PROFILE_GC_ROOT_PINNING | MONO_PROFILE_GC_ROOT_MISC, 0);
2623 		notify_gc_roots (&root_report);
2624 	/* second pass for the sections */
2625 	ctx.scan_func = concurrent_collection_in_progress ? current_object_ops.scan_object : NULL;
2626 	ctx.copy_func = NULL;
2627 	ctx.queue = WORKERS_DISTRIBUTE_GRAY_QUEUE;
2630 	 * Concurrent mark never follows references into the nursery.
2631 	 * In the start and finish pauses we must scan live nursery
2632 	 * objects, though. We could simply scan all nursery objects,
2633 	 * but that would be conservative. The easiest way is to do a
2634 	 * nursery collection, which copies all live nursery objects
2635 	 * (except pinned ones, with the simple nursery) to the major
2636 	 * heap. Scanning the mod union table later will then scan
2637 	 * those promoted objects, provided they're reachable. Pinned
2638 	 * objects in the nursery - which we can trivially find in the
2639 	 * pinning queue - are treated as roots in the mark pauses.
2641 	 * The split nursery complicates the latter part because
2642 	 * non-pinned objects can survive in the nursery. That's why
2643 	 * we need to do a full front-to-back scan of the nursery,
2644 	 * marking all objects.
2646 	 * Non-concurrent mark evacuates from the nursery, so it's
2647 	 * sufficient to just scan pinned nursery objects.
2649 	if (concurrent_collection_in_progress && sgen_minor_collector.is_split) {
2650 		scan_nursery_objects (ctx);
2652 		sgen_pin_objects_in_section (nursery_section, ctx);
2653 		if (check_nursery_objects_pinned && !sgen_minor_collector.is_split)
2654 			sgen_check_nursery_objects_pinned (!concurrent_collection_in_progress || finish_up_concurrent_mark);
2657 	major_collector.pin_objects (WORKERS_DISTRIBUTE_GRAY_QUEUE);
2658 	if (old_next_pin_slot)
2659 		*old_next_pin_slot = sgen_get_pinned_count ();
2662 	time_major_pinning += TV_ELAPSED (atv, btv);
2663 	SGEN_LOG (2, "Finding pinned pointers: %d in %d usecs", sgen_get_pinned_count (), TV_ELAPSED (atv, btv));
2664 	SGEN_LOG (4, "Start scan with %d pinned objects", sgen_get_pinned_count ());
2666 	major_collector.init_to_space ();
2668 #ifdef SGEN_DEBUG_INTERNAL_ALLOC
2669 	main_gc_thread = mono_native_thread_self ();
/* Non-concurrent parallel major: workers start only after pinning. */
2672 	if (!concurrent_collection_in_progress && major_collector.is_parallel) {
2673 		sgen_workers_start_all_workers ();
2674 		sgen_workers_start_marking ();
2677 	if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS)
2678 		report_registered_roots ();
2680 	time_major_scan_pinned += TV_ELAPSED (btv, atv);
2682 	/* registered roots, this includes static fields */
2683 	scrrjd_normal = sgen_alloc_internal_dynamic (sizeof (ScanFromRegisteredRootsJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
2684 	scrrjd_normal->copy_or_mark_func = current_object_ops.copy_or_mark_object;
2685 	scrrjd_normal->scan_func = current_object_ops.scan_object;
2686 	scrrjd_normal->heap_start = heap_start;
2687 	scrrjd_normal->heap_end = heap_end;
2688 	scrrjd_normal->root_type = ROOT_TYPE_NORMAL;
2689 	sgen_workers_enqueue_job (job_scan_from_registered_roots, scrrjd_normal);
2691 	scrrjd_wbarrier = sgen_alloc_internal_dynamic (sizeof (ScanFromRegisteredRootsJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
2692 	scrrjd_wbarrier->copy_or_mark_func = current_object_ops.copy_or_mark_object;
2693 	scrrjd_wbarrier->scan_func = current_object_ops.scan_object;
2694 	scrrjd_wbarrier->heap_start = heap_start;
2695 	scrrjd_wbarrier->heap_end = heap_end;
2696 	scrrjd_wbarrier->root_type = ROOT_TYPE_WBARRIER;
2697 	sgen_workers_enqueue_job (job_scan_from_registered_roots, scrrjd_wbarrier);
2700 	time_major_scan_registered_roots += TV_ELAPSED (atv, btv);
/* Thread stacks and registers. */
2703 	stdjd = sgen_alloc_internal_dynamic (sizeof (ScanThreadDataJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
2704 	stdjd->heap_start = heap_start;
2705 	stdjd->heap_end = heap_end;
2706 	sgen_workers_enqueue_job (job_scan_thread_data, stdjd);
2709 	time_major_scan_thread_data += TV_ELAPSED (btv, atv);
2712 	time_major_scan_alloc_pinned += TV_ELAPSED (atv, btv);
2714 	if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS)
2715 		report_finalizer_roots ();
2717 	/* scan the list of objects ready for finalization */
2718 	sfejd_fin_ready = sgen_alloc_internal_dynamic (sizeof (ScanFinalizerEntriesJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
2719 	sfejd_fin_ready->list = fin_ready_list;
2720 	sgen_workers_enqueue_job (job_scan_finalizer_entries, sfejd_fin_ready);
2722 	sfejd_critical_fin = sgen_alloc_internal_dynamic (sizeof (ScanFinalizerEntriesJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
2723 	sfejd_critical_fin->list = critical_fin_list;
2724 	sgen_workers_enqueue_job (job_scan_finalizer_entries, sfejd_critical_fin);
/* Only when finishing concurrent mark: scan cards dirtied meanwhile. */
2726 	if (scan_mod_union) {
2727 		g_assert (finish_up_concurrent_mark);
2729 		/* Mod union card table */
2730 		sgen_workers_enqueue_job (job_scan_major_mod_union_cardtable, NULL);
2731 		sgen_workers_enqueue_job (job_scan_los_mod_union_cardtable, NULL);
2735 	time_major_scan_finalized += TV_ELAPSED (btv, atv);
2736 	SGEN_LOG (2, "Root scan: %d usecs", TV_ELAPSED (btv, atv));
2739 	time_major_scan_big_objects += TV_ELAPSED (atv, btv);
2741 	if (concurrent_collection_in_progress) {
2742 		/* prepare the pin queue for the next collection */
2743 		sgen_finish_pinning ();
2745 		sgen_pin_stats_reset ();
2747 		if (do_concurrent_checks)
2748 			check_nursery_is_clean ();
/* Begin a major collection.  When `concurrent` is TRUE the concurrent
 * major ops are installed and concurrent_collection_in_progress is set;
 * otherwise the plain major ops are used.  Ends by running the root
 * copy/mark phase; *old_next_pin_slot receives the pin count (may be
 * NULL for the concurrent start). */
2753 major_start_collection (gboolean concurrent, int *old_next_pin_slot)
2755 	MONO_GC_BEGIN (GENERATION_OLD);
2756 	binary_protocol_collection_begin (stat_major_gcs, GENERATION_OLD);
2758 	current_collection_generation = GENERATION_OLD;
2759 #ifndef DISABLE_PERFCOUNTERS
2760 	mono_perfcounters->gc_collections1++;
2763 	g_assert (sgen_section_gray_queue_is_empty (sgen_workers_get_distribute_section_gray_queue ()));
/* Concurrent branch (the `if (concurrent)` line is elided here). */
2766 		g_assert (major_collector.is_concurrent);
2767 		concurrent_collection_in_progress = TRUE;
2769 		sgen_cement_concurrent_start ();
2771 		current_object_ops = major_collector.major_concurrent_ops;
/* Non-concurrent branch. */
2773 		current_object_ops = major_collector.major_ops;
2776 	reset_pinned_from_failed_allocation ();
2778 	sgen_memgov_major_collection_start ();
2780 	//count_ref_nonref_objs ();
2781 	//consistency_check ();
2783 	check_scan_starts ();
2786 	SGEN_LOG (1, "Start major collection %d", stat_major_gcs);
2788 	gc_stats.major_gc_count ++;
2790 	if (major_collector.start_major_collection)
2791 		major_collector.start_major_collection ();
2793 	major_copy_or_mark_from_roots (old_next_pin_slot, FALSE, FALSE);
/* Busy-wait (with a sleep in the elided loop body) until all GC worker
 * threads report they are done. */
2797 wait_for_workers_to_finish (void)
2799 	while (!sgen_workers_all_done ())
/* NOTE(review): the enclosing function header is elided from this
 * listing — this is the body that joins the worker threads after marking:
 * redirect the main gray queue to the workers, join them, and assert the
 * queue has drained. */
2806 	if (concurrent_collection_in_progress || major_collector.is_parallel) {
2807 		gray_queue_redirect (&gray_queue);
2808 		sgen_workers_join ();
2811 	g_assert (sgen_gray_object_queue_is_empty (&gray_queue));
2813 #ifdef SGEN_DEBUG_INTERNAL_ALLOC
2814 	main_gc_thread = NULL;
/* Finish a major collection (plain or concurrent): complete marking and
 * finalization, sweep the LOS list and the major heap, rebuild nursery
 * fragments (non-concurrent case), wake the finalizer thread, and clear
 * the concurrent-in-progress flag.  `reason` is used for heap dumps;
 * `old_next_pin_slot` restores the pin queue after a late-pinning OOM;
 * `scan_mod_union` is forwarded to the finishing root scan. */
2819 major_finish_collection (const char *reason, int old_next_pin_slot, gboolean scan_mod_union)
2821 	LOSObject *bigobj, *prevbo;
2827 	if (concurrent_collection_in_progress || major_collector.is_parallel)
2830 	if (concurrent_collection_in_progress) {
2831 		current_object_ops = major_collector.major_concurrent_ops;
/* Finishing pause of the concurrent mark: re-run the root phase. */
2833 		major_copy_or_mark_from_roots (NULL, TRUE, scan_mod_union);
2836 		g_assert (sgen_gray_object_queue_is_empty (&gray_queue));
2838 		if (do_concurrent_checks)
2839 			check_nursery_is_clean ();
2841 		current_object_ops = major_collector.major_ops;
2845 	 * The workers have stopped so we need to finish gray queue
2846 	 * work that might result from finalization in the main GC
2847 	 * thread. Redirection must therefore be turned off.
2849 	sgen_gray_object_queue_disable_alloc_prepare (&gray_queue);
2850 	g_assert (sgen_section_gray_queue_is_empty (sgen_workers_get_distribute_section_gray_queue ()));
2852 	/* all the objects in the heap */
2853 	finish_gray_stack (GENERATION_OLD, &gray_queue);
2855 	time_major_finish_gray_stack += TV_ELAPSED (btv, atv);
2858 	 * The (single-threaded) finalization code might have done
2859 	 * some copying/marking so we can only reset the GC thread's
2860 	 * worker data here instead of earlier when we joined the
2863 	sgen_workers_reset_data ();
/* Objects pinned late because an allocation failed during copying. */
2865 	if (objects_pinned) {
2866 		g_assert (!concurrent_collection_in_progress);
2868 		/*This is slow, but we just OOM'd*/
2869 		sgen_pin_queue_clear_discarded_entries (nursery_section, old_next_pin_slot);
2870 		sgen_optimize_pin_queue (0);
2871 		sgen_find_section_pin_queue_start_end (nursery_section);
2875 	reset_heap_boundaries ();
2876 	sgen_update_heap_boundaries ((mword)sgen_get_nursery_start (), (mword)sgen_get_nursery_end ());
2878 	if (check_mark_bits_after_major_collection)
2879 		sgen_check_major_heap_marked ();
2881 	MONO_GC_SWEEP_BEGIN (GENERATION_OLD, !major_collector.sweeps_lazily);
2883 	/* sweep the big objects list */
2885 	for (bigobj = los_object_list; bigobj;) {
2886 		g_assert (!object_is_pinned (bigobj->data));
2887 		if (sgen_los_object_is_pinned (bigobj->data)) {
2888 			sgen_los_unpin_object (bigobj->data);
2889 			sgen_update_heap_boundaries ((mword)bigobj->data, (mword)bigobj->data + sgen_los_object_size (bigobj));
2892 			/* not referenced anywhere, so we can free it */
/* Unlink from the list (head case elided: los_object_list update). */
2894 				prevbo->next = bigobj->next;
2896 				los_object_list = bigobj->next;
2898 			bigobj = bigobj->next;
2899 			sgen_los_free_object (to_free);
2903 		bigobj = bigobj->next;
2907 	time_major_free_bigobjs += TV_ELAPSED (atv, btv);
2912 	time_major_los_sweep += TV_ELAPSED (btv, atv);
2914 	major_collector.sweep ();
2916 	MONO_GC_SWEEP_END (GENERATION_OLD, !major_collector.sweeps_lazily);
2919 	time_major_sweep += TV_ELAPSED (atv, btv);
2921 	if (!concurrent_collection_in_progress) {
2922 		/* walk the pin_queue, build up the fragment list of free memory, unmark
2923 		 * pinned objects as we go, memzero() the empty fragments so they are ready for the
2926 		if (!sgen_build_nursery_fragments (nursery_section, nursery_section->pin_queue_start, nursery_section->pin_queue_num_entries, NULL))
2929 		/* prepare the pin queue for the next collection */
2930 		sgen_finish_pinning ();
2932 		/* Clear TLABs for all threads */
2933 		sgen_clear_tlabs ();
2935 		sgen_pin_stats_reset ();
2938 	if (concurrent_collection_in_progress)
2939 		sgen_cement_concurrent_finish ();
2940 	sgen_cement_clear_below_threshold ();
2943 	time_major_fragment_creation += TV_ELAPSED (btv, atv);
2946 	dump_heap ("major", stat_major_gcs - 1, reason);
2948 	if (fin_ready_list || critical_fin_list) {
2949 		SGEN_LOG (4, "Finalizer-thread wakeup: ready %d", num_ready_finalizers);
2950 		mono_gc_finalize_notify ();
2953 	g_assert (sgen_gray_object_queue_is_empty (&gray_queue));
2955 	sgen_memgov_major_collection_end ();
2956 	current_collection_generation = -1;
2958 	major_collector.finish_major_collection ();
2960 	g_assert (sgen_section_gray_queue_is_empty (sgen_workers_get_distribute_section_gray_queue ()));
2962 	if (concurrent_collection_in_progress)
2963 		concurrent_collection_in_progress = FALSE;
2965 	check_scan_starts ();
2967 	binary_protocol_flush_buffers (FALSE);
2969 	//consistency_check ();
2971 	MONO_GC_END (GENERATION_OLD);
2972 	binary_protocol_collection_end (stat_major_gcs - 1, GENERATION_OLD);
/* Run a full, stop-the-world (world already stopped), non-concurrent
 * major collection.  Returns TRUE if objects were pinned due to failed
 * allocations (caller may want another collection).  Early-outs when
 * major collections are disabled (return elided from this listing). */
2976 major_do_collection (const char *reason)
2978 	TV_DECLARE (all_atv);
2979 	TV_DECLARE (all_btv);
2980 	int old_next_pin_slot;
2982 	if (disable_major_collections)
/* Sanity: no stale mark counts from a previous cycle. */
2985 	if (major_collector.get_and_reset_num_major_objects_marked) {
2986 		long long num_marked = major_collector.get_and_reset_num_major_objects_marked ();
2987 		g_assert (!num_marked);
2990 	/* world must be stopped already */
2991 	TV_GETTIME (all_atv);
2993 	major_start_collection (FALSE, &old_next_pin_slot);
2994 	major_finish_collection (reason, old_next_pin_slot, FALSE);
2996 	TV_GETTIME (all_btv);
2997 	gc_stats.major_gc_time_usecs += TV_ELAPSED (all_atv, all_btv);
2999 	/* FIXME: also report this to the user, preferably in gc-end. */
3000 	if (major_collector.get_and_reset_num_major_objects_marked)
3001 		major_collector.get_and_reset_num_major_objects_marked ();
3003 	return bytes_pinned_from_failed_allocation > 0;
/*
 * major_start_concurrent_collection:
 * Kick off a concurrent major collection: run the initial (stop-the-world)
 * marking phase, hand the gray queue over to the worker threads, and
 * return while the workers keep marking in the background.
 */
3007 major_start_concurrent_collection (const char *reason)
3009 long long num_objects_marked;
3011 if (disable_major_collections)
/* Must start from a clean slate: nothing marked yet. */
3014 num_objects_marked = major_collector.get_and_reset_num_major_objects_marked ();
3015 g_assert (num_objects_marked == 0);
3017 MONO_GC_CONCURRENT_START_BEGIN (GENERATION_OLD);
3018 binary_protocol_concurrent_start ();
3020 // FIXME: store reason and pass it when finishing
3021 major_start_collection (TRUE, NULL);
/* Move the gray queue to the workers and wait for them to pick up jobs. */
3023 gray_queue_redirect (&gray_queue);
3024 sgen_workers_wait_for_jobs ();
3026 num_objects_marked = major_collector.get_and_reset_num_major_objects_marked ();
3027 MONO_GC_CONCURRENT_START_END (GENERATION_OLD, num_objects_marked);
/* No collection is "current" while the workers run concurrently. */
3029 current_collection_generation = -1;
/*
 * major_update_or_finish_concurrent_collection:
 * Either update the card-table mod-union for an in-progress concurrent
 * major collection (when the workers are not done and we are not forced
 * to finish), or finish the collection: wait for the workers, run a
 * finishing nursery collection, and complete the major cycle.
 * NOTE(review): the return value and some control-flow lines are not
 * visible in this excerpt; callers appear to treat a TRUE result as
 * "the old generation was collected".
 */
3033 major_update_or_finish_concurrent_collection (gboolean force_finish)
3035 SgenGrayQueue unpin_queue;
3036 memset (&unpin_queue, 0, sizeof (unpin_queue));
3038 MONO_GC_CONCURRENT_UPDATE_FINISH_BEGIN (GENERATION_OLD, major_collector.get_and_reset_num_major_objects_marked ());
3039 binary_protocol_concurrent_update_finish ();
3041 g_assert (sgen_gray_object_queue_is_empty (&gray_queue));
/* Update-only path: refresh the mod-union card tables and return. */
3043 if (!force_finish && !sgen_workers_all_done ()) {
3044 major_collector.update_cardtable_mod_union ();
3045 sgen_los_update_cardtable_mod_union ();
3047 MONO_GC_CONCURRENT_UPDATE_END (GENERATION_OLD, major_collector.get_and_reset_num_major_objects_marked ());
3052 * The major collector can add global remsets which are processed in the finishing
3053 * nursery collection, below. That implies that the workers must have finished
3054 * marking before the nursery collection is allowed to run, otherwise we might miss
3057 wait_for_workers_to_finish ();
3059 major_collector.update_cardtable_mod_union ();
3060 sgen_los_update_cardtable_mod_union ();
/* Finishing nursery collection; pinned objects are recorded in unpin_queue. */
3062 collect_nursery (&unpin_queue, TRUE);
3064 if (mod_union_consistency_check)
3065 sgen_check_mod_union_consistency ();
3067 current_collection_generation = GENERATION_OLD;
3068 major_finish_collection ("finishing", -1, TRUE);
3070 if (whole_heap_check_before_collection)
3071 sgen_check_whole_heap (FALSE);
/* Undo the temporary pinning done for the finishing pause. */
3073 unpin_objects_from_queue (&unpin_queue);
3074 sgen_gray_object_queue_deinit (&unpin_queue);
3076 MONO_GC_CONCURRENT_FINISH_END (GENERATION_OLD, major_collector.get_and_reset_num_major_objects_marked ());
3078 current_collection_generation = -1;
3084 * Ensure an allocation request for @size will succeed by freeing enough memory.
3086 * LOCKING: The GC lock MUST be held.
3089 sgen_ensure_free_space (size_t size)
3091 int generation_to_collect = -1;
3092 const char *reason = NULL;
/*
 * Decide which generation to collect based on the request:
 *  - LOS-sized requests or degraded mode may force a major collection;
 *  - otherwise either the "minor allowance" triggers a major, or the
 *    nursery being full triggers a minor.
 */
3095 if (size > SGEN_MAX_SMALL_OBJ_SIZE) {
3096 if (sgen_need_major_collection (size)) {
3097 reason = "LOS overflow";
3098 generation_to_collect = GENERATION_OLD;
3101 if (degraded_mode) {
3102 if (sgen_need_major_collection (size)) {
3103 reason = "Degraded mode overflow";
3104 generation_to_collect = GENERATION_OLD;
3106 } else if (sgen_need_major_collection (size)) {
3107 reason = "Minor allowance";
3108 generation_to_collect = GENERATION_OLD;
3110 generation_to_collect = GENERATION_NURSERY;
3111 reason = "Nursery full";
/* If nothing else demands a collection, finish a concurrent major
 * collection whose workers have already completed. */
3115 if (generation_to_collect == -1) {
3116 if (concurrent_collection_in_progress && sgen_workers_all_done ()) {
3117 generation_to_collect = GENERATION_OLD;
3118 reason = "Finish concurrent collection";
/* No collection needed at all. */
3122 if (generation_to_collect == -1)
3124 sgen_perform_collection (size, generation_to_collect, reason, FALSE);
3128 * LOCKING: Assumes the GC lock is held.
/*
 * sgen_perform_collection:
 * Central collection driver: stops the world, runs the requested
 * collection (possibly finishing an in-progress concurrent major
 * collection first), handles overflow collections, and restarts the
 * world. @wait_to_finish forces a synchronous major collection instead
 * of starting a concurrent one.
 */
3131 sgen_perform_collection (size_t requested_size, int generation_to_collect, const char *reason, gboolean wait_to_finish)
3133 TV_DECLARE (gc_end);
/* infos[0] describes the requested collection, infos[1] an optional
 * overflow collection; both are reported to the profiler on restart. */
3134 GGTimingInfo infos [2];
3135 int overflow_generation_to_collect = -1;
3136 int oldest_generation_collected = generation_to_collect;
3137 const char *overflow_reason = NULL;
3139 MONO_GC_REQUESTED (generation_to_collect, requested_size, wait_to_finish ? 1 : 0);
3141 binary_protocol_collection_force (generation_to_collect);
3143 g_assert (generation_to_collect == GENERATION_NURSERY || generation_to_collect == GENERATION_OLD);
3145 memset (infos, 0, sizeof (infos));
3146 mono_profiler_gc_event (MONO_GC_EVENT_START, generation_to_collect);
3148 infos [0].generation = generation_to_collect;
3149 infos [0].reason = reason;
3150 infos [0].is_overflow = FALSE;
3151 TV_GETTIME (infos [0].total_time);
3152 infos [1].generation = -1;
3154 sgen_stop_world (generation_to_collect);
/* A concurrent major collection in flight must be updated or finished
 * before we can run anything else. */
3156 if (concurrent_collection_in_progress) {
3157 if (major_update_or_finish_concurrent_collection (wait_to_finish && generation_to_collect == GENERATION_OLD)) {
3158 oldest_generation_collected = GENERATION_OLD;
3161 if (generation_to_collect == GENERATION_OLD)
/* An explicitly requested synchronous major collection overrides the
 * concurrent default when the embedder asked for it. */
3164 if (generation_to_collect == GENERATION_OLD &&
3165 allow_synchronous_major &&
3166 major_collector.want_synchronous_collection &&
3167 *major_collector.want_synchronous_collection) {
3168 wait_to_finish = TRUE;
3172 //FIXME extract overflow reason
3173 if (generation_to_collect == GENERATION_NURSERY) {
/* A failing nursery collection escalates to a major collection. */
3174 if (collect_nursery (NULL, FALSE)) {
3175 overflow_generation_to_collect = GENERATION_OLD;
3176 overflow_reason = "Minor overflow";
/* The concurrent collector runs a nursery collection first so the
 * initial marking pause starts from an empty nursery. */
3179 if (major_collector.is_concurrent) {
3180 g_assert (!concurrent_collection_in_progress);
3181 if (!wait_to_finish)
3182 collect_nursery (NULL, FALSE);
3185 if (major_collector.is_concurrent && !wait_to_finish) {
3186 major_start_concurrent_collection (reason);
3187 // FIXME: set infos[0] properly
/* Synchronous major path: excessive pinning triggers an overflow
 * nursery collection afterwards. */
3190 if (major_do_collection (reason)) {
3191 overflow_generation_to_collect = GENERATION_NURSERY;
3192 overflow_reason = "Excessive pinning";
3197 TV_GETTIME (gc_end);
3198 infos [0].total_time = SGEN_TV_ELAPSED (infos [0].total_time, gc_end);
/* Overflow collections are only run for the non-concurrent collector. */
3201 if (!major_collector.is_concurrent && overflow_generation_to_collect != -1) {
3202 mono_profiler_gc_event (MONO_GC_EVENT_START, overflow_generation_to_collect);
3203 infos [1].generation = overflow_generation_to_collect;
3204 infos [1].reason = overflow_reason;
3205 infos [1].is_overflow = TRUE;
3206 infos [1].total_time = gc_end;
3208 if (overflow_generation_to_collect == GENERATION_NURSERY)
3209 collect_nursery (NULL, FALSE);
3211 major_do_collection (overflow_reason);
3213 TV_GETTIME (gc_end);
3214 infos [1].total_time = SGEN_TV_ELAPSED (infos [1].total_time, gc_end);
3216 /* keep events symmetric */
3217 mono_profiler_gc_event (MONO_GC_EVENT_END, overflow_generation_to_collect);
3219 oldest_generation_collected = MAX (oldest_generation_collected, overflow_generation_to_collect);
3222 SGEN_LOG (2, "Heap size: %lu, LOS size: %lu", (unsigned long)mono_gc_get_heap_size (), (unsigned long)los_memory_usage);
3224 /* this also sets the proper pointers for the next allocation */
3225 if (generation_to_collect == GENERATION_NURSERY && !sgen_can_alloc_size (requested_size)) {
3226 /* TypeBuilder and MonoMethod are killing mcs with fragmentation */
3227 SGEN_LOG (1, "nursery collection didn't find enough room for %zd alloc (%d pinned)", requested_size, sgen_get_pinned_count ());
3228 sgen_dump_pin_queue ();
3233 g_assert (sgen_gray_object_queue_is_empty (&gray_queue));
3235 sgen_restart_world (oldest_generation_collected, infos);
3237 mono_profiler_gc_event (MONO_GC_EVENT_END, generation_to_collect);
3241 * ######################################################################
3242 * ######## Memory allocation from the OS
3243 * ######################################################################
3244 * This section of code deals with getting memory from the OS and
3245 * allocating memory for GC-internal data structures.
3246 * Internal memory can be handled with a freelist for small objects.
/* Debugging helper: print GC-internal and pinned memory usage to stdout.
 * Kept unreferenced (G_GNUC_UNUSED) for invocation from a debugger. */
3252 G_GNUC_UNUSED static void
3253 report_internal_mem_usage (void)
3255 printf ("Internal memory usage:\n");
3256 sgen_report_internal_mem_usage ();
3257 printf ("Pinned memory usage:\n");
3258 major_collector.report_pinned_memory_usage ();
3262 * ######################################################################
3263 * ######## Finalization support
3264 * ######################################################################
/*
 * Liveness test for objects outside the nursery: pinned or forwarded
 * old-gen objects are alive; LOS objects are alive only when pinned;
 * otherwise defer to the major collector's own liveness check.
 */
3267 static inline gboolean
3268 sgen_major_is_object_alive (void *object)
3272 /* Oldgen objects can be pinned and forwarded too */
3273 if (SGEN_OBJECT_IS_PINNED (object) || SGEN_OBJECT_IS_FORWARDED (object))
3277 * FIXME: major_collector.is_object_live() also calculates the
3278 * size. Avoid the double calculation.
3280 objsize = SGEN_ALIGN_UP (sgen_safe_object_get_size ((MonoObject*)object));
/* Objects above the small-object threshold live in the LOS. */
3281 if (objsize > SGEN_MAX_SMALL_OBJ_SIZE)
3282 return sgen_los_object_is_pinned (object);
3284 return major_collector.is_object_live (object);
3288 * If the object has been forwarded it means it's still referenced from a root.
3289 * If it is pinned it's still alive as well.
3290 * A LOS object is only alive if we have pinned it.
3291 * Return TRUE if @obj is ready to be finalized.
3293 static inline gboolean
3294 sgen_is_object_alive (void *object)
/* Dispatch on the object's location: nursery vs. major heap. */
3296 if (ptr_in_nursery (object))
3297 return sgen_nursery_is_object_alive (object);
3299 return sgen_major_is_object_alive (object);
3303 * This function returns true if @object is either alive or it belongs to the old gen
3304 * and we're currently doing a minor collection.
3307 sgen_is_object_alive_for_current_gen (char *object)
3309 if (ptr_in_nursery (object))
3310 return sgen_nursery_is_object_alive (object);
/* During a minor collection old-gen objects are not traced, so they
 * are treated as alive here. */
3312 if (current_collection_generation == GENERATION_NURSERY)
3315 return sgen_major_is_object_alive (object);
3319 * This function returns true if @object is either alive and belongs to the
3320 * current collection - major collections are full heap, so old gen objects
3321 * are never alive during a minor collection.
3324 sgen_is_object_alive_and_on_current_collection (char *object)
3326 if (ptr_in_nursery (object))
3327 return sgen_nursery_is_object_alive (object);
/* Old-gen objects are outside the scope of a minor collection. */
3329 if (current_collection_generation == GENERATION_NURSERY)
3332 return sgen_major_is_object_alive (object);
/* An object is ready for finalization exactly when it is no longer alive. */
3337 sgen_gc_is_object_ready_for_finalization (void *object)
3339 return !sgen_is_object_alive (object);
/* Returns whether @obj's class derives from the runtime's critical
 * finalizer class (CriticalFinalizerObject); such finalizers are queued
 * separately so they run after ordinary ones. */
3343 has_critical_finalizer (MonoObject *obj)
3347 if (!mono_defaults.critical_finalizer_object)
3350 class = ((MonoVTable*)LOAD_VTABLE (obj))->klass;
3352 return mono_class_has_parent_fast (class, mono_defaults.critical_finalizer_object);
/*
 * Queue @obj for finalization, on the critical-finalizer list when its
 * class has a critical finalizer, otherwise on the ready list.
 * Fires the dtrace FINALIZE_ENQUEUE probe when enabled.
 */
3356 sgen_queue_finalization_entry (MonoObject *obj)
3358 FinalizeReadyEntry *entry = sgen_alloc_internal (INTERNAL_MEM_FINALIZE_READY_ENTRY);
3359 gboolean critical = has_critical_finalizer (obj);
3360 entry->object = obj;
/* Prepend onto the appropriate singly-linked list. */
3362 entry->next = critical_fin_list;
3363 critical_fin_list = entry;
3365 entry->next = fin_ready_list;
3366 fin_ready_list = entry;
3369 #ifdef ENABLE_DTRACE
3370 if (G_UNLIKELY (MONO_GC_FINALIZE_ENQUEUE_ENABLED ())) {
3371 int gen = sgen_ptr_in_nursery (obj) ? GENERATION_NURSERY : GENERATION_OLD;
3372 MonoVTable *vt = (MonoVTable*)LOAD_VTABLE (obj);
3373 MONO_GC_FINALIZE_ENQUEUE ((mword)obj, sgen_safe_object_get_size (obj),
3374 vt->klass->name_space, vt->klass->name, gen, critical);
/* Public liveness query; see sgen_is_object_alive_and_on_current_collection. */
3380 sgen_object_is_live (void *obj)
3382 return sgen_is_object_alive_and_on_current_collection (obj);
3385 /* LOCKING: requires that the GC lock is held */
/* Remove from the ephemeron list every array whose vtable has been
 * cleared (i.e. its domain was unloaded), freeing the link nodes. */
3387 null_ephemerons_for_domain (MonoDomain *domain)
3389 EphemeronLinkNode *current = ephemeron_list, *prev = NULL;
3392 MonoObject *object = (MonoObject*)current->array;
/* A null vtable marks an object belonging to an unloaded domain. */
3394 if (object && !object->vtable) {
3395 EphemeronLinkNode *tmp = current;
/* Unlink: either from the middle of the list or from the head. */
3398 prev->next = current->next;
3400 ephemeron_list = current->next;
3402 current = current->next;
3403 sgen_free_internal (tmp, INTERNAL_MEM_EPHEMERON_LINK);
3406 current = current->next;
3411 /* LOCKING: requires that the GC lock is held */
/*
 * Walk the ephemeron list: drop arrays that are no longer reachable in
 * the current generation, and in the surviving arrays replace keys that
 * died with the domain's tombstone object (clearing the association).
 */
3413 clear_unreachable_ephemerons (ScanCopyContext ctx)
3415 CopyOrMarkObjectFunc copy_func = ctx.copy_func;
3416 GrayQueue *queue = ctx.queue;
3417 EphemeronLinkNode *current = ephemeron_list, *prev = NULL;
3419 Ephemeron *cur, *array_end;
3423 char *object = current->array;
/* Dead ephemeron array: unlink it and free the link node. */
3425 if (!sgen_is_object_alive_for_current_gen (object)) {
3426 EphemeronLinkNode *tmp = current;
3428 SGEN_LOG (5, "Dead Ephemeron array at %p", object);
3431 prev->next = current->next;
3433 ephemeron_list = current->next;
3435 current = current->next;
3436 sgen_free_internal (tmp, INTERNAL_MEM_EPHEMERON_LINK);
/* The array survived: copy/forward it and update our reference. */
3441 copy_func ((void**)&object, queue);
3442 current->array = object;
3444 SGEN_LOG (5, "Clearing unreachable entries for ephemeron array at %p", object);
3446 array = (MonoArray*)object;
3447 cur = mono_array_addr (array, Ephemeron, 0);
3448 array_end = cur + mono_array_length_fast (array);
3449 tombstone = (char*)((MonoVTable*)LOAD_VTABLE (object))->domain->ephemeron_tombstone;
3451 for (; cur < array_end; ++cur) {
3452 char *key = (char*)cur->key;
/* Empty or already-cleared slots are skipped. */
3454 if (!key || key == tombstone)
3457 SGEN_LOG (5, "[%td] key %p (%s) value %p (%s)", cur - mono_array_addr (array, Ephemeron, 0),
3458 key, sgen_is_object_alive_for_current_gen (key) ? "reachable" : "unreachable",
3459 cur->value, cur->value && sgen_is_object_alive_for_current_gen (cur->value) ? "reachable" : "unreachable");
/* A dead key is replaced by the tombstone. */
3461 if (!sgen_is_object_alive_for_current_gen (key)) {
3462 cur->key = tombstone;
3468 current = current->next;
3473 LOCKING: requires that the GC lock is held
3475 Limitations: We scan all ephemerons on every collection since the current design doesn't allow for a simple nursery/mature split.
/*
 * One pass of the ephemeron fixpoint: for every reachable ephemeron
 * array, mark/copy values whose keys are alive. Returns nonzero when
 * the pass marked nothing new, i.e. the fixpoint has been reached.
 */
3478 mark_ephemerons_in_range (ScanCopyContext ctx)
3480 CopyOrMarkObjectFunc copy_func = ctx.copy_func;
3481 GrayQueue *queue = ctx.queue;
3482 int nothing_marked = 1;
3483 EphemeronLinkNode *current = ephemeron_list;
3485 Ephemeron *cur, *array_end;
3488 for (current = ephemeron_list; current; current = current->next) {
3489 char *object = current->array;
3490 SGEN_LOG (5, "Ephemeron array at %p", object);
3492 /*It has to be alive*/
3493 if (!sgen_is_object_alive_for_current_gen (object)) {
3494 SGEN_LOG (5, "\tnot reachable");
/* Forward the array reference before scanning its entries. */
3498 copy_func ((void**)&object, queue);
3500 array = (MonoArray*)object;
3501 cur = mono_array_addr (array, Ephemeron, 0);
3502 array_end = cur + mono_array_length_fast (array);
3503 tombstone = (char*)((MonoVTable*)LOAD_VTABLE (object))->domain->ephemeron_tombstone;
3505 for (; cur < array_end; ++cur) {
3506 char *key = cur->key;
3508 if (!key || key == tombstone)
3511 SGEN_LOG (5, "[%td] key %p (%s) value %p (%s)", cur - mono_array_addr (array, Ephemeron, 0),
3512 key, sgen_is_object_alive_for_current_gen (key) ? "reachable" : "unreachable",
3513 cur->value, cur->value && sgen_is_object_alive_for_current_gen (cur->value) ? "reachable" : "unreachable");
/* Alive key: keep the key up to date and mark the value if it is not
 * already known to be alive. */
3515 if (sgen_is_object_alive_for_current_gen (key)) {
3516 char *value = cur->value;
3518 copy_func ((void**)&cur->key, queue);
3520 if (!sgen_is_object_alive_for_current_gen (value))
3522 copy_func ((void**)&cur->value, queue);
3528 SGEN_LOG (5, "Ephemeron run finished. Is it done %d", nothing_marked);
3529 return nothing_marked;
/*
 * mono_gc_invoke_finalizers:
 * Drain the finalization queues, invoking mono_gc_run_finalize on each
 * entry. Ordinary (fin_ready_list) entries are processed before
 * critical (critical_fin_list) ones. The finished entry is unlinked and
 * freed on the next iteration so that the object stays visible on the
 * stack (and thus pinned) while its finalizer runs.
 */
3533 mono_gc_invoke_finalizers (void)
3535 FinalizeReadyEntry *entry = NULL;
3536 gboolean entry_is_critical = FALSE;
3539 /* FIXME: batch to reduce lock contention */
3540 while (fin_ready_list || critical_fin_list) {
3544 FinalizeReadyEntry **list = entry_is_critical ? &critical_fin_list : &fin_ready_list;
3546 /* We have finalized entry in the last
3547 interation, now we need to remove it from
/* Unlink the previously finalized entry: fast path when it is the
 * list head, otherwise search for its predecessor. */
3550 *list = entry->next;
3552 FinalizeReadyEntry *e = *list;
3553 while (e->next != entry)
3555 e->next = entry->next;
3557 sgen_free_internal (entry, INTERNAL_MEM_FINALIZE_READY_ENTRY);
3561 /* Now look for the first non-null entry. */
3562 for (entry = fin_ready_list; entry && !entry->object; entry = entry->next)
3565 entry_is_critical = FALSE;
/* Ready list exhausted: fall back to the critical list. */
3567 entry_is_critical = TRUE;
3568 for (entry = critical_fin_list; entry && !entry->object; entry = entry->next)
3573 g_assert (entry->object);
3574 num_ready_finalizers--;
3575 obj = entry->object;
/* Clear the slot so a concurrent requeue of the same object is visible. */
3576 entry->object = NULL;
3577 SGEN_LOG (7, "Finalizing object %p (%s)", obj, safe_name (obj));
3585 g_assert (entry->object == NULL);
3587 /* the object is on the stack so it is pinned */
3588 /*g_print ("Calling finalizer for object: %p (%s)\n", entry->object, safe_name (entry->object));*/
3589 mono_gc_run_finalize (obj, NULL);
/* Returns whether any finalizers (ordinary or critical) are queued. */
3596 mono_gc_pending_finalizers (void)
3598 return fin_ready_list || critical_fin_list;
3602 * ######################################################################
3603 * ######## registered roots support
3604 * ######################################################################
3608 * We do not coalesce roots.
/*
 * Register the range [start, start+size) as a GC root of @root_type,
 * described by @descr. Re-registering an existing start address updates
 * its size and descriptor in place (used for thread statics); the
 * descriptor must stay consistently NULL or non-NULL across updates.
 * LOCKING: presumably requires the GC lock — the locking lines are not
 * visible in this excerpt.
 */
3611 mono_gc_register_root_inner (char *start, size_t size, void *descr, int root_type)
3613 RootRecord new_root;
3616 for (i = 0; i < ROOT_TYPE_NUM; ++i) {
3617 RootRecord *root = sgen_hash_table_lookup (&roots_hash [i], start);
3618 /* we allow changing the size and the descriptor (for thread statics etc) */
3620 size_t old_size = root->end_root - start;
3621 root->end_root = start + size;
3622 g_assert (((root->root_desc != 0) && (descr != NULL)) ||
3623 ((root->root_desc == 0) && (descr == NULL)));
3624 root->root_desc = (mword)descr;
/* Keep the global accounting of registered root bytes up to date. */
3626 roots_size -= old_size;
3632 new_root.end_root = start + size;
3633 new_root.root_desc = (mword)descr;
3635 sgen_hash_table_replace (&roots_hash [root_type], start, &new_root, NULL);
3638 SGEN_LOG (3, "Added root for range: %p-%p, descr: %p (%d/%d bytes)", start, new_root.end_root, descr, (int)size, (int)roots_size);
/* Public root registration: a NULL descriptor means the range is
 * scanned conservatively (pinned), otherwise precisely. */
3645 mono_gc_register_root (char *start, size_t size, void *descr)
3647 return mono_gc_register_root_inner (start, size, descr, descr ? ROOT_TYPE_NORMAL : ROOT_TYPE_PINNED);
/* Register a root range whose stores go through the write barrier. */
3651 mono_gc_register_root_wbarrier (char *start, size_t size, void *descr)
3653 return mono_gc_register_root_inner (start, size, descr, ROOT_TYPE_WBARRIER);
/* Remove @addr from every root table it appears in, updating the
 * registered-root byte accounting. */
3657 mono_gc_deregister_root (char* addr)
3663 for (root_type = 0; root_type < ROOT_TYPE_NUM; ++root_type) {
3664 if (sgen_hash_table_remove (&roots_hash [root_type], addr, &root))
3665 roots_size -= (root.end_root - addr);
3671 * ######################################################################
3672 * ######## Thread handling (stop/start code)
3673 * ######################################################################
/* Incremented on each stop-the-world; used to detect stale stop state. */
3676 unsigned int sgen_global_stop_count = 0;
/* Returns the generation currently being collected, or -1 when no
 * collection is in progress. */
3679 sgen_get_current_collection_generation (void)
3681 return current_collection_generation;
/* Install the runtime's GC callbacks (copied by value). */
3685 mono_gc_set_gc_callbacks (MonoGCCallbacks *callbacks)
3687 gc_callbacks = *callbacks;
/* Returns a pointer to the installed GC callbacks. */
3691 mono_gc_get_gc_callbacks ()
3693 return &gc_callbacks;
3696 /* Variables holding start/end nursery so it won't have to be passed at every call */
3697 static void *scan_area_arg_start, *scan_area_arg_end;
/* Callback used during precise stack scanning: conservatively pin any
 * object in [start, end) that falls inside the nursery range stashed in
 * scan_area_arg_start/end. */
3700 mono_gc_conservatively_scan_area (void *start, void *end)
3702 conservatively_pin_objects_from (start, end, scan_area_arg_start, scan_area_arg_end, PIN_TYPE_STACK);
/* Precise-stack-scan callback: copy/mark one object reference using the
 * copy context stashed in thread-local storage by scan_thread_data. */
3706 mono_gc_scan_object (void *obj)
3708 UserCopyOrMarkData *data = mono_native_tls_get_value (user_copy_or_mark_key);
3709 current_object_ops.copy_or_mark_object (&obj, data->queue);
3714 * Mark from thread stacks and registers.
/*
 * Scan every registered thread's stack (and saved register state) as a
 * GC root. Uses the runtime's precise thread_mark_func when available
 * and allowed; otherwise falls back to conservative pinning of the
 * whole stack range. Dead, GC-disabled and non-running threads are
 * skipped.
 */
3717 scan_thread_data (void *start_nursery, void *end_nursery, gboolean precise, GrayQueue *queue)
3719 SgenThreadInfo *info;
/* Stash the nursery bounds for mono_gc_conservatively_scan_area. */
3721 scan_area_arg_start = start_nursery;
3722 scan_area_arg_end = end_nursery;
3724 FOREACH_THREAD (info) {
3726 SGEN_LOG (3, "Skipping dead thread %p, range: %p-%p, size: %td", info, info->stack_start, info->stack_end, (char*)info->stack_end - (char*)info->stack_start);
3729 if (info->gc_disabled) {
3730 SGEN_LOG (3, "GC disabled for thread %p, range: %p-%p, size: %td", info, info->stack_start, info->stack_end, (char*)info->stack_end - (char*)info->stack_start);
3733 if (mono_thread_info_run_state (info) != STATE_RUNNING) {
3734 SGEN_LOG (3, "Skipping non-running thread %p, range: %p-%p, size: %td (state %d)", info, info->stack_start, info->stack_end, (char*)info->stack_end - (char*)info->stack_start, mono_thread_info_run_state (info));
3737 SGEN_LOG (3, "Scanning thread %p, range: %p-%p, size: %td, pinned=%d", info, info->stack_start, info->stack_end, (char*)info->stack_end - (char*)info->stack_start, sgen_get_pinned_count ());
/* Precise path: delegate to the runtime's mark function with the
 * copy context made available via TLS. */
3738 if (gc_callbacks.thread_mark_func && !conservative_stack_mark) {
3739 UserCopyOrMarkData data = { NULL, queue };
3740 set_user_copy_or_mark_data (&data);
3741 gc_callbacks.thread_mark_func (info->runtime_data, info->stack_start, info->stack_end, precise);
3742 set_user_copy_or_mark_data (NULL);
3743 } else if (!precise) {
/* Precise marking requested but unsupported: permanently fall back
 * to conservative scanning. */
3744 if (!conservative_stack_mark) {
3745 fprintf (stderr, "Precise stack mark not supported - disabling.\n");
3746 conservative_stack_mark = TRUE;
3748 conservatively_pin_objects_from (info->stack_start, info->stack_end, start_nursery, end_nursery, PIN_TYPE_STACK);
/* Also pin from the thread's saved register state (ctx or regs,
 * depending on platform configuration). */
3753 conservatively_pin_objects_from ((void**)&info->ctx, (void**)&info->ctx + ARCH_NUM_REGS,
3754 start_nursery, end_nursery, PIN_TYPE_STACK);
3756 conservatively_pin_objects_from ((void**)&info->regs, (void**)&info->regs + ARCH_NUM_REGS,
3757 start_nursery, end_nursery, PIN_TYPE_STACK);
3760 } END_FOREACH_THREAD
/* Returns whether @ptr lies on the current thread's stack, using the
 * address of a local as the current stack position. */
3764 ptr_on_stack (void *ptr)
3766 gpointer stack_start = &stack_start;
3767 SgenThreadInfo *info = mono_thread_info_current ();
3769 if (ptr >= stack_start && ptr < (gpointer)info->stack_end)
/*
 * sgen_thread_register:
 * Initialize per-thread GC state when a thread attaches: TLS/thread
 * info pointers, TLAB state, stack bounds (from the platform when
 * available, otherwise a page-aligned value derived from @addr), and
 * the runtime's thread_attach callback.
 */
3775 sgen_thread_register (SgenThreadInfo* info, void *addr)
3778 guint8 *staddr = NULL;
3780 #ifndef HAVE_KW_THREAD
3781 info->tlab_start = info->tlab_next = info->tlab_temp_end = info->tlab_real_end = NULL;
/* A thread must not be registered twice. */
3783 g_assert (!mono_native_tls_get_value (thread_info_key));
3784 mono_native_tls_set_value (thread_info_key, info);
3786 sgen_thread_info = info;
3789 #ifdef SGEN_POSIX_STW
3790 info->stop_count = -1;
3794 info->stack_start = NULL;
3795 info->stopped_ip = NULL;
3796 info->stopped_domain = NULL;
3798 memset (&info->ctx, 0, sizeof (MonoContext));
3800 memset (&info->regs, 0, sizeof (info->regs));
3803 sgen_init_tlab_info (info);
3805 binary_protocol_thread_register ((gpointer)mono_thread_info_get_tid (info));
3807 /* On win32, stack_start_limit should be 0, since the stack can grow dynamically */
3809 mono_thread_info_get_stack_bounds (&staddr, &stsize);
3812 info->stack_start_limit = staddr;
3813 info->stack_end = staddr + stsize;
/* Fallback: round the caller-provided address up to a 4 KiB page
 * boundary and use it as the stack end. */
3815 gsize stack_bottom = (gsize)addr;
3816 stack_bottom += 4095;
3817 stack_bottom &= ~4095;
3818 info->stack_end = (char*)stack_bottom;
3821 #ifdef HAVE_KW_THREAD
3822 stack_end = info->stack_end;
3825 SGEN_LOG (3, "registered thread %p (%p) stack end %p", info, (gpointer)mono_thread_info_get_tid (info), info->stack_end);
3827 if (gc_callbacks.thread_attach_func)
3828 info->runtime_data = gc_callbacks.thread_attach_func ();
3833 sgen_thread_detach (SgenThreadInfo *p)
3835 /* If a delegate is passed to native code and invoked on a thread we dont
3836 * know about, the jit will register it with mono_jit_thread_attach, but
3837 * we have no way of knowing when that thread goes away. SGen has a TSD
3838 * so we assume that if the domain is still registered, we can detach
/* Only detach the managed thread when a domain is still set. */
3841 if (mono_domain_get ())
3842 mono_thread_detach_internal (mono_thread_internal_current ());
/* Tear down per-thread GC state when a thread goes away: record it in
 * the binary protocol, hand the tid to the joinable-threads machinery,
 * and run the runtime's thread_detach callback. */
3846 sgen_thread_unregister (SgenThreadInfo *p)
3848 MonoNativeThreadId tid;
3850 tid = mono_thread_info_get_tid (p);
3851 binary_protocol_thread_unregister ((gpointer)tid);
3852 SGEN_LOG (3, "unregister thread %p (%p)", p, (gpointer)tid);
3854 mono_threads_add_joinable_thread ((gpointer)tid);
3856 if (gc_callbacks.thread_detach_func) {
3857 gc_callbacks.thread_detach_func (p->runtime_data);
3858 p->runtime_data = NULL;
/* Late attach hook: ensure runtime_data is populated via the
 * thread_attach callback if it was not set during registration. */
3864 sgen_thread_attach (SgenThreadInfo *info)
3867 /*this is odd, can we get attached before the gc is inited?*/
3871 if (gc_callbacks.thread_attach_func && !info->runtime_data)
3872 info->runtime_data = gc_callbacks.thread_attach_func ();
/* Attach the calling thread to the runtime; returns success. */
3875 mono_gc_register_thread (void *baseptr)
3877 return mono_thread_info_attach (baseptr) != NULL;
3881 * mono_gc_set_stack_end:
3883 * Set the end of the current threads stack to STACK_END. The stack space between
3884 * STACK_END and the real end of the threads stack will not be scanned during collections.
3887 mono_gc_set_stack_end (void *stack_end)
3889 SgenThreadInfo *info;
3892 info = mono_thread_info_current ();
/* The new end must shrink, never grow, the scanned range. */
3894 g_assert (stack_end < info->stack_end);
3895 info->stack_end = stack_end;
3900 #if USE_PTHREAD_INTERCEPT
/* Thin pass-through wrappers kept for builds that intercept pthread
 * calls (Boehm-GC heritage); SGen tracks threads elsewhere, so these
 * mostly just forward to libpthread. */
3904 mono_gc_pthread_create (pthread_t *new_thread, const pthread_attr_t *attr, void *(*start_routine)(void *), void *arg)
3906 return pthread_create (new_thread, attr, start_routine, arg);
3910 mono_gc_pthread_join (pthread_t thread, void **retval)
3912 return pthread_join (thread, retval);
3916 mono_gc_pthread_detach (pthread_t thread)
3918 return pthread_detach (thread);
/* Detach from the runtime's thread machinery before exiting, since
 * pthread_exit never returns. */
3922 mono_gc_pthread_exit (void *retval)
3924 mono_thread_info_detach ();
3925 pthread_exit (retval);
3928 #endif /* USE_PTHREAD_INTERCEPT */
3931 * ######################################################################
3932 * ######## Write barriers
3933 * ######################################################################
3937 * Note: the write barriers first do the needed GC work and then do the actual store:
3938 * this way the value is visible to the conservative GC scan after the write barrier
3939 * itself. If a GC interrupts the barrier in the middle, value will be kept alive by
3940 * the conservative scan, otherwise by the remembered set scan.
/* Write barrier for storing @value into an object field. Stores into
 * nursery objects need no remembered-set entry and are done directly;
 * otherwise the store is delegated to the remset implementation. */
3943 mono_gc_wbarrier_set_field (MonoObject *obj, gpointer field_ptr, MonoObject* value)
3945 HEAVY_STAT (++stat_wbarrier_set_field);
3946 if (ptr_in_nursery (field_ptr)) {
3947 *(void**)field_ptr = value;
3950 SGEN_LOG (8, "Adding remset at %p", field_ptr);
3952 binary_protocol_wbarrier (field_ptr, value, value->vtable);
3954 remset.wbarrier_set_field (obj, field_ptr, value);
/* Write barrier for storing @value into an array slot; same fast path
 * as mono_gc_wbarrier_set_field for nursery-resident slots. */
3958 mono_gc_wbarrier_set_arrayref (MonoArray *arr, gpointer slot_ptr, MonoObject* value)
3960 HEAVY_STAT (++stat_wbarrier_set_arrayref);
3961 if (ptr_in_nursery (slot_ptr)) {
3962 *(void**)slot_ptr = value;
3965 SGEN_LOG (8, "Adding remset at %p", slot_ptr);
3967 binary_protocol_wbarrier (slot_ptr, value, value->vtable);
3969 remset.wbarrier_set_arrayref (arr, slot_ptr, value);
/* Write barrier for bulk-copying @count object references from
 * @src_ptr to @dest_ptr. Nursery destinations (and empty copies) take
 * the plain memmove fast path; otherwise the remset implementation
 * performs the barriered copy. */
3973 mono_gc_wbarrier_arrayref_copy (gpointer dest_ptr, gpointer src_ptr, int count)
3975 HEAVY_STAT (++stat_wbarrier_arrayref_copy);
3976 /*This check can be done without taking a lock since dest_ptr array is pinned*/
3977 if (ptr_in_nursery (dest_ptr) || count <= 0) {
3978 mono_gc_memmove_aligned (dest_ptr, src_ptr, count * sizeof (gpointer));
/* Heavy binary protocol: record every individual slot write. */
3982 #ifdef SGEN_HEAVY_BINARY_PROTOCOL
3983 if (binary_protocol_is_heavy_enabled ()) {
3985 for (i = 0; i < count; ++i) {
3986 gpointer dest = (gpointer*)dest_ptr + i;
3987 gpointer obj = *((gpointer*)src_ptr + i);
3989 binary_protocol_wbarrier (dest, obj, (gpointer)LOAD_VTABLE (obj));
3994 remset.wbarrier_arrayref_copy (dest_ptr, src_ptr, count);
/* Result slot for the pointer-to-object search below. */
3997 static char *found_obj;
/* Iteration callback: record the (unique) object whose extent contains
 * the pointer passed as user_data. */
4000 find_object_for_ptr_callback (char *obj, size_t size, void *user_data)
4002 char *ptr = user_data;
4004 if (ptr >= obj && ptr < obj + size) {
4005 g_assert (!found_obj);
4010 /* for use in the debugger */
4011 char* find_object_for_ptr (char *ptr);
/* Find the object containing @ptr by scanning the nursery, the LOS and
 * finally the whole major heap. Debugging aid only — see the comment
 * below about its inefficiency. */
4013 find_object_for_ptr (char *ptr)
4015 if (ptr >= nursery_section->data && ptr < nursery_section->end_data) {
4017 sgen_scan_area_with_callback (nursery_section->data, nursery_section->end_data,
4018 find_object_for_ptr_callback, ptr, TRUE);
4024 sgen_los_iterate_objects (find_object_for_ptr_callback, ptr);
4029 * Very inefficient, but this is debugging code, supposed to
4030 * be called from gdb, so we don't care.
4033 major_collector.iterate_objects (ITERATE_OBJECTS_SWEEP_ALL, find_object_for_ptr_callback, ptr);
/*
 * Generic write barrier that does NOT perform the store itself: the
 * caller has already written the reference at @ptr. Skips locations in
 * the nursery or on the stack, and old->old stores when no concurrent
 * collection needs them; otherwise records a remembered-set entry.
 */
4038 mono_gc_wbarrier_generic_nostore (gpointer ptr)
4042 HEAVY_STAT (++stat_wbarrier_generic_store);
4044 #ifdef XDOMAIN_CHECKS_IN_WBARRIER
4045 /* FIXME: ptr_in_heap must be called with the GC lock held */
4046 if (xdomain_checks && *(MonoObject**)ptr && ptr_in_heap (ptr)) {
4047 char *start = find_object_for_ptr (ptr);
4048 MonoObject *value = *(MonoObject**)ptr;
4052 MonoObject *obj = (MonoObject*)start;
4053 if (obj->vtable->domain != value->vtable->domain)
4054 g_assert (is_xdomain_ref_allowed (ptr, start, obj->vtable->domain));
4060 obj = *(gpointer*)ptr;
4062 binary_protocol_wbarrier (ptr, obj, (gpointer)LOAD_VTABLE (obj));
/* Nursery and stack locations are scanned anyway; no remset needed. */
4064 if (ptr_in_nursery (ptr) || ptr_on_stack (ptr)) {
4065 SGEN_LOG (8, "Skipping remset at %p", ptr);
4070 * We need to record old->old pointer locations for the
4071 * concurrent collector.
4073 if (!ptr_in_nursery (obj) && !concurrent_collection_in_progress) {
4074 SGEN_LOG (8, "Skipping remset at %p", ptr);
4078 SGEN_LOG (8, "Adding remset at %p", ptr);
4080 remset.wbarrier_generic_nostore (ptr);
/* Generic write barrier that also performs the store. Only nursery
 * values need the remset entry. The dummy use keeps @value live across
 * the barrier for the conservative scanner. */
4084 mono_gc_wbarrier_generic_store (gpointer ptr, MonoObject* value)
4086 SGEN_LOG (8, "Wbarrier store at %p to %p (%s)", ptr, value, value ? safe_name (value) : "null");
4087 *(void**)ptr = value;
4088 if (ptr_in_nursery (value))
4089 mono_gc_wbarrier_generic_nostore (ptr);
4090 sgen_dummy_use (value);
4093 /* Same as mono_gc_wbarrier_generic_store () but performs the store
4094 * as an atomic operation with release semantics.
4097 mono_gc_wbarrier_generic_store_atomic (gpointer ptr, MonoObject *value)
4099 HEAVY_STAT (++stat_wbarrier_generic_store_atomic);
4101 SGEN_LOG (8, "Wbarrier atomic store at %p to %p (%s)", ptr, value, value ? safe_name (value) : "null");
/* Atomic pointer write with release semantics, then the usual barrier. */
4103 InterlockedWritePointer (ptr, value);
4105 if (ptr_in_nursery (value))
4106 mono_gc_wbarrier_generic_nostore (ptr);
4108 sgen_dummy_use (value);
/* Copy a value-type blob word by word, routing the words flagged in
 * @bitmap (reference slots) through the generic store barrier. */
4111 void mono_gc_wbarrier_value_copy_bitmap (gpointer _dest, gpointer _src, int size, unsigned bitmap)
4113 mword *dest = _dest;
4118 mono_gc_wbarrier_generic_store (dest, (MonoObject*)*src);
4123 size -= SIZEOF_VOID_P;
4128 #ifdef SGEN_HEAVY_BINARY_PROTOCOL
/* HANDLE_PTR is consumed by the sgen-scan-object.h template below:
 * for each reference slot in the scanned object, log a wbarrier record
 * at the corresponding offset within the copy destination. */
4130 #define HANDLE_PTR(ptr,obj) do { \
4131 gpointer o = *(gpointer*)(ptr); \
4133 gpointer d = ((char*)dest) + ((char*)(ptr) - (char*)(obj)); \
4134 binary_protocol_wbarrier (d, o, (gpointer) LOAD_VTABLE (o)); \
/* Template-instantiated scanner (no vtable assumed) used only for
 * heavy binary-protocol logging of value-copy barriers. */
4139 scan_object_for_binary_protocol_copy_wbarrier (gpointer dest, char *start, mword desc)
4141 #define SCAN_OBJECT_NOVTABLE
4142 #include "sgen-scan-object.h"
/*
 * Write barrier for copying @count value-type instances of @klass from
 * @src to @dest. Destinations in the nursery or on the stack, or value
 * types without reference fields, take the plain memmove fast path;
 * otherwise the remset implementation performs the barriered copy.
 */
4147 mono_gc_wbarrier_value_copy (gpointer dest, gpointer src, int count, MonoClass *klass)
4149 HEAVY_STAT (++stat_wbarrier_value_copy);
4150 g_assert (klass->valuetype);
4152 SGEN_LOG (8, "Adding value remset at %p, count %d, descr %p for class %s (%p)", dest, count, klass->gc_descr, klass->name, klass);
4154 if (ptr_in_nursery (dest) || ptr_on_stack (dest) || !SGEN_CLASS_HAS_REFERENCES (klass)) {
4155 size_t element_size = mono_class_value_size (klass, NULL);
4156 size_t size = count * element_size;
4157 mono_gc_memmove_atomic (dest, src, size);
/* Heavy binary protocol: log every reference slot in every element.
 * The source is offset back by sizeof(MonoObject) because the scan
 * template expects an object header before the fields. */
4161 #ifdef SGEN_HEAVY_BINARY_PROTOCOL
4162 if (binary_protocol_is_heavy_enabled ()) {
4163 size_t element_size = mono_class_value_size (klass, NULL);
4165 for (i = 0; i < count; ++i) {
4166 scan_object_for_binary_protocol_copy_wbarrier ((char*)dest + i * element_size,
4167 (char*)src + i * element_size - sizeof (MonoObject),
4168 (mword) klass->gc_descr);
4173 remset.wbarrier_value_copy (dest, src, count, klass);
4177 * mono_gc_wbarrier_object_copy:
4179 * Write barrier to call when obj is the result of a clone or copy of an object.
4182 mono_gc_wbarrier_object_copy (MonoObject* obj, MonoObject *src)
4186 HEAVY_STAT (++stat_wbarrier_object_copy);
/* Fast path: nursery/stack destinations need no remset; copy the
 * object body (everything after the MonoObject header) directly. */
4188 if (ptr_in_nursery (obj) || ptr_on_stack (obj)) {
4189 size = mono_object_class (obj)->instance_size;
4190 mono_gc_memmove_aligned ((char*)obj + sizeof (MonoObject), (char*)src + sizeof (MonoObject),
4191 size - sizeof (MonoObject));
4195 #ifdef SGEN_HEAVY_BINARY_PROTOCOL
4196 if (binary_protocol_is_heavy_enabled ())
4197 scan_object_for_binary_protocol_copy_wbarrier (obj, (char*)src, (mword) src->vtable->gc_descr);
4200 remset.wbarrier_object_copy (obj, src);
4205 * ######################################################################
4206 * ######## Other mono public interface functions.
4207 * ######################################################################
/* Maximum object references buffered per heap-walk callback invocation. */
4210 #define REFS_SIZE 128
/* (HeapWalkInfo fields) per-walk state consumed by the scan macro below. */
4213 MonoGCReferences callback;
4217 MonoObject *refs [REFS_SIZE];
4218 uintptr_t offsets [REFS_SIZE];
/* Buffer each reference and its offset; flush through the user callback
 * when the buffer fills (size reported only on the first flush). */
4222 #define HANDLE_PTR(ptr,obj) do { \
4224 if (hwi->count == REFS_SIZE) { \
4225 hwi->callback ((MonoObject*)start, mono_object_class (start), hwi->called? 0: size, hwi->count, hwi->refs, hwi->offsets, hwi->data); \
4229 hwi->offsets [hwi->count] = (char*)(ptr)-(char*)start; \
4230 hwi->refs [hwi->count++] = *(ptr); \
/* Collect the references of the object at `start` into hwi->refs/offsets. */
4235 collect_references (HeapWalkInfo *hwi, char *start, size_t size)
4237 #include "sgen-scan-object.h"
/* Per-object iterator callback for mono_gc_walk_heap: gather references,
 * then report the object (size is passed only on the first callback for
 * a given object — later flushes pass 0). */
4241 walk_references (char *start, size_t size, void *data)
4243 HeapWalkInfo *hwi = data;
4246 collect_references (hwi, start, size);
4247 if (hwi->count || !hwi->called)
4248 hwi->callback ((MonoObject*)start, mono_object_class (start), hwi->called? 0: size, hwi->count, hwi->refs, hwi->offsets, hwi->data);
4252 * mono_gc_walk_heap:
4253 * @flags: flags for future use
4254 * @callback: a function pointer called for each object in the heap
4255 * @data: a user data pointer that is passed to callback
4257 * This function can be used to iterate over all the live objects in the heap:
4258 * for each object, @callback is invoked, providing info about the object's
4259 * location in memory, its class, its size and the objects it references.
4260 * For each referenced object it's offset from the object address is
4261 * reported in the offsets array.
4262 * The object references may be buffered, so the callback may be invoked
4263 * multiple times for the same object: in all but the first call, the size
4264 * argument will be zero.
4265 * Note that this function can be only called in the #MONO_GC_EVENT_PRE_START_WORLD
4266 * profiler event handler.
4268 * Returns: a non-zero value if the GC doesn't support heap walking
4271 mono_gc_walk_heap (int flags, MonoGCReferences callback, void *data)
4276 hwi.callback = callback;
/* Visit all three spaces: nursery, major heap (all blocks, including
 * those pending sweep) and the large-object space. */
4279 sgen_clear_nursery_fragments ();
4280 sgen_scan_area_with_callback (nursery_section->data, nursery_section->end_data, walk_references, &hwi, FALSE);
4282 major_collector.iterate_objects (ITERATE_OBJECTS_SWEEP_ALL, walk_references, &hwi);
4283 sgen_los_iterate_objects (walk_references, &hwi);
/* Trigger a collection of `generation` on behalf of user code. */
4289 mono_gc_collect (int generation)
4294 sgen_perform_collection (0, generation, "user request", TRUE);
4299 mono_gc_max_generation (void)
/* Generation 0 maps to the minor-GC counter; anything else to major. */
4305 mono_gc_collection_count (int generation)
4307 if (generation == 0)
4308 return stat_minor_gcs;
4309 return stat_major_gcs;
/* Approximate used size: LOS + in-use nursery + major heap.
 * NOTE(review): pinned objects are not accounted for (see FIXME below). */
4313 mono_gc_get_used_size (void)
4317 tot = los_memory_usage;
4318 tot += nursery_section->next_data - nursery_section->data;
4319 tot += major_collector.get_used_size ();
4320 /* FIXME: account for pinned objects */
/* Objects at or above this size are allocated in the large-object space. */
4326 mono_gc_get_los_limit (void)
4328 return MAX_SMALL_OBJ_SIZE;
4332 mono_gc_user_markers_supported (void)
4338 mono_object_is_alive (MonoObject* o)
4344 mono_gc_get_generation (MonoObject *obj)
4346 if (ptr_in_nursery (obj))
4352 mono_gc_enable_events (void)
/* Register a weak link to `obj`; `track` selects track-resurrection. */
4357 mono_gc_weak_link_add (void **link_addr, MonoObject *obj, gboolean track)
4359 sgen_register_disappearing_link (obj, link_addr, track, FALSE);
/* Unregister: passing a NULL object clears the disappearing link. */
4363 mono_gc_weak_link_remove (void **link_addr, gboolean track)
4365 sgen_register_disappearing_link (NULL, link_addr, track, FALSE);
/*
 * Dereference a weak link safely with respect to a concurrently running
 * GC and the bridge processor; the visible tail re-checks the slot so a
 * stale (freed) target is never returned.
 */
4369 mono_gc_weak_link_get (void **link_addr)
/* volatile pointee: force a genuine re-load of the hidden pointer below. */
4371 void * volatile *link_addr_volatile;
4375 link_addr_volatile = link_addr;
4376 ptr = (void*)*link_addr_volatile;
4378 * At this point we have a hidden pointer. If the GC runs
4379 * here, it will not recognize the hidden pointer as a
4380 * reference, and if the object behind it is not referenced
4381 * elsewhere, it will be freed. Once the world is restarted
4382 * we reveal the pointer, giving us a pointer to a freed
4383 * object. To make sure we don't return it, we load the
4384 * hidden pointer again. If it's still the same, we can be
4385 * sure the object reference is valid.
4388 obj = (MonoObject*) REVEAL_POINTER (ptr);
4392 mono_memory_barrier ();
4395 * During the second bridge processing step the world is
4396 * running again. That step processes all weak links once
4397 * more to null those that refer to dead objects. Before that
4398 * is completed, those links must not be followed, so we
4399 * conservatively wait for bridge processing when any weak
4400 * link is dereferenced.
4402 if (G_UNLIKELY (bridge_processing_in_progress))
4403 mono_gc_wait_for_bridge_processing ();
/* Re-check: if the slot changed under us the reveal was stale. */
4405 if ((void*)*link_addr_volatile != ptr)
/* Register an ephemeron array with the GC; the node is prepended to the
 * global ephemeron_list consulted during collection. */
4412 mono_gc_ephemeron_array_add (MonoObject *obj)
4414 EphemeronLinkNode *node;
4418 node = sgen_alloc_internal (INTERNAL_MEM_EPHEMERON_LINK);
4423 node->array = (char*)obj;
4424 node->next = ephemeron_list;
4425 ephemeron_list = node;
4427 SGEN_LOG (5, "Registered ephemeron array %p", obj);
/* Only meaningful for the concurrent major collector; ignored otherwise. */
4434 mono_gc_set_allow_synchronous_major (gboolean flag)
4436 if (!major_collector.is_concurrent)
4439 allow_synchronous_major = flag;
/* Run `func` while holding the GC interruption lock. */
4444 mono_gc_invoke_with_gc_lock (MonoGCLockedCallbackFunc func, void *data)
4448 result = func (data);
4449 UNLOCK_INTERRUPTION;
/* A thread counts as a GC thread when it is registered with thread-info. */
4454 mono_gc_is_gc_thread (void)
4458 result = mono_thread_info_current () != NULL;
/* A method is critical if either the runtime or sgen considers it so. */
4464 is_critical_method (MonoMethod *method)
4466 return mono_runtime_is_critical_method (method) || sgen_is_critical_method (method);
/* printf-style warning for a bad GC environment-variable setting;
 * `fallback` (which the visible code prints unconditionally at 4479 —
 * NOTE(review): callers pass NULL, presumably guarded by a missing line)
 * describes what will be used instead. */
4470 sgen_env_var_error (const char *env_var, const char *fallback, const char *description_format, ...)
4474 va_start (ap, description_format);
4476 fprintf (stderr, "Warning: In environment variable `%s': ", env_var);
4477 vfprintf (stderr, description_format, ap);
4479 fprintf (stderr, " - %s", fallback);
4480 fprintf (stderr, "\n");
/* Parse `opt` as a double constrained to [min, max]; warns and reports
 * failure when unparsable or out of range. */
4486 parse_double_in_interval (const char *env_var, const char *opt_name, const char *opt, double min, double max, double *result)
4489 double val = strtod (opt, &endptr);
4490 if (endptr == opt) {
4491 sgen_env_var_error (env_var, "Using default value.", "`%s` must be a number.", opt_name);
4494 else if (val < min || val > max) {
4495 sgen_env_var_error (env_var, "Using default value.", "`%s` must be between %.2f - %.2f.", opt_name, min, max);
/*
 * One-time initialization of the SGen GC: guards against concurrent
 * init, registers thread callbacks, parses MONO_GC_PARAMS and
 * MONO_GC_DEBUG, selects minor/major collectors, and initializes the
 * memory governor, cementing and the card-table remembered set.
 */
4503 mono_gc_base_init (void)
4505 MonoThreadInfoCallbacks cb;
4508 char *major_collector_opt = NULL;
4509 char *minor_collector_opt = NULL;
4510 size_t max_heap = 0;
4511 size_t soft_limit = 0;
4515 gboolean debug_print_allowance = FALSE;
4516 double allowance_ratio = 0, save_target = 0;
4517 gboolean have_split_nursery = FALSE;
4518 gboolean cement_enabled = TRUE;
/* CAS-based init guard: 0 = uninitialized, -1 = initializing, else done. */
4521 result = InterlockedCompareExchange (&gc_initialized, -1, 0);
4524 /* already inited */
4527 /* being inited by another thread */
4531 /* we will init it */
4534 g_assert_not_reached ();
4536 } while (result != 0);
4538 SGEN_TV_GETTIME (sgen_init_timestamp);
4540 LOCK_INIT (gc_mutex);
4542 pagesize = mono_pagesize ();
4543 gc_debug_file = stderr;
/* Hook sgen's thread lifecycle callbacks into the threading subsystem. */
4545 cb.thread_register = sgen_thread_register;
4546 cb.thread_detach = sgen_thread_detach;
4547 cb.thread_unregister = sgen_thread_unregister;
4548 cb.thread_attach = sgen_thread_attach;
4549 cb.mono_method_is_critical = (gpointer)is_critical_method;
4551 cb.thread_exit = mono_gc_pthread_exit;
4552 cb.mono_gc_pthread_create = (gpointer)mono_gc_pthread_create;
4555 mono_threads_init (&cb, sizeof (SgenThreadInfo));
4557 LOCK_INIT (sgen_interruption_mutex);
4558 LOCK_INIT (pin_queue_mutex);
4560 init_user_copy_or_mark_key ();
/* First pass over MONO_GC_PARAMS: only extract collector choices, since
 * later options are validated against the selected collectors. */
4562 if ((env = g_getenv (MONO_GC_PARAMS_NAME))) {
4563 opts = g_strsplit (env, ",", -1);
4564 for (ptr = opts; *ptr; ++ptr) {
4566 if (g_str_has_prefix (opt, "major=")) {
4567 opt = strchr (opt, '=') + 1;
4568 major_collector_opt = g_strdup (opt);
4569 } else if (g_str_has_prefix (opt, "minor=")) {
4570 opt = strchr (opt, '=') + 1;
4571 minor_collector_opt = g_strdup (opt);
4579 sgen_init_internal_allocator ();
4580 sgen_init_nursery_allocator ();
4581 sgen_init_fin_weak_hash ();
4583 sgen_init_hash_table ();
4585 sgen_register_fixed_internal_mem_type (INTERNAL_MEM_SECTION, SGEN_SIZEOF_GC_MEM_SECTION);
4586 sgen_register_fixed_internal_mem_type (INTERNAL_MEM_FINALIZE_READY_ENTRY, sizeof (FinalizeReadyEntry));
4587 sgen_register_fixed_internal_mem_type (INTERNAL_MEM_GRAY_QUEUE, sizeof (GrayQueueSection));
4588 sgen_register_fixed_internal_mem_type (INTERNAL_MEM_EPHEMERON_LINK, sizeof (EphemeronLinkNode));
4590 #ifndef HAVE_KW_THREAD
4591 mono_native_tls_alloc (&thread_info_key, NULL);
4592 #if defined(__APPLE__) || defined (HOST_WIN32)
4594 * CEE_MONO_TLS requires the tls offset, not the key, so the code below only works on darwin,
4595 * where the two are the same.
4597 mono_tls_key_set_offset (TLS_KEY_SGEN_THREAD_INFO, thread_info_key);
4601 int tls_offset = -1;
4602 MONO_THREAD_VAR_OFFSET (sgen_thread_info, tls_offset);
4603 mono_tls_key_set_offset (TLS_KEY_SGEN_THREAD_INFO, tls_offset);
4608 * This needs to happen before any internal allocations because
4609 * it inits the small id which is required for hazard pointer
4614 mono_thread_info_attach (&dummy);
/* Minor collector selection: default is the simple nursery. */
4616 if (!minor_collector_opt) {
4617 sgen_simple_nursery_init (&sgen_minor_collector);
4619 if (!strcmp (minor_collector_opt, "simple")) {
4621 sgen_simple_nursery_init (&sgen_minor_collector);
4622 } else if (!strcmp (minor_collector_opt, "split")) {
4623 sgen_split_nursery_init (&sgen_minor_collector);
4624 have_split_nursery = TRUE;
4626 sgen_env_var_error (MONO_GC_PARAMS_NAME, "Using `simple` instead.", "Unknown minor collector `%s'.", minor_collector_opt);
4627 goto use_simple_nursery;
/* Major collector selection: default is mark&sweep.
 * NOTE(review): the `!major_collector_opt ||` in the branches below is
 * dead code — a NULL option is already consumed by the first branch. */
4631 if (!major_collector_opt || !strcmp (major_collector_opt, "marksweep")) {
4632 use_marksweep_major:
4633 sgen_marksweep_init (&major_collector);
4634 } else if (!major_collector_opt || !strcmp (major_collector_opt, "marksweep-fixed")) {
4635 sgen_marksweep_fixed_init (&major_collector);
4636 } else if (!major_collector_opt || !strcmp (major_collector_opt, "marksweep-par")) {
4637 sgen_marksweep_par_init (&major_collector);
4638 } else if (!major_collector_opt || !strcmp (major_collector_opt, "marksweep-fixed-par")) {
4639 sgen_marksweep_fixed_par_init (&major_collector);
4640 } else if (!major_collector_opt || !strcmp (major_collector_opt, "marksweep-conc")) {
4641 sgen_marksweep_conc_init (&major_collector);
4643 sgen_env_var_error (MONO_GC_PARAMS_NAME, "Using `marksweep` instead.", "Unknown major collector `%s'.", major_collector_opt);
4644 goto use_marksweep_major;
4647 if (have_split_nursery && major_collector.is_parallel) {
4648 sgen_env_var_error (MONO_GC_PARAMS_NAME, "Disabling split minor collector.", "`minor=split` is not supported with the parallel collector yet.");
4649 have_split_nursery = FALSE;
/* Default worker count: one per CPU, capped (cap applied at 4654+). */
4652 num_workers = mono_cpu_count ();
4653 g_assert (num_workers > 0);
4654 if (num_workers > 16)
4657 ///* Keep this the default for now */
4658 /* Precise marking is broken on all supported targets. Disable until fixed. */
4659 conservative_stack_mark = TRUE;
4661 sgen_nursery_size = DEFAULT_NURSERY_SIZE;
/* Second pass over MONO_GC_PARAMS: everything except major=/minor=. */
4664 gboolean usage_printed = FALSE;
4666 for (ptr = opts; *ptr; ++ptr) {
4668 if (!strcmp (opt, ""))
4670 if (g_str_has_prefix (opt, "major="))
4672 if (g_str_has_prefix (opt, "minor="))
4674 if (g_str_has_prefix (opt, "max-heap-size=")) {
4675 size_t max_heap_candidate = 0;
4676 opt = strchr (opt, '=') + 1;
4677 if (*opt && mono_gc_parse_environment_string_extract_number (opt, &max_heap_candidate)) {
/* Round up to a whole number of pages. */
4678 max_heap = (max_heap_candidate + mono_pagesize () - 1) & ~(size_t)(mono_pagesize () - 1);
4679 if (max_heap != max_heap_candidate)
4680 sgen_env_var_error (MONO_GC_PARAMS_NAME, "Rounding up.", "`max-heap-size` size must be a multiple of %d.", mono_pagesize ());
4682 sgen_env_var_error (MONO_GC_PARAMS_NAME, NULL, "`max-heap-size` must be an integer.");
4686 if (g_str_has_prefix (opt, "soft-heap-limit=")) {
4687 opt = strchr (opt, '=') + 1;
4688 if (*opt && mono_gc_parse_environment_string_extract_number (opt, &soft_limit)) {
4689 if (soft_limit <= 0) {
4690 sgen_env_var_error (MONO_GC_PARAMS_NAME, NULL, "`soft-heap-limit` must be positive.");
4694 sgen_env_var_error (MONO_GC_PARAMS_NAME, NULL, "`soft-heap-limit` must be an integer.");
4698 if (g_str_has_prefix (opt, "workers=")) {
4701 if (!major_collector.is_parallel) {
4702 sgen_env_var_error (MONO_GC_PARAMS_NAME, "Ignoring.", "The `workers` option can only be used for parallel collectors.");
4705 opt = strchr (opt, '=') + 1;
4706 val = strtol (opt, &endptr, 10);
4707 if (!*opt || *endptr) {
4708 sgen_env_var_error (MONO_GC_PARAMS_NAME, "Ignoring.", "Cannot parse the `workers` option value.");
4711 if (val <= 0 || val > 16) {
4712 sgen_env_var_error (MONO_GC_PARAMS_NAME, "Using default value.", "The number of `workers` must be in the range 1 to 16.");
4715 num_workers = (int)val;
4718 if (g_str_has_prefix (opt, "stack-mark=")) {
4719 opt = strchr (opt, '=') + 1;
4720 if (!strcmp (opt, "precise")) {
4721 conservative_stack_mark = FALSE;
4722 } else if (!strcmp (opt, "conservative")) {
4723 conservative_stack_mark = TRUE;
4725 sgen_env_var_error (MONO_GC_PARAMS_NAME, conservative_stack_mark ? "Using `conservative`." : "Using `precise`.",
4726 "Invalid value `%s` for `stack-mark` option, possible values are: `precise`, `conservative`.", opt);
4730 if (g_str_has_prefix (opt, "bridge-implementation=")) {
4731 opt = strchr (opt, '=') + 1;
4732 sgen_set_bridge_implementation (opt);
4735 if (g_str_has_prefix (opt, "toggleref-test")) {
4736 sgen_register_test_toggleref_callback ();
4741 if (g_str_has_prefix (opt, "nursery-size=")) {
4743 opt = strchr (opt, '=') + 1;
4744 if (*opt && mono_gc_parse_environment_string_extract_number (opt, &val)) {
4745 #ifdef SGEN_ALIGN_NURSERY
/* Aligned nurseries must be a power of two and not too small. */
4746 if ((val & (val - 1))) {
4747 sgen_env_var_error (MONO_GC_PARAMS_NAME, "Using default value.", "`nursery-size` must be a power of two.");
4751 if (val < SGEN_MAX_NURSERY_WASTE) {
4752 sgen_env_var_error (MONO_GC_PARAMS_NAME, "Using default value.",
4753 "`nursery-size` must be at least %d bytes.\n", SGEN_MAX_NURSERY_WASTE);
4757 sgen_nursery_size = val;
4758 sgen_nursery_bits = 0;
4759 while (1 << (++ sgen_nursery_bits) != sgen_nursery_size)
4762 sgen_nursery_size = val;
4765 sgen_env_var_error (MONO_GC_PARAMS_NAME, "Using default value.", "`nursery-size` must be an integer.");
4771 if (g_str_has_prefix (opt, "save-target-ratio=")) {
4773 opt = strchr (opt, '=') + 1;
4774 if (parse_double_in_interval (MONO_GC_PARAMS_NAME, "save-target-ratio", opt,
4775 SGEN_MIN_SAVE_TARGET_RATIO, SGEN_MAX_SAVE_TARGET_RATIO, &val)) {
4780 if (g_str_has_prefix (opt, "default-allowance-ratio=")) {
4782 opt = strchr (opt, '=') + 1;
/* NOTE(review): MIN is passed as both bounds; the usage text at 4848
 * prints MIN..MAX — the upper bound here should likely be
 * SGEN_MAX_ALLOWANCE_NURSERY_SIZE_RATIO. Confirm before fixing. */
4783 if (parse_double_in_interval (MONO_GC_PARAMS_NAME, "default-allowance-ratio", opt,
4784 SGEN_MIN_ALLOWANCE_NURSERY_SIZE_RATIO, SGEN_MIN_ALLOWANCE_NURSERY_SIZE_RATIO, &val)) {
4785 allowance_ratio = val;
4789 if (g_str_has_prefix (opt, "allow-synchronous-major=")) {
4790 if (!major_collector.is_concurrent) {
4791 sgen_env_var_error (MONO_GC_PARAMS_NAME, "Ignoring.", "`allow-synchronous-major` is only valid for the concurrent major collector.");
4795 opt = strchr (opt, '=') + 1;
4797 if (!strcmp (opt, "yes")) {
4798 allow_synchronous_major = TRUE;
4799 } else if (!strcmp (opt, "no")) {
4800 allow_synchronous_major = FALSE;
4802 sgen_env_var_error (MONO_GC_PARAMS_NAME, "Using default value.", "`allow-synchronous-major` must be either `yes' or `no'.");
4807 if (!strcmp (opt, "cementing")) {
4808 if (major_collector.is_parallel) {
4809 sgen_env_var_error (MONO_GC_PARAMS_NAME, "Ignoring.", "`cementing` is not supported for the parallel major collector.");
4812 cement_enabled = TRUE;
4815 if (!strcmp (opt, "no-cementing")) {
4816 cement_enabled = FALSE;
/* Give the selected collectors a chance to consume their own options. */
4820 if (major_collector.handle_gc_param && major_collector.handle_gc_param (opt))
4823 if (sgen_minor_collector.handle_gc_param && sgen_minor_collector.handle_gc_param (opt))
4826 sgen_env_var_error (MONO_GC_PARAMS_NAME, "Ignoring.", "Unknown option `%s`.", opt);
4831 fprintf (stderr, "\n%s must be a comma-delimited list of one or more of the following:\n", MONO_GC_PARAMS_NAME);
4832 fprintf (stderr, " max-heap-size=N (where N is an integer, possibly with a k, m or a g suffix)\n");
4833 fprintf (stderr, " soft-heap-limit=n (where N is an integer, possibly with a k, m or a g suffix)\n");
4834 fprintf (stderr, " nursery-size=N (where N is an integer, possibly with a k, m or a g suffix)\n");
4835 fprintf (stderr, " major=COLLECTOR (where COLLECTOR is `marksweep', `marksweep-conc', `marksweep-par', 'marksweep-fixed' or 'marksweep-fixed-par')\n");
4836 fprintf (stderr, " minor=COLLECTOR (where COLLECTOR is `simple' or `split')\n");
4837 fprintf (stderr, " wbarrier=WBARRIER (where WBARRIER is `remset' or `cardtable')\n");
4838 fprintf (stderr, " stack-mark=MARK-METHOD (where MARK-METHOD is 'precise' or 'conservative')\n");
4839 fprintf (stderr, " [no-]cementing\n");
4840 if (major_collector.is_concurrent)
4841 fprintf (stderr, " allow-synchronous-major=FLAG (where FLAG is `yes' or `no')\n");
4842 if (major_collector.print_gc_param_usage)
4843 major_collector.print_gc_param_usage ();
4844 if (sgen_minor_collector.print_gc_param_usage)
4845 sgen_minor_collector.print_gc_param_usage ();
4846 fprintf (stderr, " Experimental options:\n");
4847 fprintf (stderr, " save-target-ratio=R (where R must be between %.2f - %.2f).\n", SGEN_MIN_SAVE_TARGET_RATIO, SGEN_MAX_SAVE_TARGET_RATIO);
4848 fprintf (stderr, " default-allowance-ratio=R (where R must be between %.2f - %.2f).\n", SGEN_MIN_ALLOWANCE_NURSERY_SIZE_RATIO, SGEN_MAX_ALLOWANCE_NURSERY_SIZE_RATIO);
4849 fprintf (stderr, "\n");
4851 usage_printed = TRUE;
/* Worker threads: parallel major uses N workers (no cementing); the
 * concurrent major uses a single worker. */
4856 if (major_collector.is_parallel) {
4857 cement_enabled = FALSE;
4858 sgen_workers_init (num_workers);
4859 } else if (major_collector.is_concurrent) {
4860 sgen_workers_init (1);
4863 if (major_collector_opt)
4864 g_free (major_collector_opt);
4866 if (minor_collector_opt)
4867 g_free (minor_collector_opt);
4871 sgen_cement_init (cement_enabled);
/* Parse MONO_GC_DEBUG: a leading digit is the debug level, optionally
 * followed by a log-file name; the rest are named debug switches. */
4873 if ((env = g_getenv (MONO_GC_DEBUG_NAME))) {
4874 gboolean usage_printed = FALSE;
4876 opts = g_strsplit (env, ",", -1);
4877 for (ptr = opts; ptr && *ptr; ptr ++) {
4879 if (!strcmp (opt, ""))
4881 if (opt [0] >= '0' && opt [0] <= '9') {
4882 gc_debug_level = atoi (opt);
4888 char *rf = g_strdup_printf ("%s.%d", opt, GetCurrentProcessId ());
4890 char *rf = g_strdup_printf ("%s.%d", opt, getpid ());
4892 gc_debug_file = fopen (rf, "wb");
4894 gc_debug_file = stderr;
4897 } else if (!strcmp (opt, "print-allowance")) {
4898 debug_print_allowance = TRUE;
4899 } else if (!strcmp (opt, "print-pinning")) {
4900 do_pin_stats = TRUE;
4901 } else if (!strcmp (opt, "verify-before-allocs")) {
4902 verify_before_allocs = 1;
4903 has_per_allocation_action = TRUE;
4904 } else if (g_str_has_prefix (opt, "verify-before-allocs=")) {
4905 char *arg = strchr (opt, '=') + 1;
4906 verify_before_allocs = atoi (arg);
4907 has_per_allocation_action = TRUE;
4908 } else if (!strcmp (opt, "collect-before-allocs")) {
4909 collect_before_allocs = 1;
4910 has_per_allocation_action = TRUE;
4911 } else if (g_str_has_prefix (opt, "collect-before-allocs=")) {
4912 char *arg = strchr (opt, '=') + 1;
4913 has_per_allocation_action = TRUE;
4914 collect_before_allocs = atoi (arg);
4915 } else if (!strcmp (opt, "verify-before-collections")) {
4916 whole_heap_check_before_collection = TRUE;
4917 } else if (!strcmp (opt, "check-at-minor-collections")) {
4918 consistency_check_at_minor_collection = TRUE;
4919 nursery_clear_policy = CLEAR_AT_GC;
4920 } else if (!strcmp (opt, "mod-union-consistency-check")) {
4921 if (!major_collector.is_concurrent) {
4922 sgen_env_var_error (MONO_GC_DEBUG_NAME, "Ignoring.", "`mod-union-consistency-check` only works with concurrent major collector.");
4925 mod_union_consistency_check = TRUE;
4926 } else if (!strcmp (opt, "check-mark-bits")) {
4927 check_mark_bits_after_major_collection = TRUE;
4928 } else if (!strcmp (opt, "check-nursery-pinned")) {
4929 check_nursery_objects_pinned = TRUE;
4930 } else if (!strcmp (opt, "xdomain-checks")) {
4931 xdomain_checks = TRUE;
4932 } else if (!strcmp (opt, "clear-at-gc")) {
4933 nursery_clear_policy = CLEAR_AT_GC;
4934 } else if (!strcmp (opt, "clear-nursery-at-gc")) {
4935 nursery_clear_policy = CLEAR_AT_GC;
4936 } else if (!strcmp (opt, "check-scan-starts")) {
4937 do_scan_starts_check = TRUE;
4938 } else if (!strcmp (opt, "verify-nursery-at-minor-gc")) {
4939 do_verify_nursery = TRUE;
4940 } else if (!strcmp (opt, "check-concurrent")) {
4941 if (!major_collector.is_concurrent) {
4942 sgen_env_var_error (MONO_GC_DEBUG_NAME, "Ignoring.", "`check-concurrent` only works with concurrent major collectors.");
4945 do_concurrent_checks = TRUE;
4946 } else if (!strcmp (opt, "dump-nursery-at-minor-gc")) {
4947 do_dump_nursery_content = TRUE;
4948 } else if (!strcmp (opt, "no-managed-allocator")) {
4949 sgen_set_use_managed_allocator (FALSE);
4950 } else if (!strcmp (opt, "disable-minor")) {
4951 disable_minor_collections = TRUE;
4952 } else if (!strcmp (opt, "disable-major")) {
4953 disable_major_collections = TRUE;
4954 } else if (g_str_has_prefix (opt, "heap-dump=")) {
4955 char *filename = strchr (opt, '=') + 1;
4956 nursery_clear_policy = CLEAR_AT_GC;
4957 heap_dump_file = fopen (filename, "w");
4958 if (heap_dump_file) {
4959 fprintf (heap_dump_file, "<sgen-dump>\n");
4960 do_pin_stats = TRUE;
4962 } else if (g_str_has_prefix (opt, "binary-protocol=")) {
4963 char *filename = strchr (opt, '=') + 1;
4964 binary_protocol_init (filename);
4965 } else if (!sgen_bridge_handle_gc_debug (opt)) {
4966 sgen_env_var_error (MONO_GC_DEBUG_NAME, "Ignoring.", "Unknown option `%s`.", opt);
4971 fprintf (stderr, "\n%s must be of the format [<l>[:<filename>]|<option>]+ where <l> is a debug level 0-9.\n", MONO_GC_DEBUG_NAME);
4972 fprintf (stderr, "Valid <option>s are:\n");
4973 fprintf (stderr, " collect-before-allocs[=<n>]\n");
4974 fprintf (stderr, " verify-before-allocs[=<n>]\n");
4975 fprintf (stderr, " check-at-minor-collections\n");
4976 fprintf (stderr, " check-mark-bits\n");
4977 fprintf (stderr, " check-nursery-pinned\n");
4978 fprintf (stderr, " verify-before-collections\n");
4979 fprintf (stderr, " verify-nursery-at-minor-gc\n");
4980 fprintf (stderr, " dump-nursery-at-minor-gc\n");
4981 fprintf (stderr, " disable-minor\n");
4982 fprintf (stderr, " disable-major\n");
4983 fprintf (stderr, " xdomain-checks\n");
4984 fprintf (stderr, " check-concurrent\n");
4985 fprintf (stderr, " clear-at-gc\n");
4986 fprintf (stderr, " clear-nursery-at-gc\n");
4987 fprintf (stderr, " check-scan-starts\n");
4988 fprintf (stderr, " no-managed-allocator\n");
4989 fprintf (stderr, " print-allowance\n");
4990 fprintf (stderr, " print-pinning\n");
4991 fprintf (stderr, " heap-dump=<filename>\n");
4992 fprintf (stderr, " binary-protocol=<filename>\n");
4993 sgen_bridge_print_gc_debug_usage ();
4994 fprintf (stderr, "\n");
4996 usage_printed = TRUE;
/* Debug features incompatible with the parallel collector are revoked. */
5002 if (major_collector.is_parallel) {
5003 if (heap_dump_file) {
5004 sgen_env_var_error (MONO_GC_DEBUG_NAME, "Disabling.", "Cannot do `heap-dump` with the parallel collector.");
5005 fclose (heap_dump_file);
5006 heap_dump_file = NULL;
5009 sgen_env_var_error (MONO_GC_DEBUG_NAME, "Disabling.", "`print-pinning` is not supported with the parallel collector.");
5010 do_pin_stats = FALSE;
5014 if (major_collector.post_param_init)
5015 major_collector.post_param_init (&major_collector);
5017 sgen_memgov_init (max_heap, soft_limit, debug_print_allowance, allowance_ratio, save_target);
5019 memset (&remset, 0, sizeof (remset));
5021 sgen_card_table_init (&remset);
5027 mono_gc_get_gc_name (void)
/* The lazily created managed write-barrier wrapper (see below). */
5032 static MonoMethod *write_barrier_method;
/* A method is sgen-critical if it is the write barrier or a managed
 * allocator (it must not be interrupted by the GC). */
5035 sgen_is_critical_method (MonoMethod *method)
5037 return (method == write_barrier_method || sgen_is_managed_allocator (method));
5041 sgen_has_critical_method (void)
5043 return write_barrier_method || sgen_has_managed_allocator ();
/*
 * Emit IL that returns early from the write barrier when no card needs
 * marking: the up-to-three forward branches taken on the "return" paths
 * are stored in nursery_check_return_labels for the caller to patch.
 */
5049 emit_nursery_check (MonoMethodBuilder *mb, int *nursery_check_return_labels)
5051 memset (nursery_check_return_labels, 0, sizeof (int) * 3);
5052 #ifdef SGEN_ALIGN_NURSERY
5053 // if (ptr_in_nursery (ptr)) return;
5055 * Masking out the bits might be faster, but we would have to use 64 bit
5056 * immediates, which might be slower.
/* Aligned nursery: compare (ptr >> bits) against (start >> bits). */
5058 mono_mb_emit_ldarg (mb, 0);
5059 mono_mb_emit_icon (mb, DEFAULT_NURSERY_BITS);
5060 mono_mb_emit_byte (mb, CEE_SHR_UN);
5061 mono_mb_emit_icon (mb, (mword)sgen_get_nursery_start () >> DEFAULT_NURSERY_BITS);
5062 nursery_check_return_labels [0] = mono_mb_emit_branch (mb, CEE_BEQ);
5064 if (!major_collector.is_concurrent) {
5065 // if (!ptr_in_nursery (*ptr)) return;
5066 mono_mb_emit_ldarg (mb, 0);
5067 mono_mb_emit_byte (mb, CEE_LDIND_I);
5068 mono_mb_emit_icon (mb, DEFAULT_NURSERY_BITS);
5069 mono_mb_emit_byte (mb, CEE_SHR_UN);
5070 mono_mb_emit_icon (mb, (mword)sgen_get_nursery_start () >> DEFAULT_NURSERY_BITS);
5071 nursery_check_return_labels [1] = mono_mb_emit_branch (mb, CEE_BNE_UN);
/* Unaligned nursery: explicit start/end range checks.
 * NOTE(review): declared `label_continue1/label_continue2` but used as
 * `label_continue_1/label_continue_2` below — likely a transcription
 * garble of the declaration; confirm against upstream. */
5074 int label_continue1, label_continue2;
5075 int dereferenced_var;
5077 // if (ptr < (sgen_get_nursery_start ())) goto continue;
5078 mono_mb_emit_ldarg (mb, 0);
5079 mono_mb_emit_ptr (mb, (gpointer) sgen_get_nursery_start ());
5080 label_continue_1 = mono_mb_emit_branch (mb, CEE_BLT);
5082 // if (ptr >= sgen_get_nursery_end ())) goto continue;
5083 mono_mb_emit_ldarg (mb, 0);
5084 mono_mb_emit_ptr (mb, (gpointer) sgen_get_nursery_end ());
5085 label_continue_2 = mono_mb_emit_branch (mb, CEE_BGE);
5088 nursery_check_return_labels [0] = mono_mb_emit_branch (mb, CEE_BR);
5091 mono_mb_patch_branch (mb, label_continue_1);
5092 mono_mb_patch_branch (mb, label_continue_2);
5094 // Dereference and store in local var
5095 dereferenced_var = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
5096 mono_mb_emit_ldarg (mb, 0);
5097 mono_mb_emit_byte (mb, CEE_LDIND_I);
5098 mono_mb_emit_stloc (mb, dereferenced_var);
5100 if (!major_collector.is_concurrent) {
5101 // if (*ptr < sgen_get_nursery_start ()) return;
5102 mono_mb_emit_ldloc (mb, dereferenced_var);
5103 mono_mb_emit_ptr (mb, (gpointer) sgen_get_nursery_start ());
5104 nursery_check_return_labels [1] = mono_mb_emit_branch (mb, CEE_BLT);
5106 // if (*ptr >= sgen_get_nursery_end ()) return;
5107 mono_mb_emit_ldloc (mb, dereferenced_var);
5108 mono_mb_emit_ptr (mb, (gpointer) sgen_get_nursery_end ());
5109 nursery_check_return_labels [2] = mono_mb_emit_branch (mb, CEE_BGE);
/*
 * Build (once) and return the managed IL write-barrier wrapper.
 * The wrapper performs the nursery checks emitted above, then marks the
 * card for the stored-into address; the non-managed fallback calls
 * mono_gc_wbarrier_generic_nostore. Publication uses double-checked
 * locking with a memory barrier.
 */
5116 mono_gc_get_write_barrier (void)
5119 MonoMethodBuilder *mb;
5120 MonoMethodSignature *sig;
5121 #ifdef MANAGED_WBARRIER
5122 int i, nursery_check_labels [3];
5124 #ifdef HAVE_KW_THREAD
5125 int stack_end_offset = -1;
5127 MONO_THREAD_VAR_OFFSET (stack_end, stack_end_offset);
5128 g_assert (stack_end_offset != -1);
5132 // FIXME: Maybe create a separate version for ctors (the branch would be
5133 // correctly predicted more times)
5134 if (write_barrier_method)
5135 return write_barrier_method;
5137 /* Create the IL version of mono_gc_barrier_generic_store () */
5138 sig = mono_metadata_signature_alloc (mono_defaults.corlib, 1);
5139 sig->ret = &mono_defaults.void_class->byval_arg;
5140 sig->params [0] = &mono_defaults.int_class->byval_arg;
5142 mb = mono_mb_new (mono_defaults.object_class, "wbarrier", MONO_WRAPPER_WRITE_BARRIER);
5145 #ifdef MANAGED_WBARRIER
5146 emit_nursery_check (mb, nursery_check_labels);
5148 addr = sgen_cardtable + ((address >> CARD_BITS) & CARD_MASK)
5152 LDC_PTR sgen_cardtable
5154 address >> CARD_BITS
5158 if (SGEN_HAVE_OVERLAPPING_CARDS) {
5159 LDC_PTR card_table_mask
/* IL: compute the card address and store 1 into it. */
5166 mono_mb_emit_ptr (mb, sgen_cardtable);
5167 mono_mb_emit_ldarg (mb, 0);
5168 mono_mb_emit_icon (mb, CARD_BITS);
5169 mono_mb_emit_byte (mb, CEE_SHR_UN);
5170 #ifdef SGEN_HAVE_OVERLAPPING_CARDS
5171 mono_mb_emit_ptr (mb, (gpointer)CARD_MASK);
5172 mono_mb_emit_byte (mb, CEE_AND);
5174 mono_mb_emit_byte (mb, CEE_ADD);
5175 mono_mb_emit_icon (mb, 1);
5176 mono_mb_emit_byte (mb, CEE_STIND_I1);
/* Patch all early-return branches emitted by the nursery check. */
5179 for (i = 0; i < 3; ++i) {
5180 if (nursery_check_labels [i])
5181 mono_mb_patch_branch (mb, nursery_check_labels [i]);
5183 mono_mb_emit_byte (mb, CEE_RET);
/* Fallback wrapper: just call the C barrier. */
5185 mono_mb_emit_ldarg (mb, 0);
5186 mono_mb_emit_icall (mb, mono_gc_wbarrier_generic_nostore);
5187 mono_mb_emit_byte (mb, CEE_RET);
5190 res = mono_mb_create_method (mb, sig, 16);
5194 if (write_barrier_method) {
5195 /* Already created */
5196 mono_free_method (res);
5198 /* double-checked locking */
5199 mono_memory_barrier ();
5200 write_barrier_method = res;
5204 return write_barrier_method;
5208 mono_gc_get_description (void)
5210 return g_strdup ("sgen");
5214 mono_gc_set_desktop_mode (void)
5219 mono_gc_is_moving (void)
5225 mono_gc_is_disabled (void)
5231 BOOL APIENTRY mono_gc_dllmain (HMODULE module_handle, DWORD reason, LPVOID reserved)
5238 sgen_get_nursery_clear_policy (void)
5240 return nursery_clear_policy;
/* Lazily build a fake byte-array vtable used to fill unused heap holes
 * so the heap always parses as a sequence of valid objects. */
5244 sgen_get_array_fill_vtable (void)
5246 if (!array_fill_vtable) {
5247 static MonoClass klass;
5248 static MonoVTable vtable;
5251 MonoDomain *domain = mono_get_root_domain ();
5254 klass.element_class = mono_defaults.byte_class;
5256 klass.instance_size = sizeof (MonoArray);
5257 klass.sizes.element_size = 1;
5258 klass.name = "array_filler_type";
5260 vtable.klass = &klass;
5262 vtable.gc_descr = mono_gc_make_descr_for_array (TRUE, &bmap, 0, 1);
5265 array_fill_vtable = &vtable;
5267 return array_fill_vtable;
/* Release the GC lock; if freeing was requested while locked, run the
 * hazard-pointer deferred-free pass after dropping the mutex. */
5277 sgen_gc_unlock (void)
5279 gboolean try_free = sgen_try_free_some_memory;
5280 sgen_try_free_some_memory = FALSE;
5281 mono_mutex_unlock (&gc_mutex);
5282 MONO_GC_UNLOCKED ();
5284 mono_thread_hazardous_try_free_some ();
5288 sgen_major_collector_iterate_live_block_ranges (sgen_cardtable_block_callback callback)
5290 major_collector.iterate_live_block_ranges (callback);
5294 sgen_major_collector_scan_card_table (SgenGrayQueue *queue)
5296 major_collector.scan_card_table (FALSE, queue);
5300 sgen_get_major_collector (void)
5302 return &major_collector;
/* Mark the current thread as skipped (not suspended) by the GC. */
5305 void mono_gc_set_skip_thread (gboolean skip)
5307 SgenThreadInfo *info = mono_thread_info_current ();
5310 info->gc_disabled = skip;
5315 sgen_get_remset (void)
/* Extra vtable flag bits for bridge-related classes; 0 when the bridge
 * is not in use or the class is unrelated. */
5321 mono_gc_get_vtable_bits (MonoClass *class)
5323 /* FIXME move this to the bridge code */
5324 if (!sgen_need_bridge_processing ())
5326 switch (sgen_bridge_class_kind (class)) {
5327 case GC_BRIDGE_TRANSPARENT_BRIDGE_CLASS:
5328 case GC_BRIDGE_OPAQUE_BRIDGE_CLASS:
5329 return SGEN_GC_BIT_BRIDGE_OBJECT;
5330 case GC_BRIDGE_OPAQUE_CLASS:
5331 return SGEN_GC_BIT_BRIDGE_OPAQUE_OBJECT;
5337 mono_gc_register_altstack (gpointer stack, gint32 stack_size, gpointer altstack, gint32 altstack_size)
/* Debug helper: stop the world, verify the whole heap, restart. */
5344 sgen_check_whole_heap_stw (void)
5346 sgen_stop_world (0);
5347 sgen_clear_nursery_fragments ();
5348 sgen_check_whole_heap (FALSE);
5349 sgen_restart_world (0, NULL);
/* Flush buffered object-move events to the profiler. */
5353 sgen_gc_event_moves (void)
5355 if (moved_objects_idx) {
5356 mono_profiler_gc_moves (moved_objects, moved_objects_idx);
5357 moved_objects_idx = 0;
/* Microseconds elapsed since sgen_init_timestamp. */
5362 sgen_timestamp (void)
5364 SGEN_TV_DECLARE (timestamp);
5365 SGEN_TV_GETTIME (timestamp);
5366 return SGEN_TV_ELAPSED (sgen_init_timestamp, timestamp);
5369 #endif /* HAVE_SGEN_GC */