2 * sgen-gc.c: Simple generational GC.
5 * Paolo Molaro (lupus@ximian.com)
6 * Rodrigo Kumpera (kumpera@gmail.com)
8 * Copyright 2005-2011 Novell, Inc (http://www.novell.com)
9 * Copyright 2011 Xamarin Inc (http://www.xamarin.com)
11 * Thread start/stop adapted from Boehm's GC:
12 * Copyright (c) 1994 by Xerox Corporation. All rights reserved.
13 * Copyright (c) 1996 by Silicon Graphics. All rights reserved.
14 * Copyright (c) 1998 by Fergus Henderson. All rights reserved.
15 * Copyright (c) 2000-2004 by Hewlett-Packard Company. All rights reserved.
16 * Copyright 2001-2003 Ximian, Inc
17 * Copyright 2003-2010 Novell, Inc.
18 * Copyright 2011 Xamarin, Inc.
19 * Copyright (C) 2012 Xamarin Inc
21 * This library is free software; you can redistribute it and/or
22 * modify it under the terms of the GNU Library General Public
23 * License 2.0 as published by the Free Software Foundation;
25 * This library is distributed in the hope that it will be useful,
26 * but WITHOUT ANY WARRANTY; without even the implied warranty of
27 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
28 * Library General Public License for more details.
30 * You should have received a copy of the GNU Library General Public
31 * License 2.0 along with this library; if not, write to the Free
32 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
34 * Important: allocation provides always zeroed memory, having to do
35 * a memset after allocation is deadly for performance.
36 * Memory usage at startup is currently as follows:
38 * 64 KB internal space
40 * We should provide a small memory config with half the sizes
42 * We currently try to make as few mono assumptions as possible:
43 * 1) 2-word header with no GC pointers in it (first vtable, second to store the
45 * 2) gc descriptor is the second word in the vtable (first word in the class)
46 * 3) 8 byte alignment is the minimum and enough (not true for special structures (SIMD), FIXME)
47 * 4) there is a function to get an object's size and the number of
48 * elements in an array.
49 * 5) we know the special way bounds are allocated for complex arrays
50 * 6) we know about proxies and how to treat them when domains are unloaded
52 * Always try to keep stack usage to a minimum: no recursive behaviour
53 * and no large stack allocs.
55 * General description.
56 * Objects are initially allocated in a nursery using a fast bump-pointer technique.
57 * When the nursery is full we start a nursery collection: this is performed with a
59 * When the old generation is full we start a copying GC of the old generation as well:
60 * this will be changed to mark&sweep with copying when fragmentation becomes too severe
61 * in the future. Maybe we'll even do both during the same collection like IMMIX.
63 * The things that complicate this description are:
64 * *) pinned objects: we can't move them so we need to keep track of them
65 * *) no precise info of the thread stacks and registers: we need to be able to
66 * quickly find the objects that may be referenced conservatively and pin them
67 * (this makes the first issues more important)
68 * *) large objects are too expensive to be dealt with using copying GC: we handle them
69 * with mark/sweep during major collections
70 * *) some objects need to not move even if they are small (interned strings, Type handles):
71 * we use mark/sweep for them, too: they are not allocated in the nursery, but inside
72 * PinnedChunks regions
78 *) we could have a function pointer in MonoClass to implement
79 customized write barriers for value types
81 *) investigate the stuff needed to advance a thread to a GC-safe
82 point (single-stepping, read from unmapped memory etc) and implement it.
83 This would enable us to inline allocations and write barriers, for example,
84 or at least parts of them, like the write barrier checks.
85 We may need this also for handling precise info on stacks, even simple things
86 as having uninitialized data on the stack and having to wait for the prolog
87 to zero it. Not an issue for the last frame that we scan conservatively.
88 We could always not trust the value in the slots anyway.
90 *) modify the jit to save info about references in stack locations:
91 this can be done just for locals as a start, so that at least
92 part of the stack is handled precisely.
94 *) test/fix endianness issues
96 *) Implement a card table as the write barrier instead of remembered
97 sets? Card tables are not easy to implement with our current
98 memory layout. We have several different kinds of major heap
99 objects: Small objects in regular blocks, small objects in pinned
100 chunks and LOS objects. If we just have a pointer we have no way
101 to tell which kind of object it points into, therefore we cannot
102 know where its card table is. The least we have to do to make
103 this happen is to get rid of write barriers for indirect stores.
106 *) Get rid of write barriers for indirect stores. We can do this by
107 telling the GC to wbarrier-register an object once we do an ldloca
108 or ldelema on it, and to unregister it once it's not used anymore
109 (it can only travel downwards on the stack). The problem with
110 unregistering is that it needs to happen eventually no matter
111 what, even if exceptions are thrown, the thread aborts, etc.
112 Rodrigo suggested that we could do only the registering part and
113 let the collector find out (pessimistically) when it's safe to
114 unregister, namely when the stack pointer of the thread that
115 registered the object is higher than it was when the registering
116 happened. This might make for a good first implementation to get
117 some data on performance.
119 *) Some sort of blacklist support? Blacklists is a concept from the
120 Boehm GC: if during a conservative scan we find pointers to an
121 area which we might use as heap, we mark that area as unusable, so
122 pointer retention by random pinning pointers is reduced.
124 *) experiment with max small object size (very small right now - 2kb,
125 because it's tied to the max freelist size)
127 *) add an option to mmap the whole heap in one chunk: it makes for many
128 simplifications in the checks (put the nursery at the top and just use a single
129 check for inclusion/exclusion): the issue this has is that on 32 bit systems it's
130 not flexible (too much of the address space may be used by default or we can't
131 increase the heap as needed) and we'd need a race-free mechanism to return memory
132 back to the system (mprotect(PROT_NONE) will still keep the memory allocated if it
133 was written to, munmap is needed, but the following mmap may not find the same segment
136 *) memzero the major fragments after restarting the world and optionally a smaller
139 *) investigate having fragment zeroing threads
141 *) separate locks for finalization and other minor stuff to reduce
144 *) try a different copying order to improve memory locality
146 *) a thread abort after a store but before the write barrier will
147 prevent the write barrier from executing
149 *) specialized dynamically generated markers/copiers
151 *) Dynamically adjust TLAB size to the number of threads. If we have
152 too many threads that do allocation, we might need smaller TLABs,
153 and we might get better performance with larger TLABs if we only
154 have a handful of threads. We could sum up the space left in all
155 assigned TLABs and if that's more than some percentage of the
156 nursery size, reduce the TLAB size.
158 *) Explore placing unreachable objects on unused nursery memory.
159 Instead of memset'ing a region to zero, place an int[] covering it.
160 A good place to start is add_nursery_frag. The tricky thing here is
161 placing those objects atomically outside of a collection.
163 *) Allocation should use asymmetric Dekker synchronization:
164 http://blogs.oracle.com/dave/resource/Asymmetric-Dekker-Synchronization.txt
165 This should help weak consistency archs.
172 #define _XOPEN_SOURCE
173 #define _DARWIN_C_SOURCE
179 #ifdef HAVE_PTHREAD_H
182 #ifdef HAVE_PTHREAD_NP_H
183 #include <pthread_np.h>
185 #ifdef HAVE_SEMAPHORE_H
186 #include <semaphore.h>
194 #include "metadata/sgen-gc.h"
195 #include "metadata/metadata-internals.h"
196 #include "metadata/class-internals.h"
197 #include "metadata/gc-internal.h"
198 #include "metadata/object-internals.h"
199 #include "metadata/threads.h"
200 #include "metadata/sgen-cardtable.h"
201 #include "metadata/sgen-ssb.h"
202 #include "metadata/sgen-protocol.h"
203 #include "metadata/sgen-archdep.h"
204 #include "metadata/sgen-bridge.h"
205 #include "metadata/sgen-memory-governor.h"
206 #include "metadata/sgen-hash-table.h"
207 #include "metadata/mono-gc.h"
208 #include "metadata/method-builder.h"
209 #include "metadata/profiler-private.h"
210 #include "metadata/monitor.h"
211 #include "metadata/threadpool-internals.h"
212 #include "metadata/mempool-internals.h"
213 #include "metadata/marshal.h"
214 #include "metadata/runtime.h"
215 #include "metadata/sgen-cardtable.h"
216 #include "metadata/sgen-pinning.h"
217 #include "metadata/sgen-workers.h"
218 #include "utils/mono-mmap.h"
219 #include "utils/mono-time.h"
220 #include "utils/mono-semaphore.h"
221 #include "utils/mono-counters.h"
222 #include "utils/mono-proclib.h"
223 #include "utils/mono-memory-model.h"
224 #include "utils/mono-logger-internal.h"
225 #include "utils/dtrace.h"
227 #include <mono/utils/mono-logger-internal.h>
228 #include <mono/utils/memcheck.h>
230 #if defined(__MACH__)
231 #include "utils/mach-support.h"
234 #define OPDEF(a,b,c,d,e,f,g,h,i,j) \
238 #include "mono/cil/opcode.def"
244 #undef pthread_create
246 #undef pthread_detach
249 * ######################################################################
250 * ######## Types and constants used by the GC.
251 * ######################################################################
254 /* 0 means not initialized, 1 is initialized, -1 means in progress */
255 static int gc_initialized = 0;
256 /* If set, check if we need to do something every X allocations */
257 gboolean has_per_allocation_action;
258 /* If set, do a heap check every X allocation */
259 guint32 verify_before_allocs = 0;
260 /* If set, do a minor collection before every X allocation */
261 guint32 collect_before_allocs = 0;
262 /* If set, do a whole heap check before each collection */
263 static gboolean whole_heap_check_before_collection = FALSE;
264 /* If set, do a heap consistency check before each minor collection */
265 static gboolean consistency_check_at_minor_collection = FALSE;
266 /* If set, check whether mark bits are consistent after major collections */
267 static gboolean check_mark_bits_after_major_collection = FALSE;
268 /* If set, check that all nursery objects are pinned/not pinned, depending on context */
269 static gboolean check_nursery_objects_pinned = FALSE;
270 /* If set, do a few checks when the concurrent collector is used */
271 static gboolean do_concurrent_checks = FALSE;
272 /* If set, check that there are no references to the domain left at domain unload */
273 static gboolean xdomain_checks = FALSE;
274 /* If not null, dump the heap after each collection into this file */
275 static FILE *heap_dump_file = NULL;
276 /* If set, mark stacks conservatively, even if precise marking is possible */
277 static gboolean conservative_stack_mark = FALSE;
278 /* If set, do a plausibility check on the scan_starts before and after
280 static gboolean do_scan_starts_check = FALSE;
281 static gboolean nursery_collection_is_parallel = FALSE;
282 static gboolean disable_minor_collections = FALSE;
283 static gboolean disable_major_collections = FALSE;
284 gboolean do_pin_stats = FALSE;
285 static gboolean do_verify_nursery = FALSE;
286 static gboolean do_dump_nursery_content = FALSE;
288 #ifdef HEAVY_STATISTICS
289 long long stat_objects_alloced_degraded = 0;
290 long long stat_bytes_alloced_degraded = 0;
292 long long stat_copy_object_called_nursery = 0;
293 long long stat_objects_copied_nursery = 0;
294 long long stat_copy_object_called_major = 0;
295 long long stat_objects_copied_major = 0;
297 long long stat_scan_object_called_nursery = 0;
298 long long stat_scan_object_called_major = 0;
300 long long stat_slots_allocated_in_vain;
302 long long stat_nursery_copy_object_failed_from_space = 0;
303 long long stat_nursery_copy_object_failed_forwarded = 0;
304 long long stat_nursery_copy_object_failed_pinned = 0;
305 long long stat_nursery_copy_object_failed_to_space = 0;
307 static int stat_wbarrier_set_field = 0;
308 static int stat_wbarrier_set_arrayref = 0;
309 static int stat_wbarrier_arrayref_copy = 0;
310 static int stat_wbarrier_generic_store = 0;
311 static int stat_wbarrier_set_root = 0;
312 static int stat_wbarrier_value_copy = 0;
313 static int stat_wbarrier_object_copy = 0;
316 int stat_minor_gcs = 0;
317 int stat_major_gcs = 0;
319 static long long stat_pinned_objects = 0;
321 static long long time_minor_pre_collection_fragment_clear = 0;
322 static long long time_minor_pinning = 0;
323 static long long time_minor_scan_remsets = 0;
324 static long long time_minor_scan_pinned = 0;
325 static long long time_minor_scan_registered_roots = 0;
326 static long long time_minor_scan_thread_data = 0;
327 static long long time_minor_finish_gray_stack = 0;
328 static long long time_minor_fragment_creation = 0;
330 static long long time_major_pre_collection_fragment_clear = 0;
331 static long long time_major_pinning = 0;
332 static long long time_major_scan_pinned = 0;
333 static long long time_major_scan_registered_roots = 0;
334 static long long time_major_scan_thread_data = 0;
335 static long long time_major_scan_alloc_pinned = 0;
336 static long long time_major_scan_finalized = 0;
337 static long long time_major_scan_big_objects = 0;
338 static long long time_major_finish_gray_stack = 0;
339 static long long time_major_free_bigobjs = 0;
340 static long long time_major_los_sweep = 0;
341 static long long time_major_sweep = 0;
342 static long long time_major_fragment_creation = 0;
344 int gc_debug_level = 0;
349 mono_gc_flush_info (void)
351 fflush (gc_debug_file);
355 #define TV_DECLARE SGEN_TV_DECLARE
356 #define TV_GETTIME SGEN_TV_GETTIME
357 #define TV_ELAPSED SGEN_TV_ELAPSED
358 #define TV_ELAPSED_MS SGEN_TV_ELAPSED_MS
360 #define ALIGN_TO(val,align) ((((guint64)val) + ((align) - 1)) & ~((align) - 1))
362 NurseryClearPolicy nursery_clear_policy = CLEAR_AT_TLAB_CREATION;
364 #define object_is_forwarded SGEN_OBJECT_IS_FORWARDED
365 #define object_is_pinned SGEN_OBJECT_IS_PINNED
366 #define pin_object SGEN_PIN_OBJECT
367 #define unpin_object SGEN_UNPIN_OBJECT
369 #define ptr_in_nursery sgen_ptr_in_nursery
371 #define LOAD_VTABLE SGEN_LOAD_VTABLE
374 safe_name (void* obj)
376 MonoVTable *vt = (MonoVTable*)LOAD_VTABLE (obj);
377 return vt->klass->name;
380 #define safe_object_get_size sgen_safe_object_get_size
/* Exported wrapper around safe_name () for use by other sgen modules. */
const char*
sgen_safe_name (void* obj)
{
	return safe_name (obj);
}
389 * ######################################################################
390 * ######## Global data.
391 * ######################################################################
393 LOCK_DECLARE (gc_mutex);
394 static int gc_disabled = 0;
396 static gboolean use_cardtable;
398 #define SCAN_START_SIZE SGEN_SCAN_START_SIZE
400 static mword pagesize = 4096;
401 int degraded_mode = 0;
403 static mword bytes_pinned_from_failed_allocation = 0;
405 GCMemSection *nursery_section = NULL;
406 static mword lowest_heap_address = ~(mword)0;
407 static mword highest_heap_address = 0;
409 LOCK_DECLARE (sgen_interruption_mutex);
410 static LOCK_DECLARE (pin_queue_mutex);
412 #define LOCK_PIN_QUEUE mono_mutex_lock (&pin_queue_mutex)
413 #define UNLOCK_PIN_QUEUE mono_mutex_unlock (&pin_queue_mutex)
415 typedef struct _FinalizeReadyEntry FinalizeReadyEntry;
416 struct _FinalizeReadyEntry {
417 FinalizeReadyEntry *next;
421 typedef struct _EphemeronLinkNode EphemeronLinkNode;
423 struct _EphemeronLinkNode {
424 EphemeronLinkNode *next;
433 int current_collection_generation = -1;
434 volatile gboolean concurrent_collection_in_progress = FALSE;
436 /* objects that are ready to be finalized */
437 static FinalizeReadyEntry *fin_ready_list = NULL;
438 static FinalizeReadyEntry *critical_fin_list = NULL;
440 static EphemeronLinkNode *ephemeron_list;
442 /* registered roots: the key to the hash is the root start address */
444 * Different kinds of roots are kept separate to speed up pin_from_roots () for example.
446 SgenHashTable roots_hash [ROOT_TYPE_NUM] = {
447 SGEN_HASH_TABLE_INIT (INTERNAL_MEM_ROOTS_TABLE, INTERNAL_MEM_ROOT_RECORD, sizeof (RootRecord), mono_aligned_addr_hash, NULL),
448 SGEN_HASH_TABLE_INIT (INTERNAL_MEM_ROOTS_TABLE, INTERNAL_MEM_ROOT_RECORD, sizeof (RootRecord), mono_aligned_addr_hash, NULL),
449 SGEN_HASH_TABLE_INIT (INTERNAL_MEM_ROOTS_TABLE, INTERNAL_MEM_ROOT_RECORD, sizeof (RootRecord), mono_aligned_addr_hash, NULL)
451 static mword roots_size = 0; /* amount of memory in the root set */
453 #define GC_ROOT_NUM 32
455 int count; /* must be the first field */
456 void *objects [GC_ROOT_NUM];
457 int root_types [GC_ROOT_NUM];
458 uintptr_t extra_info [GC_ROOT_NUM];
/*
 * Flush the accumulated root report to the profiler.
 * NOTE(review): this listing elides lines -- upstream also guards on an
 * empty report and resets report->count after notifying; confirm
 * against the full file.
 */
462 notify_gc_roots (GCRootReport *report)
466 mono_profiler_gc_roots (report->count, report->objects, report->root_types, report->extra_info);
471 add_profile_gc_root (GCRootReport *report, void *object, int rtype, uintptr_t extra_info)
473 if (report->count == GC_ROOT_NUM)
474 notify_gc_roots (report);
475 report->objects [report->count] = object;
476 report->root_types [report->count] = rtype;
477 report->extra_info [report->count++] = (uintptr_t)((MonoVTable*)LOAD_VTABLE (object))->klass;
480 MonoNativeTlsKey thread_info_key;
482 #ifdef HAVE_KW_THREAD
483 __thread SgenThreadInfo *sgen_thread_info;
484 __thread gpointer *store_remset_buffer;
485 __thread long store_remset_buffer_index;
486 __thread char *stack_end;
487 __thread long *store_remset_buffer_index_addr;
490 /* The size of a TLAB */
491 /* The bigger the value, the less often we have to go to the slow path to allocate a new
492 * one, but the more space is wasted by threads not allocating much memory.
494 * FIXME: Make this self-tuning for each thread.
496 guint32 tlab_size = (1024 * 4);
498 #define MAX_SMALL_OBJ_SIZE SGEN_MAX_SMALL_OBJ_SIZE
500 /* Functions supplied by the runtime to be called by the GC */
501 static MonoGCCallbacks gc_callbacks;
503 #define ALLOC_ALIGN SGEN_ALLOC_ALIGN
504 #define ALLOC_ALIGN_BITS SGEN_ALLOC_ALIGN_BITS
506 #define ALIGN_UP SGEN_ALIGN_UP
508 #define MOVED_OBJECTS_NUM 64
509 static void *moved_objects [MOVED_OBJECTS_NUM];
510 static int moved_objects_idx = 0;
512 /* Vtable of the objects used to fill out nursery fragments before a collection */
513 static MonoVTable *array_fill_vtable;
515 #ifdef SGEN_DEBUG_INTERNAL_ALLOC
516 MonoNativeThreadId main_gc_thread = NULL;
519 /*Object was pinned during the current collection*/
520 static mword objects_pinned;
523 * ######################################################################
524 * ######## Macros and function declarations.
525 * ######################################################################
529 align_pointer (void *ptr)
531 mword p = (mword)ptr;
532 p += sizeof (gpointer) - 1;
533 p &= ~ (sizeof (gpointer) - 1);
537 typedef SgenGrayQueue GrayQueue;
539 /* forward declarations */
540 static void scan_thread_data (void *start_nursery, void *end_nursery, gboolean precise, GrayQueue *queue);
541 static void scan_from_registered_roots (char *addr_start, char *addr_end, int root_type, ScanCopyContext ctx);
542 static void scan_finalizer_entries (FinalizeReadyEntry *list, ScanCopyContext ctx);
543 static void report_finalizer_roots (void);
544 static void report_registered_roots (void);
546 static void pin_from_roots (void *start_nursery, void *end_nursery, GrayQueue *queue);
547 static int pin_objects_from_addresses (GCMemSection *section, void **start, void **end, void *start_nursery, void *end_nursery, ScanCopyContext ctx);
548 static void finish_gray_stack (char *start_addr, char *end_addr, int generation, GrayQueue *queue);
550 void mono_gc_scan_for_specific_ref (MonoObject *key, gboolean precise);
553 static void init_stats (void);
555 static int mark_ephemerons_in_range (ScanCopyContext ctx);
556 static void clear_unreachable_ephemerons (gboolean concurrent_cementing, ScanCopyContext ctx);
557 static void null_ephemerons_for_domain (MonoDomain *domain);
559 static gboolean major_update_or_finish_concurrent_collection (gboolean force_finish);
561 SgenObjectOperations current_object_ops;
562 SgenMajorCollector major_collector;
563 SgenMinorCollector sgen_minor_collector;
564 static GrayQueue gray_queue;
565 static GrayQueue remember_major_objects_gray_queue;
567 static SgenRemeberedSet remset;
569 /* The gray queue to use from the main collection thread. */
570 #define WORKERS_DISTRIBUTE_GRAY_QUEUE (&gray_queue)
573 * The gray queue a worker job must use. If we're not parallel or
574 * concurrent, we use the main gray queue.
576 static SgenGrayQueue*
577 sgen_workers_get_job_gray_queue (WorkerData *worker_data)
579 return worker_data ? &worker_data->private_gray_queue : WORKERS_DISTRIBUTE_GRAY_QUEUE;
582 static gboolean have_non_collection_major_object_remembers = FALSE;
/*
 * Remember a freshly allocated major-heap object while a concurrent
 * mark may be running, so the concurrent collector will still scan it.
 * NOTE(review): the early `return` statements after the two guards are
 * elided in this listing -- confirm against the full file.
 */
585 sgen_remember_major_object_for_concurrent_mark (char *obj)
/* Only meaningful when the major collector supports concurrent marking. */
587 if (!major_collector.is_concurrent)
590 g_assert (current_collection_generation == GENERATION_NURSERY || current_collection_generation == -1);
592 if (!concurrent_collection_in_progress)
595 GRAY_OBJECT_ENQUEUE (&remember_major_objects_gray_queue, obj);
/* Outside a nursery collection the redirect must happen later: flag it. */
597 if (current_collection_generation != GENERATION_NURSERY) {
599 * This happens when the mutator allocates large or
600 * pinned objects or when allocating in degraded
603 have_non_collection_major_object_remembers = TRUE;
/*
 * Move all sections of @queue over to its section gray queue (shared
 * with worker threads) and wake the workers if anything was moved.
 * NOTE(review): the dequeue loop's braces and part of the `wake`
 * bookkeeping are elided in this listing.
 */
610 gray_queue_redirect (SgenGrayQueue *queue)
612 gboolean wake = FALSE;
616 GrayQueueSection *section = sgen_gray_object_dequeue_section (queue);
619 sgen_section_gray_queue_enqueue (queue->alloc_prepare_data, section);
/* Redirecting only makes sense for concurrent or parallel major collection. */
624 g_assert (concurrent_collection_in_progress ||
625 (current_collection_generation == GENERATION_OLD && major_collector.is_parallel));
626 if (sgen_workers_have_started ()) {
627 sgen_workers_wake_up_all ();
/* The concurrent collector only runs in between collections. */
629 if (concurrent_collection_in_progress)
630 g_assert (current_collection_generation == -1);
636 redirect_major_object_remembers (void)
638 gray_queue_redirect (&remember_major_objects_gray_queue);
639 have_non_collection_major_object_remembers = FALSE;
/*
 * Whitelist of cross-appdomain references created by the runtime itself
 * that the xdomain checker must not flag.  Each `if` below matches one
 * known-legitimate pattern by class and field offset.
 * NOTE(review): the `return TRUE;` / `return FALSE;` lines between the
 * cases are elided in this listing -- confirm against the full file.
 */
643 is_xdomain_ref_allowed (gpointer *ptr, char *obj, MonoDomain *domain)
645 MonoObject *o = (MonoObject*)(obj);
646 MonoObject *ref = (MonoObject*)*(ptr);
647 int offset = (char*)(ptr) - (char*)o;
/* Thread.internal_thread and InternalThread.current_appcontext are runtime-managed. */
649 if (o->vtable->klass == mono_defaults.thread_class && offset == G_STRUCT_OFFSET (MonoThread, internal_thread))
651 if (o->vtable->klass == mono_defaults.internal_thread_class && offset == G_STRUCT_OFFSET (MonoInternalThread, current_appcontext))
/* A real proxy may legitimately wrap a server object from another domain. */
653 if (mono_class_has_parent_fast (o->vtable->klass, mono_defaults.real_proxy_class) &&
654 offset == G_STRUCT_OFFSET (MonoRealProxy, unwrapped_server))
656 /* Thread.cached_culture_info */
657 if (!strcmp (ref->vtable->klass->name_space, "System.Globalization") &&
658 !strcmp (ref->vtable->klass->name, "CultureInfo") &&
659 !strcmp(o->vtable->klass->name_space, "System") &&
660 !strcmp(o->vtable->klass->name, "Object[]"))
/* MemoryStream buffers handed across domains by remoting (see trace below). */
663 * at System.IO.MemoryStream.InternalConstructor (byte[],int,int,bool,bool) [0x0004d] in /home/schani/Work/novell/trunk/mcs/class/corlib/System.IO/MemoryStream.cs:121
664 * at System.IO.MemoryStream..ctor (byte[]) [0x00017] in /home/schani/Work/novell/trunk/mcs/class/corlib/System.IO/MemoryStream.cs:81
665 * at (wrapper remoting-invoke-with-check) System.IO.MemoryStream..ctor (byte[]) <IL 0x00020, 0xffffffff>
666 * at System.Runtime.Remoting.Messaging.CADMethodCallMessage.GetArguments () [0x0000d] in /home/schani/Work/novell/trunk/mcs/class/corlib/System.Runtime.Remoting.Messaging/CADMessages.cs:327
667 * at System.Runtime.Remoting.Messaging.MethodCall..ctor (System.Runtime.Remoting.Messaging.CADMethodCallMessage) [0x00017] in /home/schani/Work/novell/trunk/mcs/class/corlib/System.Runtime.Remoting.Messaging/MethodCall.cs:87
668 * at System.AppDomain.ProcessMessageInDomain (byte[],System.Runtime.Remoting.Messaging.CADMethodCallMessage,byte[]&,System.Runtime.Remoting.Messaging.CADMethodReturnMessage&) [0x00018] in /home/schani/Work/novell/trunk/mcs/class/corlib/System/AppDomain.cs:1213
669 * at (wrapper remoting-invoke-with-check) System.AppDomain.ProcessMessageInDomain (byte[],System.Runtime.Remoting.Messaging.CADMethodCallMessage,byte[]&,System.Runtime.Remoting.Messaging.CADMethodReturnMessage&) <IL 0x0003d, 0xffffffff>
670 * at System.Runtime.Remoting.Channels.CrossAppDomainSink.ProcessMessageInDomain (byte[],System.Runtime.Remoting.Messaging.CADMethodCallMessage) [0x00008] in /home/schani/Work/novell/trunk/mcs/class/corlib/System.Runtime.Remoting.Channels/CrossAppDomainChannel.cs:198
671 * at (wrapper runtime-invoke) object.runtime_invoke_CrossAppDomainSink/ProcessMessageRes_object_object (object,intptr,intptr,intptr) <IL 0x0004c, 0xffffffff>
673 if (!strcmp (ref->vtable->klass->name_space, "System") &&
674 !strcmp (ref->vtable->klass->name, "Byte[]") &&
675 !strcmp (o->vtable->klass->name_space, "System.IO") &&
676 !strcmp (o->vtable->klass->name, "MemoryStream"))
678 /* append_job() in threadpool.c */
679 if (!strcmp (ref->vtable->klass->name_space, "System.Runtime.Remoting.Messaging") &&
680 !strcmp (ref->vtable->klass->name, "AsyncResult") &&
681 !strcmp (o->vtable->klass->name_space, "System") &&
682 !strcmp (o->vtable->klass->name, "Object[]") &&
683 mono_thread_pool_is_queue_array ((MonoArray*) o))
/*
 * Diagnose a reference stored in slot @ptr of @obj that escapes @domain.
 * Same-domain and whitelisted references are ignored; otherwise the
 * field at @offset is looked up for a readable message, the offending
 * reference is printed and the heap is scanned for other refs to the
 * same target.  NOTE(review): early returns, some locals (str) and loop
 * braces are elided in this listing -- confirm against the full file.
 */
689 check_reference_for_xdomain (gpointer *ptr, char *obj, MonoDomain *domain)
691 MonoObject *o = (MonoObject*)(obj);
692 MonoObject *ref = (MonoObject*)*(ptr);
693 int offset = (char*)(ptr) - (char*)o;
695 MonoClassField *field;
/* Null or same-domain references are fine. */
698 if (!ref || ref->vtable->domain == domain)
700 if (is_xdomain_ref_allowed (ptr, obj, domain))
/* Find the instance field at @offset, walking up the class hierarchy. */
704 for (class = o->vtable->klass; class; class = class->parent) {
707 for (i = 0; i < class->field.count; ++i) {
708 if (class->fields[i].offset == offset) {
709 field = &class->fields[i];
/* For strings, include the contents in the diagnostic. */
717 if (ref->vtable->klass == mono_defaults.string_class)
718 str = mono_string_to_utf8 ((MonoString*)ref);
721 g_print ("xdomain reference in %p (%s.%s) at offset %d (%s) to %p (%s.%s) (%s) - pointed to by:\n",
722 o, o->vtable->klass->name_space, o->vtable->klass->name,
723 offset, field ? field->name : "",
724 ref, ref->vtable->klass->name_space, ref->vtable->klass->name, str ? str : "");
725 mono_gc_scan_for_specific_ref (o, TRUE);
/*
 * Scan one object's reference slots for cross-domain references by
 * instantiating sgen-scan-object.h with the HANDLE_PTR hook below.
 */
731 #define HANDLE_PTR(ptr,obj) check_reference_for_xdomain ((ptr), (obj), domain)
734 scan_object_for_xdomain_refs (char *start, mword size, void *data)
736 MonoDomain *domain = ((MonoObject*)start)->vtable->domain;
738 #include "sgen-scan-object.h"
/* If TRUE, scan via GC descriptors; otherwise scan every word conservatively. */
741 static gboolean scan_object_for_specific_ref_precise = TRUE;
744 #define HANDLE_PTR(ptr,obj) do { \
745 if ((MonoObject*)*(ptr) == key) { \
746 g_print ("found ref to %p in object %p (%s) at offset %td\n", \
747 key, (obj), safe_name ((obj)), ((char*)(ptr) - (char*)(obj))); \
/*
 * Print every slot of @start that references @key, either precisely via
 * the object's GC descriptor (sgen-scan-object.h) or conservatively
 * word-by-word.  Forwarded objects are followed first.
 * NOTE(review): braces and some lines are elided in this listing.
 */
752 scan_object_for_specific_ref (char *start, MonoObject *key)
756 if ((forwarded = SGEN_OBJECT_IS_FORWARDED (start)))
759 if (scan_object_for_specific_ref_precise) {
760 #include "sgen-scan-object.h"
762 mword *words = (mword*)start;
763 size_t size = safe_object_get_size ((MonoObject*)start);
765 for (i = 0; i < size / sizeof (mword); ++i) {
766 if (words [i] == (mword)key) {
767 g_print ("found possible ref to %p in object %p (%s) at offset %td\n",
768 key, start, safe_name (start), i * sizeof (mword));
/*
 * Walk the objects in [start, end), invoking @callback on each.  NULL
 * words (fragment padding) are skipped, forwarding pointers followed,
 * and the dummy array-fill objects are never reported.
 * NOTE(review): loop braces and the size/advance bookkeeping are
 * partially elided in this listing -- confirm against the full file.
 */
775 sgen_scan_area_with_callback (char *start, char *end, IterateObjectCallbackFunc callback, void *data, gboolean allow_flags)
777 while (start < end) {
/* Skip zeroed words left between objects. */
781 if (!*(void**)start) {
782 start += sizeof (void*); /* should be ALLOC_ALIGN, really */
787 if (!(obj = SGEN_OBJECT_IS_FORWARDED (start)))
793 size = ALIGN_UP (safe_object_get_size ((MonoObject*)obj));
/* Don't report the placeholder objects used to fill nursery fragments. */
795 if ((MonoVTable*)SGEN_LOAD_VTABLE (obj) != array_fill_vtable)
796 callback (obj, size, data);
803 scan_object_for_specific_ref_callback (char *obj, size_t size, MonoObject *key)
805 scan_object_for_specific_ref (obj, key);
/*
 * Report when a root record directly holds @key.
 * NOTE(review): the equality guard comparing @obj against @key is elided
 * in this listing -- confirm against the full file.
 */
809 check_root_obj_specific_ref (RootRecord *root, MonoObject *key, MonoObject *obj)
813 g_print ("found ref to %p in root record %p\n", key, root);
/* Context for the marker callback below (MonoGCRootMarkFunc has no user arg). */
816 static MonoObject *check_key = NULL;
817 static RootRecord *check_root = NULL;
/* MonoGCRootMarkFunc adapter: check one root slot against check_key/check_root. */
820 check_root_obj_specific_ref_from_marker (void **obj)
822 check_root_obj_specific_ref (check_root, check_key, *obj);
/*
 * Search every registered root of @root_type for references to @key,
 * decoding each root's descriptor: inline bitmap, complex (multi-word)
 * bitmap, or user-provided marker.  RUN_LEN descriptors are not
 * expected for these root types.
 * NOTE(review): loop braces and the per-bit iteration lines are
 * partially elided in this listing -- confirm against the full file.
 */
826 scan_roots_for_specific_ref (MonoObject *key, int root_type)
832 SGEN_HASH_TABLE_FOREACH (&roots_hash [root_type], start_root, root) {
833 mword desc = root->root_desc;
837 switch (desc & ROOT_DESC_TYPE_MASK) {
838 case ROOT_DESC_BITMAP:
839 desc >>= ROOT_DESC_TYPE_SHIFT;
842 check_root_obj_specific_ref (root, key, *start_root);
847 case ROOT_DESC_COMPLEX: {
848 gsize *bitmap_data = sgen_get_complex_descriptor_bitmap (desc);
849 int bwords = (*bitmap_data) - 1;
850 void **start_run = start_root;
852 while (bwords-- > 0) {
853 gsize bmap = *bitmap_data++;
854 void **objptr = start_run;
857 check_root_obj_specific_ref (root, key, *objptr);
861 start_run += GC_BITS_PER_WORD;
/* User descriptors delegate slot enumeration to a registered marker. */
865 case ROOT_DESC_USER: {
866 MonoGCRootMarkFunc marker = sgen_get_user_descriptor_func (desc);
867 marker (start_root, check_root_obj_specific_ref_from_marker);
870 case ROOT_DESC_RUN_LEN:
871 g_assert_not_reached ();
873 g_assert_not_reached ();
875 } SGEN_HASH_TABLE_FOREACH_END;
/*
 * Debug helper: print every location that references @key -- nursery,
 * major heap, LOS, normal/wbarrier registered roots and pinned roots.
 * @precise selects descriptor-based object scanning instead of
 * conservative word-by-word scans.
 */
882 mono_gc_scan_for_specific_ref (MonoObject *key, gboolean precise)
887 scan_object_for_specific_ref_precise = precise;
889 sgen_scan_area_with_callback (nursery_section->data, nursery_section->end_data,
890 (IterateObjectCallbackFunc)scan_object_for_specific_ref_callback, key, TRUE);
892 major_collector.iterate_objects (TRUE, TRUE, (IterateObjectCallbackFunc)scan_object_for_specific_ref_callback, key);
894 sgen_los_iterate_objects ((IterateObjectCallbackFunc)scan_object_for_specific_ref_callback, key);
896 scan_roots_for_specific_ref (key, ROOT_TYPE_NORMAL);
897 scan_roots_for_specific_ref (key, ROOT_TYPE_WBARRIER);
/* Pinned roots have no descriptors: check each slot in the range directly. */
899 SGEN_HASH_TABLE_FOREACH (&roots_hash [ROOT_TYPE_PINNED], ptr, root) {
900 while (ptr < (void**)root->end_root) {
901 check_root_obj_specific_ref (root, *ptr, key);
904 } SGEN_HASH_TABLE_FOREACH_END;
/*
 * TRUE if @start belongs to the unloading @domain and must be cleared.
 * Also logs the decision to the binary protocol for debugging.
 * NOTE(review): the TRUE/FALSE return lines are elided in this listing.
 */
908 need_remove_object_for_domain (char *start, MonoDomain *domain)
910 if (mono_object_domain (start) == domain) {
911 SGEN_LOG (4, "Need to cleanup object %p", start);
912 binary_protocol_cleanup (start, (gpointer)LOAD_VTABLE (start), safe_object_get_size ((MonoObject*)start));
/*
 * Pre-unload fixups for one object: internal threads must live in the
 * root domain, and real proxies whose server object belongs to the
 * dying @domain get unwrapped_server zeroed so no dangling
 * cross-domain reference survives the unload.
 */
919 process_object_for_domain_clearing (char *start, MonoDomain *domain)
921 GCVTable *vt = (GCVTable*)LOAD_VTABLE (start);
922 if (vt->klass == mono_defaults.internal_thread_class)
923 g_assert (mono_object_domain (start) == mono_get_root_domain ());
924 /* The object could be a proxy for an object in the domain
926 if (mono_class_has_parent_fast (vt->klass, mono_defaults.real_proxy_class)) {
927 MonoObject *server = ((MonoRealProxy*)start)->unwrapped_server;
929 /* The server could already have been zeroed out, so
930 we need to check for that, too. */
931 if (server && (!LOAD_VTABLE (server) || mono_object_domain (server) == domain)) {
932 SGEN_LOG (4, "Cleaning up remote pointer in %p to object %p", start, server);
933 ((MonoRealProxy*)start)->unwrapped_server = NULL;
/* Domain currently being verified by check_obj_not_in_domain () below. */
938 static MonoDomain *check_domain = NULL;
/* Assert that the object held in slot @o does not belong to check_domain. */
941 check_obj_not_in_domain (void **o)
943 g_assert (((MonoObject*)(*o))->vtable->domain != check_domain);
/*
 * Assert that no registered root of @root_type still references an
 * object of the unloading @domain.  The descriptor decoding mirrors
 * scan_roots_for_specific_ref (): inline bitmap, complex bitmap, or
 * user marker; RUN_LEN is not expected.
 * NOTE(review): loop braces and per-bit iteration lines are partially
 * elided in this listing -- confirm against the full file.
 */
947 scan_for_registered_roots_in_domain (MonoDomain *domain, int root_type)
951 check_domain = domain;
952 SGEN_HASH_TABLE_FOREACH (&roots_hash [root_type], start_root, root) {
953 mword desc = root->root_desc;
955 /* The MonoDomain struct is allowed to hold
956 references to objects in its own domain. */
957 if (start_root == (void**)domain)
960 switch (desc & ROOT_DESC_TYPE_MASK) {
961 case ROOT_DESC_BITMAP:
962 desc >>= ROOT_DESC_TYPE_SHIFT;
964 if ((desc & 1) && *start_root)
965 check_obj_not_in_domain (*start_root);
970 case ROOT_DESC_COMPLEX: {
971 gsize *bitmap_data = sgen_get_complex_descriptor_bitmap (desc);
972 int bwords = (*bitmap_data) - 1;
973 void **start_run = start_root;
975 while (bwords-- > 0) {
976 gsize bmap = *bitmap_data++;
977 void **objptr = start_run;
979 if ((bmap & 1) && *objptr)
980 check_obj_not_in_domain (*objptr);
984 start_run += GC_BITS_PER_WORD;
988 case ROOT_DESC_USER: {
989 MonoGCRootMarkFunc marker = sgen_get_user_descriptor_func (desc);
990 marker (start_root, check_obj_not_in_domain);
993 case ROOT_DESC_RUN_LEN:
994 g_assert_not_reached ();
996 g_assert_not_reached ();
998 } SGEN_HASH_TABLE_FOREACH_END;
/* Reset so stray later calls of the checker don't use a stale domain. */
1000 check_domain = NULL;
1004 check_for_xdomain_refs (void)
1008 sgen_scan_area_with_callback (nursery_section->data, nursery_section->end_data,
1009 (IterateObjectCallbackFunc)scan_object_for_xdomain_refs, NULL, FALSE);
1011 major_collector.iterate_objects (TRUE, TRUE, (IterateObjectCallbackFunc)scan_object_for_xdomain_refs, NULL);
1013 for (bigobj = los_object_list; bigobj; bigobj = bigobj->next)
1014 scan_object_for_xdomain_refs (bigobj->data, sgen_los_object_size (bigobj), NULL);
1018 clear_domain_process_object (char *obj, MonoDomain *domain)
1022 process_object_for_domain_clearing (obj, domain);
1023 remove = need_remove_object_for_domain (obj, domain);
1025 if (remove && ((MonoObject*)obj)->synchronisation) {
1026 void **dislink = mono_monitor_get_object_monitor_weak_link ((MonoObject*)obj);
1028 sgen_register_disappearing_link (NULL, dislink, FALSE, TRUE);
1035 clear_domain_process_minor_object_callback (char *obj, size_t size, MonoDomain *domain)
1037 if (clear_domain_process_object (obj, domain))
1038 memset (obj, 0, size);
1042 clear_domain_process_major_object_callback (char *obj, size_t size, MonoDomain *domain)
1044 clear_domain_process_object (obj, domain);
1048 clear_domain_free_major_non_pinned_object_callback (char *obj, size_t size, MonoDomain *domain)
1050 if (need_remove_object_for_domain (obj, domain))
1051 major_collector.free_non_pinned_object (obj, size);
1055 clear_domain_free_major_pinned_object_callback (char *obj, size_t size, MonoDomain *domain)
1057 if (need_remove_object_for_domain (obj, domain))
1058 major_collector.free_pinned_object (obj, size);
1062 * When appdomains are unloaded we can easily remove objects that have finalizers,
1063 * but all the others could still be present in random places on the heap.
1064 * We need a sweep to get rid of them even though it's going to be costly
1066 * The reason we need to remove them is because we access the vtable and class
1067 * structures to know the object size and the reference bitmap: once the domain is
1068 unloaded they point to random memory.
/*
 * Removes every object belonging to DOMAIN from the heap when the appdomain
 * is unloaded: finishes any concurrent collection, nulls ephemerons and weak
 * links, drops finalizers, zeroes nursery objects, and sweeps the major and
 * large-object heaps in two passes (process, then free) because freeing can
 * return memory to the OS or destroy vtables that the proxy-clearing pass
 * still needs to dereference.
 */
1071 mono_gc_clear_domain (MonoDomain * domain)
1073 LOSObject *bigobj, *prev;
1078 if (concurrent_collection_in_progress)
1079 sgen_perform_collection (0, GENERATION_OLD, "clear domain", TRUE);
1080 g_assert (!concurrent_collection_in_progress);
1082 sgen_process_fin_stage_entries ();
1083 sgen_process_dislink_stage_entries ();
1085 sgen_clear_nursery_fragments ();
1087 if (xdomain_checks && domain != mono_get_root_domain ()) {
1088 scan_for_registered_roots_in_domain (domain, ROOT_TYPE_NORMAL);
1089 scan_for_registered_roots_in_domain (domain, ROOT_TYPE_WBARRIER);
1090 check_for_xdomain_refs ();
1093 /*Ephemerons and dislinks must be processed before LOS since they might end up pointing
1094 to memory returned to the OS.*/
1095 null_ephemerons_for_domain (domain);
1097 for (i = GENERATION_NURSERY; i < GENERATION_MAX; ++i)
1098 sgen_null_links_for_domain (domain, i);
1100 for (i = GENERATION_NURSERY; i < GENERATION_MAX; ++i)
1101 sgen_remove_finalizers_for_domain (domain, i);
1103 sgen_scan_area_with_callback (nursery_section->data, nursery_section->end_data,
1104 (IterateObjectCallbackFunc)clear_domain_process_minor_object_callback, domain, FALSE);
1106 /* We need two passes over major and large objects because
1107 freeing such objects might give their memory back to the OS
1108 (in the case of large objects) or obliterate its vtable
1109 (pinned objects with major-copying or pinned and non-pinned
1110 objects with major-mark&sweep), but we might need to
1111 dereference a pointer from an object to another object if
1112 the first object is a proxy. */
1113 major_collector.iterate_objects (TRUE, TRUE, (IterateObjectCallbackFunc)clear_domain_process_major_object_callback, domain);
1114 for (bigobj = los_object_list; bigobj; bigobj = bigobj->next)
1115 clear_domain_process_object (bigobj->data, domain);
/* Unlink and free LOS objects of the dying domain in a single walk. */
1118 for (bigobj = los_object_list; bigobj;) {
1119 if (need_remove_object_for_domain (bigobj->data, domain)) {
1120 LOSObject *to_free = bigobj;
1122 prev->next = bigobj->next;
1124 los_object_list = bigobj->next;
1125 bigobj = bigobj->next;
/* NOTE(review): bigobj was already advanced on the line above, so this log
 * prints the NEXT object's data and dereferences NULL when the freed object
 * was last in the list — it should log to_free->data instead. */
1126 SGEN_LOG (4, "Freeing large object %p", bigobj->data);
1127 sgen_los_free_object (to_free);
1131 bigobj = bigobj->next;
1133 major_collector.iterate_objects (TRUE, FALSE, (IterateObjectCallbackFunc)clear_domain_free_major_non_pinned_object_callback, domain);
1134 major_collector.iterate_objects (FALSE, TRUE, (IterateObjectCallbackFunc)clear_domain_free_major_pinned_object_callback, domain);
1136 if (G_UNLIKELY (do_pin_stats)) {
1137 if (domain == mono_get_root_domain ())
1138 sgen_pin_stats_print_class_stats ();
1145 * sgen_add_to_global_remset:
1147 * The global remset contains locations which point into newspace after
1148 * a minor collection. This can happen if the objects they point to are pinned.
1150 * LOCKING: If called from a parallel collector, the global remset
1151 * lock must be held. For serial collectors that is not necessary.
/*
 * Records PTR (a location pointing at nursery object OBJ) in the global
 * remembered set.  Asserts the caller's context (collection in progress, or
 * concurrent cementing with the concurrent collector) and cements the target
 * object when appropriate before recording the pointer.
 */
1154 sgen_add_to_global_remset (gpointer ptr, gpointer obj, gboolean concurrent_cementing)
1156 SGEN_ASSERT (5, sgen_ptr_in_nursery (obj), "Target pointer of global remset must be in the nursery");
1158 if (!major_collector.is_concurrent) {
1159 SGEN_ASSERT (5, !concurrent_cementing, "Concurrent cementing must only happen with the concurrent collector");
1160 SGEN_ASSERT (5, current_collection_generation != -1, "Global remsets can only be added during collections");
1162 if (current_collection_generation == -1)
1163 SGEN_ASSERT (5, concurrent_cementing, "Global remsets outside of collection pauses can only be added by the concurrent collector");
1164 if (concurrent_cementing)
1165 SGEN_ASSERT (5, concurrent_collection_in_progress, "Concurrent collection must be in process in order to add global remsets");
/* Unpinned nursery survivors are only legal with a split nursery; pinned
 * ones are cemented, and an already-cemented object needs no remset entry. */
1168 if (!object_is_pinned (obj))
1169 SGEN_ASSERT (5, concurrent_cementing || sgen_minor_collector.is_split, "Non-pinned objects can only remain in nursery if it is a split nursery");
1170 else if (sgen_cement_lookup_or_register (obj, concurrent_cementing))
1173 remset.record_pointer (ptr);
1175 #ifdef ENABLE_DTRACE
1176 if (G_UNLIKELY (do_pin_stats))
1177 sgen_pin_stats_register_global_remset (obj);
1179 SGEN_LOG (8, "Adding global remset for %p", ptr);
1180 binary_protocol_global_remset (ptr, obj, (gpointer)SGEN_LOAD_VTABLE (obj));
1182 HEAVY_STAT (++stat_global_remsets_added);
/* DTrace probe with class identity of the remembered target. */
1184 if (G_UNLIKELY (MONO_GC_GLOBAL_REMSET_ADD_ENABLED ())) {
1185 MonoVTable *vt = (MonoVTable*)LOAD_VTABLE (obj);
1186 MONO_GC_GLOBAL_REMSET_ADD ((mword)ptr, (mword)obj, sgen_safe_object_get_size (obj),
1187 vt->klass->name_space, vt->klass->name);
1193 * sgen_drain_gray_stack:
1195 * Scan objects in the gray stack until the stack is empty. This should be called
1196 * frequently after each object is copied, to achieve better locality and cache
/*
 * Scans gray-stack objects with ctx.scan_func until the stack is empty
 * (max_objs == -1) or at most MAX_OBJS objects were scanned.  Called
 * frequently to keep the working set cache-warm.
 */
1200 sgen_drain_gray_stack (int max_objs, ScanCopyContext ctx)
1203 ScanObjectFunc scan_func = ctx.scan_func;
1204 GrayQueue *queue = ctx.queue;
/* Unbounded drain: loop until the dequeue yields nothing. */
1206 if (max_objs == -1) {
1208 GRAY_OBJECT_DEQUEUE (queue, obj);
1211 SGEN_LOG (9, "Precise gray object scan %p (%s)", obj, safe_name (obj));
1212 scan_func (obj, queue);
/* Bounded drain: scan up to max_objs objects, then return. */
1218 for (i = 0; i != max_objs; ++i) {
1219 GRAY_OBJECT_DEQUEUE (queue, obj);
1222 SGEN_LOG (9, "Precise gray object scan %p (%s)", obj, safe_name (obj));
1223 scan_func (obj, queue);
1225 } while (max_objs < 0);
1231 * Addresses from start to end are already sorted. This function finds
1232 * the object header for each address and pins the object. The
1233 * addresses must be inside the passed section. The (start of the)
1234 * address array is overwritten with the addresses of the actually
1235 * pinned objects. Return the number of pinned objects.
/*
 * Given the sorted address range [start, end) of conservative pin candidates
 * inside SECTION, locates the object containing each address (walking forward
 * from the nearest scan start), pins it, enqueues it for scanning, and
 * compacts the surviving addresses back into the array.  Returns the number
 * of objects actually pinned.
 */
1238 pin_objects_from_addresses (GCMemSection *section, void **start, void **end, void *start_nursery, void *end_nursery, ScanCopyContext ctx)
1243 void *last_obj = NULL;
1244 size_t last_obj_size = 0;
1247 void **definitely_pinned = start;
1248 ScanObjectFunc scan_func = ctx.scan_func;
1249 SgenGrayQueue *queue = ctx.queue;
1251 sgen_nursery_allocator_prepare_for_pinning ();
1253 while (start < end) {
1255 /* the range check should be redundant */
1256 if (addr != last && addr >= start_nursery && addr < end_nursery) {
1257 SGEN_LOG (5, "Considering pinning addr %p", addr);
1258 /* multiple pointers to the same object */
1259 if (addr >= last_obj && (char*)addr < (char*)last_obj + last_obj_size) {
/* Find the closest recorded scan start at or before addr. */
1263 idx = ((char*)addr - (char*)section->data) / SCAN_START_SIZE;
1264 g_assert (idx < section->num_scan_start);
1265 search_start = (void*)section->scan_starts [idx];
1266 if (!search_start || search_start > addr) {
1269 search_start = section->scan_starts [idx];
1270 if (search_start && search_start <= addr)
1273 if (!search_start || search_start > addr)
1274 search_start = start_nursery;
1276 if (search_start < last_obj)
1277 search_start = (char*)last_obj + last_obj_size;
1278 /* now addr should be in an object a short distance from search_start
1279 * Note that search_start must point to zeroed mem or point to an object.
/* Zeroed word: unused memory — skip forward one aligned pointer. */
1283 if (!*(void**)search_start) {
1284 /* Consistency check */
1286 for (frag = nursery_fragments; frag; frag = frag->next) {
1287 if (search_start >= frag->fragment_start && search_start < frag->fragment_end)
1288 g_assert_not_reached ();
1292 search_start = (void*)ALIGN_UP ((mword)search_start + sizeof (gpointer));
1295 last_obj = search_start;
1296 last_obj_size = ALIGN_UP (safe_object_get_size ((MonoObject*)search_start));
/* synchronisation == -1 is the nursery-fragment sentinel, not an object. */
1298 if (((MonoObject*)last_obj)->synchronisation == GINT_TO_POINTER (-1)) {
1299 /* Marks the beginning of a nursery fragment, skip */
1301 SGEN_LOG (8, "Pinned try match %p (%s), size %zd", last_obj, safe_name (last_obj), last_obj_size);
1302 if (addr >= search_start && (char*)addr < (char*)last_obj + last_obj_size) {
1304 scan_func (search_start, queue);
1306 SGEN_LOG (4, "Pinned object %p, vtable %p (%s), count %d\n",
1307 search_start, *(void**)search_start, safe_name (search_start), count);
1308 binary_protocol_pin (search_start,
1309 (gpointer)LOAD_VTABLE (search_start),
1310 safe_object_get_size (search_start));
1312 #ifdef ENABLE_DTRACE
1313 if (G_UNLIKELY (MONO_GC_OBJ_PINNED_ENABLED ())) {
1314 int gen = sgen_ptr_in_nursery (search_start) ? GENERATION_NURSERY : GENERATION_OLD;
1315 MonoVTable *vt = (MonoVTable*)LOAD_VTABLE (search_start);
1316 MONO_GC_OBJ_PINNED ((mword)search_start,
1317 sgen_safe_object_get_size (search_start),
1318 vt->klass->name_space, vt->klass->name, gen);
1322 pin_object (search_start);
1323 GRAY_OBJECT_ENQUEUE (queue, search_start);
1324 if (G_UNLIKELY (do_pin_stats))
1325 sgen_pin_stats_register_object (search_start, last_obj_size);
/* Compact surviving addresses to the front of the input array. */
1326 definitely_pinned [count] = search_start;
1332 /* skip to the next object */
1333 search_start = (void*)((char*)search_start + last_obj_size);
1334 } while (search_start <= addr);
1335 /* we either pinned the correct object or we ignored the addr because
1336 * it points to unused zeroed memory.
1342 //printf ("effective pinned: %d (at the end: %d)\n", count, (char*)end_nursery - (char*)last);
1343 if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS) {
1344 GCRootReport report;
1346 for (idx = 0; idx < count; ++idx)
1347 add_profile_gc_root (&report, definitely_pinned [idx], MONO_PROFILE_GC_ROOT_PINNING | MONO_PROFILE_GC_ROOT_MISC, 0);
1348 notify_gc_roots (&report);
1350 stat_pinned_objects += count;
/*
 * Pins the objects referenced by SECTION's staged pin-queue entries and
 * shrinks the queue to the addresses that were actually pinned.
 */
1355 sgen_pin_objects_in_section (GCMemSection *section, ScanCopyContext ctx)
1357 int num_entries = section->pin_queue_num_entries;
1359 void **start = section->pin_queue_start;
1361 reduced_to = pin_objects_from_addresses (section, start, start + num_entries,
1362 section->data, section->next_data, ctx);
1363 section->pin_queue_num_entries = reduced_to;
/* Nothing survived: drop the queue pointer entirely. */
1365 section->pin_queue_start = NULL;
/*
 * Pins OBJECT and enqueues it on QUEUE for scanning.  In the parallel
 * collector the object arrives with its pinned bit already set, so only the
 * pin stage entry is recorded; otherwise the pin bit is set here too.
 * Must not be called while a concurrent collection is in progress.
 */
1371 sgen_pin_object (void *object, GrayQueue *queue)
1373 g_assert (!concurrent_collection_in_progress);
1375 if (sgen_collection_is_parallel ()) {
1377 /*object arrives pinned*/
1378 sgen_pin_stage_ptr (object);
1382 SGEN_PIN_OBJECT (object);
1383 sgen_pin_stage_ptr (object);
1385 if (G_UNLIKELY (do_pin_stats))
1386 sgen_pin_stats_register_object (object, safe_object_get_size (object));
1388 GRAY_OBJECT_ENQUEUE (queue, object);
1389 binary_protocol_pin (object, (gpointer)LOAD_VTABLE (object), safe_object_get_size (object));
/* DTrace probe recording the pinned object's class and generation. */
1391 #ifdef ENABLE_DTRACE
1392 if (G_UNLIKELY (MONO_GC_OBJ_PINNED_ENABLED ())) {
1393 int gen = sgen_ptr_in_nursery (object) ? GENERATION_NURSERY : GENERATION_OLD;
1394 MonoVTable *vt = (MonoVTable*)LOAD_VTABLE (object);
1395 MONO_GC_OBJ_PINNED ((mword)object, sgen_safe_object_get_size (object), vt->klass->name_space, vt->klass->name, gen);
/*
 * Parallel-collector helper: tries to pin OBJ via an atomic vtable-word CAS
 * (nursery) or the major collector's pinning hook.  If another worker won the
 * race and forwarded the object, *PTR is updated to the forwarding address;
 * if another worker pinned it, nothing is left to do.
 */
1401 sgen_parallel_pin_or_update (void **ptr, void *obj, MonoVTable *vt, SgenGrayQueue *queue)
1405 gboolean major_pinned = FALSE;
1407 if (sgen_ptr_in_nursery (obj)) {
/* CAS the pinned bit into the vtable word; success means we own the pin. */
1408 if (SGEN_CAS_PTR (obj, (void*)((mword)vt | SGEN_PINNED_BIT), vt) == vt) {
1409 sgen_pin_object (obj, queue);
1413 major_collector.pin_major_object (obj, queue);
1414 major_pinned = TRUE;
1417 vtable_word = *(mword*)obj;
1418 /*someone else forwarded it, update the pointer and bail out*/
1419 if (vtable_word & SGEN_FORWARDED_BIT) {
1420 *ptr = (void*)(vtable_word & ~SGEN_VTABLE_BITS_MASK);
1424 /*someone pinned it, nothing to do.*/
1425 if (vtable_word & SGEN_PINNED_BIT || major_pinned)
/*
 * Sort the addresses in ARRAY in increasing order.
 * Done using a by-the-book in-place heap sort, which has decent and stable
 * O(n log n) worst-case performance and is reasonably cache efficient:
 * first every element is sifted up to build a max-heap, then the maximum is
 * repeatedly swapped behind the shrinking heap and the new root sifted down.
 */
void
sgen_sort_addresses (void **array, int size)
{
	int i;
	void *tmp;

	/* Phase 1: build a max-heap by sifting each new element up. */
	for (i = 1; i < size; ++i) {
		int child = i;
		while (child > 0) {
			int parent = (child - 1) / 2;

			if (array [parent] >= array [child])
				break;

			tmp = array [parent];
			array [parent] = array [child];
			array [child] = tmp;

			child = parent;
		}
	}

	/* Phase 2: move the maximum behind the heap, then restore the heap. */
	for (i = size - 1; i > 0; --i) {
		int end, root;
		tmp = array [i];
		array [i] = array [0];
		array [0] = tmp;

		end = i - 1;
		root = 0;

		while (root * 2 + 1 <= end) {
			int child = root * 2 + 1;

			/* Sift down toward the larger of the two children. */
			if (child < end && array [child] < array [child + 1])
				++child;
			if (array [root] >= array [child])
				break;

			tmp = array [root];
			array [root] = array [child];
			array [child] = tmp;

			root = child;
		}
	}
}
1482 * Scan the memory between start and end and queue values which could be pointers
1483 * to the area between start_nursery and end_nursery for later consideration.
1484 * Typically used for thread stacks.
/*
 * Conservative scan of [start, end) (typically a thread stack): every
 * pointer-sized value that falls inside the nursery range is aligned down
 * and staged as a pin candidate for later sorting and deduplication.
 */
1487 conservatively_pin_objects_from (void **start, void **end, void *start_nursery, void *end_nursery, int pin_type)
1491 #ifdef VALGRIND_MAKE_MEM_DEFINED_IF_ADDRESSABLE
1492 VALGRIND_MAKE_MEM_DEFINED_IF_ADDRESSABLE (start, (char*)end - (char*)start);
1495 while (start < end) {
1496 if (*start >= start_nursery && *start < end_nursery) {
1498 * *start can point to the middle of an object
1499 * note: should we handle pointing at the end of an object?
1500 * pinning in C# code disallows pointing at the end of an object
1501 * but there is some small chance that an optimizing C compiler
1502 * may keep the only reference to an object by pointing
1503 * at the end of it. We ignore this small chance for now.
1504 * Pointers to the end of an object are indistinguishable
1505 * from pointers to the start of the next object in memory
1506 * so if we allow that we'd need to pin two objects...
1507 * We queue the pointer in an array, the
1508 * array will then be sorted and uniqued. This way
1509 * we can coalesce several pinning pointers and it should
1510 * be faster since we'd do a memory scan with increasing
1511 * addresses. Note: we can align the address to the allocation
1512 * alignment, so the unique process is more effective.
1514 mword addr = (mword)*start;
1515 addr &= ~(ALLOC_ALIGN - 1);
1516 if (addr >= (mword)start_nursery && addr < (mword)end_nursery) {
1517 SGEN_LOG (6, "Pinning address %p from %p", (void*)addr, start);
1518 sgen_pin_stage_ptr ((void*)addr);
1521 if (G_UNLIKELY (do_pin_stats)) {
1522 if (ptr_in_nursery ((void*)addr))
1523 sgen_pin_stats_register_address ((char*)addr, pin_type);
1529 SGEN_LOG (7, "found %d potential pinned heap pointers", count);
1533 * The first thing we do in a collection is to identify pinned objects.
1534 * This function considers all the areas of memory that need to be
1535 * conservatively scanned.
/*
 * First step of a collection: conservatively scan all pinned root regions
 * and the thread stacks/registers, staging every value that might point into
 * [start_nursery, end_nursery) as a pin candidate.
 */
1538 pin_from_roots (void *start_nursery, void *end_nursery, GrayQueue *queue)
1542 SGEN_LOG (2, "Scanning pinned roots (%d bytes, %d/%d entries)", (int)roots_size, roots_hash [ROOT_TYPE_NORMAL].num_entries, roots_hash [ROOT_TYPE_PINNED].num_entries);
1543 /* objects pinned from the API are inside these roots */
1544 SGEN_HASH_TABLE_FOREACH (&roots_hash [ROOT_TYPE_PINNED], start_root, root) {
1545 SGEN_LOG (6, "Pinned roots %p-%p", start_root, root->end_root);
1546 conservatively_pin_objects_from (start_root, (void**)root->end_root, start_nursery, end_nursery, PIN_TYPE_OTHER);
1547 } SGEN_HASH_TABLE_FOREACH_END;
1548 /* now deal with the thread stacks
1549 * in the future we should be able to conservatively scan only:
1550 * *) the cpu registers
1551 * *) the unmanaged stack frames
1552 * *) the _last_ managed stack frame
1553 * *) pointers slots in managed frames
1555 scan_thread_data (start_nursery, end_nursery, FALSE, queue);
1559 unpin_objects_from_queue (SgenGrayQueue *queue)
1563 GRAY_OBJECT_DEQUEUE (queue, addr);
1566 g_assert (SGEN_OBJECT_IS_PINNED (addr));
1567 SGEN_UNPIN_OBJECT (addr);
1572 CopyOrMarkObjectFunc func;
1574 } UserCopyOrMarkData;
1576 static MonoNativeTlsKey user_copy_or_mark_key;
1579 init_user_copy_or_mark_key (void)
1581 mono_native_tls_alloc (&user_copy_or_mark_key, NULL);
1585 set_user_copy_or_mark_data (UserCopyOrMarkData *data)
1587 mono_native_tls_set_value (user_copy_or_mark_key, data);
1591 single_arg_user_copy_or_mark (void **obj)
1593 UserCopyOrMarkData *data = mono_native_tls_get_value (user_copy_or_mark_key);
1595 data->func (obj, data->queue);
1599 * The memory area from start_root to end_root contains pointers to objects.
1600 * Their position is precisely described by @desc (this means that the pointer
1601 * can be either NULL or the pointer to the start of an object).
1602 * This function copies them to to_space and updates them.
1604 * This function is not thread-safe!
/*
 * Precisely scans the root area [start_root, end_root) according to DESC:
 * each reference slot is passed to ctx.copy_func (which may move the object
 * and update the slot) and the gray stack is drained after each update.
 * Not thread-safe: uses the shared user-copy TLS closure.
 */
1607 precisely_scan_objects_from (void** start_root, void** end_root, char* n_start, char *n_end, mword desc, ScanCopyContext ctx)
1609 CopyOrMarkObjectFunc copy_func = ctx.copy_func;
1610 SgenGrayQueue *queue = ctx.queue;
1612 switch (desc & ROOT_DESC_TYPE_MASK) {
1613 case ROOT_DESC_BITMAP:
/* Inline bitmap: one bit per pointer-sized slot from start_root. */
1614 desc >>= ROOT_DESC_TYPE_SHIFT;
1616 if ((desc & 1) && *start_root) {
1617 copy_func (start_root, queue);
1618 SGEN_LOG (9, "Overwrote root at %p with %p", start_root, *start_root);
1619 sgen_drain_gray_stack (-1, ctx);
1625 case ROOT_DESC_COMPLEX: {
/* Out-of-line bitmap: first word is the word count, rest are bits. */
1626 gsize *bitmap_data = sgen_get_complex_descriptor_bitmap (desc);
1627 int bwords = (*bitmap_data) - 1;
1628 void **start_run = start_root;
1630 while (bwords-- > 0) {
1631 gsize bmap = *bitmap_data++;
1632 void **objptr = start_run;
1634 if ((bmap & 1) && *objptr) {
1635 copy_func (objptr, queue);
1636 SGEN_LOG (9, "Overwrote root at %p with %p", objptr, *objptr);
1637 sgen_drain_gray_stack (-1, ctx);
1642 start_run += GC_BITS_PER_WORD;
1646 case ROOT_DESC_USER: {
/* Hand the copy function to the user marker via the TLS closure. */
1647 UserCopyOrMarkData data = { copy_func, queue };
1648 MonoGCRootMarkFunc marker = sgen_get_user_descriptor_func (desc);
1649 set_user_copy_or_mark_data (&data);
1650 marker (start_root, single_arg_user_copy_or_mark);
1651 set_user_copy_or_mark_data (NULL);
1654 case ROOT_DESC_RUN_LEN:
1655 g_assert_not_reached ();
1657 g_assert_not_reached ();
1662 reset_heap_boundaries (void)
1664 lowest_heap_address = ~(mword)0;
1665 highest_heap_address = 0;
1669 sgen_update_heap_boundaries (mword low, mword high)
1674 old = lowest_heap_address;
1677 } while (SGEN_CAS_PTR ((gpointer*)&lowest_heap_address, (gpointer)low, (gpointer)old) != (gpointer)old);
1680 old = highest_heap_address;
1683 } while (SGEN_CAS_PTR ((gpointer*)&highest_heap_address, (gpointer)high, (gpointer)old) != (gpointer)old);
1687 * Allocate and setup the data structures needed to be able to allocate objects
1688 * in the nursery. The nursery is stored in nursery_section.
/*
 * Allocates and initializes the nursery section (data area, scan-start
 * table, allocator bounds).  Idempotent: returns immediately if the nursery
 * already exists.  Aborts if the memory governor cannot grant the space.
 */
1691 alloc_nursery (void)
1693 GCMemSection *section;
1698 if (nursery_section)
1700 SGEN_LOG (2, "Allocating nursery size: %lu", (unsigned long)sgen_nursery_size);
1701 /* later we will alloc a larger area for the nursery but only activate
1702 * what we need. The rest will be used as expansion if we have too many pinned
1703 * objects in the existing nursery.
1705 /* FIXME: handle OOM */
1706 section = sgen_alloc_internal (INTERNAL_MEM_SECTION);
1708 alloc_size = sgen_nursery_size;
1710 /* If there isn't enough space even for the nursery we should simply abort. */
1711 g_assert (sgen_memgov_try_alloc_space (alloc_size, SPACE_NURSERY));
/* An aligned nursery enables cheap shift-based pointer membership tests. */
1713 #ifdef SGEN_ALIGN_NURSERY
1714 data = major_collector.alloc_heap (alloc_size, alloc_size, DEFAULT_NURSERY_BITS);
1716 data = major_collector.alloc_heap (alloc_size, 0, DEFAULT_NURSERY_BITS);
1718 sgen_update_heap_boundaries ((mword)data, (mword)(data + sgen_nursery_size));
1719 SGEN_LOG (4, "Expanding nursery size (%p-%p): %lu, total: %lu", data, data + alloc_size, (unsigned long)sgen_nursery_size, (unsigned long)mono_gc_get_heap_size ());
1720 section->data = section->next_data = data;
1721 section->size = alloc_size;
1722 section->end_data = data + sgen_nursery_size;
/* One scan-start slot per SCAN_START_SIZE chunk, rounded up. */
1723 scan_starts = (alloc_size + SCAN_START_SIZE - 1) / SCAN_START_SIZE;
1724 section->scan_starts = sgen_alloc_internal_dynamic (sizeof (char*) * scan_starts, INTERNAL_MEM_SCAN_STARTS, TRUE);
1725 section->num_scan_start = scan_starts;
1727 nursery_section = section;
1729 sgen_nursery_allocator_set_nursery_bounds (data, data + sgen_nursery_size);
1733 mono_gc_get_nursery (int *shift_bits, size_t *size)
1735 *size = sgen_nursery_size;
1736 #ifdef SGEN_ALIGN_NURSERY
1737 *shift_bits = DEFAULT_NURSERY_BITS;
1741 return sgen_get_nursery_start ();
1745 mono_gc_set_current_thread_appdomain (MonoDomain *domain)
1747 SgenThreadInfo *info = mono_thread_info_current ();
1749 /* Could be called from sgen_thread_unregister () with a NULL info */
1752 info->stopped_domain = domain;
1757 mono_gc_precise_stack_mark_enabled (void)
1759 return !conservative_stack_mark;
1763 mono_gc_get_logfile (void)
1765 return gc_debug_file;
1769 report_finalizer_roots_list (FinalizeReadyEntry *list)
1771 GCRootReport report;
1772 FinalizeReadyEntry *fin;
1775 for (fin = list; fin; fin = fin->next) {
1778 add_profile_gc_root (&report, fin->object, MONO_PROFILE_GC_ROOT_FINALIZER, 0);
1780 notify_gc_roots (&report);
1784 report_finalizer_roots (void)
1786 report_finalizer_roots_list (fin_ready_list);
1787 report_finalizer_roots_list (critical_fin_list);
1790 static GCRootReport *root_report;
1793 single_arg_report_root (void **obj)
1796 add_profile_gc_root (root_report, *obj, MONO_PROFILE_GC_ROOT_OTHER, 0);
/*
 * Profiler analogue of precisely_scan_objects_from (): walks the root area
 * [start_root, end_root) per descriptor DESC and adds every referenced
 * object to REPORT instead of copying it.
 */
1800 precisely_report_roots_from (GCRootReport *report, void** start_root, void** end_root, mword desc)
1802 switch (desc & ROOT_DESC_TYPE_MASK) {
1803 case ROOT_DESC_BITMAP:
/* Inline bitmap: one bit per pointer-sized slot. */
1804 desc >>= ROOT_DESC_TYPE_SHIFT;
1806 if ((desc & 1) && *start_root) {
1807 add_profile_gc_root (report, *start_root, MONO_PROFILE_GC_ROOT_OTHER, 0);
1813 case ROOT_DESC_COMPLEX: {
/* Out-of-line bitmap: first word is the word count, rest are bits. */
1814 gsize *bitmap_data = sgen_get_complex_descriptor_bitmap (desc);
1815 int bwords = (*bitmap_data) - 1;
1816 void **start_run = start_root;
1818 while (bwords-- > 0) {
1819 gsize bmap = *bitmap_data++;
1820 void **objptr = start_run;
1822 if ((bmap & 1) && *objptr) {
1823 add_profile_gc_root (report, *objptr, MONO_PROFILE_GC_ROOT_OTHER, 0);
1828 start_run += GC_BITS_PER_WORD;
1832 case ROOT_DESC_USER: {
/* Let the user marker enumerate slots into the shared report. */
1833 MonoGCRootMarkFunc marker = sgen_get_user_descriptor_func (desc);
1834 root_report = report;
1835 marker (start_root, single_arg_report_root);
1838 case ROOT_DESC_RUN_LEN:
1839 g_assert_not_reached ();
1841 g_assert_not_reached ();
1846 report_registered_roots_by_type (int root_type)
1848 GCRootReport report;
1852 SGEN_HASH_TABLE_FOREACH (&roots_hash [root_type], start_root, root) {
1853 SGEN_LOG (6, "Precise root scan %p-%p (desc: %p)", start_root, root->end_root, (void*)root->root_desc);
1854 precisely_report_roots_from (&report, start_root, (void**)root->end_root, root->root_desc);
1855 } SGEN_HASH_TABLE_FOREACH_END;
1856 notify_gc_roots (&report);
1860 report_registered_roots (void)
1862 report_registered_roots_by_type (ROOT_TYPE_NORMAL);
1863 report_registered_roots_by_type (ROOT_TYPE_WBARRIER);
1867 scan_finalizer_entries (FinalizeReadyEntry *list, ScanCopyContext ctx)
1869 CopyOrMarkObjectFunc copy_func = ctx.copy_func;
1870 SgenGrayQueue *queue = ctx.queue;
1871 FinalizeReadyEntry *fin;
1873 for (fin = list; fin; fin = fin->next) {
1876 SGEN_LOG (5, "Scan of fin ready object: %p (%s)\n", fin->object, safe_name (fin->object));
1877 copy_func (&fin->object, queue);
1882 generation_name (int generation)
1884 switch (generation) {
1885 case GENERATION_NURSERY: return "nursery";
1886 case GENERATION_OLD: return "old";
1887 default: g_assert_not_reached ();
/* Public wrapper around the file-local generation_name (). */
const char*
sgen_generation_name (int generation)
{
	return generation_name (generation);
}
1897 SgenObjectOperations *
1898 sgen_get_current_object_ops (void){
1899 return ¤t_object_ops;
/*
 * Completes a collection after the roots have been scanned: drains the gray
 * stack to transitively copy/mark all reachable objects, then interleaves
 * ephemeron marking, toggleref scanning, bridge processing, weak-link
 * nulling and finalizer promotion — each followed by further drains — until
 * the gray queue is provably empty.
 */
1904 finish_gray_stack (char *start_addr, char *end_addr, int generation, GrayQueue *queue)
1908 int done_with_ephemerons, ephemeron_rounds = 0;
1909 CopyOrMarkObjectFunc copy_func = current_object_ops.copy_or_mark_object;
1910 ScanObjectFunc scan_func = current_object_ops.scan_object;
1911 ScanCopyContext ctx = { scan_func, copy_func, queue };
1914 * We copied all the reachable objects. Now it's the time to copy
1915 * the objects that were not referenced by the roots, but by the copied objects.
1916 * we built a stack of objects pointed to by gray_start: they are
1917 * additional roots and we may add more items as we go.
1918 * We loop until gray_start == gray_objects which means no more objects have
1919 * been added. Note this is iterative: no recursion is involved.
1920 * We need to walk the LO list as well in search of marked big objects
1921 * (use a flag since this is needed only on major collections). We need to loop
1922 * here as well, so keep a counter of marked LO (increasing it in copy_object).
1923 * To achieve better cache locality and cache usage, we drain the gray stack
1924 * frequently, after each object is copied, and just finish the work here.
1926 sgen_drain_gray_stack (-1, ctx);
1928 SGEN_LOG (2, "%s generation done", generation_name (generation));
1931 Reset bridge data, we might have lingering data from a previous collection if this is a major
1932 collection trigged by minor overflow.
1934 We must reset the gathered bridges since their original block might be evacuated due to major
1935 fragmentation in the meanwhile and the bridge code should not have to deal with that.
1937 sgen_bridge_reset_data ();
1940 * Walk the ephemeron tables marking all values with reachable keys. This must be completely done
1941 * before processing finalizable objects and non-tracking weak links to avoid finalizing/clearing
1942 * objects that are in fact reachable.
1944 done_with_ephemerons = 0;
/* Iterate to a fixed point: marking a value can make another key reachable. */
1946 done_with_ephemerons = mark_ephemerons_in_range (ctx);
1947 sgen_drain_gray_stack (-1, ctx);
1949 } while (!done_with_ephemerons);
1951 sgen_scan_togglerefs (start_addr, end_addr, ctx);
1952 if (generation == GENERATION_OLD)
1953 sgen_scan_togglerefs (sgen_get_nursery_start (), sgen_get_nursery_end (), ctx);
1955 if (sgen_need_bridge_processing ()) {
1956 sgen_collect_bridge_objects (generation, ctx);
1957 if (generation == GENERATION_OLD)
1958 sgen_collect_bridge_objects (GENERATION_NURSERY, ctx);
1962 Make sure we drain the gray stack before processing disappearing links and finalizers.
1963 If we don't make sure it is empty we might wrongly see a live object as dead.
1965 sgen_drain_gray_stack (-1, ctx);
1968 We must clear weak links that don't track resurrection before processing object ready for
1969 finalization so they can be cleared before that.
1971 sgen_null_link_in_range (generation, TRUE, ctx);
1972 if (generation == GENERATION_OLD)
1973 sgen_null_link_in_range (GENERATION_NURSERY, TRUE, ctx);
1976 /* walk the finalization queue and move also the objects that need to be
1977 * finalized: use the finalized objects as new roots so the objects they depend
1978 * on are also not reclaimed. As with the roots above, only objects in the nursery
1979 * are marked/copied.
1981 sgen_finalize_in_range (generation, ctx);
1982 if (generation == GENERATION_OLD)
1983 sgen_finalize_in_range (GENERATION_NURSERY, ctx);
1984 /* drain the new stack that might have been created */
1985 SGEN_LOG (6, "Precise scan of gray area post fin");
1986 sgen_drain_gray_stack (-1, ctx);
1989 * This must be done again after processing finalizable objects since CWL slots are cleared only after the key is finalized.
1991 done_with_ephemerons = 0;
1993 done_with_ephemerons = mark_ephemerons_in_range (ctx);
1994 sgen_drain_gray_stack (-1, ctx);
1996 } while (!done_with_ephemerons);
1999 * Clear ephemeron pairs with unreachable keys.
2000 * We pass the copy func so we can figure out if an array was promoted or not.
2002 clear_unreachable_ephemerons (generation == GENERATION_OLD && major_collector.is_concurrent, ctx);
2005 SGEN_LOG (2, "Finalize queue handling scan for %s generation: %d usecs %d ephemeron rounds", generation_name (generation), TV_ELAPSED (atv, btv), ephemeron_rounds);
2008 * handle disappearing links
2009 * Note we do this after checking the finalization queue because if an object
2010 * survives (at least long enough to be finalized) we don't clear the link.
2011 * This also deals with a possible issue with the monitor reclamation: with the Boehm
2012 * GC a finalized object my lose the monitor because it is cleared before the finalizer is
2015 g_assert (sgen_gray_object_queue_is_empty (queue));
2017 sgen_null_link_in_range (generation, FALSE, ctx);
2018 if (generation == GENERATION_OLD)
2019 sgen_null_link_in_range (GENERATION_NURSERY, FALSE, ctx);
2020 if (sgen_gray_object_queue_is_empty (queue))
2022 sgen_drain_gray_stack (-1, ctx);
/* The collection is only complete once nothing is left to scan. */
2025 g_assert (sgen_gray_object_queue_is_empty (queue));
2029 sgen_check_section_scan_starts (GCMemSection *section)
2032 for (i = 0; i < section->num_scan_start; ++i) {
2033 if (section->scan_starts [i]) {
2034 guint size = safe_object_get_size ((MonoObject*) section->scan_starts [i]);
2035 g_assert (size >= sizeof (MonoObject) && size <= MAX_SMALL_OBJ_SIZE);
2041 check_scan_starts (void)
2043 if (!do_scan_starts_check)
2045 sgen_check_section_scan_starts (nursery_section);
2046 major_collector.check_scan_starts ();
2050 scan_from_registered_roots (char *addr_start, char *addr_end, int root_type, ScanCopyContext ctx)
2054 SGEN_HASH_TABLE_FOREACH (&roots_hash [root_type], start_root, root) {
2055 SGEN_LOG (6, "Precise root scan %p-%p (desc: %p)", start_root, root->end_root, (void*)root->root_desc);
2056 precisely_scan_objects_from (start_root, (void**)root->end_root, addr_start, addr_end, root->root_desc, ctx);
2057 } SGEN_HASH_TABLE_FOREACH_END;
2061 sgen_dump_occupied (char *start, char *end, char *section_start)
2063 fprintf (heap_dump_file, "<occupied offset=\"%td\" size=\"%td\"/>\n", start - section_start, end - start);
/*
 * sgen_dump_section:
 * Walks @section front to back and writes a <section> XML element to
 * heap_dump_file: runs of zero words are treated as holes (ending any
 * current <occupied> run), and each live object is emitted as an <object>
 * element with its class and aligned size.
 * (NOTE: several original lines are elided in this view.)
 */
2067 sgen_dump_section (GCMemSection *section, const char *type)
2069 char *start = section->data;
2070 char *end = section->data + section->size;
2071 char *occ_start = NULL;
2073 char *old_start = NULL; /* just for debugging */
2075 fprintf (heap_dump_file, "<section type=\"%s\" size=\"%lu\">\n", type, (unsigned long)section->size);
2077 while (start < end) {
/* A NULL first word means unallocated space: close any occupied run and skip a word. */
2081 if (!*(void**)start) {
2083 sgen_dump_occupied (occ_start, start, section->data);
2086 start += sizeof (void*); /* should be ALLOC_ALIGN, really */
2089 g_assert (start < section->next_data);
/* Non-NULL word: treat it as an object vtable and advance by the object's aligned size. */
2094 vt = (GCVTable*)LOAD_VTABLE (start);
2097 size = ALIGN_UP (safe_object_get_size ((MonoObject*) start));
2100 fprintf (heap_dump_file, "<object offset=\"%d\" class=\"%s.%s\" size=\"%d\"/>\n",
2101 start - section->data,
2102 vt->klass->name_space, vt->klass->name,
/* Flush a trailing occupied run before closing the section element. */
2110 sgen_dump_occupied (occ_start, start, section->data);
2112 fprintf (heap_dump_file, "</section>\n");
/*
 * dump_object:
 * Writes one <object> XML element for @obj to heap_dump_file, with its
 * namespace, sanitized class name, and size. When @dump_location is TRUE a
 * location attribute ("nursery", or a size-based classification) is added.
 * The class name is copied into a static buffer — not thread-safe; assumed
 * to run only during a stopped-world dump (TODO confirm).
 */
2116 dump_object (MonoObject *obj, gboolean dump_location)
2118 static char class_name [1024];
2120 MonoClass *class = mono_object_class (obj);
/* Strip '<', '>' and '"' from the class name:
2124 * Python's XML parser is too stupid to parse angle brackets
2125 * in strings, so we just ignore them;
*/
2128 while (class->name [i] && j < sizeof (class_name) - 1) {
2129 if (!strchr ("<>\"", class->name [i]))
2130 class_name [j++] = class->name [i];
2133 g_assert (j < sizeof (class_name));
2136 fprintf (heap_dump_file, "<object class=\"%s.%s\" size=\"%d\"",
2137 class->name_space, class_name,
2138 safe_object_get_size (obj));
2139 if (dump_location) {
2140 const char *location;
2141 if (ptr_in_nursery (obj))
2142 location = "nursery";
/* Small objects live in the major heap sections; the remaining case (elided here) is LOS. */
2143 else if (safe_object_get_size (obj) <= MAX_SMALL_OBJ_SIZE)
2147 fprintf (heap_dump_file, " location=\"%s\"", location);
2149 fprintf (heap_dump_file, "/>\n");
/*
 * dump_heap:
 * Writes a complete <collection> XML record of the current heap state to
 * heap_dump_file: memory-usage summaries, pinned-byte counters, the list of
 * pinned objects, the nursery section, the major heap, and all LOS objects.
 * @type/@num identify the collection; @reason, when non-NULL, is recorded too.
 */
2153 dump_heap (const char *type, int num, const char *reason)
2158 fprintf (heap_dump_file, "<collection type=\"%s\" num=\"%d\"", type, num);
2160 fprintf (heap_dump_file, " reason=\"%s\"", reason);
2161 fprintf (heap_dump_file, ">\n");
2162 fprintf (heap_dump_file, "<other-mem-usage type=\"mempools\" size=\"%ld\"/>\n", mono_mempool_get_bytes_allocated ());
2163 sgen_dump_internal_mem_usage (heap_dump_file);
2164 fprintf (heap_dump_file, "<pinned type=\"stack\" bytes=\"%zu\"/>\n", sgen_pin_stats_get_pinned_byte_count (PIN_TYPE_STACK));
2165 /* fprintf (heap_dump_file, "<pinned type=\"static-data\" bytes=\"%d\"/>\n", pinned_byte_counts [PIN_TYPE_STATIC_DATA]); */
2166 fprintf (heap_dump_file, "<pinned type=\"other\" bytes=\"%zu\"/>\n", sgen_pin_stats_get_pinned_byte_count (PIN_TYPE_OTHER));
/* Individual pinned objects, with location info. */
2168 fprintf (heap_dump_file, "<pinned-objects>\n");
2169 for (list = sgen_pin_stats_get_object_list (); list; list = list->next)
2170 dump_object (list->obj, TRUE);
2171 fprintf (heap_dump_file, "</pinned-objects>\n");
2173 sgen_dump_section (nursery_section, "nursery");
2175 major_collector.dump_heap (heap_dump_file);
/* Large-object-space objects are dumped without location info (it is implied). */
2177 fprintf (heap_dump_file, "<los>\n");
2178 for (bigobj = los_object_list; bigobj; bigobj = bigobj->next)
2179 dump_object ((MonoObject*)bigobj->data, FALSE);
2180 fprintf (heap_dump_file, "</los>\n");
2182 fprintf (heap_dump_file, "</collection>\n");
/*
 * sgen_register_moved_object:
 * Records a (source, destination) pair for the profiler's GC-move events.
 * Pairs are buffered in moved_objects; when the buffer is full it is flushed
 * to mono_profiler_gc_moves before the new pair is appended. Assumes
 * MOVED_OBJECTS_NUM is even so pairs never straddle a flush — TODO confirm.
 * Only valid when move profiling is enabled and the collector is not
 * running in parallel mode (asserted below).
 */
2186 sgen_register_moved_object (void *obj, void *destination)
2188 g_assert (mono_profiler_events & MONO_PROFILE_GC_MOVES);
2190 /* FIXME: handle this for parallel collector */
2191 g_assert (!sgen_collection_is_parallel ());
2193 if (moved_objects_idx == MOVED_OBJECTS_NUM) {
2194 mono_profiler_gc_moves (moved_objects, moved_objects_idx);
2195 moved_objects_idx = 0;
2197 moved_objects [moved_objects_idx++] = obj;
2198 moved_objects [moved_objects_idx++] = destination;
/*
 * Body of the statistics-initialization routine (the signature line is
 * elided in this view — presumably init_stats; confirm against full source).
 * Registers all GC timing and event counters with the mono counters
 * subsystem, guarded by a run-once flag.
 */
static gboolean inited = FALSE;
/* Minor (nursery) collection phase timers. */
2209 mono_counters_register ("Minor fragment clear", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_minor_pre_collection_fragment_clear);
2210 mono_counters_register ("Minor pinning", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_minor_pinning);
2211 mono_counters_register ("Minor scan remembered set", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_minor_scan_remsets);
2212 mono_counters_register ("Minor scan pinned", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_minor_scan_pinned);
2213 mono_counters_register ("Minor scan registered roots", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_minor_scan_registered_roots);
2214 mono_counters_register ("Minor scan thread data", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_minor_scan_thread_data);
2215 mono_counters_register ("Minor finish gray stack", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_minor_finish_gray_stack);
2216 mono_counters_register ("Minor fragment creation", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_minor_fragment_creation);
/* Major collection phase timers. */
2218 mono_counters_register ("Major fragment clear", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_major_pre_collection_fragment_clear);
2219 mono_counters_register ("Major pinning", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_major_pinning);
2220 mono_counters_register ("Major scan pinned", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_major_scan_pinned);
2221 mono_counters_register ("Major scan registered roots", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_major_scan_registered_roots);
2222 mono_counters_register ("Major scan thread data", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_major_scan_thread_data);
2223 mono_counters_register ("Major scan alloc_pinned", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_major_scan_alloc_pinned);
2224 mono_counters_register ("Major scan finalized", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_major_scan_finalized);
2225 mono_counters_register ("Major scan big objects", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_major_scan_big_objects);
2226 mono_counters_register ("Major finish gray stack", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_major_finish_gray_stack);
2227 mono_counters_register ("Major free big objects", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_major_free_bigobjs);
2228 mono_counters_register ("Major LOS sweep", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_major_los_sweep);
2229 mono_counters_register ("Major sweep", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_major_sweep);
2230 mono_counters_register ("Major fragment creation", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_major_fragment_creation);
2232 mono_counters_register ("Number of pinned objects", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_pinned_objects);
/* The counters below exist only in heavy-statistics builds. */
2234 #ifdef HEAVY_STATISTICS
2235 mono_counters_register ("WBarrier set field", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_set_field);
2236 mono_counters_register ("WBarrier set arrayref", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_set_arrayref);
2237 mono_counters_register ("WBarrier arrayref copy", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_arrayref_copy);
2238 mono_counters_register ("WBarrier generic store called", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_generic_store);
2239 mono_counters_register ("WBarrier set root", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_set_root);
2240 mono_counters_register ("WBarrier value copy", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_value_copy);
2241 mono_counters_register ("WBarrier object copy", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_object_copy);
2243 mono_counters_register ("# objects allocated degraded", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_objects_alloced_degraded);
2244 mono_counters_register ("bytes allocated degraded", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_bytes_alloced_degraded);
2246 mono_counters_register ("# copy_object() called (nursery)", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_copy_object_called_nursery);
2247 mono_counters_register ("# objects copied (nursery)", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_objects_copied_nursery);
2248 mono_counters_register ("# copy_object() called (major)", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_copy_object_called_major);
2249 mono_counters_register ("# objects copied (major)", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_objects_copied_major);
2251 mono_counters_register ("# scan_object() called (nursery)", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_scan_object_called_nursery);
2252 mono_counters_register ("# scan_object() called (major)", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_scan_object_called_major);
2254 mono_counters_register ("Slots allocated in vain", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_slots_allocated_in_vain);
2256 mono_counters_register ("# nursery copy_object() failed from space", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_nursery_copy_object_failed_from_space);
2257 mono_counters_register ("# nursery copy_object() failed forwarded", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_nursery_copy_object_failed_forwarded);
2258 mono_counters_register ("# nursery copy_object() failed pinned", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_nursery_copy_object_failed_pinned);
2259 mono_counters_register ("# nursery copy_object() failed to space", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_nursery_copy_object_failed_to_space);
2261 sgen_nursery_allocator_init_heavy_stats ();
2262 sgen_alloc_init_heavy_stats ();
/* Reset the per-collection tally of bytes pinned due to failed allocations. */
2270 reset_pinned_from_failed_allocation (void)
2272 bytes_pinned_from_failed_allocation = 0;
/* Account @objsize bytes as pinned because a copy/evacuation allocation failed. */
2276 sgen_set_pinned_from_failed_allocation (mword objsize)
2278 bytes_pinned_from_failed_allocation += objsize;
/*
 * sgen_collection_is_parallel:
 * Returns whether the collection currently in progress runs with parallel
 * workers: for nursery collections this is the nursery_collection_is_parallel
 * flag, for major collections the major collector's is_parallel flag.
 * Any other current generation value is a fatal error.
 */
2282 sgen_collection_is_parallel (void)
2284 switch (current_collection_generation) {
2285 case GENERATION_NURSERY:
2286 return nursery_collection_is_parallel;
2287 case GENERATION_OLD:
2288 return major_collector.is_parallel;
2290 g_error ("Invalid current generation %d", current_collection_generation);
/*
 * sgen_collection_is_concurrent:
 * Returns whether the collection currently in progress is concurrent.
 * Nursery collections are never concurrent (the FALSE return line is elided
 * in this view); major collections defer to the collector's is_concurrent flag.
 */
2295 sgen_collection_is_concurrent (void)
2297 switch (current_collection_generation) {
2298 case GENERATION_NURSERY:
2300 case GENERATION_OLD:
2301 return major_collector.is_concurrent;
2303 g_error ("Invalid current generation %d", current_collection_generation);
/* Accessor for the global flag tracking an in-flight concurrent collection. */
2308 sgen_concurrent_collection_in_progress (void)
2310 return concurrent_collection_in_progress;
2317 } FinishRememberedSetScanJobData;
/*
 * Worker-job entry point: finishes remembered-set scanning over the job's
 * [heap_start, heap_end) range using the worker's gray queue, then frees
 * the heap-allocated job data.
 */
2320 job_finish_remembered_set_scan (WorkerData *worker_data, void *job_data_untyped)
2322 FinishRememberedSetScanJobData *job_data = job_data_untyped;
2324 remset.finish_scan_remsets (job_data->heap_start, job_data->heap_end, sgen_workers_get_job_gray_queue (worker_data));
2325 sgen_free_internal_dynamic (job_data, sizeof (FinishRememberedSetScanJobData), INTERNAL_MEM_WORKER_JOB_DATA);
2330 CopyOrMarkObjectFunc copy_or_mark_func;
2331 ScanObjectFunc scan_func;
2335 } ScanFromRegisteredRootsJobData;
/*
 * Worker-job entry point: scans registered roots of the job's root_type
 * within [heap_start, heap_end), building a ScanCopyContext from the job's
 * scan/copy functions and the worker's gray queue, then frees the job data.
 */
2338 job_scan_from_registered_roots (WorkerData *worker_data, void *job_data_untyped)
2340 ScanFromRegisteredRootsJobData *job_data = job_data_untyped;
2341 ScanCopyContext ctx = { job_data->scan_func, job_data->copy_or_mark_func,
2342 sgen_workers_get_job_gray_queue (worker_data) };
2344 scan_from_registered_roots (job_data->heap_start, job_data->heap_end, job_data->root_type, ctx);
2345 sgen_free_internal_dynamic (job_data, sizeof (ScanFromRegisteredRootsJobData), INTERNAL_MEM_WORKER_JOB_DATA);
2352 } ScanThreadDataJobData;
/*
 * Worker-job entry point: conservatively scans thread stacks/registers for
 * pointers into [heap_start, heap_end) (the TRUE argument's exact meaning
 * is defined by scan_thread_data — confirm in full source), then frees the
 * job data.
 */
2355 job_scan_thread_data (WorkerData *worker_data, void *job_data_untyped)
2357 ScanThreadDataJobData *job_data = job_data_untyped;
2359 scan_thread_data (job_data->heap_start, job_data->heap_end, TRUE,
2360 sgen_workers_get_job_gray_queue (worker_data));
2361 sgen_free_internal_dynamic (job_data, sizeof (ScanThreadDataJobData), INTERNAL_MEM_WORKER_JOB_DATA);
2366 FinalizeReadyEntry *list;
2367 } ScanFinalizerEntriesJobData;
/*
 * Worker-job entry point: marks/copies the objects referenced by the job's
 * finalizer-entry list so finalizable objects survive long enough to be
 * finalized, then frees the job data. The context's scan function is NULL —
 * only copy_or_mark is needed here.
 */
2370 job_scan_finalizer_entries (WorkerData *worker_data, void *job_data_untyped)
2372 ScanFinalizerEntriesJobData *job_data = job_data_untyped;
2373 ScanCopyContext ctx = { NULL, current_object_ops.copy_or_mark_object, sgen_workers_get_job_gray_queue (worker_data) };
2375 scan_finalizer_entries (job_data->list, ctx);
2376 sgen_free_internal_dynamic (job_data, sizeof (ScanFinalizerEntriesJobData), INTERNAL_MEM_WORKER_JOB_DATA);
/*
 * Worker-job entry point: scans the major heap's mod-union card table
 * (TRUE selects the mod-union variant). Only valid while a concurrent
 * collection is in progress.
 */
2380 job_scan_major_mod_union_cardtable (WorkerData *worker_data, void *job_data_untyped)
2382 g_assert (concurrent_collection_in_progress);
2383 major_collector.scan_card_table (TRUE, sgen_workers_get_job_gray_queue (worker_data));
/*
 * Worker-job entry point: scans the large-object-space mod-union card table.
 * Only valid while a concurrent collection is in progress.
 */
2387 job_scan_los_mod_union_cardtable (WorkerData *worker_data, void *job_data_untyped)
2389 g_assert (concurrent_collection_in_progress);
2390 sgen_los_scan_card_table (TRUE, sgen_workers_get_job_gray_queue (worker_data));
/*
 * verify_scan_starts:
 * Debug helper: logs any nursery scan-start pointer that falls strictly
 * inside the object span (start, end) — a scan start must never point into
 * the middle of an object.
 */
2394 verify_scan_starts (char *start, char *end)
2398 for (i = 0; i < nursery_section->num_scan_start; ++i) {
2399 char *addr = nursery_section->scan_starts [i];
2400 if (addr > start && addr < end)
2401 SGEN_LOG (1, "NFC-BAD SCAN START [%d] %p for obj [%p %p]", i, addr, start, end);
/*
 * verify_nursery:
 * Debug walk of the whole nursery (enabled by do_verify_nursery): skips
 * holes (zero words), logs forwarded and pinned objects, checks that no
 * scan start points into an object body, and optionally dumps every hole
 * and object when do_dump_nursery_content is set.
 */
2406 verify_nursery (void)
2408 char *start, *end, *cur, *hole_start;
2410 if (!do_verify_nursery)
2413 /*This cleans up unused fragments */
2414 sgen_nursery_allocator_prepare_for_pinning ();
2416 hole_start = start = cur = sgen_get_nursery_start ();
2417 end = sgen_get_nursery_end ();
/* A NULL first word is free space: advance one word and keep the hole open. */
2422 if (!*(void**)cur) {
2423 cur += sizeof (void*);
2427 if (object_is_forwarded (cur))
2428 SGEN_LOG (1, "FORWARDED OBJ %p", cur);
2429 else if (object_is_pinned (cur))
2430 SGEN_LOG (1, "PINNED OBJ %p", cur);
/* ss = raw object size, size = allocation-aligned size used for walking. */
2432 ss = safe_object_get_size ((MonoObject*)cur);
2433 size = ALIGN_UP (safe_object_get_size ((MonoObject*)cur));
2434 verify_scan_starts (cur, cur + size);
2435 if (do_dump_nursery_content) {
2436 if (cur > hole_start)
2437 SGEN_LOG (1, "HOLE [%p %p %d]", hole_start, cur, (int)(cur - hole_start));
2438 SGEN_LOG (1, "OBJ [%p %p %d %d %s %d]", cur, cur + size, (int)size, (int)ss, sgen_safe_name ((MonoObject*)cur), (gpointer)LOAD_VTABLE (cur) == sgen_get_array_fill_vtable ());
2446 * Checks that no objects in the nursery are forwarded or pinned. This
2447 * is a precondition to restarting the mutator while doing a
2448 * concurrent collection. Note that we don't clear fragments because
2449 * we depend on that having happened earlier.
2452 check_nursery_is_clean (void)
2454 char *start, *end, *cur;
2456 start = cur = sgen_get_nursery_start ();
2457 end = sgen_get_nursery_end ();
/* NULL first word = free space; step one word at a time through it. */
2462 if (!*(void**)cur) {
2463 cur += sizeof (void*);
/* Hard requirement: a "clean" nursery has no forwarded or pinned objects. */
2467 g_assert (!object_is_forwarded (cur));
2468 g_assert (!object_is_pinned (cur));
2470 ss = safe_object_get_size ((MonoObject*)cur);
2471 size = ALIGN_UP (safe_object_get_size ((MonoObject*)cur));
2472 verify_scan_starts (cur, cur + size);
/*
 * init_gray_queue:
 * Initializes the collector's gray queues for the upcoming collection.
 * Parallel/concurrent collections get an alloc-prepare callback that
 * redirects overflow sections to the workers' distribute queue; otherwise a
 * plain queue is used. The remember-major-objects queue is only valid with
 * a concurrent major collector.
 */
2479 init_gray_queue (void)
2481 if (sgen_collection_is_parallel () || sgen_collection_is_concurrent ()) {
2482 sgen_workers_init_distribute_gray_queue ();
2483 sgen_gray_object_queue_init_with_alloc_prepare (&gray_queue, NULL,
2484 gray_queue_redirect, sgen_workers_get_distribute_section_gray_queue ());
2486 sgen_gray_object_queue_init (&gray_queue, NULL);
2489 if (major_collector.is_concurrent) {
2490 sgen_gray_object_queue_init_with_alloc_prepare (&remember_major_objects_gray_queue, NULL,
2491 gray_queue_redirect, sgen_workers_get_distribute_section_gray_queue ());
2493 sgen_gray_object_queue_init_invalid (&remember_major_objects_gray_queue);
/*
 * Iteration callback (e.g. for sgen_cement_iterate): stages @obj for
 * pinning; @size and @data are unused.
 */
2498 pin_stage_object_callback (char *obj, size_t size, void *data)
2500 sgen_pin_stage_ptr (obj);
2501 /* FIXME: do pin stats if enabled */
/*
 * collect_nursery:
 * Performs a stop-the-world minor (nursery) collection. Returns whether a
 * major collection should follow (objects were pinned for lack of memory).
 * @unpin_queue and @finish_up_concurrent_mark feed the concurrent-mark
 * finishing path. Broad phases, in order: setup and pinning, remembered-set
 * scan, root scans (registered roots, thread data, finalizer lists),
 * draining the gray stack, and rebuilding nursery fragments.
 * (NOTE: many original lines are elided in this view.)
 */
2505 * Collect objects in the nursery. Returns whether to trigger a major
2509 collect_nursery (SgenGrayQueue *unpin_queue, gboolean finish_up_concurrent_mark)
2511 gboolean needs_major;
2512 size_t max_garbage_amount;
2514 FinishRememberedSetScanJobData *frssjd;
2515 ScanFromRegisteredRootsJobData *scrrjd_normal, *scrrjd_wbarrier;
2516 ScanFinalizerEntriesJobData *sfejd_fin_ready, *sfejd_critical_fin;
2517 ScanThreadDataJobData *stdjd;
2518 mword fragment_total;
2519 ScanCopyContext ctx;
2520 TV_DECLARE (all_atv);
2521 TV_DECLARE (all_btv);
2525 if (disable_minor_collections)
/* --- Setup: announce the collection and select the object ops. --- */
2528 MONO_GC_BEGIN (GENERATION_NURSERY);
2529 binary_protocol_collection_begin (stat_minor_gcs, GENERATION_NURSERY);
2533 #ifndef DISABLE_PERFCOUNTERS
2534 mono_perfcounters->gc_collections0++;
2537 current_collection_generation = GENERATION_NURSERY;
2538 if (sgen_collection_is_parallel ())
2539 current_object_ops = sgen_minor_collector.parallel_ops;
2541 current_object_ops = sgen_minor_collector.serial_ops;
2543 reset_pinned_from_failed_allocation ();
2545 check_scan_starts ();
2547 sgen_nursery_alloc_prepare_for_minor ();
/* Upper bound of the area to collect; clamped to the nursery end for now. */
2551 nursery_next = sgen_nursery_alloc_get_upper_alloc_bound ();
2552 /* FIXME: optimize later to use the higher address where an object can be present */
2553 nursery_next = MAX (nursery_next, sgen_get_nursery_end ());
2555 SGEN_LOG (1, "Start nursery collection %d %p-%p, size: %d", stat_minor_gcs, sgen_get_nursery_start (), nursery_next, (int)(nursery_next - sgen_get_nursery_start ()));
2556 max_garbage_amount = nursery_next - sgen_get_nursery_start ();
2557 g_assert (nursery_section->size >= max_garbage_amount);
2559 /* world must be stopped already */
2560 TV_GETTIME (all_atv);
2564 time_minor_pre_collection_fragment_clear += TV_ELAPSED (atv, btv);
2566 if (xdomain_checks) {
2567 sgen_clear_nursery_fragments ();
2568 check_for_xdomain_refs ();
2571 nursery_section->next_data = nursery_next;
2573 major_collector.start_nursery_collection ();
2575 sgen_memgov_minor_collection_start ();
2580 gc_stats.minor_gc_count ++;
2582 if (remset.prepare_for_minor_collection)
2583 remset.prepare_for_minor_collection ();
2585 MONO_GC_CHECKPOINT_1 (GENERATION_NURSERY);
2587 sgen_process_fin_stage_entries ();
2588 sgen_process_dislink_stage_entries ();
2590 MONO_GC_CHECKPOINT_2 (GENERATION_NURSERY);
/* --- Pinning phase: collect pin candidates from roots and cemented objects,
 * then resolve them to actual pinned objects in the nursery section. --- */
2592 /* pin from pinned handles */
2593 sgen_init_pinning ();
2594 mono_profiler_gc_event (MONO_GC_EVENT_MARK_START, 0);
2595 pin_from_roots (sgen_get_nursery_start (), nursery_next, WORKERS_DISTRIBUTE_GRAY_QUEUE);
2596 /* pin cemented objects */
2597 sgen_cement_iterate (pin_stage_object_callback, NULL);
2598 /* identify pinned objects */
2599 sgen_optimize_pin_queue (0);
2600 sgen_pinning_setup_section (nursery_section);
2601 ctx.scan_func = NULL;
2602 ctx.copy_func = NULL;
2603 ctx.queue = WORKERS_DISTRIBUTE_GRAY_QUEUE;
2604 sgen_pin_objects_in_section (nursery_section, ctx);
2605 sgen_pinning_trim_queue_to_section (nursery_section);
2608 time_minor_pinning += TV_ELAPSED (btv, atv);
2609 SGEN_LOG (2, "Finding pinned pointers: %d in %d usecs", sgen_get_pinned_count (), TV_ELAPSED (btv, atv));
2610 SGEN_LOG (4, "Start scan with %d pinned objects", sgen_get_pinned_count ());
2612 MONO_GC_CHECKPOINT_3 (GENERATION_NURSERY);
2614 if (whole_heap_check_before_collection) {
2615 sgen_clear_nursery_fragments ();
2616 sgen_check_whole_heap (finish_up_concurrent_mark);
2618 if (consistency_check_at_minor_collection)
2619 sgen_check_consistency ();
/* --- Remembered-set scan: sequential prologue, then a worker job. --- */
2621 sgen_workers_start_all_workers ();
2624 * Perform the sequential part of remembered set scanning.
2625 * This usually involves scanning global information that might later be produced by evacuation.
2627 if (remset.begin_scan_remsets)
2628 remset.begin_scan_remsets (sgen_get_nursery_start (), nursery_next, WORKERS_DISTRIBUTE_GRAY_QUEUE);
2630 sgen_workers_start_marking ();
2632 frssjd = sgen_alloc_internal_dynamic (sizeof (FinishRememberedSetScanJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
2633 frssjd->heap_start = sgen_get_nursery_start ();
2634 frssjd->heap_end = nursery_next;
2635 sgen_workers_enqueue_job (job_finish_remembered_set_scan, frssjd);
2637 /* we don't have complete write barrier yet, so we scan all the old generation sections */
2639 time_minor_scan_remsets += TV_ELAPSED (atv, btv);
2640 SGEN_LOG (2, "Old generation scan: %d usecs", TV_ELAPSED (atv, btv));
2642 MONO_GC_CHECKPOINT_4 (GENERATION_NURSERY);
/* Serial collections drain the gray stack inline instead of via workers. */
2644 if (!sgen_collection_is_parallel ()) {
2645 ctx.scan_func = current_object_ops.scan_object;
2646 ctx.copy_func = NULL;
2647 ctx.queue = &gray_queue;
2648 sgen_drain_gray_stack (-1, ctx);
2651 if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS)
2652 report_registered_roots ();
2653 if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS)
2654 report_finalizer_roots ();
2656 time_minor_scan_pinned += TV_ELAPSED (btv, atv);
2658 MONO_GC_CHECKPOINT_5 (GENERATION_NURSERY);
/* --- Root scans: registered roots (normal + wbarrier), thread data. --- */
2660 /* registered roots, this includes static fields */
2661 scrrjd_normal = sgen_alloc_internal_dynamic (sizeof (ScanFromRegisteredRootsJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
2662 scrrjd_normal->copy_or_mark_func = current_object_ops.copy_or_mark_object;
2663 scrrjd_normal->scan_func = current_object_ops.scan_object;
2664 scrrjd_normal->heap_start = sgen_get_nursery_start ();
2665 scrrjd_normal->heap_end = nursery_next;
2666 scrrjd_normal->root_type = ROOT_TYPE_NORMAL;
2667 sgen_workers_enqueue_job (job_scan_from_registered_roots, scrrjd_normal);
2669 scrrjd_wbarrier = sgen_alloc_internal_dynamic (sizeof (ScanFromRegisteredRootsJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
2670 scrrjd_wbarrier->copy_or_mark_func = current_object_ops.copy_or_mark_object;
2671 scrrjd_wbarrier->scan_func = current_object_ops.scan_object;
2672 scrrjd_wbarrier->heap_start = sgen_get_nursery_start ();
2673 scrrjd_wbarrier->heap_end = nursery_next;
2674 scrrjd_wbarrier->root_type = ROOT_TYPE_WBARRIER;
2675 sgen_workers_enqueue_job (job_scan_from_registered_roots, scrrjd_wbarrier);
2678 time_minor_scan_registered_roots += TV_ELAPSED (atv, btv);
2680 MONO_GC_CHECKPOINT_6 (GENERATION_NURSERY);
2683 stdjd = sgen_alloc_internal_dynamic (sizeof (ScanThreadDataJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
2684 stdjd->heap_start = sgen_get_nursery_start ();
2685 stdjd->heap_end = nursery_next;
2686 sgen_workers_enqueue_job (job_scan_thread_data, stdjd);
2689 time_minor_scan_thread_data += TV_ELAPSED (btv, atv);
2692 MONO_GC_CHECKPOINT_7 (GENERATION_NURSERY);
2694 g_assert (!sgen_collection_is_parallel () && !sgen_collection_is_concurrent ());
2696 if (sgen_collection_is_parallel () || sgen_collection_is_concurrent ())
2697 g_assert (sgen_gray_object_queue_is_empty (&gray_queue));
/* --- Finalizer lists: keep ready and critical finalizable objects alive. --- */
2699 /* Scan the list of objects ready for finalization. If */
2700 sfejd_fin_ready = sgen_alloc_internal_dynamic (sizeof (ScanFinalizerEntriesJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
2701 sfejd_fin_ready->list = fin_ready_list;
2702 sgen_workers_enqueue_job (job_scan_finalizer_entries, sfejd_fin_ready);
2704 sfejd_critical_fin = sgen_alloc_internal_dynamic (sizeof (ScanFinalizerEntriesJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
2705 sfejd_critical_fin->list = critical_fin_list;
2706 sgen_workers_enqueue_job (job_scan_finalizer_entries, sfejd_critical_fin);
2708 MONO_GC_CHECKPOINT_8 (GENERATION_NURSERY);
/* --- Finish marking: drain everything reachable, end the mark phase. --- */
2710 finish_gray_stack (sgen_get_nursery_start (), nursery_next, GENERATION_NURSERY, &gray_queue);
2712 time_minor_finish_gray_stack += TV_ELAPSED (btv, atv);
2713 mono_profiler_gc_event (MONO_GC_EVENT_MARK_END, 0);
2715 MONO_GC_CHECKPOINT_9 (GENERATION_NURSERY);
2718 * The (single-threaded) finalization code might have done
2719 * some copying/marking so we can only reset the GC thread's
2720 * worker data here instead of earlier when we joined the
2723 sgen_workers_reset_data ();
/* Late pinning (from failed allocations) requires re-sorting the pin queue. */
2725 if (objects_pinned) {
2726 sgen_optimize_pin_queue (0);
2727 sgen_pinning_setup_section (nursery_section);
/* --- Reclaim: rebuild the nursery free-fragment list around pinned objects. --- */
2730 /* walk the pin_queue, build up the fragment list of free memory, unmark
2731 * pinned objects as we go, memzero() the empty fragments so they are ready for the
2734 mono_profiler_gc_event (MONO_GC_EVENT_RECLAIM_START, 0);
2735 fragment_total = sgen_build_nursery_fragments (nursery_section,
2736 nursery_section->pin_queue_start, nursery_section->pin_queue_num_entries,
2738 if (!fragment_total)
2741 /* Clear TLABs for all threads */
2742 sgen_clear_tlabs ();
2744 mono_profiler_gc_event (MONO_GC_EVENT_RECLAIM_END, 0);
2746 time_minor_fragment_creation += TV_ELAPSED (atv, btv);
2747 SGEN_LOG (2, "Fragment creation: %d usecs, %lu bytes available", TV_ELAPSED (atv, btv), (unsigned long)fragment_total);
2749 if (consistency_check_at_minor_collection)
2750 sgen_check_major_refs ();
2752 major_collector.finish_nursery_collection ();
2754 TV_GETTIME (all_btv);
2755 gc_stats.minor_gc_time_usecs += TV_ELAPSED (all_atv, all_btv);
2758 dump_heap ("minor", stat_minor_gcs - 1, NULL);
/* --- Teardown: wake finalizers, reset stats, close out the collection. --- */
2760 /* prepare the pin queue for the next collection */
2761 sgen_finish_pinning ();
2762 if (fin_ready_list || critical_fin_list) {
2763 SGEN_LOG (4, "Finalizer-thread wakeup: ready %d", num_ready_finalizers);
2764 mono_gc_finalize_notify ();
2766 sgen_pin_stats_reset ();
2767 /* clear cemented hash */
2768 sgen_cement_clear_below_threshold ();
2770 g_assert (sgen_gray_object_queue_is_empty (&gray_queue));
2772 if (remset.finish_minor_collection)
2773 remset.finish_minor_collection ();
2775 check_scan_starts ();
2777 binary_protocol_flush_buffers (FALSE);
2779 sgen_memgov_minor_collection_end ();
2781 /*objects are late pinned because of lack of memory, so a major is a good call*/
2782 needs_major = objects_pinned > 0;
2783 current_collection_generation = -1;
2786 MONO_GC_END (GENERATION_NURSERY);
2787 binary_protocol_collection_end (stat_minor_gcs - 1, GENERATION_NURSERY);
2789 if (check_nursery_objects_pinned && !sgen_minor_collector.is_split)
2790 sgen_check_nursery_objects_pinned (unpin_queue != NULL);
/*
 * Per-object callback used by scan_nursery_objects: scans @obj with the
 * context's scan function; @size is unused here.
 */
2796 scan_nursery_objects_callback (char *obj, size_t size, ScanCopyContext *ctx)
2798 ctx->scan_func (obj, ctx->queue);
/*
 * scan_nursery_objects:
 * Scans every object in the nursery section front to back using @ctx
 * (needed by the split-nursery concurrent path, where non-pinned objects
 * can survive in the nursery).
 */
2802 scan_nursery_objects (ScanCopyContext ctx)
2804 sgen_scan_area_with_callback (nursery_section->data, nursery_section->end_data,
2805 (IterateObjectCallbackFunc)scan_nursery_objects_callback, (void*)&ctx, FALSE);
2809 major_copy_or_mark_from_roots (int *old_next_pin_slot, gboolean finish_up_concurrent_mark, gboolean scan_mod_union)
2814 /* FIXME: only use these values for the precise scan
2815 * note that to_space pointers should be excluded anyway...
2817 char *heap_start = NULL;
2818 char *heap_end = (char*)-1;
2819 gboolean profile_roots = mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS;
2820 GCRootReport root_report = { 0 };
2821 ScanFromRegisteredRootsJobData *scrrjd_normal, *scrrjd_wbarrier;
2822 ScanThreadDataJobData *stdjd;
2823 ScanFinalizerEntriesJobData *sfejd_fin_ready, *sfejd_critical_fin;
2824 ScanCopyContext ctx;
2826 if (major_collector.is_concurrent) {
2827 /*This cleans up unused fragments */
2828 sgen_nursery_allocator_prepare_for_pinning ();
2830 if (do_concurrent_checks)
2831 check_nursery_is_clean ();
2833 /* The concurrent collector doesn't touch the nursery. */
2834 sgen_nursery_alloc_prepare_for_major ();
2841 /* Pinning depends on this */
2842 sgen_clear_nursery_fragments ();
2844 if (whole_heap_check_before_collection)
2845 sgen_check_whole_heap (finish_up_concurrent_mark);
2847 if (!major_collector.is_concurrent)
2848 sgen_cement_reset ();
2851 time_major_pre_collection_fragment_clear += TV_ELAPSED (atv, btv);
2853 if (!sgen_collection_is_concurrent ())
2854 nursery_section->next_data = sgen_get_nursery_end ();
2855 /* we should also coalesce scanning from sections close to each other
2856 * and deal with pointers outside of the sections later.
2860 *major_collector.have_swept = FALSE;
2862 if (xdomain_checks) {
2863 sgen_clear_nursery_fragments ();
2864 check_for_xdomain_refs ();
2867 if (!major_collector.is_concurrent) {
2868 /* Remsets are not useful for a major collection */
2869 remset.prepare_for_major_collection ();
2872 sgen_process_fin_stage_entries ();
2873 sgen_process_dislink_stage_entries ();
2876 sgen_init_pinning ();
2877 SGEN_LOG (6, "Collecting pinned addresses");
2878 pin_from_roots ((void*)lowest_heap_address, (void*)highest_heap_address, WORKERS_DISTRIBUTE_GRAY_QUEUE);
2879 sgen_optimize_pin_queue (0);
2882 * The concurrent collector doesn't move objects, neither on
2883 * the major heap nor in the nursery, so we can mark even
2884 * before pinning has finished. For the non-concurrent
2885 * collector we start the workers after pinning.
2887 if (major_collector.is_concurrent) {
2888 sgen_workers_start_all_workers ();
2889 sgen_workers_start_marking ();
2893 * pin_queue now contains all candidate pointers, sorted and
2894 * uniqued. We must do two passes now to figure out which
2895 * objects are pinned.
2897 * The first is to find within the pin_queue the area for each
2898 * section. This requires that the pin_queue be sorted. We
2899 * also process the LOS objects and pinned chunks here.
2901 * The second, destructive, pass is to reduce the section
2902 * areas to pointers to the actually pinned objects.
2904 SGEN_LOG (6, "Pinning from sections");
2905 /* first pass for the sections */
2906 sgen_find_section_pin_queue_start_end (nursery_section);
2907 major_collector.find_pin_queue_start_ends (WORKERS_DISTRIBUTE_GRAY_QUEUE);
2908 /* identify possible pointers to the inside of large objects */
2909 SGEN_LOG (6, "Pinning from large objects");
2910 for (bigobj = los_object_list; bigobj; bigobj = bigobj->next) {
2912 if (sgen_find_optimized_pin_queue_area (bigobj->data, (char*)bigobj->data + sgen_los_object_size (bigobj), &dummy)) {
2913 binary_protocol_pin (bigobj->data, (gpointer)LOAD_VTABLE (bigobj->data), safe_object_get_size (((MonoObject*)(bigobj->data))));
2915 #ifdef ENABLE_DTRACE
2916 if (G_UNLIKELY (MONO_GC_OBJ_PINNED_ENABLED ())) {
2917 MonoVTable *vt = (MonoVTable*)LOAD_VTABLE (bigobj->data);
2918 MONO_GC_OBJ_PINNED ((mword)bigobj->data, sgen_safe_object_get_size ((MonoObject*)bigobj->data), vt->klass->name_space, vt->klass->name, GENERATION_OLD);
2922 if (sgen_los_object_is_pinned (bigobj->data)) {
2923 g_assert (finish_up_concurrent_mark);
2926 sgen_los_pin_object (bigobj->data);
2927 /* FIXME: only enqueue if object has references */
2928 GRAY_OBJECT_ENQUEUE (WORKERS_DISTRIBUTE_GRAY_QUEUE, bigobj->data);
2929 if (G_UNLIKELY (do_pin_stats))
2930 sgen_pin_stats_register_object ((char*) bigobj->data, safe_object_get_size ((MonoObject*) bigobj->data));
2931 SGEN_LOG (6, "Marked large object %p (%s) size: %lu from roots", bigobj->data, safe_name (bigobj->data), (unsigned long)sgen_los_object_size (bigobj));
2934 add_profile_gc_root (&root_report, bigobj->data, MONO_PROFILE_GC_ROOT_PINNING | MONO_PROFILE_GC_ROOT_MISC, 0);
2938 notify_gc_roots (&root_report);
2939 /* second pass for the sections */
2940 ctx.scan_func = concurrent_collection_in_progress ? current_object_ops.scan_object : NULL;
2941 ctx.copy_func = NULL;
2942 ctx.queue = WORKERS_DISTRIBUTE_GRAY_QUEUE;
2945 * Concurrent mark never follows references into the nursery.
2946 * In the start and finish pauses we must scan live nursery
2947 * objects, though. We could simply scan all nursery objects,
2948 * but that would be conservative. The easiest way is to do a
2949 * nursery collection, which copies all live nursery objects
2950 * (except pinned ones, with the simple nursery) to the major
2951 * heap. Scanning the mod union table later will then scan
2952 * those promoted objects, provided they're reachable. Pinned
2953 * objects in the nursery - which we can trivially find in the
2954 * pinning queue - are treated as roots in the mark pauses.
2956 * The split nursery complicates the latter part because
2957 * non-pinned objects can survive in the nursery. That's why
2958 * we need to do a full front-to-back scan of the nursery,
2959 * marking all objects.
2961 * Non-concurrent mark evacuates from the nursery, so it's
2962 * sufficient to just scan pinned nursery objects.
2964 if (major_collector.is_concurrent && sgen_minor_collector.is_split) {
2965 scan_nursery_objects (ctx);
2967 sgen_pin_objects_in_section (nursery_section, ctx);
2968 if (check_nursery_objects_pinned && !sgen_minor_collector.is_split)
2969 sgen_check_nursery_objects_pinned (!concurrent_collection_in_progress || finish_up_concurrent_mark);
2972 major_collector.pin_objects (WORKERS_DISTRIBUTE_GRAY_QUEUE);
2973 if (old_next_pin_slot)
2974 *old_next_pin_slot = sgen_get_pinned_count ();
2977 time_major_pinning += TV_ELAPSED (atv, btv);
2978 SGEN_LOG (2, "Finding pinned pointers: %d in %d usecs", sgen_get_pinned_count (), TV_ELAPSED (atv, btv));
2979 SGEN_LOG (4, "Start scan with %d pinned objects", sgen_get_pinned_count ());
2981 major_collector.init_to_space ();
2983 #ifdef SGEN_DEBUG_INTERNAL_ALLOC
2984 main_gc_thread = mono_native_thread_self ();
2987 if (!major_collector.is_concurrent) {
2988 sgen_workers_start_all_workers ();
2989 sgen_workers_start_marking ();
2992 if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS)
2993 report_registered_roots ();
2995 time_major_scan_pinned += TV_ELAPSED (btv, atv);
2997 /* registered roots, this includes static fields */
2998 scrrjd_normal = sgen_alloc_internal_dynamic (sizeof (ScanFromRegisteredRootsJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
2999 scrrjd_normal->copy_or_mark_func = current_object_ops.copy_or_mark_object;
3000 scrrjd_normal->scan_func = current_object_ops.scan_object;
3001 scrrjd_normal->heap_start = heap_start;
3002 scrrjd_normal->heap_end = heap_end;
3003 scrrjd_normal->root_type = ROOT_TYPE_NORMAL;
3004 sgen_workers_enqueue_job (job_scan_from_registered_roots, scrrjd_normal);
3006 scrrjd_wbarrier = sgen_alloc_internal_dynamic (sizeof (ScanFromRegisteredRootsJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
3007 scrrjd_wbarrier->copy_or_mark_func = current_object_ops.copy_or_mark_object;
3008 scrrjd_wbarrier->scan_func = current_object_ops.scan_object;
3009 scrrjd_wbarrier->heap_start = heap_start;
3010 scrrjd_wbarrier->heap_end = heap_end;
3011 scrrjd_wbarrier->root_type = ROOT_TYPE_WBARRIER;
3012 sgen_workers_enqueue_job (job_scan_from_registered_roots, scrrjd_wbarrier);
3015 time_major_scan_registered_roots += TV_ELAPSED (atv, btv);
3018 stdjd = sgen_alloc_internal_dynamic (sizeof (ScanThreadDataJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
3019 stdjd->heap_start = heap_start;
3020 stdjd->heap_end = heap_end;
3021 sgen_workers_enqueue_job (job_scan_thread_data, stdjd);
3024 time_major_scan_thread_data += TV_ELAPSED (btv, atv);
3027 time_major_scan_alloc_pinned += TV_ELAPSED (atv, btv);
3029 if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS)
3030 report_finalizer_roots ();
3032 /* scan the list of objects ready for finalization */
3033 sfejd_fin_ready = sgen_alloc_internal_dynamic (sizeof (ScanFinalizerEntriesJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
3034 sfejd_fin_ready->list = fin_ready_list;
3035 sgen_workers_enqueue_job (job_scan_finalizer_entries, sfejd_fin_ready);
3037 sfejd_critical_fin = sgen_alloc_internal_dynamic (sizeof (ScanFinalizerEntriesJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
3038 sfejd_critical_fin->list = critical_fin_list;
3039 sgen_workers_enqueue_job (job_scan_finalizer_entries, sfejd_critical_fin);
3041 if (scan_mod_union) {
3042 g_assert (finish_up_concurrent_mark);
3044 /* Mod union card table */
3045 sgen_workers_enqueue_job (job_scan_major_mod_union_cardtable, NULL);
3046 sgen_workers_enqueue_job (job_scan_los_mod_union_cardtable, NULL);
3050 time_major_scan_finalized += TV_ELAPSED (btv, atv);
3051 SGEN_LOG (2, "Root scan: %d usecs", TV_ELAPSED (btv, atv));
3054 time_major_scan_big_objects += TV_ELAPSED (atv, btv);
3056 if (major_collector.is_concurrent) {
3057 /* prepare the pin queue for the next collection */
3058 sgen_finish_pinning ();
3060 sgen_pin_stats_reset ();
3062 if (do_concurrent_checks)
3063 check_nursery_is_clean ();
/*
 * Begin a major (old-generation) collection: emit begin events/protocol
 * records, switch the current collection generation to OLD, and start
 * copying/marking from the roots.  *old_next_pin_slot receives the pin-queue
 * cursor for use by major_finish_collection (may be NULL for concurrent
 * starts — see major_start_concurrent_collection).
 * NOTE(review): this excerpt is missing intermediate lines (braces, timing
 * calls); confirm against the full file before editing.
 */
3068 major_start_collection (int *old_next_pin_slot)
3070 MONO_GC_BEGIN (GENERATION_OLD);
3071 binary_protocol_collection_begin (stat_major_gcs, GENERATION_OLD);
3073 current_collection_generation = GENERATION_OLD;
3074 #ifndef DISABLE_PERFCOUNTERS
3075 mono_perfcounters->gc_collections1++;
3078 g_assert (sgen_section_gray_queue_is_empty (sgen_workers_get_distribute_section_gray_queue ()));
/* A concurrent collector starts in "in progress" mode and must also tell
 * the cementing machinery that a concurrent cycle has begun. */
3080 if (major_collector.is_concurrent) {
3081 concurrent_collection_in_progress = TRUE;
3083 sgen_cement_concurrent_start ();
3086 current_object_ops = major_collector.major_ops;
3088 reset_pinned_from_failed_allocation ();
3090 sgen_memgov_major_collection_start ();
3092 //count_ref_nonref_objs ();
3093 //consistency_check ();
3095 check_scan_starts ();
3098 SGEN_LOG (1, "Start major collection %d", stat_major_gcs);
3100 gc_stats.major_gc_count ++;
/* Optional collector-specific start hook. */
3102 if (major_collector.start_major_collection)
3103 major_collector.start_major_collection ();
/* FALSE, FALSE: not finishing a concurrent mark, and no mod-union scan. */
3105 major_copy_or_mark_from_roots (old_next_pin_slot, FALSE, FALSE);
/*
 * Block until the GC worker threads have drained their work.  For
 * parallel/concurrent majors the main gray queue is first redirected to the
 * workers, then we join them; afterwards the main gray queue must be empty.
 */
3109 wait_for_workers_to_finish (void)
3111 g_assert (sgen_gray_object_queue_is_empty (&remember_major_objects_gray_queue));
3113 if (major_collector.is_parallel || major_collector.is_concurrent) {
3114 gray_queue_redirect (&gray_queue);
3115 sgen_workers_join ();
3118 g_assert (sgen_gray_object_queue_is_empty (&gray_queue));
/* Debug aid: there is no designated main GC thread once workers are done. */
3120 #ifdef SGEN_DEBUG_INTERNAL_ALLOC
3121 main_gc_thread = NULL;
/*
 * Finish a major collection (both the synchronous and the
 * finishing-a-concurrent case): drain the gray stack, run finalization
 * marking, sweep the LOS list and the major heap, rebuild nursery fragments,
 * and emit end events.  @old_next_pin_slot is the pin-queue cursor saved by
 * major_start_collection; @scan_mod_union is TRUE when finishing a
 * concurrent mark and the mod-union card tables must be scanned.
 * NOTE(review): excerpt is lossy — timing (TV_GETTIME) lines and several
 * braces are missing from this view.
 */
3126 major_finish_collection (const char *reason, int old_next_pin_slot, gboolean scan_mod_union)
3128 LOSObject *bigobj, *prevbo;
3131 char *heap_start = NULL;
3132 char *heap_end = (char*)-1;
3136 if (major_collector.is_concurrent || major_collector.is_parallel)
3137 wait_for_workers_to_finish ();
3139 current_object_ops = major_collector.major_ops;
/* Concurrent finish: do the final pause's root scan, then wait again. */
3141 if (major_collector.is_concurrent) {
3142 major_copy_or_mark_from_roots (NULL, TRUE, scan_mod_union);
3143 wait_for_workers_to_finish ();
3145 g_assert (sgen_gray_object_queue_is_empty (&gray_queue));
3147 if (do_concurrent_checks)
3148 check_nursery_is_clean ();
3152 * The workers have stopped so we need to finish gray queue
3153 * work that might result from finalization in the main GC
3154 * thread. Redirection must therefore be turned off.
3156 sgen_gray_object_queue_disable_alloc_prepare (&gray_queue);
3157 g_assert (sgen_section_gray_queue_is_empty (sgen_workers_get_distribute_section_gray_queue ()));
3159 /* all the objects in the heap */
3160 finish_gray_stack (heap_start, heap_end, GENERATION_OLD, &gray_queue);
3162 time_major_finish_gray_stack += TV_ELAPSED (btv, atv);
3165 * The (single-threaded) finalization code might have done
3166 * some copying/marking so we can only reset the GC thread's
3167 * worker data here instead of earlier when we joined the
3170 sgen_workers_reset_data ();
/* objects_pinned is set on pinned-from-failed-allocation (OOM recovery). */
3172 if (objects_pinned) {
3173 g_assert (!major_collector.is_concurrent);
3175 /*This is slow, but we just OOM'd*/
3176 sgen_pin_queue_clear_discarded_entries (nursery_section, old_next_pin_slot);
3177 sgen_optimize_pin_queue (0);
3178 sgen_find_section_pin_queue_start_end (nursery_section);
3182 reset_heap_boundaries ();
3183 sgen_update_heap_boundaries ((mword)sgen_get_nursery_start (), (mword)sgen_get_nursery_end ());
3185 if (check_mark_bits_after_major_collection)
3186 sgen_check_major_heap_marked ();
3188 MONO_GC_SWEEP_BEGIN (GENERATION_OLD, !major_collector.sweeps_lazily);
3190 /* sweep the big objects list */
3192 for (bigobj = los_object_list; bigobj;) {
3193 g_assert (!object_is_pinned (bigobj->data));
/* Pinned LOS objects survive: unpin for the next cycle and keep them
 * inside the recorded heap boundaries. */
3194 if (sgen_los_object_is_pinned (bigobj->data)) {
3195 sgen_los_unpin_object (bigobj->data);
3196 sgen_update_heap_boundaries ((mword)bigobj->data, (mword)bigobj->data + sgen_los_object_size (bigobj));
3199 /* not referenced anywhere, so we can free it */
3201 prevbo->next = bigobj->next;
3203 los_object_list = bigobj->next;
3205 bigobj = bigobj->next;
3206 sgen_los_free_object (to_free);
3210 bigobj = bigobj->next;
3214 time_major_free_bigobjs += TV_ELAPSED (atv, btv);
3219 time_major_los_sweep += TV_ELAPSED (btv, atv);
3221 major_collector.sweep ();
3223 MONO_GC_SWEEP_END (GENERATION_OLD, !major_collector.sweeps_lazily);
3226 time_major_sweep += TV_ELAPSED (atv, btv);
/* A concurrent major already rebuilt fragments in its nursery pauses;
 * only the non-concurrent path does it here. */
3228 if (!major_collector.is_concurrent) {
3229 /* walk the pin_queue, build up the fragment list of free memory, unmark
3230 * pinned objects as we go, memzero() the empty fragments so they are ready for the
3233 if (!sgen_build_nursery_fragments (nursery_section, nursery_section->pin_queue_start, nursery_section->pin_queue_num_entries, NULL))
3236 /* prepare the pin queue for the next collection */
3237 sgen_finish_pinning ();
3239 /* Clear TLABs for all threads */
3240 sgen_clear_tlabs ();
3242 sgen_pin_stats_reset ();
3245 if (major_collector.is_concurrent)
3246 sgen_cement_concurrent_finish ();
3247 sgen_cement_clear_below_threshold ();
3250 time_major_fragment_creation += TV_ELAPSED (btv, atv);
3253 dump_heap ("major", stat_major_gcs - 1, reason);
/* Wake the finalizer thread if any objects became ready. */
3255 if (fin_ready_list || critical_fin_list) {
3256 SGEN_LOG (4, "Finalizer-thread wakeup: ready %d", num_ready_finalizers);
3257 mono_gc_finalize_notify ();
3260 g_assert (sgen_gray_object_queue_is_empty (&gray_queue));
3262 sgen_memgov_major_collection_end ();
3263 current_collection_generation = -1;
3265 major_collector.finish_major_collection ();
3267 g_assert (sgen_section_gray_queue_is_empty (sgen_workers_get_distribute_section_gray_queue ()));
3269 if (major_collector.is_concurrent)
3270 concurrent_collection_in_progress = FALSE;
3272 check_scan_starts ();
3274 binary_protocol_flush_buffers (FALSE);
3276 //consistency_check ();
3278 MONO_GC_END (GENERATION_OLD);
3279 binary_protocol_collection_end (stat_major_gcs - 1, GENERATION_OLD);
/*
 * Run a full synchronous major collection (start + finish) with the world
 * already stopped.  Returns whether objects were pinned because an
 * allocation failed during the collection (caller may then do an overflow
 * nursery collection).
 */
3283 major_do_collection (const char *reason)
3285 TV_DECLARE (all_atv);
3286 TV_DECLARE (all_btv);
3287 int old_next_pin_slot;
/* Sanity: the marked-objects counter must start at zero. */
3289 if (major_collector.get_and_reset_num_major_objects_marked) {
3290 long long num_marked = major_collector.get_and_reset_num_major_objects_marked ();
3291 g_assert (!num_marked);
3294 /* world must be stopped already */
3295 TV_GETTIME (all_atv);
3297 major_start_collection (&old_next_pin_slot);
3298 major_finish_collection (reason, old_next_pin_slot, FALSE);
3300 TV_GETTIME (all_btv);
3301 gc_stats.major_gc_time_usecs += TV_ELAPSED (all_atv, all_btv);
3303 /* FIXME: also report this to the user, preferably in gc-end. */
3304 if (major_collector.get_and_reset_num_major_objects_marked)
3305 major_collector.get_and_reset_num_major_objects_marked ();
3307 return bytes_pinned_from_failed_allocation > 0;
/* NOTE(review): this forward declaration appears after the definition above
 * and looks redundant — verify against the full file before removing. */
3310 static gboolean major_do_collection (const char *reason);
/*
 * Kick off a concurrent major collection: run the start pause, hand the
 * gray queue to the workers, and return with the world to be restarted
 * while marking proceeds in the background.
 */
3313 major_start_concurrent_collection (const char *reason)
3315 long long num_objects_marked = major_collector.get_and_reset_num_major_objects_marked ();
3317 g_assert (num_objects_marked == 0);
3319 MONO_GC_CONCURRENT_START_BEGIN (GENERATION_OLD);
3321 // FIXME: store reason and pass it when finishing
/* NULL: no pin-slot cursor is needed for the concurrent start. */
3322 major_start_collection (NULL);
3324 gray_queue_redirect (&gray_queue);
3325 sgen_workers_wait_for_jobs ();
3327 num_objects_marked = major_collector.get_and_reset_num_major_objects_marked ();
3328 MONO_GC_CONCURRENT_START_END (GENERATION_OLD, num_objects_marked);
3330 current_collection_generation = -1;
/*
 * Advance a running concurrent major collection.  Updates the mod-union
 * card tables; if the workers are not done (and @force_finish is FALSE)
 * returns early, otherwise performs the finishing pause: a nursery
 * collection (collecting pinned objects into @unpin_queue), then
 * major_finish_collection with mod-union scanning.
 * NOTE(review): the return statements are not visible in this excerpt;
 * presumably it returns whether the collection was finished — confirm.
 */
3334 major_update_or_finish_concurrent_collection (gboolean force_finish)
3336 SgenGrayQueue unpin_queue;
3337 memset (&unpin_queue, 0, sizeof (unpin_queue));
3339 MONO_GC_CONCURRENT_UPDATE_FINISH_BEGIN (GENERATION_OLD, major_collector.get_and_reset_num_major_objects_marked ());
3341 g_assert (sgen_gray_object_queue_is_empty (&gray_queue));
3342 if (!have_non_collection_major_object_remembers)
3343 g_assert (sgen_gray_object_queue_is_empty (&remember_major_objects_gray_queue));
3345 major_collector.update_cardtable_mod_union ();
3346 sgen_los_update_cardtable_mod_union ();
/* Workers still marking: just record the update and keep running. */
3348 if (!force_finish && !sgen_workers_all_done ()) {
3349 MONO_GC_CONCURRENT_UPDATE_END (GENERATION_OLD, major_collector.get_and_reset_num_major_objects_marked ());
3353 collect_nursery (&unpin_queue, TRUE);
3354 redirect_major_object_remembers ();
3356 current_collection_generation = GENERATION_OLD;
3357 major_finish_collection ("finishing", -1, TRUE);
3359 if (whole_heap_check_before_collection)
3360 sgen_check_whole_heap (FALSE);
/* Objects pinned only for the finishing pause can be unpinned now. */
3362 unpin_objects_from_queue (&unpin_queue);
3363 sgen_gray_object_queue_deinit (&unpin_queue);
3365 MONO_GC_CONCURRENT_FINISH_END (GENERATION_OLD, major_collector.get_and_reset_num_major_objects_marked ());
3367 current_collection_generation = -1;
3373 * Ensure an allocation request for @size will succeed by freeing enough memory.
3375 * LOCKING: The GC lock MUST be held.
3378 sgen_ensure_free_space (size_t size)
3380 int generation_to_collect = -1;
3381 const char *reason = NULL;
/* Decide which generation to collect based on where the request would be
 * satisfied: LOS for big objects, major in degraded mode or when the minor
 * allowance is exhausted, otherwise a nursery collection. */
3384 if (size > SGEN_MAX_SMALL_OBJ_SIZE) {
3385 if (sgen_need_major_collection (size)) {
3386 reason = "LOS overflow";
3387 generation_to_collect = GENERATION_OLD;
3390 if (degraded_mode) {
3391 if (sgen_need_major_collection (size)) {
3392 reason = "Degraded mode overflow";
3393 generation_to_collect = GENERATION_OLD;
3395 } else if (sgen_need_major_collection (size)) {
3396 reason = "Minor allowance";
3397 generation_to_collect = GENERATION_OLD;
3399 generation_to_collect = GENERATION_NURSERY;
3400 reason = "Nursery full";
/* No collection strictly needed, but if a concurrent major has finished
 * marking, take the opportunity to complete it. */
3404 if (generation_to_collect == -1) {
3405 if (concurrent_collection_in_progress && sgen_workers_all_done ()) {
3406 generation_to_collect = GENERATION_OLD;
3407 reason = "Finish concurrent collection";
3411 if (generation_to_collect == -1)
3413 sgen_perform_collection (size, generation_to_collect, reason, FALSE);
/*
 * Top-level collection driver.  Stops the world, runs the requested
 * collection (nursery or major, possibly starting/finishing a concurrent
 * major), handles overflow collections, records timing info for the
 * profiler, and restarts the world.
 * @requested_size: the allocation that triggered the collection (0 if none).
 * @wait_to_finish: if TRUE a concurrent major is finished synchronously.
 * NOTE(review): excerpt is lossy — some braces and early-return lines are
 * not visible here.
 */
3417 sgen_perform_collection (size_t requested_size, int generation_to_collect, const char *reason, gboolean wait_to_finish)
3419 TV_DECLARE (gc_end);
3420 GGTimingInfo infos [2];
3421 int overflow_generation_to_collect = -1;
3422 int oldest_generation_collected = generation_to_collect;
3423 const char *overflow_reason = NULL;
3425 MONO_GC_REQUESTED (generation_to_collect, requested_size, wait_to_finish ? 1 : 0);
3427 g_assert (generation_to_collect == GENERATION_NURSERY || generation_to_collect == GENERATION_OLD);
3429 if (have_non_collection_major_object_remembers) {
3430 g_assert (concurrent_collection_in_progress);
3431 redirect_major_object_remembers ();
/* infos[0] is the requested collection; infos[1] is a possible overflow
 * collection (generation == -1 means "unused"). */
3434 memset (infos, 0, sizeof (infos));
3435 mono_profiler_gc_event (MONO_GC_EVENT_START, generation_to_collect);
3437 infos [0].generation = generation_to_collect;
3438 infos [0].reason = reason;
3439 infos [0].is_overflow = FALSE;
3440 TV_GETTIME (infos [0].total_time);
3441 infos [1].generation = -1;
3443 sgen_stop_world (generation_to_collect);
/* A concurrent major in flight gets updated (or finished, if this is a
 * synchronous major request) before anything else. */
3445 if (concurrent_collection_in_progress) {
3446 if (major_update_or_finish_concurrent_collection (wait_to_finish && generation_to_collect == GENERATION_OLD)) {
3447 oldest_generation_collected = GENERATION_OLD;
3450 if (generation_to_collect == GENERATION_OLD)
3454 //FIXME extract overflow reason
3455 if (generation_to_collect == GENERATION_NURSERY) {
3456 if (collect_nursery (NULL, FALSE)) {
3457 overflow_generation_to_collect = GENERATION_OLD;
3458 overflow_reason = "Minor overflow";
3460 if (concurrent_collection_in_progress) {
3461 redirect_major_object_remembers ();
3462 sgen_workers_wake_up_all ();
3465 SgenGrayQueue unpin_queue;
3466 SgenGrayQueue *unpin_queue_ptr;
3467 memset (&unpin_queue, 0, sizeof (unpin_queue));
/* Only a synchronous major with a concurrent collector needs to track
 * objects pinned by the preceding nursery collection. */
3469 if (major_collector.is_concurrent && wait_to_finish)
3470 unpin_queue_ptr = &unpin_queue;
3472 unpin_queue_ptr = NULL;
3474 if (major_collector.is_concurrent) {
3475 g_assert (!concurrent_collection_in_progress);
3476 collect_nursery (unpin_queue_ptr, FALSE);
3479 if (major_collector.is_concurrent && !wait_to_finish) {
3480 major_start_concurrent_collection (reason);
3481 // FIXME: set infos[0] properly
3484 if (major_do_collection (reason)) {
3485 overflow_generation_to_collect = GENERATION_NURSERY;
3486 overflow_reason = "Excessive pinning";
3490 if (unpin_queue_ptr) {
3491 unpin_objects_from_queue (unpin_queue_ptr);
3492 sgen_gray_object_queue_deinit (unpin_queue_ptr);
3496 TV_GETTIME (gc_end);
3497 infos [0].total_time = SGEN_TV_ELAPSED (infos [0].total_time, gc_end);
/* Overflow collections are skipped while a concurrent major runs. */
3500 if (!major_collector.is_concurrent && overflow_generation_to_collect != -1) {
3501 mono_profiler_gc_event (MONO_GC_EVENT_START, overflow_generation_to_collect);
3502 infos [1].generation = overflow_generation_to_collect;
3503 infos [1].reason = overflow_reason;
3504 infos [1].is_overflow = TRUE;
3505 infos [1].total_time = gc_end;
3507 if (overflow_generation_to_collect == GENERATION_NURSERY)
3508 collect_nursery (NULL, FALSE);
3510 major_do_collection (overflow_reason);
3512 TV_GETTIME (gc_end);
3513 infos [1].total_time = SGEN_TV_ELAPSED (infos [1].total_time, gc_end);
3515 /* keep events symmetric */
3516 mono_profiler_gc_event (MONO_GC_EVENT_END, overflow_generation_to_collect);
3518 oldest_generation_collected = MAX (oldest_generation_collected, overflow_generation_to_collect);
3521 SGEN_LOG (2, "Heap size: %lu, LOS size: %lu", (unsigned long)mono_gc_get_heap_size (), (unsigned long)los_memory_usage);
3523 /* this also sets the proper pointers for the next allocation */
3524 if (generation_to_collect == GENERATION_NURSERY && !sgen_can_alloc_size (requested_size)) {
3525 /* TypeBuilder and MonoMethod are killing mcs with fragmentation */
3526 SGEN_LOG (1, "nursery collection didn't find enough room for %zd alloc (%d pinned)", requested_size, sgen_get_pinned_count ());
3527 sgen_dump_pin_queue ();
3532 g_assert (sgen_gray_object_queue_is_empty (&gray_queue));
3533 g_assert (sgen_gray_object_queue_is_empty (&remember_major_objects_gray_queue));
3535 sgen_restart_world (oldest_generation_collected, infos);
3537 mono_profiler_gc_event (MONO_GC_EVENT_END, generation_to_collect);
3541 * ######################################################################
3542 * ######## Memory allocation from the OS
3543 * ######################################################################
3544 * This section of code deals with getting memory from the OS and
3545 * allocating memory for GC-internal data structures.
3546 * Internal memory can be handled with a freelist for small objects.
/* Debug helper: print GC-internal and pinned memory usage to stdout.
 * Kept compiled (G_GNUC_UNUSED) so it can be called from a debugger. */
3552 G_GNUC_UNUSED static void
3553 report_internal_mem_usage (void)
3555 printf ("Internal memory usage:\n");
3556 sgen_report_internal_mem_usage ();
3557 printf ("Pinned memory usage:\n");
3558 major_collector.report_pinned_memory_usage ();
3562 * ######################################################################
3563 * ######## Finalization support
3564 * ######################################################################
/*
 * Liveness check for a non-nursery object: pinned or forwarded objects are
 * alive; LOS objects are alive iff pinned; otherwise defer to the major
 * collector's per-object liveness test.
 */
3567 static inline gboolean
3568 sgen_major_is_object_alive (void *object)
3572 /* Oldgen objects can be pinned and forwarded too */
3573 if (SGEN_OBJECT_IS_PINNED (object) || SGEN_OBJECT_IS_FORWARDED (object))
3577 * FIXME: major_collector.is_object_live() also calculates the
3578 * size. Avoid the double calculation.
3580 objsize = SGEN_ALIGN_UP (sgen_safe_object_get_size ((MonoObject*)object));
/* Objects above the small-object threshold live in the LOS. */
3581 if (objsize > SGEN_MAX_SMALL_OBJ_SIZE)
3582 return sgen_los_object_is_pinned (object);
3584 return major_collector.is_object_live (object);
3588 * If the object has been forwarded it means it's still referenced from a root.
3589 * If it is pinned it's still alive as well.
3590 * A LOS object is only alive if we have pinned it.
3591 * Return TRUE if @obj is ready to be finalized.
/* Generic liveness check: dispatch on whether the object is in the nursery. */
3593 static inline gboolean
3594 sgen_is_object_alive (void *object)
3596 if (ptr_in_nursery (object))
3597 return sgen_nursery_is_object_alive (object);
3599 return sgen_major_is_object_alive (object);
3603 * This function returns true if @object is either alive or it belongs to the old gen
3604 * and we're currently doing a minor collection.
3607 sgen_is_object_alive_for_current_gen (char *object)
3609 if (ptr_in_nursery (object))
3610 return sgen_nursery_is_object_alive (object);
/* During a minor collection old-gen objects are not examined, so they
 * count as alive here (the TRUE return is not visible in this excerpt). */
3612 if (current_collection_generation == GENERATION_NURSERY)
3615 return sgen_major_is_object_alive (object);
3619 * This function returns true if @object is either alive and belongs to the
3620 * current collection - major collections are full heap, so old gen objects
3621 * are never alive during a minor collection.
3624 sgen_is_object_alive_and_on_current_collection (char *object)
3626 if (ptr_in_nursery (object))
3627 return sgen_nursery_is_object_alive (object);
/* Minor collections never mark old-gen objects, so they are "not on the
 * current collection" here (the early return is not visible in this excerpt). */
3629 if (current_collection_generation == GENERATION_NURSERY)
3632 return sgen_major_is_object_alive (object);
/* An object is ready for finalization exactly when it is no longer alive. */
3637 sgen_gc_is_object_ready_for_finalization (void *object)
3639 return !sgen_is_object_alive (object);
/* Returns whether @obj's class derives from CriticalFinalizerObject, i.e.
 * its finalizer must run in the critical (last) finalization phase. */
3643 has_critical_finalizer (MonoObject *obj)
3647 if (!mono_defaults.critical_finalizer_object)
3650 class = ((MonoVTable*)LOAD_VTABLE (obj))->klass;
3652 return mono_class_has_parent_fast (class, mono_defaults.critical_finalizer_object);
/*
 * Enqueue @obj for finalization, on the critical list if its class has a
 * critical finalizer, otherwise on the ordinary ready list.  Also fires the
 * dtrace finalize-enqueue probe when enabled.
 */
3656 sgen_queue_finalization_entry (MonoObject *obj)
3658 FinalizeReadyEntry *entry = sgen_alloc_internal (INTERNAL_MEM_FINALIZE_READY_ENTRY);
3659 gboolean critical = has_critical_finalizer (obj);
3660 entry->object = obj;
3662 entry->next = critical_fin_list;
3663 critical_fin_list = entry;
3665 entry->next = fin_ready_list;
3666 fin_ready_list = entry;
3669 #ifdef ENABLE_DTRACE
3670 if (G_UNLIKELY (MONO_GC_FINALIZE_ENQUEUE_ENABLED ())) {
3671 int gen = sgen_ptr_in_nursery (obj) ? GENERATION_NURSERY : GENERATION_OLD;
3672 MonoVTable *vt = (MonoVTable*)LOAD_VTABLE (obj);
3673 MONO_GC_FINALIZE_ENQUEUE ((mword)obj, sgen_safe_object_get_size (obj),
3674 vt->klass->name_space, vt->klass->name, gen, critical);
/* Public wrapper: liveness with respect to the current collection. */
3680 sgen_object_is_live (void *obj)
3682 return sgen_is_object_alive_and_on_current_collection (obj);
/* LOCKING: requires that the GC lock is held */
/*
 * Drop ephemeron arrays whose vtable has been cleared (i.e. whose domain
 * @domain is being unloaded) from the global ephemeron list.
 */
3687 null_ephemerons_for_domain (MonoDomain *domain)
3689 EphemeronLinkNode *current = ephemeron_list, *prev = NULL;
3692 MonoObject *object = (MonoObject*)current->array;
/* A non-NULL object with a NULL vtable marks a dead (unloaded) array. */
3694 if (object && !object->vtable) {
3695 EphemeronLinkNode *tmp = current;
3698 prev->next = current->next;
3700 ephemeron_list = current->next;
3702 current = current->next;
3703 sgen_free_internal (tmp, INTERNAL_MEM_EPHEMERON_LINK);
3706 current = current->next;
/* LOCKING: requires that the GC lock is held */
/*
 * Walk all ephemeron arrays: unlink arrays that are themselves dead, and in
 * live arrays replace entries whose key is unreachable with the domain's
 * tombstone (values of dead keys are also cleared — the assignment is not
 * visible in this excerpt).
 */
3713 clear_unreachable_ephemerons (gboolean concurrent_cementing, ScanCopyContext ctx)
3715 CopyOrMarkObjectFunc copy_func = ctx.copy_func;
3716 GrayQueue *queue = ctx.queue;
3717 EphemeronLinkNode *current = ephemeron_list, *prev = NULL;
3719 Ephemeron *cur, *array_end;
3723 char *object = current->array;
/* Dead ephemeron array: unlink the node and free it. */
3725 if (!sgen_is_object_alive_for_current_gen (object)) {
3726 EphemeronLinkNode *tmp = current;
3728 SGEN_LOG (5, "Dead Ephemeron array at %p", object);
3731 prev->next = current->next;
3733 ephemeron_list = current->next;
3735 current = current->next;
3736 sgen_free_internal (tmp, INTERNAL_MEM_EPHEMERON_LINK);
/* Live array: the copy_func may move it, so update the stored pointer. */
3741 copy_func ((void**)&object, queue);
3742 current->array = object;
3744 SGEN_LOG (5, "Clearing unreachable entries for ephemeron array at %p", object);
3746 array = (MonoArray*)object;
3747 cur = mono_array_addr (array, Ephemeron, 0);
3748 array_end = cur + mono_array_length_fast (array);
3749 tombstone = (char*)((MonoVTable*)LOAD_VTABLE (object))->domain->ephemeron_tombstone;
3751 for (; cur < array_end; ++cur) {
3752 char *key = (char*)cur->key;
3754 if (!key || key == tombstone)
3757 SGEN_LOG (5, "[%td] key %p (%s) value %p (%s)", cur - mono_array_addr (array, Ephemeron, 0),
3758 key, sgen_is_object_alive_for_current_gen (key) ? "reachable" : "unreachable",
3759 cur->value, cur->value && sgen_is_object_alive_for_current_gen (cur->value) ? "reachable" : "unreachable");
3761 if (!sgen_is_object_alive_for_current_gen (key)) {
3762 cur->key = tombstone;
3768 current = current->next;
3773 LOCKING: requires that the GC lock is held
3775 Limitations: We scan all ephemerons on every collection since the current design doesn't allow for a simple nursery/mature split.
/*
 * One iteration of the ephemeron fixpoint: for every live ephemeron array,
 * mark the value of every entry whose key is reachable.  Returns nonzero
 * ("nothing marked") when the pass made no progress, so the caller can stop
 * iterating.
 */
3778 mark_ephemerons_in_range (ScanCopyContext ctx)
3780 CopyOrMarkObjectFunc copy_func = ctx.copy_func;
3781 GrayQueue *queue = ctx.queue;
3782 int nothing_marked = 1;
3783 EphemeronLinkNode *current = ephemeron_list;
3785 Ephemeron *cur, *array_end;
3788 for (current = ephemeron_list; current; current = current->next) {
3789 char *object = current->array;
3790 SGEN_LOG (5, "Ephemeron array at %p", object);
3792 /*It has to be alive*/
3793 if (!sgen_is_object_alive_for_current_gen (object)) {
3794 SGEN_LOG (5, "\tnot reachable");
/* Keep the (possibly moved) array pointer up to date. */
3798 copy_func ((void**)&object, queue);
3800 array = (MonoArray*)object;
3801 cur = mono_array_addr (array, Ephemeron, 0);
3802 array_end = cur + mono_array_length_fast (array);
3803 tombstone = (char*)((MonoVTable*)LOAD_VTABLE (object))->domain->ephemeron_tombstone;
3805 for (; cur < array_end; ++cur) {
3806 char *key = cur->key;
3808 if (!key || key == tombstone)
3811 SGEN_LOG (5, "[%td] key %p (%s) value %p (%s)", cur - mono_array_addr (array, Ephemeron, 0),
3812 key, sgen_is_object_alive_for_current_gen (key) ? "reachable" : "unreachable",
3813 cur->value, cur->value && sgen_is_object_alive_for_current_gen (cur->value) ? "reachable" : "unreachable");
/* Reachable key: keep key and value alive; a newly-marked value means
 * this pass made progress (nothing_marked reset not visible here). */
3815 if (sgen_is_object_alive_for_current_gen (key)) {
3816 char *value = cur->value;
3818 copy_func ((void**)&cur->key, queue);
3820 if (!sgen_is_object_alive_for_current_gen (value))
3822 copy_func ((void**)&cur->value, queue);
3828 SGEN_LOG (5, "Ephemeron run finished. Is it done %d", nothing_marked);
3829 return nothing_marked;
/*
 * Run pending finalizers one at a time: dequeue the previously-finalized
 * entry, pick the next ready entry (ordinary list first, then critical),
 * and invoke its finalizer outside the GC lock.  The object is kept on the
 * stack across the call so it stays pinned.
 */
3833 mono_gc_invoke_finalizers (void)
3835 FinalizeReadyEntry *entry = NULL;
3836 gboolean entry_is_critical = FALSE;
3839 /* FIXME: batch to reduce lock contention */
3840 while (fin_ready_list || critical_fin_list) {
3844 FinalizeReadyEntry **list = entry_is_critical ? &critical_fin_list : &fin_ready_list;
3846 /* We have finalized entry in the last
3847 iteration, now we need to remove it from
3850 *list = entry->next;
/* Entry is not at the head: walk the list to unlink it. */
3852 FinalizeReadyEntry *e = *list;
3853 while (e->next != entry)
3855 e->next = entry->next;
3857 sgen_free_internal (entry, INTERNAL_MEM_FINALIZE_READY_ENTRY);
3861 /* Now look for the first non-null entry. */
3862 for (entry = fin_ready_list; entry && !entry->object; entry = entry->next)
3865 entry_is_critical = FALSE;
/* Ordinary list exhausted: fall back to the critical-finalizer list. */
3867 entry_is_critical = TRUE;
3868 for (entry = critical_fin_list; entry && !entry->object; entry = entry->next)
3873 g_assert (entry->object);
3874 num_ready_finalizers--;
3875 obj = entry->object;
/* Clear the slot so a nested collection won't re-finalize this object. */
3876 entry->object = NULL;
3877 SGEN_LOG (7, "Finalizing object %p (%s)", obj, safe_name (obj));
3885 g_assert (entry->object == NULL);
3887 /* the object is on the stack so it is pinned */
3888 /*g_print ("Calling finalizer for object: %p (%s)\n", entry->object, safe_name (entry->object));*/
3889 mono_gc_run_finalize (obj, NULL);
/* Returns whether any finalizers (ordinary or critical) are still queued. */
3896 mono_gc_pending_finalizers (void)
3898 return fin_ready_list || critical_fin_list;
3902 * ######################################################################
3903 * ######## registered roots support
3904 * ######################################################################
3908 * We do not coalesce roots.
/*
 * Register the range [start, start+size) as a GC root of @root_type.
 * If a root starting at @start already exists in any type's hash table its
 * size/descriptor are updated in place; otherwise a new record is inserted.
 * roots_size tracks the total registered byte count.
 */
3911 mono_gc_register_root_inner (char *start, size_t size, void *descr, int root_type)
3913 RootRecord new_root;
3916 for (i = 0; i < ROOT_TYPE_NUM; ++i) {
3917 RootRecord *root = sgen_hash_table_lookup (&roots_hash [i], start);
3918 /* we allow changing the size and the descriptor (for thread statics etc) */
3920 size_t old_size = root->end_root - start;
3921 root->end_root = start + size;
/* A root may change size/descriptor but not flip between precise
 * (descriptor != NULL) and conservative (descriptor == NULL). */
3922 g_assert (((root->root_desc != 0) && (descr != NULL)) ||
3923 ((root->root_desc == 0) && (descr == NULL)));
3924 root->root_desc = (mword)descr;
3926 roots_size -= old_size;
3932 new_root.end_root = start + size;
3933 new_root.root_desc = (mword)descr;
3935 sgen_hash_table_replace (&roots_hash [root_type], start, &new_root, NULL);
3938 SGEN_LOG (3, "Added root for range: %p-%p, descr: %p (%d/%d bytes)", start, new_root.end_root, descr, (int)size, (int)roots_size);
/* Public entry point: a NULL descriptor means the root is scanned
 * conservatively and therefore pins (ROOT_TYPE_PINNED). */
3945 mono_gc_register_root (char *start, size_t size, void *descr)
3947 return mono_gc_register_root_inner (start, size, descr, descr ? ROOT_TYPE_NORMAL : ROOT_TYPE_PINNED);
/* Register a root whose stores go through the write barrier. */
3951 mono_gc_register_root_wbarrier (char *start, size_t size, void *descr)
3953 return mono_gc_register_root_inner (start, size, descr, ROOT_TYPE_WBARRIER);
/* Remove the root starting at @addr from every root-type table and shrink
 * the registered-bytes accounting accordingly. */
3957 mono_gc_deregister_root (char* addr)
3963 for (root_type = 0; root_type < ROOT_TYPE_NUM; ++root_type) {
3964 if (sgen_hash_table_remove (&roots_hash [root_type], addr, &root))
3965 roots_size -= (root.end_root - addr);
3971 * ######################################################################
3972 * ######## Thread handling (stop/start code)
3973 * ######################################################################
/* Monotonic count of world-stop operations; incremented by the STW code. */
3976 unsigned int sgen_global_stop_count = 0;
/* Let the remembered-set implementation snapshot per-thread state (if it
 * needs to) before the thread is suspended. */
3979 sgen_fill_thread_info_for_suspend (SgenThreadInfo *info)
3981 if (remset.fill_thread_info_for_suspend)
3982 remset.fill_thread_info_for_suspend (info);
/* Returns the generation being collected, or -1 outside a collection. */
3986 sgen_get_current_collection_generation (void)
3988 return current_collection_generation;
/* Install the runtime's GC callbacks (copied by value into gc_callbacks). */
3992 mono_gc_set_gc_callbacks (MonoGCCallbacks *callbacks)
3994 gc_callbacks = *callbacks;
/* Returns a pointer to the installed GC callbacks (never NULL). */
3998 mono_gc_get_gc_callbacks ()
4000 return &gc_callbacks;
/* Variables holding start/end nursery so it won't have to be passed at every call */
/* Set by scan_thread_data before iterating threads; read by
 * mono_gc_conservatively_scan_area (callback has no extra-args slot). */
4004 static void *scan_area_arg_start, *scan_area_arg_end;
/* Runtime callback: conservatively pin everything in [start, end) that
 * looks like a pointer into the nursery range stashed in scan_area_arg_*. */
4007 mono_gc_conservatively_scan_area (void *start, void *end)
4009 conservatively_pin_objects_from (start, end, scan_area_arg_start, scan_area_arg_end, PIN_TYPE_STACK);
/* Runtime callback used during precise stack scanning: copy/mark a single
 * object reference, using the per-thread UserCopyOrMarkData for the queue.
 * NOTE(review): the return of the updated obj is not visible in this excerpt. */
4013 mono_gc_scan_object (void *obj)
4015 UserCopyOrMarkData *data = mono_native_tls_get_value (user_copy_or_mark_key);
4016 current_object_ops.copy_or_mark_object (&obj, data->queue);
4021 * Mark from thread stacks and registers.
/*
 * For every attached thread that is alive, GC-enabled and seen by the STW
 * machinery, scan its stack (precisely via the runtime's mark callback when
 * available, otherwise conservatively) and, in the conservative/!precise
 * pass, also its saved register context.
 */
4024 scan_thread_data (void *start_nursery, void *end_nursery, gboolean precise, GrayQueue *queue)
4026 SgenThreadInfo *info;
/* Stash the nursery range for the conservative-scan callback. */
4028 scan_area_arg_start = start_nursery;
4029 scan_area_arg_end = end_nursery;
4031 FOREACH_THREAD (info) {
4033 SGEN_LOG (3, "Skipping dead thread %p, range: %p-%p, size: %td", info, info->stack_start, info->stack_end, (char*)info->stack_end - (char*)info->stack_start);
4036 if (info->gc_disabled) {
4037 SGEN_LOG (3, "GC disabled for thread %p, range: %p-%p, size: %td", info, info->stack_start, info->stack_end, (char*)info->stack_end - (char*)info->stack_start);
4041 if (!info->joined_stw) {
4042 SGEN_LOG (3, "Skipping thread not seen in STW %p, range: %p-%p, size: %td", info, info->stack_start, info->stack_end, (char*)info->stack_end - (char*)info->stack_start);
4046 SGEN_LOG (3, "Scanning thread %p, range: %p-%p, size: %td, pinned=%d", info, info->stack_start, info->stack_end, (char*)info->stack_end - (char*)info->stack_start, sgen_get_pinned_count ());
4047 if (!info->thread_is_dying) {
/* Precise scan: hand the stack to the runtime's mark function with
 * per-thread copy/mark data installed in TLS. */
4048 if (gc_callbacks.thread_mark_func && !conservative_stack_mark) {
4049 UserCopyOrMarkData data = { NULL, queue };
4050 set_user_copy_or_mark_data (&data);
4051 gc_callbacks.thread_mark_func (info->runtime_data, info->stack_start, info->stack_end, precise);
4052 set_user_copy_or_mark_data (NULL);
4053 } else if (!precise) {
/* No precise marker available: permanently fall back. */
4054 if (!conservative_stack_mark) {
4055 fprintf (stderr, "Precise stack mark not supported - disabling.\n");
4056 conservative_stack_mark = TRUE;
4058 conservatively_pin_objects_from (info->stack_start, info->stack_end, start_nursery, end_nursery, PIN_TYPE_STACK);
/* Registers can hold the only reference to an object; scan the saved
 * context (or raw register buffer) conservatively. */
4062 if (!info->thread_is_dying && !precise) {
4064 conservatively_pin_objects_from ((void**)&info->ctx, (void**)&info->ctx + ARCH_NUM_REGS,
4065 start_nursery, end_nursery, PIN_TYPE_STACK);
4067 conservatively_pin_objects_from (&info->regs, &info->regs + ARCH_NUM_REGS,
4068 start_nursery, end_nursery, PIN_TYPE_STACK);
4071 } END_FOREACH_THREAD
/* Returns whether @ptr lies on the current thread's stack, using the
 * address of a local as the live stack top (assumes stack grows down). */
4075 ptr_on_stack (void *ptr)
4077 gpointer stack_start = &stack_start;
4078 SgenThreadInfo *info = mono_thread_info_current ();
4080 if (ptr >= stack_start && ptr < (gpointer)info->stack_end)
/*
 * Attach a thread to the GC: initialize its TLAB and STW bookkeeping,
 * publish it via TLS, determine its stack bounds (pthread attributes where
 * available, otherwise @addr rounded to a page), and notify the remembered
 * set and the runtime's thread-attach callback.
 */
4086 sgen_thread_register (SgenThreadInfo* info, void *addr)
4088 #ifndef HAVE_KW_THREAD
4089 SgenThreadInfo *__thread_info__ = info;
4093 #ifndef HAVE_KW_THREAD
4094 info->tlab_start = info->tlab_next = info->tlab_temp_end = info->tlab_real_end = NULL;
4096 g_assert (!mono_native_tls_get_value (thread_info_key));
4097 mono_native_tls_set_value (thread_info_key, info);
4099 sgen_thread_info = info;
/* On Mach thread suspension uses a different mechanism; stop_count is
 * only tracked elsewhere. */
4102 #if !defined(__MACH__)
4103 info->stop_count = -1;
4107 info->joined_stw = FALSE;
4108 info->doing_handshake = FALSE;
4109 info->thread_is_dying = FALSE;
4110 info->stack_start = NULL;
4111 info->store_remset_buffer_addr = &STORE_REMSET_BUFFER;
4112 info->store_remset_buffer_index_addr = &STORE_REMSET_BUFFER_INDEX;
4113 info->stopped_ip = NULL;
4114 info->stopped_domain = NULL;
4116 memset (&info->ctx, 0, sizeof (MonoContext));
4118 memset (&info->regs, 0, sizeof (info->regs));
4121 sgen_init_tlab_info (info);
4123 binary_protocol_thread_register ((gpointer)mono_thread_info_get_tid (info));
4125 #ifdef HAVE_KW_THREAD
4126 store_remset_buffer_index_addr = &store_remset_buffer_index;
4129 /* try to get it with attributes first */
4130 #if (defined(HAVE_PTHREAD_GETATTR_NP) || defined(HAVE_PTHREAD_ATTR_GET_NP)) && defined(HAVE_PTHREAD_ATTR_GETSTACK)
4134 pthread_attr_t attr;
4136 #if defined(HAVE_PTHREAD_GETATTR_NP)
4138 pthread_getattr_np (pthread_self (), &attr);
4139 #elif defined(HAVE_PTHREAD_ATTR_GET_NP)
4141 pthread_attr_init (&attr);
4142 pthread_attr_get_np (pthread_self (), &attr);
4144 #error Cannot determine which API is needed to retrieve pthread attributes.
4147 pthread_attr_getstack (&attr, &sstart, &size);
4148 info->stack_start_limit = sstart;
4149 info->stack_end = (char*)sstart + size;
4150 pthread_attr_destroy (&attr);
4152 #elif defined(HAVE_PTHREAD_GET_STACKSIZE_NP) && defined(HAVE_PTHREAD_GET_STACKADDR_NP)
/* Darwin: stackaddr is the high end of the stack. */
4153 info->stack_end = (char*)pthread_get_stackaddr_np (pthread_self ());
4154 info->stack_start_limit = (char*)info->stack_end - pthread_get_stacksize_np (pthread_self ());
4157 /* FIXME: we assume the stack grows down */
/* Fallback: round the caller-supplied address up to a 4 KiB page. */
4158 gsize stack_bottom = (gsize)addr;
4159 stack_bottom += 4095;
4160 stack_bottom &= ~4095;
4161 info->stack_end = (char*)stack_bottom;
4165 #ifdef HAVE_KW_THREAD
4166 stack_end = info->stack_end;
4169 if (remset.register_thread)
4170 remset.register_thread (info);
4172 SGEN_LOG (3, "registered thread %p (%p) stack end %p", info, (gpointer)mono_thread_info_get_tid (info), info->stack_end);
4174 if (gc_callbacks.thread_attach_func)
4175 info->runtime_data = gc_callbacks.thread_attach_func ();
/*
 * sgen_wbarrier_cleanup_thread:
 *
 * Gives the active remembered-set implementation a chance to release
 * per-thread write-barrier state for P, if it needs to.
 */
sgen_wbarrier_cleanup_thread (SgenThreadInfo *p)
	if (remset.cleanup_thread)
		remset.cleanup_thread (p);
/*
 * sgen_thread_unregister:
 *
 * Callback invoked when a thread is being unregistered from the
 * runtime.  Detaches the managed thread if possible, marks the info as
 * dying, takes the GC lock (parking for STW handshakes where needed),
 * and tears down per-thread GC state.
 */
sgen_thread_unregister (SgenThreadInfo *p)
	/* If a delegate is passed to native code and invoked on a thread we dont
	 * know about, the jit will register it with mono_jit_thread_attach, but
	 * we have no way of knowing when that thread goes away. SGen has a TSD
	 * so we assume that if the domain is still registered, we can detach
	 */
	if (mono_domain_get ())
		mono_thread_detach (mono_thread_current ());

	p->thread_is_dying = TRUE;

	/*
	There is a race condition between a thread finishing executing and being removed
	from the GC thread set.
	This happens on posix systems when TLS data is being cleaned up: libpthread will
	set the thread_info slot to NULL before calling the cleanup function. This
	opens a window in which the thread is registered but has a NULL TLS.

	The suspend signal handler needs TLS data to know where to store thread state
	data or otherwise it will simply ignore the thread.

	This solution works because the thread doing STW will wait until all threads being
	suspended handshake back, so there is no race between the doing_handshake test
	and the suspend_thread call.

	This is not required on systems that do synchronous STW as those can deal with
	the above race at suspend time.

	FIXME: I believe we could avoid this by using mono_thread_info_lookup when
	mono_thread_info_current returns NULL. Or fix mono_thread_info_lookup to do so.
	*/
#if (defined(__MACH__) && MONO_MACH_ARCH_SUPPORTED) || !defined(HAVE_PTHREAD_KILL)
	/* Spin on the GC lock, parking if an STW handshake is in progress. */
	while (!TRYLOCK_GC) {
		if (!sgen_park_current_thread_if_doing_handshake (p))

	binary_protocol_thread_unregister ((gpointer)mono_thread_info_get_tid (p));
	SGEN_LOG (3, "unregister thread %p (%p)", p, (gpointer)mono_thread_info_get_tid (p));

	if (gc_callbacks.thread_detach_func) {
		gc_callbacks.thread_detach_func (p->runtime_data);
		p->runtime_data = NULL;

	sgen_wbarrier_cleanup_thread (p);

	mono_threads_unregister_current_thread (p);
/*
 * sgen_thread_attach:
 *
 * Callback invoked when an existing thread attaches to the runtime.
 * Gives the embedder a chance to allocate its per-thread runtime data.
 */
sgen_thread_attach (SgenThreadInfo *info)
	/*this is odd, can we get attached before the gc is inited?*/

	if (gc_callbacks.thread_attach_func && !info->runtime_data)
		info->runtime_data = gc_callbacks.thread_attach_func ();

/*
 * mono_gc_register_thread:
 *
 * Public entry point: registers the calling thread with the runtime.
 * BASEPTR is an address on the thread's stack.  Returns non-zero on
 * success.
 */
mono_gc_register_thread (void *baseptr)
	return mono_thread_info_attach (baseptr) != NULL;
/*
 * mono_gc_set_stack_end:
 *
 * Set the end of the current threads stack to STACK_END. The stack space between
 * STACK_END and the real end of the threads stack will not be scanned during collections.
 * The new end must be strictly below the currently registered one
 * (stacks grow downwards), i.e. this can only shrink the scanned range.
 */
mono_gc_set_stack_end (void *stack_end)
	SgenThreadInfo *info;

	info = mono_thread_info_current ();

	g_assert (stack_end < info->stack_end);
	info->stack_end = stack_end;
#if USE_PTHREAD_INTERCEPT

/*
 * Thin pass-through wrappers around the pthread API.  They exist so
 * that embedders which intercept pthread calls go through the GC,
 * which can then observe thread creation/termination.
 */
mono_gc_pthread_create (pthread_t *new_thread, const pthread_attr_t *attr, void *(*start_routine)(void *), void *arg)
	return pthread_create (new_thread, attr, start_routine, arg);

mono_gc_pthread_join (pthread_t thread, void **retval)
	return pthread_join (thread, retval);

mono_gc_pthread_detach (pthread_t thread)
	return pthread_detach (thread);

/* Does not return. */
mono_gc_pthread_exit (void *retval)
	pthread_exit (retval);

#endif /* USE_PTHREAD_INTERCEPT */
4313 * ######################################################################
4314 * ######## Write barriers
4315 * ######################################################################
4319 * Note: the write barriers first do the needed GC work and then do the actual store:
4320 * this way the value is visible to the conservative GC scan after the write barrier
4321 * itself. If a GC interrupts the barrier in the middle, value will be kept alive by
4322 * the conservative scan, otherwise by the remembered set scan.
/*
 * mono_gc_wbarrier_set_field:
 *
 * Write barrier for storing VALUE into the reference field at
 * FIELD_PTR of OBJ.  Stores into nursery objects need no remembering;
 * otherwise the store is recorded via the active remembered set.
 */
mono_gc_wbarrier_set_field (MonoObject *obj, gpointer field_ptr, MonoObject* value)
	HEAVY_STAT (++stat_wbarrier_set_field);
	if (ptr_in_nursery (field_ptr)) {
		/* Nursery-to-anything stores need no remset entry. */
		*(void**)field_ptr = value;

	SGEN_LOG (8, "Adding remset at %p", field_ptr);

	binary_protocol_wbarrier (field_ptr, value, value->vtable);

	remset.wbarrier_set_field (obj, field_ptr, value);
/*
 * mono_gc_wbarrier_set_arrayref:
 *
 * Write barrier for storing VALUE into the array slot at SLOT_PTR of
 * ARR.  Same policy as mono_gc_wbarrier_set_field: nursery slots are
 * stored directly, everything else goes through the remembered set.
 */
mono_gc_wbarrier_set_arrayref (MonoArray *arr, gpointer slot_ptr, MonoObject* value)
	HEAVY_STAT (++stat_wbarrier_set_arrayref);
	if (ptr_in_nursery (slot_ptr)) {
		/* Nursery slots need no remset entry. */
		*(void**)slot_ptr = value;

	SGEN_LOG (8, "Adding remset at %p", slot_ptr);

	binary_protocol_wbarrier (slot_ptr, value, value->vtable);

	remset.wbarrier_set_arrayref (arr, slot_ptr, value);
/*
 * mono_gc_wbarrier_arrayref_copy:
 *
 * Write barrier for copying COUNT object references from SRC_PTR to
 * DEST_PTR.  If the destination is in the nursery (or COUNT is
 * non-positive) the copy is done directly; otherwise it is delegated
 * to the remembered set, which records the stores.
 */
mono_gc_wbarrier_arrayref_copy (gpointer dest_ptr, gpointer src_ptr, int count)
	HEAVY_STAT (++stat_wbarrier_arrayref_copy);
	/*This check can be done without taking a lock since dest_ptr array is pinned*/
	if (ptr_in_nursery (dest_ptr) || count <= 0) {
		mono_gc_memmove (dest_ptr, src_ptr, count * sizeof (gpointer));

#ifdef SGEN_BINARY_PROTOCOL
	/* Log every individual slot store to the binary protocol. */
	for (i = 0; i < count; ++i) {
		gpointer dest = (gpointer*)dest_ptr + i;
		gpointer obj = *((gpointer*)src_ptr + i);

			binary_protocol_wbarrier (dest, obj, (gpointer)LOAD_VTABLE (obj));

	remset.wbarrier_arrayref_copy (dest_ptr, src_ptr, count);
/* Out-parameter for find_object_for_ptr_callback/find_object_for_ptr. */
static char *found_obj;

/*
 * find_object_for_ptr_callback:
 *
 * Object-iteration callback: records (in found_obj) the object OBJ if
 * the pointer passed via USER_DATA falls inside [obj, obj + size).
 */
find_object_for_ptr_callback (char *obj, size_t size, void *user_data)
	char *ptr = user_data;

	if (ptr >= obj && ptr < obj + size) {
		g_assert (!found_obj);

/* for use in the debugger */
char* find_object_for_ptr (char *ptr);

/*
 * find_object_for_ptr:
 *
 * Returns the object containing PTR, scanning the nursery, the LOS and
 * the major heap in turn, or NULL if no object contains it.
 */
find_object_for_ptr (char *ptr)
	if (ptr >= nursery_section->data && ptr < nursery_section->end_data) {

		sgen_scan_area_with_callback (nursery_section->data, nursery_section->end_data,
				find_object_for_ptr_callback, ptr, TRUE);

	sgen_los_iterate_objects (find_object_for_ptr_callback, ptr);

	/*
	 * Very inefficient, but this is debugging code, supposed to
	 * be called from gdb, so we don't care.
	 */
	major_collector.iterate_objects (TRUE, TRUE, find_object_for_ptr_callback, ptr);
/*
 * mono_gc_wbarrier_generic_nostore:
 *
 * Generic write barrier for a reference store at PTR that has already
 * been performed by the caller.  Skips locations in the nursery or on
 * the stack; otherwise records the location in the remembered set.
 */
mono_gc_wbarrier_generic_nostore (gpointer ptr)
	HEAVY_STAT (++stat_wbarrier_generic_store);

#ifdef XDOMAIN_CHECKS_IN_WBARRIER
	/* FIXME: ptr_in_heap must be called with the GC lock held */
	/* Debug-only: verify that cross-domain references are allowed. */
	if (xdomain_checks && *(MonoObject**)ptr && ptr_in_heap (ptr)) {
		char *start = find_object_for_ptr (ptr);
		MonoObject *value = *(MonoObject**)ptr;

			MonoObject *obj = (MonoObject*)start;
			if (obj->vtable->domain != value->vtable->domain)
				g_assert (is_xdomain_ref_allowed (ptr, start, obj->vtable->domain));

	obj = *(gpointer*)ptr;

		binary_protocol_wbarrier (ptr, obj, (gpointer)LOAD_VTABLE (obj));

	/* Stores into the nursery or onto the stack need no remset entry. */
	if (ptr_in_nursery (ptr) || ptr_on_stack (ptr)) {
		SGEN_LOG (8, "Skipping remset at %p", ptr);

	/*
	 * We need to record old->old pointer locations for the
	 * concurrent collector.
	 */
	if (!ptr_in_nursery (obj) && !concurrent_collection_in_progress) {
		SGEN_LOG (8, "Skipping remset at %p", ptr);

	SGEN_LOG (8, "Adding remset at %p", ptr);

	remset.wbarrier_generic_nostore (ptr);
/*
 * mono_gc_wbarrier_generic_store:
 *
 * Stores VALUE at PTR and then runs the generic write barrier on the
 * location if VALUE lives in the nursery.  The store happens first so
 * a conservative scan interrupting the barrier still sees the value.
 */
mono_gc_wbarrier_generic_store (gpointer ptr, MonoObject* value)
	SGEN_LOG (8, "Wbarrier store at %p to %p (%s)", ptr, value, value ? safe_name (value) : "null");
	*(void**)ptr = value;
	if (ptr_in_nursery (value))
		mono_gc_wbarrier_generic_nostore (ptr);
	/* Keep VALUE alive across the barrier for conservative scans. */
	sgen_dummy_use (value);
/*
 * mono_gc_wbarrier_value_copy_bitmap:
 *
 * Copies SIZE bytes from _SRC to _DEST word by word; BITMAP has one
 * bit per word, with set bits marking words that hold object
 * references and therefore must be copied through the generic store
 * barrier.
 */
void mono_gc_wbarrier_value_copy_bitmap (gpointer _dest, gpointer _src, int size, unsigned bitmap)
	mword *dest = _dest;

			mono_gc_wbarrier_generic_store (dest, (MonoObject*)*src);

		size -= SIZEOF_VOID_P;
#ifdef SGEN_BINARY_PROTOCOL
/*
 * HANDLE_PTR is the per-reference hook used by sgen-scan-object.h: for
 * every reference slot in the scanned object it logs the corresponding
 * destination slot (in DEST) to the binary protocol.
 */
#define HANDLE_PTR(ptr,obj) do {					\
		gpointer o = *(gpointer*)(ptr);				\
			gpointer d = ((char*)dest) + ((char*)(ptr) - (char*)(obj)); \
			binary_protocol_wbarrier (d, o, (gpointer) LOAD_VTABLE (o)); \

/*
 * scan_object_for_binary_protocol_copy_wbarrier:
 *
 * Logs, for the object copy START -> DEST described by DESC, a
 * wbarrier entry for every reference field, using the scan-object
 * template above.
 */
scan_object_for_binary_protocol_copy_wbarrier (gpointer dest, char *start, mword desc)
#define SCAN_OBJECT_NOVTABLE
#include "sgen-scan-object.h"
/*
 * mono_gc_wbarrier_value_copy:
 *
 * Write barrier for copying COUNT value-type instances of KLASS from
 * SRC to DEST.  If the destination is in the nursery, on the stack, or
 * the class holds no references, a plain memmove suffices; otherwise
 * the copy is delegated to the remembered set.
 */
mono_gc_wbarrier_value_copy (gpointer dest, gpointer src, int count, MonoClass *klass)
	HEAVY_STAT (++stat_wbarrier_value_copy);
	g_assert (klass->valuetype);

	SGEN_LOG (8, "Adding value remset at %p, count %d, descr %p for class %s (%p)", dest, count, klass->gc_descr, klass->name, klass);

	if (ptr_in_nursery (dest) || ptr_on_stack (dest) || !SGEN_CLASS_HAS_REFERENCES (klass)) {
		size_t element_size = mono_class_value_size (klass, NULL);
		size_t size = count * element_size;
		mono_gc_memmove (dest, src, size);

#ifdef SGEN_BINARY_PROTOCOL
		size_t element_size = mono_class_value_size (klass, NULL);

		for (i = 0; i < count; ++i) {
			/* SRC is unboxed, so fake the object start by backing
			 * up over the (absent) object header. */
			scan_object_for_binary_protocol_copy_wbarrier ((char*)dest + i * element_size,
					(char*)src + i * element_size - sizeof (MonoObject),
					(mword) klass->gc_descr);

	remset.wbarrier_value_copy (dest, src, count, klass);
/*
 * mono_gc_wbarrier_object_copy:
 *
 * Write barrier to call when obj is the result of a clone or copy of an object.
 * Copies the full object body (excluding the header) from SRC into
 * OBJ; destinations in the nursery or on the stack are copied
 * directly, everything else goes through the remembered set.
 */
mono_gc_wbarrier_object_copy (MonoObject* obj, MonoObject *src)
	HEAVY_STAT (++stat_wbarrier_object_copy);

	if (ptr_in_nursery (obj) || ptr_on_stack (obj)) {
		size = mono_object_class (obj)->instance_size;
		/* Skip the MonoObject header at the start of both objects. */
		mono_gc_memmove ((char*)obj + sizeof (MonoObject), (char*)src + sizeof (MonoObject),
				size - sizeof (MonoObject));

#ifdef SGEN_BINARY_PROTOCOL
	scan_object_for_binary_protocol_copy_wbarrier (obj, (char*)src, (mword) src->vtable->gc_descr);

	remset.wbarrier_object_copy (obj, src);
4568 * ######################################################################
4569 * ######## Other mono public interface functions.
4570 * ######################################################################
/* Number of references buffered before the heap-walk callback fires. */
#define REFS_SIZE 128
	/* State threaded through the heap walk (see mono_gc_walk_heap). */
	MonoGCReferences callback;
	MonoObject *refs [REFS_SIZE];
	uintptr_t offsets [REFS_SIZE];

/*
 * Per-reference hook for collect_references: buffers each reference
 * (and its byte offset inside the object) in the HeapWalkInfo, flushing
 * to the user callback whenever the buffer fills up.
 */
#define HANDLE_PTR(ptr,obj)	do {					\
		if (hwi->count == REFS_SIZE) {	\
			hwi->callback ((MonoObject*)start, mono_object_class (start), hwi->called? 0: size, hwi->count, hwi->refs, hwi->offsets, hwi->data); \
		hwi->offsets [hwi->count] = (char*)(ptr)-(char*)start;	\
		hwi->refs [hwi->count++] = *(ptr);	\

/*
 * collect_references:
 *
 * Scans the object at START (SIZE bytes) and collects its references
 * into HWI via the HANDLE_PTR hook above.
 */
collect_references (HeapWalkInfo *hwi, char *start, size_t size)
#include "sgen-scan-object.h"

/*
 * walk_references:
 *
 * Object-iteration callback: collects the references of one object and
 * invokes the user callback with whatever is buffered (also for
 * reference-free objects, so every object is reported at least once).
 */
walk_references (char *start, size_t size, void *data)
	HeapWalkInfo *hwi = data;

	collect_references (hwi, start, size);
	if (hwi->count || !hwi->called)
		hwi->callback ((MonoObject*)start, mono_object_class (start), hwi->called? 0: size, hwi->count, hwi->refs, hwi->offsets, hwi->data);
/*
 * mono_gc_walk_heap:
 * @flags: flags for future use
 * @callback: a function pointer called for each object in the heap
 * @data: a user data pointer that is passed to callback
 *
 * This function can be used to iterate over all the live objects in the heap:
 * for each object, @callback is invoked, providing info about the object's
 * location in memory, its class, its size and the objects it references.
 * For each referenced object its offset from the object address is
 * reported in the offsets array.
 * The object references may be buffered, so the callback may be invoked
 * multiple times for the same object: in all but the first call, the size
 * argument will be zero.
 * Note that this function can be only called in the #MONO_GC_EVENT_PRE_START_WORLD
 * profiler event handler.
 *
 * Returns: a non-zero value if the GC doesn't support heap walking
 */
mono_gc_walk_heap (int flags, MonoGCReferences callback, void *data)
	hwi.callback = callback;

	/* Walk nursery, major heap and LOS in turn. */
	sgen_clear_nursery_fragments ();
	sgen_scan_area_with_callback (nursery_section->data, nursery_section->end_data, walk_references, &hwi, FALSE);

	major_collector.iterate_objects (TRUE, TRUE, walk_references, &hwi);
	sgen_los_iterate_objects (walk_references, &hwi);
/*
 * mono_gc_collect:
 *
 * Performs a user-requested collection of GENERATION (0 = nursery,
 * 1 = major).
 */
mono_gc_collect (int generation)
	sgen_perform_collection (0, generation, "user request", TRUE);

/* Returns the index of the oldest generation. */
mono_gc_max_generation (void)

/*
 * mono_gc_collection_count:
 *
 * Returns how many collections of GENERATION have happened so far.
 */
mono_gc_collection_count (int generation)
	if (generation == 0)
		return stat_minor_gcs;
	return stat_major_gcs;

/*
 * mono_gc_get_used_size:
 *
 * Returns the total number of bytes currently in use: LOS + nursery
 * occupancy + major heap usage.
 */
mono_gc_get_used_size (void)
	tot = los_memory_usage;
	tot += nursery_section->next_data - nursery_section->data;
	tot += major_collector.get_used_size ();
	/* FIXME: account for pinned objects */

/* Disables GC collections (counted; see mono_gc_enable). */
mono_gc_disable (void)

/* Re-enables GC collections after a mono_gc_disable. */
mono_gc_enable (void)

/* Returns the size threshold above which objects go to the LOS. */
mono_gc_get_los_limit (void)
	return MAX_SMALL_OBJ_SIZE;
/* Whether user-supplied precise mark callbacks are supported. */
mono_gc_user_markers_supported (void)

/* Liveness query for O; used by the Boehm-compatible API. */
mono_object_is_alive (MonoObject* o)

/* Returns the generation OBJ currently lives in (0 = nursery). */
mono_gc_get_generation (MonoObject *obj)
	if (ptr_in_nursery (obj))

/* Enables GC profiler events. */
mono_gc_enable_events (void)

/*
 * mono_gc_weak_link_add:
 *
 * Registers LINK_ADDR as a weak reference to OBJ; TRACK selects
 * resurrection-tracking semantics.
 */
mono_gc_weak_link_add (void **link_addr, MonoObject *obj, gboolean track)
	sgen_register_disappearing_link (obj, link_addr, track, FALSE);

/*
 * mono_gc_weak_link_remove:
 *
 * Unregisters the weak reference at LINK_ADDR (registering NULL
 * removes the link).
 */
mono_gc_weak_link_remove (void **link_addr, gboolean track)
	sgen_register_disappearing_link (NULL, link_addr, track, FALSE);
/*
 * mono_gc_weak_link_get:
 *
 * Dereferences the weak link at LINK_ADDR, returning the target object
 * or NULL.  Waits for bridge processing when it is in progress, since
 * links may still point to dead objects during that window.
 */
mono_gc_weak_link_get (void **link_addr)
	/*
	 * We must only load *link_addr once because it might change
	 * under our feet, and REVEAL_POINTER (NULL) results in an
	 * invalid reference.
	 */
	void *ptr = *link_addr;

	/*
	 * During the second bridge processing step the world is
	 * running again. That step processes all weak links once
	 * more to null those that refer to dead objects. Before that
	 * is completed, those links must not be followed, so we
	 * conservatively wait for bridge processing when any weak
	 * link is dereferenced.
	 */
	if (G_UNLIKELY (bridge_processing_in_progress))
		mono_gc_wait_for_bridge_processing ();

	return (MonoObject*) REVEAL_POINTER (ptr);
/*
 * mono_gc_ephemeron_array_add:
 *
 * Registers OBJ (an ephemeron array) by prepending a node to the
 * global ephemeron_list so the collector processes its entries with
 * ephemeron semantics.
 */
mono_gc_ephemeron_array_add (MonoObject *obj)
	EphemeronLinkNode *node;

	node = sgen_alloc_internal (INTERNAL_MEM_EPHEMERON_LINK);

	node->array = (char*)obj;
	node->next = ephemeron_list;
	ephemeron_list = node;

	SGEN_LOG (5, "Registered ephemeron array %p", obj);
/*
 * mono_gc_invoke_with_gc_lock:
 *
 * Runs FUNC (DATA) while holding the GC interruption lock and returns
 * its result.
 */
mono_gc_invoke_with_gc_lock (MonoGCLockedCallbackFunc func, void *data)
	result = func (data);
	UNLOCK_INTERRUPTION;

/* Whether the calling thread is registered with the GC. */
mono_gc_is_gc_thread (void)
	result = mono_thread_info_current () != NULL;

/*
 * is_critical_method:
 *
 * A method is GC-critical if it must not be interrupted by the
 * collector (write barriers and managed allocators).
 */
is_critical_method (MonoMethod *method)
	return mono_runtime_is_critical_method (method) || sgen_is_critical_method (method);
/*
 * mono_gc_base_init:
 *
 * One-time initialization of the SGen collector: claims the init flag,
 * sets up locks and the threads machinery, parses the MONO_GC_PARAMS
 * and MONO_GC_DEBUG environment variables, instantiates the chosen
 * minor/major collectors and remembered-set implementation, and
 * initializes the memory governor.  Safe to call from multiple
 * threads; only the first caller performs the work.
 */
mono_gc_base_init (void)
	MonoThreadInfoCallbacks cb;

	char *major_collector_opt = NULL;
	char *minor_collector_opt = NULL;
	glong soft_limit = 0;
	gboolean debug_print_allowance = FALSE;
	double allowance_ratio = 0, save_target = 0;
	gboolean have_split_nursery = FALSE;
	gboolean cement_enabled = TRUE;

	/* Atomically claim initialization: 0 -> -1 marks "in progress". */
	result = InterlockedCompareExchange (&gc_initialized, -1, 0);
		/* already inited */
		/* being inited by another thread */
		/* we will init it */
		g_assert_not_reached ();
	} while (result != 0);

	LOCK_INIT (gc_mutex);

	pagesize = mono_pagesize ();
	gc_debug_file = stderr;

	cb.thread_register = sgen_thread_register;
	cb.thread_unregister = sgen_thread_unregister;
	cb.thread_attach = sgen_thread_attach;
	cb.mono_method_is_critical = (gpointer)is_critical_method;
	cb.mono_gc_pthread_create = (gpointer)mono_gc_pthread_create;

	mono_threads_init (&cb, sizeof (SgenThreadInfo));

	LOCK_INIT (sgen_interruption_mutex);
	LOCK_INIT (pin_queue_mutex);

	init_user_copy_or_mark_key ();

	/* First pass over MONO_GC_PARAMS: pick out only the collector choices. */
	if ((env = getenv ("MONO_GC_PARAMS"))) {
		opts = g_strsplit (env, ",", -1);
		for (ptr = opts; *ptr; ++ptr) {
			if (g_str_has_prefix (opt, "major=")) {
				opt = strchr (opt, '=') + 1;
				major_collector_opt = g_strdup (opt);
			} else if (g_str_has_prefix (opt, "minor=")) {
				opt = strchr (opt, '=') + 1;
				minor_collector_opt = g_strdup (opt);

	sgen_init_internal_allocator ();
	sgen_init_nursery_allocator ();

	sgen_register_fixed_internal_mem_type (INTERNAL_MEM_SECTION, SGEN_SIZEOF_GC_MEM_SECTION);
	sgen_register_fixed_internal_mem_type (INTERNAL_MEM_FINALIZE_READY_ENTRY, sizeof (FinalizeReadyEntry));
	sgen_register_fixed_internal_mem_type (INTERNAL_MEM_GRAY_QUEUE, sizeof (GrayQueueSection));
	g_assert (sizeof (GenericStoreRememberedSet) == sizeof (gpointer) * STORE_REMSET_BUFFER_SIZE);
	sgen_register_fixed_internal_mem_type (INTERNAL_MEM_STORE_REMSET, sizeof (GenericStoreRememberedSet));
	sgen_register_fixed_internal_mem_type (INTERNAL_MEM_EPHEMERON_LINK, sizeof (EphemeronLinkNode));

#ifndef HAVE_KW_THREAD
	mono_native_tls_alloc (&thread_info_key, NULL);

	/*
	 * This needs to happen before any internal allocations because
	 * it inits the small id which is required for hazard pointer
	 * operations.
	 */
	mono_thread_info_attach (&dummy);

	/* Instantiate the minor collector (default: simple nursery). */
	if (!minor_collector_opt) {
		sgen_simple_nursery_init (&sgen_minor_collector);
		if (!strcmp (minor_collector_opt, "simple")) {
			sgen_simple_nursery_init (&sgen_minor_collector);
		} else if (!strcmp (minor_collector_opt, "split")) {
			sgen_split_nursery_init (&sgen_minor_collector);
			have_split_nursery = TRUE;
			fprintf (stderr, "Unknown minor collector `%s'.\n", minor_collector_opt);

	/*
	 * Instantiate the major collector (default: marksweep).  The
	 * !major_collector_opt tests after the first branch are redundant
	 * (NULL is caught by the first), but harmless.
	 */
	if (!major_collector_opt || !strcmp (major_collector_opt, "marksweep")) {
		sgen_marksweep_init (&major_collector);
	} else if (!major_collector_opt || !strcmp (major_collector_opt, "marksweep-fixed")) {
		sgen_marksweep_fixed_init (&major_collector);
	} else if (!major_collector_opt || !strcmp (major_collector_opt, "marksweep-par")) {
		sgen_marksweep_par_init (&major_collector);
	} else if (!major_collector_opt || !strcmp (major_collector_opt, "marksweep-fixed-par")) {
		sgen_marksweep_fixed_par_init (&major_collector);
	} else if (!major_collector_opt || !strcmp (major_collector_opt, "marksweep-conc")) {
		sgen_marksweep_conc_init (&major_collector);
		fprintf (stderr, "Unknown major collector `%s'.\n", major_collector_opt);

#ifdef SGEN_HAVE_CARDTABLE
	use_cardtable = major_collector.supports_cardtable;
	use_cardtable = FALSE;

	num_workers = mono_cpu_count ();
	g_assert (num_workers > 0);
	if (num_workers > 16)

	/* Keep this the default for now */
	/* Precise marking is broken on all supported targets. Disable until fixed. */
	conservative_stack_mark = TRUE;

	sgen_nursery_size = DEFAULT_NURSERY_SIZE;

	/* Second pass over MONO_GC_PARAMS: all remaining options. */
		for (ptr = opts; *ptr; ++ptr) {
			if (g_str_has_prefix (opt, "major="))
			if (g_str_has_prefix (opt, "minor="))
			if (g_str_has_prefix (opt, "wbarrier=")) {
				opt = strchr (opt, '=') + 1;
				if (strcmp (opt, "remset") == 0) {
					if (major_collector.is_concurrent) {
						fprintf (stderr, "The concurrent collector does not support the SSB write barrier.\n");
					use_cardtable = FALSE;
				} else if (strcmp (opt, "cardtable") == 0) {
					if (!use_cardtable) {
						if (major_collector.supports_cardtable)
							fprintf (stderr, "The cardtable write barrier is not supported on this platform.\n");
							fprintf (stderr, "The major collector does not support the cardtable write barrier.\n");
					fprintf (stderr, "wbarrier must either be `remset' or `cardtable'.");

			if (g_str_has_prefix (opt, "max-heap-size=")) {
				opt = strchr (opt, '=') + 1;
				if (*opt && mono_gc_parse_environment_string_extract_number (opt, &max_heap)) {
					if ((max_heap & (mono_pagesize () - 1))) {
						fprintf (stderr, "max-heap-size size must be a multiple of %d.\n", mono_pagesize ());
					fprintf (stderr, "max-heap-size must be an integer.\n");

			if (g_str_has_prefix (opt, "soft-heap-limit=")) {
				opt = strchr (opt, '=') + 1;
				if (*opt && mono_gc_parse_environment_string_extract_number (opt, &soft_limit)) {
					if (soft_limit <= 0) {
						fprintf (stderr, "soft-heap-limit must be positive.\n");
					fprintf (stderr, "soft-heap-limit must be an integer.\n");

			if (g_str_has_prefix (opt, "workers=")) {
				if (!major_collector.is_parallel) {
					fprintf (stderr, "The workers= option can only be used for parallel collectors.");
				opt = strchr (opt, '=') + 1;
				val = strtol (opt, &endptr, 10);
				if (!*opt || *endptr) {
					fprintf (stderr, "Cannot parse the workers= option value.");
				if (val <= 0 || val > 16) {
					fprintf (stderr, "The number of workers must be in the range 1 to 16.");
				num_workers = (int)val;

			if (g_str_has_prefix (opt, "stack-mark=")) {
				opt = strchr (opt, '=') + 1;
				if (!strcmp (opt, "precise")) {
					conservative_stack_mark = FALSE;
				} else if (!strcmp (opt, "conservative")) {
					conservative_stack_mark = TRUE;
					fprintf (stderr, "Invalid value '%s' for stack-mark= option, possible values are: 'precise', 'conservative'.\n", opt);

			if (g_str_has_prefix (opt, "bridge=")) {
				opt = strchr (opt, '=') + 1;
				sgen_register_test_bridge_callbacks (g_strdup (opt));

			if (g_str_has_prefix (opt, "nursery-size=")) {
				opt = strchr (opt, '=') + 1;
				if (*opt && mono_gc_parse_environment_string_extract_number (opt, &val)) {
					sgen_nursery_size = val;
#ifdef SGEN_ALIGN_NURSERY
					/* An aligned nursery must be a power of two. */
					if ((val & (val - 1))) {
						fprintf (stderr, "The nursery size must be a power of two.\n");
					if (val < SGEN_MAX_NURSERY_WASTE) {
						fprintf (stderr, "The nursery size must be at least %d bytes.\n", SGEN_MAX_NURSERY_WASTE);
					sgen_nursery_bits = 0;
					while (1 << (++ sgen_nursery_bits) != sgen_nursery_size)
					fprintf (stderr, "nursery-size must be an integer.\n");

			if (g_str_has_prefix (opt, "save-target-ratio=")) {
				opt = strchr (opt, '=') + 1;
				save_target = strtod (opt, &endptr);
				if (endptr == opt) {
					fprintf (stderr, "save-target-ratio must be a number.");
				if (save_target < SGEN_MIN_SAVE_TARGET_RATIO || save_target > SGEN_MAX_SAVE_TARGET_RATIO) {
					fprintf (stderr, "save-target-ratio must be between %.2f - %.2f.", SGEN_MIN_SAVE_TARGET_RATIO, SGEN_MAX_SAVE_TARGET_RATIO);

			if (g_str_has_prefix (opt, "default-allowance-ratio=")) {
				opt = strchr (opt, '=') + 1;

				allowance_ratio = strtod (opt, &endptr);
				if (endptr == opt) {
					/* NOTE(review): this message says "save-target-ratio" but
					 * this is the default-allowance-ratio branch -- looks like
					 * a copy/paste error in the string. */
					fprintf (stderr, "save-target-ratio must be a number.");
				/* NOTE(review): both bounds compare against SGEN_MIN_...;
				 * the upper bound should almost certainly be
				 * SGEN_MAX_ALLOWANCE_NURSERY_SIZE_RATIO (cf. the usage text
				 * printed below), and likewise in the message. */
				if (allowance_ratio < SGEN_MIN_ALLOWANCE_NURSERY_SIZE_RATIO || allowance_ratio > SGEN_MIN_ALLOWANCE_NURSERY_SIZE_RATIO) {
					fprintf (stderr, "default-allowance-ratio must be between %.2f - %.2f.", SGEN_MIN_ALLOWANCE_NURSERY_SIZE_RATIO, SGEN_MIN_ALLOWANCE_NURSERY_SIZE_RATIO);

			if (!strcmp (opt, "cementing")) {
				cement_enabled = TRUE;
			if (!strcmp (opt, "no-cementing")) {
				cement_enabled = FALSE;

			/* Give the collectors a chance to handle their own options. */
			if (major_collector.handle_gc_param && major_collector.handle_gc_param (opt))
			if (sgen_minor_collector.handle_gc_param && sgen_minor_collector.handle_gc_param (opt))

			/* Unknown option: print usage and bail out. */
			fprintf (stderr, "MONO_GC_PARAMS must be a comma-delimited list of one or more of the following:\n");
			fprintf (stderr, "  max-heap-size=N (where N is an integer, possibly with a k, m or a g suffix)\n");
			fprintf (stderr, "  soft-heap-limit=n (where N is an integer, possibly with a k, m or a g suffix)\n");
			fprintf (stderr, "  nursery-size=N (where N is an integer, possibly with a k, m or a g suffix)\n");
			fprintf (stderr, "  major=COLLECTOR (where COLLECTOR is `marksweep', `marksweep-par', 'marksweep-fixed' or 'marksweep-fixed-par')\n");
			fprintf (stderr, "  minor=COLLECTOR (where COLLECTOR is `simple' or `split')\n");
			fprintf (stderr, "  wbarrier=WBARRIER (where WBARRIER is `remset' or `cardtable')\n");
			fprintf (stderr, "  stack-mark=MARK-METHOD (where MARK-METHOD is 'precise' or 'conservative')\n");
			fprintf (stderr, "  [no-]cementing\n");
			if (major_collector.print_gc_param_usage)
				major_collector.print_gc_param_usage ();
			if (sgen_minor_collector.print_gc_param_usage)
				sgen_minor_collector.print_gc_param_usage ();
			fprintf (stderr, " Experimental options:\n");
			fprintf (stderr, "  save-target-ratio=R (where R must be between %.2f - %.2f).\n", SGEN_MIN_SAVE_TARGET_RATIO, SGEN_MAX_SAVE_TARGET_RATIO);
			fprintf (stderr, "  default-allowance-ratio=R (where R must be between %.2f - %.2f).\n", SGEN_MIN_ALLOWANCE_NURSERY_SIZE_RATIO, SGEN_MAX_ALLOWANCE_NURSERY_SIZE_RATIO);

	if (major_collector.is_parallel)
		sgen_workers_init (num_workers);
	else if (major_collector.is_concurrent)
		sgen_workers_init (1);

	if (major_collector_opt)
		g_free (major_collector_opt);

	if (minor_collector_opt)
		g_free (minor_collector_opt);

	sgen_cement_init (cement_enabled);

	/* Parse MONO_GC_DEBUG: a debug level and/or named debug options. */
	if ((env = getenv ("MONO_GC_DEBUG"))) {
		opts = g_strsplit (env, ",", -1);
		for (ptr = opts; ptr && *ptr; ptr ++) {
			if (opt [0] >= '0' && opt [0] <= '9') {
				gc_debug_level = atoi (opt);

					char *rf = g_strdup_printf ("%s.%d", opt, GetCurrentProcessId ());
					char *rf = g_strdup_printf ("%s.%d", opt, getpid ());
					gc_debug_file = fopen (rf, "wb");
						gc_debug_file = stderr;

			} else if (!strcmp (opt, "print-allowance")) {
				debug_print_allowance = TRUE;
			} else if (!strcmp (opt, "print-pinning")) {
				do_pin_stats = TRUE;
			} else if (!strcmp (opt, "verify-before-allocs")) {
				verify_before_allocs = 1;
				has_per_allocation_action = TRUE;
			} else if (g_str_has_prefix (opt, "verify-before-allocs=")) {
				char *arg = strchr (opt, '=') + 1;
				verify_before_allocs = atoi (arg);
				has_per_allocation_action = TRUE;
			} else if (!strcmp (opt, "collect-before-allocs")) {
				collect_before_allocs = 1;
				has_per_allocation_action = TRUE;
			} else if (g_str_has_prefix (opt, "collect-before-allocs=")) {
				char *arg = strchr (opt, '=') + 1;
				has_per_allocation_action = TRUE;
				collect_before_allocs = atoi (arg);
			} else if (!strcmp (opt, "verify-before-collections")) {
				whole_heap_check_before_collection = TRUE;
			} else if (!strcmp (opt, "check-at-minor-collections")) {
				consistency_check_at_minor_collection = TRUE;
				nursery_clear_policy = CLEAR_AT_GC;
			} else if (!strcmp (opt, "check-mark-bits")) {
				check_mark_bits_after_major_collection = TRUE;
			} else if (!strcmp (opt, "check-nursery-pinned")) {
				check_nursery_objects_pinned = TRUE;
			} else if (!strcmp (opt, "xdomain-checks")) {
				xdomain_checks = TRUE;
			} else if (!strcmp (opt, "clear-at-gc")) {
				nursery_clear_policy = CLEAR_AT_GC;
			} else if (!strcmp (opt, "clear-nursery-at-gc")) {
				nursery_clear_policy = CLEAR_AT_GC;
			} else if (!strcmp (opt, "check-scan-starts")) {
				do_scan_starts_check = TRUE;
			} else if (!strcmp (opt, "verify-nursery-at-minor-gc")) {
				do_verify_nursery = TRUE;
			} else if (!strcmp (opt, "check-concurrent")) {
				if (!major_collector.is_concurrent) {
					/* NOTE(review): "only world with" should read "only works with". */
					fprintf (stderr, "Error: check-concurrent only world with concurrent major collectors.\n");
				do_concurrent_checks = TRUE;
			} else if (!strcmp (opt, "dump-nursery-at-minor-gc")) {
				do_dump_nursery_content = TRUE;
			} else if (!strcmp (opt, "no-managed-allocator")) {
				sgen_set_use_managed_allocator (FALSE);
			} else if (!strcmp (opt, "disable-minor")) {
				disable_minor_collections = TRUE;
			} else if (!strcmp (opt, "disable-major")) {
				disable_major_collections = TRUE;
			} else if (g_str_has_prefix (opt, "heap-dump=")) {
				char *filename = strchr (opt, '=') + 1;
				nursery_clear_policy = CLEAR_AT_GC;
				heap_dump_file = fopen (filename, "w");
				if (heap_dump_file) {
					fprintf (heap_dump_file, "<sgen-dump>\n");
					do_pin_stats = TRUE;
#ifdef SGEN_BINARY_PROTOCOL
			} else if (g_str_has_prefix (opt, "binary-protocol=")) {
				char *filename = strchr (opt, '=') + 1;
				binary_protocol_init (filename);
					fprintf (stderr, "Warning: Cardtable write barriers will not be binary-protocolled.\n");
				fprintf (stderr, "Invalid format for the MONO_GC_DEBUG env variable: '%s'\n", env);
				fprintf (stderr, "The format is: MONO_GC_DEBUG=[l[:filename]|<option>]+ where l is a debug level 0-9.\n");
				fprintf (stderr, "Valid options are:\n");
				fprintf (stderr, "  collect-before-allocs[=<n>]\n");
				fprintf (stderr, "  verify-before-allocs[=<n>]\n");
				fprintf (stderr, "  check-at-minor-collections\n");
				fprintf (stderr, "  check-mark-bits\n");
				fprintf (stderr, "  check-nursery-pinned\n");
				fprintf (stderr, "  verify-before-collections\n");
				fprintf (stderr, "  verify-nursery-at-minor-gc\n");
				fprintf (stderr, "  dump-nursery-at-minor-gc\n");
				fprintf (stderr, "  disable-minor\n");
				fprintf (stderr, "  disable-major\n");
				fprintf (stderr, "  xdomain-checks\n");
				fprintf (stderr, "  check-concurrent\n");
				fprintf (stderr, "  clear-at-gc\n");
				fprintf (stderr, "  clear-nursery-at-gc\n");
				fprintf (stderr, "  check-scan-starts\n");
				fprintf (stderr, "  no-managed-allocator\n");
				fprintf (stderr, "  print-allowance\n");
				fprintf (stderr, "  print-pinning\n");
				fprintf (stderr, "  heap-dump=<filename>\n");
#ifdef SGEN_BINARY_PROTOCOL
				fprintf (stderr, "  binary-protocol=<filename>\n");

	/* The parallel collector cannot do heap dumps or pinning stats. */
	if (major_collector.is_parallel) {
		if (heap_dump_file) {
			fprintf (stderr, "Error: Cannot do heap dump with the parallel collector.\n");
			fprintf (stderr, "Error: Cannot gather pinning statistics with the parallel collector.\n");

	if (major_collector.post_param_init)
		major_collector.post_param_init (&major_collector);

	sgen_memgov_init (max_heap, soft_limit, debug_print_allowance, allowance_ratio, save_target);

	memset (&remset, 0, sizeof (remset));

#ifdef SGEN_HAVE_CARDTABLE
		sgen_card_table_init (&remset);
		sgen_ssb_init (&remset);

	if (remset.register_thread)
		remset.register_thread (mono_thread_info_current ());
/* Returns the name of this GC implementation. */
mono_gc_get_gc_name (void)

/* The generated managed write-barrier wrapper, created on demand. */
static MonoMethod *write_barrier_method;

/*
 * sgen_is_critical_method:
 *
 * Whether METHOD is GC-critical, i.e. must not be interrupted by the
 * collector: the write-barrier wrapper or a managed allocator.
 */
sgen_is_critical_method (MonoMethod *method)
	return (method == write_barrier_method || sgen_is_managed_allocator (method));

/* Whether any GC-critical managed method has been generated so far. */
sgen_has_critical_method (void)
	return write_barrier_method || sgen_has_managed_allocator ();
5334 emit_nursery_check (MonoMethodBuilder *mb, int *nursery_check_return_labels)
5336 memset (nursery_check_return_labels, 0, sizeof (int) * 3);
5337 #ifdef SGEN_ALIGN_NURSERY
5338 // if (ptr_in_nursery (ptr)) return;
5340 * Masking out the bits might be faster, but we would have to use 64 bit
5341 * immediates, which might be slower.
5343 mono_mb_emit_ldarg (mb, 0);
5344 mono_mb_emit_icon (mb, DEFAULT_NURSERY_BITS);
5345 mono_mb_emit_byte (mb, CEE_SHR_UN);
5346 mono_mb_emit_icon (mb, (mword)sgen_get_nursery_start () >> DEFAULT_NURSERY_BITS);
5347 nursery_check_return_labels [0] = mono_mb_emit_branch (mb, CEE_BEQ);
5349 if (!major_collector.is_concurrent) {
5350 // if (!ptr_in_nursery (*ptr)) return;
5351 mono_mb_emit_ldarg (mb, 0);
5352 mono_mb_emit_byte (mb, CEE_LDIND_I);
5353 mono_mb_emit_icon (mb, DEFAULT_NURSERY_BITS);
5354 mono_mb_emit_byte (mb, CEE_SHR_UN);
5355 mono_mb_emit_icon (mb, (mword)sgen_get_nursery_start () >> DEFAULT_NURSERY_BITS);
5356 nursery_check_return_labels [1] = mono_mb_emit_branch (mb, CEE_BNE_UN);
5359 int label_continue1, label_continue2;
5360 int dereferenced_var;
5362 // if (ptr < (sgen_get_nursery_start ())) goto continue;
5363 mono_mb_emit_ldarg (mb, 0);
5364 mono_mb_emit_ptr (mb, (gpointer) sgen_get_nursery_start ());
5365 label_continue_1 = mono_mb_emit_branch (mb, CEE_BLT);
5367 // if (ptr >= sgen_get_nursery_end ())) goto continue;
5368 mono_mb_emit_ldarg (mb, 0);
5369 mono_mb_emit_ptr (mb, (gpointer) sgen_get_nursery_end ());
5370 label_continue_2 = mono_mb_emit_branch (mb, CEE_BGE);
5373 nursery_check_return_labels [0] = mono_mb_emit_branch (mb, CEE_BR);
5376 mono_mb_patch_branch (mb, label_continue_1);
5377 mono_mb_patch_branch (mb, label_continue_2);
5379 // Dereference and store in local var
5380 dereferenced_var = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
5381 mono_mb_emit_ldarg (mb, 0);
5382 mono_mb_emit_byte (mb, CEE_LDIND_I);
5383 mono_mb_emit_stloc (mb, dereferenced_var);
5385 if (!major_collector.is_concurrent) {
5386 // if (*ptr < sgen_get_nursery_start ()) return;
5387 mono_mb_emit_ldloc (mb, dereferenced_var);
5388 mono_mb_emit_ptr (mb, (gpointer) sgen_get_nursery_start ());
5389 nursery_check_return_labels [1] = mono_mb_emit_branch (mb, CEE_BLT);
5391 // if (*ptr >= sgen_get_nursery_end ()) return;
5392 mono_mb_emit_ldloc (mb, dereferenced_var);
5393 mono_mb_emit_ptr (mb, (gpointer) sgen_get_nursery_end ());
5394 nursery_check_return_labels [2] = mono_mb_emit_branch (mb, CEE_BGE);
5400 mono_gc_get_write_barrier (void)
5403 MonoMethodBuilder *mb;
5404 MonoMethodSignature *sig;
5405 #ifdef MANAGED_WBARRIER
5406 int i, nursery_check_labels [3];
5407 int label_no_wb_3, label_no_wb_4, label_need_wb, label_slow_path;
5408 int buffer_var, buffer_index_var, dummy_var;
5410 #ifdef HAVE_KW_THREAD
5411 int stack_end_offset = -1, store_remset_buffer_offset = -1;
5412 int store_remset_buffer_index_offset = -1, store_remset_buffer_index_addr_offset = -1;
5414 MONO_THREAD_VAR_OFFSET (stack_end, stack_end_offset);
5415 g_assert (stack_end_offset != -1);
5416 MONO_THREAD_VAR_OFFSET (store_remset_buffer, store_remset_buffer_offset);
5417 g_assert (store_remset_buffer_offset != -1);
5418 MONO_THREAD_VAR_OFFSET (store_remset_buffer_index, store_remset_buffer_index_offset);
5419 g_assert (store_remset_buffer_index_offset != -1);
5420 MONO_THREAD_VAR_OFFSET (store_remset_buffer_index_addr, store_remset_buffer_index_addr_offset);
5421 g_assert (store_remset_buffer_index_addr_offset != -1);
5425 // FIXME: Maybe create a separate version for ctors (the branch would be
5426 // correctly predicted more times)
5427 if (write_barrier_method)
5428 return write_barrier_method;
5430 /* Create the IL version of mono_gc_barrier_generic_store () */
5431 sig = mono_metadata_signature_alloc (mono_defaults.corlib, 1);
5432 sig->ret = &mono_defaults.void_class->byval_arg;
5433 sig->params [0] = &mono_defaults.int_class->byval_arg;
5435 mb = mono_mb_new (mono_defaults.object_class, "wbarrier", MONO_WRAPPER_WRITE_BARRIER);
5437 #ifdef MANAGED_WBARRIER
5438 if (use_cardtable) {
5439 emit_nursery_check (mb, nursery_check_labels);
5441 addr = sgen_cardtable + ((address >> CARD_BITS) & CARD_MASK)
5445 LDC_PTR sgen_cardtable
5447 address >> CARD_BITS
5451 if (SGEN_HAVE_OVERLAPPING_CARDS) {
5452 LDC_PTR card_table_mask
5459 mono_mb_emit_ptr (mb, sgen_cardtable);
5460 mono_mb_emit_ldarg (mb, 0);
5461 mono_mb_emit_icon (mb, CARD_BITS);
5462 mono_mb_emit_byte (mb, CEE_SHR_UN);
5463 #ifdef SGEN_HAVE_OVERLAPPING_CARDS
5464 mono_mb_emit_ptr (mb, (gpointer)CARD_MASK);
5465 mono_mb_emit_byte (mb, CEE_AND);
5467 mono_mb_emit_byte (mb, CEE_ADD);
5468 mono_mb_emit_icon (mb, 1);
5469 mono_mb_emit_byte (mb, CEE_STIND_I1);
5472 for (i = 0; i < 3; ++i) {
5473 if (nursery_check_labels [i])
5474 mono_mb_patch_branch (mb, nursery_check_labels [i]);
5476 mono_mb_emit_byte (mb, CEE_RET);
5477 } else if (mono_runtime_has_tls_get ()) {
5478 emit_nursery_check (mb, nursery_check_labels);
5480 // if (ptr >= stack_end) goto need_wb;
5481 mono_mb_emit_ldarg (mb, 0);
5482 EMIT_TLS_ACCESS (mb, stack_end, stack_end_offset);
5483 label_need_wb = mono_mb_emit_branch (mb, CEE_BGE_UN);
5485 // if (ptr >= stack_start) return;
5486 dummy_var = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
5487 mono_mb_emit_ldarg (mb, 0);
5488 mono_mb_emit_ldloc_addr (mb, dummy_var);
5489 label_no_wb_3 = mono_mb_emit_branch (mb, CEE_BGE_UN);
5492 mono_mb_patch_branch (mb, label_need_wb);
5494 // buffer = STORE_REMSET_BUFFER;
5495 buffer_var = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
5496 EMIT_TLS_ACCESS (mb, store_remset_buffer, store_remset_buffer_offset);
5497 mono_mb_emit_stloc (mb, buffer_var);
5499 // buffer_index = STORE_REMSET_BUFFER_INDEX;
5500 buffer_index_var = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
5501 EMIT_TLS_ACCESS (mb, store_remset_buffer_index, store_remset_buffer_index_offset);
5502 mono_mb_emit_stloc (mb, buffer_index_var);
5504 // if (buffer [buffer_index] == ptr) return;
5505 mono_mb_emit_ldloc (mb, buffer_var);
5506 mono_mb_emit_ldloc (mb, buffer_index_var);
5507 g_assert (sizeof (gpointer) == 4 || sizeof (gpointer) == 8);
5508 mono_mb_emit_icon (mb, sizeof (gpointer) == 4 ? 2 : 3);
5509 mono_mb_emit_byte (mb, CEE_SHL);
5510 mono_mb_emit_byte (mb, CEE_ADD);
5511 mono_mb_emit_byte (mb, CEE_LDIND_I);
5512 mono_mb_emit_ldarg (mb, 0);
5513 label_no_wb_4 = mono_mb_emit_branch (mb, CEE_BEQ);
5516 mono_mb_emit_ldloc (mb, buffer_index_var);
5517 mono_mb_emit_icon (mb, 1);
5518 mono_mb_emit_byte (mb, CEE_ADD);
5519 mono_mb_emit_stloc (mb, buffer_index_var);
5521 // if (buffer_index >= STORE_REMSET_BUFFER_SIZE) goto slow_path;
5522 mono_mb_emit_ldloc (mb, buffer_index_var);
5523 mono_mb_emit_icon (mb, STORE_REMSET_BUFFER_SIZE);
5524 label_slow_path = mono_mb_emit_branch (mb, CEE_BGE);
5526 // buffer [buffer_index] = ptr;
5527 mono_mb_emit_ldloc (mb, buffer_var);
5528 mono_mb_emit_ldloc (mb, buffer_index_var);
5529 g_assert (sizeof (gpointer) == 4 || sizeof (gpointer) == 8);
5530 mono_mb_emit_icon (mb, sizeof (gpointer) == 4 ? 2 : 3);
5531 mono_mb_emit_byte (mb, CEE_SHL);
5532 mono_mb_emit_byte (mb, CEE_ADD);
5533 mono_mb_emit_ldarg (mb, 0);
5534 mono_mb_emit_byte (mb, CEE_STIND_I);
5536 // STORE_REMSET_BUFFER_INDEX = buffer_index;
5537 EMIT_TLS_ACCESS (mb, store_remset_buffer_index_addr, store_remset_buffer_index_addr_offset);
5538 mono_mb_emit_ldloc (mb, buffer_index_var);
5539 mono_mb_emit_byte (mb, CEE_STIND_I);
5542 for (i = 0; i < 3; ++i) {
5543 if (nursery_check_labels [i])
5544 mono_mb_patch_branch (mb, nursery_check_labels [i]);
5546 mono_mb_patch_branch (mb, label_no_wb_3);
5547 mono_mb_patch_branch (mb, label_no_wb_4);
5548 mono_mb_emit_byte (mb, CEE_RET);
5551 mono_mb_patch_branch (mb, label_slow_path);
5553 mono_mb_emit_ldarg (mb, 0);
5554 mono_mb_emit_icall (mb, mono_gc_wbarrier_generic_nostore);
5555 mono_mb_emit_byte (mb, CEE_RET);
5559 mono_mb_emit_ldarg (mb, 0);
5560 mono_mb_emit_icall (mb, mono_gc_wbarrier_generic_nostore);
5561 mono_mb_emit_byte (mb, CEE_RET);
5564 res = mono_mb_create_method (mb, sig, 16);
5567 mono_loader_lock ();
5568 if (write_barrier_method) {
5569 /* Already created */
5570 mono_free_method (res);
5572 /* double-checked locking */
5573 mono_memory_barrier ();
5574 write_barrier_method = res;
5576 mono_loader_unlock ();
5578 return write_barrier_method;
/*
 * mono_gc_get_description:
 *
 *   Return a newly-allocated, human-readable description of this collector.
 * Ownership transfers to the caller, who must g_free () it.
 */
char*
mono_gc_get_description (void)
{
	return g_strdup ("sgen");
}
/*
 * mono_gc_set_desktop_mode:
 *
 *   No-op for sgen: desktop-mode tuning only affects other collectors.
 * NOTE(review): empty body reconstructed from upstream sgen-gc.c — confirm.
 */
void
mono_gc_set_desktop_mode (void)
{
}
5593 mono_gc_is_moving (void)
5599 mono_gc_is_disabled (void)
5605 BOOL APIENTRY mono_gc_dllmain (HMODULE module_handle, DWORD reason, LPVOID reserved)
5612 sgen_get_nursery_clear_policy (void)
5614 return nursery_clear_policy;
5618 sgen_get_array_fill_vtable (void)
5620 if (!array_fill_vtable) {
5621 static MonoClass klass;
5622 static MonoVTable vtable;
5625 MonoDomain *domain = mono_get_root_domain ();
5628 klass.element_class = mono_defaults.byte_class;
5630 klass.instance_size = sizeof (MonoArray);
5631 klass.sizes.element_size = 1;
5632 klass.name = "array_filler_type";
5634 vtable.klass = &klass;
5636 vtable.gc_descr = mono_gc_make_descr_for_array (TRUE, &bmap, 0, 1);
5639 array_fill_vtable = &vtable;
5641 return array_fill_vtable;
5651 sgen_gc_unlock (void)
5657 sgen_major_collector_iterate_live_block_ranges (sgen_cardtable_block_callback callback)
5659 major_collector.iterate_live_block_ranges (callback);
5663 sgen_major_collector_scan_card_table (SgenGrayQueue *queue)
5665 major_collector.scan_card_table (FALSE, queue);
5669 sgen_get_major_collector (void)
5671 return &major_collector;
5674 void mono_gc_set_skip_thread (gboolean skip)
5676 SgenThreadInfo *info = mono_thread_info_current ();
5679 info->gc_disabled = skip;
5684 sgen_get_remset (void)
5690 mono_gc_get_vtable_bits (MonoClass *class)
5692 if (sgen_need_bridge_processing () && sgen_is_bridge_class (class))
5693 return SGEN_GC_BIT_BRIDGE_OBJECT;
5698 mono_gc_register_altstack (gpointer stack, gint32 stack_size, gpointer altstack, gint32 altstack_size)
5705 sgen_check_whole_heap_stw (void)
5707 sgen_stop_world (0);
5708 sgen_clear_nursery_fragments ();
5709 sgen_check_whole_heap (FALSE);
5710 sgen_restart_world (0, NULL);
5714 sgen_gc_event_moves (void)
5716 if (moved_objects_idx) {
5717 mono_profiler_gc_moves (moved_objects, moved_objects_idx);
5718 moved_objects_idx = 0;
5722 #endif /* HAVE_SGEN_GC */