2 * sgen-gc.c: Simple generational GC.
5 * Paolo Molaro (lupus@ximian.com)
6 * Rodrigo Kumpera (kumpera@gmail.com)
8 * Copyright 2005-2011 Novell, Inc (http://www.novell.com)
9 * Copyright 2011 Xamarin Inc (http://www.xamarin.com)
11 * Thread start/stop adapted from Boehm's GC:
12 * Copyright (c) 1994 by Xerox Corporation. All rights reserved.
13 * Copyright (c) 1996 by Silicon Graphics. All rights reserved.
14 * Copyright (c) 1998 by Fergus Henderson. All rights reserved.
15 * Copyright (c) 2000-2004 by Hewlett-Packard Company. All rights reserved.
16 * Copyright 2001-2003 Ximian, Inc
17 * Copyright 2003-2010 Novell, Inc.
18 * Copyright 2011 Xamarin, Inc.
19 * Copyright (C) 2012 Xamarin Inc
21 * This library is free software; you can redistribute it and/or
22 * modify it under the terms of the GNU Library General Public
23 * License 2.0 as published by the Free Software Foundation;
25 * This library is distributed in the hope that it will be useful,
26 * but WITHOUT ANY WARRANTY; without even the implied warranty of
27 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
28 * Library General Public License for more details.
30 * You should have received a copy of the GNU Library General Public
31 * License 2.0 along with this library; if not, write to the Free
32 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 34 * Important: allocation always provides zeroed memory; having to do
35 * a memset after allocation is deadly for performance.
36 * Memory usage at startup is currently as follows:
38 * 64 KB internal space
40 * We should provide a small memory config with half the sizes
42 * We currently try to make as few mono assumptions as possible:
43 * 1) 2-word header with no GC pointers in it (first vtable, second to store the
45 * 2) gc descriptor is the second word in the vtable (first word in the class)
46 * 3) 8 byte alignment is the minimum and enough (not true for special structures (SIMD), FIXME)
47 * 4) there is a function to get an object's size and the number of
48 * elements in an array.
49 * 5) we know the special way bounds are allocated for complex arrays
50 * 6) we know about proxies and how to treat them when domains are unloaded
52 * Always try to keep stack usage to a minimum: no recursive behaviour
53 * and no large stack allocs.
55 * General description.
56 * Objects are initially allocated in a nursery using a fast bump-pointer technique.
57 * When the nursery is full we start a nursery collection: this is performed with a
59 * When the old generation is full we start a copying GC of the old generation as well:
 60 * this will be changed to mark&sweep with copying when fragmentation becomes too severe
61 * in the future. Maybe we'll even do both during the same collection like IMMIX.
63 * The things that complicate this description are:
64 * *) pinned objects: we can't move them so we need to keep track of them
65 * *) no precise info of the thread stacks and registers: we need to be able to
66 * quickly find the objects that may be referenced conservatively and pin them
67 * (this makes the first issues more important)
68 * *) large objects are too expensive to be dealt with using copying GC: we handle them
69 * with mark/sweep during major collections
70 * *) some objects need to not move even if they are small (interned strings, Type handles):
71 * we use mark/sweep for them, too: they are not allocated in the nursery, but inside
72 * PinnedChunks regions
78 *) we could have a function pointer in MonoClass to implement
79 customized write barriers for value types
81 *) investigate the stuff needed to advance a thread to a GC-safe
82 point (single-stepping, read from unmapped memory etc) and implement it.
83 This would enable us to inline allocations and write barriers, for example,
84 or at least parts of them, like the write barrier checks.
85 We may need this also for handling precise info on stacks, even simple things
86 as having uninitialized data on the stack and having to wait for the prolog
87 to zero it. Not an issue for the last frame that we scan conservatively.
88 We could always not trust the value in the slots anyway.
90 *) modify the jit to save info about references in stack locations:
91 this can be done just for locals as a start, so that at least
92 part of the stack is handled precisely.
 94 *) test/fix endianness issues
96 *) Implement a card table as the write barrier instead of remembered
97 sets? Card tables are not easy to implement with our current
98 memory layout. We have several different kinds of major heap
99 objects: Small objects in regular blocks, small objects in pinned
100 chunks and LOS objects. If we just have a pointer we have no way
101 to tell which kind of object it points into, therefore we cannot
102 know where its card table is. The least we have to do to make
103 this happen is to get rid of write barriers for indirect stores.
106 *) Get rid of write barriers for indirect stores. We can do this by
107 telling the GC to wbarrier-register an object once we do an ldloca
108 or ldelema on it, and to unregister it once it's not used anymore
109 (it can only travel downwards on the stack). The problem with
110 unregistering is that it needs to happen eventually no matter
111 what, even if exceptions are thrown, the thread aborts, etc.
112 Rodrigo suggested that we could do only the registering part and
113 let the collector find out (pessimistically) when it's safe to
114 unregister, namely when the stack pointer of the thread that
115 registered the object is higher than it was when the registering
116 happened. This might make for a good first implementation to get
117 some data on performance.
119 *) Some sort of blacklist support? Blacklists is a concept from the
120 Boehm GC: if during a conservative scan we find pointers to an
121 area which we might use as heap, we mark that area as unusable, so
122 pointer retention by random pinning pointers is reduced.
124 *) experiment with max small object size (very small right now - 2kb,
125 because it's tied to the max freelist size)
127 *) add an option to mmap the whole heap in one chunk: it makes for many
128 simplifications in the checks (put the nursery at the top and just use a single
129 check for inclusion/exclusion): the issue this has is that on 32 bit systems it's
130 not flexible (too much of the address space may be used by default or we can't
131 increase the heap as needed) and we'd need a race-free mechanism to return memory
132 back to the system (mprotect(PROT_NONE) will still keep the memory allocated if it
133 was written to, munmap is needed, but the following mmap may not find the same segment
136 *) memzero the major fragments after restarting the world and optionally a smaller
139 *) investigate having fragment zeroing threads
141 *) separate locks for finalization and other minor stuff to reduce
144 *) try a different copying order to improve memory locality
146 *) a thread abort after a store but before the write barrier will
147 prevent the write barrier from executing
149 *) specialized dynamically generated markers/copiers
151 *) Dynamically adjust TLAB size to the number of threads. If we have
152 too many threads that do allocation, we might need smaller TLABs,
153 and we might get better performance with larger TLABs if we only
154 have a handful of threads. We could sum up the space left in all
155 assigned TLABs and if that's more than some percentage of the
156 nursery size, reduce the TLAB size.
158 *) Explore placing unreachable objects on unused nursery memory.
 159 Instead of memset'ing a region to zero, place an int[] covering it.
160 A good place to start is add_nursery_frag. The tricky thing here is
161 placing those objects atomically outside of a collection.
163 *) Allocation should use asymmetric Dekker synchronization:
164 http://blogs.oracle.com/dave/resource/Asymmetric-Dekker-Synchronization.txt
165 This should help weak consistency archs.
172 #define _XOPEN_SOURCE
173 #define _DARWIN_C_SOURCE
179 #ifdef HAVE_PTHREAD_H
182 #ifdef HAVE_PTHREAD_NP_H
183 #include <pthread_np.h>
185 #ifdef HAVE_SEMAPHORE_H
186 #include <semaphore.h>
194 #include "metadata/sgen-gc.h"
195 #include "metadata/metadata-internals.h"
196 #include "metadata/class-internals.h"
197 #include "metadata/gc-internal.h"
198 #include "metadata/object-internals.h"
199 #include "metadata/threads.h"
200 #include "metadata/sgen-cardtable.h"
201 #include "metadata/sgen-protocol.h"
202 #include "metadata/sgen-archdep.h"
203 #include "metadata/sgen-bridge.h"
204 #include "metadata/sgen-memory-governor.h"
205 #include "metadata/sgen-hash-table.h"
206 #include "metadata/mono-gc.h"
207 #include "metadata/method-builder.h"
208 #include "metadata/profiler-private.h"
209 #include "metadata/monitor.h"
210 #include "metadata/threadpool-internals.h"
211 #include "metadata/mempool-internals.h"
212 #include "metadata/marshal.h"
213 #include "metadata/runtime.h"
214 #include "metadata/sgen-cardtable.h"
215 #include "metadata/sgen-pinning.h"
216 #include "metadata/sgen-workers.h"
217 #include "metadata/sgen-layout-stats.h"
218 #include "utils/mono-mmap.h"
219 #include "utils/mono-time.h"
220 #include "utils/mono-semaphore.h"
221 #include "utils/mono-counters.h"
222 #include "utils/mono-proclib.h"
223 #include "utils/mono-memory-model.h"
224 #include "utils/mono-logger-internal.h"
225 #include "utils/dtrace.h"
227 #include <mono/utils/mono-logger-internal.h>
228 #include <mono/utils/memcheck.h>
230 #if defined(__MACH__)
231 #include "utils/mach-support.h"
234 #define OPDEF(a,b,c,d,e,f,g,h,i,j) \
238 #include "mono/cil/opcode.def"
244 #undef pthread_create
246 #undef pthread_detach
249 * ######################################################################
250 * ######## Types and constants used by the GC.
251 * ######################################################################
254 /* 0 means not initialized, 1 is initialized, -1 means in progress */
255 static int gc_initialized = 0;
256 /* If set, check if we need to do something every X allocations */
257 gboolean has_per_allocation_action;
258 /* If set, do a heap check every X allocation */
259 guint32 verify_before_allocs = 0;
260 /* If set, do a minor collection before every X allocation */
261 guint32 collect_before_allocs = 0;
262 /* If set, do a whole heap check before each collection */
263 static gboolean whole_heap_check_before_collection = FALSE;
264 /* If set, do a heap consistency check before each minor collection */
265 static gboolean consistency_check_at_minor_collection = FALSE;
266 /* If set, do a mod union consistency check before each finishing collection pause */
267 static gboolean mod_union_consistency_check = FALSE;
268 /* If set, check whether mark bits are consistent after major collections */
269 static gboolean check_mark_bits_after_major_collection = FALSE;
270 /* If set, check that all nursery objects are pinned/not pinned, depending on context */
271 static gboolean check_nursery_objects_pinned = FALSE;
272 /* If set, do a few checks when the concurrent collector is used */
273 static gboolean do_concurrent_checks = FALSE;
274 /* If set, check that there are no references to the domain left at domain unload */
275 static gboolean xdomain_checks = FALSE;
276 /* If not null, dump the heap after each collection into this file */
277 static FILE *heap_dump_file = NULL;
278 /* If set, mark stacks conservatively, even if precise marking is possible */
279 static gboolean conservative_stack_mark = FALSE;
280 /* If set, do a plausibility check on the scan_starts before and after
282 static gboolean do_scan_starts_check = FALSE;
284 * If the major collector is concurrent and this is FALSE, we will
285 * never initiate a synchronous major collection, unless requested via
288 static gboolean allow_synchronous_major = TRUE;
289 static gboolean nursery_collection_is_parallel = FALSE;
290 static gboolean disable_minor_collections = FALSE;
291 static gboolean disable_major_collections = FALSE;
292 gboolean do_pin_stats = FALSE;
293 static gboolean do_verify_nursery = FALSE;
294 static gboolean do_dump_nursery_content = FALSE;
296 #ifdef HEAVY_STATISTICS
297 long long stat_objects_alloced_degraded = 0;
298 long long stat_bytes_alloced_degraded = 0;
300 long long stat_copy_object_called_nursery = 0;
301 long long stat_objects_copied_nursery = 0;
302 long long stat_copy_object_called_major = 0;
303 long long stat_objects_copied_major = 0;
305 long long stat_scan_object_called_nursery = 0;
306 long long stat_scan_object_called_major = 0;
308 long long stat_slots_allocated_in_vain;
310 long long stat_nursery_copy_object_failed_from_space = 0;
311 long long stat_nursery_copy_object_failed_forwarded = 0;
312 long long stat_nursery_copy_object_failed_pinned = 0;
313 long long stat_nursery_copy_object_failed_to_space = 0;
315 static int stat_wbarrier_add_to_global_remset = 0;
316 static int stat_wbarrier_set_field = 0;
317 static int stat_wbarrier_set_arrayref = 0;
318 static int stat_wbarrier_arrayref_copy = 0;
319 static int stat_wbarrier_generic_store = 0;
320 static int stat_wbarrier_set_root = 0;
321 static int stat_wbarrier_value_copy = 0;
322 static int stat_wbarrier_object_copy = 0;
325 int stat_minor_gcs = 0;
326 int stat_major_gcs = 0;
328 static long long stat_pinned_objects = 0;
330 static long long time_minor_pre_collection_fragment_clear = 0;
331 static long long time_minor_pinning = 0;
332 static long long time_minor_scan_remsets = 0;
333 static long long time_minor_scan_pinned = 0;
334 static long long time_minor_scan_registered_roots = 0;
335 static long long time_minor_scan_thread_data = 0;
336 static long long time_minor_finish_gray_stack = 0;
337 static long long time_minor_fragment_creation = 0;
339 static long long time_major_pre_collection_fragment_clear = 0;
340 static long long time_major_pinning = 0;
341 static long long time_major_scan_pinned = 0;
342 static long long time_major_scan_registered_roots = 0;
343 static long long time_major_scan_thread_data = 0;
344 static long long time_major_scan_alloc_pinned = 0;
345 static long long time_major_scan_finalized = 0;
346 static long long time_major_scan_big_objects = 0;
347 static long long time_major_finish_gray_stack = 0;
348 static long long time_major_free_bigobjs = 0;
349 static long long time_major_los_sweep = 0;
350 static long long time_major_sweep = 0;
351 static long long time_major_fragment_creation = 0;
353 int gc_debug_level = 0;
358 mono_gc_flush_info (void)
360 fflush (gc_debug_file);
364 #define TV_DECLARE SGEN_TV_DECLARE
365 #define TV_GETTIME SGEN_TV_GETTIME
366 #define TV_ELAPSED SGEN_TV_ELAPSED
367 #define TV_ELAPSED_MS SGEN_TV_ELAPSED_MS
369 #define ALIGN_TO(val,align) ((((guint64)val) + ((align) - 1)) & ~((align) - 1))
371 NurseryClearPolicy nursery_clear_policy = CLEAR_AT_TLAB_CREATION;
373 #define object_is_forwarded SGEN_OBJECT_IS_FORWARDED
374 #define object_is_pinned SGEN_OBJECT_IS_PINNED
375 #define pin_object SGEN_PIN_OBJECT
376 #define unpin_object SGEN_UNPIN_OBJECT
378 #define ptr_in_nursery sgen_ptr_in_nursery
380 #define LOAD_VTABLE SGEN_LOAD_VTABLE
383 safe_name (void* obj)
385 MonoVTable *vt = (MonoVTable*)LOAD_VTABLE (obj);
386 return vt->klass->name;
389 #define safe_object_get_size sgen_safe_object_get_size
392 sgen_safe_name (void* obj)
394 return safe_name (obj);
398 * ######################################################################
399 * ######## Global data.
400 * ######################################################################
402 LOCK_DECLARE (gc_mutex);
404 #define SCAN_START_SIZE SGEN_SCAN_START_SIZE
406 static mword pagesize = 4096;
407 int degraded_mode = 0;
409 static mword bytes_pinned_from_failed_allocation = 0;
411 GCMemSection *nursery_section = NULL;
412 static mword lowest_heap_address = ~(mword)0;
413 static mword highest_heap_address = 0;
415 LOCK_DECLARE (sgen_interruption_mutex);
416 static LOCK_DECLARE (pin_queue_mutex);
418 #define LOCK_PIN_QUEUE mono_mutex_lock (&pin_queue_mutex)
419 #define UNLOCK_PIN_QUEUE mono_mutex_unlock (&pin_queue_mutex)
421 typedef struct _FinalizeReadyEntry FinalizeReadyEntry;
422 struct _FinalizeReadyEntry {
423 FinalizeReadyEntry *next;
427 typedef struct _EphemeronLinkNode EphemeronLinkNode;
429 struct _EphemeronLinkNode {
430 EphemeronLinkNode *next;
439 int current_collection_generation = -1;
440 volatile gboolean concurrent_collection_in_progress = FALSE;
442 /* objects that are ready to be finalized */
443 static FinalizeReadyEntry *fin_ready_list = NULL;
444 static FinalizeReadyEntry *critical_fin_list = NULL;
446 static EphemeronLinkNode *ephemeron_list;
448 /* registered roots: the key to the hash is the root start address */
450 * Different kinds of roots are kept separate to speed up pin_from_roots () for example.
452 SgenHashTable roots_hash [ROOT_TYPE_NUM] = {
453 SGEN_HASH_TABLE_INIT (INTERNAL_MEM_ROOTS_TABLE, INTERNAL_MEM_ROOT_RECORD, sizeof (RootRecord), mono_aligned_addr_hash, NULL),
454 SGEN_HASH_TABLE_INIT (INTERNAL_MEM_ROOTS_TABLE, INTERNAL_MEM_ROOT_RECORD, sizeof (RootRecord), mono_aligned_addr_hash, NULL),
455 SGEN_HASH_TABLE_INIT (INTERNAL_MEM_ROOTS_TABLE, INTERNAL_MEM_ROOT_RECORD, sizeof (RootRecord), mono_aligned_addr_hash, NULL)
457 static mword roots_size = 0; /* amount of memory in the root set */
459 #define GC_ROOT_NUM 32
461 int count; /* must be the first field */
462 void *objects [GC_ROOT_NUM];
463 int root_types [GC_ROOT_NUM];
464 uintptr_t extra_info [GC_ROOT_NUM];
468 notify_gc_roots (GCRootReport *report)
472 mono_profiler_gc_roots (report->count, report->objects, report->root_types, report->extra_info);
477 add_profile_gc_root (GCRootReport *report, void *object, int rtype, uintptr_t extra_info)
479 if (report->count == GC_ROOT_NUM)
480 notify_gc_roots (report);
481 report->objects [report->count] = object;
482 report->root_types [report->count] = rtype;
483 report->extra_info [report->count++] = (uintptr_t)((MonoVTable*)LOAD_VTABLE (object))->klass;
486 MonoNativeTlsKey thread_info_key;
488 #ifdef HAVE_KW_THREAD
489 __thread SgenThreadInfo *sgen_thread_info;
490 __thread char *stack_end;
493 /* The size of a TLAB */
494 /* The bigger the value, the less often we have to go to the slow path to allocate a new
495 * one, but the more space is wasted by threads not allocating much memory.
497 * FIXME: Make this self-tuning for each thread.
499 guint32 tlab_size = (1024 * 4);
501 #define MAX_SMALL_OBJ_SIZE SGEN_MAX_SMALL_OBJ_SIZE
503 /* Functions supplied by the runtime to be called by the GC */
504 static MonoGCCallbacks gc_callbacks;
506 #define ALLOC_ALIGN SGEN_ALLOC_ALIGN
507 #define ALLOC_ALIGN_BITS SGEN_ALLOC_ALIGN_BITS
509 #define ALIGN_UP SGEN_ALIGN_UP
511 #define MOVED_OBJECTS_NUM 64
512 static void *moved_objects [MOVED_OBJECTS_NUM];
513 static int moved_objects_idx = 0;
515 /* Vtable of the objects used to fill out nursery fragments before a collection */
516 static MonoVTable *array_fill_vtable;
518 #ifdef SGEN_DEBUG_INTERNAL_ALLOC
519 MonoNativeThreadId main_gc_thread = NULL;
522 /*Object was pinned during the current collection*/
523 static mword objects_pinned;
526 * ######################################################################
527 * ######## Macros and function declarations.
528 * ######################################################################
532 align_pointer (void *ptr)
534 mword p = (mword)ptr;
535 p += sizeof (gpointer) - 1;
536 p &= ~ (sizeof (gpointer) - 1);
540 typedef SgenGrayQueue GrayQueue;
542 /* forward declarations */
543 static void scan_thread_data (void *start_nursery, void *end_nursery, gboolean precise, GrayQueue *queue);
544 static void scan_from_registered_roots (char *addr_start, char *addr_end, int root_type, ScanCopyContext ctx);
545 static void scan_finalizer_entries (FinalizeReadyEntry *list, ScanCopyContext ctx);
546 static void report_finalizer_roots (void);
547 static void report_registered_roots (void);
549 static void pin_from_roots (void *start_nursery, void *end_nursery, GrayQueue *queue);
550 static int pin_objects_from_addresses (GCMemSection *section, void **start, void **end, void *start_nursery, void *end_nursery, ScanCopyContext ctx);
551 static void finish_gray_stack (int generation, GrayQueue *queue);
553 void mono_gc_scan_for_specific_ref (MonoObject *key, gboolean precise);
556 static void init_stats (void);
558 static int mark_ephemerons_in_range (ScanCopyContext ctx);
559 static void clear_unreachable_ephemerons (ScanCopyContext ctx);
560 static void null_ephemerons_for_domain (MonoDomain *domain);
562 static gboolean major_update_or_finish_concurrent_collection (gboolean force_finish);
564 SgenObjectOperations current_object_ops;
565 SgenMajorCollector major_collector;
566 SgenMinorCollector sgen_minor_collector;
567 static GrayQueue gray_queue;
569 static SgenRemeberedSet remset;
571 /* The gray queue to use from the main collection thread. */
572 #define WORKERS_DISTRIBUTE_GRAY_QUEUE (&gray_queue)
575 * The gray queue a worker job must use. If we're not parallel or
576 * concurrent, we use the main gray queue.
578 static SgenGrayQueue*
579 sgen_workers_get_job_gray_queue (WorkerData *worker_data)
581 return worker_data ? &worker_data->private_gray_queue : WORKERS_DISTRIBUTE_GRAY_QUEUE;
585 gray_queue_redirect (SgenGrayQueue *queue)
587 gboolean wake = FALSE;
591 GrayQueueSection *section = sgen_gray_object_dequeue_section (queue);
594 sgen_section_gray_queue_enqueue (queue->alloc_prepare_data, section);
599 g_assert (concurrent_collection_in_progress ||
600 (current_collection_generation == GENERATION_OLD && major_collector.is_parallel));
601 if (sgen_workers_have_started ()) {
602 sgen_workers_wake_up_all ();
604 if (concurrent_collection_in_progress)
605 g_assert (current_collection_generation == -1);
611 is_xdomain_ref_allowed (gpointer *ptr, char *obj, MonoDomain *domain)
613 MonoObject *o = (MonoObject*)(obj);
614 MonoObject *ref = (MonoObject*)*(ptr);
615 int offset = (char*)(ptr) - (char*)o;
617 if (o->vtable->klass == mono_defaults.thread_class && offset == G_STRUCT_OFFSET (MonoThread, internal_thread))
619 if (o->vtable->klass == mono_defaults.internal_thread_class && offset == G_STRUCT_OFFSET (MonoInternalThread, current_appcontext))
622 #ifndef DISABLE_REMOTING
623 if (mono_class_has_parent_fast (o->vtable->klass, mono_defaults.real_proxy_class) &&
624 offset == G_STRUCT_OFFSET (MonoRealProxy, unwrapped_server))
627 /* Thread.cached_culture_info */
628 if (!strcmp (ref->vtable->klass->name_space, "System.Globalization") &&
629 !strcmp (ref->vtable->klass->name, "CultureInfo") &&
630 !strcmp(o->vtable->klass->name_space, "System") &&
631 !strcmp(o->vtable->klass->name, "Object[]"))
634 * at System.IO.MemoryStream.InternalConstructor (byte[],int,int,bool,bool) [0x0004d] in /home/schani/Work/novell/trunk/mcs/class/corlib/System.IO/MemoryStream.cs:121
635 * at System.IO.MemoryStream..ctor (byte[]) [0x00017] in /home/schani/Work/novell/trunk/mcs/class/corlib/System.IO/MemoryStream.cs:81
636 * at (wrapper remoting-invoke-with-check) System.IO.MemoryStream..ctor (byte[]) <IL 0x00020, 0xffffffff>
637 * at System.Runtime.Remoting.Messaging.CADMethodCallMessage.GetArguments () [0x0000d] in /home/schani/Work/novell/trunk/mcs/class/corlib/System.Runtime.Remoting.Messaging/CADMessages.cs:327
638 * at System.Runtime.Remoting.Messaging.MethodCall..ctor (System.Runtime.Remoting.Messaging.CADMethodCallMessage) [0x00017] in /home/schani/Work/novell/trunk/mcs/class/corlib/System.Runtime.Remoting.Messaging/MethodCall.cs:87
639 * at System.AppDomain.ProcessMessageInDomain (byte[],System.Runtime.Remoting.Messaging.CADMethodCallMessage,byte[]&,System.Runtime.Remoting.Messaging.CADMethodReturnMessage&) [0x00018] in /home/schani/Work/novell/trunk/mcs/class/corlib/System/AppDomain.cs:1213
640 * at (wrapper remoting-invoke-with-check) System.AppDomain.ProcessMessageInDomain (byte[],System.Runtime.Remoting.Messaging.CADMethodCallMessage,byte[]&,System.Runtime.Remoting.Messaging.CADMethodReturnMessage&) <IL 0x0003d, 0xffffffff>
641 * at System.Runtime.Remoting.Channels.CrossAppDomainSink.ProcessMessageInDomain (byte[],System.Runtime.Remoting.Messaging.CADMethodCallMessage) [0x00008] in /home/schani/Work/novell/trunk/mcs/class/corlib/System.Runtime.Remoting.Channels/CrossAppDomainChannel.cs:198
642 * at (wrapper runtime-invoke) object.runtime_invoke_CrossAppDomainSink/ProcessMessageRes_object_object (object,intptr,intptr,intptr) <IL 0x0004c, 0xffffffff>
644 if (!strcmp (ref->vtable->klass->name_space, "System") &&
645 !strcmp (ref->vtable->klass->name, "Byte[]") &&
646 !strcmp (o->vtable->klass->name_space, "System.IO") &&
647 !strcmp (o->vtable->klass->name, "MemoryStream"))
649 /* append_job() in threadpool.c */
650 if (!strcmp (ref->vtable->klass->name_space, "System.Runtime.Remoting.Messaging") &&
651 !strcmp (ref->vtable->klass->name, "AsyncResult") &&
652 !strcmp (o->vtable->klass->name_space, "System") &&
653 !strcmp (o->vtable->klass->name, "Object[]") &&
654 mono_thread_pool_is_queue_array ((MonoArray*) o))
660 check_reference_for_xdomain (gpointer *ptr, char *obj, MonoDomain *domain)
662 MonoObject *o = (MonoObject*)(obj);
663 MonoObject *ref = (MonoObject*)*(ptr);
664 int offset = (char*)(ptr) - (char*)o;
666 MonoClassField *field;
669 if (!ref || ref->vtable->domain == domain)
671 if (is_xdomain_ref_allowed (ptr, obj, domain))
675 for (class = o->vtable->klass; class; class = class->parent) {
678 for (i = 0; i < class->field.count; ++i) {
679 if (class->fields[i].offset == offset) {
680 field = &class->fields[i];
688 if (ref->vtable->klass == mono_defaults.string_class)
689 str = mono_string_to_utf8 ((MonoString*)ref);
692 g_print ("xdomain reference in %p (%s.%s) at offset %d (%s) to %p (%s.%s) (%s) - pointed to by:\n",
693 o, o->vtable->klass->name_space, o->vtable->klass->name,
694 offset, field ? field->name : "",
695 ref, ref->vtable->klass->name_space, ref->vtable->klass->name, str ? str : "");
696 mono_gc_scan_for_specific_ref (o, TRUE);
702 #define HANDLE_PTR(ptr,obj) check_reference_for_xdomain ((ptr), (obj), domain)
705 scan_object_for_xdomain_refs (char *start, mword size, void *data)
707 MonoDomain *domain = ((MonoObject*)start)->vtable->domain;
709 #include "sgen-scan-object.h"
712 static gboolean scan_object_for_specific_ref_precise = TRUE;
715 #define HANDLE_PTR(ptr,obj) do { \
716 if ((MonoObject*)*(ptr) == key) { \
717 g_print ("found ref to %p in object %p (%s) at offset %td\n", \
718 key, (obj), safe_name ((obj)), ((char*)(ptr) - (char*)(obj))); \
723 scan_object_for_specific_ref (char *start, MonoObject *key)
727 if ((forwarded = SGEN_OBJECT_IS_FORWARDED (start)))
730 if (scan_object_for_specific_ref_precise) {
731 #include "sgen-scan-object.h"
733 mword *words = (mword*)start;
734 size_t size = safe_object_get_size ((MonoObject*)start);
736 for (i = 0; i < size / sizeof (mword); ++i) {
737 if (words [i] == (mword)key) {
738 g_print ("found possible ref to %p in object %p (%s) at offset %td\n",
739 key, start, safe_name (start), i * sizeof (mword));
746 sgen_scan_area_with_callback (char *start, char *end, IterateObjectCallbackFunc callback, void *data, gboolean allow_flags)
748 while (start < end) {
752 if (!*(void**)start) {
753 start += sizeof (void*); /* should be ALLOC_ALIGN, really */
758 if (!(obj = SGEN_OBJECT_IS_FORWARDED (start)))
764 size = ALIGN_UP (safe_object_get_size ((MonoObject*)obj));
766 if ((MonoVTable*)SGEN_LOAD_VTABLE (obj) != array_fill_vtable)
767 callback (obj, size, data);
774 scan_object_for_specific_ref_callback (char *obj, size_t size, MonoObject *key)
776 scan_object_for_specific_ref (obj, key);
780 check_root_obj_specific_ref (RootRecord *root, MonoObject *key, MonoObject *obj)
784 g_print ("found ref to %p in root record %p\n", key, root);
787 static MonoObject *check_key = NULL;
788 static RootRecord *check_root = NULL;
791 check_root_obj_specific_ref_from_marker (void **obj)
793 check_root_obj_specific_ref (check_root, check_key, *obj);
797 scan_roots_for_specific_ref (MonoObject *key, int root_type)
803 SGEN_HASH_TABLE_FOREACH (&roots_hash [root_type], start_root, root) {
804 mword desc = root->root_desc;
808 switch (desc & ROOT_DESC_TYPE_MASK) {
809 case ROOT_DESC_BITMAP:
810 desc >>= ROOT_DESC_TYPE_SHIFT;
813 check_root_obj_specific_ref (root, key, *start_root);
818 case ROOT_DESC_COMPLEX: {
819 gsize *bitmap_data = sgen_get_complex_descriptor_bitmap (desc);
820 int bwords = (*bitmap_data) - 1;
821 void **start_run = start_root;
823 while (bwords-- > 0) {
824 gsize bmap = *bitmap_data++;
825 void **objptr = start_run;
828 check_root_obj_specific_ref (root, key, *objptr);
832 start_run += GC_BITS_PER_WORD;
836 case ROOT_DESC_USER: {
837 MonoGCRootMarkFunc marker = sgen_get_user_descriptor_func (desc);
838 marker (start_root, check_root_obj_specific_ref_from_marker);
841 case ROOT_DESC_RUN_LEN:
842 g_assert_not_reached ();
844 g_assert_not_reached ();
846 } SGEN_HASH_TABLE_FOREACH_END;
853 mono_gc_scan_for_specific_ref (MonoObject *key, gboolean precise)
858 scan_object_for_specific_ref_precise = precise;
860 sgen_scan_area_with_callback (nursery_section->data, nursery_section->end_data,
861 (IterateObjectCallbackFunc)scan_object_for_specific_ref_callback, key, TRUE);
863 major_collector.iterate_objects (TRUE, TRUE, (IterateObjectCallbackFunc)scan_object_for_specific_ref_callback, key);
865 sgen_los_iterate_objects ((IterateObjectCallbackFunc)scan_object_for_specific_ref_callback, key);
867 scan_roots_for_specific_ref (key, ROOT_TYPE_NORMAL);
868 scan_roots_for_specific_ref (key, ROOT_TYPE_WBARRIER);
870 SGEN_HASH_TABLE_FOREACH (&roots_hash [ROOT_TYPE_PINNED], ptr, root) {
871 while (ptr < (void**)root->end_root) {
872 check_root_obj_specific_ref (root, *ptr, key);
875 } SGEN_HASH_TABLE_FOREACH_END;
879 need_remove_object_for_domain (char *start, MonoDomain *domain)
881 if (mono_object_domain (start) == domain) {
882 SGEN_LOG (4, "Need to cleanup object %p", start);
883 binary_protocol_cleanup (start, (gpointer)LOAD_VTABLE (start), safe_object_get_size ((MonoObject*)start));
890 process_object_for_domain_clearing (char *start, MonoDomain *domain)
892 GCVTable *vt = (GCVTable*)LOAD_VTABLE (start);
893 if (vt->klass == mono_defaults.internal_thread_class)
894 g_assert (mono_object_domain (start) == mono_get_root_domain ());
895 /* The object could be a proxy for an object in the domain
897 #ifndef DISABLE_REMOTING
898 if (mono_class_has_parent_fast (vt->klass, mono_defaults.real_proxy_class)) {
899 MonoObject *server = ((MonoRealProxy*)start)->unwrapped_server;
901 /* The server could already have been zeroed out, so
902 we need to check for that, too. */
903 if (server && (!LOAD_VTABLE (server) || mono_object_domain (server) == domain)) {
904 SGEN_LOG (4, "Cleaning up remote pointer in %p to object %p", start, server);
905 ((MonoRealProxy*)start)->unwrapped_server = NULL;
911 static MonoDomain *check_domain = NULL;
914 check_obj_not_in_domain (void **o)
916 g_assert (((MonoObject*)(*o))->vtable->domain != check_domain);
/*
 * scan_for_registered_roots_in_domain:
 *
 * Debug check used during domain unload: walks every registered root of
 * ROOT_TYPE and asserts (via check_obj_not_in_domain) that no root still
 * points into DOMAIN.  The MonoDomain struct itself is exempt.
 */
920 scan_for_registered_roots_in_domain (MonoDomain *domain, int root_type)
924 check_domain = domain;
925 SGEN_HASH_TABLE_FOREACH (&roots_hash [root_type], start_root, root) {
926 mword desc = root->root_desc;
928 /* The MonoDomain struct is allowed to hold
929 references to objects in its own domain. */
930 if (start_root == (void**)domain)
/* Dispatch on the root descriptor encoding. */
933 switch (desc & ROOT_DESC_TYPE_MASK) {
934 case ROOT_DESC_BITMAP:
935 desc >>= ROOT_DESC_TYPE_SHIFT;
937 if ((desc & 1) && *start_root)
938 check_obj_not_in_domain (*start_root);
946 case ROOT_DESC_COMPLEX: {
/* First word of the complex bitmap is its length (in words) + 1. */
944 gsize *bitmap_data = sgen_get_complex_descriptor_bitmap (desc);
945 int bwords = (*bitmap_data) - 1;
946 void **start_run = start_root;
948 while (bwords-- > 0) {
949 gsize bmap = *bitmap_data++;
950 void **objptr = start_run;
952 if ((bmap & 1) && *objptr)
953 check_obj_not_in_domain (*objptr);
957 start_run += GC_BITS_PER_WORD;
961 case ROOT_DESC_USER: {
/* User roots call back into the check for each slot they hold. */
962 MonoGCRootMarkFunc marker = sgen_get_user_descriptor_func (desc);
963 marker (start_root, check_obj_not_in_domain);
966 case ROOT_DESC_RUN_LEN:
967 g_assert_not_reached ();
969 g_assert_not_reached ();
971 } SGEN_HASH_TABLE_FOREACH_END;
/*
 * check_for_xdomain_refs:
 *
 * Debug sweep over the whole heap (nursery, major space and LOS) looking
 * for cross-domain references via scan_object_for_xdomain_refs.
 */
977 check_for_xdomain_refs (void)
981 sgen_scan_area_with_callback (nursery_section->data, nursery_section->end_data,
982 (IterateObjectCallbackFunc)scan_object_for_xdomain_refs, NULL, FALSE);
984 major_collector.iterate_objects (TRUE, TRUE, (IterateObjectCallbackFunc)scan_object_for_xdomain_refs, NULL);
986 for (bigobj = los_object_list; bigobj; bigobj = bigobj->next)
987 scan_object_for_xdomain_refs (bigobj->data, sgen_los_object_size (bigobj), NULL);
/*
 * clear_domain_process_object:
 *
 * Per-object step of domain clearing: scrub cross-domain pointers, decide
 * whether OBJ must be removed because it belongs to DOMAIN, and if so also
 * drop its monitor's weak link so the sync block does not dangle.
 */
991 clear_domain_process_object (char *obj, MonoDomain *domain)
995 process_object_for_domain_clearing (obj, domain);
996 remove = need_remove_object_for_domain (obj, domain);
998 if (remove && ((MonoObject*)obj)->synchronisation) {
999 void **dislink = mono_monitor_get_object_monitor_weak_link ((MonoObject*)obj);
1001 sgen_register_disappearing_link (NULL, dislink, FALSE, TRUE);
/*
 * Nursery variant: objects to be removed are simply memset to zero, since
 * nursery memory is reused wholesale rather than freed per object.
 */
1008 clear_domain_process_minor_object_callback (char *obj, size_t size, MonoDomain *domain)
1010 if (clear_domain_process_object (obj, domain))
1011 memset (obj, 0, size);
/*
 * Major-heap variant of the first clearing pass: only processes the object;
 * actual freeing happens in the dedicated free callbacks below.
 */
1015 clear_domain_process_major_object_callback (char *obj, size_t size, MonoDomain *domain)
1017 clear_domain_process_object (obj, domain);
/* Second pass: free a non-pinned major-heap object belonging to DOMAIN. */
1021 clear_domain_free_major_non_pinned_object_callback (char *obj, size_t size, MonoDomain *domain)
1023 if (need_remove_object_for_domain (obj, domain))
1024 major_collector.free_non_pinned_object (obj, size);
/* Second pass: free a pinned major-heap object belonging to DOMAIN. */
1028 clear_domain_free_major_pinned_object_callback (char *obj, size_t size, MonoDomain *domain)
1030 if (need_remove_object_for_domain (obj, domain))
1031 major_collector.free_pinned_object (obj, size);
1035 * When appdomains are unloaded we can easily remove objects that have finalizers,
1036 * but all the others could still be present in random places on the heap.
1037 * We need a sweep to get rid of them even though it's going to be costly
1039 * The reason we need to remove them is because we access the vtable and class
1040 * structures to know the object size and the reference bitmap: once the domain is
1041 unloaded they point to random memory.
/*
 * mono_gc_clear_domain:
 *
 * Remove every object belonging to DOMAIN from the heap during appdomain
 * unload.  Order matters: ephemerons and disappearing links are nulled
 * before LOS is touched (their memory may be returned to the OS), then two
 * passes are made over major/LOS objects — process first, free second —
 * because freeing can destroy vtables that the processing pass still needs
 * (e.g. to follow proxy references).
 * NOTE(review): excerpt is missing lines (locks, braces); comments only.
 */
1044 mono_gc_clear_domain (MonoDomain * domain)
1046 LOSObject *bigobj, *prev;
/* A concurrent collection must finish before we mutate the heap. */
1051 if (concurrent_collection_in_progress)
1052 sgen_perform_collection (0, GENERATION_OLD, "clear domain", TRUE);
1053 g_assert (!concurrent_collection_in_progress);
1055 sgen_process_fin_stage_entries ();
1056 sgen_process_dislink_stage_entries ();
1058 sgen_clear_nursery_fragments ();
/* Optional debug pass: verify no roots still point into the dying domain. */
1060 if (xdomain_checks && domain != mono_get_root_domain ()) {
1061 scan_for_registered_roots_in_domain (domain, ROOT_TYPE_NORMAL);
1062 scan_for_registered_roots_in_domain (domain, ROOT_TYPE_WBARRIER);
1063 check_for_xdomain_refs ();
1066 /*Ephemerons and dislinks must be processed before LOS since they might end up pointing
1067 to memory returned to the OS.*/
1068 null_ephemerons_for_domain (domain);
1070 for (i = GENERATION_NURSERY; i < GENERATION_MAX; ++i)
1071 sgen_null_links_for_domain (domain, i);
1073 for (i = GENERATION_NURSERY; i < GENERATION_MAX; ++i)
1074 sgen_remove_finalizers_for_domain (domain, i);
1076 sgen_scan_area_with_callback (nursery_section->data, nursery_section->end_data,
1077 (IterateObjectCallbackFunc)clear_domain_process_minor_object_callback, domain, FALSE);
1079 /* We need two passes over major and large objects because
1080 freeing such objects might give their memory back to the OS
1081 (in the case of large objects) or obliterate its vtable
1082 (pinned objects with major-copying or pinned and non-pinned
1083 objects with major-mark&sweep), but we might need to
1084 dereference a pointer from an object to another object if
1085 the first object is a proxy. */
1086 major_collector.iterate_objects (TRUE, TRUE, (IterateObjectCallbackFunc)clear_domain_process_major_object_callback, domain);
1087 for (bigobj = los_object_list; bigobj; bigobj = bigobj->next)
1088 clear_domain_process_object (bigobj->data, domain);
/* Unlink and free large objects belonging to the domain. */
1091 for (bigobj = los_object_list; bigobj;) {
1092 if (need_remove_object_for_domain (bigobj->data, domain)) {
1093 LOSObject *to_free = bigobj;
1095 prev->next = bigobj->next;
1097 los_object_list = bigobj->next;
1098 bigobj = bigobj->next;
1099 SGEN_LOG (4, "Freeing large object %p", bigobj->data);
1100 sgen_los_free_object (to_free);
1104 bigobj = bigobj->next;
1106 major_collector.iterate_objects (TRUE, FALSE, (IterateObjectCallbackFunc)clear_domain_free_major_non_pinned_object_callback, domain);
1107 major_collector.iterate_objects (FALSE, TRUE, (IterateObjectCallbackFunc)clear_domain_free_major_pinned_object_callback, domain);
/* Clearing the root domain happens at shutdown: dump final stats. */
1109 if (domain == mono_get_root_domain ()) {
1110 if (G_UNLIKELY (do_pin_stats))
1111 sgen_pin_stats_print_class_stats ();
1112 sgen_object_layout_dump (stdout);
1119 * sgen_add_to_global_remset:
1121 * The global remset contains locations which point into newspace after
1122 * a minor collection. This can happen if the objects they point to are pinned.
1124 * LOCKING: If called from a parallel collector, the global remset
1125 * lock must be held. For serial collectors that is not necessary.
/*
 * sgen_add_to_global_remset:
 *
 * Record PTR (a slot pointing at nursery object OBJ) in the global
 * remembered set.  Cemented objects are skipped; assertion checks enforce
 * that this only happens during a pause or from the concurrent collector.
 */
1128 sgen_add_to_global_remset (gpointer ptr, gpointer obj)
1130 SGEN_ASSERT (5, sgen_ptr_in_nursery (obj), "Target pointer of global remset must be in the nursery");
1132 HEAVY_STAT (++stat_wbarrier_add_to_global_remset);
1134 if (!major_collector.is_concurrent) {
1135 SGEN_ASSERT (5, current_collection_generation != -1, "Global remsets can only be added during collections");
1137 if (current_collection_generation == -1)
1138 SGEN_ASSERT (5, sgen_concurrent_collection_in_progress (), "Global remsets outside of collection pauses can only be added by the concurrent collector");
/* A cemented object needs no remset entry; it is treated as pinned. */
1141 if (!object_is_pinned (obj))
1142 SGEN_ASSERT (5, sgen_minor_collector.is_split || sgen_concurrent_collection_in_progress (), "Non-pinned objects can only remain in nursery if it is a split nursery");
1143 else if (sgen_cement_lookup_or_register (obj))
1146 remset.record_pointer (ptr);
1148 if (G_UNLIKELY (do_pin_stats))
1149 sgen_pin_stats_register_global_remset (obj);
1151 SGEN_LOG (8, "Adding global remset for %p", ptr);
1152 binary_protocol_global_remset (ptr, obj, (gpointer)SGEN_LOAD_VTABLE (obj));
1155 #ifdef ENABLE_DTRACE
1156 if (G_UNLIKELY (MONO_GC_GLOBAL_REMSET_ADD_ENABLED ())) {
1157 MonoVTable *vt = (MonoVTable*)LOAD_VTABLE (obj);
1158 MONO_GC_GLOBAL_REMSET_ADD ((mword)ptr, (mword)obj, sgen_safe_object_get_size (obj),
1159 vt->klass->name_space, vt->klass->name);
1165 * sgen_drain_gray_stack:
1167 * Scan objects in the gray stack until the stack is empty. This should be called
1168 * frequently after each object is copied, to achieve better locality and cache
/*
 * sgen_drain_gray_stack:
 *
 * Pop objects off the gray queue and scan them with CTX's scan function.
 * MAX_OBJS == -1 drains the queue completely; otherwise objects are scanned
 * in batches of MAX_OBJS.  (Excerpt is missing the return statements.)
 */
1172 sgen_drain_gray_stack (int max_objs, ScanCopyContext ctx)
1175 ScanObjectFunc scan_func = ctx.scan_func;
1176 GrayQueue *queue = ctx.queue;
/* Unbounded drain: loop until the queue yields no more objects. */
1178 if (max_objs == -1) {
1180 GRAY_OBJECT_DEQUEUE (queue, obj);
1183 SGEN_LOG (9, "Precise gray object scan %p (%s)", obj, safe_name (obj));
1184 scan_func (obj, queue);
/* Bounded drain: scan up to max_objs objects per outer iteration. */
1190 for (i = 0; i != max_objs; ++i) {
1191 GRAY_OBJECT_DEQUEUE (queue, obj);
1194 SGEN_LOG (9, "Precise gray object scan %p (%s)", obj, safe_name (obj));
1195 scan_func (obj, queue);
1197 } while (max_objs < 0);
1203 * Addresses from start to end are already sorted. This function finds
1204 * the object header for each address and pins the object. The
1205 * addresses must be inside the passed section. The (start of the)
1206 * address array is overwritten with the addresses of the actually
1207 * pinned objects. Return the number of pinned objects.
/*
 * pin_objects_from_addresses:
 *
 * Given sorted candidate addresses in [start,end), find the object each
 * address falls inside (walking forward from the section's scan-start
 * table) and pin it.  The address array is overwritten in place with the
 * start addresses of the objects actually pinned; the pin count is
 * accumulated and reported via pin stats / profiler hooks.
 * NOTE(review): excerpt is missing lines (continue/return, braces);
 * comments only, code untouched.
 */
1210 pin_objects_from_addresses (GCMemSection *section, void **start, void **end, void *start_nursery, void *end_nursery, ScanCopyContext ctx)
1215 void *last_obj = NULL;
1216 size_t last_obj_size = 0;
1219 void **definitely_pinned = start;
1220 ScanObjectFunc scan_func = ctx.scan_func;
1221 SgenGrayQueue *queue = ctx.queue;
1223 sgen_nursery_allocator_prepare_for_pinning ();
1225 while (start < end) {
1227 /* the range check should be redundant */
1228 if (addr != last && addr >= start_nursery && addr < end_nursery) {
1229 SGEN_LOG (5, "Considering pinning addr %p", addr);
1230 /* multiple pointers to the same object */
1231 if (addr >= last_obj && (char*)addr < (char*)last_obj + last_obj_size) {
/* Find the nearest preceding known object start via scan_starts. */
1235 idx = ((char*)addr - (char*)section->data) / SCAN_START_SIZE;
1236 g_assert (idx < section->num_scan_start);
1237 search_start = (void*)section->scan_starts [idx];
1238 if (!search_start || search_start > addr) {
1241 search_start = section->scan_starts [idx];
1242 if (search_start && search_start <= addr)
1245 if (!search_start || search_start > addr)
1246 search_start = start_nursery;
1248 if (search_start < last_obj)
1249 search_start = (char*)last_obj + last_obj_size;
1250 /* now addr should be in an object a short distance from search_start
1251 * Note that search_start must point to zeroed mem or point to an object.
/* Zeroed word: skip ahead; consistency-check it is not inside a fragment. */
1255 if (!*(void**)search_start) {
1256 /* Consistency check */
1258 for (frag = nursery_fragments; frag; frag = frag->next) {
1259 if (search_start >= frag->fragment_start && search_start < frag->fragment_end)
1260 g_assert_not_reached ();
1264 search_start = (void*)ALIGN_UP ((mword)search_start + sizeof (gpointer));
1267 last_obj = search_start;
1268 last_obj_size = ALIGN_UP (safe_object_get_size ((MonoObject*)search_start));
/* synchronisation == -1 is the sentinel for a fragment start. */
1270 if (((MonoObject*)last_obj)->synchronisation == GINT_TO_POINTER (-1)) {
1271 /* Marks the beginning of a nursery fragment, skip */
1273 SGEN_LOG (8, "Pinned try match %p (%s), size %zd", last_obj, safe_name (last_obj), last_obj_size);
1274 if (addr >= search_start && (char*)addr < (char*)last_obj + last_obj_size) {
1276 scan_func (search_start, queue);
1278 SGEN_LOG (4, "Pinned object %p, vtable %p (%s), count %d\n",
1279 search_start, *(void**)search_start, safe_name (search_start), count);
1280 binary_protocol_pin (search_start,
1281 (gpointer)LOAD_VTABLE (search_start),
1282 safe_object_get_size (search_start));
1284 #ifdef ENABLE_DTRACE
1285 if (G_UNLIKELY (MONO_GC_OBJ_PINNED_ENABLED ())) {
1286 int gen = sgen_ptr_in_nursery (search_start) ? GENERATION_NURSERY : GENERATION_OLD;
1287 MonoVTable *vt = (MonoVTable*)LOAD_VTABLE (search_start);
1288 MONO_GC_OBJ_PINNED ((mword)search_start,
1289 sgen_safe_object_get_size (search_start),
1290 vt->klass->name_space, vt->klass->name, gen);
/* Actually pin, gray, and record the object. */
1294 pin_object (search_start);
1295 GRAY_OBJECT_ENQUEUE (queue, search_start);
1296 if (G_UNLIKELY (do_pin_stats))
1297 sgen_pin_stats_register_object (search_start, last_obj_size);
1298 definitely_pinned [count] = search_start;
1304 /* skip to the next object */
1305 search_start = (void*)((char*)search_start + last_obj_size);
1306 } while (search_start <= addr);
1307 /* we either pinned the correct object or we ignored the addr because
1308 * it points to unused zeroed memory.
1314 //printf ("effective pinned: %d (at the end: %d)\n", count, (char*)end_nursery - (char*)last);
1315 if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS) {
1316 GCRootReport report;
1318 for (idx = 0; idx < count; ++idx)
1319 add_profile_gc_root (&report, definitely_pinned [idx], MONO_PROFILE_GC_ROOT_PINNING | MONO_PROFILE_GC_ROOT_MISC, 0);
1320 notify_gc_roots (&report);
1322 stat_pinned_objects += count;
/*
 * sgen_pin_objects_in_section:
 *
 * Pin the objects referenced by SECTION's staged pin queue and shrink the
 * queue to the entries that actually matched objects.
 */
1327 sgen_pin_objects_in_section (GCMemSection *section, ScanCopyContext ctx)
1329 int num_entries = section->pin_queue_num_entries;
1331 void **start = section->pin_queue_start;
1333 reduced_to = pin_objects_from_addresses (section, start, start + num_entries,
1334 section->data, section->next_data, ctx);
1335 section->pin_queue_num_entries = reduced_to;
/* Presumably cleared when nothing was pinned — excerpt omits the guard. */
1337 section->pin_queue_start = NULL;
/*
 * sgen_pin_object:
 *
 * Pin OBJECT, stage it for the pin queue, push it on the gray queue and
 * emit the usual pin bookkeeping (stats, binary protocol, DTrace probe).
 * In the parallel case the object arrives already pinned by a CAS.
 */
1343 sgen_pin_object (void *object, GrayQueue *queue)
1345 g_assert (!concurrent_collection_in_progress);
1347 if (sgen_collection_is_parallel ()) {
1349 /*object arrives pinned*/
1350 sgen_pin_stage_ptr (object);
1354 SGEN_PIN_OBJECT (object);
1355 sgen_pin_stage_ptr (object);
1357 if (G_UNLIKELY (do_pin_stats))
1358 sgen_pin_stats_register_object (object, safe_object_get_size (object));
1360 GRAY_OBJECT_ENQUEUE (queue, object);
1361 binary_protocol_pin (object, (gpointer)LOAD_VTABLE (object), safe_object_get_size (object));
1363 #ifdef ENABLE_DTRACE
1364 if (G_UNLIKELY (MONO_GC_OBJ_PINNED_ENABLED ())) {
1365 int gen = sgen_ptr_in_nursery (object) ? GENERATION_NURSERY : GENERATION_OLD;
1366 MonoVTable *vt = (MonoVTable*)LOAD_VTABLE (object);
1367 MONO_GC_OBJ_PINNED ((mword)object, sgen_safe_object_get_size (object), vt->klass->name_space, vt->klass->name, gen);
/*
 * sgen_parallel_pin_or_update:
 *
 * Parallel-collector race resolver for slot PTR holding OBJ: try to pin the
 * object atomically (CAS setting SGEN_PINNED_BIT on the vtable word); on
 * failure, either another thread forwarded the object (so update *PTR) or
 * another thread pinned it (nothing to do).
 */
1373 sgen_parallel_pin_or_update (void **ptr, void *obj, MonoVTable *vt, SgenGrayQueue *queue)
1377 gboolean major_pinned = FALSE;
1379 if (sgen_ptr_in_nursery (obj)) {
1380 if (SGEN_CAS_PTR (obj, (void*)((mword)vt | SGEN_PINNED_BIT), vt) == vt) {
1381 sgen_pin_object (obj, queue);
1385 major_collector.pin_major_object (obj, queue);
1386 major_pinned = TRUE;
/* CAS lost: inspect the vtable word to see what the winner did. */
1389 vtable_word = *(mword*)obj;
1390 /*someone else forwarded it, update the pointer and bail out*/
1391 if (vtable_word & SGEN_FORWARDED_BIT) {
1392 *ptr = (void*)(vtable_word & ~SGEN_VTABLE_BITS_MASK);
1396 /*someone pinned it, nothing to do.*/
1397 if (vtable_word & SGEN_PINNED_BIT || major_pinned)
1402 /* Sort the addresses in array in increasing order.
1403 * Done using a by-the book heap sort. Which has decent and stable performance, is pretty cache efficient.
/*
 * sgen_sort_addresses:
 *
 * In-place heapsort of SIZE pointers in ARRAY, ascending.  Phase 1 builds a
 * max-heap by sifting each element up; phase 2 repeatedly swaps the root to
 * the end and sifts the new root down.
 */
1406 sgen_sort_addresses (void **array, int size)
1411 for (i = 1; i < size; ++i) {
1414 int parent = (child - 1) / 2;
1416 if (array [parent] >= array [child])
1419 tmp = array [parent];
1420 array [parent] = array [child];
1421 array [child] = tmp;
/* Extraction phase: array[0] is the current maximum. */
1427 for (i = size - 1; i > 0; --i) {
1430 array [i] = array [0];
1436 while (root * 2 + 1 <= end) {
1437 int child = root * 2 + 1;
/* Pick the larger of the two children to compare against. */
1439 if (child < end && array [child] < array [child + 1])
1441 if (array [root] >= array [child])
1445 array [root] = array [child];
1446 array [child] = tmp;
1454 * Scan the memory between start and end and queue values which could be pointers
1455 * to the area between start_nursery and end_nursery for later consideration.
1456 * Typically used for thread stacks.
/*
 * conservatively_pin_objects_from:
 *
 * Conservative scan of the words in [start,end): any value that, after
 * alignment, falls inside [start_nursery,end_nursery) is staged as a pin
 * candidate.  Typically used for thread stacks and registers.
 */
1459 conservatively_pin_objects_from (void **start, void **end, void *start_nursery, void *end_nursery, int pin_type)
1463 #ifdef VALGRIND_MAKE_MEM_DEFINED_IF_ADDRESSABLE
1464 VALGRIND_MAKE_MEM_DEFINED_IF_ADDRESSABLE (start, (char*)end - (char*)start);
1467 while (start < end) {
1468 if (*start >= start_nursery && *start < end_nursery) {
1470 * *start can point to the middle of an object
1471 * note: should we handle pointing at the end of an object?
1472 * pinning in C# code disallows pointing at the end of an object
1473 * but there is some small chance that an optimizing C compiler
1474 * may keep the only reference to an object by pointing
1475 * at the end of it. We ignore this small chance for now.
1476 * Pointers to the end of an object are indistinguishable
1477 * from pointers to the start of the next object in memory
1478 * so if we allow that we'd need to pin two objects...
1479 * We queue the pointer in an array, the
1480 * array will then be sorted and uniqued. This way
1481 * we can coalesce several pinning pointers and it should
1482 * be faster since we'd do a memory scan with increasing
1483 * addresses. Note: we can align the address to the allocation
1484 * alignment, so the unique process is more effective.
1486 mword addr = (mword)*start;
1487 addr &= ~(ALLOC_ALIGN - 1);
1488 if (addr >= (mword)start_nursery && addr < (mword)end_nursery) {
1489 SGEN_LOG (6, "Pinning address %p from %p", (void*)addr, start);
1490 sgen_pin_stage_ptr ((void*)addr);
1493 if (G_UNLIKELY (do_pin_stats)) {
1494 if (ptr_in_nursery ((void*)addr))
1495 sgen_pin_stats_register_address ((char*)addr, pin_type);
1501 SGEN_LOG (7, "found %d potential pinned heap pointers", count);
1505 * The first thing we do in a collection is to identify pinned objects.
1506 * This function considers all the areas of memory that need to be
1507 * conservatively scanned.
/*
 * pin_from_roots:
 *
 * First collection phase: conservatively scan all pinned API roots and the
 * thread stacks/registers, staging pin candidates for the nursery range.
 */
1510 pin_from_roots (void *start_nursery, void *end_nursery, GrayQueue *queue)
1514 SGEN_LOG (2, "Scanning pinned roots (%d bytes, %d/%d entries)", (int)roots_size, roots_hash [ROOT_TYPE_NORMAL].num_entries, roots_hash [ROOT_TYPE_PINNED].num_entries);
1515 /* objects pinned from the API are inside these roots */
1516 SGEN_HASH_TABLE_FOREACH (&roots_hash [ROOT_TYPE_PINNED], start_root, root) {
1517 SGEN_LOG (6, "Pinned roots %p-%p", start_root, root->end_root);
1518 conservatively_pin_objects_from (start_root, (void**)root->end_root, start_nursery, end_nursery, PIN_TYPE_OTHER);
1519 } SGEN_HASH_TABLE_FOREACH_END;
1520 /* now deal with the thread stacks
1521 * in the future we should be able to conservatively scan only:
1522 * *) the cpu registers
1523 * *) the unmanaged stack frames
1524 * *) the _last_ managed stack frame
1525 * *) pointers slots in managed frames
1527 scan_thread_data (start_nursery, end_nursery, FALSE, queue);
/* Drain QUEUE and clear the pinned bit on every object it contains. */
1531 unpin_objects_from_queue (SgenGrayQueue *queue)
1535 GRAY_OBJECT_DEQUEUE (queue, addr);
1538 g_assert (SGEN_OBJECT_IS_PINNED (addr));
1539 SGEN_UNPIN_OBJECT (addr);
1544 CopyOrMarkObjectFunc func;
1546 } UserCopyOrMarkData;
1548 static MonoNativeTlsKey user_copy_or_mark_key;
/* One-time allocation of the TLS key used by user-descriptor root marking. */
1551 init_user_copy_or_mark_key (void)
1553 mono_native_tls_alloc (&user_copy_or_mark_key, NULL);
/* Stash DATA in TLS so single_arg_user_copy_or_mark can retrieve it. */
1557 set_user_copy_or_mark_data (UserCopyOrMarkData *data)
1559 mono_native_tls_set_value (user_copy_or_mark_key, data);
/*
 * Adapter: user mark callbacks take a single slot argument, so the copy
 * function and queue are smuggled through TLS.
 */
1563 single_arg_user_copy_or_mark (void **obj)
1565 UserCopyOrMarkData *data = mono_native_tls_get_value (user_copy_or_mark_key);
1567 data->func (obj, data->queue);
1571 * The memory area from start_root to end_root contains pointers to objects.
1572 * Their position is precisely described by @desc (this means that the pointer
1573 * can be either NULL or the pointer to the start of an object).
1574 This function copies them to to_space and updates them.
1576 * This function is not thread-safe!
/*
 * precisely_scan_objects_from:
 *
 * Precisely scan the root area [start_root,end_root) according to its
 * descriptor DESC, copying/marking each referenced object and draining the
 * gray stack after every root slot processed.  Not thread-safe (uses TLS
 * for the ROOT_DESC_USER case).
 */
1579 precisely_scan_objects_from (void** start_root, void** end_root, char* n_start, char *n_end, mword desc, ScanCopyContext ctx)
1581 CopyOrMarkObjectFunc copy_func = ctx.copy_func;
1582 SgenGrayQueue *queue = ctx.queue;
1584 switch (desc & ROOT_DESC_TYPE_MASK) {
1585 case ROOT_DESC_BITMAP:
1586 desc >>= ROOT_DESC_TYPE_SHIFT;
1588 if ((desc & 1) && *start_root) {
1589 copy_func (start_root, queue);
1590 SGEN_LOG (9, "Overwrote root at %p with %p", start_root, *start_root);
1591 sgen_drain_gray_stack (-1, ctx);
1597 case ROOT_DESC_COMPLEX: {
/* First word of the complex bitmap is its length (in words) + 1. */
1598 gsize *bitmap_data = sgen_get_complex_descriptor_bitmap (desc);
1599 int bwords = (*bitmap_data) - 1;
1600 void **start_run = start_root;
1602 while (bwords-- > 0) {
1603 gsize bmap = *bitmap_data++;
1604 void **objptr = start_run;
1606 if ((bmap & 1) && *objptr) {
1607 copy_func (objptr, queue);
1608 SGEN_LOG (9, "Overwrote root at %p with %p", objptr, *objptr);
1609 sgen_drain_gray_stack (-1, ctx);
1614 start_run += GC_BITS_PER_WORD;
1618 case ROOT_DESC_USER: {
/* Pass copy_func/queue through TLS for the single-argument callback. */
1619 UserCopyOrMarkData data = { copy_func, queue };
1620 MonoGCRootMarkFunc marker = sgen_get_user_descriptor_func (desc);
1621 set_user_copy_or_mark_data (&data);
1622 marker (start_root, single_arg_user_copy_or_mark);
1623 set_user_copy_or_mark_data (NULL);
1626 case ROOT_DESC_RUN_LEN:
1627 g_assert_not_reached ();
1629 g_assert_not_reached ();
/* Reset the tracked heap address range to "empty" (low=max, high=0). */
1634 reset_heap_boundaries (void)
1636 lowest_heap_address = ~(mword)0;
1637 highest_heap_address = 0;
/*
 * sgen_update_heap_boundaries:
 *
 * Lock-free widening of the global [lowest,highest] heap address range to
 * include [LOW,HIGH]; each bound is updated with a CAS retry loop.
 */
1641 sgen_update_heap_boundaries (mword low, mword high)
1646 old = lowest_heap_address;
1649 } while (SGEN_CAS_PTR ((gpointer*)&lowest_heap_address, (gpointer)low, (gpointer)old) != (gpointer)old);
1652 old = highest_heap_address;
1655 } while (SGEN_CAS_PTR ((gpointer*)&highest_heap_address, (gpointer)high, (gpointer)old) != (gpointer)old);
1659 * Allocate and setup the data structures needed to be able to allocate objects
1660 * in the nursery. The nursery is stored in nursery_section.
/*
 * alloc_nursery:
 *
 * Allocate the nursery memory and its GCMemSection bookkeeping (scan-start
 * table, bounds), register the range with the heap boundaries and the
 * nursery allocator.  Idempotent: returns early if already allocated.
 */
1663 alloc_nursery (void)
1665 GCMemSection *section;
1670 if (nursery_section)
1672 SGEN_LOG (2, "Allocating nursery size: %lu", (unsigned long)sgen_nursery_size);
1673 /* later we will alloc a larger area for the nursery but only activate
1674 * what we need. The rest will be used as expansion if we have too many pinned
1675 * objects in the existing nursery.
1677 /* FIXME: handle OOM */
1678 section = sgen_alloc_internal (INTERNAL_MEM_SECTION);
1680 alloc_size = sgen_nursery_size;
1682 /* If there isn't enough space even for the nursery we should simply abort. */
1683 g_assert (sgen_memgov_try_alloc_space (alloc_size, SPACE_NURSERY));
/* Aligned nursery allows pointer-in-nursery tests by bit masking. */
1685 #ifdef SGEN_ALIGN_NURSERY
1686 data = major_collector.alloc_heap (alloc_size, alloc_size, DEFAULT_NURSERY_BITS);
1688 data = major_collector.alloc_heap (alloc_size, 0, DEFAULT_NURSERY_BITS);
1690 sgen_update_heap_boundaries ((mword)data, (mword)(data + sgen_nursery_size));
1691 SGEN_LOG (4, "Expanding nursery size (%p-%p): %lu, total: %lu", data, data + alloc_size, (unsigned long)sgen_nursery_size, (unsigned long)mono_gc_get_heap_size ());
1692 section->data = section->next_data = data;
1693 section->size = alloc_size;
1694 section->end_data = data + sgen_nursery_size;
1695 scan_starts = (alloc_size + SCAN_START_SIZE - 1) / SCAN_START_SIZE;
1696 section->scan_starts = sgen_alloc_internal_dynamic (sizeof (char*) * scan_starts, INTERNAL_MEM_SCAN_STARTS, TRUE);
1697 section->num_scan_start = scan_starts;
1699 nursery_section = section;
1701 sgen_nursery_allocator_set_nursery_bounds (data, data + sgen_nursery_size);
/*
 * mono_gc_get_nursery:
 *
 * Out-params: nursery size and (when aligned) the shift used for fast
 * pointer-in-nursery checks.  Returns the nursery start address.
 */
1705 mono_gc_get_nursery (int *shift_bits, size_t *size)
1707 *size = sgen_nursery_size;
1708 #ifdef SGEN_ALIGN_NURSERY
1709 *shift_bits = DEFAULT_NURSERY_BITS;
1713 return sgen_get_nursery_start ();
/* Record DOMAIN as the current thread's stopped domain for STW scanning. */
1717 mono_gc_set_current_thread_appdomain (MonoDomain *domain)
1719 SgenThreadInfo *info = mono_thread_info_current ();
1721 /* Could be called from sgen_thread_unregister () with a NULL info */
1724 info->stopped_domain = domain;
/* TRUE when stacks are scanned precisely rather than conservatively. */
1729 mono_gc_precise_stack_mark_enabled (void)
1731 return !conservative_stack_mark;
/* Accessor for the GC debug log file handle. */
1735 mono_gc_get_logfile (void)
1737 return gc_debug_file;
/* Report every object on a finalize-ready LIST to the profiler as a root. */
1741 report_finalizer_roots_list (FinalizeReadyEntry *list)
1743 GCRootReport report;
1744 FinalizeReadyEntry *fin;
1747 for (fin = list; fin; fin = fin->next) {
1750 add_profile_gc_root (&report, fin->object, MONO_PROFILE_GC_ROOT_FINALIZER, 0);
1752 notify_gc_roots (&report);
/* Report both the normal and critical finalizer queues as GC roots. */
1756 report_finalizer_roots (void)
1758 report_finalizer_roots_list (fin_ready_list);
1759 report_finalizer_roots_list (critical_fin_list);
1762 static GCRootReport *root_report;
/* User-root callback: add *OBJ to the report stashed in root_report. */
1765 single_arg_report_root (void **obj)
1768 add_profile_gc_root (root_report, *obj, MONO_PROFILE_GC_ROOT_OTHER, 0);
/*
 * precisely_report_roots_from:
 *
 * Profiler variant of precisely_scan_objects_from: walks the root area
 * described by DESC and adds each referenced object to REPORT instead of
 * copying/marking it.
 */
1772 precisely_report_roots_from (GCRootReport *report, void** start_root, void** end_root, mword desc)
1774 switch (desc & ROOT_DESC_TYPE_MASK) {
1775 case ROOT_DESC_BITMAP:
1776 desc >>= ROOT_DESC_TYPE_SHIFT;
1778 if ((desc & 1) && *start_root) {
1779 add_profile_gc_root (report, *start_root, MONO_PROFILE_GC_ROOT_OTHER, 0);
1785 case ROOT_DESC_COMPLEX: {
1786 gsize *bitmap_data = sgen_get_complex_descriptor_bitmap (desc);
1787 int bwords = (*bitmap_data) - 1;
1788 void **start_run = start_root;
1790 while (bwords-- > 0) {
1791 gsize bmap = *bitmap_data++;
1792 void **objptr = start_run;
1794 if ((bmap & 1) && *objptr) {
1795 add_profile_gc_root (report, *objptr, MONO_PROFILE_GC_ROOT_OTHER, 0);
1800 start_run += GC_BITS_PER_WORD;
1804 case ROOT_DESC_USER: {
/* Callback takes one arg, so the report goes through the root_report global. */
1805 MonoGCRootMarkFunc marker = sgen_get_user_descriptor_func (desc);
1806 root_report = report;
1807 marker (start_root, single_arg_report_root);
1810 case ROOT_DESC_RUN_LEN:
1811 g_assert_not_reached ();
1813 g_assert_not_reached ();
/* Report all registered roots of ROOT_TYPE to the profiler. */
1818 report_registered_roots_by_type (int root_type)
1820 GCRootReport report;
1824 SGEN_HASH_TABLE_FOREACH (&roots_hash [root_type], start_root, root) {
1825 SGEN_LOG (6, "Precise root scan %p-%p (desc: %p)", start_root, root->end_root, (void*)root->root_desc);
1826 precisely_report_roots_from (&report, start_root, (void**)root->end_root, root->root_desc);
1827 } SGEN_HASH_TABLE_FOREACH_END;
1828 notify_gc_roots (&report);
/* Report both normal and write-barrier root sets to the profiler. */
1832 report_registered_roots (void)
1834 report_registered_roots_by_type (ROOT_TYPE_NORMAL);
1835 report_registered_roots_by_type (ROOT_TYPE_WBARRIER);
/*
 * scan_finalizer_entries:
 *
 * Copy/mark every object on the finalize-ready LIST so objects reachable
 * only from pending finalizers survive the collection.
 */
1839 scan_finalizer_entries (FinalizeReadyEntry *list, ScanCopyContext ctx)
1841 CopyOrMarkObjectFunc copy_func = ctx.copy_func;
1842 SgenGrayQueue *queue = ctx.queue;
1843 FinalizeReadyEntry *fin;
1845 for (fin = list; fin; fin = fin->next) {
1848 SGEN_LOG (5, "Scan of fin ready object: %p (%s)\n", fin->object, safe_name (fin->object));
1849 copy_func (&fin->object, queue);
/* Human-readable name for a generation constant; aborts on unknown values. */
1854 generation_name (int generation)
1856 switch (generation) {
1857 case GENERATION_NURSERY: return "nursery";
1858 case GENERATION_OLD: return "old";
1859 default: g_assert_not_reached ();
/* Public wrapper for generation_name(). */
1864 sgen_generation_name (int generation)
1866 return generation_name (generation);
1869 SgenObjectOperations *
1870 sgen_get_current_object_ops (void){
1871 return ¤t_object_ops;
/*
 * finish_gray_stack:
 *
 * Final marking phase of a collection: drain the gray stack, then handle
 * ephemerons, bridge objects, non-tracking weak links, finalizable objects
 * and tracking weak links, draining the stack after each step so no live
 * object is misclassified as dead.  Ends with an empty gray queue.
 * NOTE(review): excerpt is missing lines (timer calls, braces); comments
 * only, code untouched.
 */
1876 finish_gray_stack (int generation, GrayQueue *queue)
1880 int done_with_ephemerons, ephemeron_rounds = 0;
1881 CopyOrMarkObjectFunc copy_func = current_object_ops.copy_or_mark_object;
1882 ScanObjectFunc scan_func = current_object_ops.scan_object;
1883 ScanCopyContext ctx = { scan_func, copy_func, queue };
1884 char *start_addr = generation == GENERATION_NURSERY ? sgen_get_nursery_start () : NULL;
1885 char *end_addr = generation == GENERATION_NURSERY ? sgen_get_nursery_end () : (char*)-1;
1888 * We copied all the reachable objects. Now it's the time to copy
1889 * the objects that were not referenced by the roots, but by the copied objects.
1890 * we built a stack of objects pointed to by gray_start: they are
1891 * additional roots and we may add more items as we go.
1892 * We loop until gray_start == gray_objects which means no more objects have
1893 * been added. Note this is iterative: no recursion is involved.
1894 * We need to walk the LO list as well in search of marked big objects
1895 * (use a flag since this is needed only on major collections). We need to loop
1896 * here as well, so keep a counter of marked LO (increasing it in copy_object).
1897 * To achieve better cache locality and cache usage, we drain the gray stack
1898 * frequently, after each object is copied, and just finish the work here.
1900 sgen_drain_gray_stack (-1, ctx);
1902 SGEN_LOG (2, "%s generation done", generation_name (generation));
1905 Reset bridge data, we might have lingering data from a previous collection if this is a major
1906 collection triggered by minor overflow.
1908 We must reset the gathered bridges since their original block might be evacuated due to major
1909 fragmentation in the meanwhile and the bridge code should not have to deal with that.
1911 sgen_bridge_reset_data ();
1914 * Walk the ephemeron tables marking all values with reachable keys. This must be completely done
1915 * before processing finalizable objects and non-tracking weak links to avoid finalizing/clearing
1916 * objects that are in fact reachable.
1918 done_with_ephemerons = 0;
1920 done_with_ephemerons = mark_ephemerons_in_range (ctx);
1921 sgen_drain_gray_stack (-1, ctx);
1923 } while (!done_with_ephemerons);
1925 sgen_scan_togglerefs (start_addr, end_addr, ctx);
1927 if (sgen_need_bridge_processing ()) {
1928 sgen_collect_bridge_objects (generation, ctx);
/* Major collections also cover the nursery generation. */
1929 if (generation == GENERATION_OLD)
1930 sgen_collect_bridge_objects (GENERATION_NURSERY, ctx);
1934 Make sure we drain the gray stack before processing disappearing links and finalizers.
1935 If we don't make sure it is empty we might wrongly see a live object as dead.
1937 sgen_drain_gray_stack (-1, ctx);
1940 We must clear weak links that don't track resurrection before processing object ready for
1941 finalization so they can be cleared before that.
1943 sgen_null_link_in_range (generation, TRUE, ctx);
1944 if (generation == GENERATION_OLD)
1945 sgen_null_link_in_range (GENERATION_NURSERY, TRUE, ctx);
1948 /* walk the finalization queue and move also the objects that need to be
1949 * finalized: use the finalized objects as new roots so the objects they depend
1950 * on are also not reclaimed. As with the roots above, only objects in the nursery
1951 * are marked/copied.
1953 sgen_finalize_in_range (generation, ctx);
1954 if (generation == GENERATION_OLD)
1955 sgen_finalize_in_range (GENERATION_NURSERY, ctx);
1956 /* drain the new stack that might have been created */
1957 SGEN_LOG (6, "Precise scan of gray area post fin");
1958 sgen_drain_gray_stack (-1, ctx);
1961 * This must be done again after processing finalizable objects since CWL slots are cleared only after the key is finalized.
1963 done_with_ephemerons = 0;
1965 done_with_ephemerons = mark_ephemerons_in_range (ctx);
1966 sgen_drain_gray_stack (-1, ctx);
1968 } while (!done_with_ephemerons);
1971 * Clear ephemeron pairs with unreachable keys.
1972 * We pass the copy func so we can figure out if an array was promoted or not.
1974 clear_unreachable_ephemerons (ctx);
1977 SGEN_LOG (2, "Finalize queue handling scan for %s generation: %d usecs %d ephemeron rounds", generation_name (generation), TV_ELAPSED (atv, btv), ephemeron_rounds);
1980 * handle disappearing links
1981 * Note we do this after checking the finalization queue because if an object
1982 * survives (at least long enough to be finalized) we don't clear the link.
1983 * This also deals with a possible issue with the monitor reclamation: with the Boehm
1984 * GC a finalized object may lose the monitor because it is cleared before the finalizer is
1987 g_assert (sgen_gray_object_queue_is_empty (queue));
1989 sgen_null_link_in_range (generation, FALSE, ctx);
1990 if (generation == GENERATION_OLD)
1991 sgen_null_link_in_range (GENERATION_NURSERY, FALSE, ctx);
1992 if (sgen_gray_object_queue_is_empty (queue))
1994 sgen_drain_gray_stack (-1, ctx);
1997 g_assert (sgen_gray_object_queue_is_empty (queue));
/*
 * Debug check: every non-NULL scan-start entry must point at a plausible
 * object (size within [sizeof(MonoObject), MAX_SMALL_OBJ_SIZE]).
 */
2001 sgen_check_section_scan_starts (GCMemSection *section)
2004 for (i = 0; i < section->num_scan_start; ++i) {
2005 if (section->scan_starts [i]) {
2006 guint size = safe_object_get_size ((MonoObject*) section->scan_starts [i]);
2007 g_assert (size >= sizeof (MonoObject) && size <= MAX_SMALL_OBJ_SIZE);
/* Run scan-start consistency checks (nursery + major) when enabled. */
2013 check_scan_starts (void)
2015 if (!do_scan_starts_check)
2017 sgen_check_section_scan_starts (nursery_section);
2018 major_collector.check_scan_starts ();
/* Precisely scan all registered roots of ROOT_TYPE with CTX. */
2022 scan_from_registered_roots (char *addr_start, char *addr_end, int root_type, ScanCopyContext ctx)
2026 SGEN_HASH_TABLE_FOREACH (&roots_hash [root_type], start_root, root) {
2027 SGEN_LOG (6, "Precise root scan %p-%p (desc: %p)", start_root, root->end_root, (void*)root->root_desc);
2028 precisely_scan_objects_from (start_root, (void**)root->end_root, addr_start, addr_end, root->root_desc, ctx);
2029 } SGEN_HASH_TABLE_FOREACH_END;
/* Emit an <occupied> element (offset/size relative to the section) to the heap dump. */
2033 sgen_dump_occupied (char *start, char *end, char *section_start)
2035 fprintf (heap_dump_file, "<occupied offset=\"%td\" size=\"%td\"/>\n", start - section_start, end - start);
/*
 * sgen_dump_section:
 *
 * Walk SECTION word by word and write an XML description of occupied runs
 * and individual objects to heap_dump_file.  NULL words are treated as
 * unused gaps terminating the current occupied run.
 */
2039 sgen_dump_section (GCMemSection *section, const char *type)
2041 char *start = section->data;
2042 char *end = section->data + section->size;
2043 char *occ_start = NULL;
2045 char *old_start = NULL; /* just for debugging */
2047 fprintf (heap_dump_file, "<section type=\"%s\" size=\"%lu\">\n", type, (unsigned long)section->size);
2049 while (start < end) {
/* NULL vtable word: unused memory, flush any pending occupied run. */
2053 if (!*(void**)start) {
2055 sgen_dump_occupied (occ_start, start, section->data);
2058 start += sizeof (void*); /* should be ALLOC_ALIGN, really */
2061 g_assert (start < section->next_data);
2066 vt = (GCVTable*)LOAD_VTABLE (start);
2069 size = ALIGN_UP (safe_object_get_size ((MonoObject*) start));
2072 fprintf (heap_dump_file, "<object offset=\"%d\" class=\"%s.%s\" size=\"%d\"/>\n",
2073 start - section->data,
2074 vt->klass->name_space, vt->klass->name,
/* Flush the final occupied run, if any, before closing the section. */
2082 sgen_dump_occupied (occ_start, start, section->data);
2084 fprintf (heap_dump_file, "</section>\n");
/*
 * dump_object:
 *
 * Write one <object> element for OBJ to the heap dump, optionally tagged
 * with its location (nursery/major/LOS by size).  Angle brackets and quotes
 * are stripped from the class name to keep the XML parseable.
 */
2088 dump_object (MonoObject *obj, gboolean dump_location)
2090 static char class_name [1024];
2092 MonoClass *class = mono_object_class (obj);
2096 * Python's XML parser is too stupid to parse angle brackets
2097 * in strings, so we just ignore them;
2100 while (class->name [i] && j < sizeof (class_name) - 1) {
2101 if (!strchr ("<>\"", class->name [i]))
2102 class_name [j++] = class->name [i];
2105 g_assert (j < sizeof (class_name));
2108 fprintf (heap_dump_file, "<object class=\"%s.%s\" size=\"%d\"",
2109 class->name_space, class_name,
2110 safe_object_get_size (obj));
2111 if (dump_location) {
2112 const char *location;
2113 if (ptr_in_nursery (obj))
2114 location = "nursery";
/* Small objects live in the major heap, larger ones in LOS. */
2115 else if (safe_object_get_size (obj) <= MAX_SMALL_OBJ_SIZE)
2119 fprintf (heap_dump_file, " location=\"%s\"", location);
2121 fprintf (heap_dump_file, "/>\n");
/* Emit one complete <collection> heap-dump record: memory-usage summary,
 * pin statistics, every pinned object, the nursery section, the major heap
 * (delegated to the major collector) and the large-object space (LOS).
 * `type` and `num` identify the collection; `reason` is optional. */
2125 dump_heap (const char *type, int num, const char *reason)
2130 fprintf (heap_dump_file, "<collection type=\"%s\" num=\"%d\"", type, num);
2132 fprintf (heap_dump_file, " reason=\"%s\"", reason);
2133 fprintf (heap_dump_file, ">\n");
2134 fprintf (heap_dump_file, "<other-mem-usage type=\"mempools\" size=\"%ld\"/>\n", mono_mempool_get_bytes_allocated ());
2135 sgen_dump_internal_mem_usage (heap_dump_file);
2136 fprintf (heap_dump_file, "<pinned type=\"stack\" bytes=\"%zu\"/>\n", sgen_pin_stats_get_pinned_byte_count (PIN_TYPE_STACK));
2137 /* fprintf (heap_dump_file, "<pinned type=\"static-data\" bytes=\"%d\"/>\n", pinned_byte_counts [PIN_TYPE_STATIC_DATA]); */
2138 fprintf (heap_dump_file, "<pinned type=\"other\" bytes=\"%zu\"/>\n", sgen_pin_stats_get_pinned_byte_count (PIN_TYPE_OTHER));
2140 fprintf (heap_dump_file, "<pinned-objects>\n");
2141 for (list = sgen_pin_stats_get_object_list (); list; list = list->next)
2142 dump_object (list->obj, TRUE);
2143 fprintf (heap_dump_file, "</pinned-objects>\n");
2145 sgen_dump_section (nursery_section, "nursery");
2147 major_collector.dump_heap (heap_dump_file);
2149 fprintf (heap_dump_file, "<los>\n");
2150 for (bigobj = los_object_list; bigobj; bigobj = bigobj->next)
2151 dump_object ((MonoObject*)bigobj->data, FALSE);
2152 fprintf (heap_dump_file, "</los>\n");
2154 fprintf (heap_dump_file, "</collection>\n");
/* Record an object move for the profiler.  Moves are buffered as
 * (source, destination) pairs in moved_objects and flushed to
 * mono_profiler_gc_moves() once the buffer reaches MOVED_OBJECTS_NUM
 * entries.  Only valid when GC-move profiling is enabled, and explicitly
 * not supported for the parallel collector (see FIXME below). */
2158 sgen_register_moved_object (void *obj, void *destination)
2160 g_assert (mono_profiler_events & MONO_PROFILE_GC_MOVES);
2162 /* FIXME: handle this for parallel collector */
2163 g_assert (!sgen_collection_is_parallel ());
/* Flush the buffer when full before appending the new pair. */
2165 if (moved_objects_idx == MOVED_OBJECTS_NUM) {
2166 mono_profiler_gc_moves (moved_objects, moved_objects_idx);
2167 moved_objects_idx = 0;
2169 moved_objects [moved_objects_idx++] = obj;
2170 moved_objects [moved_objects_idx++] = destination;
/* NOTE(review): the enclosing function header is elided in this excerpt
 * (presumably init_stats).  Registers all GC timing and statistics
 * counters with the mono_counters subsystem; the one-shot `inited` flag
 * guards against double registration. */
2176 static gboolean inited = FALSE;
/* Minor (nursery) collection phase timers. */
2181 mono_counters_register ("Minor fragment clear", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_minor_pre_collection_fragment_clear);
2182 mono_counters_register ("Minor pinning", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_minor_pinning);
2183 mono_counters_register ("Minor scan remembered set", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_minor_scan_remsets);
2184 mono_counters_register ("Minor scan pinned", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_minor_scan_pinned);
2185 mono_counters_register ("Minor scan registered roots", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_minor_scan_registered_roots);
2186 mono_counters_register ("Minor scan thread data", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_minor_scan_thread_data);
2187 mono_counters_register ("Minor finish gray stack", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_minor_finish_gray_stack);
2188 mono_counters_register ("Minor fragment creation", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_minor_fragment_creation);
/* Major collection phase timers. */
2190 mono_counters_register ("Major fragment clear", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_major_pre_collection_fragment_clear);
2191 mono_counters_register ("Major pinning", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_major_pinning);
2192 mono_counters_register ("Major scan pinned", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_major_scan_pinned);
2193 mono_counters_register ("Major scan registered roots", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_major_scan_registered_roots);
2194 mono_counters_register ("Major scan thread data", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_major_scan_thread_data);
2195 mono_counters_register ("Major scan alloc_pinned", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_major_scan_alloc_pinned);
2196 mono_counters_register ("Major scan finalized", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_major_scan_finalized);
2197 mono_counters_register ("Major scan big objects", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_major_scan_big_objects);
2198 mono_counters_register ("Major finish gray stack", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_major_finish_gray_stack);
2199 mono_counters_register ("Major free big objects", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_major_free_bigobjs);
2200 mono_counters_register ("Major LOS sweep", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_major_los_sweep);
2201 mono_counters_register ("Major sweep", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_major_sweep);
2202 mono_counters_register ("Major fragment creation", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_major_fragment_creation);
2204 mono_counters_register ("Number of pinned objects", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_pinned_objects);
/* Heavy statistics: write-barrier and copy/scan counters, only compiled
 * in when HEAVY_STATISTICS is defined. */
2206 #ifdef HEAVY_STATISTICS
2207 mono_counters_register ("WBarrier remember pointer", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_add_to_global_remset);
2208 mono_counters_register ("WBarrier set field", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_set_field);
2209 mono_counters_register ("WBarrier set arrayref", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_set_arrayref);
2210 mono_counters_register ("WBarrier arrayref copy", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_arrayref_copy);
2211 mono_counters_register ("WBarrier generic store called", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_generic_store);
2212 mono_counters_register ("WBarrier set root", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_set_root);
2213 mono_counters_register ("WBarrier value copy", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_value_copy);
2214 mono_counters_register ("WBarrier object copy", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_object_copy);
2216 mono_counters_register ("# objects allocated degraded", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_objects_alloced_degraded);
2217 mono_counters_register ("bytes allocated degraded", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_bytes_alloced_degraded);
2219 mono_counters_register ("# copy_object() called (nursery)", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_copy_object_called_nursery);
2220 mono_counters_register ("# objects copied (nursery)", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_objects_copied_nursery);
2221 mono_counters_register ("# copy_object() called (major)", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_copy_object_called_major);
2222 mono_counters_register ("# objects copied (major)", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_objects_copied_major);
2224 mono_counters_register ("# scan_object() called (nursery)", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_scan_object_called_nursery);
2225 mono_counters_register ("# scan_object() called (major)", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_scan_object_called_major);
2227 mono_counters_register ("Slots allocated in vain", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_slots_allocated_in_vain);
2229 mono_counters_register ("# nursery copy_object() failed from space", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_nursery_copy_object_failed_from_space);
2230 mono_counters_register ("# nursery copy_object() failed forwarded", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_nursery_copy_object_failed_forwarded);
2231 mono_counters_register ("# nursery copy_object() failed pinned", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_nursery_copy_object_failed_pinned);
2232 mono_counters_register ("# nursery copy_object() failed to space", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_nursery_copy_object_failed_to_space);
2234 sgen_nursery_allocator_init_heavy_stats ();
2235 sgen_alloc_init_heavy_stats ();
/* Reset the running total of bytes pinned because of failed allocations;
 * called at the start of a collection (see collect_nursery). */
2243 reset_pinned_from_failed_allocation (void)
2245 bytes_pinned_from_failed_allocation = 0;
/* Add objsize bytes to the running total of memory pinned due to failed
 * allocations during collection. */
2249 sgen_set_pinned_from_failed_allocation (mword objsize)
2251 bytes_pinned_from_failed_allocation += objsize;
/* Return whether the collection currently in progress runs in parallel
 * mode: per-collector flag for the nursery, major_collector.is_parallel
 * for the old generation.  Aborts if no collection is in progress
 * (current_collection_generation holds an invalid value). */
2255 sgen_collection_is_parallel (void)
2257 switch (current_collection_generation) {
2258 case GENERATION_NURSERY:
2259 return nursery_collection_is_parallel;
2260 case GENERATION_OLD:
2261 return major_collector.is_parallel;
2263 g_error ("Invalid current generation %d", current_collection_generation);
/* Return whether the collection currently in progress is concurrent.
 * For the old generation this mirrors concurrent_collection_in_progress;
 * the nursery case's return is elided in this excerpt.  Aborts on an
 * invalid current generation. */
2268 sgen_collection_is_concurrent (void)
2270 switch (current_collection_generation) {
2271 case GENERATION_NURSERY:
2273 case GENERATION_OLD:
2274 return concurrent_collection_in_progress;
2276 g_error ("Invalid current generation %d", current_collection_generation);
/* Accessor: whether a concurrent (major) collection is in progress. */
2281 sgen_concurrent_collection_in_progress (void)
2283 return concurrent_collection_in_progress;
/* (end of the FinishRememberedSetScanJobData typedef; fields elided here) */
2290 } FinishRememberedSetScanJobData;
/* Worker job: finish scanning the remembered sets over the job's heap
 * range into this worker's gray queue, then free the job data. */
2293 job_finish_remembered_set_scan (WorkerData *worker_data, void *job_data_untyped)
2295 FinishRememberedSetScanJobData *job_data = job_data_untyped;
2297 remset.finish_scan_remsets (job_data->heap_start, job_data->heap_end, sgen_workers_get_job_gray_queue (worker_data));
2298 sgen_free_internal_dynamic (job_data, sizeof (FinishRememberedSetScanJobData), INTERNAL_MEM_WORKER_JOB_DATA);
/* (fields of the ScanFromRegisteredRootsJobData typedef; partially elided) */
2303 CopyOrMarkObjectFunc copy_or_mark_func;
2304 ScanObjectFunc scan_func;
2308 } ScanFromRegisteredRootsJobData;
/* Worker job: scan registered roots of the job's root_type over the job's
 * heap range, using the scan/copy functions packed into the job data;
 * frees the job data when done. */
2311 job_scan_from_registered_roots (WorkerData *worker_data, void *job_data_untyped)
2313 ScanFromRegisteredRootsJobData *job_data = job_data_untyped;
2314 ScanCopyContext ctx = { job_data->scan_func, job_data->copy_or_mark_func,
2315 sgen_workers_get_job_gray_queue (worker_data) };
2317 scan_from_registered_roots (job_data->heap_start, job_data->heap_end, job_data->root_type, ctx);
2318 sgen_free_internal_dynamic (job_data, sizeof (ScanFromRegisteredRootsJobData), INTERNAL_MEM_WORKER_JOB_DATA);
/* (end of the ScanThreadDataJobData typedef; fields elided here) */
2325 } ScanThreadDataJobData;
/* Worker job: conservatively scan thread stacks/registers over the job's
 * heap range into this worker's gray queue, then free the job data. */
2328 job_scan_thread_data (WorkerData *worker_data, void *job_data_untyped)
2330 ScanThreadDataJobData *job_data = job_data_untyped;
2332 scan_thread_data (job_data->heap_start, job_data->heap_end, TRUE,
2333 sgen_workers_get_job_gray_queue (worker_data));
2334 sgen_free_internal_dynamic (job_data, sizeof (ScanThreadDataJobData), INTERNAL_MEM_WORKER_JOB_DATA);
/* (ScanFinalizerEntriesJobData: carries the finalizer list to scan) */
2339 FinalizeReadyEntry *list;
2340 } ScanFinalizerEntriesJobData;
/* Worker job: scan the job's finalizer entry list with the current
 * copy-or-mark function, then free the job data. */
2343 job_scan_finalizer_entries (WorkerData *worker_data, void *job_data_untyped)
2345 ScanFinalizerEntriesJobData *job_data = job_data_untyped;
2346 ScanCopyContext ctx = { NULL, current_object_ops.copy_or_mark_object, sgen_workers_get_job_gray_queue (worker_data) };
2348 scan_finalizer_entries (job_data->list, ctx);
2349 sgen_free_internal_dynamic (job_data, sizeof (ScanFinalizerEntriesJobData), INTERNAL_MEM_WORKER_JOB_DATA);
/* Worker job: scan the major heap's mod-union card table (TRUE selects the
 * mod-union variant).  Only valid while a concurrent collection is in
 * progress. */
2353 job_scan_major_mod_union_cardtable (WorkerData *worker_data, void *job_data_untyped)
2355 g_assert (concurrent_collection_in_progress);
2356 major_collector.scan_card_table (TRUE, sgen_workers_get_job_gray_queue (worker_data));
/* Worker job: scan the LOS mod-union card table; concurrent-mode only,
 * mirroring job_scan_major_mod_union_cardtable above. */
2360 job_scan_los_mod_union_cardtable (WorkerData *worker_data, void *job_data_untyped)
2362 g_assert (concurrent_collection_in_progress);
2363 sgen_los_scan_card_table (TRUE, sgen_workers_get_job_gray_queue (worker_data));
/* Debug check: log any nursery scan-start entry that points strictly
 * inside the object [start, end) — scan starts must never land in the
 * middle of an object. */
2367 verify_scan_starts (char *start, char *end)
2371 for (i = 0; i < nursery_section->num_scan_start; ++i) {
2372 char *addr = nursery_section->scan_starts [i];
2373 if (addr > start && addr < end)
2374 SGEN_LOG (1, "NFC-BAD SCAN START [%d] %p for obj [%p %p]", i, addr, start, end);
/* Debug walk of the whole nursery (gated on do_verify_nursery): logs
 * forwarded and pinned objects, verifies scan starts for each object, and
 * optionally dumps holes and object details when do_dump_nursery_content
 * is set.  NULL words are skipped as holes, one pointer at a time. */
2379 verify_nursery (void)
2381 char *start, *end, *cur, *hole_start;
2383 if (!do_verify_nursery)
2386 /* This cleans up unused fragments */
2387 sgen_nursery_allocator_prepare_for_pinning ();
2389 hole_start = start = cur = sgen_get_nursery_start ();
2390 end = sgen_get_nursery_end ();
2395 if (!*(void**)cur) {
2396 cur += sizeof (void*);
2400 if (object_is_forwarded (cur))
2401 SGEN_LOG (1, "FORWARDED OBJ %p", cur);
2402 else if (object_is_pinned (cur))
2403 SGEN_LOG (1, "PINNED OBJ %p", cur);
2405 ss = safe_object_get_size ((MonoObject*)cur);
2406 size = ALIGN_UP (safe_object_get_size ((MonoObject*)cur));
2407 verify_scan_starts (cur, cur + size);
2408 if (do_dump_nursery_content) {
2409 if (cur > hole_start)
2410 SGEN_LOG (1, "HOLE [%p %p %d]", hole_start, cur, (int)(cur - hole_start));
2411 SGEN_LOG (1, "OBJ [%p %p %d %d %s %d]", cur, cur + size, (int)size, (int)ss, sgen_safe_name ((MonoObject*)cur), (gpointer)LOAD_VTABLE (cur) == sgen_get_array_fill_vtable ());
2419 * Checks that no objects in the nursery are forwarded or pinned. This
2420 * is a precondition to restarting the mutator while doing a
2421 * concurrent collection. Note that we don't clear fragments because
2422 * we depend on that having happened earlier.
2425 check_nursery_is_clean (void)
2427 char *start, *end, *cur;
2429 start = cur = sgen_get_nursery_start ();
2430 end = sgen_get_nursery_end ();
/* NULL words are holes — skip one pointer at a time. */
2435 if (!*(void**)cur) {
2436 cur += sizeof (void*);
/* Hard assert: a forwarded or pinned object here would violate the
 * precondition described above. */
2440 g_assert (!object_is_forwarded (cur));
2441 g_assert (!object_is_pinned (cur));
2443 ss = safe_object_get_size ((MonoObject*)cur);
2444 size = ALIGN_UP (safe_object_get_size ((MonoObject*)cur));
2445 verify_scan_starts (cur, cur + size);
/* Initialize the GC thread's gray queue.  For parallel/concurrent
 * collections the distribute queue is set up and the local queue gets an
 * alloc-prepare hook (gray_queue_redirect) so sections can be handed off
 * to workers; otherwise a plain serial queue is enough. */
2452 init_gray_queue (void)
2454 if (sgen_collection_is_parallel () || sgen_collection_is_concurrent ()) {
2455 sgen_workers_init_distribute_gray_queue ();
2456 sgen_gray_object_queue_init_with_alloc_prepare (&gray_queue, NULL,
2457 gray_queue_redirect, sgen_workers_get_distribute_section_gray_queue ());
2459 sgen_gray_object_queue_init (&gray_queue, NULL);
/* Iteration callback (e.g. for sgen_cement_iterate): stage obj's address
 * for pinning; size and data are unused. */
2464 pin_stage_object_callback (char *obj, size_t size, void *data)
2466 sgen_pin_stage_ptr (obj);
2467 /* FIXME: do pin stats if enabled */
2471 * Collect objects in the nursery. Returns whether to trigger a major
/* Minor (nursery) collection, run with the world stopped.  Phases, in
 * order: setup and statistics, pinning (roots + cemented objects),
 * remembered-set scan, registered-root / thread-data / finalizer scans
 * (all dispatched as worker jobs), finishing the gray stack, and finally
 * rebuilding nursery fragments for the next mutator phase.  Returns
 * whether late-pinned objects make a follow-up major collection
 * worthwhile.  NOTE(review): many lines are elided in this excerpt. */
2475 collect_nursery (SgenGrayQueue *unpin_queue, gboolean finish_up_concurrent_mark)
2477 gboolean needs_major;
2478 size_t max_garbage_amount;
2480 FinishRememberedSetScanJobData *frssjd;
2481 ScanFromRegisteredRootsJobData *scrrjd_normal, *scrrjd_wbarrier;
2482 ScanFinalizerEntriesJobData *sfejd_fin_ready, *sfejd_critical_fin;
2483 ScanThreadDataJobData *stdjd;
2484 mword fragment_total;
2485 ScanCopyContext ctx;
2486 TV_DECLARE (all_atv);
2487 TV_DECLARE (all_btv);
2491 if (disable_minor_collections)
2494 MONO_GC_BEGIN (GENERATION_NURSERY);
2495 binary_protocol_collection_begin (stat_minor_gcs, GENERATION_NURSERY);
2499 #ifndef DISABLE_PERFCOUNTERS
2500 mono_perfcounters->gc_collections0++;
/* Select serial or parallel minor-collector object ops for this cycle. */
2503 current_collection_generation = GENERATION_NURSERY;
2504 if (sgen_collection_is_parallel ())
2505 current_object_ops = sgen_minor_collector.parallel_ops;
2507 current_object_ops = sgen_minor_collector.serial_ops;
2509 reset_pinned_from_failed_allocation ();
2511 check_scan_starts ();
2513 sgen_nursery_alloc_prepare_for_minor ();
2517 nursery_next = sgen_nursery_alloc_get_upper_alloc_bound ();
2518 /* FIXME: optimize later to use the higher address where an object can be present */
2519 nursery_next = MAX (nursery_next, sgen_get_nursery_end ());
2521 SGEN_LOG (1, "Start nursery collection %d %p-%p, size: %d", stat_minor_gcs, sgen_get_nursery_start (), nursery_next, (int)(nursery_next - sgen_get_nursery_start ()));
2522 max_garbage_amount = nursery_next - sgen_get_nursery_start ();
2523 g_assert (nursery_section->size >= max_garbage_amount);
2525 /* world must be stopped already */
2526 TV_GETTIME (all_atv);
2530 time_minor_pre_collection_fragment_clear += TV_ELAPSED (atv, btv);
2532 if (xdomain_checks) {
2533 sgen_clear_nursery_fragments ();
2534 check_for_xdomain_refs ();
2537 nursery_section->next_data = nursery_next;
2539 major_collector.start_nursery_collection ();
2541 sgen_memgov_minor_collection_start ();
2546 gc_stats.minor_gc_count ++;
2548 MONO_GC_CHECKPOINT_1 (GENERATION_NURSERY);
2550 sgen_process_fin_stage_entries ();
2551 sgen_process_dislink_stage_entries ();
2553 MONO_GC_CHECKPOINT_2 (GENERATION_NURSERY);
2555 /* pin from pinned handles */
2556 sgen_init_pinning ();
2557 mono_profiler_gc_event (MONO_GC_EVENT_MARK_START, 0);
2558 pin_from_roots (sgen_get_nursery_start (), nursery_next, WORKERS_DISTRIBUTE_GRAY_QUEUE);
2559 /* pin cemented objects */
2560 sgen_cement_iterate (pin_stage_object_callback, NULL);
2561 /* identify pinned objects */
2562 sgen_optimize_pin_queue (0);
2563 sgen_pinning_setup_section (nursery_section);
2564 ctx.scan_func = NULL;
2565 ctx.copy_func = NULL;
2566 ctx.queue = WORKERS_DISTRIBUTE_GRAY_QUEUE;
2567 sgen_pin_objects_in_section (nursery_section, ctx);
2568 sgen_pinning_trim_queue_to_section (nursery_section);
2571 time_minor_pinning += TV_ELAPSED (btv, atv);
2572 SGEN_LOG (2, "Finding pinned pointers: %d in %d usecs", sgen_get_pinned_count (), TV_ELAPSED (btv, atv));
2573 SGEN_LOG (4, "Start scan with %d pinned objects", sgen_get_pinned_count ());
2575 MONO_GC_CHECKPOINT_3 (GENERATION_NURSERY);
2577 if (whole_heap_check_before_collection) {
2578 sgen_clear_nursery_fragments ();
2579 sgen_check_whole_heap (finish_up_concurrent_mark);
2581 if (consistency_check_at_minor_collection)
2582 sgen_check_consistency ();
2584 sgen_workers_start_all_workers ();
2585 sgen_workers_start_marking ();
/* Remembered-set scan, dispatched as a worker job over the nursery range. */
2587 frssjd = sgen_alloc_internal_dynamic (sizeof (FinishRememberedSetScanJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
2588 frssjd->heap_start = sgen_get_nursery_start ();
2589 frssjd->heap_end = nursery_next;
2590 sgen_workers_enqueue_job (job_finish_remembered_set_scan, frssjd);
2592 /* we don't have complete write barrier yet, so we scan all the old generation sections */
2594 time_minor_scan_remsets += TV_ELAPSED (atv, btv);
2595 SGEN_LOG (2, "Old generation scan: %d usecs", TV_ELAPSED (atv, btv));
2597 MONO_GC_CHECKPOINT_4 (GENERATION_NURSERY);
/* Serial collections drain the gray stack inline on this thread. */
2599 if (!sgen_collection_is_parallel ()) {
2600 ctx.scan_func = current_object_ops.scan_object;
2601 ctx.copy_func = NULL;
2602 ctx.queue = &gray_queue;
2603 sgen_drain_gray_stack (-1, ctx);
2606 if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS)
2607 report_registered_roots ();
2608 if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS)
2609 report_finalizer_roots ();
2611 time_minor_scan_pinned += TV_ELAPSED (btv, atv);
2613 MONO_GC_CHECKPOINT_5 (GENERATION_NURSERY);
2615 /* registered roots, this includes static fields */
2616 scrrjd_normal = sgen_alloc_internal_dynamic (sizeof (ScanFromRegisteredRootsJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
2617 scrrjd_normal->copy_or_mark_func = current_object_ops.copy_or_mark_object;
2618 scrrjd_normal->scan_func = current_object_ops.scan_object;
2619 scrrjd_normal->heap_start = sgen_get_nursery_start ();
2620 scrrjd_normal->heap_end = nursery_next;
2621 scrrjd_normal->root_type = ROOT_TYPE_NORMAL;
2622 sgen_workers_enqueue_job (job_scan_from_registered_roots, scrrjd_normal);
2624 scrrjd_wbarrier = sgen_alloc_internal_dynamic (sizeof (ScanFromRegisteredRootsJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
2625 scrrjd_wbarrier->copy_or_mark_func = current_object_ops.copy_or_mark_object;
2626 scrrjd_wbarrier->scan_func = current_object_ops.scan_object;
2627 scrrjd_wbarrier->heap_start = sgen_get_nursery_start ();
2628 scrrjd_wbarrier->heap_end = nursery_next;
2629 scrrjd_wbarrier->root_type = ROOT_TYPE_WBARRIER;
2630 sgen_workers_enqueue_job (job_scan_from_registered_roots, scrrjd_wbarrier);
2633 time_minor_scan_registered_roots += TV_ELAPSED (atv, btv);
2635 MONO_GC_CHECKPOINT_6 (GENERATION_NURSERY);
/* Thread stacks/registers, scanned over the nursery range. */
2638 stdjd = sgen_alloc_internal_dynamic (sizeof (ScanThreadDataJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
2639 stdjd->heap_start = sgen_get_nursery_start ();
2640 stdjd->heap_end = nursery_next;
2641 sgen_workers_enqueue_job (job_scan_thread_data, stdjd);
2644 time_minor_scan_thread_data += TV_ELAPSED (btv, atv);
2647 MONO_GC_CHECKPOINT_7 (GENERATION_NURSERY);
2649 g_assert (!sgen_collection_is_parallel () && !sgen_collection_is_concurrent ());
2651 if (sgen_collection_is_parallel () || sgen_collection_is_concurrent ())
2652 g_assert (sgen_gray_object_queue_is_empty (&gray_queue));
2654 /* Scan the list of objects ready for finalization. If */
2655 sfejd_fin_ready = sgen_alloc_internal_dynamic (sizeof (ScanFinalizerEntriesJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
2656 sfejd_fin_ready->list = fin_ready_list;
2657 sgen_workers_enqueue_job (job_scan_finalizer_entries, sfejd_fin_ready);
2659 sfejd_critical_fin = sgen_alloc_internal_dynamic (sizeof (ScanFinalizerEntriesJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
2660 sfejd_critical_fin->list = critical_fin_list;
2661 sgen_workers_enqueue_job (job_scan_finalizer_entries, sfejd_critical_fin);
2663 MONO_GC_CHECKPOINT_8 (GENERATION_NURSERY);
2665 finish_gray_stack (GENERATION_NURSERY, &gray_queue);
2667 time_minor_finish_gray_stack += TV_ELAPSED (btv, atv);
2668 mono_profiler_gc_event (MONO_GC_EVENT_MARK_END, 0);
2670 MONO_GC_CHECKPOINT_9 (GENERATION_NURSERY);
2673 * The (single-threaded) finalization code might have done
2674 * some copying/marking so we can only reset the GC thread's
2675 * worker data here instead of earlier when we joined the
2678 sgen_workers_reset_data ();
2680 if (objects_pinned) {
2681 sgen_optimize_pin_queue (0);
2682 sgen_pinning_setup_section (nursery_section);
2685 /* walk the pin_queue, build up the fragment list of free memory, unmark
2686 * pinned objects as we go, memzero() the empty fragments so they are ready for the
2689 mono_profiler_gc_event (MONO_GC_EVENT_RECLAIM_START, 0);
2690 fragment_total = sgen_build_nursery_fragments (nursery_section,
2691 nursery_section->pin_queue_start, nursery_section->pin_queue_num_entries,
2693 if (!fragment_total)
2696 /* Clear TLABs for all threads */
2697 sgen_clear_tlabs ();
2699 mono_profiler_gc_event (MONO_GC_EVENT_RECLAIM_END, 0);
2701 time_minor_fragment_creation += TV_ELAPSED (atv, btv);
2702 SGEN_LOG (2, "Fragment creation: %d usecs, %lu bytes available", TV_ELAPSED (atv, btv), (unsigned long)fragment_total);
2704 if (consistency_check_at_minor_collection)
2705 sgen_check_major_refs ();
2707 major_collector.finish_nursery_collection ();
2709 TV_GETTIME (all_btv);
2710 gc_stats.minor_gc_time_usecs += TV_ELAPSED (all_atv, all_btv);
2713 dump_heap ("minor", stat_minor_gcs - 1, NULL);
2715 /* prepare the pin queue for the next collection */
2716 sgen_finish_pinning ();
2717 if (fin_ready_list || critical_fin_list) {
2718 SGEN_LOG (4, "Finalizer-thread wakeup: ready %d", num_ready_finalizers);
2719 mono_gc_finalize_notify ();
2721 sgen_pin_stats_reset ();
2722 /* clear cemented hash */
2723 sgen_cement_clear_below_threshold ();
2725 g_assert (sgen_gray_object_queue_is_empty (&gray_queue));
2727 remset.finish_minor_collection ();
2729 check_scan_starts ();
2731 binary_protocol_flush_buffers (FALSE);
2733 sgen_memgov_minor_collection_end ();
2735 /*objects are late pinned because of lack of memory, so a major is a good call*/
2736 needs_major = objects_pinned > 0;
2737 current_collection_generation = -1;
2740 MONO_GC_END (GENERATION_NURSERY);
2741 binary_protocol_collection_end (stat_minor_gcs - 1, GENERATION_NURSERY);
2743 if (check_nursery_objects_pinned && !sgen_minor_collector.is_split)
2744 sgen_check_nursery_objects_pinned (unpin_queue != NULL);
/* Iteration callback: scan one nursery object with the context's scan
 * function; size is unused. */
2750 scan_nursery_objects_callback (char *obj, size_t size, ScanCopyContext *ctx)
2752 ctx->scan_func (obj, ctx->queue);
/* Scan every object in the nursery section front to back, applying the
 * context's scan function via scan_nursery_objects_callback. */
2756 scan_nursery_objects (ScanCopyContext ctx)
2758 sgen_scan_area_with_callback (nursery_section->data, nursery_section->end_data,
2759 (IterateObjectCallbackFunc)scan_nursery_objects_callback, (void*)&ctx, FALSE);
/* Root phase of a major collection: clears/prepares nursery fragments,
 * pins from conservative roots and cemented objects over the whole heap
 * range, marks pinned large objects, scans (or promotes) nursery objects,
 * then dispatches registered-root / thread-data / finalizer scan jobs —
 * plus mod-union card-table jobs when finishing a concurrent mark.  The
 * concurrent and non-concurrent paths diverge throughout (see the inline
 * comments).  NOTE(review): many lines are elided in this excerpt. */
2763 major_copy_or_mark_from_roots (int *old_next_pin_slot, gboolean finish_up_concurrent_mark, gboolean scan_mod_union)
2768 /* FIXME: only use these values for the precise scan
2769 * note that to_space pointers should be excluded anyway...
2771 char *heap_start = NULL;
2772 char *heap_end = (char*)-1;
2773 gboolean profile_roots = mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS;
2774 GCRootReport root_report = { 0 };
2775 ScanFromRegisteredRootsJobData *scrrjd_normal, *scrrjd_wbarrier;
2776 ScanThreadDataJobData *stdjd;
2777 ScanFinalizerEntriesJobData *sfejd_fin_ready, *sfejd_critical_fin;
2778 ScanCopyContext ctx;
2780 if (concurrent_collection_in_progress) {
2781 /* This cleans up unused fragments */
2782 sgen_nursery_allocator_prepare_for_pinning ();
2784 if (do_concurrent_checks)
2785 check_nursery_is_clean ();
2787 /* The concurrent collector doesn't touch the nursery. */
2788 sgen_nursery_alloc_prepare_for_major ();
2795 /* Pinning depends on this */
2796 sgen_clear_nursery_fragments ();
2798 if (whole_heap_check_before_collection)
2799 sgen_check_whole_heap (finish_up_concurrent_mark);
2802 time_major_pre_collection_fragment_clear += TV_ELAPSED (atv, btv);
2804 if (!sgen_collection_is_concurrent ())
2805 nursery_section->next_data = sgen_get_nursery_end ();
2806 /* we should also coalesce scanning from sections close to each other
2807 * and deal with pointers outside of the sections later.
2811 *major_collector.have_swept = FALSE;
2813 if (xdomain_checks) {
2814 sgen_clear_nursery_fragments ();
2815 check_for_xdomain_refs ();
2818 if (!concurrent_collection_in_progress) {
2819 /* Remsets are not useful for a major collection */
2820 remset.prepare_for_major_collection ();
2823 sgen_process_fin_stage_entries ();
2824 sgen_process_dislink_stage_entries ();
/* Conservative pinning pass over the entire heap address range. */
2827 sgen_init_pinning ();
2828 SGEN_LOG (6, "Collecting pinned addresses");
2829 pin_from_roots ((void*)lowest_heap_address, (void*)highest_heap_address, WORKERS_DISTRIBUTE_GRAY_QUEUE);
2831 if (!concurrent_collection_in_progress || finish_up_concurrent_mark) {
2832 if (major_collector.is_concurrent) {
2834 * The concurrent major collector cannot evict
2835 * yet, so we need to pin cemented objects to
2836 * not break some asserts.
2838 * FIXME: We could evict now!
2840 sgen_cement_iterate (pin_stage_object_callback, NULL);
2843 if (!concurrent_collection_in_progress)
2844 sgen_cement_reset ();
2847 sgen_optimize_pin_queue (0);
2850 * The concurrent collector doesn't move objects, neither on
2851 * the major heap nor in the nursery, so we can mark even
2852 * before pinning has finished. For the non-concurrent
2853 * collector we start the workers after pinning.
2855 if (concurrent_collection_in_progress) {
2856 sgen_workers_start_all_workers ();
2857 sgen_workers_start_marking ();
2861 * pin_queue now contains all candidate pointers, sorted and
2862 * uniqued. We must do two passes now to figure out which
2863 * objects are pinned.
2865 * The first is to find within the pin_queue the area for each
2866 * section. This requires that the pin_queue be sorted. We
2867 * also process the LOS objects and pinned chunks here.
2869 * The second, destructive, pass is to reduce the section
2870 * areas to pointers to the actually pinned objects.
2872 SGEN_LOG (6, "Pinning from sections");
2873 /* first pass for the sections */
2874 sgen_find_section_pin_queue_start_end (nursery_section);
2875 major_collector.find_pin_queue_start_ends (WORKERS_DISTRIBUTE_GRAY_QUEUE);
2876 /* identify possible pointers to the inside of large objects */
2877 SGEN_LOG (6, "Pinning from large objects");
2878 for (bigobj = los_object_list; bigobj; bigobj = bigobj->next) {
2880 if (sgen_find_optimized_pin_queue_area (bigobj->data, (char*)bigobj->data + sgen_los_object_size (bigobj), &dummy)) {
2881 binary_protocol_pin (bigobj->data, (gpointer)LOAD_VTABLE (bigobj->data), safe_object_get_size (((MonoObject*)(bigobj->data))));
2883 #ifdef ENABLE_DTRACE
2884 if (G_UNLIKELY (MONO_GC_OBJ_PINNED_ENABLED ())) {
2885 MonoVTable *vt = (MonoVTable*)LOAD_VTABLE (bigobj->data);
2886 MONO_GC_OBJ_PINNED ((mword)bigobj->data, sgen_safe_object_get_size ((MonoObject*)bigobj->data), vt->klass->name_space, vt->klass->name, GENERATION_OLD);
2890 if (sgen_los_object_is_pinned (bigobj->data)) {
2891 g_assert (finish_up_concurrent_mark);
2894 sgen_los_pin_object (bigobj->data);
2895 /* FIXME: only enqueue if object has references */
2896 GRAY_OBJECT_ENQUEUE (WORKERS_DISTRIBUTE_GRAY_QUEUE, bigobj->data);
2897 if (G_UNLIKELY (do_pin_stats))
2898 sgen_pin_stats_register_object ((char*) bigobj->data, safe_object_get_size ((MonoObject*) bigobj->data));
2899 SGEN_LOG (6, "Marked large object %p (%s) size: %lu from roots", bigobj->data, safe_name (bigobj->data), (unsigned long)sgen_los_object_size (bigobj));
2902 add_profile_gc_root (&root_report, bigobj->data, MONO_PROFILE_GC_ROOT_PINNING | MONO_PROFILE_GC_ROOT_MISC, 0);
2906 notify_gc_roots (&root_report);
2907 /* second pass for the sections */
2908 ctx.scan_func = concurrent_collection_in_progress ? current_object_ops.scan_object : NULL;
2909 ctx.copy_func = NULL;
2910 ctx.queue = WORKERS_DISTRIBUTE_GRAY_QUEUE;
2913 * Concurrent mark never follows references into the nursery.
2914 * In the start and finish pauses we must scan live nursery
2915 * objects, though. We could simply scan all nursery objects,
2916 * but that would be conservative. The easiest way is to do a
2917 * nursery collection, which copies all live nursery objects
2918 * (except pinned ones, with the simple nursery) to the major
2919 * heap. Scanning the mod union table later will then scan
2920 * those promoted objects, provided they're reachable. Pinned
2921 * objects in the nursery - which we can trivially find in the
2922 * pinning queue - are treated as roots in the mark pauses.
2924 * The split nursery complicates the latter part because
2925 * non-pinned objects can survive in the nursery. That's why
2926 * we need to do a full front-to-back scan of the nursery,
2927 * marking all objects.
2929 * Non-concurrent mark evacuates from the nursery, so it's
2930 * sufficient to just scan pinned nursery objects.
2932 if (concurrent_collection_in_progress && sgen_minor_collector.is_split) {
2933 scan_nursery_objects (ctx);
2935 sgen_pin_objects_in_section (nursery_section, ctx);
2936 if (check_nursery_objects_pinned && !sgen_minor_collector.is_split)
2937 sgen_check_nursery_objects_pinned (!concurrent_collection_in_progress || finish_up_concurrent_mark);
2940 major_collector.pin_objects (WORKERS_DISTRIBUTE_GRAY_QUEUE);
2941 if (old_next_pin_slot)
2942 *old_next_pin_slot = sgen_get_pinned_count ();
2945 time_major_pinning += TV_ELAPSED (atv, btv);
2946 SGEN_LOG (2, "Finding pinned pointers: %d in %d usecs", sgen_get_pinned_count (), TV_ELAPSED (atv, btv));
2947 SGEN_LOG (4, "Start scan with %d pinned objects", sgen_get_pinned_count ());
2949 major_collector.init_to_space ();
2951 #ifdef SGEN_DEBUG_INTERNAL_ALLOC
2952 main_gc_thread = mono_native_thread_self ();
/* Non-concurrent parallel mode starts the workers only after pinning. */
2955 if (!concurrent_collection_in_progress && major_collector.is_parallel) {
2956 sgen_workers_start_all_workers ();
2957 sgen_workers_start_marking ();
2960 if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS)
2961 report_registered_roots ();
2963 time_major_scan_pinned += TV_ELAPSED (btv, atv);
2965 /* registered roots, this includes static fields */
2966 scrrjd_normal = sgen_alloc_internal_dynamic (sizeof (ScanFromRegisteredRootsJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
2967 scrrjd_normal->copy_or_mark_func = current_object_ops.copy_or_mark_object;
2968 scrrjd_normal->scan_func = current_object_ops.scan_object;
2969 scrrjd_normal->heap_start = heap_start;
2970 scrrjd_normal->heap_end = heap_end;
2971 scrrjd_normal->root_type = ROOT_TYPE_NORMAL;
2972 sgen_workers_enqueue_job (job_scan_from_registered_roots, scrrjd_normal);
2974 scrrjd_wbarrier = sgen_alloc_internal_dynamic (sizeof (ScanFromRegisteredRootsJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
2975 scrrjd_wbarrier->copy_or_mark_func = current_object_ops.copy_or_mark_object;
2976 scrrjd_wbarrier->scan_func = current_object_ops.scan_object;
2977 scrrjd_wbarrier->heap_start = heap_start;
2978 scrrjd_wbarrier->heap_end = heap_end;
2979 scrrjd_wbarrier->root_type = ROOT_TYPE_WBARRIER;
2980 sgen_workers_enqueue_job (job_scan_from_registered_roots, scrrjd_wbarrier);
2983 time_major_scan_registered_roots += TV_ELAPSED (atv, btv);
/* Thread stacks/registers over the full heap range. */
2986 stdjd = sgen_alloc_internal_dynamic (sizeof (ScanThreadDataJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
2987 stdjd->heap_start = heap_start;
2988 stdjd->heap_end = heap_end;
2989 sgen_workers_enqueue_job (job_scan_thread_data, stdjd);
2992 time_major_scan_thread_data += TV_ELAPSED (btv, atv);
2995 time_major_scan_alloc_pinned += TV_ELAPSED (atv, btv);
2997 if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS)
2998 report_finalizer_roots ();
3000 /* scan the list of objects ready for finalization */
3001 sfejd_fin_ready = sgen_alloc_internal_dynamic (sizeof (ScanFinalizerEntriesJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
3002 sfejd_fin_ready->list = fin_ready_list;
3003 sgen_workers_enqueue_job (job_scan_finalizer_entries, sfejd_fin_ready);
3005 sfejd_critical_fin = sgen_alloc_internal_dynamic (sizeof (ScanFinalizerEntriesJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
3006 sfejd_critical_fin->list = critical_fin_list;
3007 sgen_workers_enqueue_job (job_scan_finalizer_entries, sfejd_critical_fin);
/* When finishing a concurrent mark, also scan the mod-union card tables. */
3009 if (scan_mod_union) {
3010 g_assert (finish_up_concurrent_mark);
3012 /* Mod union card table */
3013 sgen_workers_enqueue_job (job_scan_major_mod_union_cardtable, NULL);
3014 sgen_workers_enqueue_job (job_scan_los_mod_union_cardtable, NULL);
3018 time_major_scan_finalized += TV_ELAPSED (btv, atv);
3019 SGEN_LOG (2, "Root scan: %d usecs", TV_ELAPSED (btv, atv));
3022 time_major_scan_big_objects += TV_ELAPSED (atv, btv);
3024 if (concurrent_collection_in_progress) {
3025 /* prepare the pin queue for the next collection */
3026 sgen_finish_pinning ();
3028 sgen_pin_stats_reset ();
3030 if (do_concurrent_checks)
3031 check_nursery_is_clean ();
/*
 * Begin a major (old-generation) collection.
 * Installs the collector's object ops (concurrent vs. synchronous),
 * updates stats/perf counters and starts copying/marking from the roots.
 * @old_next_pin_slot, when non-NULL, receives the pin-queue position via
 * major_copy_or_mark_from_roots().
 * NOTE(review): several lines (braces/else branches) are missing from
 * this excerpt; comments describe only the visible statements.
 */
3036 major_start_collection (gboolean concurrent, int *old_next_pin_slot)
3038 MONO_GC_BEGIN (GENERATION_OLD);
3039 binary_protocol_collection_begin (stat_major_gcs, GENERATION_OLD);
3041 current_collection_generation = GENERATION_OLD;
3042 #ifndef DISABLE_PERFCOUNTERS
3043 mono_perfcounters->gc_collections1++;
/* The distribute gray queue must be drained before a new major cycle starts. */
3046 g_assert (sgen_section_gray_queue_is_empty (sgen_workers_get_distribute_section_gray_queue ()));
/* Concurrent path: presumably guarded by `if (concurrent)` (line missing here). */
3049 g_assert (major_collector.is_concurrent);
3050 concurrent_collection_in_progress = TRUE;
3052 sgen_cement_concurrent_start ();
3054 current_object_ops = major_collector.major_concurrent_ops;
/* Synchronous path: use the blocking major ops. */
3056 current_object_ops = major_collector.major_ops;
3059 reset_pinned_from_failed_allocation ();
3061 sgen_memgov_major_collection_start ();
3063 //count_ref_nonref_objs ();
3064 //consistency_check ();
3066 check_scan_starts ();
3069 SGEN_LOG (1, "Start major collection %d", stat_major_gcs);
3071 gc_stats.major_gc_count ++;
/* Give the specific major collector a chance to do per-collection setup. */
3073 if (major_collector.start_major_collection)
3074 major_collector.start_major_collection ();
3076 major_copy_or_mark_from_roots (old_next_pin_slot, FALSE, FALSE);
/*
 * Block until all GC worker threads have finished their marking work.
 * Redirects the main gray queue to the workers before joining so that
 * pending sections are processed; afterwards the queue must be empty.
 */
3080 wait_for_workers_to_finish (void)
3082 if (concurrent_collection_in_progress || major_collector.is_parallel) {
3083 gray_queue_redirect (&gray_queue);
3084 sgen_workers_join ();
3087 g_assert (sgen_gray_object_queue_is_empty (&gray_queue));
/* Debug aid: no worker is the "main GC thread" once the join completes. */
3089 #ifdef SGEN_DEBUG_INTERNAL_ALLOC
3090 main_gc_thread = NULL;
/*
 * Finish a major collection: join workers, drain the gray stack (including
 * finalization work), sweep the LOS list and the major heap, rebuild
 * nursery fragments, and reset per-collection state.
 * @old_next_pin_slot is the pin-queue slot recorded at collection start
 * (used to discard entries after a pinning overflow).
 * @scan_mod_union is forwarded to the final root scan when finishing a
 * concurrent collection.
 * NOTE(review): some lines (braces/else branches) are missing from this
 * excerpt; comments are limited to the visible statements.
 */
3095 major_finish_collection (const char *reason, int old_next_pin_slot, gboolean scan_mod_union)
3097 LOSObject *bigobj, *prevbo;
3103 if (concurrent_collection_in_progress || major_collector.is_parallel)
3104 wait_for_workers_to_finish ();
/* Concurrent finish: do one more root scan with the concurrent ops, then
 * rejoin the workers and switch back to the synchronous ops. */
3106 if (concurrent_collection_in_progress) {
3107 current_object_ops = major_collector.major_concurrent_ops;
3109 major_copy_or_mark_from_roots (NULL, TRUE, scan_mod_union);
3110 wait_for_workers_to_finish ();
3112 g_assert (sgen_gray_object_queue_is_empty (&gray_queue));
3114 if (do_concurrent_checks)
3115 check_nursery_is_clean ();
3117 current_object_ops = major_collector.major_ops;
3121 * The workers have stopped so we need to finish gray queue
3122 * work that might result from finalization in the main GC
3123 * thread. Redirection must therefore be turned off.
3125 sgen_gray_object_queue_disable_alloc_prepare (&gray_queue);
3126 g_assert (sgen_section_gray_queue_is_empty (sgen_workers_get_distribute_section_gray_queue ()));
3128 /* all the objects in the heap */
3129 finish_gray_stack (GENERATION_OLD, &gray_queue);
3131 time_major_finish_gray_stack += TV_ELAPSED (btv, atv);
3134 * The (single-threaded) finalization code might have done
3135 * some copying/marking so we can only reset the GC thread's
3136 * worker data here instead of earlier when we joined the
3139 sgen_workers_reset_data ();
/* Pinning overflow recovery: re-optimize the pin queue from the slot
 * recorded at collection start. */
3141 if (objects_pinned) {
3142 g_assert (!concurrent_collection_in_progress);
3144 /*This is slow, but we just OOM'd*/
3145 sgen_pin_queue_clear_discarded_entries (nursery_section, old_next_pin_slot);
3146 sgen_optimize_pin_queue (0);
3147 sgen_find_section_pin_queue_start_end (nursery_section);
3151 reset_heap_boundaries ();
3152 sgen_update_heap_boundaries ((mword)sgen_get_nursery_start (), (mword)sgen_get_nursery_end ());
3154 if (check_mark_bits_after_major_collection)
3155 sgen_check_major_heap_marked ();
3157 MONO_GC_SWEEP_BEGIN (GENERATION_OLD, !major_collector.sweeps_lazily);
3159 /* sweep the big objects list */
3161 for (bigobj = los_object_list; bigobj;) {
3162 g_assert (!object_is_pinned (bigobj->data));
/* Pinned LOS objects survive: unpin and extend the heap boundaries. */
3163 if (sgen_los_object_is_pinned (bigobj->data)) {
3164 sgen_los_unpin_object (bigobj->data);
3165 sgen_update_heap_boundaries ((mword)bigobj->data, (mword)bigobj->data + sgen_los_object_size (bigobj));
3168 /* not referenced anywhere, so we can free it */
3170 prevbo->next = bigobj->next;
3172 los_object_list = bigobj->next;
3174 bigobj = bigobj->next;
3175 sgen_los_free_object (to_free);
3179 bigobj = bigobj->next;
3183 time_major_free_bigobjs += TV_ELAPSED (atv, btv);
3188 time_major_los_sweep += TV_ELAPSED (btv, atv);
3190 major_collector.sweep ();
3192 MONO_GC_SWEEP_END (GENERATION_OLD, !major_collector.sweeps_lazily);
3195 time_major_sweep += TV_ELAPSED (atv, btv);
/* Synchronous collections rebuild nursery fragments here; a concurrent
 * collection already prepared its pin queue earlier. */
3197 if (!concurrent_collection_in_progress) {
3198 /* walk the pin_queue, build up the fragment list of free memory, unmark
3199 * pinned objects as we go, memzero() the empty fragments so they are ready for the
3202 if (!sgen_build_nursery_fragments (nursery_section, nursery_section->pin_queue_start, nursery_section->pin_queue_num_entries, NULL))
3205 /* prepare the pin queue for the next collection */
3206 sgen_finish_pinning ();
3208 /* Clear TLABs for all threads */
3209 sgen_clear_tlabs ();
3211 sgen_pin_stats_reset ();
3214 if (concurrent_collection_in_progress)
3215 sgen_cement_concurrent_finish ();
3216 sgen_cement_clear_below_threshold ();
3219 time_major_fragment_creation += TV_ELAPSED (btv, atv);
3222 dump_heap ("major", stat_major_gcs - 1, reason);
/* Wake the finalizer thread if any objects became ready to finalize. */
3224 if (fin_ready_list || critical_fin_list) {
3225 SGEN_LOG (4, "Finalizer-thread wakeup: ready %d", num_ready_finalizers);
3226 mono_gc_finalize_notify ();
3229 g_assert (sgen_gray_object_queue_is_empty (&gray_queue));
3231 sgen_memgov_major_collection_end ();
3232 current_collection_generation = -1;
3234 major_collector.finish_major_collection ();
3236 g_assert (sgen_section_gray_queue_is_empty (sgen_workers_get_distribute_section_gray_queue ()));
3238 if (concurrent_collection_in_progress)
3239 concurrent_collection_in_progress = FALSE;
3241 check_scan_starts ();
3243 binary_protocol_flush_buffers (FALSE);
3245 //consistency_check ();
3247 MONO_GC_END (GENERATION_OLD);
3248 binary_protocol_collection_end (stat_major_gcs - 1, GENERATION_OLD);
/*
 * Run a complete synchronous major collection (world must already be
 * stopped).  Returns whether pinning caused an allocation failure, i.e.
 * whether a follow-up nursery collection is warranted
 * (bytes_pinned_from_failed_allocation > 0).
 */
3252 major_do_collection (const char *reason)
3254 TV_DECLARE (all_atv);
3255 TV_DECLARE (all_btv);
3256 int old_next_pin_slot;
/* Sanity: no objects should be counted as marked before we start. */
3258 if (major_collector.get_and_reset_num_major_objects_marked) {
3259 long long num_marked = major_collector.get_and_reset_num_major_objects_marked ();
3260 g_assert (!num_marked);
3263 /* world must be stopped already */
3264 TV_GETTIME (all_atv);
3266 major_start_collection (FALSE, &old_next_pin_slot);
3267 major_finish_collection (reason, old_next_pin_slot, FALSE);
3269 TV_GETTIME (all_btv);
3270 gc_stats.major_gc_time_usecs += TV_ELAPSED (all_atv, all_btv);
3272 /* FIXME: also report this to the user, preferably in gc-end. */
3273 if (major_collector.get_and_reset_num_major_objects_marked)
3274 major_collector.get_and_reset_num_major_objects_marked ();
3276 return bytes_pinned_from_failed_allocation > 0;
/* Forward declaration (definition above); used by the collection-driver code below. */
3279 static gboolean major_do_collection (const char *reason);
/*
 * Kick off a concurrent major collection: start the collection with the
 * concurrent ops, hand the gray queue to the worker threads, and return
 * while they keep marking in the background.
 */
3282 major_start_concurrent_collection (const char *reason)
3284 long long num_objects_marked = major_collector.get_and_reset_num_major_objects_marked ();
3286 g_assert (num_objects_marked == 0);
3288 MONO_GC_CONCURRENT_START_BEGIN (GENERATION_OLD);
3290 // FIXME: store reason and pass it when finishing
3291 major_start_collection (TRUE, NULL);
/* Hand all queued gray sections to the workers and wait for them to pick
 * up their jobs before resuming the mutators. */
3293 gray_queue_redirect (&gray_queue);
3294 sgen_workers_wait_for_jobs ();
3296 num_objects_marked = major_collector.get_and_reset_num_major_objects_marked ();
3297 MONO_GC_CONCURRENT_START_END (GENERATION_OLD, num_objects_marked);
3299 current_collection_generation = -1;
/*
 * Advance an in-progress concurrent major collection.  Updates the
 * cardtable mod-union state; if the workers are done (or @force_finish is
 * set) it finishes the collection: runs a nursery collection that records
 * pinned objects in @unpin_queue, completes the major cycle, then unpins
 * those objects.  Returns early (visible MONO_GC_CONCURRENT_UPDATE_END
 * probe) when the workers are still marking.
 */
3303 major_update_or_finish_concurrent_collection (gboolean force_finish)
3305 SgenGrayQueue unpin_queue;
3306 memset (&unpin_queue, 0, sizeof (unpin_queue));
3308 MONO_GC_CONCURRENT_UPDATE_FINISH_BEGIN (GENERATION_OLD, major_collector.get_and_reset_num_major_objects_marked ());
3310 g_assert (sgen_gray_object_queue_is_empty (&gray_queue));
/* Fold dirty cards into the mod-union tables for both heaps. */
3312 major_collector.update_cardtable_mod_union ();
3313 sgen_los_update_cardtable_mod_union ();
3315 if (!force_finish && !sgen_workers_all_done ()) {
3316 MONO_GC_CONCURRENT_UPDATE_END (GENERATION_OLD, major_collector.get_and_reset_num_major_objects_marked ());
3320 if (mod_union_consistency_check)
3321 sgen_check_mod_union_consistency ();
/* Minor collection first; objects it pins go into unpin_queue so they can
 * be unpinned once the major finish is complete. */
3323 collect_nursery (&unpin_queue, TRUE);
3325 current_collection_generation = GENERATION_OLD;
3326 major_finish_collection ("finishing", -1, TRUE);
3328 if (whole_heap_check_before_collection)
3329 sgen_check_whole_heap (FALSE);
3331 unpin_objects_from_queue (&unpin_queue);
3332 sgen_gray_object_queue_deinit (&unpin_queue);
3334 MONO_GC_CONCURRENT_FINISH_END (GENERATION_OLD, major_collector.get_and_reset_num_major_objects_marked ());
3336 current_collection_generation = -1;
3342 * Ensure an allocation request for @size will succeed by freeing enough memory.
3344 * LOCKING: The GC lock MUST be held.
/*
 * Decides which generation (if any) must be collected so that an
 * allocation of @size can succeed, then performs that collection.
 * Priority: LOS overflow / degraded mode / minor allowance pick the old
 * generation; otherwise a full nursery triggers a minor collection; as a
 * last resort an already-finished concurrent collection is completed.
 */
3347 sgen_ensure_free_space (size_t size)
3349 int generation_to_collect = -1;
3350 const char *reason = NULL;
/* Large objects bypass the nursery, so only a major collection helps. */
3353 if (size > SGEN_MAX_SMALL_OBJ_SIZE) {
3354 if (sgen_need_major_collection (size)) {
3355 reason = "LOS overflow";
3356 generation_to_collect = GENERATION_OLD;
3359 if (degraded_mode) {
3360 if (sgen_need_major_collection (size)) {
3361 reason = "Degraded mode overflow";
3362 generation_to_collect = GENERATION_OLD;
3364 } else if (sgen_need_major_collection (size)) {
3365 reason = "Minor allowance";
3366 generation_to_collect = GENERATION_OLD;
3368 generation_to_collect = GENERATION_NURSERY;
3369 reason = "Nursery full";
/* Nothing urgent: opportunistically finish a concurrent collection whose
 * workers are already done. */
3373 if (generation_to_collect == -1) {
3374 if (concurrent_collection_in_progress && sgen_workers_all_done ()) {
3375 generation_to_collect = GENERATION_OLD;
3376 reason = "Finish concurrent collection";
3380 if (generation_to_collect == -1)
3382 sgen_perform_collection (size, generation_to_collect, reason, FALSE);
/*
 * Top-level collection driver: stops the world, runs the requested
 * collection (possibly finishing a concurrent one first), handles overflow
 * collections (a second collection triggered by the first one failing),
 * fills in up to two GGTimingInfo records, and restarts the world.
 * @requested_size is the allocation that prompted the collection;
 * @wait_to_finish forces a synchronous (blocking) major collection.
 * NOTE(review): some lines (braces/else branches) are missing from this
 * excerpt; comments cover only the visible statements.
 */
3386 sgen_perform_collection (size_t requested_size, int generation_to_collect, const char *reason, gboolean wait_to_finish)
3388 TV_DECLARE (gc_end);
3389 GGTimingInfo infos [2];
3390 int overflow_generation_to_collect = -1;
3391 int oldest_generation_collected = generation_to_collect;
3392 const char *overflow_reason = NULL;
3394 MONO_GC_REQUESTED (generation_to_collect, requested_size, wait_to_finish ? 1 : 0);
3396 binary_protocol_collection_force (generation_to_collect);
3398 g_assert (generation_to_collect == GENERATION_NURSERY || generation_to_collect == GENERATION_OLD);
3400 memset (infos, 0, sizeof (infos));
3401 mono_profiler_gc_event (MONO_GC_EVENT_START, generation_to_collect);
/* infos[0] = primary collection; infos[1] stays generation -1 unless an
 * overflow collection runs. */
3403 infos [0].generation = generation_to_collect;
3404 infos [0].reason = reason;
3405 infos [0].is_overflow = FALSE;
3406 TV_GETTIME (infos [0].total_time);
3407 infos [1].generation = -1;
3409 sgen_stop_world (generation_to_collect);
/* A concurrent collection in flight must be updated/finished first. */
3411 if (concurrent_collection_in_progress) {
3412 if (major_update_or_finish_concurrent_collection (wait_to_finish && generation_to_collect == GENERATION_OLD)) {
3413 oldest_generation_collected = GENERATION_OLD;
3416 if (generation_to_collect == GENERATION_OLD)
/* The embedder may demand a synchronous major via a runtime-settable flag. */
3419 if (generation_to_collect == GENERATION_OLD &&
3420 allow_synchronous_major &&
3421 major_collector.want_synchronous_collection &&
3422 *major_collector.want_synchronous_collection) {
3423 wait_to_finish = TRUE;
3427 //FIXME extract overflow reason
3428 if (generation_to_collect == GENERATION_NURSERY) {
3429 if (collect_nursery (NULL, FALSE)) {
3430 overflow_generation_to_collect = GENERATION_OLD;
3431 overflow_reason = "Minor overflow";
/* Major path: a concurrent major is preceded by a nursery collection
 * (unless we're going to block anyway). */
3434 if (major_collector.is_concurrent) {
3435 g_assert (!concurrent_collection_in_progress);
3436 if (!wait_to_finish)
3437 collect_nursery (NULL, FALSE);
3440 if (major_collector.is_concurrent && !wait_to_finish) {
3441 major_start_concurrent_collection (reason);
3442 // FIXME: set infos[0] properly
/* Synchronous major; excessive pinning triggers a follow-up minor. */
3445 if (major_do_collection (reason)) {
3446 overflow_generation_to_collect = GENERATION_NURSERY;
3447 overflow_reason = "Excessive pinning";
3452 TV_GETTIME (gc_end);
3453 infos [0].total_time = SGEN_TV_ELAPSED (infos [0].total_time, gc_end);
/* Run the overflow collection (second pass) under its own profiler events. */
3456 if (!major_collector.is_concurrent && overflow_generation_to_collect != -1) {
3457 mono_profiler_gc_event (MONO_GC_EVENT_START, overflow_generation_to_collect);
3458 infos [1].generation = overflow_generation_to_collect;
3459 infos [1].reason = overflow_reason;
3460 infos [1].is_overflow = TRUE;
3461 infos [1].total_time = gc_end;
3463 if (overflow_generation_to_collect == GENERATION_NURSERY)
3464 collect_nursery (NULL, FALSE);
3466 major_do_collection (overflow_reason);
3468 TV_GETTIME (gc_end);
3469 infos [1].total_time = SGEN_TV_ELAPSED (infos [1].total_time, gc_end);
3471 /* keep events symmetric */
3472 mono_profiler_gc_event (MONO_GC_EVENT_END, overflow_generation_to_collect);
3474 oldest_generation_collected = MAX (oldest_generation_collected, overflow_generation_to_collect);
3477 SGEN_LOG (2, "Heap size: %lu, LOS size: %lu", (unsigned long)mono_gc_get_heap_size (), (unsigned long)los_memory_usage);
3479 /* this also sets the proper pointers for the next allocation */
3480 if (generation_to_collect == GENERATION_NURSERY && !sgen_can_alloc_size (requested_size)) {
3481 /* TypeBuilder and MonoMethod are killing mcs with fragmentation */
3482 SGEN_LOG (1, "nursery collection didn't find enough room for %zd alloc (%d pinned)", requested_size, sgen_get_pinned_count ());
3483 sgen_dump_pin_queue ();
3488 g_assert (sgen_gray_object_queue_is_empty (&gray_queue));
3490 sgen_restart_world (oldest_generation_collected, infos);
3492 mono_profiler_gc_event (MONO_GC_EVENT_END, generation_to_collect);
3496 * ######################################################################
3497 * ######## Memory allocation from the OS
3498 * ######################################################################
3499 * This section of code deals with getting memory from the OS and
3500 * allocating memory for GC-internal data structures.
3501 * Internal memory can be handled with a freelist for small objects.
/* Debug helper: dump internal and pinned memory usage to stdout.
 * Marked G_GNUC_UNUSED — intended to be called manually (e.g. from a debugger). */
3507 G_GNUC_UNUSED static void
3508 report_internal_mem_usage (void)
3510 printf ("Internal memory usage:\n");
3511 sgen_report_internal_mem_usage ();
3512 printf ("Pinned memory usage:\n");
3513 major_collector.report_pinned_memory_usage ();
3517 * ######################################################################
3518 * ######## Finalization support
3519 * ######################################################################
/*
 * Liveness check for an old-generation object: pinned or forwarded objects
 * are alive; LOS objects are alive iff pinned; otherwise defer to the
 * major collector's own liveness predicate.
 */
3522 static inline gboolean
3523 sgen_major_is_object_alive (void *object)
3527 /* Oldgen objects can be pinned and forwarded too */
3528 if (SGEN_OBJECT_IS_PINNED (object) || SGEN_OBJECT_IS_FORWARDED (object))
3532 * FIXME: major_collector.is_object_live() also calculates the
3533 * size. Avoid the double calculation.
3535 objsize = SGEN_ALIGN_UP (sgen_safe_object_get_size ((MonoObject*)object));
/* Objects above the small-object threshold live in the LOS. */
3536 if (objsize > SGEN_MAX_SMALL_OBJ_SIZE)
3537 return sgen_los_object_is_pinned (object);
3539 return major_collector.is_object_live (object);
3543 * If the object has been forwarded it means it's still referenced from a root.
3544 * If it is pinned it's still alive as well.
3545 * A LOS object is only alive if we have pinned it.
3546 * Return TRUE if @obj is ready to be finalized.
/* Generation-dispatching liveness check: nursery objects use the nursery
 * predicate, everything else the major-heap predicate above. */
3548 static inline gboolean
3549 sgen_is_object_alive (void *object)
3551 if (ptr_in_nursery (object))
3552 return sgen_nursery_is_object_alive (object);
3554 return sgen_major_is_object_alive (object);
3558 * This function returns true if @object is either alive or it belongs to the old gen
3559 * and we're currently doing a minor collection.
/* During a minor collection old-gen objects are not traced, so they are
 * treated as alive unconditionally (visible early path at line 3567). */
3562 sgen_is_object_alive_for_current_gen (char *object)
3564 if (ptr_in_nursery (object))
3565 return sgen_nursery_is_object_alive (object);
3567 if (current_collection_generation == GENERATION_NURSERY)
3570 return sgen_major_is_object_alive (object);
3574 * This function returns true if @object is either alive and belongs to the
3575 * current collection - major collections are full heap, so old gen objects
3576 * are never alive during a minor collection.
/* Complement of the previous predicate w.r.t. minor collections: the
 * old-gen branch here takes the opposite early result (line 3584 ff.). */
3579 sgen_is_object_alive_and_on_current_collection (char *object)
3581 if (ptr_in_nursery (object))
3582 return sgen_nursery_is_object_alive (object);
3584 if (current_collection_generation == GENERATION_NURSERY)
3587 return sgen_major_is_object_alive (object);
/* An object is ready for finalization exactly when it is no longer alive. */
3592 sgen_gc_is_object_ready_for_finalization (void *object)
3594 return !sgen_is_object_alive (object);
/* Returns whether @obj's class derives from CriticalFinalizerObject,
 * i.e. its finalizer must run after ordinary finalizers. */
3598 has_critical_finalizer (MonoObject *obj)
3602 if (!mono_defaults.critical_finalizer_object)
3605 class = ((MonoVTable*)LOAD_VTABLE (obj))->klass;
3607 return mono_class_has_parent_fast (class, mono_defaults.critical_finalizer_object);
/*
 * Append @obj to the appropriate finalization queue: critical finalizers
 * go on critical_fin_list, all others on fin_ready_list.  Also fires the
 * DTrace finalize-enqueue probe when enabled.
 */
3611 sgen_queue_finalization_entry (MonoObject *obj)
3613 FinalizeReadyEntry *entry = sgen_alloc_internal (INTERNAL_MEM_FINALIZE_READY_ENTRY);
3614 gboolean critical = has_critical_finalizer (obj);
3615 entry->object = obj;
/* Push onto the matching singly linked list (critical vs. ready). */
3617 entry->next = critical_fin_list;
3618 critical_fin_list = entry;
3620 entry->next = fin_ready_list;
3621 fin_ready_list = entry;
3624 #ifdef ENABLE_DTRACE
3625 if (G_UNLIKELY (MONO_GC_FINALIZE_ENQUEUE_ENABLED ())) {
3626 int gen = sgen_ptr_in_nursery (obj) ? GENERATION_NURSERY : GENERATION_OLD;
3627 MonoVTable *vt = (MonoVTable*)LOAD_VTABLE (obj);
3628 MONO_GC_FINALIZE_ENQUEUE ((mword)obj, sgen_safe_object_get_size (obj),
3629 vt->klass->name_space, vt->klass->name, gen, critical);
/* Public wrapper for the "alive and part of the current collection" check. */
3635 sgen_object_is_live (void *obj)
3637 return sgen_is_object_alive_and_on_current_collection (obj);
/* LOCKING: requires that the GC lock is held */
/*
 * Unlink ephemeron entries whose array object belongs to an unloaded
 * domain (detected by a NULL vtable) from the global ephemeron_list,
 * freeing the link nodes.  Standard singly-linked-list removal with a
 * trailing @prev pointer.
 */
3642 null_ephemerons_for_domain (MonoDomain *domain)
3644 EphemeronLinkNode *current = ephemeron_list, *prev = NULL;
3647 MonoObject *object = (MonoObject*)current->array;
3649 if (object && !object->vtable) {
3650 EphemeronLinkNode *tmp = current;
/* Splice the node out (head vs. middle of list). */
3653 prev->next = current->next;
3655 ephemeron_list = current->next;
3657 current = current->next;
3658 sgen_free_internal (tmp, INTERNAL_MEM_EPHEMERON_LINK);
3661 current = current->next;
/* LOCKING: requires that the GC lock is held */
/*
 * Walk ephemeron_list: drop arrays that are dead for the current
 * generation; for live arrays, update the array pointer via @copy_func
 * and tombstone every entry whose key is unreachable.
 */
3668 clear_unreachable_ephemerons (ScanCopyContext ctx)
3670 CopyOrMarkObjectFunc copy_func = ctx.copy_func;
3671 GrayQueue *queue = ctx.queue;
3672 EphemeronLinkNode *current = ephemeron_list, *prev = NULL;
3674 Ephemeron *cur, *array_end;
3678 char *object = current->array;
/* Dead ephemeron array: unlink and free its list node. */
3680 if (!sgen_is_object_alive_for_current_gen (object)) {
3681 EphemeronLinkNode *tmp = current;
3683 SGEN_LOG (5, "Dead Ephemeron array at %p", object);
3686 prev->next = current->next;
3688 ephemeron_list = current->next;
3690 current = current->next;
3691 sgen_free_internal (tmp, INTERNAL_MEM_EPHEMERON_LINK);
/* Live array: it may have moved, so refresh the stored pointer. */
3696 copy_func ((void**)&object, queue);
3697 current->array = object;
3699 SGEN_LOG (5, "Clearing unreachable entries for ephemeron array at %p", object);
3701 array = (MonoArray*)object;
3702 cur = mono_array_addr (array, Ephemeron, 0);
3703 array_end = cur + mono_array_length_fast (array);
3704 tombstone = (char*)((MonoVTable*)LOAD_VTABLE (object))->domain->ephemeron_tombstone;
3706 for (; cur < array_end; ++cur) {
3707 char *key = (char*)cur->key;
/* Empty slots and already-tombstoned entries are skipped. */
3709 if (!key || key == tombstone)
3712 SGEN_LOG (5, "[%td] key %p (%s) value %p (%s)", cur - mono_array_addr (array, Ephemeron, 0),
3713 key, sgen_is_object_alive_for_current_gen (key) ? "reachable" : "unreachable",
3714 cur->value, cur->value && sgen_is_object_alive_for_current_gen (cur->value) ? "reachable" : "unreachable");
/* Dead key: replace it with the domain's tombstone object. */
3716 if (!sgen_is_object_alive_for_current_gen (key)) {
3717 cur->key = tombstone;
3723 current = current->next;
3728 LOCKING: requires that the GC lock is held
3730 Limitations: We scan all ephemerons on every collection since the current design doesn't allow for a simple nursery/mature split.
/*
 * One pass of the ephemeron fixpoint: for every live ephemeron array,
 * copy/mark the key of each entry whose key is reachable, and the value
 * when it wasn't already reachable.  Returns non-zero (@nothing_marked)
 * when the pass marked nothing, i.e. the fixpoint has been reached.
 */
3733 mark_ephemerons_in_range (ScanCopyContext ctx)
3735 CopyOrMarkObjectFunc copy_func = ctx.copy_func;
3736 GrayQueue *queue = ctx.queue;
3737 int nothing_marked = 1;
3738 EphemeronLinkNode *current = ephemeron_list;
3740 Ephemeron *cur, *array_end;
3743 for (current = ephemeron_list; current; current = current->next) {
3744 char *object = current->array;
3745 SGEN_LOG (5, "Ephemeron array at %p", object);
3747 /*It has to be alive*/
3748 if (!sgen_is_object_alive_for_current_gen (object)) {
3749 SGEN_LOG (5, "\tnot reachable");
/* Ensure we scan the array's (possibly moved) copy. */
3753 copy_func ((void**)&object, queue);
3755 array = (MonoArray*)object;
3756 cur = mono_array_addr (array, Ephemeron, 0);
3757 array_end = cur + mono_array_length_fast (array);
3758 tombstone = (char*)((MonoVTable*)LOAD_VTABLE (object))->domain->ephemeron_tombstone;
3760 for (; cur < array_end; ++cur) {
3761 char *key = cur->key;
3763 if (!key || key == tombstone)
3766 SGEN_LOG (5, "[%td] key %p (%s) value %p (%s)", cur - mono_array_addr (array, Ephemeron, 0),
3767 key, sgen_is_object_alive_for_current_gen (key) ? "reachable" : "unreachable",
3768 cur->value, cur->value && sgen_is_object_alive_for_current_gen (cur->value) ? "reachable" : "unreachable");
/* Live key: keep both key and (newly reachable) value alive. */
3770 if (sgen_is_object_alive_for_current_gen (key)) {
3771 char *value = cur->value;
3773 copy_func ((void**)&cur->key, queue);
3775 if (!sgen_is_object_alive_for_current_gen (value))
3777 copy_func ((void**)&cur->value, queue);
3783 SGEN_LOG (5, "Ephemeron run finished. Is it done %d", nothing_marked);
3784 return nothing_marked;
/*
 * Run pending finalizers from fin_ready_list / critical_fin_list until
 * both are empty.  Each iteration removes the entry finalized in the
 * previous iteration, picks the next entry with a non-NULL object
 * (ready list first, then critical), and invokes its finalizer.
 * NOTE(review): locking lines are missing from this excerpt — the list
 * manipulation presumably happens under the GC lock; confirm in the
 * full source.
 */
3788 mono_gc_invoke_finalizers (void)
3790 FinalizeReadyEntry *entry = NULL;
3791 gboolean entry_is_critical = FALSE;
3794 /* FIXME: batch to reduce lock contention */
3795 while (fin_ready_list || critical_fin_list) {
3799 FinalizeReadyEntry **list = entry_is_critical ? &critical_fin_list : &fin_ready_list;
3801 /* We have finalized entry in the last
3802 interation, now we need to remove it from
/* Unlink @entry: either it is the head, or walk to its predecessor. */
3805 *list = entry->next;
3807 FinalizeReadyEntry *e = *list;
3808 while (e->next != entry)
3810 e->next = entry->next;
3812 sgen_free_internal (entry, INTERNAL_MEM_FINALIZE_READY_ENTRY);
3816 /* Now look for the first non-null entry. */
3817 for (entry = fin_ready_list; entry && !entry->object; entry = entry->next)
/* Ready list exhausted: fall back to the critical list. */
3820 entry_is_critical = FALSE;
3822 entry_is_critical = TRUE;
3823 for (entry = critical_fin_list; entry && !entry->object; entry = entry->next)
3828 g_assert (entry->object);
3829 num_ready_finalizers--;
/* Clear entry->object first so a nested collection won't re-finalize it. */
3830 obj = entry->object;
3831 entry->object = NULL;
3832 SGEN_LOG (7, "Finalizing object %p (%s)", obj, safe_name (obj));
3840 g_assert (entry->object == NULL);
3842 /* the object is on the stack so it is pinned */
3843 /*g_print ("Calling finalizer for object: %p (%s)\n", entry->object, safe_name (entry->object));*/
3844 mono_gc_run_finalize (obj, NULL);
/* TRUE if any finalizers (ordinary or critical) are queued to run. */
3851 mono_gc_pending_finalizers (void)
3853 return fin_ready_list || critical_fin_list;
3857 * ######################################################################
3858 * ######## registered roots support
3859 * ######################################################################
3863 * We do not coalesce roots.
/*
 * Register the memory range [@start, @start+@size) as a GC root of
 * @root_type with descriptor @descr.  If @start is already registered
 * (in any root type's hash), the existing record is updated in place —
 * its pinned-ness (descr NULL vs. non-NULL) must not change.  Otherwise a
 * new RootRecord is inserted and the global roots_size is updated.
 */
3866 mono_gc_register_root_inner (char *start, size_t size, void *descr, int root_type)
3868 RootRecord new_root;
3871 for (i = 0; i < ROOT_TYPE_NUM; ++i) {
3872 RootRecord *root = sgen_hash_table_lookup (&roots_hash [i], start);
3873 /* we allow changing the size and the descriptor (for thread statics etc) */
3875 size_t old_size = root->end_root - start;
3876 root->end_root = start + size;
/* A root can't switch between precise (descr) and pinned (no descr). */
3877 g_assert (((root->root_desc != 0) && (descr != NULL)) ||
3878 ((root->root_desc == 0) && (descr == NULL)));
3879 root->root_desc = (mword)descr;
3881 roots_size -= old_size;
3887 new_root.end_root = start + size;
3888 new_root.root_desc = (mword)descr;
3890 sgen_hash_table_replace (&roots_hash [root_type], start, &new_root, NULL);
3893 SGEN_LOG (3, "Added root for range: %p-%p, descr: %p (%d/%d bytes)", start, new_root.end_root, descr, (int)size, (int)roots_size);
/* Public root registration: a NULL descriptor means the range is scanned
 * conservatively (pinned root type), otherwise precisely. */
3900 mono_gc_register_root (char *start, size_t size, void *descr)
3902 return mono_gc_register_root_inner (start, size, descr, descr ? ROOT_TYPE_NORMAL : ROOT_TYPE_PINNED);
/* Register a root whose stores go through the write barrier (card table). */
3906 mono_gc_register_root_wbarrier (char *start, size_t size, void *descr)
3908 return mono_gc_register_root_inner (start, size, descr, ROOT_TYPE_WBARRIER);
/* Remove @addr from every root-type hash and shrink roots_size accordingly.
 * Deregistering an unknown address is a no-op. */
3912 mono_gc_deregister_root (char* addr)
3918 for (root_type = 0; root_type < ROOT_TYPE_NUM; ++root_type) {
3919 if (sgen_hash_table_remove (&roots_hash [root_type], addr, &root))
3920 roots_size -= (root.end_root - addr);
3926 * ######################################################################
3927 * ######## Thread handling (stop/start code)
3928 * ######################################################################
/* Monotonic counter of stop-the-world phases; incremented by the STW code. */
3931 unsigned int sgen_global_stop_count = 0;
/* Generation currently being collected, or -1 when no collection is active. */
3934 sgen_get_current_collection_generation (void)
3936 return current_collection_generation;
/* Install the embedder's GC callbacks (copied by value into gc_callbacks). */
3940 mono_gc_set_gc_callbacks (MonoGCCallbacks *callbacks)
3942 gc_callbacks = *callbacks;
/* Accessor for the installed callback table (pointer to internal storage). */
3946 mono_gc_get_gc_callbacks ()
3948 return &gc_callbacks;
/* Variables holding start/end nursery so it won't have to be passed at every call */
/* Written by scan_thread_data(), read by mono_gc_conservatively_scan_area(). */
3952 static void *scan_area_arg_start, *scan_area_arg_end;
/* Conservatively pin anything in [start, end) that looks like a pointer into
 * the nursery range stashed in scan_area_arg_start/_end. */
3955 mono_gc_conservatively_scan_area (void *start, void *end)
3957 conservatively_pin_objects_from (start, end, scan_area_arg_start, scan_area_arg_end, PIN_TYPE_STACK);
/* Precise-stack-scan callback: copy/mark a single object reference using the
 * per-thread UserCopyOrMarkData stored in TLS by set_user_copy_or_mark_data(). */
3961 mono_gc_scan_object (void *obj)
3963 UserCopyOrMarkData *data = mono_native_tls_get_value (user_copy_or_mark_key);
3964 current_object_ops.copy_or_mark_object (&obj, data->queue);
3969 * Mark from thread stacks and registers.
/*
 * Scan every registered thread's stack (and saved register state) for GC
 * references.  Threads that are dead, GC-disabled, or not seen during the
 * stop-the-world handshake are skipped.  Precise scanning is used when the
 * embedder provided thread_mark_func and conservative marking wasn't forced;
 * otherwise the stack range is pinned conservatively.
 */
3972 scan_thread_data (void *start_nursery, void *end_nursery, gboolean precise, GrayQueue *queue)
3974 SgenThreadInfo *info;
/* Stash the nursery bounds for mono_gc_conservatively_scan_area(). */
3976 scan_area_arg_start = start_nursery;
3977 scan_area_arg_end = end_nursery;
3979 FOREACH_THREAD (info) {
3981 SGEN_LOG (3, "Skipping dead thread %p, range: %p-%p, size: %td", info, info->stack_start, info->stack_end, (char*)info->stack_end - (char*)info->stack_start);
3984 if (info->gc_disabled) {
3985 SGEN_LOG (3, "GC disabled for thread %p, range: %p-%p, size: %td", info, info->stack_start, info->stack_end, (char*)info->stack_end - (char*)info->stack_start);
3989 if (!info->joined_stw) {
3990 SGEN_LOG (3, "Skipping thread not seen in STW %p, range: %p-%p, size: %td", info, info->stack_start, info->stack_end, (char*)info->stack_end - (char*)info->stack_start);
3994 SGEN_LOG (3, "Scanning thread %p, range: %p-%p, size: %td, pinned=%d", info, info->stack_start, info->stack_end, (char*)info->stack_end - (char*)info->stack_start, sgen_get_pinned_count ());
3995 if (!info->thread_is_dying) {
/* Precise scan via the embedder's callback, with TLS context set up so
 * mono_gc_scan_object() can find the gray queue. */
3996 if (gc_callbacks.thread_mark_func && !conservative_stack_mark) {
3997 UserCopyOrMarkData data = { NULL, queue };
3998 set_user_copy_or_mark_data (&data);
3999 gc_callbacks.thread_mark_func (info->runtime_data, info->stack_start, info->stack_end, precise);
4000 set_user_copy_or_mark_data (NULL);
4001 } else if (!precise) {
/* First failure to scan precisely permanently switches to conservative. */
4002 if (!conservative_stack_mark) {
4003 fprintf (stderr, "Precise stack mark not supported - disabling.\n");
4004 conservative_stack_mark = TRUE;
4006 conservatively_pin_objects_from (info->stack_start, info->stack_end, start_nursery, end_nursery, PIN_TYPE_STACK);
/* Saved register state also holds potential references; which field is
 * scanned (ctx vs. regs) is platform-dependent (#ifdef lines not visible). */
4010 if (!info->thread_is_dying && !precise) {
4012 conservatively_pin_objects_from ((void**)&info->ctx, (void**)&info->ctx + ARCH_NUM_REGS,
4013 start_nursery, end_nursery, PIN_TYPE_STACK);
4015 conservatively_pin_objects_from (&info->regs, &info->regs + ARCH_NUM_REGS,
4016 start_nursery, end_nursery, PIN_TYPE_STACK);
4019 } END_FOREACH_THREAD
/* TRUE if @ptr lies on the current thread's stack, using the address of a
 * local as the live stack top and info->stack_end as the base. */
4023 ptr_on_stack (void *ptr)
4025 gpointer stack_start = &stack_start;
4026 SgenThreadInfo *info = mono_thread_info_current ();
4028 if (ptr >= stack_start && ptr < (gpointer)info->stack_end)
/*
 * Per-thread GC registration: initialize @info's TLAB/STW/stack fields,
 * publish it in TLS, determine the thread's stack bounds (via pthread
 * attributes where available, a platform np API on Darwin-style systems,
 * or page-aligning @addr as a fallback), and invoke the embedder's
 * thread-attach callback.
 */
4034 sgen_thread_register (SgenThreadInfo* info, void *addr)
4037 #ifndef HAVE_KW_THREAD
4038 info->tlab_start = info->tlab_next = info->tlab_temp_end = info->tlab_real_end = NULL;
/* The thread must not already be registered. */
4040 g_assert (!mono_native_tls_get_value (thread_info_key));
4041 mono_native_tls_set_value (thread_info_key, info);
4043 sgen_thread_info = info;
4046 #if !defined(__MACH__)
4047 info->stop_count = -1;
4051 info->joined_stw = FALSE;
4052 info->doing_handshake = FALSE;
4053 info->thread_is_dying = FALSE;
4054 info->stack_start = NULL;
4055 info->stopped_ip = NULL;
4056 info->stopped_domain = NULL;
4058 memset (&info->ctx, 0, sizeof (MonoContext));
4060 memset (&info->regs, 0, sizeof (info->regs));
4063 sgen_init_tlab_info (info);
4065 binary_protocol_thread_register ((gpointer)mono_thread_info_get_tid (info));
4067 /* try to get it with attributes first */
4068 #if (defined(HAVE_PTHREAD_GETATTR_NP) || defined(HAVE_PTHREAD_ATTR_GET_NP)) && defined(HAVE_PTHREAD_ATTR_GETSTACK)
4072 pthread_attr_t attr;
4074 #if defined(HAVE_PTHREAD_GETATTR_NP)
/* glibc: fetches the attributes of the already-running thread. */
4076 pthread_getattr_np (pthread_self (), &attr);
4077 #elif defined(HAVE_PTHREAD_ATTR_GET_NP)
/* BSD variant: attr must be initialized before querying. */
4079 pthread_attr_init (&attr);
4080 pthread_attr_get_np (pthread_self (), &attr);
4082 #error Cannot determine which API is needed to retrieve pthread attributes.
4085 pthread_attr_getstack (&attr, &sstart, &size);
4086 info->stack_start_limit = sstart;
4087 info->stack_end = (char*)sstart + size;
4088 pthread_attr_destroy (&attr);
4090 #elif defined(HAVE_PTHREAD_GET_STACKSIZE_NP) && defined(HAVE_PTHREAD_GET_STACKADDR_NP)
4091 info->stack_end = (char*)pthread_get_stackaddr_np (pthread_self ());
4092 info->stack_start_limit = (char*)info->stack_end - pthread_get_stacksize_np (pthread_self ());
4095 /* FIXME: we assume the stack grows down */
/* Fallback: round @addr up to the next page boundary as the stack base. */
4096 gsize stack_bottom = (gsize)addr;
4097 stack_bottom += 4095;
4098 stack_bottom &= ~4095;
4099 info->stack_end = (char*)stack_bottom;
4103 #ifdef HAVE_KW_THREAD
4104 stack_end = info->stack_end;
4107 SGEN_LOG (3, "registered thread %p (%p) stack end %p", info, (gpointer)mono_thread_info_get_tid (info), info->stack_end);
4109 if (gc_callbacks.thread_attach_func)
4110 info->runtime_data = gc_callbacks.thread_attach_func ();
/*
 * Tear down a thread's GC registration: detach it from the runtime if its
 * domain is still alive, mark it dying, spin on the GC lock while
 * cooperating with an in-flight STW handshake (platform-dependent), run
 * the embedder's detach callback, and remove it from the thread set.
 */
4117 sgen_thread_unregister (SgenThreadInfo *p)
4119 /* If a delegate is passed to native code and invoked on a thread we dont
4120 * know about, the jit will register it with mono_jit_thread_attach, but
4121 * we have no way of knowing when that thread goes away. SGen has a TSD
4122 * so we assume that if the domain is still registered, we can detach
4125 if (mono_domain_get ())
4126 mono_thread_detach (mono_thread_current ());
4128 p->thread_is_dying = TRUE;
4131 There is a race condition between a thread finishing executing and been removed
4132 from the GC thread set.
4133 This happens on posix systems when TLS data is been cleaned-up, libpthread will
4134 set the thread_info slot to NULL before calling the cleanup function. This
4135 opens a window in which the thread is registered but has a NULL TLS.
4137 The suspend signal handler needs TLS data to know where to store thread state
4138 data or otherwise it will simply ignore the thread.
4140 This solution works because the thread doing STW will wait until all threads been
4141 suspended handshake back, so there is no race between the doing_hankshake test
4142 and the suspend_thread call.
4144 This is not required on systems that do synchronous STW as those can deal with
4145 the above race at suspend time.
4147 FIXME: I believe we could avoid this by using mono_thread_info_lookup when
4148 mono_thread_info_current returns NULL. Or fix mono_thread_info_lookup to do so.
4150 #if (defined(__MACH__) && MONO_MACH_ARCH_SUPPORTED) || !defined(HAVE_PTHREAD_KILL)
/* Spin until the GC lock is acquired, parking if an STW handshake is active. */
4153 while (!TRYLOCK_GC) {
4154 if (!sgen_park_current_thread_if_doing_handshake (p))
4160 binary_protocol_thread_unregister ((gpointer)mono_thread_info_get_tid (p));
4161 SGEN_LOG (3, "unregister thread %p (%p)", p, (gpointer)mono_thread_info_get_tid (p));
4163 if (gc_callbacks.thread_detach_func) {
4164 gc_callbacks.thread_detach_func (p->runtime_data);
4165 p->runtime_data = NULL;
4168 mono_threads_unregister_current_thread (p);
4174 sgen_thread_attach (SgenThreadInfo *info)
4177 /*this is odd, can we get attached before the gc is inited?*/
4181 if (gc_callbacks.thread_attach_func && !info->runtime_data)
4182 info->runtime_data = gc_callbacks.thread_attach_func ();
4185 mono_gc_register_thread (void *baseptr)
4187 return mono_thread_info_attach (baseptr) != NULL;
4191 * mono_gc_set_stack_end:
4193 * Set the end of the current thread's stack to STACK_END. The stack space between
4194 * STACK_END and the real end of the thread's stack will not be scanned during collections.
4197 mono_gc_set_stack_end (void *stack_end)
4199 SgenThreadInfo *info;
4202 info = mono_thread_info_current ();
/* Only shrinking the scanned region is allowed — assumes a downward-growing stack. */
4204 g_assert (stack_end < info->stack_end);
4205 info->stack_end = stack_end;
4210 #if USE_PTHREAD_INTERCEPT
4214 mono_gc_pthread_create (pthread_t *new_thread, const pthread_attr_t *attr, void *(*start_routine)(void *), void *arg)
4216 return pthread_create (new_thread, attr, start_routine, arg);
4220 mono_gc_pthread_join (pthread_t thread, void **retval)
4222 return pthread_join (thread, retval);
4226 mono_gc_pthread_detach (pthread_t thread)
4228 return pthread_detach (thread);
4232 mono_gc_pthread_exit (void *retval)
4234 mono_thread_info_dettach ();
4235 pthread_exit (retval);
4238 #endif /* USE_PTHREAD_INTERCEPT */
4241 * ######################################################################
4242 * ######## Write barriers
4243 * ######################################################################
4247 * Note: the write barriers first do the needed GC work and then do the actual store:
4248 * this way the value is visible to the conservative GC scan after the write barrier
4249 * itself. If a GC interrupts the barrier in the middle, value will be kept alive by
4250 * the conservative scan, otherwise by the remembered set scan.
/*
 * Write barrier for a reference store into a field of OBJ.
 * A store whose destination is itself in the nursery needs no remembered-set
 * entry, so it is performed directly; otherwise the store is handed to the
 * active remset implementation, which records it and performs the store.
 */
4253 mono_gc_wbarrier_set_field (MonoObject *obj, gpointer field_ptr, MonoObject* value)
4255 HEAVY_STAT (++stat_wbarrier_set_field);
4256 if (ptr_in_nursery (field_ptr)) {
/* Destination lives in the nursery: a plain store suffices. */
4257 *(void**)field_ptr = value;
4260 SGEN_LOG (8, "Adding remset at %p", field_ptr);
4262 binary_protocol_wbarrier (field_ptr, value, value->vtable);
4264 remset.wbarrier_set_field (obj, field_ptr, value);
/*
 * Write barrier for a reference store into an array slot of ARR.
 * Mirrors mono_gc_wbarrier_set_field: nursery destinations are stored
 * directly, everything else goes through the remset implementation.
 */
4268 mono_gc_wbarrier_set_arrayref (MonoArray *arr, gpointer slot_ptr, MonoObject* value)
4270 HEAVY_STAT (++stat_wbarrier_set_arrayref);
4271 if (ptr_in_nursery (slot_ptr)) {
/* Destination slot is in the nursery: plain store, no remset needed. */
4272 *(void**)slot_ptr = value;
4275 SGEN_LOG (8, "Adding remset at %p", slot_ptr);
4277 binary_protocol_wbarrier (slot_ptr, value, value->vtable);
4279 remset.wbarrier_set_arrayref (arr, slot_ptr, value);
/*
 * Write barrier for copying COUNT reference slots from SRC_PTR to DEST_PTR.
 * When the destination is in the nursery (or COUNT is not positive) the copy
 * is done directly with no remembered-set work; otherwise the remset
 * implementation records and performs the copy.
 */
4283 mono_gc_wbarrier_arrayref_copy (gpointer dest_ptr, gpointer src_ptr, int count)
4285 HEAVY_STAT (++stat_wbarrier_arrayref_copy);
4286 /*This check can be done without taking a lock since dest_ptr array is pinned*/
4287 if (ptr_in_nursery (dest_ptr) || count <= 0) {
4288 mono_gc_memmove (dest_ptr, src_ptr, count * sizeof (gpointer));
4292 #ifdef SGEN_BINARY_PROTOCOL
/* Log every copied slot to the binary protocol for offline debugging. */
4295 for (i = 0; i < count; ++i) {
4296 gpointer dest = (gpointer*)dest_ptr + i;
4297 gpointer obj = *((gpointer*)src_ptr + i);
4299 binary_protocol_wbarrier (dest, obj, (gpointer)LOAD_VTABLE (obj));
4304 remset.wbarrier_arrayref_copy (dest_ptr, src_ptr, count);
4307 static char *found_obj;
4310 find_object_for_ptr_callback (char *obj, size_t size, void *user_data)
4312 char *ptr = user_data;
4314 if (ptr >= obj && ptr < obj + size) {
4315 g_assert (!found_obj);
4320 /* for use in the debugger */
4321 char* find_object_for_ptr (char *ptr);
4323 find_object_for_ptr (char *ptr)
4325 if (ptr >= nursery_section->data && ptr < nursery_section->end_data) {
4327 sgen_scan_area_with_callback (nursery_section->data, nursery_section->end_data,
4328 find_object_for_ptr_callback, ptr, TRUE);
4334 sgen_los_iterate_objects (find_object_for_ptr_callback, ptr);
4339 * Very inefficient, but this is debugging code, supposed to
4340 * be called from gdb, so we don't care.
4343 major_collector.iterate_objects (TRUE, TRUE, find_object_for_ptr_callback, ptr);
/*
 * Generic write barrier for a reference store at PTR that has already been
 * performed by the caller ("nostore"): only the remembered-set bookkeeping is
 * done here.  Stores whose location is in the nursery or on the stack need no
 * remset entry; old->old stores are also skipped unless a concurrent
 * collection is in progress (the concurrent collector needs those recorded).
 */
4348 mono_gc_wbarrier_generic_nostore (gpointer ptr)
4352 HEAVY_STAT (++stat_wbarrier_generic_store);
4354 #ifdef XDOMAIN_CHECKS_IN_WBARRIER
4355 /* FIXME: ptr_in_heap must be called with the GC lock held */
4356 if (xdomain_checks && *(MonoObject**)ptr && ptr_in_heap (ptr)) {
4357 char *start = find_object_for_ptr (ptr);
4358 MonoObject *value = *(MonoObject**)ptr;
4362 MonoObject *obj = (MonoObject*)start;
4363 if (obj->vtable->domain != value->vtable->domain)
4364 g_assert (is_xdomain_ref_allowed (ptr, start, obj->vtable->domain));
4370 obj = *(gpointer*)ptr;
4372 binary_protocol_wbarrier (ptr, obj, (gpointer)LOAD_VTABLE (obj));
/* Nursery and stack locations are always scanned; no remset entry needed. */
4374 if (ptr_in_nursery (ptr) || ptr_on_stack (ptr)) {
4375 SGEN_LOG (8, "Skipping remset at %p", ptr);
4380 * We need to record old->old pointer locations for the
4381 * concurrent collector.
4383 if (!ptr_in_nursery (obj) && !concurrent_collection_in_progress) {
4384 SGEN_LOG (8, "Skipping remset at %p", ptr);
4388 SGEN_LOG (8, "Adding remset at %p", ptr);
4390 remset.wbarrier_generic_nostore (ptr);
/*
 * Generic write barrier that also performs the store: writes VALUE to PTR,
 * then runs the nostore barrier only when VALUE is a nursery object (stores
 * of old-generation or NULL values need no remset entry here).  The
 * sgen_dummy_use keeps VALUE live/visible for a conservative scan until the
 * barrier has completed.
 */
4394 mono_gc_wbarrier_generic_store (gpointer ptr, MonoObject* value)
4396 SGEN_LOG (8, "Wbarrier store at %p to %p (%s)", ptr, value, value ? safe_name (value) : "null");
4397 *(void**)ptr = value;
4398 if (ptr_in_nursery (value))
4399 mono_gc_wbarrier_generic_nostore (ptr);
4400 sgen_dummy_use (value);
4403 void mono_gc_wbarrier_value_copy_bitmap (gpointer _dest, gpointer _src, int size, unsigned bitmap)
4405 mword *dest = _dest;
4410 mono_gc_wbarrier_generic_store (dest, (MonoObject*)*src);
4415 size -= SIZEOF_VOID_P;
4420 #ifdef SGEN_BINARY_PROTOCOL
4422 #define HANDLE_PTR(ptr,obj) do { \
4423 gpointer o = *(gpointer*)(ptr); \
4425 gpointer d = ((char*)dest) + ((char*)(ptr) - (char*)(obj)); \
4426 binary_protocol_wbarrier (d, o, (gpointer) LOAD_VTABLE (o)); \
4431 scan_object_for_binary_protocol_copy_wbarrier (gpointer dest, char *start, mword desc)
4433 #define SCAN_OBJECT_NOVTABLE
4434 #include "sgen-scan-object.h"
/*
 * Write barrier for copying COUNT value-type instances of class KLASS from
 * SRC to DEST.  If the destination is in the nursery/on the stack, or the
 * value type contains no references, a plain memmove is enough; otherwise the
 * remset implementation records and performs the copy.
 */
4439 mono_gc_wbarrier_value_copy (gpointer dest, gpointer src, int count, MonoClass *klass)
4441 HEAVY_STAT (++stat_wbarrier_value_copy);
4442 g_assert (klass->valuetype);
4444 SGEN_LOG (8, "Adding value remset at %p, count %d, descr %p for class %s (%p)", dest, count, klass->gc_descr, klass->name, klass);
4446 if (ptr_in_nursery (dest) || ptr_on_stack (dest) || !SGEN_CLASS_HAS_REFERENCES (klass)) {
4447 size_t element_size = mono_class_value_size (klass, NULL);
4448 size_t size = count * element_size;
4449 mono_gc_memmove (dest, src, size);
4453 #ifdef SGEN_BINARY_PROTOCOL
4455 size_t element_size = mono_class_value_size (klass, NULL);
4457 for (i = 0; i < count; ++i) {
/* The sizeof (MonoObject) adjustment biases SRC so the scan code, which
 * expects an object start (header included), sees the right offsets. */
4458 scan_object_for_binary_protocol_copy_wbarrier ((char*)dest + i * element_size,
4459 (char*)src + i * element_size - sizeof (MonoObject),
4460 (mword) klass->gc_descr);
4465 remset.wbarrier_value_copy (dest, src, count, klass);
4469 * mono_gc_wbarrier_object_copy:
4471 * Write barrier to call when obj is the result of a clone or copy of an object.
/*
 * Copies the payload of SRC (everything past the MonoObject header) into OBJ.
 * Nursery/stack destinations take the direct memmove path; otherwise the
 * remset implementation records and performs the copy.
 */
4474 mono_gc_wbarrier_object_copy (MonoObject* obj, MonoObject *src)
4478 HEAVY_STAT (++stat_wbarrier_object_copy);
4480 if (ptr_in_nursery (obj) || ptr_on_stack (obj)) {
4481 size = mono_object_class (obj)->instance_size;
/* Copy only the instance data, skipping the object header. */
4482 mono_gc_memmove ((char*)obj + sizeof (MonoObject), (char*)src + sizeof (MonoObject),
4483 size - sizeof (MonoObject));
4487 #ifdef SGEN_BINARY_PROTOCOL
4488 scan_object_for_binary_protocol_copy_wbarrier (obj, (char*)src, (mword) src->vtable->gc_descr);
4491 remset.wbarrier_object_copy (obj, src);
4496 * ######################################################################
4497 * ######## Other mono public interface functions.
4498 * ######################################################################
4501 #define REFS_SIZE 128
4504 MonoGCReferences callback;
4508 MonoObject *refs [REFS_SIZE];
4509 uintptr_t offsets [REFS_SIZE];
4513 #define HANDLE_PTR(ptr,obj) do { \
4515 if (hwi->count == REFS_SIZE) { \
4516 hwi->callback ((MonoObject*)start, mono_object_class (start), hwi->called? 0: size, hwi->count, hwi->refs, hwi->offsets, hwi->data); \
4520 hwi->offsets [hwi->count] = (char*)(ptr)-(char*)start; \
4521 hwi->refs [hwi->count++] = *(ptr); \
4526 collect_references (HeapWalkInfo *hwi, char *start, size_t size)
4528 #include "sgen-scan-object.h"
4532 walk_references (char *start, size_t size, void *data)
4534 HeapWalkInfo *hwi = data;
4537 collect_references (hwi, start, size);
4538 if (hwi->count || !hwi->called)
4539 hwi->callback ((MonoObject*)start, mono_object_class (start), hwi->called? 0: size, hwi->count, hwi->refs, hwi->offsets, hwi->data);
4543 * mono_gc_walk_heap:
4544 * @flags: flags for future use
4545 * @callback: a function pointer called for each object in the heap
4546 * @data: a user data pointer that is passed to callback
4548 * This function can be used to iterate over all the live objects in the heap:
4549 * for each object, @callback is invoked, providing info about the object's
4550 * location in memory, its class, its size and the objects it references.
4551 * For each referenced object it's offset from the object address is
4552 * reported in the offsets array.
4553 * The object references may be buffered, so the callback may be invoked
4554 * multiple times for the same object: in all but the first call, the size
4555 * argument will be zero.
4556 * Note that this function can be only called in the #MONO_GC_EVENT_PRE_START_WORLD
4557 * profiler event handler.
4559 * Returns: a non-zero value if the GC doesn't support heap walking
4562 mono_gc_walk_heap (int flags, MonoGCReferences callback, void *data)
4567 hwi.callback = callback;
4570 sgen_clear_nursery_fragments ();
4571 sgen_scan_area_with_callback (nursery_section->data, nursery_section->end_data, walk_references, &hwi, FALSE);
4573 major_collector.iterate_objects (TRUE, TRUE, walk_references, &hwi);
4574 sgen_los_iterate_objects (walk_references, &hwi);
4580 mono_gc_collect (int generation)
4585 sgen_perform_collection (0, generation, "user request", TRUE);
4590 mono_gc_max_generation (void)
4596 mono_gc_collection_count (int generation)
4598 if (generation == 0)
4599 return stat_minor_gcs;
4600 return stat_major_gcs;
4604 mono_gc_get_used_size (void)
4608 tot = los_memory_usage;
4609 tot += nursery_section->next_data - nursery_section->data;
4610 tot += major_collector.get_used_size ();
4611 /* FIXME: account for pinned objects */
4617 mono_gc_get_los_limit (void)
4619 return MAX_SMALL_OBJ_SIZE;
4623 mono_gc_user_markers_supported (void)
4629 mono_object_is_alive (MonoObject* o)
4635 mono_gc_get_generation (MonoObject *obj)
4637 if (ptr_in_nursery (obj))
4643 mono_gc_enable_events (void)
4648 mono_gc_weak_link_add (void **link_addr, MonoObject *obj, gboolean track)
4650 sgen_register_disappearing_link (obj, link_addr, track, FALSE);
4654 mono_gc_weak_link_remove (void **link_addr, gboolean track)
4656 sgen_register_disappearing_link (NULL, link_addr, track, FALSE);
4660 mono_gc_weak_link_get (void **link_addr)
4662 void * volatile *link_addr_volatile;
4666 link_addr_volatile = link_addr;
4667 ptr = (void*)*link_addr_volatile;
4669 * At this point we have a hidden pointer. If the GC runs
4670 * here, it will not recognize the hidden pointer as a
4671 * reference, and if the object behind it is not referenced
4672 * elsewhere, it will be freed. Once the world is restarted
4673 * we reveal the pointer, giving us a pointer to a freed
4674 * object. To make sure we don't return it, we load the
4675 * hidden pointer again. If it's still the same, we can be
4676 * sure the object reference is valid.
4679 obj = (MonoObject*) REVEAL_POINTER (ptr);
4683 mono_memory_barrier ();
4686 * During the second bridge processing step the world is
4687 * running again. That step processes all weak links once
4688 * more to null those that refer to dead objects. Before that
4689 * is completed, those links must not be followed, so we
4690 * conservatively wait for bridge processing when any weak
4691 * link is dereferenced.
4693 if (G_UNLIKELY (bridge_processing_in_progress))
4694 mono_gc_wait_for_bridge_processing ();
4696 if ((void*)*link_addr_volatile != ptr)
4703 mono_gc_ephemeron_array_add (MonoObject *obj)
4705 EphemeronLinkNode *node;
4709 node = sgen_alloc_internal (INTERNAL_MEM_EPHEMERON_LINK);
4714 node->array = (char*)obj;
4715 node->next = ephemeron_list;
4716 ephemeron_list = node;
4718 SGEN_LOG (5, "Registered ephemeron array %p", obj);
4725 mono_gc_set_allow_synchronous_major (gboolean flag)
4727 if (!major_collector.is_concurrent)
4730 allow_synchronous_major = flag;
4735 mono_gc_invoke_with_gc_lock (MonoGCLockedCallbackFunc func, void *data)
4739 result = func (data);
4740 UNLOCK_INTERRUPTION;
4745 mono_gc_is_gc_thread (void)
4749 result = mono_thread_info_current () != NULL;
4755 is_critical_method (MonoMethod *method)
4757 return mono_runtime_is_critical_method (method) || sgen_is_critical_method (method);
4761 sgen_env_var_error (const char *env_var, const char *fallback, const char *description_format, ...)
4765 va_start (ap, description_format);
4767 fprintf (stderr, "Warning: In environment variable `%s': ", env_var);
4768 vfprintf (stderr, description_format, ap);
4770 fprintf (stderr, " - %s", fallback);
4771 fprintf (stderr, "\n");
4777 parse_double_in_interval (const char *env_var, const char *opt_name, const char *opt, double min, double max, double *result)
4780 double val = strtod (opt, &endptr);
4781 if (endptr == opt) {
4782 sgen_env_var_error (env_var, "Using default value.", "`%s` must be a number.", opt_name);
4785 else if (val < min || val > max) {
4786 sgen_env_var_error (env_var, "Using default value.", "`%s` must be between %.2f - %.2f.", opt_name, min, max);
4794 mono_gc_base_init (void)
4796 MonoThreadInfoCallbacks cb;
4799 char *major_collector_opt = NULL;
4800 char *minor_collector_opt = NULL;
4802 glong soft_limit = 0;
4806 gboolean debug_print_allowance = FALSE;
4807 double allowance_ratio = 0, save_target = 0;
4808 gboolean have_split_nursery = FALSE;
4809 gboolean cement_enabled = TRUE;
4812 result = InterlockedCompareExchange (&gc_initialized, -1, 0);
4815 /* already inited */
4818 /* being inited by another thread */
4822 /* we will init it */
4825 g_assert_not_reached ();
4827 } while (result != 0);
4829 LOCK_INIT (gc_mutex);
4831 pagesize = mono_pagesize ();
4832 gc_debug_file = stderr;
4834 cb.thread_register = sgen_thread_register;
4835 cb.thread_unregister = sgen_thread_unregister;
4836 cb.thread_attach = sgen_thread_attach;
4837 cb.mono_method_is_critical = (gpointer)is_critical_method;
4839 cb.mono_gc_pthread_create = (gpointer)mono_gc_pthread_create;
4842 mono_threads_init (&cb, sizeof (SgenThreadInfo));
4844 LOCK_INIT (sgen_interruption_mutex);
4845 LOCK_INIT (pin_queue_mutex);
4847 init_user_copy_or_mark_key ();
4849 if ((env = getenv (MONO_GC_PARAMS_NAME))) {
4850 opts = g_strsplit (env, ",", -1);
4851 for (ptr = opts; *ptr; ++ptr) {
4853 if (g_str_has_prefix (opt, "major=")) {
4854 opt = strchr (opt, '=') + 1;
4855 major_collector_opt = g_strdup (opt);
4856 } else if (g_str_has_prefix (opt, "minor=")) {
4857 opt = strchr (opt, '=') + 1;
4858 minor_collector_opt = g_strdup (opt);
4866 sgen_init_internal_allocator ();
4867 sgen_init_nursery_allocator ();
4869 sgen_register_fixed_internal_mem_type (INTERNAL_MEM_SECTION, SGEN_SIZEOF_GC_MEM_SECTION);
4870 sgen_register_fixed_internal_mem_type (INTERNAL_MEM_FINALIZE_READY_ENTRY, sizeof (FinalizeReadyEntry));
4871 sgen_register_fixed_internal_mem_type (INTERNAL_MEM_GRAY_QUEUE, sizeof (GrayQueueSection));
4872 sgen_register_fixed_internal_mem_type (INTERNAL_MEM_EPHEMERON_LINK, sizeof (EphemeronLinkNode));
4874 #ifndef HAVE_KW_THREAD
4875 mono_native_tls_alloc (&thread_info_key, NULL);
4879 * This needs to happen before any internal allocations because
4880 * it inits the small id which is required for hazard pointer
4885 mono_thread_info_attach (&dummy);
4887 if (!minor_collector_opt) {
4888 sgen_simple_nursery_init (&sgen_minor_collector);
4890 if (!strcmp (minor_collector_opt, "simple")) {
4892 sgen_simple_nursery_init (&sgen_minor_collector);
4893 } else if (!strcmp (minor_collector_opt, "split")) {
4894 sgen_split_nursery_init (&sgen_minor_collector);
4895 have_split_nursery = TRUE;
4897 sgen_env_var_error (MONO_GC_PARAMS_NAME, "Using `simple` instead.", "Unknown minor collector `%s'.", minor_collector_opt);
4898 goto use_simple_nursery;
4902 if (!major_collector_opt || !strcmp (major_collector_opt, "marksweep")) {
4903 use_marksweep_major:
4904 sgen_marksweep_init (&major_collector);
4905 } else if (!major_collector_opt || !strcmp (major_collector_opt, "marksweep-fixed")) {
4906 sgen_marksweep_fixed_init (&major_collector);
4907 } else if (!major_collector_opt || !strcmp (major_collector_opt, "marksweep-par")) {
4908 sgen_marksweep_par_init (&major_collector);
4909 } else if (!major_collector_opt || !strcmp (major_collector_opt, "marksweep-fixed-par")) {
4910 sgen_marksweep_fixed_par_init (&major_collector);
4911 } else if (!major_collector_opt || !strcmp (major_collector_opt, "marksweep-conc")) {
4912 sgen_marksweep_conc_init (&major_collector);
4914 sgen_env_var_error (MONO_GC_PARAMS_NAME, "Using `marksweep` instead.", "Unknown major collector `%s'.", major_collector_opt);
4915 goto use_marksweep_major;
4918 if (have_split_nursery && major_collector.is_parallel) {
4919 sgen_env_var_error (MONO_GC_PARAMS_NAME, "Disabling split minor collector.", "`minor=split` is not supported with the parallel collector yet.");
4920 have_split_nursery = FALSE;
4923 num_workers = mono_cpu_count ();
4924 g_assert (num_workers > 0);
4925 if (num_workers > 16)
4928 ///* Keep this the default for now */
4929 /* Precise marking is broken on all supported targets. Disable until fixed. */
4930 conservative_stack_mark = TRUE;
4932 sgen_nursery_size = DEFAULT_NURSERY_SIZE;
4935 gboolean usage_printed = FALSE;
4937 for (ptr = opts; *ptr; ++ptr) {
4939 if (!strcmp (opt, ""))
4941 if (g_str_has_prefix (opt, "major="))
4943 if (g_str_has_prefix (opt, "minor="))
4945 if (g_str_has_prefix (opt, "max-heap-size=")) {
4946 glong max_heap_candidate = 0;
4947 opt = strchr (opt, '=') + 1;
4948 if (*opt && mono_gc_parse_environment_string_extract_number (opt, &max_heap_candidate)) {
4949 max_heap = (max_heap_candidate + mono_pagesize () - 1) & ~(glong)(mono_pagesize () - 1);
4950 if (max_heap != max_heap_candidate)
4951 sgen_env_var_error (MONO_GC_PARAMS_NAME, "Rounding up.", "`max-heap-size` size must be a multiple of %d.", mono_pagesize ());
4953 sgen_env_var_error (MONO_GC_PARAMS_NAME, NULL, "`max-heap-size` must be an integer.");
4957 if (g_str_has_prefix (opt, "soft-heap-limit=")) {
4958 opt = strchr (opt, '=') + 1;
4959 if (*opt && mono_gc_parse_environment_string_extract_number (opt, &soft_limit)) {
4960 if (soft_limit <= 0) {
4961 sgen_env_var_error (MONO_GC_PARAMS_NAME, NULL, "`soft-heap-limit` must be positive.");
4965 sgen_env_var_error (MONO_GC_PARAMS_NAME, NULL, "`soft-heap-limit` must be an integer.");
4969 if (g_str_has_prefix (opt, "workers=")) {
4972 if (!major_collector.is_parallel) {
4973 sgen_env_var_error (MONO_GC_PARAMS_NAME, "Ignoring.", "The `workers` option can only be used for parallel collectors.");
4976 opt = strchr (opt, '=') + 1;
4977 val = strtol (opt, &endptr, 10);
4978 if (!*opt || *endptr) {
4979 sgen_env_var_error (MONO_GC_PARAMS_NAME, "Ignoring.", "Cannot parse the `workers` option value.");
4982 if (val <= 0 || val > 16) {
4983 sgen_env_var_error (MONO_GC_PARAMS_NAME, "Using default value.", "The number of `workers` must be in the range 1 to 16.");
4986 num_workers = (int)val;
4989 if (g_str_has_prefix (opt, "stack-mark=")) {
4990 opt = strchr (opt, '=') + 1;
4991 if (!strcmp (opt, "precise")) {
4992 conservative_stack_mark = FALSE;
4993 } else if (!strcmp (opt, "conservative")) {
4994 conservative_stack_mark = TRUE;
4996 sgen_env_var_error (MONO_GC_PARAMS_NAME, conservative_stack_mark ? "Using `conservative`." : "Using `precise`.",
4997 "Invalid value `%s` for `stack-mark` option, possible values are: `precise`, `conservative`.", opt);
5001 if (g_str_has_prefix (opt, "bridge=")) {
5002 opt = strchr (opt, '=') + 1;
5003 sgen_register_test_bridge_callbacks (g_strdup (opt));
5007 if (g_str_has_prefix (opt, "nursery-size=")) {
5009 opt = strchr (opt, '=') + 1;
5010 if (*opt && mono_gc_parse_environment_string_extract_number (opt, &val)) {
5011 #ifdef SGEN_ALIGN_NURSERY
5012 if ((val & (val - 1))) {
5013 sgen_env_var_error (MONO_GC_PARAMS_NAME, "Using default value.", "`nursery-size` must be a power of two.");
5017 if (val < SGEN_MAX_NURSERY_WASTE) {
5018 sgen_env_var_error (MONO_GC_PARAMS_NAME, "Using default value.",
5019 "`nursery-size` must be at least %d bytes.\n", SGEN_MAX_NURSERY_WASTE);
5023 sgen_nursery_size = val;
5024 sgen_nursery_bits = 0;
5025 while (1 << (++ sgen_nursery_bits) != sgen_nursery_size)
5028 sgen_nursery_size = val;
5031 sgen_env_var_error (MONO_GC_PARAMS_NAME, "Using default value.", "`nursery-size` must be an integer.");
5037 if (g_str_has_prefix (opt, "save-target-ratio=")) {
5039 opt = strchr (opt, '=') + 1;
5040 if (parse_double_in_interval (MONO_GC_PARAMS_NAME, "save-target-ratio", opt,
5041 SGEN_MIN_SAVE_TARGET_RATIO, SGEN_MAX_SAVE_TARGET_RATIO, &val)) {
5046 if (g_str_has_prefix (opt, "default-allowance-ratio=")) {
5048 opt = strchr (opt, '=') + 1;
/* FIX: the upper bound passed to parse_double_in_interval was
 * SGEN_MIN_ALLOWANCE_NURSERY_SIZE_RATIO (copy-paste), which rejected every
 * value above the minimum.  The valid interval is [MIN, MAX], matching the
 * usage text printed for this option. */
5049 if (parse_double_in_interval (MONO_GC_PARAMS_NAME, "default-allowance-ratio", opt,
5050 SGEN_MIN_ALLOWANCE_NURSERY_SIZE_RATIO, SGEN_MAX_ALLOWANCE_NURSERY_SIZE_RATIO, &val)) {
5051 allowance_ratio = val;
5055 if (g_str_has_prefix (opt, "allow-synchronous-major=")) {
5056 if (!major_collector.is_concurrent) {
5057 sgen_env_var_error (MONO_GC_PARAMS_NAME, "Ignoring.", "`allow-synchronous-major` is only valid for the concurrent major collector.");
5061 opt = strchr (opt, '=') + 1;
5063 if (!strcmp (opt, "yes")) {
5064 allow_synchronous_major = TRUE;
5065 } else if (!strcmp (opt, "no")) {
5066 allow_synchronous_major = FALSE;
5068 sgen_env_var_error (MONO_GC_PARAMS_NAME, "Using default value.", "`allow-synchronous-major` must be either `yes' or `no'.");
5073 if (!strcmp (opt, "cementing")) {
5074 cement_enabled = TRUE;
5077 if (!strcmp (opt, "no-cementing")) {
5078 cement_enabled = FALSE;
5082 if (major_collector.handle_gc_param && major_collector.handle_gc_param (opt))
5085 if (sgen_minor_collector.handle_gc_param && sgen_minor_collector.handle_gc_param (opt))
5088 sgen_env_var_error (MONO_GC_PARAMS_NAME, "Ignoring.", "Unknown option `%s`.", opt);
5093 fprintf (stderr, "\n%s must be a comma-delimited list of one or more of the following:\n", MONO_GC_PARAMS_NAME);
5094 fprintf (stderr, " max-heap-size=N (where N is an integer, possibly with a k, m or a g suffix)\n");
5095 fprintf (stderr, " soft-heap-limit=n (where N is an integer, possibly with a k, m or a g suffix)\n");
5096 fprintf (stderr, " nursery-size=N (where N is an integer, possibly with a k, m or a g suffix)\n");
5097 fprintf (stderr, " major=COLLECTOR (where COLLECTOR is `marksweep', `marksweep-conc', `marksweep-par', 'marksweep-fixed' or 'marksweep-fixed-par')\n");
5098 fprintf (stderr, " minor=COLLECTOR (where COLLECTOR is `simple' or `split')\n");
5099 fprintf (stderr, " wbarrier=WBARRIER (where WBARRIER is `remset' or `cardtable')\n");
5100 fprintf (stderr, " stack-mark=MARK-METHOD (where MARK-METHOD is 'precise' or 'conservative')\n");
5101 fprintf (stderr, " [no-]cementing\n");
5102 if (major_collector.is_concurrent)
5103 fprintf (stderr, " allow-synchronous-major=FLAG (where FLAG is `yes' or `no')\n");
5104 if (major_collector.print_gc_param_usage)
5105 major_collector.print_gc_param_usage ();
5106 if (sgen_minor_collector.print_gc_param_usage)
5107 sgen_minor_collector.print_gc_param_usage ();
5108 fprintf (stderr, " Experimental options:\n");
5109 fprintf (stderr, " save-target-ratio=R (where R must be between %.2f - %.2f).\n", SGEN_MIN_SAVE_TARGET_RATIO, SGEN_MAX_SAVE_TARGET_RATIO);
5110 fprintf (stderr, " default-allowance-ratio=R (where R must be between %.2f - %.2f).\n", SGEN_MIN_ALLOWANCE_NURSERY_SIZE_RATIO, SGEN_MAX_ALLOWANCE_NURSERY_SIZE_RATIO);
5111 fprintf (stderr, "\n");
5113 usage_printed = TRUE;
5118 if (major_collector.is_parallel)
5119 sgen_workers_init (num_workers);
5120 else if (major_collector.is_concurrent)
5121 sgen_workers_init (1);
5123 if (major_collector_opt)
5124 g_free (major_collector_opt);
5126 if (minor_collector_opt)
5127 g_free (minor_collector_opt);
5131 sgen_cement_init (cement_enabled);
5133 if ((env = getenv (MONO_GC_DEBUG_NAME))) {
5134 gboolean usage_printed = FALSE;
5136 opts = g_strsplit (env, ",", -1);
5137 for (ptr = opts; ptr && *ptr; ptr ++) {
5139 if (!strcmp (opt, ""))
5141 if (opt [0] >= '0' && opt [0] <= '9') {
5142 gc_debug_level = atoi (opt);
5148 char *rf = g_strdup_printf ("%s.%d", opt, GetCurrentProcessId ());
5150 char *rf = g_strdup_printf ("%s.%d", opt, getpid ());
5152 gc_debug_file = fopen (rf, "wb");
5154 gc_debug_file = stderr;
5157 } else if (!strcmp (opt, "print-allowance")) {
5158 debug_print_allowance = TRUE;
5159 } else if (!strcmp (opt, "print-pinning")) {
5160 do_pin_stats = TRUE;
5161 } else if (!strcmp (opt, "verify-before-allocs")) {
5162 verify_before_allocs = 1;
5163 has_per_allocation_action = TRUE;
5164 } else if (g_str_has_prefix (opt, "verify-before-allocs=")) {
5165 char *arg = strchr (opt, '=') + 1;
5166 verify_before_allocs = atoi (arg);
5167 has_per_allocation_action = TRUE;
5168 } else if (!strcmp (opt, "collect-before-allocs")) {
5169 collect_before_allocs = 1;
5170 has_per_allocation_action = TRUE;
5171 } else if (g_str_has_prefix (opt, "collect-before-allocs=")) {
5172 char *arg = strchr (opt, '=') + 1;
5173 has_per_allocation_action = TRUE;
5174 collect_before_allocs = atoi (arg);
5175 } else if (!strcmp (opt, "verify-before-collections")) {
5176 whole_heap_check_before_collection = TRUE;
5177 } else if (!strcmp (opt, "check-at-minor-collections")) {
5178 consistency_check_at_minor_collection = TRUE;
5179 nursery_clear_policy = CLEAR_AT_GC;
5180 } else if (!strcmp (opt, "mod-union-consistency-check")) {
5181 if (!major_collector.is_concurrent) {
5182 sgen_env_var_error (MONO_GC_DEBUG_NAME, "Ignoring.", "`mod-union-consistency-check` only works with concurrent major collector.");
5185 mod_union_consistency_check = TRUE;
5186 } else if (!strcmp (opt, "check-mark-bits")) {
5187 check_mark_bits_after_major_collection = TRUE;
5188 } else if (!strcmp (opt, "check-nursery-pinned")) {
5189 check_nursery_objects_pinned = TRUE;
5190 } else if (!strcmp (opt, "xdomain-checks")) {
5191 xdomain_checks = TRUE;
5192 } else if (!strcmp (opt, "clear-at-gc")) {
5193 nursery_clear_policy = CLEAR_AT_GC;
5194 } else if (!strcmp (opt, "clear-nursery-at-gc")) {
5195 nursery_clear_policy = CLEAR_AT_GC;
5196 } else if (!strcmp (opt, "check-scan-starts")) {
5197 do_scan_starts_check = TRUE;
5198 } else if (!strcmp (opt, "verify-nursery-at-minor-gc")) {
5199 do_verify_nursery = TRUE;
5200 } else if (!strcmp (opt, "check-concurrent")) {
5201 if (!major_collector.is_concurrent) {
5202 sgen_env_var_error (MONO_GC_DEBUG_NAME, "Ignoring.", "`check-concurrent` only works with concurrent major collectors.");
5205 do_concurrent_checks = TRUE;
5206 } else if (!strcmp (opt, "dump-nursery-at-minor-gc")) {
5207 do_dump_nursery_content = TRUE;
5208 } else if (!strcmp (opt, "no-managed-allocator")) {
5209 sgen_set_use_managed_allocator (FALSE);
5210 } else if (!strcmp (opt, "disable-minor")) {
5211 disable_minor_collections = TRUE;
5212 } else if (!strcmp (opt, "disable-major")) {
5213 disable_major_collections = TRUE;
5214 } else if (g_str_has_prefix (opt, "heap-dump=")) {
5215 char *filename = strchr (opt, '=') + 1;
5216 nursery_clear_policy = CLEAR_AT_GC;
5217 heap_dump_file = fopen (filename, "w");
5218 if (heap_dump_file) {
5219 fprintf (heap_dump_file, "<sgen-dump>\n");
5220 do_pin_stats = TRUE;
5222 #ifdef SGEN_BINARY_PROTOCOL
5223 } else if (g_str_has_prefix (opt, "binary-protocol=")) {
5224 char *filename = strchr (opt, '=') + 1;
5225 binary_protocol_init (filename);
5228 sgen_env_var_error (MONO_GC_DEBUG_NAME, "Ignoring.", "Unknown option `%s`.", opt);
5233 fprintf (stderr, "\n%s must be of the format [<l>[:<filename>]|<option>]+ where <l> is a debug level 0-9.\n", MONO_GC_DEBUG_NAME);
5234 fprintf (stderr, "Valid <option>s are:\n");
5235 fprintf (stderr, " collect-before-allocs[=<n>]\n");
5236 fprintf (stderr, " verify-before-allocs[=<n>]\n");
5237 fprintf (stderr, " check-at-minor-collections\n");
5238 fprintf (stderr, " check-mark-bits\n");
5239 fprintf (stderr, " check-nursery-pinned\n");
5240 fprintf (stderr, " verify-before-collections\n");
5241 fprintf (stderr, " verify-nursery-at-minor-gc\n");
5242 fprintf (stderr, " dump-nursery-at-minor-gc\n");
5243 fprintf (stderr, " disable-minor\n");
5244 fprintf (stderr, " disable-major\n");
5245 fprintf (stderr, " xdomain-checks\n");
5246 fprintf (stderr, " check-concurrent\n");
5247 fprintf (stderr, " clear-at-gc\n");
5248 fprintf (stderr, " clear-nursery-at-gc\n");
5249 fprintf (stderr, " check-scan-starts\n");
5250 fprintf (stderr, " no-managed-allocator\n");
5251 fprintf (stderr, " print-allowance\n");
5252 fprintf (stderr, " print-pinning\n");
5253 fprintf (stderr, " heap-dump=<filename>\n");
5254 #ifdef SGEN_BINARY_PROTOCOL
5255 fprintf (stderr, " binary-protocol=<filename>\n");
5257 fprintf (stderr, "\n");
5259 usage_printed = TRUE;
5265 if (major_collector.is_parallel) {
5266 if (heap_dump_file) {
5267 sgen_env_var_error (MONO_GC_DEBUG_NAME, "Disabling.", "Cannot do `heap-dump` with the parallel collector.");
5268 fclose (heap_dump_file);
5269 heap_dump_file = NULL;
5272 sgen_env_var_error (MONO_GC_DEBUG_NAME, "Disabling.", "`print-pinning` is not supported with the parallel collector.");
5273 do_pin_stats = FALSE;
5277 if (major_collector.post_param_init)
5278 major_collector.post_param_init (&major_collector);
5280 sgen_memgov_init (max_heap, soft_limit, debug_print_allowance, allowance_ratio, save_target);
5282 memset (&remset, 0, sizeof (remset));
5284 sgen_card_table_init (&remset);
5290 mono_gc_get_gc_name (void)
5295 static MonoMethod *write_barrier_method;
5298 sgen_is_critical_method (MonoMethod *method)
5300 return (method == write_barrier_method || sgen_is_managed_allocator (method));
5304 sgen_has_critical_method (void)
5306 return write_barrier_method || sgen_has_managed_allocator ();
/*
 * emit_nursery_check:
 * Emits IL (through MonoMethodBuilder) that tests whether the argument
 * pointer -- and, when the major collector is not concurrent, the
 * pointed-to value -- lies in the nursery.  Up to three forward
 * branches meaning "no barrier needed, return" are stored into
 * nursery_check_return_labels [0..2] for the caller to patch.
 *
 * NOTE(review): original line 5337 declares "label_continue1" /
 * "label_continue2" (no underscore) while lines 5343/5348/5354/5355 use
 * "label_continue_1" / "label_continue_2".  The spellings must agree;
 * upstream mono uses the underscored form in the declaration.  Confirm
 * against the full file -- lines are missing from this extract.
 */
5312 emit_nursery_check (MonoMethodBuilder *mb, int *nursery_check_return_labels)
5314 memset (nursery_check_return_labels, 0, sizeof (int) * 3);
5315 #ifdef SGEN_ALIGN_NURSERY
/* Aligned nursery: compare (ptr >> DEFAULT_NURSERY_BITS) against the
 * nursery start shifted the same way -- one compare instead of two. */
5316 // if (ptr_in_nursery (ptr)) return;
5318 * Masking out the bits might be faster, but we would have to use 64 bit
5319 * immediates, which might be slower.
5321 mono_mb_emit_ldarg (mb, 0);
5322 mono_mb_emit_icon (mb, DEFAULT_NURSERY_BITS);
5323 mono_mb_emit_byte (mb, CEE_SHR_UN);
5324 mono_mb_emit_icon (mb, (mword)sgen_get_nursery_start () >> DEFAULT_NURSERY_BITS);
5325 nursery_check_return_labels [0] = mono_mb_emit_branch (mb, CEE_BEQ);
/* With a non-concurrent major collector the barrier can also be skipped
 * when the stored value itself is not in the nursery. */
5327 if (!major_collector.is_concurrent) {
5328 // if (!ptr_in_nursery (*ptr)) return;
5329 mono_mb_emit_ldarg (mb, 0);
5330 mono_mb_emit_byte (mb, CEE_LDIND_I);
5331 mono_mb_emit_icon (mb, DEFAULT_NURSERY_BITS);
5332 mono_mb_emit_byte (mb, CEE_SHR_UN);
5333 mono_mb_emit_icon (mb, (mword)sgen_get_nursery_start () >> DEFAULT_NURSERY_BITS);
5334 nursery_check_return_labels [1] = mono_mb_emit_branch (mb, CEE_BNE_UN);
/* Unaligned nursery: explicit range check against [start, end). */
5337 int label_continue1, label_continue2;
5338 int dereferenced_var;
5340 // if (ptr < (sgen_get_nursery_start ())) goto continue;
5341 mono_mb_emit_ldarg (mb, 0);
5342 mono_mb_emit_ptr (mb, (gpointer) sgen_get_nursery_start ());
5343 label_continue_1 = mono_mb_emit_branch (mb, CEE_BLT);
5345 // if (ptr >= sgen_get_nursery_end ())) goto continue;
5346 mono_mb_emit_ldarg (mb, 0);
5347 mono_mb_emit_ptr (mb, (gpointer) sgen_get_nursery_end ());
5348 label_continue_2 = mono_mb_emit_branch (mb, CEE_BGE);
/* ptr is inside the nursery: no barrier needed, branch to return. */
5351 nursery_check_return_labels [0] = mono_mb_emit_branch (mb, CEE_BR);
5354 mono_mb_patch_branch (mb, label_continue_1);
5355 mono_mb_patch_branch (mb, label_continue_2);
5357 // Dereference and store in local var
5358 dereferenced_var = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
5359 mono_mb_emit_ldarg (mb, 0);
5360 mono_mb_emit_byte (mb, CEE_LDIND_I);
5361 mono_mb_emit_stloc (mb, dereferenced_var);
/* Same value-in-nursery shortcut for non-concurrent collectors, using
 * two explicit range compares in the unaligned case. */
5363 if (!major_collector.is_concurrent) {
5364 // if (*ptr < sgen_get_nursery_start ()) return;
5365 mono_mb_emit_ldloc (mb, dereferenced_var);
5366 mono_mb_emit_ptr (mb, (gpointer) sgen_get_nursery_start ());
5367 nursery_check_return_labels [1] = mono_mb_emit_branch (mb, CEE_BLT);
5369 // if (*ptr >= sgen_get_nursery_end ()) return;
5370 mono_mb_emit_ldloc (mb, dereferenced_var);
5371 mono_mb_emit_ptr (mb, (gpointer) sgen_get_nursery_end ());
5372 nursery_check_return_labels [2] = mono_mb_emit_branch (mb, CEE_BGE);
/*
 * mono_gc_get_write_barrier:
 * Lazily builds (and caches in write_barrier_method) the managed IL
 * version of the generic-store write barrier: return early when the
 * nursery check says no barrier is needed, otherwise mark the card for
 * the stored-to address.
 * NOTE(review): interior lines are missing from this extract (embedded
 * original numbering jumps), so #else/#endif arms and the declaration
 * of `res` are not visible here.
 */
5379 mono_gc_get_write_barrier (void)
5382 MonoMethodBuilder *mb;
5383 MonoMethodSignature *sig;
5384 #ifdef MANAGED_WBARRIER
5385 int i, nursery_check_labels [3];
/* stack_end is reached through a __thread variable offset when the
 * platform supports HAVE_KW_THREAD. */
5387 #ifdef HAVE_KW_THREAD
5388 int stack_end_offset = -1;
5390 MONO_THREAD_VAR_OFFSET (stack_end, stack_end_offset);
5391 g_assert (stack_end_offset != -1);
5395 // FIXME: Maybe create a separate version for ctors (the branch would be
5396 // correctly predicted more times)
/* Fast path: already built.  Read without the lock; publication below
 * is ordered by mono_memory_barrier (). */
5397 if (write_barrier_method)
5398 return write_barrier_method;
5400 /* Create the IL version of mono_gc_barrier_generic_store () */
5401 sig = mono_metadata_signature_alloc (mono_defaults.corlib, 1);
5402 sig->ret = &mono_defaults.void_class->byval_arg;
5403 sig->params [0] = &mono_defaults.int_class->byval_arg;
5405 mb = mono_mb_new (mono_defaults.object_class, "wbarrier", MONO_WRAPPER_WRITE_BARRIER);
5408 #ifdef MANAGED_WBARRIER
5409 emit_nursery_check (mb, nursery_check_labels);
/* Pseudo-code / opcode sketch for the card-marking sequence below: */
5411 addr = sgen_cardtable + ((address >> CARD_BITS) & CARD_MASK)
5415 LDC_PTR sgen_cardtable
5417 address >> CARD_BITS
5421 if (SGEN_HAVE_OVERLAPPING_CARDS) {
5422 LDC_PTR card_table_mask
/* Compute the card byte address and store 1 into it. */
5429 mono_mb_emit_ptr (mb, sgen_cardtable);
5430 mono_mb_emit_ldarg (mb, 0);
5431 mono_mb_emit_icon (mb, CARD_BITS);
5432 mono_mb_emit_byte (mb, CEE_SHR_UN);
5433 #ifdef SGEN_HAVE_OVERLAPPING_CARDS
5434 mono_mb_emit_ptr (mb, (gpointer)CARD_MASK);
5435 mono_mb_emit_byte (mb, CEE_AND);
5437 mono_mb_emit_byte (mb, CEE_ADD);
5438 mono_mb_emit_icon (mb, 1);
5439 mono_mb_emit_byte (mb, CEE_STIND_I1);
/* Patch every "no barrier needed" branch to land on the RET below. */
5442 for (i = 0; i < 3; ++i) {
5443 if (nursery_check_labels [i])
5444 mono_mb_patch_branch (mb, nursery_check_labels [i]);
5446 mono_mb_emit_byte (mb, CEE_RET);
/* Fallback path (no managed barrier): call the C implementation. */
5448 mono_mb_emit_ldarg (mb, 0);
5449 mono_mb_emit_icall (mb, mono_gc_wbarrier_generic_nostore);
5450 mono_mb_emit_byte (mb, CEE_RET);
5453 res = mono_mb_create_method (mb, sig, 16);
/* Double-checked locking: only one thread publishes the method; a loser
 * frees its copy. */
5456 mono_loader_lock ();
5457 if (write_barrier_method) {
5458 /* Already created */
5459 mono_free_method (res);
5461 /* double-checked locking */
5462 mono_memory_barrier ();
5463 write_barrier_method = res;
5465 mono_loader_unlock ();
5467 return write_barrier_method;
/* Human-readable GC name; caller frees the g_strdup'd string. */
5471 mono_gc_get_description (void)
5473 return g_strdup ("sgen");
/* Fragments: the bodies of the following small entry points are mostly
 * missing from this extract -- do not infer behavior from the names. */
5477 mono_gc_set_desktop_mode (void)
5482 mono_gc_is_moving (void)
5488 mono_gc_is_disabled (void)
5494 BOOL APIENTRY mono_gc_dllmain (HMODULE module_handle, DWORD reason, LPVOID reserved)
/* Accessor for the configured nursery clearing policy. */
5501 sgen_get_nursery_clear_policy (void)
5503 return nursery_clear_policy;
/*
 * sgen_get_array_fill_vtable:
 * Returns (building lazily on first call) a fake byte-array vtable used
 * to fill unused heap space with dummy array objects.  The static
 * klass/vtable pair is initialized only when array_fill_vtable is still
 * NULL.
 */
5507 sgen_get_array_fill_vtable (void)
5509 if (!array_fill_vtable) {
5510 static MonoClass klass;
5511 static MonoVTable vtable;
5514 MonoDomain *domain = mono_get_root_domain ();
/* Minimal MonoClass describing a 1-byte-element array named
 * "array_filler_type". */
5517 klass.element_class = mono_defaults.byte_class;
5519 klass.instance_size = sizeof (MonoArray);
5520 klass.sizes.element_size = 1;
5521 klass.name = "array_filler_type";
5523 vtable.klass = &klass;
/* GC descriptor for an array with no reference slots (bmap is declared
 * on a line missing from this extract). */
5525 vtable.gc_descr = mono_gc_make_descr_for_array (TRUE, &bmap, 0, 1);
5528 array_fill_vtable = &vtable;
5530 return array_fill_vtable;
/* ---- Small accessors and debug entry points (fragmentary extract) ---- */
5540 sgen_gc_unlock (void)
/* Forward live-block-range iteration to the major collector. */
5546 sgen_major_collector_iterate_live_block_ranges (sgen_cardtable_block_callback callback)
5548 major_collector.iterate_live_block_ranges (callback);
/* Scan the major heap's card table (FALSE = not the mod-union pass,
 * per the first argument -- confirm against scan_card_table's
 * declaration, which is outside this extract). */
5552 sgen_major_collector_scan_card_table (SgenGrayQueue *queue)
5554 major_collector.scan_card_table (FALSE, queue);
/* Accessor for the active major collector. */
5558 sgen_get_major_collector (void)
5560 return &major_collector;
/* Mark the current thread as skipped (gc_disabled) during GC. */
5563 void mono_gc_set_skip_thread (gboolean skip)
5565 SgenThreadInfo *info = mono_thread_info_current ();
5568 info->gc_disabled = skip;
5573 sgen_get_remset (void)
/* Classes needing bridge processing get the SGEN_GC_BIT_BRIDGE_OBJECT
 * vtable bit; other return paths are missing from this extract. */
5579 mono_gc_get_vtable_bits (MonoClass *class)
5581 if (sgen_need_bridge_processing () && sgen_is_bridge_class (class))
5582 return SGEN_GC_BIT_BRIDGE_OBJECT;
5587 mono_gc_register_altstack (gpointer stack, gint32 stack_size, gpointer altstack, gint32 altstack_size)
/* Debug helper: verify the whole heap under stop-the-world, with
 * nursery fragments cleared first. */
5594 sgen_check_whole_heap_stw (void)
5596 sgen_stop_world (0);
5597 sgen_clear_nursery_fragments ();
5598 sgen_check_whole_heap (FALSE);
5599 sgen_restart_world (0, NULL);
/* Flush buffered object-move events to the profiler and reset the
 * buffer index. */
5603 sgen_gc_event_moves (void)
5605 if (moved_objects_idx) {
5606 mono_profiler_gc_moves (moved_objects, moved_objects_idx);
5607 moved_objects_idx = 0;
5611 #endif /* HAVE_SGEN_GC */